diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000000..84c06b8194f --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +agent/Godeps/_workspace/pkg/ diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 14a45eb0bf1..097e0e6f38d 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -3,22 +3,27 @@ For urgent operational issues, please contact AWS Support directly. https://aws.amazon.com/premiumsupport/ For potential security issues, please do not post it in the Issues. -Instead, please follow the instructions in https://aws.amazon.com/security/vulnerability-reporting/ or email AWS security directly at aws-security@amazon.com. +Instead, please follow the instructions https://aws.amazon.com/security/vulnerability-reporting/ or email AWS security directly at aws-security@amazon.com. Please provide the following information: --> + ### Summary + ### Description + + + ### Expected Behavior - + ### Observed Behavior - + ### Environment Details + ### Supporting Log Snippets + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3e5099002b0..5d777cfab3b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ @@ -8,33 +8,33 @@ Please provide the following information: ### Summary - ### Implementation details - - + ### Testing + New tests cover the changes: ### Description for the changelog - ### Licensing - -This contribution is under the terms of the Apache 2.0 License: + +By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml new file mode 100644 index 00000000000..b7f7a76ba15 --- /dev/null +++ b/.github/workflows/linux.yml @@ -0,0 +1,22 @@ +name: Linux + +on: [pull_request] + +jobs: + unit-tests: + name: Linux unit tests + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: 1.15.4 + - uses: actions/checkout@v2 + with: + submodules: true + path: src/github.com/aws/amazon-ecs-agent + - name: make test + run: | + export GOPATH=$GITHUB_WORKSPACE + cd $GITHUB_WORKSPACE/src/github.com/aws/amazon-ecs-agent + make test-silent + make analyze-cover-profile diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml new file mode 100644 index 00000000000..42160dbff29 --- /dev/null +++ b/.github/workflows/static.yml @@ -0,0 +1,58 @@ +name: Static Checks + +on: [push, pull_request] + +jobs: + static-check: + name: Static Analysis + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: 1.15.4 + - uses: actions/checkout@v2 + with: + path: src/github.com/aws/amazon-ecs-agent + - name: run static checks + run: | + export GOPATH=$GITHUB_WORKSPACE + export PATH=$PATH:$(go env GOPATH)/bin + cd $GITHUB_WORKSPACE/src/github.com/aws/amazon-ecs-agent + make get-deps + make static-check + + init-check: + name: Static Analysis Init + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: 1.15.4 + - uses: actions/checkout@v2 + with: + path: src/github.com/aws/amazon-ecs-agent + - name: run static checks + run: | + export GOPATH=$GITHUB_WORKSPACE + export PATH=$PATH:$(go env GOPATH)/bin + export GO111MODULE=auto + cd $GITHUB_WORKSPACE/src/github.com/aws/amazon-ecs-agent + make get-deps-init + make static-check-init + + x-platform-build: + name: Cross platform build + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: 1.15.4 + - uses: actions/checkout@v2 + with: + submodules: true + path: 
src/github.com/aws/amazon-ecs-agent + - name: make xplatform-build + run: | + export GOPATH=$GITHUB_WORKSPACE + cd $GITHUB_WORKSPACE/src/github.com/aws/amazon-ecs-agent + make xplatform-build diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml new file mode 100644 index 00000000000..2ae6171d4a1 --- /dev/null +++ b/.github/workflows/windows.yml @@ -0,0 +1,23 @@ +name: Windows + +on: [pull_request] + +jobs: + windows-unit-tests: + name: Windows unit tests + runs-on: windows-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: 1.12 + - uses: actions/checkout@v2 + with: + submodules: true + path: src/github.com/aws/amazon-ecs-agent + - name: run tests + working-directory: + run: | + $Env:GOPATH = "$Env:GITHUB_WORKSPACE" + cd "$Env:GITHUB_WORKSPACE" + cd "src/github.com/aws/amazon-ecs-agent" + go test -v -race -tags unit -timeout 40s ./agent/... diff --git a/.gitignore b/.gitignore index 00ed47d8cf9..42faca1b80d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,32 @@ +misc/certs/ca-certificates.crt +out/ +coverage/ +_bin/ +*.exe +*.exe~ +*.zip +*.swp +*.orig +/agent/version/_version.go +.agignore +*.sublime-* +.DS_Store +/misc/windows-iam/devcon* +/misc/pause-container/pause +*-stamp +/.idea/ +/misc/taskmetadata-validator/taskmetadata-validator +/bin +*.iml +cover.out +coverprofile.out /amazon-ecs-init* /BUILDROOT/ /x86_64/ /sources.tar /ecs-init-* -*.swp /ecs.conf +/.deb-done /.rpm-done /.srpm-done /BUILD @@ -14,8 +36,5 @@ /ecs-init.spec /sources.tgz ecs-agent-*.tar -ecs.service +/ecs.service *.log -.idea/ -cover.out -coverprofile.out diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..f618d7e6c2e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "amazon-ecs-cni-plugins"] + path = amazon-ecs-cni-plugins + url = https://github.com/aws/amazon-ecs-cni-plugins.git +[submodule "amazon-vpc-cni-plugins"] + path = amazon-vpc-cni-plugins + url = https://github.com/aws/amazon-vpc-cni-plugins.git diff 
--git a/.travis.yml b/.travis.yml deleted file mode 100644 index 357fa1a3b8e..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go_import_path: github.com/aws/amazon-ecs-init -sudo: false -go: - - 1.15 - -matrix: - include: - - os: linux - script: - - make get-deps - - make static-check - - make test - - make analyze-cover-profile diff --git a/CHANGELOG.md b/CHANGELOG.md index 5cd617079dc..030272918d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,373 +1,601 @@ # Changelog -## 1.51.0-1 -* Cache Agent version 1.51.0 - -## 1.50.3-1 -* Cache Agent version 1.50.3 - -## 1.50.2-1 -* Cache Agent version 1.50.2 - -## 1.50.1-1 -* Cache Agent version 1.50.1 -* Does not restart ECS Agent when it exits with exit code 5 - -## 1.50.0-1 -* Cache Agent version 1.50.0 -* Allows ECS customers to execute interactive commands inside containers. - -## 1.49.0-1 -* Cache Agent version 1.49.0 -* Removes iptable rule that drops packets to port 51678 unconditionally on ecs service stop - -## 1.48.1-1 -* Cache Agent version 1.48.1 - -## 1.48.0-2 -* Cache Agent version 1.48.0 - -## 1.47.0-1 -* Cache Agent version 1.47.0 - -## 1.46.0-1 -* Cache Agent version 1.46.0 - -## 1.45.0-1 -* Cache Agent version 1.45.0 -* Block offhost access to agent's introspection port by default. 
Configurable via env ECS\_ALLOW\_OFFHOST\_INTROSPECTION\_ACCESS - -## 1.44.4-1 -* Cache Agent version 1.44.4 - -## 1.44.3-1 -* Cache Agent version 1.44.3 - -## 1.44.2-1 -* Cache Agent version 1.44.2 - -## 1.44.1-1 -* Cache Agent version 1.44.1 - -## 1.44.0-1 -* Cache Agent version 1.44.0 -* Add support for configuring Agent container logs - -## 1.43.0-2 -* Cache Agent version 1.43.0 - -## 1.42.0-1 -* Cache Agent version 1.42.0 -* Add a flag ECS\_SKIP\_LOCALHOST\_TRAFFIC\_FILTER to allow skipping local traffic filtering - -## 1.41.1-2 -* Drop traffic to 127.0.0.1 that isn't originated from the host - -## 1.41.1-1 -* Cache Agent version 1.41.1 - -## 1.41.0-1 -* Cache Agent version 1.41.0 - -## 1.40.0-1 -* Cache Agent version 1.40.0 - -## 1.39.0-2 -* Cache Agent version 1.39.0 -* Ignore IPv6 disable failure if already disabled - -## 1.38.0-1 -* Cache Agent version 1.38.0 -* Adds support for ECS volume plugin -* Disable ipv6 router advertisements for optimization - -## 1.37.0-2 -* Cache Agent version 1.37.0 -* Add '/etc/alternatives' to mounts - -## 1.36.2-1 -* Cache Agent version 1.36.2 -* update sbin mount point to avoid conflict with Docker >= 19.03.5 - -## 1.36.1-1 -* Cache Agent version 1.36.1 - -## 1.36.0-1 -* Cache Agent version 1.36.0 -* capture a fixed tail of container logs when removing a container - -## 1.35.0-1 -* Cache Agent version 1.35.0 -* Fix bug where stopping agent gracefully would still restart ecs-init - -## 1.33.0-1 -* Cache Agent version 1.33.0 -* Fix destination path in docker socket bind mount to match the one specified using DOCKER\_HOST on Amazon Linux 2 - -## 1.32.1-1 -* Cache Agent version 1.32.1 -* Add the ability to set Agent container's labels - -## 1.32.0-1 -* Cache Agent version 1.32.0 - -## 1.31.0-1 -* Cache Agent version 1.31.0 - -## 1.30.0-1 -* Cache Agent version 1.30.0 - -## 1.29.1-1 -* Cache Agent version 1.29.1 - -## 1.29.0-1 -* Cache Agent version 1.29.0 - -## 1.28.1-2 -* Cache Agent version 1.28.1 -* Use exponential backoff 
when restarting agent - -## 1.28.0-1 -* Cache Agent version 1.28.0 - -## 1.27.0-1 -* Cache Agent version 1.27.0 - -## 1.26.1-1 -* Cache Agent version 1.26.1 - -## 1.26.0-1 -* Cache Agent version 1.26.0 -* Add support for running iptables within agent container - -## 1.25.3-1 -* Cache Agent version 1.25.3 - -## 1.25.2-1 -* Cache Agent version 1.25.2 - -## 1.25.1-1 -* Cache Agent version 1.25.1 -* Update ecr models for private link support - -## 1.25.0-1 -* Cache Agent version 1.25.0 -* Add Nvidia GPU support for p2 and p3 instances - -## 1.24.0-1 -* Cache Agent version 1.24.0 - -## 1.22.0-4 -* Cache ECS agent version 1.22.0 for x86\_64 & ARM -* Support ARM architecture builds - -## 1.22.0-3 -* Rebuild - -## 1.22.0-2 -* Cache Agent version 1.22.0 - -## 1.21.0-1 -* Cache Agent version 1.21.0 -* Support configurable logconfig for Agent container to reduce disk usage -* ECS Agent will use the host's cert store on the Amazon Linux platform - -## 1.20.3-1 -* Cache Agent version 1.20.3 - -## 1.20.2-1 -* Cache Agent version 1.20.2 - -## 1.20.1-1 -* Cache Agent version 1.20.1 - -## 1.20.0-1 -* Cache Agent version 1.20.0 - -## 1.19.1-1 -* Cache Agent version 1.19.1 - -## 1.19.0-1 -* Cache Agent version 1.19.0 - -## 1.18.0-2 -* Spec file cleanups -* Enable builds for both AL1 and AL2 - -## 1.18.0-1 -* Cache Agent version 1.18.0 -* Add support for regional buckets -* Bundle ECS Agent tarball in package -* Download agent based on the partition -* Mount Docker plugin files dir - -## 1.17.3-1 -* Cache Agent version 1.17.3 -* Use s3client instead of httpclient when downloading - -## 1.17.2-1 -* Cache Agent version 1.17.2 - -## 1.17.1-1 -* Cache Agent version 1.17.1 - -## 1.17.0-2 -* Cache Agent version 1.17.0 - -## 1.16.2-1 -* Cache Agent version 1.16.2 -* Add GovCloud endpoint - -## 1.16.1-1 -* Cache Agent version 1.16.1 -* Improve startup behavior when Docker socket doesn't exist yet - -## 1.16.0-1 -* Cache Agent version 1.16.0 - -## 1.15.2-1 -* Cache Agent version 1.15.2 - -## 
1.15.1-1 -* Update ECS Agent version - -## 1.15.0-1 -* Update ECS Agent version - -## 1.14.5-1 -* Update ECS Agent version - -## 1.14.4-1 -* Update ECS Agent version - -## 1.14.2-2 -* Cache Agent version 1.14.2 -* Add functionality for running agent with userns=host when Docker has userns-remap enabled -* Add support for Docker 17.03.1ce - -## 1.14.1-1 -* Cache Agent version 1.14.1 - -## 1.14.0-2 -* Add retry-backoff for pinging the Docker socket when creating the Docker client - -## 1.14.0-1 -* Cache Agent version 1.14.0 - -## 1.13.1-2 -* Update Requires to indicate support for docker <= 1.12.6 - -## 1.13.1-1 -* Cache Agent version 1.13.1 - -## 1.13.0-1 -* Cache Agent version 1.13.0 - -## 1.12.2-1 -* Cache Agent version 1.12.2 - -## 1.12.1-1 -* Cache Agent version 1.12.1 - -## 1.12.0-1 -* Cache Agent version 1.12.0 -* Add netfilter rules to support host network reaching credentials proxy - -## 1.11.1-1 -* Cache Agent version 1.11.1 - -## 1.11.0-1 -* Cache Agent version 1.11.0 -* Add support for Docker 1.11.2 -* Modify iptables and netfilter to support credentials proxy -* Eliminate requirement that /tmp and /var/cache be on the same filesystem -* Start agent with host network mode - -## 1.10.0-1 -* Cache Agent version 1.10.0 -* Add support for Docker 1.11.1 - -## 1.9.0-1 -* Cache Agent version 1.9.0 -* Make awslogs driver available by default - -## 1.8.2-1 -* Cache Agent version 1.8.2 - -## 1.8.1-1 -* Cache Agent version 1.8.1 - -## 1.8.0-1 -* Cache Agent version 1.8.0 - -## 1.7.1-1 -* Cache Agent version 1.7.1 - -## 1.7.0-1 -* Cache Agent version 1.7.0 -* Add support for Docker 1.9.1 - -## 1.6.0-1 -* Cache Agent version 1.6.0 -* Updated source dependencies - -## 1.5.0-1 -* Cache Agent version 1.5.0 -* Improved merge strategy for user-supplied environment variables -* Add default supported logging drivers - -## 1.4.0-2 -* Add support for Docker 1.7.1 - -## 1.4.0-1 -* Cache Agent version 1.4.0 - -## 1.3.1-1 -* Cache Agent version 1.3.1 -* Read Docker endpoint from 
environment variable DOCKER\_HOST if present - -## 1.3.0-1 -* Cache Agent version 1.3.0 - -## 1.2.1-2 -* Cache Agent version 1.2.1 - -## 1.2.0-1 -* Update versioning scheme to match Agent version -* Cache Agent version 1.2.0 -* Mount cgroup and execdriver directories for Telemetry feature - -## 1.0-5 -* Add support for Docker 1.6.2 - -## 1.0-4 -* Properly restart if the ecs-init package is upgraded in isolation - -## 1.0-3 -* Restart on upgrade if already running - -## 1.0-2 -* Cache Agent version 1.1.0 -* Add support for Docker 1.6.0 -* Force cache load on install/upgrade -* Add man page - -## 1.0-1 -* Re-start Agent on non-terminal exit codes -* Enable Agent self-updates -* Cache Agent version 1.0.0 -* Added rollback to cached Agent version for failed updates - -## 0.3-0 -* Migrate to statically-compiled Go binary - -## 0.2-3 -* Test for existing container agent and force remove it - -## 0.2-2 -* Mount data directory for state persistence -* Enable JSON-based configuration - -## 0.2-1 -* Naive update functionality - +## 1.51.0 +* Enhancement - Add configurable agent healthcheck localhost ip env var. [#2834](https://github.com/aws/amazon-ecs-agent/pull/2834) +* Bug - Fix bug that could incorrectly clean up pause container before other containers. [#2838](https://github.com/aws/amazon-ecs-agent/pull/2838) +* Bug - Fix task's network stats by omitting pause container in the network metrics calculation. 
[#2836](https://github.com/aws/amazon-ecs-agent/pull/2836) + +## 1.50.3 +* Enhancement - Eliminate benign docker stats "context canceled" warning messages from logs [#2813](https://github.com/aws/amazon-ecs-agent/pull/2813) +* Bug - Fix bug where pause container was not always cleaned up [#2824](https://github.com/aws/amazon-ecs-agent/pull/2824) + +## 1.50.2 +* Bug - Fix potential deadlock due to seelog's string marshalling of task struct [#2811](https://github.com/aws/amazon-ecs-agent/pull/2811) + +## 1.50.1 +* Enhancement - Implementation of structured logs on top of seelog [#2797](https://github.com/aws/amazon-ecs-agent/pull/2797) +* Bug - Fixed a task status deadlock and pulled container state for cached images when ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT is enabled [#2800](https://github.com/aws/amazon-ecs-agent/pull/2800) + +## 1.50.0 +* Feature - Allows ECS customers to execute interactive commands inside containers [#2798](https://github.com/aws/amazon-ecs-agent/pull/2798) +* Enhancement - Add error responses into TMDEv4 taskWithTags responses [#2789](https://github.com/aws/amazon-ecs-agent/pull/2789) +* Bug - Fixed the number of cpu units the Agent will reserve for the Linux container instances [#2783](https://github.com/aws/amazon-ecs-agent/pull/2783) + +## 1.49.0 +* Enhancement - Allow task metadata endpoint to return metadata for task when some of the container does not have network metadata [#2747](https://github.com/aws/amazon-ecs-agent/pull/2747) +* Enhancement - Improve error and info logging around credentials requests [#2705](https://github.com/aws/amazon-ecs-agent/pull/2705) +* Enhancement - Introduce new environment variable ECS_CONTAINER_CREATE_TIMEOUT to make Docker create timeout configurable. Minimum value is 1m. Default value is 4m. [#2781](https://github.com/aws/amazon-ecs-agent/pull/2781) +* Bug - Add missing error handling in getContainerStatsNotStreamed. 
[#2757](https://github.com/aws/amazon-ecs-agent/pull/2757) + +## 1.48.1 +* Bug - Fix an edge case that can cause container dependency deadlock [#2734](https://github.com/aws/amazon-ecs-agent/pull/2734) +* Bug - Revert the change that adds client token persistence [#2708](https://github.com/aws/amazon-ecs-agent/pull/2708) + +## 1.48.0 +* Enhancement - Docker stop timeout buffer increased from 30s to 2m [#2697](https://github.com/aws/amazon-ecs-agent/pull/2697) +* Enhancement - More informative ENI attachment logs [#2703](https://github.com/aws/amazon-ecs-agent/pull/2703) +* Enhancement - Introduce new environment variable ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT to pull images of dependent containers even before dependsOn condition is satisfied. This feature is turned off by default [#2731](https://github.com/aws/amazon-ecs-agent/pull/2731) +* Enhancement - Add pulled containers metadata to Task Metadata Endpoint V4 [#2731](https://github.com/aws/amazon-ecs-agent/pull/2731) +* Bug - Fix a bug where agent persists RCI client token to avoid being registered as different container instance ARNs [#2708](https://github.com/aws/amazon-ecs-agent/pull/2708) +* Bug - Fix jumbled min & max for engine connection retry delays [#2721](https://github.com/aws/amazon-ecs-agent/pull/2721) + +## 1.47.0 +* Feature - Add support for FSxWindowsFileServerVolumeConfiguration in task definition [#2690](https://github.com/aws/amazon-ecs-agent/pull/2690) +* Bug - Fixed Makefile to use Go1.12 for Agent windows build +[#2688](https://github.com/aws/amazon-ecs-agent/pull/2688) +* Bug - Initialize the logger from the agent’s main() [#2644](https://github.com/aws/amazon-ecs-agent/pull/2644) + +## 1.46.0 +* Enhancement - Use Go 1.15 for Linux platforms and Go 1.12 for Windows platforms [#2653](https://github.com/aws/amazon-ecs-agent/pull/2653) +* Bug - Currently, while polling docker stats, there is no timeout for the API call. So the call could be stuck until the container is stopped. 
Adding poll stats timeout [#2656](https://github.com/aws/amazon-ecs-agent/pull/2656) + +## 1.45.0 +* Feature - ECS metadata for AWS service lens and x-ray. We have added new fields to the TMDEv4 endpoint. Specfically, ContainerARN, LogDriver, LogOptions, and LaunchType [#2623](https://github.com/aws/amazon-ecs-agent/pull/2623) +* Feature - add IPv6 support for task networking [#2646](https://github.com/aws/amazon-ecs-agent/pull/2646) +* Enhancement - Propagate responses to the Amazon ECS Container Agent Log when an error occurs [#2641](https://github.com/aws/amazon-ecs-agent/pull/2641) +* Bug - Fix HTTP response code to TMDE requests when they fail for internal reasons. Previously we returned 400 Bad Request, and will now return 500 Internal Server Error [#2643](https://github.com/aws/amazon-ecs-agent/pull/2643) + +## 1.44.4 +* Bug - Fix a bug where the ECS Agent did not iterate through all the dependencies of a particular container [#2615](https://github.com/aws/amazon-ecs-agent/pull/2615) +* Bug - Fix a bug where the ECS Agent can lose track of containers if it's stopped by SIGKILL instead of SIGTERM [#2609](https://github.com/aws/amazon-ecs-agent/pull/2609) +* Bug - Fix a bug where the a Docker API call could be made with a blank string instead of a Docker ID [#2608](https://github.com/aws/amazon-ecs-agent/pull/2608) +* Bug - Fix a bug where the ECS Agent was expecting ECS_LOGFILE to be present as an environment variable [#2598](https://github.com/aws/amazon-ecs-agent/pull/2598) + +## 1.44.3 +* Bug - Revert Introspection API scope change [#2605](https://github.com/aws/amazon-ecs-agent/pull/2605) +* Bug - Fix a bug where ECS_LOGLEVEL stopped controlling logging level on instance [#2597](https://github.com/aws/amazon-ecs-agent/pull/2597) + +## 1.44.2 +* Bug - Fix Introspection API scope and bind to localhost [#2588](https://github.com/aws/amazon-ecs-agent/pull/2588) +* Enhancement - Make image pull timeout configurable 
[#2565](https://github.com/aws/amazon-ecs-agent/pull/2565) + + +## 1.44.1 +* Bug - Fixes a bug where ENI is attached before Agent starts and there is a delay in acknowledgement of ENI attachment by Agent [#2581](https://github.com/aws/amazon-ecs-agent/pull/2581) +* Bug - Fixes a deadlock scenario when the agent restores the state from its data file and the tasks are using environment files feature [#2580](https://github.com/aws/amazon-ecs-agent/pull/2580) +* Bug - Fixed a bug that can cause stats endpoint to return empty response to container that just starts up [#2578](https://github.com/aws/amazon-ecs-agent/pull/2578) +* Bug - Fixed a bug where parsing logic from env var parsing was introducing error depending on the value [#2573](https://github.com/aws/amazon-ecs-agent/pull/2573) + +## 1.44.0 +* Feature - Add support for customers to configure the destination for the Agent container logs, by setting a Docker-supported logging driver in the Agent config file - [#2548](https://github.com/aws/amazon-ecs-agent/pull/2548) +* Enhancement - Agent's internal state management mechanism is changed from a custom json state file to boltdb. This change is made to reduce its resource consumption especially under high task density/mutation rate - [#2562](https://github.com/aws/amazon-ecs-agent/pull/2562) + +## 1.43.0 +* Feature - Collect network stats for awsvpc network mode and display network rate stats for bridge and awsvpc network mode through v4 metadata endpoint - [#2545](https://github.com/aws/amazon-ecs-agent/pull/2545) + +## 1.42.0 +* Feature - Support for sub second precision in FluentD [#2538](https://github.com/aws/amazon-ecs-agent/pull/2538). 
+* Bug - Fixed a bug that caused configured values for ImageCleanupExclusionList +to be ignored in some situations [#2513](https://github.com/aws/amazon-ecs-agent/pull/2513) + +## 1.41.1 +* Bug - Fixed a bug [#2476](https://github.com/aws/amazon-ecs-agent/issues/2476) where HostPort is not present in ECS Task Metadata Endpoint response with bridge network type [#2495](https://github.com/aws/amazon-ecs-agent/pull/2495) + +## 1.41.0 +* Feature - Add inferentia support [#2458](https://github.com/aws/amazon-ecs-agent/pull/2458) +* Bug - fixes a bug where env file feature would not accept "=", which is the delimiter in the values of a env var [#2487](https://github.com/aws/amazon-ecs-agent/pull/2487) + +## 1.40.0 +* Enhancement - Agent's default stats gathering is changing from docker streaming stats to polling. This should not affect the metrics that customers ultimately see in cloudwatch, but it does affect how the agent gathers the underlying metrics from docker. This change was made for considerable performance gains. 
Customers with high CPU loads may see their cluster utilization increase; this is a good thing because it means the containers are utilizing more of the cluster, and agent/dockerd/containerd are utilizing less [#2452](https://github.com/aws/amazon-ecs-agent/pull/2452) +* Enhancement - Adds a jitter to this so that we don't query docker for every container's state all at the same time [#2444](https://github.com/aws/amazon-ecs-agent/pull/2444) +* Bug - Register custom logger before it gets used to ensure that the formatter is initiated before it is loaded [#2438](https://github.com/aws/amazon-ecs-agent/pull/2438) + +## 1.39.0 +* Feature - Add support for bulk loading env vars through environmentFiles field in task definition [#2420](https://github.com/aws/amazon-ecs-agent/pull/2420) +* Feature - Add v4 task metadata endpoint, which includes additional network information compared to v3 [#2396](https://github.com/aws/amazon-ecs-agent/pull/2396) +* Bug - Fixed an edge case that can cause task failed to start when using container ordering success condition [#2404](https://github.com/aws/amazon-ecs-agent/pull/2404). 
+ +## 1.38.0 +* Feature - add integration with EFS's access point and IAM authorization features; support EFS volume for task in awsvpc network mode +* Enhancement - adding Runtime ID of container to agent logs [#2399](https://github.com/aws/amazon-ecs-agent/pull/2399) + +## 1.37.0 +* Feature - additional parameters allowed when specifying secretsmanager secret [#2358](https://github.com/aws/amazon-ecs-agent/pull/2358) +* Bug - fixed a bug where Firelens container could not use config file from S3 bucket in us-east-1 [#2356](https://github.com/aws/amazon-ecs-agent/pull/2356) + +## 1.36.2 +* Bug - fix windows logfile writing [#2347](https://github.com/aws/amazon-ecs-agent/pull/2347) +* Bug - update sbin mount point to avoid conflict with Docker >= 19.03.5 [#2345](https://github.com/aws/amazon-ecs-agent/pull/2345) + +## 1.36.1 +* Bug - Fixed potential file descriptor leak with context logger [#2337](https://github.com/aws/amazon-ecs-agent/pull/2337) + +## 1.36.0 +* Feature - structured logs and logfile rollover features [#2311](https://github.com/aws/amazon-ecs-agent/pull/2311), [#2319](https://github.com/aws/amazon-ecs-agent/pull/2319), [#2330](https://github.com/aws/amazon-ecs-agent/pull/2330) + +## 1.35.0 +* Feature - EFS Preview [#2301](https://github.com/aws/amazon-ecs-agent/pull/2301) +* Bug - Load pause container for use by PID/IPC even if task networking is disabled [#2300](https://github.com/aws/amazon-ecs-agent/pull/2300) +* Bug - Fixed a race condition that might cause the agent to crash during container creation [#2299](https://github.com/aws/amazon-ecs-agent/pull/2299) + +## 1.34.0 +* Feature - Add Windows gMSA (group Managed Service Account) support on ECS. +* Bug - Binding metadata directory in Z mode for selinux enabled docker, which enables read access to the metadata files from container processes. 
[#2273](https://github.com/aws/amazon-ecs-agent/pull/2273) + +## 1.33.0 +* Feature - Agent performs a sync between task state on the instance and on the backend everytime Agent establishes a connection with the backend. This ensures that task state is as expected on the instance after the instance reconnects with the instance after a disconnection [#2191](https://github.com/aws/amazon-ecs-agent/pull/2191) +* Enhancement - Update Docker LoadImage API timeout based on benchmarking test [#2269](https://github.com/aws/amazon-ecs-agent/pull/2269) +* Enhancement - Enable the detection of health status of ECS Agent using HEALTHCHECK directive [#2260](https://github.com/aws/amazon-ecs-agent/pull/2260) +* Enhancement - Add `NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE` flag which when set allows the user to set the minimum time interval between when a non ECS image is created and when it can be considered for automated image cleanup [#2251](https://github.com/aws/amazon-ecs-agent/pull/2251) + +## 1.32.1 +* Enhancement - Add `ECS_ENABLE_MEMORY_UNBOUNDED_WINDOWS_WORKAROUND` flag which when set ignores the memory reservation +parameter along with memory bounded tasks in windows [@julienduchesne](https://github.com/julienduchesn) [#2239](https://github.com/aws/amazon-ecs-agent/pull/2239) +* Bug - Fixed a bug when config attribute in hostConfig is nil when starting a task [#2249](https://github.com/aws/amazon-ecs-agent/pull/2249) +* Bug - Fixed a bug where start container failed with EOF and container is started anyways [#2245](https://github.com/aws/amazon-ecs-agent/pull/2245) +* Bug - Fixed a bug where incorrect error type was detected when the `vpc-id` could not be detected on ec2-classic [#2243](https://github.com/aws/amazon-ecs-agent/pull/2243) +* Bug - Fixed a bug where Agent did not reopen Docker event stream when it gets EOF/UnexpectedEOF error [#2240](https://github.com/aws/amazon-ecs-agent/pull/2240) + +## 1.32.0 +* Feature - Add support for automatic spot instance draining 
[#2205](https://github.com/aws/amazon-ecs-agent/pull/2205) +* Bug - Fixed a bug where the agent might crash if it's restarted right after launching a task in awsvpc network mode [#2219](https://github.com/aws/amazon-ecs-agent/pull/2219) + +## 1.31.0 +* Feature - Add support for showing container's ImageDigest Pulled from ECR in ECS DescribeTasks [#2201](https://github.com/aws/amazon-ecs-agent/pull/2201) +* Enhancement - Add more functionalities to firelens (log router) feature: allow including external config from s3 and local file; add fluent logger support for bridge and awsvpc network mode; add health check support for aws-for-fluent-bit image +* Enhancement - Add support for Windows Named Pipes in volumes [@ericdalling](https://github.com/ericdalling) [#2185](https://github.com/aws/amazon-ecs-agent/pull/2185) + +## 1.30.0 +* Feature - Add log router support (beta) +* Feature - Add elastic inference support +* Feature - Add support for showing container's Docker ID in ECS DescribeTasks and StopTask APIs [#2138](https://github.com/aws/amazon-ecs-agent/pull/2138) + +## 1.29.1 +* Enhancement - Update task cleanup wait logic to clean task resources immediately instead of waiting 3 hours [#2084](https://github.com/aws/amazon-ecs-agent/pull/2084) +* Bug - Fixed Agent reporting incorrect capabilities on Windows [#2070](https://github.com/aws/amazon-ecs-agent/pull/2070) +* Bug - Fixed a bug where Agent fails to invoke IPAM DEL command when cleaning up AWSVPC task [#2085](https://github.com/aws/amazon-ecs-agent/pull/2085) +* Bug - Fixed a bug where task resource unmarshal error was ignored rather than returned [#2098](https://github.com/aws/amazon-ecs-agent/pull/2098) +* Bug - Update amazon-vpc-plugins that allows AWSVPCTrunking to work without ec2-net-utils [#2093](https://github.com/aws/amazon-ecs-agent/pull/2093) + +## 1.29.0 +* Feature - Adds container network and storage metrics as part of ongoing [work](https://github.com/aws/containers-roadmap/issues/70) 
[#2072](https://github.com/aws/amazon-ecs-agent/pull/2072) + +## 1.28.1 +* Enhancement - Non-ECS images cleanup: clean up dangling images with image ID [#2023](https://github.com/aws/amazon-ecs-agent/pull/2023) +* Bug - Pick up latest version of amazon-vpc-cni-plugins and amazon-ecs-cni-plugins to include recent bug fixes ([f09fd7c](https://github.com/aws/amazon-vpc-cni-plugins/commit/f09fd7c6ba0cf319b0c6ad23762091e25091fbce), [d90eebe](https://github.com/aws/amazon-vpc-cni-plugins/commit/d90eebe9907cde58c47756f65eccd7efc693e1d6), [06cbba2](https://github.com/aws/amazon-ecs-cni-plugins/commit/06cbba25cab0eb0aa466b0c5f72b55b61c87a2c5)) +* Bug - Fixed error detection case when image that is being deleted does not exist [@bendavies](https://github.com/bendavies) [#2008](https://github.com/aws/amazon-ecs-agent/pull/2008) +* Bug - Fixed a bug where docker volume deletion resulted in nullpointer [#2059](https://github.com/aws/amazon-ecs-agent/pull/2059) + +## 1.28.0 +* Feature - Introduce high density awsvpc tasks support +* Enhancement - Introduce `ECS_CGROUP_CPU_PERIOD` to make cgroup cpu period configurable [@boynux](https://github.com/boynux) [#1941](https://github.com/aws/amazon-ecs-agent/pull/1941) +* Enhancement - Add Private Host IPv4 address to container metadata [@bencord0](https://github.com/bencord0) [#2000](https://github.com/aws/amazon-ecs-agent/pull/2000) +* Enhancement - Set terminal reason for volume task resource [#2004](https://github.com/aws/amazon-ecs-agent/pull/2004) +* Bug - Fixed a bug where container health status is not updated when container status isn't changed [#1972](https://github.com/aws/amazon-ecs-agent/pull/1972) +* Bug - Fixed a bug where containers in 'dead' or 'created' status are not cleaned up by the agent [#2015](https://github.com/aws/amazon-ecs-agent/pull/2015) + +## 1.27.0 +* Feature - Add secret support for log drivers + +## 1.26.1 +* Enhancement - Set up pause container user the same as proxy container when App Mesh enabled 
and pause container not using default image +* Bug - Fixed a bug where network stats are not presented in container stats [#1932](https://github.com/aws/amazon-ecs-agent/pull/1932) + +## 1.26.0 +* Feature - Startup order can now be explicitly set via DependsOn field in the Task Definition [#1904](https://github.com/aws/amazon-ecs-agent/pull/1904) +* Feature - Containers in a task can now have individual start and stop timeouts [#1904](https://github.com/aws/amazon-ecs-agent/pull/1904) +* Feature - AWS App Mesh CNI plugin support [#1898](https://github.com/aws/amazon-ecs-agent/pull/1898) +* Enhancement - Containers with links and volumes defined will now shutdown in the correct order [#1904](https://github.com/aws/amazon-ecs-agent/pull/1904) +* Bug - Image cleanup errors fixed [#1897](https://github.com/aws/amazon-ecs-agent/pull/1897) + +## 1.25.3 +* Bug - Fixed a bug where agent no longer redirected malformed credentials or metadata http requests [#1844](https://github.com/aws/amazon-ecs-agent/pull/1844) +* Bug - Populate v3 metadata networks response for non-awsvpc tasks [#1833](https://github.com/aws/amazon-ecs-agent/pull/1833) +* Bug - Avoid image pull behavior type check for container namespaces pause image [#1840](https://github.com/aws/amazon-ecs-agent/pull/1840) + +## 1.25.2 +* Bug - Update pull image retry to longer interval to mitigate being throttled by ECR [#1808](https://github.com/aws/amazon-ecs-agent/pull/1808) + +## 1.25.1 +* Bug - Update ecr models for private link support + +## 1.25.0 +* Feature - Add Nvidia GPU support for p2 and p3 instances +* Feature - Introduce `ECS_DISABLE_DOCKER_HEALTH_CHECK` to make docker health check configurable [#1624](https://github.com/aws/amazon-ecs-agent/pull/1624) + +## 1.24.0 +* Feature - Configurable poll duration for container stats [@jcbowman](https://github.com/jcbowman) [#1646](https://github.com/aws/amazon-ecs-agent/pull/1646) +* Feature - Add support to remove containers and images that are not part of ECS 
tasks [#1752](https://github.com/aws/amazon-ecs-agent/pull/1752) +* Feature - Introduce prometheus support for agent metrics [#1745](https://github.com/aws/amazon-ecs-agent/pull/1745) +* Feature - Add Host EC2 instance Public IPv4 address to container metadata file [#1730](https://github.com/aws/amazon-ecs-agent/pull/1730) +* Enhancement - Docker SDK migration replacing go-dockerclient [#1743](https://github.com/aws/amazon-ecs-agent/pull/1743) +* Enhancement - Propagating Container Instance and Task Tags to Task Metadata endpoint [#1720](https://github.com/aws/amazon-ecs-agent/pull/1720) + +## 1.23.0 +* Feature - Add support for ECS Secrets integrating with AWS Secrets Manager [#1713](https://github.com/aws/amazon-ecs-agent/pull/1713) +* Enhancement - Add availability zone to task metadata endpoint [#1674](https://github.com/aws/amazon-ecs-agent/pull/1674) +* Enhancement - Add availability zone to ECS metadata file [#1675](https://github.com/aws/amazon-ecs-agent/pull/1675) +* Bug - Fixed a bug where agent can register container instance back to back and gets + assigned two container instance ARNs [#1711](https://github.com/aws/amazon-ecs-agent/pull/1711) +* Bug - Fixed a bug where propagated `aws:` tags are passed through RegisterContainerInstance API call [#1706](https://github.com/aws/amazon-ecs-agent/pull/1706) + +## 1.22.0 +* Feature - Add support for ECS Secrets integrating with AWS Systems Manager Parameter Store +* Feature - Support for `--pid`, `--ipc` Docker run flags. 
[#1584](https://github.com/aws/amazon-ecs-agent/pull/1584) +* Feature - Introduce two environment variables `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` and `ECS_CONTAINER_INSTANCE_TAGS` to support ECS tagging [#1618](https://github.com/aws/amazon-ecs-agent/pull/1618) + +## 1.21.0 +* Feature - Add v3 task metadata support for awsvpc, host and bridge network mode +* Enhancement - Update the `amazon-ecs-cni-plugins` to `2018.10.0` [#1610](https://github.com/aws/amazon-ecs-agent/pull/1610) +* Enhancement - Configurable image pull inactivity timeout [@wattdave](https://github.com/wattdave) [#1566](https://github.com/aws/amazon-ecs-agent/pull/1566) +* Bug - Fixed a bug where Windows drive volume couldn't be mounted [#1571](https://github.com/aws/amazon-ecs-agent/pull/1571) +* Bug - Fixed a bug where the Agent's Windows binaries didn't use consistent naming [#1573](https://github.com/aws/amazon-ecs-agent/pull/1573) +* Bug - Fixed a bug where a port used by WinRM service was not reserved by the Agent by default [#1577](https://github.com/aws/amazon-ecs-agent/pull/1577) + +## 1.20.3 +* Enhancement - Deprecate support for serial docker image pull [#1569](https://github.com/aws/amazon-ecs-agent/pull/1569) +* Enhancement - Update the `amazon-ecs-cni-plugins` to `2018.08.0` + +## 1.20.2 +* Enhancement - Added ECS config field `ECS_SHARED_VOLUME_MATCH_FULL_CONFIG` to +make the volume labels and driver options comparison configurable for shared volume [#1519](https://github.com/aws/amazon-ecs-agent/pull/1519) +* Enhancement - Added Volumes metadata as part of v1 and v2 metadata endpoints [#1531](https://github.com/aws/amazon-ecs-agent/pull/1531) +* Bug - Fixed a bug where unrecognized task cannot be stopped [#1467](https://github.com/aws/amazon-ecs-agent/pull/1467) +* Bug - Fixed a bug where tasks with CPU windows unbounded field set are not honored +on restart due to non-persistence of `PlatformFields` in agent state file [@julienduchesne](https://github.com/julienduchesne) 
[#1480](https://github.com/aws/amazon-ecs-agent/pull/1480) + +## 1.20.1 +* Bug - Fixed a bug where the agent couldn't be upgraded if there are tasks that + use volumes in the task definition on the instance +* Bug - Fixed a bug where volumes driver may not work with mountpoint + +## 1.20.0 +* Feature - Add support for Docker volume drivers, third party drivers are only supported on linux +* Enhancement - Replace the empty container with Docker local volume +* Enhancement - Deprecate support for Docker version older than 1.9.0 [#1477](https://github.com/aws/amazon-ecs-agent/pull/1477) +* Bug - Fixed a bug where container marked as stopped comes back with a running status [#1446](https://github.com/aws/amazon-ecs-agent/pull/1446) + +## 1.19.1 +* Bug - Fixed a bug where responses of introspection API break backward compatibility [#1473](https://github.com/aws/amazon-ecs-agent/pull/1473) + +## 1.19.0 +* Feature - Private registry can be authenticated through task definition using AWS Secrets Manager [#1427](https://github.com/aws/amazon-ecs-agent/pull/1427) + +## 1.18.0 +* Feature - Configurable container image pull behavior [#1348](https://github.com/aws/amazon-ecs-agent/pull/1348) +* Bug - Fixed a bug where Docker Version() API never returns by adding a timeout [#1363](https://github.com/aws/amazon-ecs-agent/pull/1363) +* Bug - Fixed a bug where tasks could get stuck waiting for execution of CNI plugin [#1358](https://github.com/aws/amazon-ecs-agent/pull/1358) +* Bug - Fixed a bug where task cleanup could be blocked due to incorrect sentstatus [#1383](https://github.com/aws/amazon-ecs-agent/pull/1383) + +## 1.17.3 +* Enhancement - Distinct startContainerTimeouts for windows/linux, introduce a new environment variable `ECS_CONTAINER_START_TIMEOUT` to make it configurable [#1321](https://github.com/aws/amazon-ecs-agent/pull/1321) +* Enhancement - Add support for containers to inherit ENI private DNS hostnames for `awsvpc` tasks 
+* Enhancement - Parallelize the container transition in the same task [#1306](https://github.com/aws/amazon-ecs-agent/pull/1306)
[#1217](https://github.com/aws/amazon-ecs-agent/pull/1217) +* Bug - Task level memory.use_hierarchy was not being set and memory limits + were not being enforced [#1195](https://github.com/aws/amazon-ecs-agent/pull/1195) +* Bug - Fixed a bug where CPU utilization wasn't correctly reported on Windows + [@bboerst](https://github.com/bboerst) [#1219](https://github.com/aws/amazon-ecs-agent/pull/1219) + +## 1.16.2 +* Bug - Fixed a bug where the ticker would submit empty container state change + transitions when a task is STOPPED. [#1178](https://github.com/aws/amazon-ecs-agent/pull/1178) + +## 1.16.1 +* Bug - Fixed a bug where the agent could miss sending an ENI attachment to ECS + because of address propagation delays. [#1148](https://github.com/aws/amazon-ecs-agent/pull/1148) +* Enhancement - Upgrade the `amazon-ecs-cni-plugins` to `2017.10.1`. [#1155](https://github.com/aws/amazon-ecs-agent/pull/1155) + +## 1.16.0 +* Feature - Support pulling from Amazon ECR with specified IAM role in task definition +* Feature - Enable support for task level CPU and memory constraints. +* Feature - Enable the ECS agent to run as a Windows service. [#1070](https://github.com/aws/amazon-ecs-agent/pull/1070) +* Enhancement - Support CloudWatch metrics for Windows. [#1077](https://github.com/aws/amazon-ecs-agent/pull/1077) +* Enhancement - Enforce memory limits on Windows. [#1069](https://github.com/aws/amazon-ecs-agent/pull/1069) +* Enhancement - Enforce CPU limits on Windows. [#1089](https://github.com/aws/amazon-ecs-agent/pull/1089) +* Enhancement - Simplify task IAM credential host setup. [#1105](https://github.com/aws/amazon-ecs-agent/pull/1105) + +## 1.15.2 +* Bug - Fixed a bug where container state information wasn't reported. [#1076](https://github.com/aws/amazon-ecs-agent/pull/1076) + +## 1.15.1 +* Bug - Fixed a bug where container state information wasn't reported. 
[#1067](https://github.com/aws/amazon-ecs-agent/pull/1067) +* Bug - Fixed a bug where a task can be blocked in creating state. [#1048](https://github.com/aws/amazon-ecs-agent/pull/1048) +* Bug - Fixed dynamic HostPort in container metadata. [#1052](https://github.com/aws/amazon-ecs-agent/pull/1052) +* Bug - Fixed bug on Windows where container memory limits are not enforced. [#1069](https://github.com/aws/amazon-ecs-agent/pull/1069) + +## 1.15.0 +* Feature - Support for provisioning tasks with ENIs. +* Feature - Support for `--init` Docker run flag. [#996](https://github.com/aws/amazon-ecs-agent/pull/996) +* Feature - Introduces container level metadata. [#981](https://github.com/aws/amazon-ecs-agent/pull/981) +* Enhancement - Enable 'none' logging driver capability by default. + [#1041](https://github.com/aws/amazon-ecs-agent/pull/1041) +* Bug - Fixed a bug where tasks that fail to pull containers can cause the agent + to fail to restore properly after a restart. [#1033](https://github.com/aws/amazon-ecs-agent/pull/1033) +* Bug - Fixed default logging level issue. [#1016](https://github.com/aws/amazon-ecs-agent/pull/1016) +* Bug - Fixed a bug where unsupported Docker API client versions could be registered. + [#1014](https://github.com/aws/amazon-ecs-agent/pull/1014) +* Bug - Fixed a bug where non-essential container state changes were sometimes not submitted. 
+* Enhancement - Set read and write timeouts for websocket connections [#993](https://github.com/aws/amazon-ecs-agent/pull/993)
[#889](https://github.com/aws/amazon-ecs-agent/pull/889)
+* Bug - Fixed a bug where a task could be prematurely marked as stopped.
+* Bug - Fixed an issue where ECS Agent would keep reconnecting to ACS without any backoff.
+* Bug - Fixed memory metric to exclude cache value.
+ +## 1.11.0 +* Feature - Support IAM roles for tasks. +* Feature - Add support for the Splunk logging driver. +* Enhancement - Reduced pull status verbosity in debug mode. +* Enhancement - Add a Docker label for ECS cluster. +* Bug - Fixed a bug that could cause a container to be marked as STOPPED while + still running on the instance. +* Bug - Fixed a potential race condition in metrics collection. +* Bug - Resolved a bug where some state could be retained across different + container instances when launching from a snapshotted AMI. + +## 1.10.0 +* Feature - Make the `docker stop` timeout configurable. +* Enhancement - Use `docker stats` as the data source for CloudWatch metrics. +* Bug - Fixed an issue where update requests would not be properly acknowledged + when updates were disabled. + +## 1.9.0 +* Feature - Add Amazon CloudWatch Logs logging driver. +* Bug - Fixed ACS handler when acking blank message ids. +* Bug - Fixed an issue where CPU utilization could be reported incorrectly. +* Bug - Resolved a bug where containers would not get cleaned up in some cases. + +## 1.8.2 +* Bug - Fixed an issue where `exec_create` and `exec_start` events were not + correctly ignored with some Docker versions. +* Bug - Fixed memory utilization computation. +* Bug - Resolved a bug where sending a signal to a container caused the + agent to treat the container as dead. + +## 1.8.1 +* Bug - Fixed a potential deadlock in docker_task_engine. + +## 1.8.0 +* Feature - Task cleanup wait time is now configurable. +* Enhancement - Improved testing for HTTP handler tests. +* Enhancement - Updated AWS SDK to v.1.0.11. +* Bug - Fixed a race condition in a docker-task-engine test. +* Bug - Fixed an issue where dockerID was not persisted in the case of an + error. + +## 1.7.1 +* Enhancement - Increase `docker inspect` timeout to improve reliability under + some workloads. +* Enhancement - Increase connect timeout for websockets to improve reliability + under some workloads. 
+* Bug - Fixed memory leak in telemetry ticker loop. + +## 1.7.0 +* Feature - Add support for pulling from Amazon EC2 Container Registry. +* Bug - Resolved an issue where containers could be incorrectly assumed stopped + when an OOM event was emitted by Docker. +* Bug - Fixed an issue where a crash could cause recently-created containers to + become untracked. + +## 1.6.0 + +* Feature - Add experimental HTTP proxy support. +* Enhancement - No longer erroneously store an archive of all logs in the + container, greatly decreasing memory and CPU usage when rotating at the + hour. +* Enhancement - Increase `docker create` timeout to improve reliability under + some workloads. +* Bug - Resolved an issue where private repositories required a schema in + `AuthData` to work. +* Bug - Fixed issue whereby metric submission could fail and never retry. + +## 1.5.0 +* Feature - Add support for additional Docker features. +* Feature - Detect and register capabilities. +* Feature - Add -license flag and /license handler. +* Enhancement - Properly handle throttling. +* Enhancement - Make it harder to accidentally expose sensitive data. +* Enhancement - Increased reliability in functional tests. +* Bug - Fixed potential divide-by-zero error with metrics. + +## 1.4.0 +* Feature - Telemetry reporting for Services and Clusters. +* Bug - Fixed an issue where some network errors would cause a panic. + +## 1.3.1 +* Feature - Add debug handler for SIGUSR1. +* Enhancement - Trim untrusted cert from CA bundle. +* Enhancement - Add retries to EC2 Metadata fetches. +* Enhancement - Logging improvements. +* Bug - Resolved an issue with ACS heartbeats. +* Bug - Fixed memory leak in ACS payload handler. +* Bug - Fixed multiple deadlocks. + +## 1.3.0 + +* Feature - Add support for re-registering a container instance. 
+* Bug - Fixed an issue where not specifying a tag would pull all image tags.
+* Bug - Worked around an issue preventing some tasks from starting due to devicemapper
+You may run all tests by either running the `make test` target (requires `go`,
diff --git a/GO_VERSION b/GO_VERSION new file mode 100644 index 00000000000..e34208c9371 --- /dev/null +++ b/GO_VERSION @@ -0,0 +1 @@ +1.15.4 diff --git a/GO_VERSION_WINDOWS b/GO_VERSION_WINDOWS new file mode 100644 index 00000000000..35d51f33b34 --- /dev/null +++ b/GO_VERSION_WINDOWS @@ -0,0 +1 @@ +1.12 \ No newline at end of file diff --git a/INIT_CHANGELOG.md b/INIT_CHANGELOG.md new file mode 100644 index 00000000000..5cd617079dc --- /dev/null +++ b/INIT_CHANGELOG.md @@ -0,0 +1,373 @@ +# Changelog + +## 1.51.0-1 +* Cache Agent version 1.51.0 + +## 1.50.3-1 +* Cache Agent version 1.50.3 + +## 1.50.2-1 +* Cache Agent version 1.50.2 + +## 1.50.1-1 +* Cache Agent version 1.50.1 +* Does not restart ECS Agent when it exits with exit code 5 + +## 1.50.0-1 +* Cache Agent version 1.50.0 +* Allows ECS customers to execute interactive commands inside containers. + +## 1.49.0-1 +* Cache Agent version 1.49.0 +* Removes iptable rule that drops packets to port 51678 unconditionally on ecs service stop + +## 1.48.1-1 +* Cache Agent version 1.48.1 + +## 1.48.0-2 +* Cache Agent version 1.48.0 + +## 1.47.0-1 +* Cache Agent version 1.47.0 + +## 1.46.0-1 +* Cache Agent version 1.46.0 + +## 1.45.0-1 +* Cache Agent version 1.45.0 +* Block offhost access to agent's introspection port by default. 
Configurable via env ECS\_ALLOW\_OFFHOST\_INTROSPECTION\_ACCESS + +## 1.44.4-1 +* Cache Agent version 1.44.4 + +## 1.44.3-1 +* Cache Agent version 1.44.3 + +## 1.44.2-1 +* Cache Agent version 1.44.2 + +## 1.44.1-1 +* Cache Agent version 1.44.1 + +## 1.44.0-1 +* Cache Agent version 1.44.0 +* Add support for configuring Agent container logs + +## 1.43.0-2 +* Cache Agent version 1.43.0 + +## 1.42.0-1 +* Cache Agent version 1.42.0 +* Add a flag ECS\_SKIP\_LOCALHOST\_TRAFFIC\_FILTER to allow skipping local traffic filtering + +## 1.41.1-2 +* Drop traffic to 127.0.0.1 that isn't originated from the host + +## 1.41.1-1 +* Cache Agent version 1.41.1 + +## 1.41.0-1 +* Cache Agent version 1.41.0 + +## 1.40.0-1 +* Cache Agent version 1.40.0 + +## 1.39.0-2 +* Cache Agent version 1.39.0 +* Ignore IPv6 disable failure if already disabled + +## 1.38.0-1 +* Cache Agent version 1.38.0 +* Adds support for ECS volume plugin +* Disable ipv6 router advertisements for optimization + +## 1.37.0-2 +* Cache Agent version 1.37.0 +* Add '/etc/alternatives' to mounts + +## 1.36.2-1 +* Cache Agent version 1.36.2 +* update sbin mount point to avoid conflict with Docker >= 19.03.5 + +## 1.36.1-1 +* Cache Agent version 1.36.1 + +## 1.36.0-1 +* Cache Agent version 1.36.0 +* capture a fixed tail of container logs when removing a container + +## 1.35.0-1 +* Cache Agent version 1.35.0 +* Fix bug where stopping agent gracefully would still restart ecs-init + +## 1.33.0-1 +* Cache Agent version 1.33.0 +* Fix destination path in docker socket bind mount to match the one specified using DOCKER\_HOST on Amazon Linux 2 + +## 1.32.1-1 +* Cache Agent version 1.32.1 +* Add the ability to set Agent container's labels + +## 1.32.0-1 +* Cache Agent version 1.32.0 + +## 1.31.0-1 +* Cache Agent version 1.31.0 + +## 1.30.0-1 +* Cache Agent version 1.30.0 + +## 1.29.1-1 +* Cache Agent version 1.29.1 + +## 1.29.0-1 +* Cache Agent version 1.29.0 + +## 1.28.1-2 +* Cache Agent version 1.28.1 +* Use exponential backoff 
when restarting agent + +## 1.28.0-1 +* Cache Agent version 1.28.0 + +## 1.27.0-1 +* Cache Agent version 1.27.0 + +## 1.26.1-1 +* Cache Agent version 1.26.1 + +## 1.26.0-1 +* Cache Agent version 1.26.0 +* Add support for running iptables within agent container + +## 1.25.3-1 +* Cache Agent version 1.25.3 + +## 1.25.2-1 +* Cache Agent version 1.25.2 + +## 1.25.1-1 +* Cache Agent version 1.25.1 +* Update ecr models for private link support + +## 1.25.0-1 +* Cache Agent version 1.25.0 +* Add Nvidia GPU support for p2 and p3 instances + +## 1.24.0-1 +* Cache Agent version 1.24.0 + +## 1.22.0-4 +* Cache ECS agent version 1.22.0 for x86\_64 & ARM +* Support ARM architecture builds + +## 1.22.0-3 +* Rebuild + +## 1.22.0-2 +* Cache Agent version 1.22.0 + +## 1.21.0-1 +* Cache Agent version 1.21.0 +* Support configurable logconfig for Agent container to reduce disk usage +* ECS Agent will use the host's cert store on the Amazon Linux platform + +## 1.20.3-1 +* Cache Agent version 1.20.3 + +## 1.20.2-1 +* Cache Agent version 1.20.2 + +## 1.20.1-1 +* Cache Agent version 1.20.1 + +## 1.20.0-1 +* Cache Agent version 1.20.0 + +## 1.19.1-1 +* Cache Agent version 1.19.1 + +## 1.19.0-1 +* Cache Agent version 1.19.0 + +## 1.18.0-2 +* Spec file cleanups +* Enable builds for both AL1 and AL2 + +## 1.18.0-1 +* Cache Agent version 1.18.0 +* Add support for regional buckets +* Bundle ECS Agent tarball in package +* Download agent based on the partition +* Mount Docker plugin files dir + +## 1.17.3-1 +* Cache Agent version 1.17.3 +* Use s3client instead of httpclient when downloading + +## 1.17.2-1 +* Cache Agent version 1.17.2 + +## 1.17.1-1 +* Cache Agent version 1.17.1 + +## 1.17.0-2 +* Cache Agent version 1.17.0 + +## 1.16.2-1 +* Cache Agent version 1.16.2 +* Add GovCloud endpoint + +## 1.16.1-1 +* Cache Agent version 1.16.1 +* Improve startup behavior when Docker socket doesn't exist yet + +## 1.16.0-1 +* Cache Agent version 1.16.0 + +## 1.15.2-1 +* Cache Agent version 1.15.2 + +## 
1.15.1-1 +* Update ECS Agent version + +## 1.15.0-1 +* Update ECS Agent version + +## 1.14.5-1 +* Update ECS Agent version + +## 1.14.4-1 +* Update ECS Agent version + +## 1.14.2-2 +* Cache Agent version 1.14.2 +* Add functionality for running agent with userns=host when Docker has userns-remap enabled +* Add support for Docker 17.03.1ce + +## 1.14.1-1 +* Cache Agent version 1.14.1 + +## 1.14.0-2 +* Add retry-backoff for pinging the Docker socket when creating the Docker client + +## 1.14.0-1 +* Cache Agent version 1.14.0 + +## 1.13.1-2 +* Update Requires to indicate support for docker <= 1.12.6 + +## 1.13.1-1 +* Cache Agent version 1.13.1 + +## 1.13.0-1 +* Cache Agent version 1.13.0 + +## 1.12.2-1 +* Cache Agent version 1.12.2 + +## 1.12.1-1 +* Cache Agent version 1.12.1 + +## 1.12.0-1 +* Cache Agent version 1.12.0 +* Add netfilter rules to support host network reaching credentials proxy + +## 1.11.1-1 +* Cache Agent version 1.11.1 + +## 1.11.0-1 +* Cache Agent version 1.11.0 +* Add support for Docker 1.11.2 +* Modify iptables and netfilter to support credentials proxy +* Eliminate requirement that /tmp and /var/cache be on the same filesystem +* Start agent with host network mode + +## 1.10.0-1 +* Cache Agent version 1.10.0 +* Add support for Docker 1.11.1 + +## 1.9.0-1 +* Cache Agent version 1.9.0 +* Make awslogs driver available by default + +## 1.8.2-1 +* Cache Agent version 1.8.2 + +## 1.8.1-1 +* Cache Agent version 1.8.1 + +## 1.8.0-1 +* Cache Agent version 1.8.0 + +## 1.7.1-1 +* Cache Agent version 1.7.1 + +## 1.7.0-1 +* Cache Agent version 1.7.0 +* Add support for Docker 1.9.1 + +## 1.6.0-1 +* Cache Agent version 1.6.0 +* Updated source dependencies + +## 1.5.0-1 +* Cache Agent version 1.5.0 +* Improved merge strategy for user-supplied environment variables +* Add default supported logging drivers + +## 1.4.0-2 +* Add support for Docker 1.7.1 + +## 1.4.0-1 +* Cache Agent version 1.4.0 + +## 1.3.1-1 +* Cache Agent version 1.3.1 +* Read Docker endpoint from 
environment variable DOCKER\_HOST if present + +## 1.3.0-1 +* Cache Agent version 1.3.0 + +## 1.2.1-2 +* Cache Agent version 1.2.1 + +## 1.2.0-1 +* Update versioning scheme to match Agent version +* Cache Agent version 1.2.0 +* Mount cgroup and execdriver directories for Telemetry feature + +## 1.0-5 +* Add support for Docker 1.6.2 + +## 1.0-4 +* Properly restart if the ecs-init package is upgraded in isolation + +## 1.0-3 +* Restart on upgrade if already running + +## 1.0-2 +* Cache Agent version 1.1.0 +* Add support for Docker 1.6.0 +* Force cache load on install/upgrade +* Add man page + +## 1.0-1 +* Re-start Agent on non-terminal exit codes +* Enable Agent self-updates +* Cache Agent version 1.0.0 +* Added rollback to cached Agent version for failed updates + +## 0.3-0 +* Migrate to statically-compiled Go binary + +## 0.2-3 +* Test for existing container agent and force remove it + +## 0.2-2 +* Mount data directory for state persistence +* Enable JSON-based configuration + +## 0.2-1 +* Naive update functionality + diff --git a/INIT_README.md b/INIT_README.md new file mode 100644 index 00000000000..3f951ef63c7 --- /dev/null +++ b/INIT_README.md @@ -0,0 +1,62 @@ +# Amazon Elastic Container Service RPM + +[![Build Status](https://travis-ci.org/aws/amazon-ecs-init.svg?branch=master)](https://travis-ci.org/aws/amazon-ecs-init) + +The Amazon Elastic Container Service RPM is software developed to support the [Amazon ECS Container +Agent](http://github.com/aws/amazon-ecs-agent). The Amazon ECS RPM is packaged for RPM-based systems that utilize +[Upstart](http://upstart.ubuntu.com) as the init system. + +## Behavior +The upstart script installed by the Amazon ECS RPM runs at the completion of runlevel 3, 4, or 5 as the system starts. +The script will clean up any previous copies of the Amazon ECS Container Agent, and then start a new copy. 
Logs from +the RPM are available at `/var/log/ecs/ecs-init.log`, while logs from the Amazon ECS Container Agent are available at +`/var/log/ecs/ecs-agent.log`. The Amazon ECS RPM makes the Amazon ECS Container Agent introspection endpoint available +at `http://127.0.0.1:51678/v1`. Configuration for the Amazon ECS Container Agent is read from `/etc/ecs/ecs.config`. +All of the configurations in this file are used as environment variables of the ECS Agent container. Additionally, some +configurations can be used to configure other properties of the ECS Agent container, as described below. + +| Configuration Key | Example Value(s) | Description | Default value | +|:----------------|:----------------------------|:------------|:-----------------------| +| `ECS_AGENT_LABELS` | `{"test.label.1":"value1","test.label.2":"value2"}` | The labels to add to the ECS Agent container. | | + +Additionally, the following environment variable(s) can be used to configure the behavior of the RPM: + +| Environment Variable Name | Example Value(s) | Description | Default value | +|:----------------|:----------------------------|:------------|:-----------------------| +| `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` | <true | false> | By default, the ecs-init service adds an iptable rule to drop non-local packets to localhost if they're not part of an existing forwarded connection or DNAT, and removes the rule upon stop. If `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` is set to true, this rule will not be added/removed. | false | +| `ECS_ALLOW_OFFHOST_INTROSPECTION_ACCESS` | <true | false> | By default, the ecs-init service adds an iptable rule to block access to ECS Agent's introspection port from off-host (or containers in awsvpc network mode), and removes the rule upon stop. If `ECS_ALLOW_OFFHOST_INTROSPECTION_ACCESS` is set to true, this rule will not be added/removed. 
| false | + +The above environment variable(s) can be used in the following way +- On Amazon Linux 1, the flag `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` can be turned on by adding `env ECS_SKIP_LOCALHOST_TRAFFIC_FILTER=true` to /etc/init/ecs.conf. +- On Amazon Linux 2, the flag `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` can be turned on by adding `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER=true` to /etc/ecs/ecs.config. + +## Usage +The upstart script installed by the Amazon Elastic Container Service RPM can be started or stopped with the following commands respectively: + +* `sudo start ecs` +* `sudo stop ecs` + +### Updates +Updates to the Amazon ECS Container Agent should be performed through the Amazon ECS Container Agent. In the case where +an update failed and the Amazon ECS Container Agent is no longer functional, a rollback can be initiated as follows: + +1. `sudo stop ecs` +2. `sudo /usr/libexec/amazon-ecs-init reload-cache` +3. `sudo start ecs` + +## Security disclosures +If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or [email AWS security directly](mailto:aws-security@amazon.com). + +## Development + +#### Dev dependencies + +Run `make get-deps` to get dependencies for running tests and generating mocks. + +#### Generating mocks + +Mocks can be generated using the `make generate` Makefile target. **NOTE** that this must be run on a linux machine. + +## License + +The Amazon Elastic Container Service RPM is licensed under the Apache 2.0 License. diff --git a/Makefile b/Makefile index 08fe1eddc27..96ec6c18f9e 100644 --- a/Makefile +++ b/Makefile @@ -1,87 +1,368 @@ -# Copyright 2014-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the -# "License"). 
You may not use this file except in compliance -# with the License. A copy of the License is located at +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at # -# http://aws.amazon.com/apache2.0/ +# http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and -# limitations under the License. -VERSION := $(shell git describe --tags | sed -e 's/v//' -e 's/-.*//') -DEB_SIGN ?= 1 +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. -.PHONY: dev generate lint static test build-mock-images sources rpm srpm govet +USERID=$(shell id -u) -dev: - ./scripts/gobuild.sh dev +.PHONY: all gobuild static xplatform-build docker release certs test clean netkitten test-registry benchmark-test gogenerate run-integ-tests pause-container get-cni-sources cni-plugins test-artifacts +BUILD_PLATFORM:=$(shell uname -m) -generate: - PATH=$(PATH):$(shell pwd)/scripts go generate -v ./... 
+ifeq (${BUILD_PLATFORM},aarch64) + GOARCH=arm64 +else + GOARCH=amd64 +endif -PLATFORM:=$(shell uname -s) -ifeq (${PLATFORM},Linux) - dep_arch=linux-386 - else ifeq (${PLATFORM},Darwin) - dep_arch=darwin-386 - endif +ifeq (${TARGET_OS},windows) + GO_VERSION=$(shell cat ./GO_VERSION_WINDOWS) +else + GO_VERSION=$(shell cat ./GO_VERSION) +endif -DEP_VERSION=v0.5.0 -.PHONY: get-dep -get-dep: bin/dep +all: docker + +# Dynamic go build; useful in that it does not have -a so it won't recompile +# everything every time +gobuild: + ./scripts/build false -bin/dep: - mkdir -p ./bin - curl -L https://github.com/golang/dep/releases/download/$(DEP_VERSION)/dep-${dep_arch} -o ./bin/dep - chmod +x ./bin/dep +# create output directories +.out-stamp: + mkdir -p ./out/test-artifacts ./out/cni-plugins ./out/amazon-ecs-cni-plugins ./out/amazon-vpc-cni-plugins + touch .out-stamp + +# Basic go build static: + ./scripts/build + +static-init: ./scripts/gobuild.sh -govet: - go vet ./... +static-with-pause: + ./scripts/build true "" false true + +# Cross-platform build target for static checks +xplatform-build: + GOOS=linux GOARCH=arm64 ./scripts/build true "" false + GOOS=windows GOARCH=amd64 ./scripts/build true "" false + GOOS=darwin GOARCH=amd64 ./scripts/build true "" false + +BUILDER_IMAGE="amazon/amazon-ecs-agent-build:make" +.builder-image-stamp: scripts/dockerfiles/Dockerfile.build + @docker build --build-arg GO_VERSION=$(GO_VERSION) -f scripts/dockerfiles/Dockerfile.build -t $(BUILDER_IMAGE) . 
+ touch .builder-image-stamp + +# 'build-in-docker' builds the agent within a dockerfile and saves it to the ./out +# directory +# TODO: make this idempotent +build-in-docker: .builder-image-stamp .out-stamp + @docker run --net=none \ + --env TARGET_OS="${TARGET_OS}" \ + --env LDFLAGS="-X github.com/aws/amazon-ecs-agent/agent/config.DefaultPauseContainerTag=$(PAUSE_CONTAINER_TAG) \ + -X github.com/aws/amazon-ecs-agent/agent/config.DefaultPauseContainerImageName=$(PAUSE_CONTAINER_IMAGE)" \ + --volume "$(PWD)/out:/out" \ + --volume "$(PWD):/go/src/github.com/aws/amazon-ecs-agent" \ + --user "$(USERID)" \ + --rm \ + $(BUILDER_IMAGE) + +# 'docker' builds the agent dockerfile from the current sourcecode tree, dirty +# or not +docker: certs build-in-docker pause-container-release cni-plugins .out-stamp + @cd scripts && ./create-amazon-ecs-scratch + @docker build -f scripts/dockerfiles/Dockerfile.release -t "amazon/amazon-ecs-agent:make" . + @echo "Built Docker image \"amazon/amazon-ecs-agent:make\"" + +ifeq (${TARGET_OS},windows) + BUILD="cleanbuild-${TARGET_OS}" +else + BUILD=cleanbuild +endif + +# 'docker-release' builds the agent from a clean snapshot of the git repo in +# 'RELEASE' mode +# TODO: make this idempotent +docker-release: pause-container-release cni-plugins .out-stamp + @docker build --build-arg GO_VERSION=${GO_VERSION} -f scripts/dockerfiles/Dockerfile.cleanbuild -t "amazon/amazon-ecs-agent-${BUILD}:make" . 
+ @docker run --net=none \ + --env TARGET_OS="${TARGET_OS}" \ + --env LDFLAGS="-X github.com/aws/amazon-ecs-agent/agent/config.DefaultPauseContainerTag=$(PAUSE_CONTAINER_TAG) \ + -X github.com/aws/amazon-ecs-agent/agent/config.DefaultPauseContainerImageName=$(PAUSE_CONTAINER_IMAGE)" \ + --user "$(USERID)" \ + --volume "$(PWD)/out:/out" \ + --volume "$(PWD):/src/amazon-ecs-agent" \ + --rm \ + "amazon/amazon-ecs-agent-${BUILD}:make" + +# Release packages our agent into a "scratch" based dockerfile +release: certs docker-release + @./scripts/create-amazon-ecs-scratch + @docker build -f scripts/dockerfiles/Dockerfile.release -t "amazon/amazon-ecs-agent:latest" . + @echo "Built Docker image \"amazon/amazon-ecs-agent:latest\"" + +# We need to bundle certificates with our scratch-based container +certs: misc/certs/ca-certificates.crt +misc/certs/ca-certificates.crt: + docker build -t "amazon/amazon-ecs-agent-cert-source:make" misc/certs/ + docker run "amazon/amazon-ecs-agent-cert-source:make" cat /etc/ssl/certs/ca-certificates.crt > misc/certs/ca-certificates.crt + +gogenerate: + go generate -x ./agent/... + $(MAKE) goimports + +gogenerate-init: + PATH=$(PATH):$(shell pwd)/scripts go generate -x ./ecs-init/... + $(MAKE) goimports + +# 'go' may not be on the $PATH for sudo tests +GO_EXECUTABLE=$(shell command -v go 2> /dev/null) + +# VERBOSE includes the options that make the test opt loud +VERBOSE=-v -cover + +# -count=1 runs the test without using the build cache. The build cache can +# provide false positives when running integ tests, so we err on the side of +# caution. See `go help test` +# unit tests include the coverage profile +GOTEST=${GO_EXECUTABLE} test -count=1 ${VERBOSE} + +# -race sometimes causes compile issues on Arm +ifneq (${BUILD_PLATFORM},aarch64) + GOTEST += -race +endif test: - go test -count=1 -short -v -coverprofile cover.out ./... + ${GOTEST} -tags unit -coverprofile cover.out -timeout=60s ./agent/... 
+ go tool cover -func cover.out > coverprofile.out + +test-init: + go test -count=1 -short -v -coverprofile cover.out ./ecs-init/... + go tool cover -func cover.out > coverprofile-init.out + +test-silent: + $(eval VERBOSE=) + ${GOTEST} -tags unit -coverprofile cover.out -timeout=60s ./agent/... go tool cover -func cover.out > coverprofile.out .PHONY: analyze-cover-profile analyze-cover-profile: coverprofile.out ./scripts/analyze-cover-profile -# all .go files in the ecs-init -GOFILES:=$(shell go list -f '{{$$p := .}}{{range $$f := .GoFiles}}{{$$p.Dir}}/{{$$f}} {{end}}' ./ecs-init/...) +.PHONY: analyze-cover-profile-init +analyze-cover-profile-init: coverprofile-init.out + ./scripts/analyze-cover-profile-init + +run-integ-tests: test-registry gremlin container-health-check-image run-sudo-tests + ECS_LOGLEVEL=debug ${GOTEST} -tags integration -timeout=30m ./agent/... + +run-sudo-tests: + sudo -E ${GOTEST} -tags sudo -timeout=10m ./agent/... + +benchmark-test: + go test -run=XX -bench=. ./agent/... 
+ + +.PHONY: build-image-for-ecr upload-images replicate-images + +build-image-for-ecr: netkitten volumes-test image-cleanup-test-images fluentd exec-command-agent-test + +upload-images: build-image-for-ecr + @./scripts/upload-images $(STANDARD_REGION) $(STANDARD_REPOSITORY) + +replicate-images: build-image-for-ecr + @./scripts/upload-images $(REPLICATE_REGION) $(REPLICATE_REPOSITORY) + +PAUSE_CONTAINER_IMAGE = "amazon/amazon-ecs-pause" +PAUSE_CONTAINER_TAG = "0.1.0" +PAUSE_CONTAINER_TARBALL = "amazon-ecs-pause.tar" + +pause-container: .out-stamp + $(MAKE) -C misc/pause-container $(MFLAGS) + +pause-container-release: pause-container + @docker save ${PAUSE_CONTAINER_IMAGE}:${PAUSE_CONTAINER_TAG} > "$(PWD)/out/${PAUSE_CONTAINER_TARBALL}" + +# Variable to determine branch/tag of amazon-ecs-cni-plugins +ECS_CNI_REPOSITORY_REVISION=master + +# Variable to override cni repository location +ECS_CNI_REPOSITORY_SRC_DIR=$(PWD)/amazon-ecs-cni-plugins +VPC_CNI_REPOSITORY_SRC_DIR=$(PWD)/amazon-vpc-cni-plugins + +get-cni-sources: + git submodule update --init --recursive + +build-ecs-cni-plugins: + @docker build --build-arg GO_VERSION=$(GO_VERSION) -f scripts/dockerfiles/Dockerfile.buildECSCNIPlugins -t "amazon/amazon-ecs-build-ecs-cni-plugins:make" . + docker run --rm --net=none \ + -e GIT_SHORT_HASH=$(shell cd $(ECS_CNI_REPOSITORY_SRC_DIR) && git rev-parse --short=8 HEAD) \ + -e GIT_PORCELAIN=$(shell cd $(ECS_CNI_REPOSITORY_SRC_DIR) && git status --porcelain 2> /dev/null | wc -l | sed 's/^ *//') \ + -u "$(USERID)" \ + -v "$(PWD)/out/amazon-ecs-cni-plugins:/go/src/github.com/aws/amazon-ecs-cni-plugins/bin/plugins" \ + -v "$(ECS_CNI_REPOSITORY_SRC_DIR):/go/src/github.com/aws/amazon-ecs-cni-plugins" \ + "amazon/amazon-ecs-build-ecs-cni-plugins:make" + @echo "Built amazon-ecs-cni-plugins successfully." 
+ +build-vpc-cni-plugins: + @docker build --build-arg GOARCH=$(GOARCH) --build-arg GO_VERSION=$(GO_VERSION) -f scripts/dockerfiles/Dockerfile.buildVPCCNIPlugins -t "amazon/amazon-ecs-build-vpc-cni-plugins:make" . + docker run --rm --net=none \ + -e GIT_SHORT_HASH=$(shell cd $(VPC_CNI_REPOSITORY_SRC_DIR) && git rev-parse --short=8 HEAD) \ + -u "$(USERID)" \ + -v "$(PWD)/out/amazon-vpc-cni-plugins:/go/src/github.com/aws/amazon-vpc-cni-plugins/build/linux_$(GOARCH)" \ + -v "$(VPC_CNI_REPOSITORY_SRC_DIR):/go/src/github.com/aws/amazon-vpc-cni-plugins" \ + "amazon/amazon-ecs-build-vpc-cni-plugins:make" + @echo "Built amazon-vpc-cni-plugins successfully." + +cni-plugins: get-cni-sources .out-stamp build-ecs-cni-plugins build-vpc-cni-plugins + mv $(PWD)/out/amazon-ecs-cni-plugins/* $(PWD)/out/cni-plugins + mv $(PWD)/out/amazon-vpc-cni-plugins/* $(PWD)/out/cni-plugins + @echo "Built all cni plugins successfully." + + +.PHONY: codebuild +codebuild: .out-stamp + $(MAKE) release TARGET_OS="linux" + TARGET_OS="linux" ./scripts/local-save + $(MAKE) docker-release TARGET_OS="windows" + TARGET_OS="windows" ./scripts/local-save + +netkitten: + $(MAKE) -C misc/netkitten $(MFLAGS) + +volumes-test: + $(MAKE) -C misc/volumes-test $(MFLAGS) + +# Run our 'test' registry needed for integ tests +test-registry: netkitten volumes-test pause-container image-cleanup-test-images fluentd exec-command-agent-test + +exec-command-agent-test: + $(MAKE) -C misc/exec-command-agent-test $(MFLAGS) + + @./scripts/setup-test-registry + +.PHONY: fluentd gremlin image-cleanup-test-images + +gremlin: + $(MAKE) -C misc/gremlin $(MFLAGS) + +fluentd: + $(MAKE) -C misc/fluentd $(MFLAGS) + +image-cleanup-test-images: + $(MAKE) -C misc/image-cleanup-test-images $(MFLAGS) + +container-health-check-image: + $(MAKE) -C misc/container-health $(MFLAGS) + + +# all .go files in the agent, excluding vendor/, model/ and testutils/ directories, and all *_test.go and *_mocks.go files +GOFILES:=$(shell go list -f '{{$$p := 
.}}{{range $$f := .GoFiles}}{{$$p.Dir}}/{{$$f}} {{end}}' ./agent/... \ + | grep -v /testutils/ | grep -v _test\.go$ | grep -v _mocks\.go$ | grep -v /model) .PHONY: gocyclo gocyclo: # Run gocyclo over all .go files - gocyclo -over 12 ${GOFILES} + gocyclo -over 17 ${GOFILES} + +# same as gofiles above, but without the `-f` +.PHONY: govet +govet: + go vet $(shell go list ./agent/... | grep -v /testutils/ | grep -v _test\.go$ | grep -v /mocks | grep -v /model) -GOFMTFILES:=$(shell find ./ecs-init -not -path './ecs-init/vendor/*' -type f -iregex '.*\.go') +GOFMTFILES:=$(shell find ./agent -not -path './agent/vendor/*' -type f -iregex '.*\.go') .PHONY: importcheck importcheck: $(eval DIFFS:=$(shell goimports -l $(GOFMTFILES))) @if [ -n "$(DIFFS)" ]; then echo "Files incorrectly formatted. Fix formatting by running goimports:"; echo "$(DIFFS)"; exit 1; fi +.PHONY: gogenerate-check +gogenerate-check: gogenerate + # check that gogenerate does not generate a diff. + git diff --exit-code + .PHONY: static-check -static-check: gocyclo govet importcheck +static-check: gocyclo govet importcheck gogenerate-check + # use default checks of staticcheck tool, except style checks (-ST*) and deprecation checks (-SA1019) + # deprecation checks have been left out for now; removing their warnings requires error handling for newer suggested APIs, changes in function signatures and their usages. + # https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck + staticcheck -tests=false -checks "inherit,-ST*,-SA1019,-SA9002,-SA4006" ./agent/... + +# all .go files in the ecs-init +GOFILES_INIT:=$(shell go list -f '{{$$p := .}}{{range $$f := .GoFiles}}{{$$p.Dir}}/{{$$f}} {{end}}' ./ecs-init/...) +.PHONY: gocyclo-init +gocyclo-init: + # Run gocyclo over all .go files + gocyclo -over 12 ${GOFILES_INIT} + +.PHONY: govet-init +govet-init: + go vet ./ecs-init/...
+ +GOFMTFILES_INIT:=$(shell find ./ecs-init -not -path './ecs-init/vendor/*' -type f -iregex '.*\.go') + +.PHONY: importcheck-init +importcheck-init: + $(eval DIFFS:=$(shell goimports -l $(GOFMTFILES_INIT))) + @if [ -n "$(DIFFS)" ]; then echo "Files incorrectly formatted. Fix formatting by running goimports:"; echo "$(DIFFS)"; exit 1; fi + +.PHONY: static-check-init +static-check-init: gocyclo-init govet-init importcheck-init # use default checks of staticcheck tool, except style checks (-ST*) # https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck staticcheck -tests=false -checks "inherit,-ST*" ./ecs-init/... -test-in-docker: - docker build -f scripts/dockerfiles/test.dockerfile -t "amazon/amazon-ecs-init-test:make" . - docker run -v "$(shell pwd):/go/src/github.com/aws/amazon-ecs-init" "amazon/amazon-ecs-init-test:make" +.PHONY: goimports +goimports: + goimports -w $(GOFMTFILES) -build-mock-images: - docker build -t "test.localhost/amazon/mock-ecs-agent" -f "scripts/dockerfiles/mock-agent.dockerfile" . - docker build -t "test.localhost/amazon/wants-update" -f "scripts/dockerfiles/wants-update.dockerfile" . - docker build -t "test.localhost/amazon/exit-success" -f "scripts/dockerfiles/exit-success.dockerfile" . +GOPATH=$(shell go env GOPATH) +.get-deps-stamp: + go get golang.org/x/tools/cmd/cover + go get github.com/golang/mock/mockgen + cd "${GOPATH}/src/github.com/golang/mock/mockgen" && git checkout 1.3.1 && go get ./... && go install ./... 
&& cd - + go get golang.org/x/tools/cmd/goimports + GO111MODULE=on go get github.com/fzipp/gocyclo/cmd/gocyclo@v0.3.1 + go get honnef.co/go/tools/cmd/staticcheck + touch .get-deps-stamp + +get-deps: .get-deps-stamp + +get-deps-init: + go get golang.org/x/tools/cover + go get golang.org/x/tools/cmd/cover + GO111MODULE=on go get github.com/fzipp/gocyclo/cmd/gocyclo@v0.3.1 + go get golang.org/x/tools/cmd/goimports + go get github.com/golang/mock/mockgen + go get honnef.co/go/tools/cmd/staticcheck + +PLATFORM:=$(shell uname -s) +ifeq (${PLATFORM},Linux) + dep_arch=linux-386 + else ifeq (${PLATFORM},Darwin) + dep_arch=darwin-386 + endif +DEP_VERSION=v0.5.0 +.PHONY: get-dep +get-dep: bin/dep + +bin/dep: + mkdir -p ./bin + curl -L https://github.com/golang/dep/releases/download/$(DEP_VERSION)/dep-${dep_arch} -o ./bin/dep + chmod +x ./bin/dep + +# init targets sources.tgz: ./scripts/update-version.sh cp packaging/amazon-linux-ami/ecs-init.spec ecs-init.spec @@ -113,26 +394,70 @@ srpm: .srpm-done rpm: .rpm-done -ubuntu-trusty: - cp packaging/ubuntu-trusty/ecs.conf ecs.conf - tar -czf ./amazon-ecs-init_${VERSION}.orig.tar.gz ecs-init ecs.conf scripts README.md - mkdir -p BUILDROOT - cp -r packaging/ubuntu-trusty/debian BUILDROOT/debian - cp -r ecs-init BUILDROOT - cp packaging/ubuntu-trusty/ecs.conf BUILDROOT - cp -r scripts BUILDROOT - cp README.md BUILDROOT - cd BUILDROOT && debuild $(shell [ "$(DEB_SIGN)" -ne "0" ] || echo "-uc -us") - -get-deps: - go get golang.org/x/tools/cover - go get golang.org/x/tools/cmd/cover - go get github.com/fzipp/gocyclo/cmd/gocyclo - go get golang.org/x/tools/cmd/goimports - go get github.com/golang/mock/mockgen - go get honnef.co/go/tools/cmd/staticcheck +sources-with-agent.tgz: get-cni-sources + ./scripts/update-version.sh + cp packaging/amazon-linux-ami-integrated/ecs-init.spec ecs-init.spec + cp packaging/amazon-linux-ami-integrated/ecs.conf ecs.conf + cp packaging/amazon-linux-ami-integrated/ecs.service ecs.service + cp 
packaging/amazon-linux-ami-integrated/amazon-ecs-volume-plugin.conf amazon-ecs-volume-plugin.conf + cp packaging/amazon-linux-ami-integrated/amazon-ecs-volume-plugin.service amazon-ecs-volume-plugin.service + cp packaging/amazon-linux-ami-integrated/amazon-ecs-volume-plugin.socket amazon-ecs-volume-plugin.socket + tar -czf ./sources-with-agent.tgz ecs-init scripts agent amazon-ecs-cni-plugins amazon-vpc-cni-plugins misc agent-container VERSION + +.rpm-with-agent-done: sources-with-agent.tgz + test -e SOURCES || ln -s . SOURCES + rpmbuild --define "%_topdir $(PWD)" -bb ecs-init.spec + find RPMS/ -type f -exec cp {} . \; + touch .rpm-with-agent-done + +rpm-with-agent: .rpm-with-agent-done + +test-in-docker-init: + docker build -f scripts/dockerfiles/test-init.dockerfile -t "amazon/amazon-ecs-init-test:make" . + docker run -v "$(shell pwd):/go/src/github.com/aws/amazon-ecs-agent" "amazon/amazon-ecs-init-test:make" + +build-mock-images-init: + docker build -t "test.localhost/amazon/mock-ecs-agent" -f "scripts/dockerfiles/mock-agent.dockerfile" . + docker build -t "test.localhost/amazon/wants-update" -f "scripts/dockerfiles/wants-update.dockerfile" . + docker build -t "test.localhost/amazon/exit-success" -f "scripts/dockerfiles/exit-success.dockerfile" . 
+ +# Dockerfree targets +# TODO: arm +dockerfree-pause: + GOOS=linux GOARCH=amd64 ./scripts/build-pause + +dockerfree-certs: + GOOS=linux GOARCH=amd64 ./scripts/get-host-certs + +dockerfree-cni-plugins: get-cni-sources + GOOS=linux GOARCH=amd64 ./scripts/build-cni-plugins + +dockerfree-agent-image: dockerfree-pause dockerfree-certs dockerfree-cni-plugins static-with-pause + GOOS=linux GOARCH=amd64 ./scripts/build-agent-image + +dockerfree-all: dockerfree-agent-image rpm clean: + # ensure docker is running and we can talk to it, abort if not: + docker ps > /dev/null + -docker rmi $(BUILDER_IMAGE) "amazon/amazon-ecs-agent-cleanbuild:make" + -docker rmi $(BUILDER_IMAGE) "amazon/amazon-ecs-agent-cleanbuild-windows:make" + rm -f misc/certs/ca-certificates.crt &> /dev/null + rm -rf out/ + -$(MAKE) -C $(ECS_CNI_REPOSITORY_SRC_DIR) clean + -$(MAKE) -C misc/netkitten $(MFLAGS) clean + -$(MAKE) -C misc/volumes-test $(MFLAGS) clean + -$(MAKE) -C misc/exec-command-agent-test $(MFLAGS) clean + -$(MAKE) -C misc/gremlin $(MFLAGS) clean + -$(MAKE) -C misc/image-cleanup-test-images $(MFLAGS) clean + -$(MAKE) -C misc/container-health $(MFLAGS) clean + -rm -f .get-deps-stamp + -rm -f .builder-image-stamp + -rm -f .out-stamp + -rm -rf $(PWD)/bin + -rm -rf cover.out + -rm -rf coverprofile.out + # ecs init cleanup -rm -f ecs-init.spec -rm -f ecs.conf -rm -f ecs.service @@ -140,7 +465,7 @@ clean: -rm -f amazon-ecs-volume-plugin.service -rm -f amazon-ecs-volume-plugin.socket -rm -rf ./bin - -rm -f ./sources.tgz + -rm -f ./sources.tgz sources-with-agent.tgz -rm -f ./amazon-ecs-init -rm -f ./ecs-agent-*.tar -rm -f ./ecs-init-*.src.rpm @@ -148,6 +473,11 @@ clean: -rm -rf ./BUILDROOT BUILD RPMS SRPMS SOURCES SPECS -rm -rf ./x86_64 -rm -f ./amazon-ecs-init_${VERSION}* - -rm -f .srpm-done .rpm-done - -rm -f cover.out - -rm -f coverprofile.out + -rm -f .srpm-done .rpm-done .rpm-with-agent-done + -rm -rf coverprofile-init.out + -rm -f misc/certs/host-certs.crt &> /dev/null + -rm -rf 
misc/pause-container/image/ + -rm -rf misc/pause-container/rootfs/ + -rm -rf misc/plugins/ + -rm -f misc/pause-container/amazon-ecs-pause.tar + -rm -rf rootfs/ diff --git a/NOTICE b/NOTICE index cb8f830cb5c..edc8905a51a 100644 --- a/NOTICE +++ b/NOTICE @@ -1,2 +1,34 @@ -Amazon Elastic Container Service RPM -Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Amazon Elastic Container Service Agent +Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: + +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +This product contains software (https://github.com/fsouza/go-dockerclient) developed +by Francisco Souza under the Apache License 2.0 (see: inactivity_timeout_handler.go) + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. + +Kubernetes, Copyright 2016 The Kubernetes Authors. +kubernetes/build/pause Copyright 2016 The Kubernetes Authors. 
diff --git a/README.md b/README.md index 3f951ef63c7..cfee0ef050a 100644 --- a/README.md +++ b/README.md @@ -1,62 +1,279 @@ -# Amazon Elastic Container Service RPM +# Amazon ECS Container Agent -[![Build Status](https://travis-ci.org/aws/amazon-ecs-init.svg?branch=master)](https://travis-ci.org/aws/amazon-ecs-init) +![Amazon ECS logo](doc/ecs.png "Amazon ECS") -The Amazon Elastic Container Service RPM is software developed to support the [Amazon ECS Container -Agent](http://github.com/aws/amazon-ecs-agent). The Amazon ECS RPM is packaged for RPM-based systems that utilize -[Upstart](http://upstart.ubuntu.com) as the init system. +![Build Status](https://github.com/aws/amazon-ecs-agent/workflows/Build/badge.svg?branch=dev) -## Behavior -The upstart script installed by the Amazon ECS RPM runs at the completion of runlevel 3, 4, or 5 as the system starts. -The script will clean up any previous copies of the Amazon ECS Container Agent, and then start a new copy. Logs from -the RPM are available at `/var/log/ecs/ecs-init.log`, while logs from the Amazon ECS Container Agent are available at -`/var/log/ecs/ecs-agent.log`. The Amazon ECS RPM makes the Amazon ECS Container Agent introspection endpoint available -at `http://127.0.0.1:51678/v1`. Configuration for the Amazon ECS Container Agent is read from `/etc/ecs/ecs.config`. -All of the configurations in this file are used as environment variables of the ECS Agent container. Additionally, some -configurations can be used to configure other properties of the ECS Agent container, as described below. +The Amazon ECS Container Agent is a component of Amazon Elastic Container Service +([Amazon ECS](http://aws.amazon.com/ecs/)) and is responsible for managing containers on behalf of Amazon ECS. 
-| Configuration Key | Example Value(s) | Description | Default value | -|:----------------|:----------------------------|:------------|:-----------------------| -| `ECS_AGENT_LABELS` | `{"test.label.1":"value1","test.label.2":"value2"}` | The labels to add to the ECS Agent container. | | +## Usage -Additionally, the following environment variable(s) can be used to configure the behavior of the RPM: +The best source of information on running this software is the +[Amazon ECS documentation](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_agent.html). -| Environment Variable Name | Example Value(s) | Description | Default value | -|:----------------|:----------------------------|:------------|:-----------------------| -| `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` | <true | false> | By default, the ecs-init service adds an iptable rule to drop non-local packets to localhost if they're not part of an existing forwarded connection or DNAT, and removes the rule upon stop. If `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` is set to true, this rule will not be added/removed. | false | -| `ECS_ALLOW_OFFHOST_INTROSPECTION_ACCESS` | <true | false> | By default, the ecs-init service adds an iptable rule to block access to ECS Agent's introspection port from off-host (or containers in awsvpc network mode), and removes the rule upon stop. If `ECS_ALLOW_OFFHOST_INTROSPECTION_ACCESS` is set to true, this rule will not be added/removed. | false | +Please note that from Agent version 1.20.0, Minimum required Docker version is 1.9.0, corresponding to Docker API version 1.21. For more information, please visit [Amazon ECS Container Agent Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_agent_versions.html). -The above environment variable(s) can be used in the following way -- On Amazon Linux 1, the flag `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` can be turned on by adding `env ECS_SKIP_LOCALHOST_TRAFFIC_FILTER=true` to /etc/init/ecs.conf. 
-- On Amazon Linux 2, the flag `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER` can be turned on by adding `ECS_SKIP_LOCALHOST_TRAFFIC_FILTER=true` to /etc/ecs/ecs.config. +### On the Amazon Linux AMI -## Usage -The upstart script installed by the Amazon Elastic Container Service RPM can be started or stopped with the following commands respectively: +On the [Amazon Linux AMI](https://aws.amazon.com/amazon-linux-ami/), we provide an installable RPM which can be used via +`sudo yum install ecs-init && sudo start ecs`. This is the recommended way to run it in this environment. -* `sudo start ecs` -* `sudo stop ecs` +### On Other Linux AMIs -### Updates -Updates to the Amazon ECS Container Agent should be performed through the Amazon ECS Container Agent. In the case where -an update failed and the Amazon ECS Container Agent is no longer functional, a rollback can be initiated as follows: +The Amazon ECS Container Agent may also be run in a Docker container on an EC2 instance with a recent Docker version +installed. A Docker image is available in our +[Docker Hub Repository](https://registry.hub.docker.com/u/amazon/amazon-ecs-agent/). -1. `sudo stop ecs` -2. `sudo /usr/libexec/amazon-ecs-init reload-cache` -3. 
`sudo start ecs` +```bash +$ # Set up directories the agent uses +$ mkdir -p /var/log/ecs /etc/ecs /var/lib/ecs/data +$ touch /etc/ecs/ecs.config +$ # Set up necessary rules to enable IAM roles for tasks +$ sysctl -w net.ipv4.conf.all.route_localnet=1 +$ iptables -t nat -A PREROUTING -p tcp -d 169.254.170.2 --dport 80 -j DNAT --to-destination 127.0.0.1:51679 +$ iptables -t nat -A OUTPUT -d 169.254.170.2 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 51679 +$ # Run the agent +$ docker run --name ecs-agent \ + --detach=true \ + --restart=on-failure:10 \ + --volume=/var/run/docker.sock:/var/run/docker.sock \ + --volume=/var/log/ecs:/log \ + --volume=/var/lib/ecs/data:/data \ + --net=host \ + --env-file=/etc/ecs/ecs.config \ + --env=ECS_LOGFILE=/log/ecs-agent.log \ + --env=ECS_DATADIR=/data/ \ + --env=ECS_ENABLE_TASK_IAM_ROLE=true \ + --env=ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true \ + amazon/amazon-ecs-agent:latest +``` -## Security disclosures -If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or [email AWS security directly](mailto:aws-security@amazon.com). +### On Other Linux AMIs when awsvpc networking mode is enabled + +For the AWS VPC networking mode, ECS agent requires CNI plugin and dhclient to be available. ECS also needs the ecs-init to run as part of its startup. +The following is an example of docker run configuration for running ecs-agent with Task ENI enabled. Note that ECS agent currently only supports cgroupfs for cgroup driver. 
+``` +$ # Run the agent +$ /usr/bin/docker run --name ecs-agent \ +--init \ +--restart=on-failure:10 \ +--volume=/var/run:/var/run \ +--volume=/var/log/ecs/:/log:Z \ +--volume=/var/lib/ecs/data:/data:Z \ +--volume=/etc/ecs:/etc/ecs \ +--volume=/sbin:/host/sbin \ +--volume=/lib:/lib \ +--volume=/lib64:/lib64 \ +--volume=/usr/lib:/usr/lib \ +--volume=/usr/lib64:/usr/lib64 \ +--volume=/proc:/host/proc \ +--volume=/sys/fs/cgroup:/sys/fs/cgroup \ +--net=host \ +--env-file=/etc/ecs/ecs.config \ +--cap-add=sys_admin \ +--cap-add=net_admin \ +--env ECS_ENABLE_TASK_ENI=true \ +--env ECS_UPDATES_ENABLED=true \ +--env ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=1h \ +--env ECS_DATADIR=/data \ +--env ECS_ENABLE_TASK_IAM_ROLE=true \ +--env ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true \ +--env ECS_LOGFILE=/log/ecs-agent.log \ +--env ECS_AVAILABLE_LOGGING_DRIVERS='["json-file","awslogs","syslog","none"]' \ +--env ECS_LOGLEVEL=info \ +--detach \ +amazon/amazon-ecs-agent:latest +``` + +See also the Advanced Usage section below. + +### On the ECS Optimized Windows AMI + +ECS Optimized Windows AMI ships with a pre-installed PowerShell module called ECSTools to install, configure, and run the ECS Agent as a Windows service. +To install the service, you can run the following PowerShell commands on an EC2 instance. To launch into another cluster instead of windows, replace the 'windows' in the script below with the name of your cluster. + +```powershell +PS C:\> Import-Module ECSTools +PS C:\> # The -EnableTaskIAMRole option is required to enable IAM roles for tasks. 
+PS C:\> Initialize-ECSAgent -Cluster 'windows' -EnableTaskIAMRole +``` + +#### Downloading a Different Version of the ECS Agent + +To download a different version of the ECS Agent, you can do the following: + +```powershell +PS C:\> # use agentVersion = "latest" for the latest available agent version +PS C:\> $agentVersion = "v1.20.4" +PS C:\> Initialize-ECSAgent -Cluster 'windows' -EnableTaskIAMRole -Version $agentVersion +``` + +## Advanced Usage + +The Amazon ECS Container Agent supports a number of configuration options, most of which should be set through +environment variables. + +### Environment Variables + +The table below provides an overview of optional environment variables that can be used to configure the ECS agent. See +[the Amazon ECS developer guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) for +additional details on each available environment variable. + +| Environment Key | Example Value(s) | Description | Default value on Linux | Default value on Windows | +|:----------------|:----------------------------|:------------|:-----------------------|:-------------------------| +| `ECS_CLUSTER` | clusterName | The cluster this agent should check into. | default | default | +| `ECS_RESERVED_PORTS` | `[22, 80, 5000, 8080]` | An array of ports that should be marked as unavailable for scheduling on this container instance. | `[22, 2375, 2376, 51678, 51679]` | `[53, 135, 139, 445, 2375, 2376, 3389, 5985, 5986, 51678, 51679]` +| `ECS_RESERVED_PORTS_UDP` | `[53, 123]` | An array of UDP ports that should be marked as unavailable for scheduling on this container instance. | `[]` | `[]` | +| `ECS_ENGINE_AUTH_TYPE` | "docker" | "dockercfg" | The type of auth data that is stored in the `ECS_ENGINE_AUTH_DATA` key.
| | | +| `ECS_ENGINE_AUTH_DATA` | See the [dockerauth documentation](https://godoc.org/github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth) | Docker [auth data](https://godoc.org/github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth) formatted as defined by `ECS_ENGINE_AUTH_TYPE`. | | | +| `AWS_DEFAULT_REGION` | <us-west-2>|<us-east-1>|… | The region to be used in API requests as well as to infer the correct backend host. | Taken from Amazon EC2 instance metadata. | Taken from Amazon EC2 instance metadata. | +| `AWS_ACCESS_KEY_ID` | AKIDEXAMPLE | The [access key](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) used by the agent for all calls. | Taken from Amazon EC2 instance metadata. | Taken from Amazon EC2 instance metadata. | +| `AWS_SECRET_ACCESS_KEY` | EXAMPLEKEY | The [secret key](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) used by the agent for all calls. | Taken from Amazon EC2 instance metadata. | Taken from Amazon EC2 instance metadata. | +| `AWS_SESSION_TOKEN` | | The [session token](http://docs.aws.amazon.com/STS/latest/UsingSTS/Welcome.html) used for temporary credentials. | Taken from Amazon EC2 instance metadata. | Taken from Amazon EC2 instance metadata. | +| `DOCKER_HOST` | `unix:///var/run/docker.sock` | Used to create a connection to the Docker daemon; behaves similarly to this environment variable as used by the Docker client. | `unix:///var/run/docker.sock` | `npipe:////./pipe/docker_engine` | +| `ECS_LOGLEVEL` | <crit> | <error> | <warn> | <info> | <debug> | The level of detail to be logged. | info | info | +| `ECS_LOGLEVEL_ON_INSTANCE` | <none> | <crit> | <error> | <warn> | <info> | <debug> | Can be used to override `ECS_LOGLEVEL` and set a level of detail that should be logged in the on-instance log file, separate from the level that is logged in the logging driver. 
If a logging driver is explicitly set, on-instance logs are turned off by default, but can be turned back on with this variable. | none if `ECS_LOG_DRIVER` is explicitly set to a non-empty value; otherwise the same value as `ECS_LOGLEVEL` | none if `ECS_LOG_DRIVER` is explicitly set to a non-empty value; otherwise the same value as `ECS_LOGLEVEL` | +| `ECS_LOGFILE` | /ecs-agent.log | The location where logs should be written. Log level is controlled by `ECS_LOGLEVEL`. | blank | blank | +| `ECS_CHECKPOINT` | <true | false> | Whether to checkpoint state to the DATADIR specified below. | true if `ECS_DATADIR` is explicitly set to a non-empty value; false otherwise | true if `ECS_DATADIR` is explicitly set to a non-empty value; false otherwise | +| `ECS_DATADIR` | /data/ | The container path where state is checkpointed for use across agent restarts. Note that on Linux, when you specify this, you will need to make sure that the Agent container has a bind mount of `$ECS_HOST_DATA_DIR/data:$ECS_DATADIR` with the corresponding values of `ECS_HOST_DATA_DIR` and `ECS_DATADIR`. | /data/ | `C:\ProgramData\Amazon\ECS\data` +| `ECS_UPDATES_ENABLED` | <true | false> | Whether to exit for an updater to apply updates when requested. | false | false | +| `ECS_DISABLE_METRICS` | <true | false> | Whether to disable metrics gathering for tasks. | false | true | +| `ECS_POLL_METRICS` | <true | false> | Whether to poll or stream when gathering metrics for tasks. Setting this value to `true` can help reduce the CPU usage of dockerd and containerd on the ECS container instance. See also ECS_POLL_METRICS_WAIT_DURATION for setting the poll interval. | `false` | `false` | +| `ECS_POLLING_METRICS_WAIT_DURATION` | 10s | Time to wait between polling for metrics for a task. Not used when ECS_POLL_METRICS is false. Maximum value is 20s and minimum value is 5s. If user sets above maximum it will be set to max, and if below minimum it will be set to min. 
| 10s | 10s | +| `ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT` | <true | false> | Whether to pull images for containers with dependencies before the dependsOn condition has been satisfied. | false | false | +| `ECS_RESERVED_MEMORY` | 32 | Memory, in MiB, to reserve for use by things other than containers managed by Amazon ECS. | 0 | 0 | +| `ECS_AVAILABLE_LOGGING_DRIVERS` | `["awslogs","fluentd","gelf","json-file","journald","logentries","splunk","syslog"]` | Which logging drivers are available on the container instance. | `["json-file","none"]` | `["json-file","none"]` | +| `ECS_DISABLE_PRIVILEGED` | `true` | Whether launching privileged containers is disabled on the container instance. | `false` | `false` | +| `ECS_SELINUX_CAPABLE` | `true` | Whether SELinux is available on the container instance. | `false` | `false` | +| `ECS_APPARMOR_CAPABLE` | `true` | Whether AppArmor is available on the container instance. | `false` | `false` | +| `ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION` | 10m | Time to wait to delete containers for a stopped task. If set to less than 1 minute, the value is ignored. | 3h | 3h | +| `ECS_CONTAINER_STOP_TIMEOUT` | 10m | Instance scoped configuration for time to wait for the container to exit normally before being forcibly killed. | 30s | 30s | +| `ECS_CONTAINER_START_TIMEOUT` | 10m | Timeout before giving up on starting a container. | 3m | 8m | +| `ECS_CONTAINER_CREATE_TIMEOUT` | 10m | Timeout before giving up on creating a container. Minimum value is 1m. If user sets a value below minimum it will be set to min. | 4m | 4m | +| `ECS_ENABLE_TASK_IAM_ROLE` | `true` | Whether to enable IAM Roles for Tasks on the Container Instance | `false` | `false` | +| `ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST` | `true` | Whether to enable IAM Roles for Tasks when launched with `host` network mode on the Container Instance | `false` | `false` | +| `ECS_DISABLE_IMAGE_CLEANUP` | `true` | Whether to disable automated image cleanup for the ECS Agent. 
| `false` | `false` | +| `ECS_IMAGE_CLEANUP_INTERVAL` | 30m | The time interval between automated image cleanup cycles. If set to less than 10 minutes, the value is ignored. | 30m | 30m | +| `ECS_IMAGE_MINIMUM_CLEANUP_AGE` | 30m | The minimum time interval between when an image is pulled and when it can be considered for automated image cleanup. | 1h | 1h | +| `NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE` | 30m | The minimum time interval between when a non ECS image is created and when it can be considered for automated image cleanup. | 1h | 1h | +| `ECS_NUM_IMAGES_DELETE_PER_CYCLE` | 5 | The maximum number of images to delete in a single automated image cleanup cycle. If set to less than 1, the value is ignored. | 5 | 5 | +| `ECS_IMAGE_PULL_BEHAVIOR` | <default | always | once | prefer-cached > | The behavior used to customize the pull image process. If `default` is specified, the image will be pulled remotely, if the pull fails then the cached image in the instance will be used. If `always` is specified, the image will be pulled remotely, if the pull fails then the task will fail. If `once` is specified, the image will be pulled remotely if it has not been pulled before or if the image was removed by image cleanup, otherwise the cached image in the instance will be used. If `prefer-cached` is specified, the image will be pulled remotely if there is no cached image, otherwise the cached image in the instance will be used. | default | default | +| `ECS_IMAGE_PULL_INACTIVITY_TIMEOUT` | 1m | The time to wait after docker pulls complete waiting for extraction of a container. Useful for tuning large Windows containers. | 1m | 3m | +| `ECS_IMAGE_PULL_TIMEOUT` | 1h | The time to wait for pulling docker image. | 2h | 2h | +| `ECS_INSTANCE_ATTRIBUTES` | `{"stack": "prod"}` | These attributes take effect only during initial registration. After the agent has joined an ECS cluster, use the PutAttributes API action to add additional attributes. 
For more information, see [Amazon ECS Container Agent Configuration](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the Amazon ECS Developer Guide.| `{}` | `{}` | +| `ECS_ENABLE_TASK_ENI` | `false` | Whether to enable task networking for task to be launched with its own network interface | `false` | Not applicable | +| `ECS_ENABLE_HIGH_DENSITY_ENI` | `false` | Whether to enable high density eni feature when using task networking | `true` | Not applicable | +| `ECS_CNI_PLUGINS_PATH` | `/ecs/cni` | The path where the cni binary file is located | `/amazon-ecs-cni-plugins` | Not applicable | +| `ECS_AWSVPC_BLOCK_IMDS` | `true` | Whether to block access to [Instance Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) for Tasks started with `awsvpc` network mode | `false` | Not applicable | +| `ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES` | `["10.0.15.0/24"]` | In `awsvpc` network mode, traffic to these prefixes will be routed via the host bridge instead of the task ENI | `[]` | Not applicable | +| `ECS_ENABLE_CONTAINER_METADATA` | `true` | When `true`, the agent will create a file describing the container's metadata and the file can be located and consumed by using the container environment variable `$ECS_CONTAINER_METADATA_FILE` | `false` | `false` | +| `ECS_HOST_DATA_DIR` | `/var/lib/ecs` | The source directory on the host from which ECS_DATADIR is mounted. We use this to determine the source mount path for container metadata files in the case the ECS Agent is running as a container. We do not use this value in Windows because the ECS Agent is not running as container in Windows. On Linux, note that when you specify this, you will need to make sure that the Agent container has a bind mount of `$ECS_HOST_DATA_DIR/data:$ECS_DATADIR` with the corresponding values of `ECS_HOST_DATA_DIR` and `ECS_DATADIR`.
| `/var/lib/ecs` | `Not used` | +| `ECS_ENABLE_TASK_CPU_MEM_LIMIT` | `true` | Whether to enable task-level cpu and memory limits | `true` | `false` | +| `ECS_CGROUP_PATH` | `/sys/fs/cgroup` | The root cgroup path that is expected by the ECS agent. This is the path that accessible from the agent mount. | `/sys/fs/cgroup` | Not applicable | +| `ECS_CGROUP_CPU_PERIOD` | `10ms` | CGroups CPU period for task level limits. This value should be between 8ms to 100ms | `100ms` | Not applicable | +| `ECS_AGENT_HEALTHCHECK_HOST` | `localhost` | Override for the ecs-agent container's healthcheck localhost ip address| `localhost` | `localhost` | +| `ECS_ENABLE_CPU_UNBOUNDED_WINDOWS_WORKAROUND` | `true` | When `true`, ECS will allow CPU unbounded(CPU=`0`) tasks to run along with CPU bounded tasks in Windows. | Not applicable | `false` | +| `ECS_ENABLE_MEMORY_UNBOUNDED_WINDOWS_WORKAROUND` | `true` | When `true`, ECS will ignore the memory reservation parameter (soft limit) to run along with memory bounded tasks in Windows. To run a memory unbounded task, omit the memory hard limit and set any memory reservation, it will be ignored. | Not applicable | `false` | +| `ECS_TASK_METADATA_RPS_LIMIT` | `100,150` | Comma separated integer values for steady state and burst throttle limits for task metadata endpoint | `40,60` | `40,60` | +| `ECS_SHARED_VOLUME_MATCH_FULL_CONFIG` | `true` | When `true`, ECS Agent will compare name, driver options, and labels to make sure volumes are identical. When `false`, Agent will short circuit shared volume comparison if the names match. This is the default Docker behavior. If a volume is shared across instances, this should be set to `false`. | `false` | `false`| +| `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` | `ec2_instance` | If `ec2_instance` is specified, existing tags defined on the container instance will be registered to Amazon ECS and will be discoverable using the `ListTagsForResource` API. 
Using this requires that the IAM role associated with the container instance have the `ec2:DescribeTags` action allowed. | `none` | `none` | +| `ECS_CONTAINER_INSTANCE_TAGS` | `{"tag_key": "tag_val"}` | The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. If tags also exist on your container instance that are propagated using the `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` parameter, those tags will be overwritten by the tags specified using `ECS_CONTAINER_INSTANCE_TAGS`. | `{}` | `{}` | +| `ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP` | `true` | Whether to allow the ECS agent to delete containers and images that are not part of ECS tasks. | `false` | `false` | +| `ECS_EXCLUDE_UNTRACKED_IMAGE` | `alpine:latest` | Comma separated list of `imageName:tag` of images that should not be deleted by the ECS agent if `ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP` is enabled. | | | +| `ECS_DISABLE_DOCKER_HEALTH_CHECK` | `false` | Whether to disable the Docker Container health check for the ECS Agent. | `false` | `false` | +| `ECS_NVIDIA_RUNTIME` | nvidia | The Nvidia Runtime to be used to pass Nvidia GPU devices to containers. | nvidia | Not Applicable | +| `ECS_ENABLE_SPOT_INSTANCE_DRAINING` | `true` | Whether to enable Spot Instance draining for the container instance. If true, if the container instance receives a [spot interruption notice](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html), agent will set the instance's status to [DRAINING](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-draining.html), which gracefully shuts down and replaces all tasks running on the instance that are part of a service. It is recommended that this be set to `true` when using spot instances.
| `false` | `false` | +| `ECS_LOG_ROLLOVER_TYPE` | `size` | `hourly` | Determines whether the container agent logfile will be rotated based on size or hourly. By default, the agent logfile is rotated each hour. | `hourly` | `hourly` | +| `ECS_LOG_OUTPUT_FORMAT` | `logfmt` | `json` | Determines the log output format. When the json format is used, each line in the log would be a structured JSON map. | `logfmt` | `logfmt` | +| `ECS_LOG_MAX_FILE_SIZE_MB` | `10` | When the ECS_LOG_ROLLOVER_TYPE variable is set to size, this variable determines the maximum size (in MB) the log file before it is rotated. If the rollover type is set to hourly then this variable is ignored. | `10` | `10` | +| `ECS_LOG_MAX_ROLL_COUNT` | `24` | Determines the number of rotated log files to keep. Older log files are deleted once this limit is reached. | `24` | `24` | +| `ECS_LOG_DRIVER` | `awslogs` | `fluentd` | `gelf` | `json-file` | `journald` | `logentries` | `syslog` | `splunk` | The logging driver to be used by the Agent container. | `json-file` | Not applicable | +| `ECS_LOG_OPTS` | `{"option":"value"}` | The options for configuring the logging driver set in `ECS_LOG_DRIVER`. | `{}` | Not applicable | +| `ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE` | `true` | Whether to enable awslogs log driver to authenticate via credentials of task execution IAM role. Needs to be true if you want to use awslogs log driver in a task that has task execution IAM role specified. When using the ecs-init RPM with version equal or later than V1.16.0-1, this env is set to true by default. | `false` | `false` | +| `ECS_FSX_WINDOWS_FILE_SERVER_SUPPORTED` | `true` | Whether FSx for Windows File Server volume type is supported on the container instance. This variable is only supported on agent versions 1.47.0 and later. | `false` | `true` | + +### Persistence + +When you run the Amazon ECS Container Agent in production, its `datadir` should be persisted between runs of the Docker +container. 
If this data is not persisted, the agent registers a new container instance ARN on each launch and is not +able to update the state of tasks it previously ran. + +### Flags + +The agent also supports the following flags: + +* `-k` — The agent will not require valid SSL certificates for the services that it communicates with. We + recommend against using this flag. +* `-loglevel` — Options: `[<crit>|<error>|<warn>|<info>|<debug>]`. The agent will output on stdout at the given + level. This is overridden by the `ECS_LOGLEVEL` environment variable, if present. -## Development +## Building and Running from Source -#### Dev dependencies +**Running the Amazon ECS Container Agent outside of Amazon EC2 is not supported.** -Run `make get-deps` to get dependencies for running tests and generating mocks. +### Docker Image (on Linux) -#### Generating mocks +The Amazon ECS Container Agent may be built by typing `make` with the [Docker +daemon](https://docs.docker.com/installation/) (v1.5.0) running. -Mocks can be generated using the `make generate` Makefile target. **NOTE** that this must be run on a linux machine. +This produces an image tagged `amazon/ecs-container-agent:make` that +you may run as described above. + +### Standalone (on Linux) + +The Amazon ECS Container Agent may also be run outside of a Docker container as a Go binary. This is not recommended +for production on Linux, but it can be useful for development or easier integration with your local Go tools. + +The following commands run the agent outside of Docker: + +``` +make gobuild +./out/amazon-ecs-agent +``` + +### Make Targets (on Linux) + +The following targets are available. Each may be run with `make <target>`.
+ +| Make Target | Description | +|:-----------------------|:------------| +| `release` | *(Default)* Builds the agent within a Docker container and packages it into a scratch-based image | +| `gobuild` | Runs a normal `go build` of the agent and stores the binary in `./out/amazon-ecs-agent` | +| `static` | Runs `go build` to produce a static binary in `./out/amazon-ecs-agent` | +| `test` | Runs all unit tests using `go test` | +| `run-integ-tests` | Runs all integration tests in the `engine` and `stats` packages | +| `clean` | Removes build artifacts. *Note: this does not remove Docker images* | + +### Standalone (on Windows) + +The Amazon ECS Container Agent may be built by typing `go build -o amazon-ecs-agent.exe ./agent`. + +### Scripts (on Windows) + +The following scripts are available to help develop the Amazon ECS Container Agent on Windows: + +* `scripts\run-integ-tests.ps1` - Runs all integration tests in the `engine` and `stats` packages +* `misc\windows-deploy\Install-ECSAgent.ps1` - Install the ECS agent as a Windows service +* `misc\windows-deploy\amazon-ecs-agent.ps1` - Helper script to set up the host and run the agent as a process +* `misc\windows-deploy\user-data.ps1` - Sample user-data that can be used with the Windows Server 2016 with Containers + AMI to run the agent as a process + + +## Contributing + +Contributions and feedback are welcome! Proposals and pull requests will be considered and responded to. For more +information, see the [CONTRIBUTING.md](https://github.com/aws/amazon-ecs-agent/blob/master/CONTRIBUTING.md) file. + +If you have a bug or an issue around the behavior of the ECS agent, please open it here. + +If you have a feature request, please open it over at the [AWS Containers Roadmap](https://github.com/aws/containers-roadmap). + +Amazon Web Services does not currently provide support for modified copies of this software.
+ +## Security disclosures +If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or [email AWS security directly](mailto:aws-security@amazon.com). ## License -The Amazon Elastic Container Service RPM is licensed under the Apache 2.0 License. +The Amazon ECS Container Agent is licensed under the Apache 2.0 License. diff --git a/THIRD-PARTY b/THIRD-PARTY index 5ce32509fe2..e9e3038b8ff 100644 --- a/THIRD-PARTY +++ b/THIRD-PARTY @@ -1,437 +1,208 @@ -** aws-sdk-go; version 1.13.24 -- https://github.com/aws/aws-sdk-go -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. -** docker/docker; version 1.12.0-rc4 -- https://github.com/docker/docker -Docker -Copyright 2012-2015 Docker, Inc. +** github.com/aws/aws-sdk-go; version v1.36.0 -- https://github.com/aws/aws-sdk-go +** github.com/containerd/cgroups; version c3fc2b77b568af2406f3931cf3d3f17d76736886 -- https://github.com/containerd/cgroups +** github.com/containerd/continuity; version c220ac4f01b8a301edcb9c9c05d7600249138cfa -- https://github.com/containerd/continuity +** github.com/containernetworking/cni; version v0.7.1 -- https://github.com/containernetworking/cni +** github.com/coreos/go-iptables; version v0.1.0 -- https://github.com/coreos/go-iptables +** github.com/coreos/go-systemd; version d2196463941895ee908e13531a23a39feb9e1243 -- https://github.com/coreos/go-systemd +** github.com/docker/distribution; version 1cb4180b1a5b9c029b2f2beaeb38f0b5cf64e12e -- https://www.docker.com +** github.com/docker/docker; version 785fe99bdb7c004acd57d5d65d57ceb256d87ef1 -- https://github.com/moby/moby +** github.com/docker/go-connections; version v0.3.0 -- https://github.com/docker/go-connections +** github.com/docker/go-units; version v0.3.2 -- https://github.com/docker/go-units +** github.com/go-ini/ini; version 
ee900ca565931451fe4e4409bcbd4316331cec1c -- https://github.com/go-ini/ini +** github.com/golang/mock; version 1.3.1 -- https://github.com/golang/mock/blob/master/LICENSE +** github.com/jmespath/go-jmespath; version c2b33e84 -- https://github.com/jmespath/go-jmespath +** github.com/opencontainers/go-digest; version v1.0.0-rc1 -- https://github.com/opencontainers/go-digest +** github.com/opencontainers/image-spec; version v1.0.1 -- https://github.com/opencontainers/image-spec +** github.com/opencontainers/runc; version v0.1.1 -- https://github.com/opencontainers/runc +** github.com/opencontainers/runtime-spec; version d349388c43b01b2ea695965ae561b5bddb81318f -- https://github.com/opencontainers/runtime-spec +** github.com/prometheus/client_golang; version 0.9.0 -- https://github.com/prometheus/client_golang +** github.com/vishvananda/netlink; version fe3b5664d23a11b52ba59bece4ff29c52772a56b -- https://github.com/vishvananda/netlink +** github.com/vishvananda/netns; version be1fbeda19366dea804f00efff2dd73a1642fdcc -- https://github.com/vishvananda/netns -This product includes software developed at Docker, Inc. -(https://www.docker.com). +Apache License -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. +Version 2.0, January 2004 -The following is courtesy of our legal counsel: +http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND +DISTRIBUTION +1. Definitions. -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. -For more information, please see https://www.bis.doc.gov -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
-** github.com/coreos/go-systemd; version v22.0.0 -- -https://github.com/coreos/go-systemd -CoreOS Project -Copyright 2018 CoreOS, Inc +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this +document. -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). -** github.com/docker/go-connections; version -e15c02316c12de00874640cd76311849de2aeed5 -- -https://github.com/docker/go-connections -Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-** github.com/docker/go-plugins-helpers/; version -1e6269c305b8c75cfda1c8aa91349c38d7335814 -- -https://github.com/docker/go-plugins-helpers/ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. -(https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. -** github.com/golang/mock; version bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 -- -https://github.com/golang/mock/blob/master/LICENSE -Alex Reece -Google Inc. -** github.com/opencontainers/go-digest; version v1.0.0-rc1 -- -https://github.com/opencontainers/go-digest -Copyright © 2016 Docker, Inc. -** github.com/opencontainers/image-spec; version v1.0.1 -- -https://github.com/opencontainers/image-spec -Copyright 2016 The Linux Foundation. -** github.com/opencontainers/runc; version v0.1.1 -- -https://github.com/opencontainers/runc -Copyright 2014 Docker, Inc. -** go-ini/ini; version v1.21.1 -- https://github.com/go-ini/ini -Copyright 2016 -** jmespath/go-jmespath; version 0.2.2 -- -https://github.com/jmespath/go-jmespath -Copyright 2015 James Saryerwinnie -Apache License -Version 2.0, January 2004 +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. -http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND -DISTRIBUTION - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, and - distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by the - copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all other - entities that control, are controlled by, or are under common control - with that entity. For the purposes of this definition, "control" means - (i) the power, direct or indirect, to cause the direction or management - of such entity, whether by contract or otherwise, or (ii) ownership of - fifty percent (50%) or more of the outstanding shares, or (iii) - beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity exercising - permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation source, - and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but not limited - to compiled object code, generated documentation, and conversions to - other media types. - - "Work" shall mean the work of authorship, whether in Source or Object - form, made available under the License, as indicated by a copyright - notice that is included in or attached to the work (an example is - provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object form, - that is based on (or derived from) the Work and for which the editorial - revisions, annotations, elaborations, or other modifications represent, - as a whole, an original work of authorship. For the purposes of this - License, Derivative Works shall not include works that remain separable - from, or merely link (or bind by name) to the interfaces of, the Work and - Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including the original - version of the Work and any modifications or additions to that Work or - Derivative Works thereof, that is intentionally submitted to Licensor for - inclusion in the Work by the copyright owner or by an individual or Legal - Entity authorized to submit on behalf of the copyright owner. For the - purposes of this definition, "submitted" means any form of electronic, - verbal, or written communication sent to the Licensor or its - representatives, including but not limited to communication on electronic - mailing lists, source code control systems, and issue tracking systems - that are managed by, or on behalf of, the Licensor for the purpose of - discussing and improving the Work, but excluding communication that is - conspicuously marked or otherwise designated in writing by the copyright - owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity on - behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of this - License, each Contributor hereby grants to You a perpetual, worldwide, - non-exclusive, no-charge, royalty-free, irrevocable copyright license to - reproduce, prepare Derivative Works of, publicly display, publicly perform, - sublicense, and distribute the Work and such Derivative Works in Source or - Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of this - License, each Contributor hereby grants to You a perpetual, worldwide, - non-exclusive, no-charge, royalty-free, irrevocable (except as stated in - this section) patent license to make, have made, use, offer to sell, sell, - import, and otherwise transfer the Work, where such license applies only to - those patent claims licensable by such Contributor that are necessarily - infringed by their Contribution(s) alone or by combination of their - Contribution(s) with the Work to which such Contribution(s) was submitted. - If You institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work or a - Contribution incorporated within the Work constitutes direct or contributory - patent infringement, then any patent licenses granted to You under this - License for that Work shall terminate as of the date such litigation is - filed. - - 4. Redistribution. You may reproduce and distribute copies of the Work or - Derivative Works thereof in any medium, with or without modifications, and - in Source or Object form, provided that You meet the following conditions: - - (a) You must give any other recipients of the Work or Derivative Works a - copy of this License; and - - (b) You must cause any modified files to carry prominent notices stating - that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works that You - distribute, all copyright, patent, trademark, and attribution notices - from the Source form of the Work, excluding those notices that do not - pertain to any part of the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must include - a readable copy of the attribution notices contained within such NOTICE - file, excluding those notices that do not pertain to any part of the - Derivative Works, in at least one of the 
following places: within a - NOTICE text file distributed as part of the Derivative Works; within the - Source form or documentation, if provided along with the Derivative - Works; or, within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents of the - NOTICE file are for informational purposes only and do not modify the - License. You may add Your own attribution notices within Derivative Works - that You distribute, alongside or as an addendum to the NOTICE text from - the Work, provided that such additional attribution notices cannot be - construed as modifying the License. - - You may add Your own copyright statement to Your modifications and may - provide additional or different license terms and conditions for use, - reproduction, or distribution of Your modifications, or for any such - Derivative Works as a whole, provided Your use, reproduction, and - distribution of the Work otherwise complies with the conditions stated in - this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, any - Contribution intentionally submitted for inclusion in the Work by You to the - Licensor shall be under the terms and conditions of this License, without - any additional terms or conditions. Notwithstanding the above, nothing - herein shall supersede or modify the terms of any separate license agreement - you may have executed with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, except - as required for reasonable and customary use in describing the origin of the - Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in - writing, Licensor provides the Work (and each Contributor provides its - Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied, including, without limitation, any - warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or - FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining - the appropriateness of using or redistributing the Work and assume any risks - associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, whether - in tort (including negligence), contract, or otherwise, unless required by - applicable law (such as deliberate and grossly negligent acts) or agreed to - in writing, shall any Contributor be liable to You for damages, including - any direct, indirect, special, incidental, or consequential damages of any - character arising as a result of this License or out of the use or inability - to use the Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all other - commercial damages or losses), even if such Contributor has been advised of - the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing the Work - or Derivative Works thereof, You may choose to offer, and charge a fee for, - acceptance of support, warranty, indemnity, or other liability obligations - and/or rights consistent with this License. However, in accepting such - obligations, You may act only on Your own behalf and on Your sole - responsibility, not on behalf of any other Contributor, and only if You - agree to indemnify, defend, and hold each Contributor harmless for any - liability incurred by, or claims asserted against, such Contributor by - reason of your accepting any such warranty or additional liability. 
END OF - TERMS AND CONDITIONS + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such +entity. + + + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + + + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and +configuration files. + + + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object +code, generated documentation, and conversions to other media +types. + + + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that is +included in or attached to the work (an example is provided in the Appendix +below). + + + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works +thereof. 
+ + + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. For the purposes +of this definition, "submitted" means any form of electronic, verbal, or +written communication sent to the Licensor or its representatives, including +but not limited to communication on electronic mailing lists, source code +control systems, and issue tracking systems that are managed by, or on behalf +of, the Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise designated in +writing by the copyright owner as "Not a Contribution." + + + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy +of this License; and + +(b) You must cause any modified files to carry prominent notices stating that +You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from the +Source form of the Work, excluding those notices that do not pertain to any +part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then +any Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE 
text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise +complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
END OF TERMS AND +CONDITIONS APPENDIX: How to apply the Apache License to your work. @@ -463,288 +234,102 @@ See the License for the specific language governing permissions and limitations under the License. -* For aws-sdk-go see also this required NOTICE: - AWS SDK for Go - Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. - Copyright 2014-2015 Stripe, Inc. -* For docker/docker see also this required NOTICE: - Docker - Copyright 2012-2015 Docker, Inc. - - This product includes software developed at Docker, Inc. - (https://www.docker.com). - - This product contains software (https://github.com/kr/pty) developed - by Keith Rarick, licensed under the MIT License. - - The following is courtesy of our legal counsel: - - - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. - - For more information, please see https://www.bis.doc.gov +* For github.com/aws/aws-sdk-go see also this required NOTICE: +AWS SDK for Go +Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. +* For github.com/containerd/cgroups see also this required NOTICE: +Copyright (c) 2016-2017 the containerd authors +* For github.com/containerd/continuity see also this required NOTICE: +Copyright (c) 2016-2017 the containerd authors +* For github.com/containernetworking/cni see also this required NOTICE: +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +* For github.com/coreos/go-iptables see also this required NOTICE: +CoreOS Project +Copyright 2018 CoreOS, Inc - See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
* For github.com/coreos/go-systemd see also this required NOTICE: - CoreOS Project - Copyright 2018 CoreOS, Inc - - This product includes software developed at CoreOS, Inc. - (http://www.coreos.com/). +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +* For github.com/docker/distribution see also this required NOTICE: +Copyright 2013-2017 Docker, Inc. +* For github.com/docker/docker see also this required NOTICE: +Copyright 2013-2017 Docker, Inc. * For github.com/docker/go-connections see also this required NOTICE: - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other - modifications - represent, as a whole, an original work of authorship. For the - purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces - of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright - owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control - systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this - License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-* For github.com/docker/go-plugins-helpers/ see also this required NOTICE: - Docker - Copyright 2012-2015 Docker, Inc. - - This product includes software developed at Docker, Inc. - (https://www.docker.com). - - This product contains software (https://github.com/kr/pty) developed - by Keith Rarick, licensed under the MIT License. - - The following is courtesy of our legal counsel: - - - Use and transfer of Docker may be subject to certain restrictions by the - United States and other governments. - It is your responsibility to ensure that your use and/or transfer does not - violate applicable laws. - - For more information, please see https://www.bis.doc.gov - - See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. +Copyright 2015 Docker, Inc. +* For github.com/docker/go-units see also this required NOTICE: +Copyright 2015 Docker, Inc. +* For github.com/go-ini/ini see also this required NOTICE: +Copyright 2014 Unknwon * For github.com/golang/mock see also this required NOTICE: - Alex Reece - Google Inc. +Alex Reece +Google Inc. +* For github.com/jmespath/go-jmespath see also this required NOTICE: +Copyright 2015 James Saryerwinnie * For github.com/opencontainers/go-digest see also this required NOTICE: - Copyright © 2016 Docker, Inc. +Copyright © 2016 Docker, Inc. * For github.com/opencontainers/image-spec see also this required NOTICE: - Copyright 2016 The Linux Foundation. +Copyright 2016 The Linux Foundation. * For github.com/opencontainers/runc see also this required NOTICE: - Copyright 2014 Docker, Inc. -* For go-ini/ini see also this required NOTICE: - Copyright 2016 -* For jmespath/go-jmespath see also this required NOTICE: - Copyright 2015 James Saryerwinnie +Copyright 2014 Docker, Inc. +* For github.com/opencontainers/runtime-spec see also this required NOTICE: +Copyright 2015 The Linux Foundation. 
+* For github.com/prometheus/client_golang see also this required NOTICE: +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license +details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language +(golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 +* For github.com/vishvananda/netlink see also this required NOTICE: +Copyright 2014 Vishvananda Ishaya. +Copyright 2014 Docker, Inc. +* For github.com/vishvananda/netns see also this required NOTICE: +Copyright 2014 Vishvananda Ishaya. +Copyright 2014 Docker, Inc. ------- +----- -** github.com/fsouza/go-dockerclient; version -d25e8d2941bd448756b336a2ec3abf79802d6ddf -- -https://github.com/fsouza/go-dockerclient +** github.com/fsouza/go-dockerclient; version d25e8d2941bd448756b336a2ec3abf79802d6ddf -- https://github.com/fsouza/go-dockerclient Copyright (c) 2013-2017, go-dockerclient authors -** github.com/pkg/errors; version c605e284fe17294bda444b34710735b29d1a9d90 -- -https://github.com/pkg/errors +** github.com/godbus/dbus; version v4.1.0 -- https://github.com/godbus/dbus +Copyright (c) 2013, Georg Reinke (), Google +** github.com/gorilla/websocket; version 0868951cdb8e69bc42df4598bdc6164ff2f1a072 -- https://github.com/gorilla/websocket +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+** github.com/Nvveen/Gotty; version cd527374f1e5bff4938207604a14f2e38a9cf512 -- https://github.com/Nvveen/Gotty +Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) +** github.com/pkg/errors; version c605e284fe17294bda444b34710735b29d1a9d90 -- https://github.com/pkg/errors Copyright (c) 2015, Dave Cheney Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -757,41 +342,139 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------- +----- -** github.com/pmezard/go-difflib; version v1.0.0 -- -https://github.com/pmezard/go-difflib -Copyright (c) 2013, Patrick Mezard +** golang.org/x/tools; version 1 -- https://github.com/golang/tools +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----- + +** gorilla/mux; version v1.6.2 -- https://github.com/gorilla/mux +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----- + +** golang.org/x/crypto; version (master) -- https://github.com/golang/crypto +Copyright (c) 2009 The Go Authors. All rights reserved. +** golang.org/x/net; version master -- https://go.googlesource.com/net/ +Copyright (c) 2009 The Go Authors. All rights reserved. +** https://github.com/golang/time; version NA -- https://github.com/golang/time +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----- + +** golang.org/x/sys; version master -- https://github.com/golang/sys +Copyright (c) 2009 The Go Authors. All rights reserved. + +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. 
- The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------- - -** github.com/NVIDIA/gpu-monitoring-tools/; version master -- -https://github.com/NVIDIA/gpu-monitoring-tools/ +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----- + +** github.com/NVIDIA/gpu-monitoring-tools/; version master -- https://github.com/NVIDIA/gpu-monitoring-tools/ Copyright (c) 2018, NVIDIA Corporation All rights reserved. @@ -799,15 +482,15 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE @@ -820,24 +503,28 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------- +----- -** github.com/cihub/seelog; version v2.6 -- https://github.com/cihub/seelog +** github.com/cihub/seelog; version 0fa7e41d2b9600ca6b7e1592478124e4f800f724 -- https://github.com/cihub/seelog Copyright (c) 2012, Cloud Instruments Co., Ltd. +** github.com/pborman/uuid; version ca53cad383cad2479bbba7f7a1a05797ec1386e4 -- https://github.com/pborman/uuid +Copyright (c) 2009,2014 Google Inc. All rights reserved. +** github.com/pmezard/go-difflib; version v1.0.0 -- https://github.com/pmezard/go-difflib +Copyright (c) 2013, Patrick Mezard Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. +3. 
Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -850,63 +537,63 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------- +----- -** github.com/davecgh/go-spew; version v1.1.1 -- -https://github.com/davecgh/go-spew +** github.com/davecgh/go-spew; version v1.1.0 -- https://github.com/davecgh/go-spew Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and/or distribute this software for any +ISC License + +Copyright (c) 2004-2010 by Internet Systems Consortium, Inc. ("ISC") + +Copyright (c) 1995-2003 by Internet Software Consortium + +Permission to use, copy, modify, and /or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD +TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. ------- +----- -** github.com/stretchr/testify; version v1.2.2 -- -https://github.com/stretchr/testify -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell +** github.com/patrickmn/go-cache; version v2.1.0 -- https://github.com/patrickmn/go-cache +Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors -Please consider promoting this project if you find it useful. +Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. ------- +----- -** github.com/Azure/go-ansiterm; version -fa152c58bc15761d0200cb75fe958b89a9d4888e -- -https://github.com/Azure/go-ansiterm +** github.com/Azure/go-ansiterm; version fa152c58bc15761d0200cb75fe958b89a9d4888e -- https://github.com/Azure/go-ansiterm Copyright (c) 2015 Microsoft Corporation -** github.com/Microsoft/go-winio; version v0.3.6 -- -https://github.com/Microsoft/go-winio +** github.com/deniswernert/udev; version 82fe5be8ca5f7d8255cc838723687f61bb175120 -- https://github.com/deniswernert/udev +Copyright (c) 2014 deniswernert +** github.com/Microsoft/go-winio; version v0.3.6 -- https://github.com/Microsoft/go-winio Copyright (c) 2015 Microsoft -** logrus; version 0.11.5 -- https://github.com/sirupsen/logrus +** github.com/Sirupsen/logrus; version 3ec0642a7fb6488f65b06f9040adc67e3990296a -- https://github.com/Sirupsen/logrus Copyright (c) 2014 Simon Eskildsen MIT License @@ -929,4 +616,141 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. + +----- + +** https://github.com/didip/tollbooth; version v3.0.2 -- https://github.com/didip/tollbooth +NA + +The MIT License (MIT) + +Copyright (c) 2015 Didip Kerabat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +----- + +** github.com/stretchr/testify; version d77da356e56a7428ad25149ca77381849a6a5232 -- https://github.com/stretchr/testify +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +----- + +** github.com/gogo/protobuf; version v0.5 -- https://github.com/gogo/protobuf +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----- + +** github.com/sparrc/go-ping; version v0.0.0-20181106165434-ef3ab45e41b0 -- https://github.com/sparrc/go-ping +Copyright (c) 2016 Cameron Sparr + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +----- + +** https://github.com/fluent/fluent-logger-golang; version v1.4.0 -- +https://github.com/fluent/fluent-logger-golang +Copyright (c) 2013 Tatsuo Kaniwa + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000000..ba0a719118c --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.51.0 diff --git a/agent-container/agent-config.json b/agent-container/agent-config.json new file mode 100644 index 00000000000..247223602b5 --- /dev/null +++ b/agent-container/agent-config.json @@ -0,0 +1 @@ +{"author":"Amazon Web Services, Inc.","config":{"Cmd":["/agent"],"ArgsEscaped":true},"created":"~~timestamp~~","history":[{"created":"~~timestamp~~","author":"Amazon Web Services, Inc.","created_by":"p=np","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:~~digest~~"]}} diff --git a/agent-container/agent-image-VERSION b/agent-container/agent-image-VERSION new file mode 100644 index 00000000000..d3827e75a5c --- /dev/null +++ b/agent-container/agent-image-VERSION @@ -0,0 +1 @@ +1.0 diff --git a/agent-container/agent-manifest.json b/agent-container/agent-manifest.json new file mode 100644 index 00000000000..f6b4f1f020f --- /dev/null +++ 
b/agent-container/agent-manifest.json @@ -0,0 +1 @@ +[{"Config":"config.json","RepoTags":["amazon/amazon-ecs-agent:~~agentversion~~", "amazon/amazon-ecs-agent:latest"],"Layers":["rootfs/layer.tar"]}] diff --git a/agent-container/agent-repositories b/agent-container/agent-repositories new file mode 100644 index 00000000000..91624061cc3 --- /dev/null +++ b/agent-container/agent-repositories @@ -0,0 +1 @@ +{"amazon/amazon-ecs-agent":{"amazon-ecs":"rootfs"}} diff --git a/agent/Gopkg.lock b/agent/Gopkg.lock new file mode 100644 index 00000000000..7f2821a6aa3 --- /dev/null +++ b/agent/Gopkg.lock @@ -0,0 +1,598 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:bf42be3cb1519bf8018dfd99720b1005ee028d947124cab3ccf965da59381df6" + name = "github.com/Microsoft/go-winio" + packages = ["."] + pruneopts = "UT" + revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" + version = "v0.4.7" + +[[projects]] + digest = "1:5d44cd3c21be10148e4fe7fffb9bf6b4268bff6871d20d651530248ed0c8b5de" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/arn", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/context", + "internal/ini", + "internal/s3shared", + "internal/s3shared/arn", + "internal/s3shared/s3err", + "internal/sdkio", + "internal/sdkmath", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "internal/strings", + "internal/sync/singleflight", + "private/checksum", + "private/model/api", + "private/protocol", + "private/protocol/ec2query", + "private/protocol/eventstream", + "private/protocol/eventstream/eventstreamapi", + 
"private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/restxml", + "private/protocol/xml/xmlutil", + "private/util", + "service/cloudwatchlogs", + "service/ec2", + "service/fsx", + "service/s3", + "service/s3/s3iface", + "service/s3/s3manager", + "service/secretsmanager", + "service/secretsmanager/secretsmanageriface", + "service/ssm", + "service/sts", + "service/sts/stsiface", + ] + pruneopts = "UT" + revision = "a226e8d9a75c160c1830b31fc0049f76177016e0" + version = "v1.36.0" + +[[projects]] + branch = "master" + digest = "1:8611d685d67d4d44786ca9fe2d9641647e60087bfcb71674fd3424c86617bbb4" + name = "github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit" + packages = ["."] + pruneopts = "UT" + revision = "55d4fd2e6f353637f53296306078fa4e9d753c1f" + +[[projects]] + branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:de747471fda94a360884bd972aa5633e65a77666283ec9c0d9c0755b871f3a9b" + name = "github.com/cihub/seelog" + packages = [ + ".", + "archive", + "archive/gzip", + "archive/tar", + "archive/zip", + ] + pruneopts = "UT" + revision = "f561c5e57575bb1e0a2167028b7339b3a8d16fb4" + +[[projects]] + digest = "1:d9209774a52def50b9f6e60d4422383cc10b6370ab5eed7be05c54e90fb12825" + name = "github.com/containerd/cgroups" + packages = ["."] + pruneopts = "UT" + revision = "c3fc2b77b568af2406f3931cf3d3f17d76736886" + +[[projects]] + branch = "master" + digest = "1:e48c63e818c67fbf3d7afe20bba33134ab1a5bf384847385384fd027652a5a96" + name = "github.com/containerd/continuity" + packages = ["pathdriver"] + pruneopts = "UT" + revision = "c220ac4f01b8a301edcb9c9c05d7600249138cfa" + +[[projects]] + digest = 
"1:1a07bbfee1d0534e8dda4773948e6dcd3a061ea7ab047ce04619476946226483" + name = "github.com/containernetworking/cni" + packages = [ + "libcni", + "pkg/invoke", + "pkg/types", + "pkg/types/020", + "pkg/types/current", + "pkg/version", + ] + pruneopts = "UT" + revision = "4cfb7b568922a3c79a23e438dc52fe537fc9687e" + version = "v0.7.1" + +[[projects]] + digest = "1:b657713432ed8677bfb1467dc3da20de35d25438286f5d4530cbff4ab0e82af1" + name = "github.com/containernetworking/plugins" + packages = ["pkg/ns"] + pruneopts = "UT" + revision = "ad10b6fa91aacd720f1f9ab94341a97a82a24965" + version = "v0.8.6" + +[[projects]] + digest = "1:6bf7f66890675dcbc7952f7afd044a2a46a8bd3143f837c46fd3ccd15da383d7" + name = "github.com/coreos/go-systemd" + packages = ["dbus"] + pruneopts = "UT" + revision = "d2196463941895ee908e13531a23a39feb9e1243" + version = "v15" + +[[projects]] + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + digest = "1:440d49b5c97e79f6440ea72018b4e810d012f894af2704621a380db845c7cc80" + name = "github.com/deniswernert/udev" + packages = ["."] + pruneopts = "UT" + revision = "82fe5be8ca5f7d8255cc838723687f61bb175120" + +[[projects]] + digest = "1:90668ed6a20982a029b433e60c9fb5cf5e81ce9c7551170881cf435f73daf3ee" + name = "github.com/didip/tollbooth" + packages = [ + ".", + "errors", + "libstring", + "limiter", + ] + pruneopts = "UT" + revision = "b65680a2d5f624ed528d36f8406f31535555828a" + version = "v3.0.2" + +[[projects]] + branch = "master" + digest = "1:4ddc17aeaa82cb18c5f0a25d7c253a10682f518f4b2558a82869506eec223d76" + name = "github.com/docker/distribution" + packages = [ + "digestset", + "reference", + ] + pruneopts = "UT" + revision = "1cb4180b1a5b9c029b2f2beaeb38f0b5cf64e12e" + +[[projects]] + branch = "master" + digest = 
"1:6f44c7d931db4a08c1ddcba0962586b6821f3298b0745cbe63cff1985badef6b" + name = "github.com/docker/docker" + packages = [ + "api", + "api/types", + "api/types/blkiodev", + "api/types/container", + "api/types/events", + "api/types/filters", + "api/types/image", + "api/types/mount", + "api/types/network", + "api/types/registry", + "api/types/strslice", + "api/types/swarm", + "api/types/swarm/runtime", + "api/types/time", + "api/types/versions", + "api/types/volume", + "client", + "errdefs", + "pkg/ioutils", + "pkg/longpath", + "pkg/mount", + "pkg/plugins", + "pkg/plugins/transport", + "pkg/system", + ] + pruneopts = "UT" + revision = "785fe99bdb7c004acd57d5d65d57ceb256d87ef1" + +[[projects]] + digest = "1:b6b5c3e8da0fb8073cd2886ba249a40f4402b4391ca6eba905a142cceea97a12" + name = "github.com/docker/go-connections" + packages = [ + "nat", + "sockets", + "tlsconfig", + ] + pruneopts = "UT" + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + version = "v0.3.0" + +[[projects]] + digest = "1:57d39983d01980c1317c2c5c6dd4b5b0c4a804ad2df800f2f6cbcd6a6d05f6ca" + name = "github.com/docker/go-units" + packages = ["."] + pruneopts = "UT" + revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52" + version = "v0.3.2" + +[[projects]] + digest = "1:42b837a2202ea13bc306fadc76967c9fd670b878b2ee27d0eb36ceaf45f79a64" + name = "github.com/etcd-io/bbolt" + packages = ["."] + pruneopts = "UT" + revision = "232d8fc87f50244f9c808f4745759e08a304c029" + version = "v1.3.5" + +[[projects]] + digest = "1:57fa4c058c21ce25d0b7272518dd746065117abf6cc706158b0d361202024520" + name = "github.com/godbus/dbus" + packages = ["."] + pruneopts = "UT" + revision = "a389bdde4dd695d414e47b755e95e72b7826432c" + version = "v4.1.0" + +[[projects]] + digest = "1:bbadccf3d3317ea03c0dac0b45b673b4b397c8f91a1d2eff550a3c51c4ad770e" + name = "github.com/gogo/protobuf" + packages = ["proto"] + pruneopts = "UT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + digest = 
"1:f92fd6e98da2322ddd7f60b9fd647b11bb67c6ab01dfdfb59f8f5ded2e7522d6" + name = "github.com/golang/mock" + packages = [ + "gomock", + "mockgen/model", + ] + pruneopts = "UT" + revision = "9fa652df1129bef0e734c9cf9bf6dbae9ef3b9fa" + version = "1.3.1" + +[[projects]] + digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d" + name = "github.com/golang/protobuf" + packages = ["proto"] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1" + name = "github.com/gorilla/context" + packages = ["."] + pruneopts = "UT" + revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" + version = "v1.1.1" + +[[projects]] + digest = "1:e73f5b0152105f18bc131fba127d9949305c8693f8a762588a82a48f61756f5f" + name = "github.com/gorilla/mux" + packages = ["."] + pruneopts = "UT" + revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" + version = "v1.6.2" + +[[projects]] + digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" + version = "v1.2.0" + +[[projects]] + digest = "1:bb81097a5b62634f3e9fec1014657855610c82d19b9a40c17612e32651e35dca" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "UT" + revision = "c2b33e84" + +[[projects]] + digest = "1:0a69a1c0db3591fcefb47f115b224592c8dfa4368b7ba9fae509d5e16cdc95c8" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242" + version = "v1.0.1" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + 
+[[projects]] + digest = "1:ee4d4af67d93cc7644157882329023ce9a7bcfce956a079069a9405521c7cc8d" + name = "github.com/opencontainers/go-digest" + packages = ["."] + pruneopts = "UT" + revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" + version = "v1.0.0-rc1" + +[[projects]] + digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6" + name = "github.com/opencontainers/image-spec" + packages = [ + "specs-go", + "specs-go/v1", + ] + pruneopts = "UT" + revision = "d60099175f88c47cd379c4738d158884749ed235" + version = "v1.0.1" + +[[projects]] + digest = "1:ad6438b92f22ce6e80fa202bc731deff9526ddf456c7d994071f060e1d49ced5" + name = "github.com/opencontainers/runtime-spec" + packages = ["specs-go"] + pruneopts = "UT" + revision = "d349388c43b01b2ea695965ae561b5bddb81318f" + +[[projects]] + digest = "1:808cdddf087fb64baeae67b8dfaee2069034d9704923a3cb8bd96a995421a625" + name = "github.com/patrickmn/go-cache" + packages = ["."] + pruneopts = "UT" + revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0" + version = "v2.1.0" + +[[projects]] + digest = "1:9072181164e616e422cbfbe48ca9ac249a4d76301ca0876c9f56b937cf214a2f" + name = "github.com/pborman/uuid" + packages = ["."] + pruneopts = "UT" + revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4" + +[[projects]] + digest = "1:0599213459e7ef9398ab20b26722ebeaeed4d9d85255005ddf0a5c47f8c02670" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "c605e284fe17294bda444b34710735b29d1a9d90" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = 
"UT" + revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" + version = "v0.9.0" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6" + +[[projects]] + branch = "master" + digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" + +[[projects]] + digest = "1:3f53e9e4dfbb664cd62940c9c4b65a2171c66acd0b7621a1a6b8e78513525a52" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "ad15b42461921f1fb3529b058c6786c6a45d5162" + version = "v1.1.1" + +[[projects]] + digest = "1:5110e3d4f130772fd39e6ce8208ad1955b242ccfcc8ad9d158857250579c82f4" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + "suite", + ] + pruneopts = "UT" + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + +[[projects]] + digest = "1:03fa51a73e38bed76bbd22fcb9a77711540a75a44dc69fe831c3f63988828601" + name = "github.com/vishvananda/netlink" + packages = [ + ".", + "nl", + ] + pruneopts = "UT" + revision = "fe3b5664d23a11b52ba59bece4ff29c52772a56b" + +[[projects]] + branch = "master" + digest = "1:02b1d1b48bf853ea2a9bea029ffe54e3fd7804903a0eda9a78191db8984300d2" + name = "github.com/vishvananda/netns" + packages = ["."] + pruneopts = "UT" + revision = "be1fbeda19366dea804f00efff2dd73a1642fdcc" + +[[projects]] + branch = "master" + 
digest = "1:5a7b6061560d920434c4b853af04fd1027215d80c8645bba360841edb29e672a" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "UT" + revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94" + +[[projects]] + branch = "master" + digest = "1:b765f093fdb6d062f72faf803670d32f3cec565d88637f951943f4d4ac2a6754" + name = "golang.org/x/net" + packages = [ + "context", + "html", + "html/atom", + "proxy", + ] + pruneopts = "UT" + revision = "a337091b0525af65de94df2eb7e98bd9962dcbe2" + +[[projects]] + branch = "master" + digest = "1:4957ab15714ed2262e294e194634a9e55fe0d03248bfa6e860a9b8222b7c4bb6" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + "windows/registry", + "windows/svc", + "windows/svc/eventlog", + ] + pruneopts = "UT" + revision = "bf42f188b9bc6f2cf5b8ee5a912ef1aedd0eba4c" + +[[projects]] + branch = "master" + digest = "1:51a479a09b7ed06b7be5a854e27fcc328718ae0e5ad159f9ddeef12d0326c2e7" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "UT" + revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7" + +[[projects]] + digest = "1:bc085afda453f671056c6946862d76ae53946ce2971df0f0a9bf511da6571382" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + ] + pruneopts = "UT" + revision = "bd4635fd25596cdd56c1fb399c53b351d1a81f2d" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/arn", + "github.com/aws/aws-sdk-go/aws/awserr", + "github.com/aws/aws-sdk-go/aws/awsutil", + "github.com/aws/aws-sdk-go/aws/client", + "github.com/aws/aws-sdk-go/aws/client/metadata", + "github.com/aws/aws-sdk-go/aws/credentials", + "github.com/aws/aws-sdk-go/aws/defaults", + "github.com/aws/aws-sdk-go/aws/ec2metadata", + "github.com/aws/aws-sdk-go/aws/endpoints", + "github.com/aws/aws-sdk-go/aws/request", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/aws/signer/v4", + 
"github.com/aws/aws-sdk-go/private/model/api", + "github.com/aws/aws-sdk-go/private/protocol", + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", + "github.com/aws/aws-sdk-go/private/util", + "github.com/aws/aws-sdk-go/service/cloudwatchlogs", + "github.com/aws/aws-sdk-go/service/ec2", + "github.com/aws/aws-sdk-go/service/fsx", + "github.com/aws/aws-sdk-go/service/s3", + "github.com/aws/aws-sdk-go/service/s3/s3manager", + "github.com/aws/aws-sdk-go/service/secretsmanager", + "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface", + "github.com/aws/aws-sdk-go/service/ssm", + "github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit", + "github.com/cihub/seelog", + "github.com/containerd/cgroups", + "github.com/containernetworking/cni/libcni", + "github.com/containernetworking/cni/pkg/types", + "github.com/containernetworking/cni/pkg/types/current", + "github.com/containernetworking/plugins/pkg/ns", + "github.com/deniswernert/udev", + "github.com/didip/tollbooth", + "github.com/docker/docker/api/types", + "github.com/docker/docker/api/types/container", + "github.com/docker/docker/api/types/events", + "github.com/docker/docker/api/types/filters", + "github.com/docker/docker/api/types/network", + "github.com/docker/docker/api/types/volume", + "github.com/docker/docker/client", + "github.com/docker/docker/pkg/plugins", + "github.com/docker/docker/pkg/system", + "github.com/docker/go-connections/nat", + "github.com/docker/go-units", + "github.com/etcd-io/bbolt", + "github.com/golang/mock/gomock", + "github.com/golang/mock/mockgen/model", + "github.com/gorilla/mux", + "github.com/gorilla/websocket", + "github.com/opencontainers/runtime-spec/specs-go", + "github.com/pborman/uuid", + "github.com/pkg/errors", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", + 
"github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/stretchr/testify/suite", + "github.com/vishvananda/netlink", + "golang.org/x/net/context", + "golang.org/x/sys/windows", + "golang.org/x/sys/windows/registry", + "golang.org/x/sys/windows/svc", + "golang.org/x/sys/windows/svc/eventlog", + "golang.org/x/tools/imports", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/agent/Gopkg.toml b/agent/Gopkg.toml new file mode 100644 index 00000000000..112d6ea123c --- /dev/null +++ b/agent/Gopkg.toml @@ -0,0 +1,101 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +required = ["github.com/golang/mock/mockgen/model"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "github.com/aws/aws-sdk-go" + version = "v1.36.0" + +[[constraint]] + name = "github.com/cihub/seelog" + revision = "f561c5e57575bb1e0a2167028b7339b3a8d16fb4" + +[[constraint]] + name = "github.com/containerd/cgroups" + revision ="c3fc2b77b568af2406f3931cf3d3f17d76736886" + +[[constraint]] + name = "github.com/containernetworking/cni" + version = "0.7.0" + +[[constraint]] + name = "github.com/deniswernert/udev" + revision ="82fe5be8ca5f7d8255cc838723687f61bb175120" + +[[constraint]] + name = "github.com/docker/docker" + branch = "master" + +[[override]] + name = "github.com/docker/distribution" + branch = "master" + +[[constraint]] + name = "github.com/golang/mock" + version ="1.1.1" + +[[constraint]] + name = "github.com/gorilla/websocket" + 
version = "v1.2.0" + +[[constraint]] + name = "github.com/opencontainers/runtime-spec" + revision ="d349388c43b01b2ea695965ae561b5bddb81318f" + +[[constraint]] + name = "github.com/pborman/uuid" + revision ="ca53cad383cad2479bbba7f7a1a05797ec1386e4" + +[[constraint]] + name = "github.com/pkg/errors" + revision ="c605e284fe17294bda444b34710735b29d1a9d90" + +[[constraint]] + name = "github.com/stretchr/testify" + revision ="f35b8ab0b5a2cef36673838d662e249dd9c94686" + +[[constraint]] + name = "github.com/vishvananda/netlink" + revision ="fe3b5664d23a11b52ba59bece4ff29c52772a56b" +[[constraint]] + name = "github.com/didip/tollbooth" + version = "3.0.2" + +[[constraint]] + name = "github.com/gorilla/mux" + version = "1.6.2" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = "0.9.0" + +[[constraint]] + branch = "master" + name = "github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit" + +[[constraint]] + name = "github.com/containernetworking/plugins" + version = "0.8.6" diff --git a/agent/acs/client/acs_client.go b/agent/acs/client/acs_client.go new file mode 100644 index 00000000000..574eb5a557d --- /dev/null +++ b/agent/acs/client/acs_client.go @@ -0,0 +1,65 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package acsclient wraps the generated aws-sdk-go client to provide marshalling +// and unmarshalling of data over a websocket connection in the format expected +// by ACS. 
It allows for bidirectional communication and acts as both a +// client-and-server in terms of requests, but only as a client in terms of +// connecting. +package acsclient + +import ( + "errors" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/cihub/seelog" +) + +// clientServer implements ClientServer for acs. +type clientServer struct { + wsclient.ClientServerImpl +} + +// New returns a client/server to bidirectionally communicate with ACS +// The returned struct should have both 'Connect' and 'Serve' called upon it +// before being used. +func New(url string, cfg *config.Config, credentialProvider *credentials.Credentials, rwTimeout time.Duration) wsclient.ClientServer { + cs := &clientServer{} + cs.URL = url + cs.CredentialProvider = credentialProvider + cs.AgentConfig = cfg + cs.ServiceError = &acsError{} + cs.RequestHandlers = make(map[string]wsclient.RequestHandler) + cs.TypeDecoder = NewACSDecoder() + cs.RWTimeout = rwTimeout + return cs +} + +// Serve begins serving requests using previously registered handlers (see +// AddRequestHandler). All request handlers should be added prior to making this +// call as unhandled requests will be discarded. +func (cs *clientServer) Serve() error { + seelog.Debug("ACS client starting websocket poll loop") + if !cs.IsReady() { + return errors.New("acs client: websocket not ready for connections") + } + return cs.ConsumeMessages() +} + +// Close closes the underlying connection +func (cs *clientServer) Close() error { + return cs.Disconnect() +} diff --git a/agent/acs/client/acs_client_test.go b/agent/acs/client/acs_client_test.go new file mode 100644 index 00000000000..5fb4fe8e092 --- /dev/null +++ b/agent/acs/client/acs_client_test.go @@ -0,0 +1,483 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package acsclient + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + mock_wsconn "github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/golang/mock/gomock" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" +) + +const ( + sampleCredentialsMessage = ` +{ + "type": "IAMRoleCredentialsMessage", + "message": { + "messageId": "123", + "clusterArn": "default", + "taskArn": "t1", + "roleCredentials": { + "credentialsId": "credsId", + "accessKeyId": "accessKeyId", + "expiration": "2016-03-25T06:17:19.318+0000", + "roleArn": "roleArn", + "secretAccessKey": "secretAccessKey", + "sessionToken": "token" + } + } +} +` + sampleAttachENIMessage = ` +{ + "type": "AttachTaskNetworkInterfacesMessage", + "message": { + "messageId": "123", + "clusterArn": "default", + "taskArn": "task", + "elasticNetworkInterfaces":[{ + "attachmentArn": "attach_arn", + "ec2Id": "eni_id", + "ipv4Addresses":[{ + "primary": true, + "privateAddress": "ipv4" + }], + "ipv6Addresses":[{ + "address": "ipv6" + }], + "macAddress": "mac" + }] + } +} +` + sampleAttachInstanceENIMessage = ` +{ + "type": "AttachInstanceNetworkInterfacesMessage", + "message": { + "messageId": "123", 
+ "clusterArn": "default", + "elasticNetworkInterfaces":[{ + "attachmentArn": "attach_arn", + "ec2Id": "eni_id", + "ipv4Addresses":[{ + "primary": true, + "privateAddress": "ipv4" + }], + "ipv6Addresses":[{ + "address": "ipv6" + }], + "macAddress": "mac" + }] + } +} +` +) + +const ( + TestClusterArn = "arn:aws:ec2:123:container/cluster:123456" + TestInstanceArn = "arn:aws:ec2:123:container/containerInstance/12345678" + rwTimeout = time.Second +) + +var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token") + +var testCfg = &config.Config{ + AcceptInsecureCert: true, + AWSRegion: "us-east-1", +} + +func TestMakeUnrecognizedRequest(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + cs := testCS(conn) + defer cs.Close() + // 'testing.T' should not be a known type ;) + err := cs.MakeRequest(t) + if _, ok := err.(*wsclient.UnrecognizedWSRequestType); !ok { + t.Fatal("Expected unrecognized request type") + } +} + +func TestWriteAckRequest(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).Times(2) + conn.EXPECT().Close() + cs := testCS(conn) + defer cs.Close() + + // capture bytes written + var writes []byte + conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Do(func(_ int, data []byte) { + writes = data + }) + + // send request + err := cs.MakeRequest(&ecsacs.AckRequest{}) + assert.NoError(t, err) + + // unmarshal bytes written to the socket + msg := &wsclient.RequestMessage{} + err = json.Unmarshal(writes, msg) + assert.NoError(t, err) + assert.Equal(t, "AckRequest", msg.Type) +} + +func TestPayloadHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + // Messages 
should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(websocket.TextMessage, + []byte(`{"type":"PayloadMessage","message":{"tasks":[{"arn":"arn"}]}}`), + nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + cs := testCS(conn) + defer cs.Close() + + messageChannel := make(chan *ecsacs.PayloadMessage) + reqHandler := func(payload *ecsacs.PayloadMessage) { + messageChannel <- payload + } + cs.AddRequestHandler(reqHandler) + go cs.Serve() + + expectedMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{{ + Arn: aws.String("arn"), + }}, + } + + assert.Equal(t, expectedMessage, <-messageChannel) +} + +func TestRefreshCredentialsHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(websocket.TextMessage, + []byte(sampleCredentialsMessage), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + cs := testCS(conn) + defer cs.Close() + + messageChannel := make(chan *ecsacs.IAMRoleCredentialsMessage) + reqHandler := func(message *ecsacs.IAMRoleCredentialsMessage) { + messageChannel <- message + } + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + + expectedMessage := &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String("123"), + TaskArn: aws.String("t1"), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + CredentialsId: aws.String("credsId"), + AccessKeyId: aws.String("accessKeyId"), + Expiration: aws.String("2016-03-25T06:17:19.318+0000"), + RoleArn: aws.String("roleArn"), + SecretAccessKey: aws.String("secretAccessKey"), + SessionToken: 
aws.String("token"), + }, + } + assert.Equal(t, <-messageChannel, expectedMessage) +} + +func TestClosingConnection(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Returning EOF tells the ClientServer that the connection is closed + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil) + conn.EXPECT().ReadMessage().Return(0, nil, io.EOF) + // SetWriteDeadline will be invoked once for WriteMessage() and + // once for Close() + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).Times(2) + conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Return(io.EOF) + conn.EXPECT().Close() + cs := testCS(conn) + defer cs.Close() + + serveErr := cs.Serve() + assert.Error(t, serveErr) + + err := cs.MakeRequest(&ecsacs.AckRequest{}) + assert.Error(t, err) +} + +func TestConnect(t *testing.T) { + closeWS := make(chan bool) + server, serverChan, requestChan, serverErr, err := startMockAcsServer(t, closeWS) + defer server.Close() + if err != nil { + t.Fatal(err) + } + go func() { + t.Fatal(<-serverErr) + }() + + cs := New(server.URL, testCfg, testCreds, rwTimeout) + // Wait for up to a second for the mock server to launch + for i := 0; i < 100; i++ { + err = cs.Connect() + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + if err != nil { + t.Fatal(err) + } + + errs := make(chan error) + cs.AddRequestHandler(func(msg *ecsacs.PayloadMessage) { + if *msg.MessageId != "messageId" || len(msg.Tasks) != 1 || *msg.Tasks[0].Arn != "arn1" { + errs <- errors.New("incorrect payloadMessage arguments") + } else { + errs <- nil + } + }) + + go func() { + _ = cs.Serve() + }() + + go func() { + serverChan <- 
`{"type":"PayloadMessage","message":{"tasks":[{"arn":"arn1","desiredStatus":"RUNNING","overrides":"{}","family":"test","version":"v1","containers":[{"name":"c1","image":"redis","command":["arg1","arg2"],"cpu":10,"memory":20,"links":["db"],"portMappings":[{"containerPort":22,"hostPort":22}],"essential":true,"entryPoint":["bash"],"environment":{"key":"val"},"overrides":"{}","desiredStatus":"RUNNING"}]}],"messageId":"messageId"}}` + "\n" + }() + // Error for handling a 'PayloadMessage' request + err = <-errs + if err != nil { + t.Fatal(err) + } + + mid := "messageId" + cluster := TestClusterArn + ci := TestInstanceArn + go func() { + cs.MakeRequest(&ecsacs.AckRequest{ + MessageId: &mid, + Cluster: &cluster, + ContainerInstance: &ci, + }) + }() + + request := <-requestChan + + // A request should have a 'type' and a 'message' + intermediate := struct { + Type string `json:"type"` + Message *ecsacs.AckRequest `json:"message"` + }{} + err = json.Unmarshal([]byte(request), &intermediate) + if err != nil { + t.Fatal(err) + } + if intermediate.Type != "AckRequest" || *intermediate.Message.MessageId != mid || *intermediate.Message.ContainerInstance != ci || *intermediate.Message.Cluster != cluster { + t.Fatal("Unexpected request") + } + closeWS <- true + close(serverChan) +} + +func TestConnectClientError(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(400) + w.Write([]byte(`{"InvalidClusterException":"Invalid cluster"}` + "\n")) + })) + defer testServer.Close() + + cs := New(testServer.URL, testCfg, testCreds, rwTimeout) + err := cs.Connect() + _, ok := err.(*wsclient.WSError) + assert.True(t, ok, "Connect error expected to be a WSError type") + assert.EqualError(t, err, "InvalidClusterException: Invalid cluster") +} + +func testCS(conn *mock_wsconn.MockWebsocketConn) wsclient.ClientServer { + foo := New("localhost:443", testCfg, testCreds, rwTimeout) + cs := foo.(*clientServer) + 
cs.SetConnection(conn) + return cs +} + +// TODO: replace with gomock +func startMockAcsServer(t *testing.T, closeWS <-chan bool) (*httptest.Server, chan<- string, <-chan string, <-chan error, error) { + serverChan := make(chan string) + requestsChan := make(chan string) + errChan := make(chan error) + + upgrader := websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024} + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ws, err := upgrader.Upgrade(w, r, nil) + go func() { + <-closeWS + ws.Close() + }() + if err != nil { + errChan <- err + } + go func() { + _, msg, err := ws.ReadMessage() + if err != nil { + errChan <- err + } else { + requestsChan <- string(msg) + } + }() + for str := range serverChan { + err := ws.WriteMessage(websocket.TextMessage, []byte(str)) + if err != nil { + errChan <- err + } + } + }) + + server := httptest.NewTLSServer(handler) + return server, serverChan, requestsChan, errChan, nil +} + +func TestAttachENIHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + defer cs.Close() + + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(websocket.TextMessage, + []byte(sampleAttachENIMessage), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + messageChannel := make(chan *ecsacs.AttachTaskNetworkInterfacesMessage) + reqHandler := func(message *ecsacs.AttachTaskNetworkInterfacesMessage) { + messageChannel <- message + } + + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + + expectedMessage := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String("123"), + ClusterArn: aws.String("default"), + TaskArn: aws.String("task"), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ 
+ {AttachmentArn: aws.String("attach_arn"), + Ec2Id: aws.String("eni_id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + MacAddress: aws.String("mac"), + }, + }, + } + + assert.Equal(t, <-messageChannel, expectedMessage) +} + +func TestAttachInstanceENIHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + defer cs.Close() + + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(websocket.TextMessage, + []byte(sampleAttachInstanceENIMessage), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + messageChannel := make(chan *ecsacs.AttachInstanceNetworkInterfacesMessage) + reqHandler := func(message *ecsacs.AttachInstanceNetworkInterfacesMessage) { + messageChannel <- message + } + + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + + expectedMessage := &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String("123"), + ClusterArn: aws.String("default"), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + {AttachmentArn: aws.String("attach_arn"), + Ec2Id: aws.String("eni_id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + MacAddress: aws.String("mac"), + }, + }, + } + + assert.Equal(t, <-messageChannel, expectedMessage) +} diff --git a/agent/acs/client/acs_client_types.go b/agent/acs/client/acs_client_types.go new file mode 100644 index 00000000000..046f3ce119c --- /dev/null +++ 
b/agent/acs/client/acs_client_types.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package acsclient + +import ( + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/wsclient" +) + +var acsRecognizedTypes []interface{} + +func init() { + // This list is currently *manually updated* and assumes that the generated + // struct type-names within the package *exactly match* the type sent by ACS/TCS + // (true so far; careful with inflections) + // TODO, this list should be autogenerated + // I couldn't figure out how to get a list of all structs in a package via + // reflection, but that would solve this. The alternative is to either parse + // the .json model or the generated struct names. 
+ acsRecognizedTypes = []interface{}{ + ecsacs.HeartbeatMessage{}, + ecsacs.PayloadMessage{}, + ecsacs.CloseMessage{}, + ecsacs.AckRequest{}, + ecsacs.NackRequest{}, + ecsacs.PerformUpdateMessage{}, + ecsacs.StageUpdateMessage{}, + ecsacs.IAMRoleCredentialsMessage{}, + ecsacs.IAMRoleCredentialsAckRequest{}, + ecsacs.ServerException{}, + ecsacs.BadRequestException{}, + ecsacs.InvalidClusterException{}, + ecsacs.InvalidInstanceException{}, + ecsacs.AccessDeniedException{}, + ecsacs.InactiveInstanceException{}, + ecsacs.ErrorMessage{}, + ecsacs.AttachTaskNetworkInterfacesMessage{}, + ecsacs.AttachInstanceNetworkInterfacesMessage{}, + ecsacs.TaskManifestMessage{}, + ecsacs.TaskStopVerificationAck{}, + ecsacs.TaskStopVerificationMessage{}, + } +} + +func NewACSDecoder() wsclient.TypeDecoder { + return wsclient.BuildTypeDecoder(acsRecognizedTypes) +} diff --git a/agent/acs/client/acs_error.go b/agent/acs/client/acs_error.go new file mode 100644 index 00000000000..e16898284c6 --- /dev/null +++ b/agent/acs/client/acs_error.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package acsclient + +import ( + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/wsclient" +) + +const errType = "ACSError" + +// ACSUnretriableErrors wraps all the typed errors that ACS may return +type ACSUnretriableErrors struct{} + +// Get gets the list of unretriable error types. 
+func (err *ACSUnretriableErrors) Get() []interface{} { + return unretriableErrors +} + +// acsError implements wsclient.ServiceError interface. +type acsError struct{} + +// NewError returns an error corresponding to a typed error returned from +// ACS. It is expected that the passed in interface{} is really a struct which +// has a 'Message' field of type *string. In that case, the Message will be +// conveyed as part of the Error string as well as the type. It is safe to pass +// anything into this constructor and it will also work reasonably well with +// anything fulfilling the 'error' interface. +func (ae *acsError) NewError(err interface{}) *wsclient.WSError { + return &wsclient.WSError{ErrObj: err, Type: errType, WSUnretriableErrors: &ACSUnretriableErrors{}} +} + +// These errors are all fatal and there's nothing we can do about them. +// AccessDeniedException is actually potentially fixable because you can change +// credentials at runtime, but still close to unretriable. +var unretriableErrors = []interface{}{ + &ecsacs.InvalidInstanceException{}, + &ecsacs.InvalidClusterException{}, + &ecsacs.InactiveInstanceException{}, + &ecsacs.AccessDeniedException{}, +} diff --git a/agent/acs/client/acs_error_test.go b/agent/acs/client/acs_error_test.go new file mode 100644 index 00000000000..53a5baa5b51 --- /dev/null +++ b/agent/acs/client/acs_error_test.go @@ -0,0 +1,75 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package acsclient + +import ( + "errors" + "strings" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/stretchr/testify/require" +) + +var acsErr *acsError + +func init() { + acsErr = &acsError{} +} + +func TestInvalidInstanceException(t *testing.T) { + errMsg := "Invalid instance" + err := acsErr.NewError(&ecsacs.InvalidInstanceException{Message_: &errMsg}) + + require.False(t, err.Retry(), "Expected InvalidInstanceException to not be retriable") + require.EqualError(t, err, "InvalidInstanceException: "+errMsg) +} + +func TestInvalidClusterException(t *testing.T) { + errMsg := "Invalid cluster" + err := acsErr.NewError(&ecsacs.InvalidClusterException{Message_: &errMsg}) + + require.False(t, err.Retry(), "Expected to not be retriable") + require.EqualError(t, err, "InvalidClusterException: "+errMsg) +} + +func TestServerException(t *testing.T) { + err := acsErr.NewError(&ecsacs.ServerException{Message_: nil}) + + require.True(t, err.Retry(), "Server exceptions are retriable") + require.EqualError(t, err, "ServerException: null") +} + +func TestGenericErrorConversion(t *testing.T) { + err := acsErr.NewError(errors.New("generic error")) + + require.True(t, err.Retry(), "Should default to retriable") + require.EqualError(t, err, "ACSError: generic error") +} + +func TestSomeRandomTypeConversion(t *testing.T) { + // This is really just an 'it doesn't panic' check. 
+ err := acsErr.NewError(t) + require.True(t, err.Retry(), "Should default to retriable") + require.True(t, strings.HasPrefix(err.Error(), "ACSError: Unknown error")) +} + +func TestBadlyTypedMessage(t *testing.T) { + // Another 'does not panic' check + err := acsErr.NewError(struct{ Message int }{1}) + require.True(t, err.Retry(), "Should default to retriable") + require.NotNil(t, err.Error()) +} diff --git a/agent/acs/handler/acs_handler.go b/agent/acs/handler/acs_handler.go new file mode 100644 index 00000000000..b3b7e18132d --- /dev/null +++ b/agent/acs/handler/acs_handler.go @@ -0,0 +1,507 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package handler deals with appropriately reacting to all ACS messages as well +// as maintaining the connection to ACS. 
+package handler + +import ( + "context" + "io" + "net/url" + "strconv" + "strings" + "time" + + acsclient "github.com/aws/amazon-ecs-agent/agent/acs/client" + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + updater "github.com/aws/amazon-ecs-agent/agent/acs/update_handler" + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/config" + rolecredentials "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/eventhandler" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/cihub/seelog" +) + +const ( + // heartbeatTimeout is the maximum time to wait between heartbeats + // without disconnecting + heartbeatTimeout = 1 * time.Minute + heartbeatJitter = 1 * time.Minute + // wsRWTimeout is the duration of read and write deadline for the + // websocket connection + wsRWTimeout = 2*heartbeatTimeout + heartbeatJitter + + inactiveInstanceReconnectDelay = 1 * time.Hour + + connectionBackoffMin = 250 * time.Millisecond + connectionBackoffMax = 2 * time.Minute + connectionBackoffJitter = 0.2 + connectionBackoffMultiplier = 1.5 + // payloadMessageBufferSize is the maximum number of payload messages + // to queue up without having handled previous ones. 
+ payloadMessageBufferSize = 10 + // sendCredentialsURLParameterName is the name of the URL parameter + // in the ACS URL that is used to indicate if ACS should send + // credentials for all tasks on establishing the connection + sendCredentialsURLParameterName = "sendCredentials" + inactiveInstanceExceptionPrefix = "InactiveInstanceException:" +) + +// Session defines an interface for handler's long-lived connection with ACS. +type Session interface { + Start() error +} + +// session encapsulates all arguments needed by the handler to connect to ACS +// and to handle messages received by ACS. The Session.Start() method can be used +// to start processing messages from ACS. +type session struct { + containerInstanceARN string + credentialsProvider *credentials.Credentials + agentConfig *config.Config + deregisterInstanceEventStream *eventstream.EventStream + taskEngine engine.TaskEngine + ecsClient api.ECSClient + state dockerstate.TaskEngineState + dataClient data.Client + credentialsManager rolecredentials.Manager + taskHandler *eventhandler.TaskHandler + ctx context.Context + cancel context.CancelFunc + backoff retry.Backoff + resources sessionResources + latestSeqNumTaskManifest *int64 + _heartbeatTimeout time.Duration + _heartbeatJitter time.Duration + _inactiveInstanceReconnectDelay time.Duration +} + +// sessionResources defines the resource creator interface for starting +// a session with ACS. This interface is intended to define methods +// that create resources used to establish the connection to ACS +// It is confined to just the createACSClient() method for now. 
It can be +// extended to include the acsWsURL() and newDisconnectionTimer() methods +// when needed +// The goal is to make it easier to test and inject dependencies +type sessionResources interface { + // createACSClient creates a new websocket client + createACSClient(url string, cfg *config.Config) wsclient.ClientServer + sessionState +} + +// acsSessionResources implements resource creator and session state interfaces +// to create resources needed to connect to ACS and to record session state +// for the same +type acsSessionResources struct { + credentialsProvider *credentials.Credentials + // sendCredentials is used to set the 'sendCredentials' URL parameter + // used to connect to ACS + // It is set to 'true' for the very first successful connection on + // agent start. It is set to false for all successive connections + sendCredentials bool +} + +// sessionState defines state recorder interface for the +// session established with ACS. It can be used to record and +// retrieve data shared across multiple connections to ACS +type sessionState interface { + // connectedToACS callback indicates that the client has + // connected to ACS + connectedToACS() + // getSendCredentialsURLParameter retrieves the value for + // the 'sendCredentials' URL parameter + getSendCredentialsURLParameter() string +} + +// NewSession creates a new Session object +func NewSession(ctx context.Context, + config *config.Config, + deregisterInstanceEventStream *eventstream.EventStream, + containerInstanceArn string, + credentialsProvider *credentials.Credentials, + ecsClient api.ECSClient, + taskEngineState dockerstate.TaskEngineState, + dataClient data.Client, + taskEngine engine.TaskEngine, + credentialsManager rolecredentials.Manager, + taskHandler *eventhandler.TaskHandler, latestSeqNumTaskManifest *int64) Session { + resources := newSessionResources(credentialsProvider) + backoff := retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, + 
connectionBackoffJitter, connectionBackoffMultiplier) + derivedContext, cancel := context.WithCancel(ctx) + + return &session{ + agentConfig: config, + deregisterInstanceEventStream: deregisterInstanceEventStream, + containerInstanceARN: containerInstanceArn, + credentialsProvider: credentialsProvider, + ecsClient: ecsClient, + state: taskEngineState, + dataClient: dataClient, + taskEngine: taskEngine, + credentialsManager: credentialsManager, + taskHandler: taskHandler, + ctx: derivedContext, + cancel: cancel, + backoff: backoff, + resources: resources, + latestSeqNumTaskManifest: latestSeqNumTaskManifest, + _heartbeatTimeout: heartbeatTimeout, + _heartbeatJitter: heartbeatJitter, + _inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay, + } +} + +// Start starts the session. It'll forever keep trying to connect to ACS unless +// the context is cancelled. +// +// If the context is cancelled, Start() would return with the error code returned +// by the context. +// If the instance is deregistered, Start() would emit an event to the +// deregister-instance event stream and sets the connection backoff time to 1 hour. +func (acsSession *session) Start() error { + // connectToACS channel is used to indicate the intent to connect to ACS + // It's processed by the select loop to connect to ACS + connectToACS := make(chan struct{}) + // This is required to trigger the first connection to ACS. 
Subsequent + // connections are triggered by the handleACSError() method + go func() { + connectToACS <- struct{}{} + }() + for { + select { + case <-connectToACS: + seelog.Debugf("Received connect to ACS message") + // Start a session with ACS + acsError := acsSession.startSessionOnce() + select { + case <-acsSession.ctx.Done(): + // agent is shutting down, exiting cleanly + return nil + default: + } + // Session with ACS was stopped with some error, start processing the error + isInactiveInstance := isInactiveInstanceError(acsError) + if isInactiveInstance { + // If the instance was deregistered, send an event to the event stream + // for the same + seelog.Debug("Container instance is deregistered, notifying listeners") + err := acsSession.deregisterInstanceEventStream.WriteToEventStream(struct{}{}) + if err != nil { + seelog.Debugf("Failed to write to deregister container instance event stream, err: %v", err) + } + } + if shouldReconnectWithoutBackoff(acsError) { + // If ACS closed the connection, there's no need to backoff, + // reconnect immediately + seelog.Infof("ACS Websocket connection closed for a valid reason: %v", acsError) + acsSession.backoff.Reset() + sendEmptyMessageOnChannel(connectToACS) + } else { + // Disconnected unexpectedly from ACS, compute backoff duration to + // reconnect + reconnectDelay := acsSession.computeReconnectDelay(isInactiveInstance) + seelog.Infof("Reconnecting to ACS in: %s", reconnectDelay.String()) + waitComplete := acsSession.waitForDuration(reconnectDelay) + if waitComplete { + // If the context was not cancelled and we've waited for the + // wait duration without any errors, send the message to the channel + // to reconnect to ACS + seelog.Info("Done waiting; reconnecting to ACS") + sendEmptyMessageOnChannel(connectToACS) + } else { + // Wait was interrupted. We expect the session to close as canceling + // the session context is the only way to end up here. 
Print a message + // to indicate the same + seelog.Info("Interrupted waiting for reconnect delay to elapse; Expect session to close") + } + } + case <-acsSession.ctx.Done(): + // agent is shutting down, exiting cleanly + return nil + } + + } +} + +// startSessionOnce creates a session with ACS and handles requests using the passed +// in arguments +func (acsSession *session) startSessionOnce() error { + acsEndpoint, err := acsSession.ecsClient.DiscoverPollEndpoint(acsSession.containerInstanceARN) + if err != nil { + seelog.Errorf("acs: unable to discover poll endpoint, err: %v", err) + return err + } + + url := acsWsURL(acsEndpoint, acsSession.agentConfig.Cluster, acsSession.containerInstanceARN, acsSession.taskEngine, acsSession.resources) + client := acsSession.resources.createACSClient(url, acsSession.agentConfig) + defer client.Close() + + return acsSession.startACSSession(client) +} + +// startACSSession starts a session with ACS. It adds request handlers for various +// kinds of messages expected from ACS. 
It returns on server disconnection or when +// the context is cancelled +func (acsSession *session) startACSSession(client wsclient.ClientServer) error { + cfg := acsSession.agentConfig + + refreshCredsHandler := newRefreshCredentialsHandler(acsSession.ctx, cfg.Cluster, acsSession.containerInstanceARN, + client, acsSession.credentialsManager, acsSession.taskEngine) + defer refreshCredsHandler.clearAcks() + refreshCredsHandler.start() + defer refreshCredsHandler.stop() + + client.AddRequestHandler(refreshCredsHandler.handlerFunc()) + + // Add handler to ack task ENI attach message + eniAttachHandler := newAttachTaskENIHandler( + acsSession.ctx, + cfg.Cluster, + acsSession.containerInstanceARN, + client, + acsSession.state, + acsSession.dataClient, + ) + eniAttachHandler.start() + defer eniAttachHandler.stop() + + client.AddRequestHandler(eniAttachHandler.handlerFunc()) + + // Add handler to ack instance ENI attach message + instanceENIAttachHandler := newAttachInstanceENIHandler( + acsSession.ctx, + cfg.Cluster, + acsSession.containerInstanceARN, + client, + acsSession.state, + acsSession.dataClient, + ) + instanceENIAttachHandler.start() + defer instanceENIAttachHandler.stop() + + client.AddRequestHandler(instanceENIAttachHandler.handlerFunc()) + + // Add TaskManifestHandler + taskManifestHandler := newTaskManifestHandler(acsSession.ctx, cfg.Cluster, acsSession.containerInstanceARN, + client, acsSession.dataClient, acsSession.taskEngine, acsSession.latestSeqNumTaskManifest) + + defer taskManifestHandler.clearAcks() + taskManifestHandler.start() + defer taskManifestHandler.stop() + + client.AddRequestHandler(taskManifestHandler.handlerFuncTaskManifestMessage()) + client.AddRequestHandler(taskManifestHandler.handlerFuncTaskStopVerificationMessage()) + + // Add request handler for handling payload messages from ACS + payloadHandler := newPayloadRequestHandler( + acsSession.ctx, + acsSession.taskEngine, + acsSession.ecsClient, + cfg.Cluster, + 
acsSession.containerInstanceARN, + client, + acsSession.dataClient, + refreshCredsHandler, + acsSession.credentialsManager, + acsSession.taskHandler, acsSession.latestSeqNumTaskManifest) + // Clear the acks channel on return because acks of messageids don't have any value across sessions + defer payloadHandler.clearAcks() + payloadHandler.start() + defer payloadHandler.stop() + + client.AddRequestHandler(payloadHandler.handlerFunc()) + + // Ignore heartbeat messages; anyMessageHandler gets 'em + client.AddRequestHandler(func(*ecsacs.HeartbeatMessage) {}) + + updater.AddAgentUpdateHandlers(client, cfg, acsSession.state, acsSession.dataClient, acsSession.taskEngine) + + err := client.Connect() + if err != nil { + seelog.Errorf("Error connecting to ACS: %v", err) + return err + } + + seelog.Info("Connected to ACS endpoint") + // Start inactivity timer for closing the connection + timer := newDisconnectionTimer(client, acsSession.heartbeatTimeout(), acsSession.heartbeatJitter()) + // Any message from the server resets the disconnect timeout + client.SetAnyRequestHandler(anyMessageHandler(timer, client)) + defer timer.Stop() + + acsSession.resources.connectedToACS() + + backoffResetTimer := time.AfterFunc( + retry.AddJitter(acsSession.heartbeatTimeout(), acsSession.heartbeatJitter()), func() { + // If we do not have an error connecting and remain connected for at + // least 1 or so minutes, reset the backoff. This prevents disconnect + // errors that only happen infrequently from damaging the reconnect + // delay as significantly. 
+ acsSession.backoff.Reset() + }) + defer backoffResetTimer.Stop() + + serveErr := make(chan error, 1) + go func() { + serveErr <- client.Serve() + }() + + for { + select { + case <-acsSession.ctx.Done(): + // Stop receiving and sending messages from and to ACS when + // the context received from the main function is canceled + seelog.Infof("ACS session exited cleanly.") + return acsSession.ctx.Err() + case err := <-serveErr: + // Stop receiving and sending messages from and to ACS when + // client.Serve returns an error. This can happen when the + // the connection is closed by ACS or the agent + if err == nil || err == io.EOF { + seelog.Info("ACS Websocket connection closed for a valid reason") + } else { + seelog.Errorf("Error: lost websocket connection with Agent Communication Service (ACS): %v", err) + } + return err + } + } +} + +func (acsSession *session) computeReconnectDelay(isInactiveInstance bool) time.Duration { + if isInactiveInstance { + return acsSession._inactiveInstanceReconnectDelay + } + + return acsSession.backoff.Duration() +} + +// waitForDuration waits for the specified duration of time. If the wait is interrupted, +// it returns a false value. Else, it returns true, indicating completion of wait time. 
+func (acsSession *session) waitForDuration(delay time.Duration) bool { + reconnectTimer := time.NewTimer(delay) + select { + case <-reconnectTimer.C: + return true + case <-acsSession.ctx.Done(): + reconnectTimer.Stop() + return false + } +} + +func (acsSession *session) heartbeatTimeout() time.Duration { + return acsSession._heartbeatTimeout +} + +func (acsSession *session) heartbeatJitter() time.Duration { + return acsSession._heartbeatJitter +} + +// createACSClient creates the ACS Client using the specified URL +func (acsResources *acsSessionResources) createACSClient(url string, cfg *config.Config) wsclient.ClientServer { + return acsclient.New(url, cfg, acsResources.credentialsProvider, wsRWTimeout) +} + +// connectedToACS records a successful connection to ACS +// It sets sendCredentials to false on such an event +func (acsResources *acsSessionResources) connectedToACS() { + acsResources.sendCredentials = false +} + +// getSendCredentialsURLParameter gets the value to be set for the +// 'sendCredentials' URL parameter +func (acsResources *acsSessionResources) getSendCredentialsURLParameter() string { + return strconv.FormatBool(acsResources.sendCredentials) +} + +func newSessionResources(credentialsProvider *credentials.Credentials) sessionResources { + return &acsSessionResources{ + credentialsProvider: credentialsProvider, + sendCredentials: true, + } +} + +// acsWsURL returns the websocket url for ACS given the endpoint +func acsWsURL(endpoint, cluster, containerInstanceArn string, taskEngine engine.TaskEngine, acsSessionState sessionState) string { + acsURL := endpoint + if endpoint[len(endpoint)-1] != '/' { + acsURL += "/" + } + acsURL += "ws" + query := url.Values{} + query.Set("clusterArn", cluster) + query.Set("containerInstanceArn", containerInstanceArn) + query.Set("agentHash", version.GitHashString()) + query.Set("agentVersion", version.Version) + query.Set("seqNum", "1") + if dockerVersion, err := taskEngine.Version(); err == nil { + 
query.Set("dockerVersion", "DockerVersion: "+dockerVersion) + } + query.Set(sendCredentialsURLParameterName, acsSessionState.getSendCredentialsURLParameter()) + return acsURL + "?" + query.Encode() +} + +// newDisconnectionTimer creates a new time object, with a callback to +// disconnect from ACS on inactivity +func newDisconnectionTimer(client wsclient.ClientServer, timeout time.Duration, jitter time.Duration) ttime.Timer { + timer := time.AfterFunc(retry.AddJitter(timeout, jitter), func() { + seelog.Warn("ACS Connection hasn't had any activity for too long; closing connection") + if err := client.Close(); err != nil { + seelog.Warnf("Error disconnecting: %v", err) + } + seelog.Info("Disconnected from ACS") + }) + + return timer +} + +// anyMessageHandler handles any server message. Any server message means the +// connection is active and thus the heartbeat disconnect should not occur +func anyMessageHandler(timer ttime.Timer, client wsclient.ClientServer) func(interface{}) { + return func(interface{}) { + seelog.Debug("ACS activity occurred") + // Reset read deadline as there's activity on the channel + if err := client.SetReadDeadline(time.Now().Add(wsRWTimeout)); err != nil { + seelog.Warnf("Unable to extend read deadline for ACS connection: %v", err) + } + + // Reset heartbeat timer + timer.Reset(retry.AddJitter(heartbeatTimeout, heartbeatJitter)) + } +} + +func shouldReconnectWithoutBackoff(acsError error) bool { + return acsError == nil || acsError == io.EOF +} + +func isInactiveInstanceError(acsError error) bool { + return acsError != nil && strings.HasPrefix(acsError.Error(), inactiveInstanceExceptionPrefix) +} + +// sendEmptyMessageOnChannel sends an empty message using a go-routine on the +// specified channel +func sendEmptyMessageOnChannel(channel chan<- struct{}) { + go func() { + channel <- struct{}{} + }() +} diff --git a/agent/acs/handler/acs_handler_test.go b/agent/acs/handler/acs_handler_test.go new file mode 100644 index 
00000000000..9cad5a4d02b --- /dev/null +++ b/agent/acs/handler/acs_handler_test.go @@ -0,0 +1,1146 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "runtime" + "runtime/pprof" + "sync" + "testing" + "time" + + "context" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + rolecredentials "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventhandler" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + mock_retry "github.com/aws/amazon-ecs-agent/agent/utils/retry/mock" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/gorilla/websocket" + "github.com/pkg/errors" + 
"github.com/stretchr/testify/assert" + + "github.com/golang/mock/gomock" +) + +const ( + samplePayloadMessage = ` +{ + "type": "PayloadMessage", + "message": { + "messageId": "123", + "tasks": [ + { + "taskDefinitionAccountId": "123", + "containers": [ + { + "environment": {}, + "name": "name", + "cpu": 1, + "essential": true, + "memory": 1, + "portMappings": [], + "overrides": "{}", + "image": "i", + "mountPoints": [], + "volumesFrom": [] + } + ], + "elasticNetworkInterfaces":[{ + "attachmentArn": "eni_attach_arn", + "ec2Id": "eni_id", + "ipv4Addresses":[{ + "primary": true, + "privateAddress": "ipv4" + }], + "ipv6Addresses": [{ + "address": "ipv6" + }], + "subnetGatewayIpv4Address": "ipv4/20", + "macAddress": "mac" + }], + "roleCredentials": { + "credentialsId": "credsId", + "accessKeyId": "accessKeyId", + "expiration": "2016-03-25T06:17:19.318+0000", + "roleArn": "r1", + "secretAccessKey": "secretAccessKey", + "sessionToken": "token" + }, + "version": "3", + "volumes": [], + "family": "f", + "arn": "arn", + "desiredStatus": "RUNNING" + } + ], + "generatedAt": 1, + "clusterArn": "1", + "containerInstanceArn": "1", + "seqNum": 1 + } +} +` + sampleRefreshCredentialsMessage = ` +{ + "type": "IAMRoleCredentialsMessage", + "message": { + "messageId": "123", + "clusterArn": "default", + "taskArn": "t1", + "roleType": "TaskApplication", + "roleCredentials": { + "credentialsId": "credsId", + "accessKeyId": "newakid", + "expiration": "later", + "roleArn": "r1", + "secretAccessKey": "newskid", + "sessionToken": "newstkn" + } + } +} +` + acsURL = "http://endpoint.tld" +) + +var testConfig = &config.Config{ + Cluster: "someCluster", + AcceptInsecureCert: true, +} + +var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token") + +type mockSessionResources struct { + client wsclient.ClientServer +} + +func (m *mockSessionResources) createACSClient(url string, cfg *config.Config) wsclient.ClientServer { + return m.client +} + +func (m 
*mockSessionResources) connectedToACS() { +} + +func (m *mockSessionResources) getSendCredentialsURLParameter() string { + return "true" +} + +// TestACSWSURL tests if the URL is constructed correctly when connecting to ACS +func TestACSWSURL(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + + taskEngine.EXPECT().Version().Return("Docker version result", nil) + + wsurl := acsWsURL(acsURL, "myCluster", "myContainerInstance", taskEngine, &mockSessionResources{}) + + parsed, err := url.Parse(wsurl) + assert.NoError(t, err, "should be able to parse URL") + assert.Equal(t, "/ws", parsed.Path, "wrong path") + assert.Equal(t, "myCluster", parsed.Query().Get("clusterArn"), "wrong cluster") + assert.Equal(t, "myContainerInstance", parsed.Query().Get("containerInstanceArn"), "wrong container instance") + assert.Equal(t, version.Version, parsed.Query().Get("agentVersion"), "wrong agent version") + assert.Equal(t, version.GitHashString(), parsed.Query().Get("agentHash"), "wrong agent hash") + assert.Equal(t, "DockerVersion: Docker version result", parsed.Query().Get("dockerVersion"), "wrong docker version") + assert.Equalf(t, "true", parsed.Query().Get(sendCredentialsURLParameterName), "Wrong value set for: %s", sendCredentialsURLParameterName) + assert.Equal(t, "1", parsed.Query().Get("seqNum"), "wrong seqNum") +} + +// TestHandlerReconnectsOnConnectErrors tests if handler reconnects retries +// to establish the session with ACS when ClientServer.Connect() returns errors +func TestHandlerReconnectsOnConnectErrors(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + 
taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Serve().AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + gomock.InOrder( + // Connect fails 10 times + mockWsClient.EXPECT().Connect().Return(io.EOF).Times(10), + // Cancel trying to connect to ACS on the 11th attempt + // Failure to retry on Connect() errors should cause the + // test to time out as the context is never cancelled + mockWsClient.EXPECT().Connect().Do(func() { + cancel() + }).Return(nil).MinTimes(1), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestIsInactiveInstanceErrorReturnsTrueForInactiveInstance tests if the 'InactiveInstance' +// exception is identified correctly by the handler +func TestIsInactiveInstanceErrorReturnsTrueForInactiveInstance(t *testing.T) { + assert.True(t, isInactiveInstanceError(fmt.Errorf("InactiveInstanceException: ")), + "inactive instance exception message parsed incorrectly") +} + +// TestIsInactiveInstanceErrorReturnsFalseForActiveInstance tests if non 'InactiveInstance' +// exceptions are identified correctly by the handler +func TestIsInactiveInstanceErrorReturnsFalseForActiveInstance(t 
*testing.T) { + assert.False(t, isInactiveInstanceError(io.EOF), + "inactive instance exception message parsed incorrectly") +} + +// TestComputeReconnectDelayForInactiveInstance tests if the reconnect delay is computed +// correctly for an inactive instance +func TestComputeReconnectDelayForInactiveInstance(t *testing.T) { + acsSession := session{_inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay} + assert.Equal(t, inactiveInstanceReconnectDelay, acsSession.computeReconnectDelay(true), + "Reconnect delay doesn't match expected value for inactive instance") +} + +// TestComputeReconnectDelayForActiveInstance tests if the reconnect delay is computed +// correctly for an active instance +func TestComputeReconnectDelayForActiveInstance(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockBackoff := mock_retry.NewMockBackoff(ctrl) + mockBackoff.EXPECT().Duration().Return(connectionBackoffMax) + + acsSession := session{backoff: mockBackoff} + assert.Equal(t, connectionBackoffMax, acsSession.computeReconnectDelay(false), + "Reconnect delay doesn't match expected value for active instance") +} + +// TestWaitForDurationReturnsTrueWhenContextNotCancelled tests if the +// waitForDurationOrCancelledSession method behaves correctly when the session context +// is not cancelled +func TestWaitForDurationReturnsTrueWhenContextNotCancelled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + acsSession := session{ + ctx: ctx, + cancel: cancel, + } + + assert.True(t, acsSession.waitForDuration(time.Millisecond), + "WaitForDuration should return true when uninterrupted") +} + +// TestWaitForDurationReturnsFalseWhenContextCancelled tests if the +// waitForDurationOrCancelledSession method behaves correctly when the session contexnt +// is cancelled +func TestWaitForDurationReturnsFalseWhenContextCancelled(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + acsSession := session{ + ctx: ctx, + cancel: cancel, + } + cancel() + + assert.False(t, acsSession.waitForDuration(time.Millisecond), + "WaitForDuration should return false when interrupted") +} + +func TestShouldReconnectWithoutBackoffReturnsTrueForEOF(t *testing.T) { + assert.True(t, shouldReconnectWithoutBackoff(io.EOF), + "Reconnect without backoff should return true when connection is closed") +} + +func TestShouldReconnectWithoutBackoffReturnsFalseForNonEOF(t *testing.T) { + assert.False(t, shouldReconnectWithoutBackoff(fmt.Errorf("not EOF")), + "Reconnect without backoff should return false for non io.EOF error") +} + +// TestHandlerReconnectsWithoutBackoffOnEOFError tests if the session handler reconnects +// to ACS without any delay when the connection is closed with the io.EOF error +func TestHandlerReconnectsWithoutBackoffOnEOFError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + deregisterInstanceEventStream := eventstream.NewEventStream("DeregisterContainerInstance", ctx) + deregisterInstanceEventStream.StartListening() + + mockBackoff := mock_retry.NewMockBackoff(ctrl) + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + gomock.InOrder( + mockWsClient.EXPECT().Connect().Return(io.EOF), + // The backoff.Reset() method is 
expected to be invoked when the connection + // is closed with io.EOF + mockBackoff.EXPECT().Reset(), + mockWsClient.EXPECT().Connect().Do(func() { + // cancel the context on the 2nd connect attempt, which should stop + // the test + cancel() + }).Return(io.EOF), + mockBackoff.EXPECT().Reset().AnyTimes(), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + deregisterInstanceEventStream: deregisterInstanceEventStream, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: mockBackoff, + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + latestSeqNumTaskManifest: aws.Int64(10), + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + _inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestHandlerReconnectsWithoutBackoffOnEOFError tests if the session handler reconnects +// to ACS after a backoff duration when the connection is closed with non io.EOF error +func TestHandlerReconnectsWithBackoffOnNonEOFError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + deregisterInstanceEventStream := eventstream.NewEventStream("DeregisterContainerInstance", ctx) + deregisterInstanceEventStream.StartListening() + + mockBackoff := mock_retry.NewMockBackoff(ctrl) + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) 
+ mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + gomock.InOrder( + mockWsClient.EXPECT().Connect().Return(fmt.Errorf("not EOF")), + // The backoff.Duration() method is expected to be invoked when + // the connection is closed with a non-EOF error code to compute + // the backoff. Also, no calls to backoff.Reset() are expected + // in this code path. + mockBackoff.EXPECT().Duration().Return(time.Millisecond), + mockWsClient.EXPECT().Connect().Do(func() { + cancel() + }).Return(io.EOF), + mockBackoff.EXPECT().Reset().AnyTimes(), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + deregisterInstanceEventStream: deregisterInstanceEventStream, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: mockBackoff, + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestHandlerGeneratesDeregisteredInstanceEvent tests if the session handler generates +// an event into the deregister instance event stream when the acs connection is closed +// with inactive instance error +func TestHandlerGeneratesDeregisteredInstanceEvent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := 
eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + deregisterInstanceEventStream := eventstream.NewEventStream("DeregisterContainerInstance", ctx) + + // receiverFunc cancels the context when invoked. Any event on the deregister + // instance even stream would trigger this. + receiverFunc := func(...interface{}) error { + cancel() + return nil + } + err := deregisterInstanceEventStream.Subscribe("DeregisterContainerInstance", receiverFunc) + assert.NoError(t, err, "Error adding deregister instance event stream subscriber") + deregisterInstanceEventStream.StartListening() + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + mockWsClient.EXPECT().Connect().Return(fmt.Errorf("InactiveInstanceException:")) + inactiveInstanceReconnectDelay := 200 * time.Millisecond + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + deregisterInstanceEventStream: deregisterInstanceEventStream, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + _inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestHandlerReconnectDelayForInactiveInstanceError tests if the session handler applies +// the proper reconnect delay with ACS when ClientServer.Connect() returns the +// InstanceInactive error +func 
TestHandlerReconnectDelayForInactiveInstanceError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + deregisterInstanceEventStream := eventstream.NewEventStream("DeregisterContainerInstance", ctx) + deregisterInstanceEventStream.StartListening() + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + var firstConnectionAttemptTime time.Time + inactiveInstanceReconnectDelay := 200 * time.Millisecond + gomock.InOrder( + mockWsClient.EXPECT().Connect().Do(func() { + firstConnectionAttemptTime = time.Now() + }).Return(fmt.Errorf("InactiveInstanceException:")), + mockWsClient.EXPECT().Connect().Do(func() { + reconnectDelay := time.Now().Sub(firstConnectionAttemptTime) + reconnectDelayTime := time.Now() + t.Logf("Delay between successive connections: %v", reconnectDelay) + timeSubFuncSlopAllowed := 2 * time.Millisecond + if reconnectDelay < inactiveInstanceReconnectDelay { + // On windows platform, we found issue with time.Now().Sub(...) reporting 199.9989 even + // after the code has already waited for time.NewTimer(200)ms. 
+ assert.WithinDuration(t, reconnectDelayTime, firstConnectionAttemptTime.Add(inactiveInstanceReconnectDelay), timeSubFuncSlopAllowed) + } + cancel() + }).Return(io.EOF), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + deregisterInstanceEventStream: deregisterInstanceEventStream, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + _inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestHandlerReconnectsOnServeErrors tests if the handler retries to +// to establish the session with ACS when ClientServer.Connect() returns errors +func TestHandlerReconnectsOnServeErrors(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Connect().Return(nil).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + gomock.InOrder( + // Serve fails 10 times + 
mockWsClient.EXPECT().Serve().Return(io.EOF).Times(10), + // Cancel trying to Serve ACS requests on the 11th attempt + // Failure to retry on Serve() errors should cause the + // test to time out as the context is never cancelled + mockWsClient.EXPECT().Serve().Do(func() { + cancel() + }), + ) + + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go func() { + acsSession.Start() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TestHandlerStopsWhenContextIsCancelled tests if the session's Start() method returns +// when session context is cancelled +func TestHandlerStopsWhenContextIsCancelled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Connect().Return(nil).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + gomock.InOrder( + mockWsClient.EXPECT().Serve().Return(io.EOF), + 
mockWsClient.EXPECT().Serve().Do(func() { + cancel() + }).Return(errors.New("InactiveInstanceException")), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + + // The session error channel would have an event when the Start() method returns + // Cancelling the context should trigger this + sessionError := make(chan error) + go func() { + sessionError <- acsSession.Start() + }() + <-sessionError +} + +// TestHandlerReconnectsOnDiscoverPollEndpointError tests if handler retries +// to establish the session with ACS on DiscoverPollEndpoint errors +func TestHandlerReconnectsOnDiscoverPollEndpointError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Serve().AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + mockWsClient.EXPECT().Connect().Do(func() { + // Serve() cancels the context + cancel() + }).Return(nil).MinTimes(1) + + gomock.InOrder( + // DiscoverPollEndpoint returns an error on its first invocation + 
ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("", fmt.Errorf("oops")).Times(1), + // Second invocation returns a success + ecsClient.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(acsURL, nil).Times(1), + ) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + ctx: ctx, + cancel: cancel, + resources: &mockSessionResources{mockWsClient}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go func() { + acsSession.Start() + }() + start := time.Now() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } + + // Measure the duration between retries + timeSinceStart := time.Since(start) + if timeSinceStart < connectionBackoffMin { + t.Errorf("Duration since start is less than minimum threshold for backoff: %s", timeSinceStart.String()) + } + + // The upper limit here should really be connectionBackoffMin + (connectionBackoffMin * jitter) + // But, it can be off by a few milliseconds to account for execution of other instructions + // In any case, it should never be higher than 4*connectionBackoffMin + if timeSinceStart > 4*connectionBackoffMin { + t.Errorf("Duration since start is greater than maximum anticipated wait time: %v", timeSinceStart.String()) + } +} + +// TestConnectionIsClosedOnIdle tests if the connection to ACS is closed +// when the channel is idle +func TestConnectionIsClosedOnIdle(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + ecsClient := mock_api.NewMockECSClient(ctrl) + ctx, cancel := 
context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + defer cancel() + + wait := sync.WaitGroup{} + wait.Add(1) + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).Do(func(v interface{}) {}).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).Do(func(v interface{}) {}).AnyTimes() + mockWsClient.EXPECT().Connect().Return(nil) + mockWsClient.EXPECT().Serve().Do(func() { + wait.Done() + // Pretend as if the maximum heartbeatTimeout duration has + // been breached while Serving requests + time.Sleep(30 * time.Millisecond) + }).Return(io.EOF) + + connectionClosed := make(chan bool) + mockWsClient.EXPECT().Close().Do(func() { + wait.Wait() + // Record connection closed + connectionClosed <- true + }).Return(nil) + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + ctx: context.Background(), + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + resources: &mockSessionResources{}, + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go acsSession.startACSSession(mockWsClient) + + // Wait for connection to be closed. 
If the connection is not closed + // due to inactivity, the test will time out + <-connectionClosed +} + +func TestHandlerDoesntLeakGoroutines(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + closeWS := make(chan bool) + server, serverIn, requests, errs, err := startMockAcsServer(t, closeWS) + if err != nil { + t.Fatal(err) + } + go func() { + for { + select { + case <-requests: + case <-errs: + } + } + }() + + timesConnected := 0 + ecsClient.EXPECT().DiscoverPollEndpoint("myArn").Return(server.URL, nil).AnyTimes().Do(func(_ interface{}) { + timesConnected++ + }) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + taskEngine.EXPECT().AddTask(gomock.Any()).AnyTimes() + + ended := make(chan bool, 1) + go func() { + + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + ctx: ctx, + _heartbeatTimeout: 1 * time.Second, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + resources: newSessionResources(testCreds), + credentialsManager: rolecredentials.NewManager(), + latestSeqNumTaskManifest: aws.Int64(12), + } + acsSession.Start() + ended <- true + }() + // Warm it up + serverIn <- `{"type":"HeartbeatMessage","message":{"healthy":true}}` + serverIn <- samplePayloadMessage + + beforeGoroutines := runtime.NumGoroutine() + for i := 0; i < 100; i++ { + serverIn <- `{"type":"HeartbeatMessage","message":{"healthy":true}}` + serverIn <- samplePayloadMessage + closeWS <- true + } + + cancel() + <-ended + + // The number of goroutines 
finishing in the MockACSServer will affect + // the result unless we wait here. + time.Sleep(10 * time.Millisecond) + afterGoroutines := runtime.NumGoroutine() + + t.Logf("Goroutines after 1 and after %v acs messages: %v and %v", timesConnected, beforeGoroutines, afterGoroutines) + + if timesConnected < 50 { + t.Fatal("Expected times connected to be a large number, was ", timesConnected) + } + if afterGoroutines > beforeGoroutines+5 { + t.Error("Goroutine leak, oh no!") + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + } + +} + +// TestStartSessionHandlesRefreshCredentialsMessages tests the agent restart +// scenario where the payload to refresh credentials is processed immediately on +// connection establishment with ACS +func TestStartSessionHandlesRefreshCredentialsMessages(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + closeWS := make(chan bool) + server, serverIn, requestsChan, errChan, err := startMockAcsServer(t, closeWS) + if err != nil { + t.Fatal(err) + } + defer close(serverIn) + + go func() { + for { + select { + case <-requestsChan: + // Cancel the context when we get the ack request + cancel() + } + } + }() + + // DiscoverPollEndpoint returns the URL for the server that we started + ecsClient.EXPECT().DiscoverPollEndpoint("myArn").Return(server.URL, nil).Times(1) + taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + + latestSeqNumberTaskManifest := int64(10) + ended := make(chan bool, 1) + go func() { + acsSession := NewSession(ctx, + testConfig, + nil, + "myArn", + testCreds, + ecsClient, + dockerstate.NewTaskEngineState(), + data.NewNoopClient(), + taskEngine, + credentialsManager, + taskHandler, 
&latestSeqNumberTaskManifest, + ) + acsSession.Start() + // StartSession should never return unless the context is canceled + ended <- true + }() + + updatedCredentials := rolecredentials.TaskIAMRoleCredentials{} + taskFromEngine := &apitask.Task{} + credentialsIdInRefreshMessage := "credsId" + // Ensure that credentials manager interface methods are invoked in the + // correct order, with expected arguments + gomock.InOrder( + // Return a task from the engine for GetTaskByArn + taskEngine.EXPECT().GetTaskByArn("t1").Return(taskFromEngine, true), + // The last invocation of SetCredentials is to update + // credentials when a refresh message is received by the handler + credentialsManager.EXPECT().SetTaskCredentials(gomock.Any()).Do(func(creds *rolecredentials.TaskIAMRoleCredentials) { + updatedCredentials = *creds + // Validate parsed credentials after the update + expectedCreds := rolecredentials.TaskIAMRoleCredentials{ + ARN: "t1", + IAMRoleCredentials: rolecredentials.IAMRoleCredentials{ + RoleArn: "r1", + AccessKeyID: "newakid", + SecretAccessKey: "newskid", + SessionToken: "newstkn", + Expiration: "later", + CredentialsID: credentialsIdInRefreshMessage, + RoleType: "TaskApplication", + }, + } + if !reflect.DeepEqual(updatedCredentials, expectedCreds) { + t.Errorf("Mismatch between expected and credentials expected: %v, added: %v", expectedCreds, updatedCredentials) + } + }).Return(nil), + ) + serverIn <- sampleRefreshCredentialsMessage + + select { + case err := <-errChan: + t.Fatal("Error should not have been returned from server", err) + case <-ctx.Done(): + // Context is canceled when requestsChan receives an ack + } + + // Validate that the correct credentialsId is set for the task + credentialsIdFromTask := taskFromEngine.GetCredentialsID() + if credentialsIdFromTask != credentialsIdInRefreshMessage { + t.Errorf("Mismatch between expected and added credentials id for task, expected: %s, added: %s", credentialsIdInRefreshMessage, credentialsIdFromTask) + } 
+ + server.Close() + // Cancel context should close the session + <-ended +} + +// TestACSSessionResourcesCorrectlySetsSendCredentials tests if acsSessionResources +// struct correctly sets 'sendCredentials' +func TestACSSessionResourcesCorrectlySetsSendCredentials(t *testing.T) { + acsResources := newSessionResources(nil) + // Validate that 'sendCredentials' is set to true on create + sendCredentials := acsResources.getSendCredentialsURLParameter() + if sendCredentials != "true" { + t.Errorf("Mismatch in sendCredentials URL parameter value, expected: 'true', got: %s", sendCredentials) + } + // Simulate a successful connection to ACS + acsResources.connectedToACS() + // On successful connection to ACS, 'sendCredentials' should be set to false + sendCredentials = acsResources.getSendCredentialsURLParameter() + if sendCredentials != "false" { + t.Errorf("Mismatch in sendCredentials URL parameter value, expected: 'false', got: %s", sendCredentials) + } +} + +// TestHandlerReconnectsCorrectlySetsSendCredentialsURLParameter tests if +// the 'sendCredentials' URL parameter is set correctly for successive +// invocations of startACSSession +func TestHandlerReconnectsCorrectlySetsSendCredentialsURLParameter(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + + mockWsClient.EXPECT().SetAnyRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().AddRequestHandler(gomock.Any()).AnyTimes() + mockWsClient.EXPECT().Close().Return(nil).AnyTimes() + mockWsClient.EXPECT().Serve().Return(io.EOF).AnyTimes() + resources := newSessionResources(testCreds) + gomock.InOrder( + // When the websocket client connects to ACS for the first + // time, 'sendCredentials' 
should be set to true + mockWsClient.EXPECT().Connect().Do(func() { + validateSendCredentialsInSession(t, resources, "true") + }).Return(nil), + // For all subsequent connections to ACS, 'sendCredentials' + // should be set to false + mockWsClient.EXPECT().Connect().Do(func() { + validateSendCredentialsInSession(t, resources, "false") + }).Return(nil).AnyTimes(), + ) + + acsSession := session{ + containerInstanceARN: "myArn", + credentialsProvider: testCreds, + agentConfig: testConfig, + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: data.NewNoopClient(), + taskHandler: taskHandler, + ctx: ctx, + resources: resources, + backoff: retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier), + _heartbeatTimeout: 20 * time.Millisecond, + _heartbeatJitter: 10 * time.Millisecond, + } + go func() { + for i := 0; i < 10; i++ { + acsSession.startACSSession(mockWsClient) + } + cancel() + }() + + // Wait for context to be cancelled + select { + case <-ctx.Done(): + } +} + +// TODO: replace with gomock +func startMockAcsServer(t *testing.T, closeWS <-chan bool) (*httptest.Server, chan<- string, <-chan string, <-chan error, error) { + serverChan := make(chan string, 1) + requestsChan := make(chan string, 1) + errChan := make(chan error, 1) + + upgrader := websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024} + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ws, err := upgrader.Upgrade(w, r, nil) + + if err != nil { + errChan <- err + } + + go func() { + _, msg, err := ws.ReadMessage() + if err != nil { + errChan <- err + } else { + requestsChan <- string(msg) + } + }() + for { + select { + case str := <-serverChan: + err := ws.WriteMessage(websocket.TextMessage, []byte(str)) + if err != nil { + errChan <- err + } + + case <-closeWS: + ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + ws.Close() + errChan 
<- io.EOF + // Quit listening to serverChan if we've been closed + return + } + + } + }) + + server := httptest.NewTLSServer(handler) + return server, serverChan, requestsChan, errChan, nil +} + +// validateAddedTask validates fields in addedTask for expected values +// It returns an error if there's a mismatch +func validateAddedTask(expectedTask apitask.Task, addedTask apitask.Task) error { + // The ecsacs.Task -> apitask.Task conversion initializes all fields in apitask.Task + // with empty objects. So, we create a new object to compare with only those + // fields that we are interested in for comparison + taskToCompareFromAdded := apitask.Task{ + Arn: addedTask.Arn, + Family: addedTask.Family, + Version: addedTask.Version, + DesiredStatusUnsafe: addedTask.GetDesiredStatus(), + StartSequenceNumber: addedTask.StartSequenceNumber, + } + + if !reflect.DeepEqual(expectedTask, taskToCompareFromAdded) { + return fmt.Errorf("Mismatch between added and expected task: expected: %v, added: %v", expectedTask, taskToCompareFromAdded) + } + + return nil +} + +// validateAddedContainer validates fields in addedContainer for expected values +// It returns an error if there's a mismatch +func validateAddedContainer(expectedContainer *apicontainer.Container, addedContainer *apicontainer.Container) error { + // The ecsacs.Task -> apitask.Task conversion initializes all fields in apicontainer.Container + // with empty objects. 
So, we create a new object to compare with only those + fields that we are interested in for comparison + containerToCompareFromAdded := &apicontainer.Container{ + Name: addedContainer.Name, + CPU: addedContainer.CPU, + Essential: addedContainer.Essential, + Memory: addedContainer.Memory, + Image: addedContainer.Image, + } + if !reflect.DeepEqual(expectedContainer, containerToCompareFromAdded) { + return fmt.Errorf("Mismatch between added and expected container: expected: %v, added: %v", expectedContainer, containerToCompareFromAdded) + } + return nil +} + +func validateSendCredentialsInSession(t *testing.T, state sessionState, expected string) { + sendCredentials := state.getSendCredentialsURLParameter() + if sendCredentials != expected { + t.Errorf("Incorrect value set for sendCredentials, expected: %s, got: %s", expected, sendCredentials) + } +} diff --git a/agent/acs/handler/attach_eni_handler_common.go b/agent/acs/handler/attach_eni_handler_common.go new file mode 100644 index 00000000000..c41e05081fd --- /dev/null +++ b/agent/acs/handler/attach_eni_handler_common.go @@ -0,0 +1,133 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handler + +import ( + "fmt" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// ackTimeoutHandler remove ENI attachment from agent state after the ENI ack timeout +type ackTimeoutHandler struct { + mac string + state dockerstate.TaskEngineState + dataClient data.Client +} + +func (handler *ackTimeoutHandler) handle() { + eniAttachment, ok := handler.state.ENIByMac(handler.mac) + if !ok { + seelog.Warnf("Ignoring unmanaged ENI attachment mac=%s", handler.mac) + return + } + if !eniAttachment.IsSent() { + seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record %s", eniAttachment.String()) + handler.removeENIAttachmentData(handler.mac) + handler.state.RemoveENIAttachment(handler.mac) + } +} + +func (handler *ackTimeoutHandler) removeENIAttachmentData(mac string) { + attachmentToRemove, ok := handler.state.ENIByMac(mac) + if !ok { + seelog.Errorf("Unable to retrieve ENI Attachment for mac address %s: ", mac) + return + } + attachmentId, err := utils.GetENIAttachmentId(attachmentToRemove.AttachmentARN) + if err != nil { + seelog.Errorf("Failed to get attachment id for %s: %v", attachmentToRemove.AttachmentARN, err) + } else { + err = handler.dataClient.DeleteENIAttachment(attachmentId) + if err != nil { + seelog.Errorf("Failed to remove data for eni attachment %s: %v", attachmentId, err) + } + } +} + +// sendAck sends ack for a certain ACS message +func sendAck(acsClient wsclient.ClientServer, clusterArn *string, containerInstanceArn *string, messageId *string) { + if err := acsClient.MakeRequest(&ecsacs.AckRequest{ + Cluster: clusterArn, + ContainerInstance: 
containerInstanceArn, + MessageId: messageId, + }); err != nil { + seelog.Warnf("Failed to ack request with messageId: %s, error: %v", aws.StringValue(messageId), err) + } +} + +// handleENIAttachment handles an ENI attachment via the following: +// 1. Check whether we already have this attachment in state, if so, start its ack timer and return +// 2. Otherwise add the attachment to state, start its ack timer, and save the state +// These are common tasks for handling a task ENI attachment and an instance ENI attachment, so they are put +// into this function to be shared by both attachment handlers +func handleENIAttachment(attachmentType, attachmentARN, taskARN, mac string, + expiresAt time.Time, + state dockerstate.TaskEngineState, + dataClient data.Client) error { + seelog.Infof("Handling ENI attachment: %s", attachmentARN) + + if eniAttachment, ok := state.ENIByMac(mac); ok { + seelog.Infof("Duplicate %s attachment message for ENI mac=%s taskARN=%s attachmentARN=%s", + attachmentType, mac, taskARN, attachmentARN) + eniAckTimeoutHandler := ackTimeoutHandler{mac: mac, state: state, dataClient: dataClient} + return eniAttachment.StartTimer(eniAckTimeoutHandler.handle) + } + if err := addENIAttachmentToState(attachmentType, attachmentARN, taskARN, mac, expiresAt, state, dataClient); err != nil { + return errors.Wrapf(err, fmt.Sprintf("attach %s message handler: unable to add eni attachment to engine state mac=%s taskARN=%s attachmentARN=%s", + attachmentType, mac, taskARN, attachmentARN)) + } + return nil +} + +// addENIAttachmentToState adds an ENI attachment to state, and start its ack timer +func addENIAttachmentToState(attachmentType, attachmentARN, taskARN, mac string, expiresAt time.Time, state dockerstate.TaskEngineState, dataClient data.Client) error { + eniAttachment := &apieni.ENIAttachment{ + TaskARN: taskARN, + AttachmentType: attachmentType, + AttachmentARN: attachmentARN, + AttachStatusSent: false, + MACAddress: mac, + ExpiresAt: expiresAt, // Stop 
tracking the eni attachment after timeout + } + eniAckTimeoutHandler := ackTimeoutHandler{mac: mac, state: state, dataClient: dataClient} + if err := eniAttachment.StartTimer(eniAckTimeoutHandler.handle); err != nil { + return err + } + + switch attachmentType { + case apieni.ENIAttachmentTypeTaskENI: + seelog.Infof("Adding task eni attachment info for task '%s' to state, attachment=%s mac=%s", + taskARN, attachmentARN, mac) + case apieni.ENIAttachmentTypeInstanceENI: + seelog.Infof("Adding instance eni attachment info to state, attachment=%s mac=%s", attachmentARN, mac) + default: + return fmt.Errorf("unrecognized eni attachment type: %s", attachmentType) + } + state.AddENIAttachment(eniAttachment) + if err := dataClient.SaveENIAttachment(eniAttachment); err != nil { + seelog.Errorf("Failed to save data for eni attachment %s: %v", eniAttachment.AttachmentARN, err) + } + return nil +} diff --git a/agent/acs/handler/attach_eni_handler_common_test.go b/agent/acs/handler/attach_eni_handler_common_test.go new file mode 100644 index 00000000000..036d698754f --- /dev/null +++ b/agent/acs/handler/attach_eni_handler_common_test.go @@ -0,0 +1,130 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handler + +import ( + "testing" + "time" + + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + attachmentArn = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" +) + +// TestTaskENIAckTimeout tests acknowledge timeout for a task eni before submit the state change +func TestTaskENIAckTimeout(t *testing.T) { + testENIAckTimeout(t, apieni.ENIAttachmentTypeTaskENI) +} + +// TestInstanceENIAckTimeout tests acknowledge timeout for an instance level eni before submit the state change +func TestInstanceENIAckTimeout(t *testing.T) { + testENIAckTimeout(t, apieni.ENIAttachmentTypeInstanceENI) +} + +func testENIAckTimeout(t *testing.T, attachmentType string) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngineState := dockerstate.NewTaskEngineState() + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + expiresAt := time.Now().Add(time.Millisecond * waitTimeoutMillis) + err := addENIAttachmentToState(attachmentType, attachmentArn, taskArn, randomMAC, expiresAt, taskEngineState, dataClient) + assert.NoError(t, err) + assert.Len(t, taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments(), 1) + res, err := dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 1) + for { + time.Sleep(time.Millisecond * waitTimeoutMillis) + if len(taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments()) == 0 { + res, err := dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 0) + break + } + } +} + +// TestTaskENIAckWithinTimeout tests the eni state change was reported before the timeout, for a task eni +func TestTaskENIAckWithinTimeout(t *testing.T) { + testENIAckWithinTimeout(t, apieni.ENIAttachmentTypeTaskENI) +} + +// TestInstanceENIAckWithinTimeout tests the eni state 
change was reported before the timeout, for an instance eni +func TestInstanceENIAckWithinTimeout(t *testing.T) { + testENIAckWithinTimeout(t, apieni.ENIAttachmentTypeInstanceENI) +} + +func testENIAckWithinTimeout(t *testing.T, attachmentType string) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngineState := dockerstate.NewTaskEngineState() + dataClient := data.NewNoopClient() + expiresAt := time.Now().Add(time.Millisecond * waitTimeoutMillis) + err := addENIAttachmentToState(attachmentType, attachmentArn, taskArn, randomMAC, expiresAt, taskEngineState, dataClient) + assert.NoError(t, err) + assert.Len(t, taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments(), 1) + eniAttachment, ok := taskEngineState.(*dockerstate.DockerTaskEngineState).ENIByMac(randomMAC) + assert.True(t, ok) + eniAttachment.SetSentStatus() + + time.Sleep(time.Millisecond * waitTimeoutMillis) + + assert.Len(t, taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments(), 1) +} + +// TestHandleENIAttachmentTaskENI tests handling a new task eni +func TestHandleENIAttachmentTaskENI(t *testing.T) { + testHandleENIAttachment(t, apieni.ENIAttachmentTypeTaskENI, taskArn) +} + +// TestHandleENIAttachmentInstanceENI tests handling a new instance eni +func TestHandleENIAttachmentInstanceENI(t *testing.T) { + testHandleENIAttachment(t, apieni.ENIAttachmentTypeInstanceENI, "") +} + +func testHandleENIAttachment(t *testing.T, attachmentType, taskArn string) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + taskEngineState := dockerstate.NewTaskEngineState() + expiresAt := time.Now().Add(time.Millisecond * waitTimeoutMillis) + err := handleENIAttachment(attachmentType, attachmentArn, taskArn, randomMAC, expiresAt, taskEngineState, dataClient) + assert.NoError(t, err) + assert.Len(t, taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments(), 1) + eniAttachment, ok := 
taskEngineState.(*dockerstate.DockerTaskEngineState).ENIByMac(randomMAC) + assert.True(t, ok) + eniAttachment.SetSentStatus() + + time.Sleep(time.Millisecond * waitTimeoutMillis) + + assert.Len(t, taskEngineState.(*dockerstate.DockerTaskEngineState).AllENIAttachments(), 1) + res, err := dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 1) +} diff --git a/agent/acs/handler/attach_instance_eni_handler.go b/agent/acs/handler/attach_instance_eni_handler.go new file mode 100644 index 00000000000..a53cdbb774f --- /dev/null +++ b/agent/acs/handler/attach_instance_eni_handler.go @@ -0,0 +1,153 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handler + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + "context" +) + +// attachInstanceENIHandler handles instance ENI attach operation for the ACS client +type attachInstanceENIHandler struct { + messageBuffer chan *ecsacs.AttachInstanceNetworkInterfacesMessage + ctx context.Context + cancel context.CancelFunc + cluster *string + containerInstance *string + acsClient wsclient.ClientServer + state dockerstate.TaskEngineState + dataClient data.Client +} + +// newAttachInstanceENIHandler returns an instance of the attachInstanceENIHandler struct +func newAttachInstanceENIHandler(ctx context.Context, + cluster string, + containerInstanceArn string, + acsClient wsclient.ClientServer, + taskEngineState dockerstate.TaskEngineState, + dataClient data.Client) attachInstanceENIHandler { + // Create a cancelable context from the parent context + derivedContext, cancel := context.WithCancel(ctx) + return attachInstanceENIHandler{ + messageBuffer: make(chan *ecsacs.AttachInstanceNetworkInterfacesMessage), + ctx: derivedContext, + cancel: cancel, + cluster: aws.String(cluster), + containerInstance: aws.String(containerInstanceArn), + acsClient: acsClient, + state: taskEngineState, + dataClient: dataClient, + } +} + +// handlerFunc returns a function to enqueue requests onto attachENIHandler buffer +func (handler *attachInstanceENIHandler) handlerFunc() func(message *ecsacs.AttachInstanceNetworkInterfacesMessage) { + return func(message *ecsacs.AttachInstanceNetworkInterfacesMessage) { + handler.messageBuffer <- message + } +} + +// start invokes handleMessages to ack each enqueued request +func (handler 
*attachInstanceENIHandler) start() { + go handler.handleMessages() +} + +// stop is used to invoke a cancellation function +func (handler *attachInstanceENIHandler) stop() { + handler.cancel() +} + +// handleMessages handles each message one at a time +func (handler *attachInstanceENIHandler) handleMessages() { + for { + select { + case <-handler.ctx.Done(): + return + case message := <-handler.messageBuffer: + if err := handler.handleSingleMessage(message); err != nil { + seelog.Warnf("Unable to handle instance ENI Attachment message [%s]: %v", message.String(), err) + } + } + } +} + +// handleSingleMessage acks the message received +func (handler *attachInstanceENIHandler) handleSingleMessage(message *ecsacs.AttachInstanceNetworkInterfacesMessage) error { + receivedAt := time.Now() + // Validate fields in the message + if err := validateAttachInstanceNetworkInterfacesMessage(message); err != nil { + return errors.Wrapf(err, + "attach instance eni message handler: error validating AttachInstanceNetworkInterfacesMessage") + } + + // Send ACK + go sendAck(handler.acsClient, message.ClusterArn, message.ContainerInstanceArn, message.MessageId) + + // Handle the attachment + attachmentARN := aws.StringValue(message.ElasticNetworkInterfaces[0].AttachmentArn) + mac := aws.StringValue(message.ElasticNetworkInterfaces[0].MacAddress) + expiresAt := receivedAt.Add(time.Duration(aws.Int64Value(message.WaitTimeoutMs)) * time.Millisecond) + return handleENIAttachment(apieni.ENIAttachmentTypeInstanceENI, attachmentARN, "", mac, expiresAt, handler.state, handler.dataClient) +} + +// validateAttachInstanceNetworkInterfacesMessage performs validation checks on the +// AttachInstanceNetworkInterfacesMessage +func validateAttachInstanceNetworkInterfacesMessage(message *ecsacs.AttachInstanceNetworkInterfacesMessage) error { + if message == nil { + return errors.Errorf("message is empty") + } + + messageId := aws.StringValue(message.MessageId) + if messageId == "" { + return 
errors.Errorf("message id not set") + } + + clusterArn := aws.StringValue(message.ClusterArn) + if clusterArn == "" { + return errors.Errorf("clusterArn not set") + } + + containerInstanceArn := aws.StringValue(message.ContainerInstanceArn) + if containerInstanceArn == "" { + return errors.Errorf("containerInstanceArn not set") + } + + enis := message.ElasticNetworkInterfaces + if len(enis) != 1 { + return errors.Errorf("incorrect number of instance ENIs in message: %d", len(enis)) + } + + eni := enis[0] + if aws.StringValue(eni.MacAddress) == "" { + return errors.Errorf("MACAddress not provided") + } + + timeout := aws.Int64Value(message.WaitTimeoutMs) + if timeout <= 0 { + return errors.Errorf("invalid timeout specified: %d", timeout) + + } + return nil +} diff --git a/agent/acs/handler/attach_instance_eni_handler_test.go b/agent/acs/handler/attach_instance_eni_handler_test.go new file mode 100644 index 00000000000..938d99826a7 --- /dev/null +++ b/agent/acs/handler/attach_instance_eni_handler_test.go @@ -0,0 +1,280 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handler + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +// TestInvalidAttachInstanceENIMessage tests various invalid formats of AttachInstanceNetworkInterfacesMessage +func TestInvalidAttachInstanceENIMessage(t *testing.T) { + tcs := []struct { + message *ecsacs.AttachInstanceNetworkInterfacesMessage + description string + }{ + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message without message id should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message without cluster arn should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message without container instance arn should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: 
aws.String(clusterName), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message without network interfaces should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + { + MacAddress: aws.String(randomMAC), + Ec2Id: aws.String("1"), + }, + { + MacAddress: aws.String(randomMAC), + Ec2Id: aws.String("2"), + }, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message with multiple network interfaces should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + {}, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message without network details should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + { + Ec2Id: aws.String("1"), + }, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + }, + description: "Message with a network interface without macAddress should be invalid", + }, + { + message: &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + { + MacAddress: aws.String(randomMAC), + Ec2Id: aws.String("1"), + }, + }, + }, + description: "Message without wait timeout should be invalid", + }, + } + + for _, tc := range tcs { + 
t.Run(tc.description, func(t *testing.T) { + assert.Error(t, validateAttachInstanceNetworkInterfacesMessage(tc.message)) + }) + } +} + +// TestInstanceENIAckSingleMessage checks the ack for a single message +func TestInstanceENIAckSingleMessage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngineState := dockerstate.NewTaskEngineState() + dataClient := data.NewNoopClient() + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + handler := newAttachInstanceENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, taskEngineState, dataClient) + + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + }) + + go handler.start() + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + AttachmentArn: aws.String(attachmentArn), + } + message := &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + handler.messageBuffer <- message + + ackSent.Wait() + handler.stop() +} + +// TestInstanceENIAckSingleMessageDuplicateENIAttachmentMessageStartsTimer checks the ack for a single message +// and ensures that the ENI ack expiration timer is started +func TestInstanceENIAckSingleMessageDuplicateENIAttachmentMessageStartsTimer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + dataClient := data.NewNoopClient() + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + handler := 
newAttachInstanceENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, mockState, dataClient) + + // To check that the timer is started, we set the expiresAt value of the attachment to be a value in the past + // to trigger an error in attachment.StartTimer and checks the error + expiresAt := time.Unix(time.Now().Unix()-1, 0) + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + }) + gomock.InOrder( + // Sending an attachment with ExpiresAt set in the past results in an + // error in starting the timer. + // Ensuring that statemanager.Save() is not invoked should be a strong + // enough check to ensure that the timer was started (since StartTimer would be + // the only place to return error) + mockState.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ExpiresAt: expiresAt}, true), + ) + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + AttachmentArn: aws.String("attachmentarn"), + } + message := &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + // Expect an error starting the timer because of <=0 duration + err := handler.handleSingleMessage(message) + assert.Error(t, err) + ackSent.Wait() +} + +// TestInstanceENIAckHappyPath tests the happy path for a typical AttachInstanceNetworkInterfacesMessage +func TestInstanceENIAckHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx := context.TODO() + taskEngineState := dockerstate.NewTaskEngineState() + dataClient := data.NewNoopClient() + + mockWSClient 
:= mock_wsclient.NewMockClientServer(ctrl) + handler := newAttachInstanceENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, taskEngineState, dataClient) + + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + handler.stop() + }) + + go handler.start() + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + } + message := &ecsacs.AttachInstanceNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + handler.messageBuffer <- message + + ackSent.Wait() + select { + case <-handler.ctx.Done(): + } +} diff --git a/agent/acs/handler/attach_task_eni_handler.go b/agent/acs/handler/attach_task_eni_handler.go new file mode 100644 index 00000000000..5af793b5294 --- /dev/null +++ b/agent/acs/handler/attach_task_eni_handler.go @@ -0,0 +1,161 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handler + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + "context" +) + +// attachTaskENIHandler handles task ENI attach operation for the ACS client +type attachTaskENIHandler struct { + messageBuffer chan *ecsacs.AttachTaskNetworkInterfacesMessage + ctx context.Context + cancel context.CancelFunc + cluster *string + containerInstance *string + acsClient wsclient.ClientServer + state dockerstate.TaskEngineState + dataClient data.Client +} + +// newAttachTaskENIHandler returns an instance of the attachENIHandler struct +func newAttachTaskENIHandler(ctx context.Context, + cluster string, + containerInstanceArn string, + acsClient wsclient.ClientServer, + taskEngineState dockerstate.TaskEngineState, + dataClient data.Client) attachTaskENIHandler { + + // Create a cancelable context from the parent context + derivedContext, cancel := context.WithCancel(ctx) + return attachTaskENIHandler{ + messageBuffer: make(chan *ecsacs.AttachTaskNetworkInterfacesMessage), + ctx: derivedContext, + cancel: cancel, + cluster: aws.String(cluster), + containerInstance: aws.String(containerInstanceArn), + acsClient: acsClient, + state: taskEngineState, + dataClient: dataClient, + } +} + +// handlerFunc returns a function to enqueue requests onto attachENIHandler buffer +func (attachTaskENIHandler *attachTaskENIHandler) handlerFunc() func(message *ecsacs.AttachTaskNetworkInterfacesMessage) { + return func(message *ecsacs.AttachTaskNetworkInterfacesMessage) { + attachTaskENIHandler.messageBuffer <- message + } +} + +// start invokes handleMessages to ack each enqueued request +func (attachTaskENIHandler *attachTaskENIHandler) start() { + go 
attachTaskENIHandler.handleMessages() +} + +// stop is used to invoke a cancellation function +func (attachTaskENIHandler *attachTaskENIHandler) stop() { + attachTaskENIHandler.cancel() +} + +// handleMessages handles each message one at a time +func (attachTaskENIHandler *attachTaskENIHandler) handleMessages() { + for { + select { + case <-attachTaskENIHandler.ctx.Done(): + return + case message := <-attachTaskENIHandler.messageBuffer: + if err := attachTaskENIHandler.handleSingleMessage(message); err != nil { + seelog.Warnf("Unable to handle ENI Attachment message [%s]: %v", message.String(), err) + } + } + } +} + +// handleSingleMessage acks the message received +func (attachTaskENIHandler *attachTaskENIHandler) handleSingleMessage(message *ecsacs.AttachTaskNetworkInterfacesMessage) error { + receivedAt := time.Now() + // Validate fields in the message + if err := validateAttachTaskNetworkInterfacesMessage(message); err != nil { + return errors.Wrapf(err, + "attach eni message handler: error validating AttachTaskNetworkInterface message received from ECS") + } + + // Send ACK + go sendAck(attachTaskENIHandler.acsClient, message.ClusterArn, message.ContainerInstanceArn, message.MessageId) + + // Handle the attachment + attachmentARN := aws.StringValue(message.ElasticNetworkInterfaces[0].AttachmentArn) + taskARN := aws.StringValue(message.TaskArn) + mac := aws.StringValue(message.ElasticNetworkInterfaces[0].MacAddress) + expiresAt := receivedAt.Add(time.Duration(aws.Int64Value(message.WaitTimeoutMs)) * time.Millisecond) + return handleENIAttachment(apieni.ENIAttachmentTypeTaskENI, attachmentARN, taskARN, mac, expiresAt, attachTaskENIHandler.state, attachTaskENIHandler.dataClient) +} + +// validateAttachTaskNetworkInterfacesMessage performs validation checks on the +// AttachTaskNetworkInterfacesMessage +func validateAttachTaskNetworkInterfacesMessage(message *ecsacs.AttachTaskNetworkInterfacesMessage) error { + if message == nil { + return errors.Errorf("attach 
eni handler validation: empty AttachTaskNetworkInterface message received from ECS") + } + + messageId := aws.StringValue(message.MessageId) + if messageId == "" { + return errors.Errorf("attach eni handler validation: message id not set in AttachTaskNetworkInterface message received from ECS") + } + + clusterArn := aws.StringValue(message.ClusterArn) + if clusterArn == "" { + return errors.Errorf("attach eni handler validation: clusterArn not set in AttachTaskNetworkInterface message received from ECS") + } + + containerInstanceArn := aws.StringValue(message.ContainerInstanceArn) + if containerInstanceArn == "" { + return errors.Errorf("attach eni handler validation: containerInstanceArn not set in AttachTaskNetworkInterface message received from ECS") + } + + enis := message.ElasticNetworkInterfaces + if len(enis) != 1 { + return errors.Errorf("attach eni handler validation: incorrect number of ENIs in AttachTaskNetworkInterface message received from ECS. Obtained %d", len(enis)) + } + + eni := enis[0] + if aws.StringValue(eni.MacAddress) == "" { + return errors.Errorf("attach eni handler validation: MACAddress not listed in AttachTaskNetworkInterface message received from ECS") + } + + taskArn := aws.StringValue(message.TaskArn) + if taskArn == "" { + return errors.Errorf("attach eni handler validation: taskArn not set in AttachTaskNetworkInterface message received from ECS") + } + + timeout := aws.Int64Value(message.WaitTimeoutMs) + if timeout <= 0 { + return errors.Errorf("attach eni handler validation: invalid timeout listed in AttachTaskNetworkInterface message received from ECS") + + } + + return nil +} diff --git a/agent/acs/handler/attach_task_eni_handler_test.go b/agent/acs/handler/attach_task_eni_handler_test.go new file mode 100644 index 00000000000..1626e333d6d --- /dev/null +++ b/agent/acs/handler/attach_task_eni_handler_test.go @@ -0,0 +1,348 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package handler + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + eniMessageId = "123" + randomMAC = "00:0a:95:9d:68:16" + waitTimeoutMillis = 1000 +) + +// TestAttachENIMessageWithNoMessageId checks the validator against an +// AttachTaskNetworkInterfacesMessage without a messageId +func TestAttachENIMessageWithNoMessageId(t *testing.T) { + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithNoClusterArn checks the validator against an +// AttachTaskNetworkInterfacesMessage without a ClusterArn +func TestAttachENIMessageWithNoClusterArn(t *testing.T) { + message := 
&ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithNoContainerInstanceArn checks the validator against an +// AttachTaskNetworkInterfacesMessage without a ContainerInstanceArn +func TestAttachENIMessageWithNoContainerInstanceArn(t *testing.T) { + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{}, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithNoInterfaces checks the validator against an +// AttachTaskNetworkInterfacesMessage without any interface +func TestAttachENIMessageWithNoInterfaces(t *testing.T) { + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithMultipleInterfaceschecks checks the validator against an +// AttachTaskNetworkInterfacesMessage with multiple interfaces +func TestAttachENIMessageWithMultipleInterfaces(t *testing.T) { + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + MacAddress: aws.String(randomMAC), + Ec2Id: aws.String("1"), + } + mockNetInterface2 := ecsacs.ElasticNetworkInterface{ + MacAddress: aws.String(randomMAC), + Ec2Id: aws.String("2"), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + 
MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + &mockNetInterface2, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithMissingNetworkDetails checks the validator against an +// AttachTaskNetworkInterfacesMessage without network details +func TestAttachENIMessageWithMissingNetworkDetails(t *testing.T) { + mockNetInterface1 := ecsacs.ElasticNetworkInterface{} + + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithMissingMACAddress checks the validator against an +// AttachTaskNetworkInterfacesMessage without a MAC address +func TestAttachENIMessageWithMissingMACAddress(t *testing.T) { + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TODO: +// * Add TaskArn + Timeout Tests + +// TestAttachENIMessageWithMissingTaskArn checks the 
validator against an +// AttachTaskNetworkInterfacesMessage without a task ARN +func TestAttachENIMessageWithMissingTaskArn(t *testing.T) { + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestAttachENIMessageWithMissingTimeout checks the validator against an +// AttachTaskNetworkInterfacesMessage without a wait timeout +func TestAttachENIMessageWithMissingTimeout(t *testing.T) { + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + } + + err := validateAttachTaskNetworkInterfacesMessage(message) + assert.Error(t, err) +} + +// TestENIAckSingleMessage checks the ack for a single message +func TestENIAckSingleMessage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngineState := dockerstate.NewTaskEngineState() + dataClient := data.NewNoopClient() + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + eniAttachHandler := newAttachTaskENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, taskEngineState, dataClient) + + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + 
assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + }) + + go eniAttachHandler.start() + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + AttachmentArn: aws.String("attachmentarn"), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + eniAttachHandler.messageBuffer <- message + ackSent.Wait() + eniAttachHandler.stop() +} + +// TestENIAckSingleMessageDuplicateENIAttachmentMessageStartsTimer checks the ack for a single message +// and ensures that the ENI ack expiration timer is started +func TestENIAckSingleMessageDuplicateENIAttachmentMessageStartsTimer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + dataClient := data.NewNoopClient() + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + eniAttachHandler := newAttachTaskENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, mockState, dataClient) + + // Set expiresAt to a value in the past + expiresAt := time.Unix(time.Now().Unix()-1, 0) + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + }) + gomock.InOrder( + // Sending an attachment with ExpiresAt set in the past results in an + // error in starting the timer. 
+ // Ensuring that statemanager.Save() is not invoked should be a strong + // enough check to ensure that the timer was started + mockState.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ExpiresAt: expiresAt}, true), + ) + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + AttachmentArn: aws.String("attachmentarn"), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + // Expect an error starting the timer because of <=0 duration + err := eniAttachHandler.handleSingleMessage(message) + assert.Error(t, err) + ackSent.Wait() +} + +// TestENIAckHappyPath tests the happy path for a typical AttachTaskNetworkInterfacesMessage +func TestENIAckHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx := context.TODO() + taskEngineState := dockerstate.NewTaskEngineState() + dataClient := data.NewNoopClient() + + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + eniAttachHandler := newAttachTaskENIHandler(ctx, clusterName, containerInstanceArn, mockWSClient, taskEngineState, dataClient) + + var ackSent sync.WaitGroup + ackSent.Add(1) + mockWSClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + assert.Equal(t, aws.StringValue(ackRequest.MessageId), eniMessageId) + ackSent.Done() + eniAttachHandler.stop() + }) + + go eniAttachHandler.start() + + mockNetInterface1 := ecsacs.ElasticNetworkInterface{ + Ec2Id: aws.String("1"), + MacAddress: aws.String(randomMAC), + } + message := &ecsacs.AttachTaskNetworkInterfacesMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + 
ContainerInstanceArn: aws.String(containerInstanceArn), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + &mockNetInterface1, + }, + TaskArn: aws.String(taskArn), + WaitTimeoutMs: aws.Int64(waitTimeoutMillis), + } + + eniAttachHandler.messageBuffer <- message + + ackSent.Wait() + select { + case <-eniAttachHandler.ctx.Done(): + } +} diff --git a/agent/acs/handler/error.go b/agent/acs/handler/error.go new file mode 100644 index 00000000000..0af9f7d9d12 --- /dev/null +++ b/agent/acs/handler/error.go @@ -0,0 +1,22 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package handler + +type UnrecognizedTaskError struct { + err error +} + +func (err UnrecognizedTaskError) Error() string { + return "UnrecognizedTaskError: Error loading task - " + err.err.Error() +} diff --git a/agent/acs/handler/payload_handler.go b/agent/acs/handler/payload_handler.go new file mode 100644 index 00000000000..8d82bdabcf4 --- /dev/null +++ b/agent/acs/handler/payload_handler.go @@ -0,0 +1,384 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "context" + "fmt" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/api" + apiappmesh "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/eventhandler" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" +) + +// payloadRequestHandler represents the payload operation for the ACS client +type payloadRequestHandler struct { + // messageBuffer is used to process PayloadMessages received from the server + messageBuffer chan *ecsacs.PayloadMessage + // ackRequest is used to send acks to the backend + ackRequest chan string + ctx context.Context + taskEngine engine.TaskEngine + ecsClient api.ECSClient + dataClient data.Client + taskHandler *eventhandler.TaskHandler + // cancel is used to stop go routines started by start() method + cancel context.CancelFunc + cluster string + containerInstanceArn string + acsClient wsclient.ClientServer + refreshHandler refreshCredentialsHandler + credentialsManager credentials.Manager + latestSeqNumberTaskManifest *int64 +} + +// newPayloadRequestHandler returns a new payloadRequestHandler object +func newPayloadRequestHandler( + ctx context.Context, + taskEngine engine.TaskEngine, + ecsClient api.ECSClient, + cluster string, + containerInstanceArn string, + acsClient wsclient.ClientServer, + 
dataClient data.Client, + refreshHandler refreshCredentialsHandler, + credentialsManager credentials.Manager, + taskHandler *eventhandler.TaskHandler, seqNumTaskManifest *int64) payloadRequestHandler { + // Create a cancelable context from the parent context + derivedContext, cancel := context.WithCancel(ctx) + return payloadRequestHandler{ + messageBuffer: make(chan *ecsacs.PayloadMessage, payloadMessageBufferSize), + ackRequest: make(chan string, payloadMessageBufferSize), + taskEngine: taskEngine, + ecsClient: ecsClient, + dataClient: dataClient, + taskHandler: taskHandler, + ctx: derivedContext, + cancel: cancel, + cluster: cluster, + containerInstanceArn: containerInstanceArn, + acsClient: acsClient, + refreshHandler: refreshHandler, + credentialsManager: credentialsManager, + latestSeqNumberTaskManifest: seqNumTaskManifest, + } +} + +// handlerFunc returns the request handler function for the ecsacs.PayloadMessage type +func (payloadHandler *payloadRequestHandler) handlerFunc() func(payload *ecsacs.PayloadMessage) { + // return a function that just enqueues PayloadMessages into the message buffer + return func(payload *ecsacs.PayloadMessage) { + payloadHandler.messageBuffer <- payload + } +} + +// start invokes go routines to: +// 1. handle messages in the payload message buffer +// 2. handle ack requests to be sent to ACS +func (payloadHandler *payloadRequestHandler) start() { + go payloadHandler.handleMessages() + go payloadHandler.sendAcks() +} + +// stop cancels the context being used by the payload handler. 
This is used +// to stop the go routines started by 'start()' +func (payloadHandler *payloadRequestHandler) stop() { + payloadHandler.cancel() +} + +// sendAcks sends ack requests to ACS +func (payloadHandler *payloadRequestHandler) sendAcks() { + for { + select { + case mid := <-payloadHandler.ackRequest: + payloadHandler.ackMessageId(mid) + case <-payloadHandler.ctx.Done(): + return + } + } +} + +// ackMessageId sends an AckRequest for a message id +func (payloadHandler *payloadRequestHandler) ackMessageId(messageID string) { + seelog.Debugf("Acking payload message id: %s", messageID) + err := payloadHandler.acsClient.MakeRequest(&ecsacs.AckRequest{ + Cluster: aws.String(payloadHandler.cluster), + ContainerInstance: aws.String(payloadHandler.containerInstanceArn), + MessageId: aws.String(messageID), + }) + if err != nil { + seelog.Warnf("Error 'ack'ing request with messageID: %s, error: %v", messageID, err) + } +} + +// handleMessages processes payload messages in the payload message buffer in-order +func (payloadHandler *payloadRequestHandler) handleMessages() { + for { + select { + case payload := <-payloadHandler.messageBuffer: + payloadHandler.handleSingleMessage(payload) + case <-payloadHandler.ctx.Done(): + return + } + } +} + +// handleSingleMessage processes a single payload message. It adds tasks in the message to the task engine +// An error is returned if the message was not handled correctly. The error is being used only for testing +// today. In the future, it could be used for doing more interesting things. 
+func (payloadHandler *payloadRequestHandler) handleSingleMessage(payload *ecsacs.PayloadMessage) error { + if aws.StringValue(payload.MessageId) == "" { + seelog.Criticalf("Received a payload with no message id") + return fmt.Errorf("received a payload with no message id") + } + seelog.Debugf("Received payload message, message id: %s", aws.StringValue(payload.MessageId)) + credentialsAcks, allTasksHandled := payloadHandler.addPayloadTasks(payload) + + // Update latestSeqNumberTaskManifest for it to get updated in state file + if payloadHandler.latestSeqNumberTaskManifest != nil && payload.SeqNum != nil && + *payloadHandler.latestSeqNumberTaskManifest < *payload.SeqNum { + + *payloadHandler.latestSeqNumberTaskManifest = *payload.SeqNum + } + + if !allTasksHandled { + return fmt.Errorf("did not handle all tasks") + } + + go func() { + // Throw the ack in async; it doesn't really matter all that much and this is blocking handling more tasks. + for _, credentialsAck := range credentialsAcks { + payloadHandler.refreshHandler.ackMessage(credentialsAck) + } + payloadHandler.ackRequest <- *payload.MessageId + }() + + return nil +} + +// addPayloadTasks does validation on each task and, for all valid ones, adds +// it to the task engine. 
It returns a bool indicating if it could add every +// task to the taskEngine and a slice of credential ack requests +func (payloadHandler *payloadRequestHandler) addPayloadTasks(payload *ecsacs.PayloadMessage) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) { + // verify that we were able to work with all tasks in this payload so we know whether to ack the whole thing or not + allTasksOK := true + + validTasks := make([]*apitask.Task, 0, len(payload.Tasks)) + for _, task := range payload.Tasks { + if task == nil { + seelog.Criticalf("Received nil task for messageId: %s", aws.StringValue(payload.MessageId)) + allTasksOK = false + continue + } + apiTask, err := apitask.TaskFromACS(task, payload) + if err != nil { + payloadHandler.handleUnrecognizedTask(task, err, payload) + allTasksOK = false + continue + } + + if task.RoleCredentials != nil { + // The payload from ACS for the task has credentials for the + // task. Add those to the credentials manager and set the + // credentials id for the task as well + taskIAMRoleCredentials := credentials.IAMRoleCredentialsFromACS(task.RoleCredentials, credentials.ApplicationRoleType) + err = payloadHandler.credentialsManager.SetTaskCredentials( + &(credentials.TaskIAMRoleCredentials{ + ARN: aws.StringValue(task.Arn), + IAMRoleCredentials: taskIAMRoleCredentials, + })) + if err != nil { + payloadHandler.handleUnrecognizedTask(task, err, payload) + allTasksOK = false + continue + } + apiTask.SetCredentialsID(taskIAMRoleCredentials.CredentialsID) + } + + // Add ENI information to the task struct. 
+ for _, acsENI := range task.ElasticNetworkInterfaces { + eni, err := apieni.ENIFromACS(acsENI) + if err != nil { + payloadHandler.handleUnrecognizedTask(task, err, payload) + allTasksOK = false + continue + } + apiTask.AddTaskENI(eni) + } + + // Add the app mesh information to task struct + if task.ProxyConfiguration != nil { + appmesh, err := apiappmesh.AppMeshFromACS(task.ProxyConfiguration) + if err != nil { + payloadHandler.handleUnrecognizedTask(task, err, payload) + allTasksOK = false + continue + } + apiTask.SetAppMesh(appmesh) + } + + if task.ExecutionRoleCredentials != nil { + // The payload message contains execution credentials for the task. + // Add the credentials to the credentials manager and set the + // task executionCredentials id. + taskExecutionIAMRoleCredentials := credentials.IAMRoleCredentialsFromACS(task.ExecutionRoleCredentials, credentials.ExecutionRoleType) + err = payloadHandler.credentialsManager.SetTaskCredentials( + &(credentials.TaskIAMRoleCredentials{ + ARN: aws.StringValue(task.Arn), + IAMRoleCredentials: taskExecutionIAMRoleCredentials, + })) + if err != nil { + payloadHandler.handleUnrecognizedTask(task, err, payload) + allTasksOK = false + continue + } + apiTask.SetExecutionRoleCredentialsID(taskExecutionIAMRoleCredentials.CredentialsID) + } + + validTasks = append(validTasks, apiTask) + } + + // Add 'stop' transitions first to allow seqnum ordering to work out + // Because a 'start' sequence number should only be proceeded if all 'stop's + // of the same sequence number have completed, the 'start' events need to be + // added after the 'stop' events are there to block them. 
+ stoppedTasksCredentialsAcks, stoppedTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusNotStopped) + newTasksCredentialsAcks, newTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusStopped) + if !stoppedTasksAddedOK || !newTasksAddedOK { + allTasksOK = false + } + + // Construct a slice with credentials acks from all tasks + credentialsAcks := append(stoppedTasksCredentialsAcks, newTasksCredentialsAcks...) + return credentialsAcks, allTasksOK +} + +// addTasks adds the tasks to the task engine based on the skipAddTask condition +// This is used to add non-stopped tasks before adding stopped tasks +func (payloadHandler *payloadRequestHandler) addTasks(payload *ecsacs.PayloadMessage, tasks []*apitask.Task, skipAddTask skipAddTaskComparatorFunc) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) { + allTasksOK := true + var credentialsAcks []*ecsacs.IAMRoleCredentialsAckRequest + for _, task := range tasks { + if skipAddTask(task.GetDesiredStatus()) { + continue + } + payloadHandler.taskEngine.AddTask(task) + // Only need to save task to DB when its desired status is RUNNING (i.e. this is a new task that we are going + // to manage). When its desired status is STOPPED, the task is already in the DB and the desired status change + // will be saved by task manager. 
+ if task.GetDesiredStatus() == apitaskstatus.TaskRunning { + err := payloadHandler.dataClient.SaveTask(task) + if err != nil { + seelog.Errorf("Failed to save data for task %s: %v", task.Arn, err) + allTasksOK = false + } + } + + ackCredentials := func(id string, description string) { + ack, err := payloadHandler.ackCredentials(payload.MessageId, id) + if err != nil { + allTasksOK = false + seelog.Errorf("Failed to acknowledge %s credentials for task: %s, err: %v", description, task.String(), err) + return + } + credentialsAcks = append(credentialsAcks, ack) + } + + // Generate an ack request for the credentials in the task, if the + // task is associated with an IAM role or the execution role + taskCredentialsID := task.GetCredentialsID() + if taskCredentialsID != "" { + ackCredentials(taskCredentialsID, "task iam role") + } + + taskExecutionCredentialsID := task.GetExecutionCredentialsID() + if taskExecutionCredentialsID != "" { + ackCredentials(taskExecutionCredentialsID, "task execution role") + } + } + return credentialsAcks, allTasksOK +} + +func (payloadHandler *payloadRequestHandler) ackCredentials(messageID *string, credentialsID string) (*ecsacs.IAMRoleCredentialsAckRequest, error) { + creds, ok := payloadHandler.credentialsManager.GetTaskCredentials(credentialsID) + if !ok { + return nil, fmt.Errorf("credentials could not be retrieved") + } else { + return &ecsacs.IAMRoleCredentialsAckRequest{ + MessageId: messageID, + Expiration: aws.String(creds.IAMRoleCredentials.Expiration), + CredentialsId: aws.String(creds.IAMRoleCredentials.CredentialsID), + }, nil + } +} + +// skipAddTaskComparatorFunc defines the function pointer that accepts task status +// and returns the boolean comparison result +type skipAddTaskComparatorFunc func(apitaskstatus.TaskStatus) bool + +// isTaskStatusStopped returns true if the task status == STOPPED +func isTaskStatusStopped(status apitaskstatus.TaskStatus) bool { + return status == apitaskstatus.TaskStopped +} + +// 
isTaskStatusNotStopped returns true if the task status != STOPPED +func isTaskStatusNotStopped(status apitaskstatus.TaskStatus) bool { + return status != apitaskstatus.TaskStopped +} + +// handleUnrecognizedTask handles unrecognized tasks by sending 'stopped' with +// a suitable reason to the backend +func (payloadHandler *payloadRequestHandler) handleUnrecognizedTask(task *ecsacs.Task, err error, payload *ecsacs.PayloadMessage) { + seelog.Warnf("Received unexpected acs message, messageID: %s, task: %v, err: %v", + aws.StringValue(payload.MessageId), aws.StringValue(task.Arn), err) + + if aws.StringValue(task.Arn) == "" { + seelog.Criticalf("Received task with no arn, messageId: %s", aws.StringValue(payload.MessageId)) + return + } + + // Only need to stop the task; it brings down the containers too. + taskEvent := api.TaskStateChange{ + TaskARN: *task.Arn, + Status: apitaskstatus.TaskStopped, + Reason: UnrecognizedTaskError{err}.Error(), + // The real task cannot be extracted from payload message, so we send an empty task. + // This is necessary because the task handler will not send an event whose + // Task is nil. + Task: &apitask.Task{}, + } + + payloadHandler.taskHandler.AddStateChangeEvent(taskEvent, payloadHandler.ecsClient) +} + +// clearAcks drains the ack request channel +func (payloadHandler *payloadRequestHandler) clearAcks() { + for { + select { + case <-payloadHandler.ackRequest: + default: + return + } + } +} diff --git a/agent/acs/handler/payload_handler_test.go b/agent/acs/handler/payload_handler_test.go new file mode 100644 index 00000000000..31e1adcd3dc --- /dev/null +++ b/agent/acs/handler/payload_handler_test.go @@ -0,0 +1,1032 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "sync" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventhandler" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + clusterName = "default" + containerInstanceArn = "instance" + payloadMessageId = "123" + testTaskARN = "arn:aws:ecs:us-west-2:1234567890:task/test-cluster/abc" +) + +// testHelper wraps all the object required for the test +type testHelper struct { + ctrl *gomock.Controller + payloadHandler payloadRequestHandler + mockTaskEngine *mock_engine.MockTaskEngine + ecsClient api.ECSClient + mockWsClient *mock_wsclient.MockClientServer + credentialsManager credentials.Manager + eventHandler *eventhandler.TaskHandler + ctx context.Context + cancel context.CancelFunc +} + +func setup(t *testing.T) 
*testHelper { + ctrl := gomock.NewController(t) + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + credentialsManager := credentials.NewManager() + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := eventhandler.NewTaskHandler(ctx, data.NewNoopClient(), nil, nil) + latestSeqNumberTaskManifest := int64(10) + + handler := newPayloadRequestHandler( + ctx, + taskEngine, + ecsClient, + clusterName, + containerInstanceArn, + mockWsClient, + data.NewNoopClient(), + refreshCredentialsHandler{}, + credentialsManager, + taskHandler, &latestSeqNumberTaskManifest) + + return &testHelper{ + ctrl: ctrl, + payloadHandler: handler, + mockTaskEngine: taskEngine, + ecsClient: ecsClient, + mockWsClient: mockWsClient, + credentialsManager: credentialsManager, + eventHandler: taskHandler, + ctx: ctx, + cancel: cancel, + } +} + +// TestHandlePayloadMessageWithNoMessageId tests that agent doesn't ack payload messages +// that do not contain message ids +func TestHandlePayloadMessageWithNoMessageId(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + // test adding a payload message without the MessageId field + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("t1"), + }, + }, + } + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.Error(t, err, "Expected error while adding a task with no message id") + + // test adding a payload message with blank MessageId + payloadMessage.MessageId = aws.String("") + err = tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.Error(t, err, "Expected error while adding a task with no message id") +} + +func TestHandlePayloadMessageSaveData(t *testing.T) { + testCases := []struct { + name string + taskDesiredStatus string + shouldSave bool + }{ + { + name: "task with desired status RUNNING is saved", + taskDesiredStatus: "RUNNING", + 
shouldSave: true, + }, + { + name: "task with desired status STOPPED is not saved", + taskDesiredStatus: "STOPPED", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + var ackRequested *ecsacs.AckRequest + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + ackRequested = ackRequest + tester.cancel() + }).Times(1) + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Times(1) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + tester.payloadHandler.dataClient = dataClient + + go tester.payloadHandler.start() + err := tester.payloadHandler.handleSingleMessage(&ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String(testTaskARN), + DesiredStatus: aws.String(tc.taskDesiredStatus), + }, + }, + MessageId: aws.String(payloadMessageId), + }) + assert.NoError(t, err) + + // Wait till we get an ack from the ackBuffer. + select { + case <-tester.ctx.Done(): + } + // Verify the message id acked + assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received message is not expected") + + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + if tc.shouldSave { + assert.Len(t, tasks, 1) + } else { + assert.Len(t, tasks, 0) + } + }) + } +} + +// TestHandlePayloadMessageSaveDataError tests that agent does not ack payload messages +// when state saver fails to save task into db. +func TestHandlePayloadMessageSaveDataError(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + // Save added task in the addedTask variable + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + addedTask = task + }).Times(1) + + tester.payloadHandler.dataClient = dataClient + + // Check if handleSingleMessage returns an error when we get error saving task data. 
+ err := tester.payloadHandler.handleSingleMessage(&ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("t1"), // Use an invalid task arn to trigger error on saving task. + DesiredStatus: aws.String("RUNNING"), + }, + }, + MessageId: aws.String(payloadMessageId), + }) + assert.Error(t, err, "Expected error while adding a task from statemanager") + + // We expect task to be added to the engine even though it couldn't be saved. + expectedTask := &apitask.Task{ + Arn: "t1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + + assert.Equal(t, addedTask, expectedTask, "added task is not expected") +} + +func newTestDataClient(t *testing.T) (data.Client, func()) { + testDir, err := ioutil.TempDir("", "agent_acs_handler_unit_test") + require.NoError(t, err) + + testClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} + +// TestHandlePayloadMessageAckedWhenTaskAdded tests if the handler generates an ack +// after processing a payload message. 
+func TestHandlePayloadMessageAckedWhenTaskAdded(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + addedTask = task + }).Times(1) + + var ackRequested *ecsacs.AckRequest + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + ackRequested = ackRequest + tester.cancel() + }).Times(1) + + go tester.payloadHandler.start() + + // Send a payload message + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("t1"), + }, + }, + MessageId: aws.String(payloadMessageId), + } + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err, "Error handling payload message") + + // Wait till we get an ack from the ackBuffer + select { + case <-tester.ctx.Done(): + } + // Verify the message id acked + assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received message is not expected") + + // Verify if task added == expected task + expectedTask := &apitask.Task{ + Arn: "t1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Equal(t, addedTask, expectedTask, "received task is not expected") +} + +// TestHandlePayloadMessageCredentialsAckedWhenTaskAdded tests if the handler generates +// an ack after processing a payload message when the payload message contains a task +// with an IAM Role. 
It also tests if the credentials ack is generated +func TestHandlePayloadMessageCredentialsAckedWhenTaskAdded(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + addedTask = task + }).Times(1) + + var payloadAckRequested *ecsacs.AckRequest + var taskCredentialsAckRequested *ecsacs.IAMRoleCredentialsAckRequest + // The payload message in the test consists of a task, with credentials set + // Record the credentials ack and the payload message ack + gomock.InOrder( + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + taskCredentialsAckRequested = ackRequest + }), + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + payloadAckRequested = ackRequest + // Cancel the context when the ack for the payload message is received + // This signals a successful workflow in the test + tester.cancel() + }), + ) + + refreshCredsHandler := newRefreshCredentialsHandler(tester.ctx, clusterName, containerInstanceArn, tester.mockWsClient, tester.credentialsManager, tester.mockTaskEngine) + defer refreshCredsHandler.clearAcks() + refreshCredsHandler.start() + tester.payloadHandler.refreshHandler = refreshCredsHandler + + go tester.payloadHandler.start() + + taskArn := "t1" + credentialsExpiration := "expiration" + credentialsRoleArn := "r1" + credentialsAccessKey := "akid" + credentialsSecretKey := "skid" + credentialsSessionToken := "token" + credentialsId := "credsid" + + // Send a payload message + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String(taskArn), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + AccessKeyId: aws.String(credentialsAccessKey), + Expiration: aws.String(credentialsExpiration), + RoleArn: aws.String(credentialsRoleArn), + SecretAccessKey: aws.String(credentialsSecretKey), + 
SessionToken: aws.String(credentialsSessionToken), + CredentialsId: aws.String(credentialsId), + }, + }, + }, + MessageId: aws.String(payloadMessageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstance), + } + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err, "error handling payload message") + + // Wait till we get an ack from the ackBuffer + select { + case <-tester.ctx.Done(): + } + + // Verify the message id acked + assert.Equal(t, aws.StringValue(payloadAckRequested.MessageId), payloadMessageId, "received message is not expected") + + // Verify the correctness of the task added to the engine and the + // credentials ack generated for it + expectedCredentialsAck := &ecsacs.IAMRoleCredentialsAckRequest{ + MessageId: aws.String(payloadMessageId), + Expiration: aws.String(credentialsExpiration), + CredentialsId: aws.String(credentialsId), + } + expectedCredentials := credentials.IAMRoleCredentials{ + AccessKeyID: credentialsAccessKey, + Expiration: credentialsExpiration, + RoleArn: credentialsRoleArn, + SecretAccessKey: credentialsSecretKey, + SessionToken: credentialsSessionToken, + CredentialsID: credentialsId, + } + err = validateTaskAndCredentials(taskCredentialsAckRequested, expectedCredentialsAck, addedTask, taskArn, expectedCredentials) + assert.NoError(t, err, "error validating added task or credentials ack for the same") +} + +// TestAddPayloadTaskAddsNonStoppedTasksAfterStoppedTasks tests if tasks with desired status +// 'RUNNING' are added after tasks with desired status 'STOPPED' +func TestAddPayloadTaskAddsNonStoppedTasksAfterStoppedTasks(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var tasksAddedToEngine []*apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + tasksAddedToEngine = append(tasksAddedToEngine, task) + }).Times(2) + + stoppedTaskArn := "stoppedTask" + runningTaskArn := "runningTask" + 
payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String(runningTaskArn), + DesiredStatus: aws.String("RUNNING"), + }, + { + Arn: aws.String(stoppedTaskArn), + DesiredStatus: aws.String("STOPPED"), + }, + }, + MessageId: aws.String(payloadMessageId), + } + + _, ok := tester.payloadHandler.addPayloadTasks(payloadMessage) + assert.True(t, ok) + assert.Len(t, tasksAddedToEngine, 2) + + // Verify if stopped task is added before running task + firstTaskAdded := tasksAddedToEngine[0] + assert.Equal(t, firstTaskAdded.Arn, stoppedTaskArn) + assert.Equal(t, firstTaskAdded.GetDesiredStatus(), apitaskstatus.TaskStopped) + + secondTaskAdded := tasksAddedToEngine[1] + assert.Equal(t, secondTaskAdded.Arn, runningTaskArn) + assert.Equal(t, secondTaskAdded.GetDesiredStatus(), apitaskstatus.TaskRunning) +} + +// TestPayloadBufferHandler tests if the async payloadBufferHandler routine +// acks messages after adding tasks +func TestPayloadBufferHandler(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + addedTask = task + }).Times(1) + + var ackRequested *ecsacs.AckRequest + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + ackRequested = ackRequest + tester.cancel() + }).Times(1) + + go tester.payloadHandler.start() + // Send a payload message to the payloadBufferChannel + taskArn := "t1" + tester.payloadHandler.messageBuffer <- &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String(taskArn), + }, + }, + MessageId: aws.String(payloadMessageId), + } + + // Wait till we get an ack + select { + case <-tester.ctx.Done(): + } + // Verify if payloadMessageId read from the ack buffer is correct + assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received task is not expected") + + // Verify if the task added to the engine is correct + 
expectedTask := &apitask.Task{ + Arn: taskArn, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Equal(t, addedTask, expectedTask, "received task is not expected") +} + +// TestPayloadBufferHandlerWithCredentials tests if the async payloadBufferHandler routine +// acks the payload message and credentials after adding tasks +func TestPayloadBufferHandlerWithCredentials(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + // The payload message in the test consists of two tasks, record both of them in + // the order in which they were added + var firstAddedTask *apitask.Task + var secondAddedTask *apitask.Task + gomock.InOrder( + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + firstAddedTask = task + }), + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + secondAddedTask = task + }), + ) + + // The payload message in the test consists of two tasks, with credentials set + // for both. 
Record the credentials' ack and the payload message ack + var payloadAckRequested *ecsacs.AckRequest + var firstTaskCredentialsAckRequested *ecsacs.IAMRoleCredentialsAckRequest + var secondTaskCredentialsAckRequested *ecsacs.IAMRoleCredentialsAckRequest + gomock.InOrder( + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + firstTaskCredentialsAckRequested = ackRequest + }), + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + secondTaskCredentialsAckRequested = ackRequest + }), + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + payloadAckRequested = ackRequest + // Cancel the context when the ack for the payload message is received + // This signals a successful workflow in the test + tester.cancel() + }), + ) + + refreshCredsHandler := newRefreshCredentialsHandler(tester.ctx, clusterName, containerInstanceArn, tester.mockWsClient, tester.credentialsManager, tester.mockTaskEngine) + defer refreshCredsHandler.clearAcks() + refreshCredsHandler.start() + tester.payloadHandler.refreshHandler = refreshCredsHandler + + go tester.payloadHandler.start() + + firstTaskArn := "t1" + firstTaskCredentialsExpiration := "expiration" + firstTaskCredentialsRoleArn := "r1" + firstTaskCredentialsAccessKey := "akid" + firstTaskCredentialsSecretKey := "skid" + firstTaskCredentialsSessionToken := "token" + firstTaskCredentialsId := "credsid1" + + secondTaskArn := "t2" + secondTaskCredentialsExpiration := "expirationSecond" + secondTaskCredentialsRoleArn := "r2" + secondTaskCredentialsAccessKey := "akid2" + secondTaskCredentialsSecretKey := "skid2" + secondTaskCredentialsSessionToken := "token2" + secondTaskCredentialsId := "credsid2" + + // Send a payload message to the payloadBufferChannel + tester.payloadHandler.messageBuffer <- &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: 
aws.String(firstTaskArn), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + AccessKeyId: aws.String(firstTaskCredentialsAccessKey), + Expiration: aws.String(firstTaskCredentialsExpiration), + RoleArn: aws.String(firstTaskCredentialsRoleArn), + SecretAccessKey: aws.String(firstTaskCredentialsSecretKey), + SessionToken: aws.String(firstTaskCredentialsSessionToken), + CredentialsId: aws.String(firstTaskCredentialsId), + }, + }, + { + Arn: aws.String(secondTaskArn), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + AccessKeyId: aws.String(secondTaskCredentialsAccessKey), + Expiration: aws.String(secondTaskCredentialsExpiration), + RoleArn: aws.String(secondTaskCredentialsRoleArn), + SecretAccessKey: aws.String(secondTaskCredentialsSecretKey), + SessionToken: aws.String(secondTaskCredentialsSessionToken), + CredentialsId: aws.String(secondTaskCredentialsId), + }, + }, + }, + MessageId: aws.String(payloadMessageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstance), + } + + // Wait till we get an ack + select { + case <-tester.ctx.Done(): + } + + // Verify if payloadMessageId read from the ack buffer is correct + assert.Equal(t, aws.StringValue(payloadAckRequested.MessageId), payloadMessageId, "received message is not expected") + + // Verify the correctness of the first task added to the engine and the + // credentials ack generated for it + expectedCredentialsAckForFirstTask := &ecsacs.IAMRoleCredentialsAckRequest{ + MessageId: aws.String(payloadMessageId), + Expiration: aws.String(firstTaskCredentialsExpiration), + CredentialsId: aws.String(firstTaskCredentialsId), + } + expectedCredentialsForFirstTask := credentials.IAMRoleCredentials{ + AccessKeyID: firstTaskCredentialsAccessKey, + Expiration: firstTaskCredentialsExpiration, + RoleArn: firstTaskCredentialsRoleArn, + SecretAccessKey: firstTaskCredentialsSecretKey, + SessionToken: firstTaskCredentialsSessionToken, + CredentialsID: firstTaskCredentialsId, + } + err := 
validateTaskAndCredentials(firstTaskCredentialsAckRequested, expectedCredentialsAckForFirstTask, firstAddedTask, firstTaskArn, expectedCredentialsForFirstTask) + assert.NoError(t, err, "error validating added task or credentials ack for the same") + + // Verify the correctness of the second task added to the engine and the + // credentials ack generated for it + expectedCredentialsAckForSecondTask := &ecsacs.IAMRoleCredentialsAckRequest{ + MessageId: aws.String(payloadMessageId), + Expiration: aws.String(secondTaskCredentialsExpiration), + CredentialsId: aws.String(secondTaskCredentialsId), + } + expectedCredentialsForSecondTask := credentials.IAMRoleCredentials{ + AccessKeyID: secondTaskCredentialsAccessKey, + Expiration: secondTaskCredentialsExpiration, + RoleArn: secondTaskCredentialsRoleArn, + SecretAccessKey: secondTaskCredentialsSecretKey, + SessionToken: secondTaskCredentialsSessionToken, + CredentialsID: secondTaskCredentialsId, + } + err = validateTaskAndCredentials(secondTaskCredentialsAckRequested, expectedCredentialsAckForSecondTask, secondAddedTask, secondTaskArn, expectedCredentialsForSecondTask) + assert.NoError(t, err, "error validating added task or credentials ack for the same") +} + +// TestAddPayloadTaskAddsExecutionRoles tests the payload handler will add +// execution role credentials to the credentials manager and the field in +// task and container are appropriately set +func TestAddPayloadTaskAddsExecutionRoles(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task *apitask.Task) { + addedTask = task + }) + + var ackRequested *ecsacs.AckRequest + var executionCredentialsAckRequested *ecsacs.IAMRoleCredentialsAckRequest + gomock.InOrder( + tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + executionCredentialsAckRequested = ackRequest + }), + 
tester.mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.AckRequest) { + ackRequested = ackRequest + tester.cancel() + }), + ) + refreshCredsHandler := newRefreshCredentialsHandler(tester.ctx, clusterName, containerInstanceArn, tester.mockWsClient, tester.credentialsManager, tester.mockTaskEngine) + defer refreshCredsHandler.clearAcks() + refreshCredsHandler.start() + + tester.payloadHandler.refreshHandler = refreshCredsHandler + go tester.payloadHandler.start() + taskArn := "t1" + credentialsExpiration := "expiration" + credentialsRoleArn := "r1" + credentialsAccessKey := "akid" + credentialsSecretKey := "skid" + credentialsSessionToken := "token" + credentialsID := "credsid" + + tester.payloadHandler.messageBuffer <- &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String(taskArn), + ExecutionRoleCredentials: &ecsacs.IAMRoleCredentials{ + AccessKeyId: aws.String(credentialsAccessKey), + Expiration: aws.String(credentialsExpiration), + RoleArn: aws.String(credentialsRoleArn), + SecretAccessKey: aws.String(credentialsSecretKey), + SessionToken: aws.String(credentialsSessionToken), + CredentialsId: aws.String(credentialsID), + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + // Wait till we get an ack + select { + case <-tester.ctx.Done(): + } + // Verify if payloadMessageId read from the ack buffer is correct + assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received message not expected") + assert.Equal(t, "credsid", addedTask.GetExecutionCredentialsID(), "task execution role credentials id mismatch") + + // Verify the credentials in the payload message was stored in the credentials manager + iamRoleCredentials, ok := tester.credentialsManager.GetTaskCredentials("credsid") + assert.True(t, ok, "execution role credentials not found in credentials manager") + assert.Equal(t, credentialsRoleArn, iamRoleCredentials.IAMRoleCredentials.RoleArn) + assert.Equal(t, taskArn, 
iamRoleCredentials.ARN) + assert.Equal(t, credentialsExpiration, iamRoleCredentials.IAMRoleCredentials.Expiration) + assert.Equal(t, credentialsAccessKey, iamRoleCredentials.IAMRoleCredentials.AccessKeyID) + assert.Equal(t, credentialsSecretKey, iamRoleCredentials.IAMRoleCredentials.SecretAccessKey) + assert.Equal(t, credentialsSessionToken, iamRoleCredentials.IAMRoleCredentials.SessionToken) + assert.Equal(t, credentialsID, iamRoleCredentials.IAMRoleCredentials.CredentialsID) + assert.Equal(t, credentialsID, *executionCredentialsAckRequested.CredentialsId) + assert.Equal(t, payloadMessageId, *executionCredentialsAckRequested.MessageId) +} + +// validateTaskAndCredentials compares a task and a credentials ack object +// against expected values. It returns an error if either of the +// comparisons fail +func validateTaskAndCredentials(taskCredentialsAck, expectedCredentialsAckForTask *ecsacs.IAMRoleCredentialsAckRequest, + addedTask *apitask.Task, + expectedTaskArn string, + expectedTaskCredentials credentials.IAMRoleCredentials) error { + if !reflect.DeepEqual(taskCredentialsAck, expectedCredentialsAckForTask) { + return fmt.Errorf("Mismatch between expected and received credentials ack requests, expected: %s, got: %s", expectedCredentialsAckForTask.String(), taskCredentialsAck.String()) + } + + expectedTask := &apitask.Task{ + Arn: expectedTaskArn, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + expectedTask.SetCredentialsID(expectedTaskCredentials.CredentialsID) + + if !reflect.DeepEqual(addedTask, expectedTask) { + return fmt.Errorf("Mismatch between expected and added tasks, expected: %v, added: %v", expectedTask, addedTask) + } + return nil +} + +func TestPayloadHandlerAddedENIToTask(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage :=
&ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("arn"), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + { + AttachmentArn: aws.String("arn"), + Ec2Id: aws.String("ec2id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + SubnetGatewayIpv4Address: aws.String("ipv4/20"), + MacAddress: aws.String("mac"), + }, + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + require.NoError(t, err) + + // Validate the added task has the eni information as expected + expectedENI := payloadMessage.Tasks[0].ElasticNetworkInterfaces[0] + taskeni := addedTask.GetPrimaryENI() + assert.Equal(t, aws.StringValue(expectedENI.Ec2Id), taskeni.ID) + assert.Equal(t, aws.StringValue(expectedENI.MacAddress), taskeni.MacAddress) + assert.Equal(t, 1, len(taskeni.IPV4Addresses)) + assert.Equal(t, 1, len(taskeni.IPV6Addresses)) + assert.Equal(t, aws.StringValue(expectedENI.Ipv4Addresses[0].PrivateAddress), taskeni.IPV4Addresses[0].Address) + assert.Equal(t, aws.StringValue(expectedENI.Ipv6Addresses[0].Address), taskeni.IPV6Addresses[0].Address) +} + +func TestPayloadHandlerAddedAppMeshToTask(t *testing.T) { + appMeshType := "APPMESH" + mockEgressIgnoredIP1 := "128.0.0.1" + mockEgressIgnoredIP2 := "171.1.3.24" + mockAppPort1 := "8000" + mockAppPort2 := "8001" + mockEgressIgnoredPort1 := "13000" + mockEgressIgnoredPort2 := "13001" + mockIgnoredUID := "1337" + mockIgnoredGID := "2339" + mockProxyIngressPort := "9000" + mockProxyEgressPort := "9001" + mockAppPorts := mockAppPort1 + "," + mockAppPort2 + mockEgressIgnoredIPs := mockEgressIgnoredIP1 + "," + mockEgressIgnoredIP2 + mockEgressIgnoredPorts := mockEgressIgnoredPort1 + "," + mockEgressIgnoredPort2 + mockContainerName := "testEnvoyContainer" + 
taskMetadataEndpointIP := "169.254.170.2" + instanceMetadataEndpointIP := "169.254.169.254" + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("arn"), + ProxyConfiguration: &ecsacs.ProxyConfiguration{ + Type: aws.String(appMeshType), + Properties: map[string]*string{ + "IgnoredUID": aws.String(mockIgnoredUID), + "IgnoredGID": aws.String(mockIgnoredGID), + "ProxyIngressPort": aws.String(mockProxyIngressPort), + "ProxyEgressPort": aws.String(mockProxyEgressPort), + "AppPorts": aws.String(mockAppPorts), + "EgressIgnoredIPs": aws.String(mockEgressIgnoredIPs), + "EgressIgnoredPorts": aws.String(mockEgressIgnoredPorts), + }, + ContainerName: aws.String(mockContainerName), + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err) + + // Validate the added task has the app mesh information as expected + appMesh := addedTask.GetAppMesh() + assert.NotNil(t, appMesh) + assert.Equal(t, mockContainerName, appMesh.ContainerName) + assert.Equal(t, mockIgnoredUID, appMesh.IgnoredUID) + assert.Equal(t, mockIgnoredGID, appMesh.IgnoredGID) + assert.Equal(t, mockProxyIngressPort, appMesh.ProxyIngressPort) + assert.Equal(t, mockProxyEgressPort, appMesh.ProxyEgressPort) + assert.Equal(t, 2, len(appMesh.AppPorts)) + assert.Equal(t, mockAppPort1, appMesh.AppPorts[0]) + assert.Equal(t, mockAppPort2, appMesh.AppPorts[1]) + assert.Equal(t, 4, len(appMesh.EgressIgnoredIPs)) + assert.Equal(t, mockEgressIgnoredIP1, appMesh.EgressIgnoredIPs[0]) + assert.Equal(t, mockEgressIgnoredIP2, appMesh.EgressIgnoredIPs[1]) + assert.Equal(t, taskMetadataEndpointIP, appMesh.EgressIgnoredIPs[2]) + assert.Equal(t, instanceMetadataEndpointIP, appMesh.EgressIgnoredIPs[3]) + assert.Equal(t,
2, len(appMesh.EgressIgnoredPorts)) + assert.Equal(t, mockEgressIgnoredPort1, appMesh.EgressIgnoredPorts[0]) + assert.Equal(t, mockEgressIgnoredPort2, appMesh.EgressIgnoredPorts[1]) +} + +func TestPayloadHandlerAddedENITrunkToTask(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("arn"), + ElasticNetworkInterfaces: []*ecsacs.ElasticNetworkInterface{ + { + InterfaceAssociationProtocol: aws.String(eni.VLANInterfaceAssociationProtocol), + AttachmentArn: aws.String("arn"), + Ec2Id: aws.String("ec2id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + SubnetGatewayIpv4Address: aws.String("ipv4/20"), + MacAddress: aws.String("mac"), + InterfaceVlanProperties: &ecsacs.NetworkInterfaceVlanProperties{ + VlanId: aws.String("12345"), + TrunkInterfaceMacAddress: aws.String("mac"), + }, + }, + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + require.NoError(t, err) + + taskeni := addedTask.GetPrimaryENI() + + assert.Equal(t, taskeni.InterfaceAssociationProtocol, eni.VLANInterfaceAssociationProtocol) + assert.Equal(t, taskeni.InterfaceVlanProperties.TrunkInterfaceMacAddress, "mac") + assert.Equal(t, taskeni.InterfaceVlanProperties.VlanID, "12345") +} + +func TestPayloadHandlerAddedECRAuthData(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + 
Arn: aws.String("arn"), + Containers: []*ecsacs.Container{ + { + RegistryAuthentication: &ecsacs.RegistryAuthenticationData{ + Type: aws.String("ecr"), + EcrAuthData: &ecsacs.ECRAuthData{ + Region: aws.String("us-west-2"), + RegistryId: aws.String("registry-id"), + }, + }, + }, + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err) + + // Validate the added task has the ECRAuthData information as expected + expected := payloadMessage.Tasks[0].Containers[0].RegistryAuthentication + actual := addedTask.Containers[0].RegistryAuthentication + + assert.NotNil(t, actual.ECRAuthData) + assert.Nil(t, actual.ASMAuthData) + + assert.Equal(t, aws.StringValue(expected.Type), actual.Type) + assert.Equal(t, aws.StringValue(expected.EcrAuthData.Region), actual.ECRAuthData.Region) + assert.Equal(t, aws.StringValue(expected.EcrAuthData.RegistryId), actual.ECRAuthData.RegistryID) +} + +func TestPayloadHandlerAddedASMAuthData(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("arn"), + Containers: []*ecsacs.Container{ + { + RegistryAuthentication: &ecsacs.RegistryAuthenticationData{ + Type: aws.String("asm"), + AsmAuthData: &ecsacs.ASMAuthData{ + Region: aws.String("us-west-2"), + CredentialsParameter: aws.String("asm-arn"), + }, + }, + }, + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err) + + // Validate the added task has the ASMAuthData information as expected + expected := payloadMessage.Tasks[0].Containers[0].RegistryAuthentication + actual := addedTask.Containers[0].RegistryAuthentication + + assert.NotNil(t, actual.ASMAuthData) 
+ assert.Nil(t, actual.ECRAuthData) + + assert.Equal(t, aws.StringValue(expected.Type), actual.Type) + assert.Equal(t, aws.StringValue(expected.AsmAuthData.Region), actual.ASMAuthData.Region) + assert.Equal(t, aws.StringValue(expected.AsmAuthData.CredentialsParameter), actual.ASMAuthData.CredentialsParameter) +} + +func TestHandleUnrecognizedTask(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + arn := "arn" + ecsacsTask := &ecsacs.Task{Arn: &arn} + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ecsacsTask}, + MessageId: aws.String(payloadMessageId), + } + + mockECSACSClient := mock_api.NewMockECSClient(tester.ctrl) + taskHandler := eventhandler.NewTaskHandler(tester.ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), mockECSACSClient) + tester.payloadHandler.taskHandler = taskHandler + + wait := &sync.WaitGroup{} + wait.Add(1) + + mockECSACSClient.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.NotNil(t, change.Task) + wait.Done() + }) + + tester.payloadHandler.handleUnrecognizedTask(ecsacsTask, errors.New("test error"), payloadMessage) + wait.Wait() +} + +func TestPayloadHandlerAddedFirelensData(t *testing.T) { + tester := setup(t) + defer tester.ctrl.Finish() + + var addedTask *apitask.Task + tester.mockTaskEngine.EXPECT().AddTask(gomock.Any()).Do( + func(task *apitask.Task) { + addedTask = task + }) + + payloadMessage := &ecsacs.PayloadMessage{ + Tasks: []*ecsacs.Task{ + { + Arn: aws.String("arn"), + Containers: []*ecsacs.Container{ + { + FirelensConfiguration: &ecsacs.FirelensConfiguration{ + Type: aws.String("fluentd"), + Options: map[string]*string{ + "enable-ecs-log-metadata": aws.String("true"), + }, + }, + }, + }, + }, + }, + MessageId: aws.String(payloadMessageId), + } + + err := tester.payloadHandler.handleSingleMessage(payloadMessage) + assert.NoError(t, err) + + // Validate the pieces of the Firelens container + expected := 
payloadMessage.Tasks[0].Containers[0].FirelensConfiguration + actual := addedTask.Containers[0].FirelensConfig + + assert.Equal(t, aws.StringValue(expected.Type), actual.Type) + assert.NotNil(t, actual.Options) + assert.Equal(t, aws.StringValue(expected.Options["enable-ecs-log-metadata"]), actual.Options["enable-ecs-log-metadata"]) +} diff --git a/agent/acs/handler/refresh_credentials_handler.go b/agent/acs/handler/refresh_credentials_handler.go new file mode 100644 index 00000000000..5d3b796a233 --- /dev/null +++ b/agent/acs/handler/refresh_credentials_handler.go @@ -0,0 +1,215 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package handler + +import ( + "fmt" + + "context" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" +) + +// refreshCredentialsHandler represents the refresh credentials operation for the ACS client +type refreshCredentialsHandler struct { + // messageBuffer is used to process IAMRoleCredentialsMessages received from the server + messageBuffer chan *ecsacs.IAMRoleCredentialsMessage + // ackRequest is used to send acks to the backend + ackRequest chan *ecsacs.IAMRoleCredentialsAckRequest + ctx context.Context + // cancel is used to stop go routines started by start() method + cancel context.CancelFunc + cluster *string + containerInstance *string + acsClient wsclient.ClientServer + credentialsManager credentials.Manager + taskEngine engine.TaskEngine +} + +// newRefreshCredentialsHandler returns a new refreshCredentialsHandler object +func newRefreshCredentialsHandler(ctx context.Context, cluster string, containerInstanceArn string, acsClient wsclient.ClientServer, credentialsManager credentials.Manager, taskEngine engine.TaskEngine) refreshCredentialsHandler { + // Create a cancelable context from the parent context + derivedContext, cancel := context.WithCancel(ctx) + return refreshCredentialsHandler{ + messageBuffer: make(chan *ecsacs.IAMRoleCredentialsMessage), + ackRequest: make(chan *ecsacs.IAMRoleCredentialsAckRequest), + ctx: derivedContext, + cancel: cancel, + cluster: aws.String(cluster), + containerInstance: aws.String(containerInstanceArn), + acsClient: acsClient, + credentialsManager: credentialsManager, + taskEngine: taskEngine, + } +} + +// handlerFunc returns the request handler function for the ecsacs.IAMRoleCredentialsMessage +func (refreshHandler *refreshCredentialsHandler) handlerFunc() func(message 
*ecsacs.IAMRoleCredentialsMessage) { + // return a function that just enqueues IAMRoleCredentials messages into the message buffer + return func(message *ecsacs.IAMRoleCredentialsMessage) { + refreshHandler.messageBuffer <- message + } +} + +// start invokes go routines to: +// 1. handle messages in the refresh credentials message buffer +// 2. handle ack requests to be sent to ACS +func (refreshHandler *refreshCredentialsHandler) start() { + go refreshHandler.handleMessages() + go refreshHandler.sendAcks() +} + +// stop cancels the context being used by the refresh credentials handler. This is used +// to stop the go routines started by 'start()' +func (refreshHandler *refreshCredentialsHandler) stop() { + refreshHandler.cancel() +} + +// sendAcks sends ack requests to ACS +func (refreshHandler *refreshCredentialsHandler) sendAcks() { + for { + select { + case ack := <-refreshHandler.ackRequest: + refreshHandler.ackMessage(ack) + case <-refreshHandler.ctx.Done(): + return + } + } +} + +// ackMessageId sends an IAMRoleCredentialsAckRequest to the backend +func (refreshHandler *refreshCredentialsHandler) ackMessage(ack *ecsacs.IAMRoleCredentialsAckRequest) { + err := refreshHandler.acsClient.MakeRequest(ack) + if err != nil { + seelog.Warnf("Error 'ack'ing request with messageID: %s, error: %v", aws.StringValue(ack.MessageId), err) + } + seelog.Debugf("Acking credentials message: %s", ack.String()) +} + +// handleMessages processes refresh credentials messages in the buffer in-order +func (refreshHandler *refreshCredentialsHandler) handleMessages() { + for { + select { + case message := <-refreshHandler.messageBuffer: + refreshHandler.handleSingleMessage(message) + case <-refreshHandler.ctx.Done(): + return + } + } +} + +// handleSingleMessage processes a single refresh credentials message. 
+func (refreshHandler *refreshCredentialsHandler) handleSingleMessage(message *ecsacs.IAMRoleCredentialsMessage) error { + // Validate fields in the message + err := validateIAMRoleCredentialsMessage(message) + if err != nil { + seelog.Errorf("Error validating credentials message: %v", err) + return err + } + taskArn := aws.StringValue(message.TaskArn) + messageId := aws.StringValue(message.MessageId) + task, ok := refreshHandler.taskEngine.GetTaskByArn(taskArn) + if !ok { + seelog.Errorf("Task not found in the engine for the arn in credentials message, arn: %s, messageId: %s", taskArn, messageId) + return fmt.Errorf("task not found in the engine for the arn in credentials message, arn: %s", taskArn) + } + + roleType := aws.StringValue(message.RoleType) + if !validRoleType(roleType) { + seelog.Errorf("Unknown RoleType for task in credentials message, roleType: %s arn: %s, messageId: %s", roleType, taskArn, messageId) + } else { + err = refreshHandler.credentialsManager.SetTaskCredentials( + &(credentials.TaskIAMRoleCredentials{ + ARN: taskArn, + IAMRoleCredentials: credentials.IAMRoleCredentialsFromACS(message.RoleCredentials, roleType), + })) + if err != nil { + seelog.Errorf("Unable to update credentials for task, err: %v messageId: %s", err, messageId) + return fmt.Errorf("unable to update credentials %v", err) + } + + if roleType == credentials.ApplicationRoleType { + task.SetCredentialsID(aws.StringValue(message.RoleCredentials.CredentialsId)) + } + if roleType == credentials.ExecutionRoleType { + task.SetExecutionRoleCredentialsID(aws.StringValue(message.RoleCredentials.CredentialsId)) + } + } + + go func() { + response := &ecsacs.IAMRoleCredentialsAckRequest{ + Expiration: message.RoleCredentials.Expiration, + MessageId: message.MessageId, + CredentialsId: message.RoleCredentials.CredentialsId, + } + refreshHandler.ackRequest <- response + }() + return nil +} + +// validateIAMRoleCredentialsMessage validates fields in the IAMRoleCredentialsMessage +// It 
returns an error if any of the following fields are not set in the message: +// messageId, taskArn, roleCredentials +func validateIAMRoleCredentialsMessage(message *ecsacs.IAMRoleCredentialsMessage) error { + if message == nil { + return fmt.Errorf("empty credentials message") + } + + messageId := aws.StringValue(message.MessageId) + if messageId == "" { + return fmt.Errorf("message id not set in credentials message") + } + + if aws.StringValue(message.TaskArn) == "" { + return fmt.Errorf("task Arn not set in credentials message") + } + + if message.RoleCredentials == nil { + return fmt.Errorf("role Credentials not set in credentials message: messageId: %s", messageId) + } + + if aws.StringValue(message.RoleCredentials.CredentialsId) == "" { + return fmt.Errorf("role Credentials ID not set in credentials message: messageId: %s", messageId) + } + + return nil +} + +// clearAcks drains the ack request channel +func (refreshHandler *refreshCredentialsHandler) clearAcks() { + for { + select { + case <-refreshHandler.ackRequest: + default: + return + } + } +} + +// validRoleType returns false if the RoleType in the acs refresh payload is not +// one of the expected types. TaskApplication, TaskExecution +func validRoleType(roleType string) bool { + switch roleType { + case credentials.ApplicationRoleType: + return true + case credentials.ExecutionRoleType: + return true + default: + return false + } +} diff --git a/agent/acs/handler/refresh_credentials_handler_test.go b/agent/acs/handler/refresh_credentials_handler_test.go new file mode 100644 index 00000000000..07b8110f81c --- /dev/null +++ b/agent/acs/handler/refresh_credentials_handler_test.go @@ -0,0 +1,318 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "reflect" + "testing" + + "context" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" +) + +const ( + messageId = "message1" + taskArn = "task1" + cluster = "default" + containerInstance = "instance" + expiration = "soon" + roleArn = "taskrole1" + accessKey = "akid" + secretKey = "secret" + sessionToken = "token" + credentialsId = "credsid" + roleType = "TaskExecution" +) + +var expectedAck = &ecsacs.IAMRoleCredentialsAckRequest{ + Expiration: aws.String(expiration), + MessageId: aws.String(messageId), + CredentialsId: aws.String(credentialsId), +} + +var expectedCredentials = credentials.TaskIAMRoleCredentials{ + ARN: taskArn, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + RoleArn: roleArn, + AccessKeyID: accessKey, + SecretAccessKey: secretKey, + SessionToken: sessionToken, + Expiration: expiration, + CredentialsID: credentialsId, + RoleType: roleType, + }, +} + +var message = &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String(messageId), + TaskArn: aws.String(taskArn), + RoleType: aws.String(roleType), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + RoleArn: aws.String(roleArn), + Expiration: aws.String(expiration), + AccessKeyId: aws.String(accessKey), + SecretAccessKey: aws.String(secretKey), + SessionToken: 
aws.String(sessionToken), + CredentialsId: aws.String(credentialsId), + }, +} + +// TestValidateRefreshMessageWithNilMessage tests if a validation error +// is returned while validating an empty credentials message +func TestValidateRefreshMessageWithNilMessage(t *testing.T) { + err := validateIAMRoleCredentialsMessage(nil) + if err == nil { + t.Error("Expected validation error validating an empty message") + } +} + +// TestValidateRefreshMessageWithNoMessageId tests if a validation error +// is returned while validating a credentials message with no message id +func TestValidateRefreshMessageWithNoMessageId(t *testing.T) { + message := &ecsacs.IAMRoleCredentialsMessage{} + err := validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message with no message id") + } + message.MessageId = aws.String("") + err = validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message with empty message id") + } +} + +// TestValidateRefreshMessageWithNoRoleCredentials tests if a validation error +// is returned while validating a credentials message with no role credentials +func TestValidateRefreshMessageWithNoRoleCredentials(t *testing.T) { + message := &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String(messageId), + } + err := validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message with no role credentials") + } +} + +// TestValidateRefreshMessageWithNoCredentialsId tests if a valid error +// is returned while validating a credentials message with no credentials id +func TestValidateRefreshMessageWithNoCredentialsId(t *testing.T) { + message := &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String(messageId), + RoleCredentials: &ecsacs.IAMRoleCredentials{}, + } + err := validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message 
with no credentials id") + } + message.RoleCredentials.CredentialsId = aws.String("") + err = validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message with empty credentials id") + } +} + +// TestValidateRefreshMessageWithNoTaskArn tests if a validation error +// is returned while validating a credentials message with no task arn +func TestValidateRefreshMessageWithNoTaskArn(t *testing.T) { + message := &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String(messageId), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + CredentialsId: aws.String("id"), + }, + } + err := validateIAMRoleCredentialsMessage(message) + if err == nil { + t.Error("Expected validation error validating a message with no task arn") + } +} + +// TestValidateRefreshMessageSuccess tests if a valid credentials message +// is validated without any errors +func TestValidateRefreshMessageSuccess(t *testing.T) { + message := &ecsacs.IAMRoleCredentialsMessage{ + MessageId: aws.String(messageId), + RoleCredentials: &ecsacs.IAMRoleCredentials{ + CredentialsId: aws.String("id"), + }, + TaskArn: aws.String(taskArn), + } + err := validateIAMRoleCredentialsMessage(message) + if err != nil { + t.Errorf("Error validating credentials message: %v", err) + } +} + +// TestInvalidCredentialsMessageNotAcked tests if invalid credential messages +// are not acked +func TestInvalidCredentialsMessageNotAcked(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := credentials.NewManager() + + ctx, cancel := context.WithCancel(context.Background()) + handler := newRefreshCredentialsHandler(ctx, cluster, containerInstance, nil, credentialsManager, nil) + + // Start a goroutine to listen for acks. 
Cancelling the context stops the goroutine + go func() { + for { + select { + // We never expect the message to be acked + case <-handler.ackRequest: + t.Fatalf("Received ack when none expected") + case <-ctx.Done(): + return + } + } + }() + + // test adding a credentials message without the MessageId field + message := &ecsacs.IAMRoleCredentialsMessage{} + err := handler.handleSingleMessage(message) + if err == nil { + t.Error("Expected error updating credentials when the message contains no message id") + } + cancel() +} + +// TestCredentialsMessageNotAckedWhenTaskNotFound tests if credential messages +// are not acked when the task arn in the message is not found in the task +// engine +func TestCredentialsMessageNotAckedWhenTaskNotFound(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := credentials.NewManager() + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + // Return task not found from the engine for GetTaskByArn + taskEngine.EXPECT().GetTaskByArn(taskArn).Return(nil, false) + + ctx, cancel := context.WithCancel(context.Background()) + handler := newRefreshCredentialsHandler(ctx, cluster, containerInstance, nil, credentialsManager, taskEngine) + + // Start a goroutine to listen for acks. 
Cancelling the context stops the goroutine + go func() { + for { + select { + // We never expect the message to be acked + case <-handler.ackRequest: + t.Fatalf("Received ack when none expected") + case <-ctx.Done(): + return + } + } + }() + + // Test adding a credentials message without the MessageId field + err := handler.handleSingleMessage(message) + if err == nil { + t.Error("Expected error updating credentials when the message contains unexpected task arn") + } + cancel() +} + +// TestHandleRefreshMessageAckedWhenCredentialsUpdated tests that a credential message +// is ackd when the credentials are updated successfully +func TestHandleRefreshMessageAckedWhenCredentialsUpdated(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := credentials.NewManager() + + ctx, cancel := context.WithCancel(context.Background()) + var ackRequested *ecsacs.IAMRoleCredentialsAckRequest + + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + ackRequested = ackRequest + cancel() + }).Times(1) + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + // Return a task from the engine for GetTaskByArn + taskEngine.EXPECT().GetTaskByArn(taskArn).Return(&apitask.Task{}, true) + + handler := newRefreshCredentialsHandler(ctx, clusterName, containerInstanceArn, mockWsClient, credentialsManager, taskEngine) + go handler.sendAcks() + + // test adding a credentials message without the MessageId field + err := handler.handleSingleMessage(message) + if err != nil { + t.Errorf("Error updating credentials: %v", err) + } + + // Wait till we get an ack from the ackBuffer + select { + case <-ctx.Done(): + } + + if !reflect.DeepEqual(ackRequested, expectedAck) { + t.Errorf("Message between expected and requested ack. 
Expected: %v, Requested: %v", expectedAck, ackRequested) + } + + creds, exist := credentialsManager.GetTaskCredentials(credentialsId) + if !exist { + t.Errorf("Expected credentials to exist for the task") + } + if !reflect.DeepEqual(creds, expectedCredentials) { + t.Errorf("Mismatch between expected credentials and credentials for task. Expected: %v, got: %v", expectedCredentials, creds) + } +} + +// TestRefreshCredentialsHandler tests if a credential message is acked when +// the message is sent to the messageBuffer channel +func TestRefreshCredentialsHandler(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := credentials.NewManager() + + ctx, cancel := context.WithCancel(context.Background()) + mockWsClient := mock_wsclient.NewMockClientServer(ctrl) + var ackRequested *ecsacs.IAMRoleCredentialsAckRequest + mockWsClient.EXPECT().MakeRequest(gomock.Any()).Do(func(ackRequest *ecsacs.IAMRoleCredentialsAckRequest) { + ackRequested = ackRequest + cancel() + }).Times(1) + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + // Return a task from the engine for GetTaskByArn + taskEngine.EXPECT().GetTaskByArn(taskArn).Return(&apitask.Task{}, true) + + handler := newRefreshCredentialsHandler(ctx, clusterName, containerInstanceArn, mockWsClient, credentialsManager, taskEngine) + go handler.start() + + handler.messageBuffer <- message + // Wait till we get an ack + select { + case <-ctx.Done(): + } + + if !reflect.DeepEqual(ackRequested, expectedAck) { + t.Errorf("Message between expected and requested ack. Expected: %v, Requested: %v", expectedAck, ackRequested) + } + + creds, exist := credentialsManager.GetTaskCredentials(credentialsId) + if !exist { + t.Errorf("Expected credentials to exist for the task") + } + if !reflect.DeepEqual(creds, expectedCredentials) { + t.Errorf("Mismatch between expected credentials and credentials for task. 
Expected: %v, got: %v", expectedCredentials, creds) + } +} diff --git a/agent/acs/handler/task_manifest_handler.go b/agent/acs/handler/task_manifest_handler.go new file mode 100644 index 00000000000..dcb62e3ea40 --- /dev/null +++ b/agent/acs/handler/task_manifest_handler.go @@ -0,0 +1,283 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "context" + "strconv" + "sync" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" +) + +// taskManifestHandler handles task manifest message for the ACS client +type taskManifestHandler struct { + messageBufferTaskManifest chan *ecsacs.TaskManifestMessage + messageBufferTaskManifestAck chan string + messageBufferTaskStopVerificationMessage chan *ecsacs.TaskStopVerificationMessage + messageBufferTaskStopVerificationAck chan *ecsacs.TaskStopVerificationAck + ctx context.Context + taskEngine engine.TaskEngine + cancel context.CancelFunc + dataClient data.Client + cluster string + containerInstanceArn string + acsClient wsclient.ClientServer + latestSeqNumberTaskManifest *int64 + messageId string + lock sync.RWMutex +} + +// 
newTaskManifestHandler returns an instance of the taskManifestHandler struct +func newTaskManifestHandler(ctx context.Context, + cluster string, containerInstanceArn string, acsClient wsclient.ClientServer, + dataClient data.Client, taskEngine engine.TaskEngine, latestSeqNumberTaskManifest *int64) taskManifestHandler { + + // Create a cancelable context from the parent context + derivedContext, cancel := context.WithCancel(ctx) + return taskManifestHandler{ + messageBufferTaskManifest: make(chan *ecsacs.TaskManifestMessage), + messageBufferTaskManifestAck: make(chan string), + messageBufferTaskStopVerificationMessage: make(chan *ecsacs.TaskStopVerificationMessage), + messageBufferTaskStopVerificationAck: make(chan *ecsacs.TaskStopVerificationAck), + ctx: derivedContext, + cancel: cancel, + cluster: cluster, + containerInstanceArn: containerInstanceArn, + acsClient: acsClient, + taskEngine: taskEngine, + dataClient: dataClient, + latestSeqNumberTaskManifest: latestSeqNumberTaskManifest, + } +} + +func (taskManifestHandler *taskManifestHandler) handlerFuncTaskManifestMessage() func( + message *ecsacs.TaskManifestMessage) { + return func(message *ecsacs.TaskManifestMessage) { + taskManifestHandler.messageBufferTaskManifest <- message + } +} + +func (taskManifestHandler *taskManifestHandler) handlerFuncTaskStopVerificationMessage() func( + message *ecsacs.TaskStopVerificationAck) { + return func(message *ecsacs.TaskStopVerificationAck) { + taskManifestHandler.messageBufferTaskStopVerificationAck <- message + } +} + +func (taskManifestHandler *taskManifestHandler) start() { + // Task manifest and it's ack + go taskManifestHandler.handleTaskManifestMessage() + go taskManifestHandler.sendTaskManifestMessageAck() + + // Task stop verification message and it's ack + go taskManifestHandler.sendTaskStopVerificationMessage() + go taskManifestHandler.handleTaskStopVerificationAck() + +} + +func (taskManifestHandler *taskManifestHandler) getMessageId() string { + 
taskManifestHandler.lock.RLock() + defer taskManifestHandler.lock.RUnlock() + return taskManifestHandler.messageId +} + +func (taskManifestHandler *taskManifestHandler) setMessageId(messageId string) { + taskManifestHandler.lock.Lock() + defer taskManifestHandler.lock.Unlock() + taskManifestHandler.messageId = messageId +} + +func (taskManifestHandler *taskManifestHandler) sendTaskManifestMessageAck() { + for { + select { + case messageBufferTaskManifestAck := <-taskManifestHandler.messageBufferTaskManifestAck: + taskManifestHandler.ackTaskManifestMessage(messageBufferTaskManifestAck) + case <-taskManifestHandler.ctx.Done(): + return + } + } +} + +func (taskManifestHandler *taskManifestHandler) handleTaskStopVerificationAck() { + for { + select { + case messageBufferTaskStopVerificationAck := <-taskManifestHandler.messageBufferTaskStopVerificationAck: + if err := taskManifestHandler.handleSingleMessageVerificationAck(messageBufferTaskStopVerificationAck); err != nil { + seelog.Warnf("Error handling Verification ack with messageID: %s, error: %v", + messageBufferTaskStopVerificationAck.MessageId, err) + } + case <-taskManifestHandler.ctx.Done(): + return + } + } +} + +func (taskManifestHandler *taskManifestHandler) clearAcks() { + for { + select { + case <-taskManifestHandler.messageBufferTaskManifestAck: + case <-taskManifestHandler.messageBufferTaskStopVerificationAck: + default: + return + } + } +} + +func (taskManifestHandler *taskManifestHandler) ackTaskManifestMessage(messageID string) { + seelog.Debugf("Acking task manifest message id: %s", messageID) + err := taskManifestHandler.acsClient.MakeRequest(&ecsacs.AckRequest{ + Cluster: aws.String(taskManifestHandler.cluster), + ContainerInstance: aws.String(taskManifestHandler.containerInstanceArn), + MessageId: aws.String(messageID), + }) + if err != nil { + seelog.Warnf("Error 'ack'ing TaskManifestMessage with messageID: %s, error: %v", messageID, err) + } +} + +// stop is used to invoke a cancellation function 
+func (taskManifestHandler *taskManifestHandler) stop() { + taskManifestHandler.cancel() +} + +func (taskManifestHandler *taskManifestHandler) handleTaskManifestMessage() { + for { + select { + case <-taskManifestHandler.ctx.Done(): + return + case message := <-taskManifestHandler.messageBufferTaskManifest: + if err := taskManifestHandler.handleTaskManifestSingleMessage(message); err != nil { + seelog.Warnf("Unable to handle taskManifest message [%s]: %v", message.String(), err) + } + } + } +} + +func (taskManifestHandler *taskManifestHandler) sendTaskStopVerificationMessage() { + for { + select { + case message := <-taskManifestHandler.messageBufferTaskStopVerificationMessage: + if err := taskManifestHandler.acsClient.MakeRequest(message); err != nil { + seelog.Warnf("Unable to send taskStopVerification message [%s]: %v", message.String(), err) + } + case <-taskManifestHandler.ctx.Done(): + return + } + } +} + +// compares the list of tasks received in the task manifest message and tasks running on the the instance +// It returns all the task that are running on the instance but not present in task manifest message task list +func compareTasks(receivedTaskList []*ecsacs.TaskIdentifier, runningTaskList []*apitask.Task, clusterARN string) []*ecsacs.TaskIdentifier { + tasksToBeKilled := make([]*ecsacs.TaskIdentifier, 0) + for _, runningTask := range runningTaskList { + // For every task running on the instance check if the task is present in receivedTaskList with the DesiredState + // of running, if not add them to the list of task that needs to be stopped + if runningTask.GetDesiredStatus() == apitaskstatus.TaskRunning { + taskPresent := false + for _, receivedTask := range receivedTaskList { + if *receivedTask.TaskArn == runningTask.Arn && *receivedTask. 
+ DesiredStatus == apitaskstatus.TaskRunningString { + // Task present, does not need to be stopped + taskPresent = true + break + } + } + if !taskPresent { + tasksToBeKilled = append(tasksToBeKilled, &ecsacs.TaskIdentifier{ + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String(runningTask.Arn), + TaskClusterArn: aws.String(clusterARN), + }) + } + } + } + return tasksToBeKilled +} + +func (taskManifestHandler *taskManifestHandler) handleSingleMessageVerificationAck( + message *ecsacs.TaskStopVerificationAck) error { + // Ensure that we have received a corresponding task manifest message before + taskManifestMessageId := taskManifestHandler.getMessageId() + if taskManifestMessageId != "" && *message.MessageId == taskManifestMessageId { + // Reset the message id so that the message with same message id is not processed twice + taskManifestHandler.setMessageId("") + for _, taskToKill := range message.StopTasks { + if *taskToKill.DesiredStatus == apitaskstatus.TaskStoppedString { + task, isPresent := taskManifestHandler.taskEngine.GetTaskByArn(*taskToKill.TaskArn) + if isPresent { + seelog.Infof("Stopping task from task manifest handler: %s", task.Arn) + task.SetDesiredStatus(apitaskstatus.TaskStopped) + taskManifestHandler.taskEngine.AddTask(task) + } else { + seelog.Debugf("Task not found on the instance: %s", *taskToKill.TaskArn) + } + } + } + } + return nil +} + +func (taskManifestHandler *taskManifestHandler) handleTaskManifestSingleMessage( + message *ecsacs.TaskManifestMessage) error { + taskListManifestHandler := message.Tasks + seqNumberFromMessage := *message.Timeline + clusterARN := *message.ClusterArn + agentLatestSequenceNumber := *taskManifestHandler.latestSeqNumberTaskManifest + + // Check if the sequence number of message received is more than the one stored in Agent + if agentLatestSequenceNumber < seqNumberFromMessage { + runningTasksOnInstance, err := taskManifestHandler.taskEngine.ListTasks() + if err != nil { + return 
err + } + *taskManifestHandler.latestSeqNumberTaskManifest = *message.Timeline + // Save the new sequence number to disk. + err = taskManifestHandler.dataClient.SaveMetadata(data.TaskManifestSeqNumKey, strconv.FormatInt(*message.Timeline, 10)) + if err != nil { + return err + } + + tasksToKill := compareTasks(taskListManifestHandler, runningTasksOnInstance, clusterARN) + + // Update messageId so that it can be compared to the messageId in TaskStopVerificationAck message + taskManifestHandler.setMessageId(*message.MessageId) + + // Throw the task manifest ack and task verification message in async so that it does not block the current + // thread. + go func() { + taskManifestHandler.messageBufferTaskManifestAck <- *message.MessageId + if len(tasksToKill) > 0 { + taskStopVerificationMessage := ecsacs.TaskStopVerificationMessage{ + MessageId: message.MessageId, + StopCandidates: tasksToKill, + } + + taskManifestHandler.messageBufferTaskStopVerificationMessage <- &taskStopVerificationMessage + } + }() + } else { + seelog.Debugf("Skipping the task manifest message. sequence number from task manifest: %d. sequence number "+ + " from Agent: %d", seqNumberFromMessage, agentLatestSequenceNumber) + } + + return nil +} diff --git a/agent/acs/handler/task_manifest_handler_test.go b/agent/acs/handler/task_manifest_handler_test.go new file mode 100644 index 00000000000..4ca495da447 --- /dev/null +++ b/agent/acs/handler/task_manifest_handler_test.go @@ -0,0 +1,549 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. +package handler + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testSeqNum = 12 +) + +// Tests the case when all the tasks running on the instance needs to be killed +func TestManifestHandlerKillAllTasks(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + cluster := "mock-cluster" + containerInstanceArn := "mock-container-instance" + messageId := "mock-message-id" + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + newTaskManifest := newTaskManifestHandler(ctx, cluster, containerInstanceArn, mockWSClient, + dataClient, taskEngine, aws.Int64(11)) + + ackRequested := &ecsacs.AckRequest{ + Cluster: aws.String(cluster), + ContainerInstance: aws.String(containerInstanceArn), + MessageId: aws.String(messageId), + } + + task2 := &task.Task{Arn: "arn2", DesiredStatusUnsafe: apitaskstatus.TaskRunning} + task1 := &task.Task{Arn: "arn1", DesiredStatusUnsafe: apitaskstatus.TaskRunning} + + taskList := []*task.Task{task1, task2} + + //Task that needs to be stopped, sent back by agent + taskIdentifierFinal := []*ecsacs.TaskIdentifier{ + {DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), TaskArn: aws.String("arn1"), TaskClusterArn: aws.String(cluster)}, + 
{DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), TaskArn: aws.String("arn2"), TaskClusterArn: aws.String(cluster)}, + } + + taskStopVerificationMessage := &ecsacs.TaskStopVerificationMessage{ + MessageId: aws.String(messageId), + StopCandidates: taskIdentifierFinal, + } + + messageTaskStopVerificationAck := &ecsacs.TaskStopVerificationAck{ + GeneratedAt: aws.Int64(123), + MessageId: aws.String(messageId), + StopTasks: taskIdentifierFinal, + } + + gomock.InOrder( + taskEngine.EXPECT().ListTasks().Return(taskList, nil).Times(1), + // AddTask function needs to be called twice for both the tasks getting stopped + taskEngine.EXPECT().AddTask(gomock.Any()), + taskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task1 *task.Task) { + newTaskManifest.stop() + }), + ) + + mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) + + mockWSClient.EXPECT().MakeRequest(taskStopVerificationMessage).Times(1).Do(func(message *ecsacs.TaskStopVerificationMessage) { + // Agent receives the ack message when taskStopVerificationMessage is processed by ACS + newTaskManifest.messageBufferTaskStopVerificationAck <- messageTaskStopVerificationAck + }) + + taskEngine.EXPECT().GetTaskByArn("arn1").Return(task1, true) + taskEngine.EXPECT().GetTaskByArn("arn2").Return(task2, true) + + message := &ecsacs.TaskManifestMessage{ + MessageId: aws.String(messageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstanceArn), + Tasks: []*ecsacs.TaskIdentifier{ + {DesiredStatus: aws.String("STOPPED"), TaskArn: aws.String("arn-long"), TaskClusterArn: aws.String(cluster)}, + }, + Timeline: aws.Int64(testSeqNum), + } + + go newTaskManifest.start() + + newTaskManifest.messageBufferTaskManifest <- message + + // mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) in this test is called by an asynchronous routine. 
Sometimes the function's execution finishes before a call to this asynchronous routine; this sleep ensures that the
taskStopVerificationMessage := &ecsacs.TaskStopVerificationMessage{ + MessageId: aws.String(messageId), + StopCandidates: taskIdentifierFinal, + } + + messageTaskStopVerificationAck := &ecsacs.TaskStopVerificationAck{ + GeneratedAt: aws.Int64(123), + MessageId: aws.String(messageId), + StopTasks: taskIdentifierFinal, + } + + gomock.InOrder( + taskEngine.EXPECT().ListTasks().Return(taskList, nil).Times(1), + taskEngine.EXPECT().AddTask(gomock.Any()), + taskEngine.EXPECT().AddTask(gomock.Any()).Do(func(task1 *task.Task) { + newTaskManifest.stop() + }), + ) + + mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) + + mockWSClient.EXPECT().MakeRequest(taskStopVerificationMessage).Times(1).Do(func(message *ecsacs.TaskStopVerificationMessage) { + newTaskManifest.messageBufferTaskStopVerificationAck <- messageTaskStopVerificationAck + }) + + taskEngine.EXPECT().GetTaskByArn("arn3").Return(task1, true) + taskEngine.EXPECT().GetTaskByArn("arn2").Return(task2, true) + + message := &ecsacs.TaskManifestMessage{ + MessageId: aws.String(messageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstanceArn), + Tasks: []*ecsacs.TaskIdentifier{ + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn1"), + TaskClusterArn: aws.String(cluster), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn2"), + TaskClusterArn: aws.String(cluster), + }, + }, + Timeline: aws.Int64(testSeqNum), + } + + go newTaskManifest.start() + + newTaskManifest.messageBufferTaskManifest <- message + + // mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) in this test is called by an asynchronous routine. 
Sometimes the function's execution finishes before a call to this asynchronous routine; this sleep ensures that the
&ecsacs.TaskStopVerificationMessage{ + MessageId: aws.String(messageId), + StopCandidates: taskIdentifierFinal, + } + + taskEngine.EXPECT().ListTasks().Return(taskList, nil).Times(1) + + mockWSClient.EXPECT().MakeRequest(taskStopVerificationMessage).Times(0) + mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1).Do(func(message *ecsacs.AckRequest) { + newTaskManifest.stop() + }) + + message := &ecsacs.TaskManifestMessage{ + MessageId: aws.String(messageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstanceArn), + Tasks: []*ecsacs.TaskIdentifier{ + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn1"), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn2"), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn3"), + }, + }, + Timeline: aws.Int64(testSeqNum), + } + + go newTaskManifest.start() + + newTaskManifest.messageBufferTaskManifest <- message + + // mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) in this test is called by an asynchronous routine. + // Sometimes functions execution finishes before a call to this asynchronous routine, this sleep will ensure that + // asynchronous routine is called before function ends + time.Sleep(2 * time.Second) + + select { + case <-newTaskManifest.ctx.Done(): + } + + // Verify that new seq num has been correctly saved in database. 
+ seqnum, err := dataClient.GetMetadata(data.TaskManifestSeqNumKey) + require.NoError(t, err) + assert.Equal(t, strconv.FormatInt(int64(testSeqNum), 10), seqnum) +} + +// Tests the case when the task list received in TaskManifest message is different than the one received in +// TaskStopVerificationMessage +func TestManifestHandlerDifferentTaskLists(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + cluster := "mock-cluster" + containerInstanceArn := "mock-container-instance" + messageId := "mock-message-id" + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + newTaskManifest := newTaskManifestHandler(ctx, cluster, containerInstanceArn, mockWSClient, + dataClient, taskEngine, aws.Int64(11)) + + ackRequested := &ecsacs.AckRequest{ + Cluster: aws.String(cluster), + ContainerInstance: aws.String(containerInstanceArn), + MessageId: aws.String(messageId), + } + + task2 := &task.Task{Arn: "arn2", DesiredStatusUnsafe: apitaskstatus.TaskRunning} + task1 := &task.Task{Arn: "arn1", DesiredStatusUnsafe: apitaskstatus.TaskRunning} + + taskList := []*task.Task{task1, task2} + + // tasks that suppose to be running + taskIdentifierInitial := ecsacs.TaskIdentifier{ + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn1"), + TaskClusterArn: aws.String(cluster), + } + + //Task that needs to be stopped, sent back by agent + taskIdentifierAckFinal := []*ecsacs.TaskIdentifier{ + {DesiredStatus: aws.String(apitaskstatus.TaskRunningString), TaskArn: aws.String("arn1"), TaskClusterArn: aws.String(cluster)}, + {DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), TaskArn: aws.String("arn2"), TaskClusterArn: aws.String(cluster)}, + } + + //Task that needs to be stopped, sent back by agent + taskIdentifierMessage := []*ecsacs.TaskIdentifier{ + {DesiredStatus: 
aws.String(apitaskstatus.TaskStoppedString), TaskArn: aws.String("arn1"), TaskClusterArn: aws.String(cluster)}, + {DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), TaskArn: aws.String("arn2"), TaskClusterArn: aws.String(cluster)}, + } + + taskStopVerificationMessage := &ecsacs.TaskStopVerificationMessage{ + MessageId: aws.String(messageId), + StopCandidates: taskIdentifierMessage, + } + + messageTaskStopVerificationAck := &ecsacs.TaskStopVerificationAck{ + GeneratedAt: aws.Int64(123), + MessageId: aws.String(messageId), + StopTasks: taskIdentifierAckFinal, + } + + gomock.InOrder( + taskEngine.EXPECT().ListTasks().Return(taskList, nil).Times(1), + taskEngine.EXPECT().AddTask(gomock.Any()).Times(1).Do(func(task1 *task.Task) { + newTaskManifest.stop() + }), + ) + + mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) + + mockWSClient.EXPECT().MakeRequest(taskStopVerificationMessage).Times(1).Do(func( + message *ecsacs.TaskStopVerificationMessage) { + newTaskManifest.messageBufferTaskStopVerificationAck <- messageTaskStopVerificationAck + }) + + taskEngine.EXPECT().GetTaskByArn("arn1").Times(0) + taskEngine.EXPECT().GetTaskByArn("arn2").Return(task2, true) + + message := &ecsacs.TaskManifestMessage{ + MessageId: aws.String(messageId), + ClusterArn: aws.String(cluster), + ContainerInstanceArn: aws.String(containerInstanceArn), + Tasks: []*ecsacs.TaskIdentifier{ + &taskIdentifierInitial, + }, + Timeline: aws.Int64(testSeqNum), + } + + go newTaskManifest.start() + + newTaskManifest.messageBufferTaskManifest <- message + + // mockWSClient.EXPECT().MakeRequest(ackRequested).Times(1) in this test is called by an asynchronous routine. + // Sometimes functions execution finishes before a call to this asynchronous routine, this sleep will ensure that + // asynchronous routine is called before function ends + time.Sleep(2 * time.Second) + + select { + case <-newTaskManifest.ctx.Done(): + } + + // Verify that new seq num has been correctly saved in database. 
+ seqnum, err := dataClient.GetMetadata(data.TaskManifestSeqNumKey) + require.NoError(t, err) + assert.Equal(t, strconv.FormatInt(int64(testSeqNum), 10), seqnum) +} + +func TestManifestHandlerSequenceNumbers(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + testcases := []struct { + name string + inputSequenceNumber int64 + }{ + { + name: "Tests the case when sequence number received is older than the one stored in agent", + inputSequenceNumber: 13, + }, + { + name: "Tests the case when sequence number received is equal to one stored in agent", + inputSequenceNumber: 12, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Save the initial sequence number. + require.NoError(t, dataClient.SaveMetadata(data.TaskManifestSeqNumKey, + strconv.FormatInt(tc.inputSequenceNumber, 10))) + + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + + ctx := context.TODO() + mockWSClient := mock_wsclient.NewMockClientServer(ctrl) + newTaskManifest := newTaskManifestHandler(ctx, cluster, containerInstanceArn, mockWSClient, + data.NewNoopClient(), taskEngine, aws.Int64(tc.inputSequenceNumber)) + + taskList := []*task.Task{ + {Arn: "arn2", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + {Arn: "arn1", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + } + + gomock.InOrder( + taskEngine.EXPECT().ListTasks().Return(taskList, nil).Times(0), + taskEngine.EXPECT().AddTask(gomock.Any()).Times(0), + ) + + message := &ecsacs.TaskManifestMessage{ + MessageId: aws.String(eniMessageId), + ClusterArn: aws.String(clusterName), + ContainerInstanceArn: aws.String(containerInstanceArn), + Tasks: []*ecsacs.TaskIdentifier{ + { + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn-long"), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn-long-1"), + }, + }, + Timeline: aws.Int64(12), + } + err := 
newTaskManifest.handleTaskManifestSingleMessage(message) + assert.NoError(t, err) + + // Verify that the sequence number in db remains unchanged. + s, err := dataClient.GetMetadata(data.TaskManifestSeqNumKey) + require.NoError(t, err) + seqNum, err := strconv.ParseInt(s, 10, 64) + require.NoError(t, err) + assert.Equal(t, tc.inputSequenceNumber, seqNum) + }) + } +} + +func TestCompareTasksDifferentTasks(t *testing.T) { + receivedTaskList := []*ecsacs.TaskIdentifier{ + { + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn-long"), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskStoppedString), + TaskArn: aws.String("arn-long-1"), + }, + } + + taskList := []*task.Task{ + {Arn: "arn2", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + {Arn: "arn1", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + } + + compareTaskList := compareTasks(receivedTaskList, taskList, "test-cluster-arn") + + assert.Equal(t, 2, len(compareTaskList)) +} + +func TestCompareTasksSameTasks(t *testing.T) { + receivedTaskList := []*ecsacs.TaskIdentifier{ + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn1"), + }, + { + DesiredStatus: aws.String(apitaskstatus.TaskRunningString), + TaskArn: aws.String("arn2"), + }, + } + + taskList := []*task.Task{ + {Arn: "arn2", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + {Arn: "arn1", DesiredStatusUnsafe: apitaskstatus.TaskRunning}, + } + + compareTaskList := compareTasks(receivedTaskList, taskList, "test-cluster-arn") + + assert.Equal(t, 0, len(compareTaskList)) +} diff --git a/agent/acs/model/api/api-2.json b/agent/acs/model/api/api-2.json new file mode 100644 index 00000000000..3036c4f3f5c --- /dev/null +++ b/agent/acs/model/api/api-2.json @@ -0,0 +1,821 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-13", + "endpointPrefix":"ecsacs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECS ACS", + "serviceFullName":"Amazon EC2 
Container Service Agent Communication Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerAgentServiceV20141113", + "uid":"ecsacs-2014-11-13" + }, + "operations":{ + "AttachInstanceNetworkInterfaces":{ + "name":"AttachInstanceNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachInstanceNetworkInterfacesMessage"}, + "output":{"shape":"AckRequest"}, + "documentation":"AttachNetworkInterface requests that the Agent look for and confirm the attachment of a network interface by the control plane that is not bound to a specific task." + }, + "AttachTaskNetworkInterfaces":{ + "name":"AttachTaskNetworkInterfaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AttachTaskNetworkInterfacesMessage"}, + "output":{"shape":"AckRequest"}, + "documentation":"AttachNetworkInterface requests that the Agent look for and confirm the attachment of a network interface by the control plane." + }, + "Error":{ + "name":"Error", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ErrorMessage"} + }, + "Heartbeat":{ + "name":"Heartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"HeartbeatMessage"}, + "documentation":"Heartbeat is a periodic message that informs the agent all is well." + }, + "Payload":{ + "name":"Payload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PayloadMessage"}, + "output":{"shape":"AckRequest"}, + "documentation":"Payload requests the Agent parse and launch a given Payload. In response, the Agent will 'ack' the request it recieved after procesing it." 
+ }, + "PerformUpdate":{ + "name":"PerformUpdate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PerformUpdateMessage"}, + "output":{"shape":"AckRequest"} + }, + "Poll":{ + "name":"Poll", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PollRequest"}, + "output":{"shape":"CloseMessage"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"BadRequestException"}, + {"shape":"InvalidClusterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInstanceException"}, + {"shape":"InactiveInstanceException"} + ], + "documentation":"Poll is the (increasingly poorly named) method by which the agent creates an initial long-lasting connection over which more specific operations are performed. More accurately, this would be named \"StartSession\"" + }, + "RefreshTaskIAMRoleCredentials":{ + "name":"RefreshTaskIAMRoleCredentials", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IAMRoleCredentialsMessage"}, + "output":{"shape":"IAMRoleCredentialsAckRequest"} + }, + "StageUpdate":{ + "name":"StageUpdate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StageUpdateMessage"}, + "output":{"shape":"AckRequest"} + }, + "UpdateFailure":{ + "name":"UpdateFailure", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"NackRequest"} + } + }, + "shapes":{ + "ASMAuthData":{ + "type":"structure", + "members":{ + "credentialsParameter":{"shape":"String"}, + "region":{"shape":"String"} + } + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "AckRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "messageId":{"shape":"String"} + } + }, + "Association":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "type":{"shape":"AssociationType"}, + "content":{"shape":"EncodedString"}, + 
"containers":{"shape":"StringList"} + } + }, + "AssociationType":{ + "type":"string", + "enum":[ + "elastic-inference", + "gpu" + ] + }, + "Associations":{ + "type":"list", + "member":{"shape":"Association"} + }, + "AttachInstanceNetworkInterfacesMessage":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "generatedAt":{"shape":"Long"}, + "messageId":{"shape":"String"}, + "waitTimeoutMs":{"shape":"Long"}, + "elasticNetworkInterfaces":{"shape":"ElasticNetworkInterfaceList"} + } + }, + "AttachTaskNetworkInterfacesMessage":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "taskArn":{"shape":"String"}, + "generatedAt":{"shape":"Long"}, + "messageId":{"shape":"String"}, + "waitTimeoutMs":{"shape":"Long"}, + "elasticNetworkInterfaces":{"shape":"ElasticNetworkInterfaceList"} + } + }, + "AuthStrategy":{ + "type":"string", + "enum":["ExecutionRole"] + }, + "AuthenticationType":{ + "type":"string", + "enum":[ + "ecr", + "asm" + ] + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "CloseMessage":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "Container":{ + "type":"structure", + "members":{ + "command":{"shape":"StringList"}, + "cpu":{"shape":"Integer"}, + "entryPoint":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"}, + "environmentFiles":{"shape":"EnvironmentFiles"}, + "essential":{"shape":"Boolean"}, + "image":{"shape":"String"}, + "links":{"shape":"StringList"}, + "memory":{"shape":"Integer"}, + "name":{"shape":"String"}, + "overrides":{"shape":"String"}, + "portMappings":{"shape":"PortMappingList"}, + "managedAgents":{"shape":"ManagedAgentList"}, + "mountPoints":{"shape":"MountPointList"}, + "volumesFrom":{"shape":"VolumeFromList"}, + 
"dockerConfig":{"shape":"DockerConfig"}, + "healthCheckType":{"shape":"HealthCheckType"}, + "registryAuthentication":{"shape":"RegistryAuthenticationData"}, + "logsAuthStrategy":{"shape":"AuthStrategy"}, + "secrets":{"shape":"SecretList"}, + "dependsOn":{"shape":"ContainerDependencies"}, + "startTimeout":{"shape":"Integer"}, + "stopTimeout":{"shape":"Integer"}, + "firelensConfiguration":{"shape":"FirelensConfiguration"}, + "containerArn":{"shape":"String"} + } + }, + "ContainerCondition":{ + "type":"string", + "enum":[ + "START", + "COMPLETE", + "SUCCESS", + "HEALTHY" + ] + }, + "ContainerDependencies":{ + "type":"list", + "member":{"shape":"ContainerDependency"} + }, + "ContainerDependency":{ + "type":"structure", + "members":{ + "containerName":{"shape":"String"}, + "condition":{"shape":"ContainerCondition"} + } + }, + "ContainerList":{ + "type":"list", + "member":{"shape":"Container"} + }, + "DockerConfig":{ + "type":"structure", + "members":{ + "version":{"shape":"String"}, + "config":{"shape":"String"}, + "hostConfig":{"shape":"String"} + } + }, + "DockerVolumeConfiguration":{ + "type":"structure", + "members":{ + "scope":{"shape":"Scope"}, + "autoprovision":{"shape":"Boolean"}, + "driver":{"shape":"String"}, + "driverOpts":{"shape":"StringMap"}, + "labels":{"shape":"StringMap"} + } + }, + "Double":{"type":"double"}, + "ECRAuthData":{ + "type":"structure", + "members":{ + "registryId":{"shape":"String"}, + "region":{"shape":"String"}, + "endpointOverride":{"shape":"String"}, + "useExecutionRole":{"shape":"Boolean"} + } + }, + + "NetworkInterfaceAssociationProtocol": { + "type": "string", + "enum": [ + "default", + "vlan" + ] + }, + + "NetworkInterfaceVlanProperties": { + "type": "structure", + "members": { + "vlanId":{"shape":"String"}, + "trunkInterfaceMacAddress":{"shape":"String"} + } + }, + "EFSTransitEncryption":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "EFSAuthorizationConfig":{ + "type":"structure", + "members":{ + 
"accessPointId":{"shape":"String"}, + "iam":{"shape":"EFSAuthorizationConfigIAM"} + } + }, + "EFSAuthorizationConfigIAM":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "EFSVolumeConfiguration":{ + "type":"structure", + "members":{ + "fileSystemId":{"shape":"String"}, + "rootDirectory":{"shape":"String"}, + "transitEncryption":{"shape":"EFSTransitEncryption"}, + "transitEncryptionPort":{"shape":"Integer"}, + "authorizationConfig":{"shape":"EFSAuthorizationConfig"} + } + }, + "ElasticNetworkInterface":{ + "type":"structure", + "members":{ + "interfaceAssociationProtocol": {"shape": "NetworkInterfaceAssociationProtocol"}, + "interfaceVlanProperties": {"shape": "NetworkInterfaceVlanProperties"}, + "macAddress":{"shape":"String"}, + "attachmentArn":{"shape":"String"}, + "ec2Id":{"shape":"String"}, + "ipv4Addresses":{"shape":"IPv4AddressList"}, + "ipv6Addresses":{"shape":"IPv6AddressList"}, + "domainName":{"shape":"StringList"}, + "domainNameServers":{"shape":"StringList"}, + "privateDnsName":{"shape":"String"}, + "subnetGatewayIpv4Address":{"shape":"String"} + } + }, + "ElasticNetworkInterfaceList":{ + "type":"list", + "member":{"shape":"ElasticNetworkInterface"} + }, + "EncodedString":{ + "type":"structure", + "members":{ + "value":{"shape":"String"}, + "encoding":{"shape":"Encoding"} + } + }, + "Encoding":{ + "type":"string", + "enum":[ + "identity", + "base64" + ] + }, + "EnvironmentFile":{ + "type":"structure", + "members":{ + "value":{"shape":"String"}, + "type":{"shape":"EnvironmentFileType"} + } + }, + "EnvironmentFiles":{ + "type":"list", + "member":{"shape":"EnvironmentFile"} + }, + "EnvironmentFileType": { + "type":"string", + "enum":[ + "s3" + ] + }, + "EnvironmentVariables":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "ErrorMessage":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "FSxWindowsFileServerAuthorizationConfig":{ + "type":"structure", + "members":{ + 
"credentialsParameter":{"shape":"String"}, + "domain":{"shape":"String"} + } + }, + "FSxWindowsFileServerVolumeConfiguration":{ + "type":"structure", + "members":{ + "fileSystemId":{"shape":"String"}, + "rootDirectory":{"shape":"String"}, + "authorizationConfig":{"shape":"FSxWindowsFileServerAuthorizationConfig"} + } + }, + "HealthCheckType":{ + "type":"string", + "enum":["docker"] + }, + "HeartbeatMessage":{ + "type":"structure", + "members":{ + "healthy":{"shape":"Boolean"} + } + }, + "HostVolumeProperties":{ + "type":"structure", + "members":{ + "sourcePath":{"shape":"String"} + } + }, + "IAMRoleCredentials":{ + "type":"structure", + "members":{ + "credentialsId":{"shape":"String"}, + "roleArn":{"shape":"String"}, + "accessKeyId":{"shape":"String"}, + "secretAccessKey":{"shape":"SensitiveString"}, + "sessionToken":{"shape":"String"}, + "expiration":{"shape":"String"} + } + }, + "IAMRoleCredentialsAckRequest":{ + "type":"structure", + "members":{ + "messageId":{"shape":"String"}, + "expiration":{"shape":"String"}, + "credentialsId":{"shape":"String"} + } + }, + "IAMRoleCredentialsMessage":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "roleCredentials":{"shape":"IAMRoleCredentials"}, + "roleType":{"shape":"RoleType"}, + "messageId":{"shape":"String"} + } + }, + "IPv4AddressAssignment":{ + "type":"structure", + "members":{ + "privateAddress":{"shape":"String"}, + "primary":{"shape":"Boolean"} + } + }, + "IPv4AddressList":{ + "type":"list", + "member":{"shape":"IPv4AddressAssignment"} + }, + "IPv6AddressAssignment":{ + "type":"structure", + "members":{ + "address":{"shape":"String"} + } + }, + "IPv6AddressList":{ + "type":"list", + "member":{"shape":"IPv6AddressAssignment"} + }, + "InactiveInstanceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Integer":{"type":"integer"}, + "InvalidClusterException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + 
"exception":true + }, + "InvalidInstanceException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ManagedAgent":{ + "type":"structure", + "members":{ + "name":{"shape":"ManagedAgentName"}, + "properties":{"shape":"StringMap"} + } + }, + "ManagedAgentList":{ + "type":"list", + "member":{"shape":"ManagedAgent"} + }, + "ManagedAgentName":{ + "type":"string", + "enum":["ExecuteCommandAgent"] + }, + "FirelensConfiguration":{ + "type":"structure", + "members":{ + "type":{"shape":"FirelensConfigurationType"}, + "options":{"shape":"FirelensConfigurationOptionsMap"} + } + }, + "FirelensConfigurationType":{ + "type":"string", + "enum":[ + "fluentd", + "fluentbit" + ] + }, + "FirelensConfigurationOptionsMap": { + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "MountPoint":{ + "type":"structure", + "members":{ + "sourceVolume":{"shape":"String"}, + "containerPath":{"shape":"String"}, + "readOnly":{"shape":"Boolean"} + } + }, + "MountPointList":{ + "type":"list", + "member":{"shape":"MountPoint"} + }, + "NackRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "messageId":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "PayloadMessage":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "tasks":{"shape":"TaskList"}, + "generatedAt":{"shape":"Long"}, + "messageId":{"shape":"String"}, + "seqNum":{"shape":"Integer"} + } + }, + "PerformUpdateMessage":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "updateInfo":{"shape":"UpdateInfo"}, + "messageId":{"shape":"String"} + } + }, + "PollRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "seqNum":{"shape":"Integer"}, + 
"versionInfo":{"shape":"VersionInfo"}, + "sendCredentials":{"shape":"Boolean"} + } + }, + "PortMapping":{ + "type":"structure", + "members":{ + "containerPort":{"shape":"Integer"}, + "hostPort":{"shape":"Integer"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "PortMappingList":{ + "type":"list", + "member":{"shape":"PortMapping"} + }, + "ProxyConfiguration":{ + "type":"structure", + "members":{ + "type":{"shape":"ProxyConfigurationType"}, + "containerName":{"shape":"String"}, + "properties":{"shape":"StringMap"} + } + }, + "ProxyConfigurationType":{ + "type":"string", + "enum":["APPMESH"] + }, + "RegistryAuthenticationData":{ + "type":"structure", + "members":{ + "type":{"shape":"AuthenticationType"}, + "ecrAuthData":{"shape":"ECRAuthData"}, + "asmAuthData":{"shape":"ASMAuthData"} + } + }, + "RoleType":{ + "type":"string", + "enum":[ + "TaskApplication", + "TaskExecution" + ] + }, + "Scope":{ + "type":"string", + "enum":[ + "task", + "shared" + ] + }, + "Secret":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "valueFrom":{"shape":"String"}, + "containerPath":{"shape":"String"}, + "type":{"shape":"SecretType"}, + "region":{"shape":"String"}, + "provider":{"shape":"SecretProvider"}, + "target":{"shape":"SecretTarget"} + } + }, + "SecretList":{ + "type":"list", + "member":{"shape":"Secret"} + }, + "SecretProvider":{ + "type":"string", + "enum":[ + "ssm", + "asm" + ] + }, + "SecretTarget":{ + "type":"string", + "enum":[ + "CONTAINER", + "LOG_DRIVER" + ] + }, + "SecretType":{ + "type":"string", + "enum":[ + "ENVIRONMENT_VARIABLE" + ] + }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "StageUpdateMessage":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "updateInfo":{"shape":"UpdateInfo"}, + "messageId":{"shape":"String"} + } + }, + 
"String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Task":{ + "type":"structure", + "members":{ + "arn":{"shape":"String"}, + "containers":{"shape":"ContainerList"}, + "desiredStatus":{"shape":"String"}, + "family":{"shape":"String"}, + "overrides":{"shape":"String"}, + "version":{"shape":"String"}, + "taskDefinitionAccountId":{"shape":"String"}, + "volumes":{"shape":"VolumeList"}, + "roleCredentials":{"shape":"IAMRoleCredentials"}, + "executionRoleCredentials":{"shape":"IAMRoleCredentials"}, + "elasticNetworkInterfaces":{"shape":"ElasticNetworkInterfaceList"}, + "cpu":{"shape":"Double"}, + "memory":{"shape":"Integer"}, + "associations":{"shape":"Associations"}, + "pidMode":{"shape":"String"}, + "ipcMode":{"shape":"String"}, + "proxyConfiguration":{"shape":"ProxyConfiguration"}, + "launchType":{"shape":"String"} + } + }, + "TaskList":{ + "type":"list", + "member":{"shape":"Task"} + }, + "TransportProtocol":{ + "type":"string", + "enum":[ + "tcp", + "udp" + ] + }, + "UpdateInfo":{ + "type":"structure", + "members":{ + "location":{"shape":"String"}, + "signature":{"shape":"String"} + } + }, + "VersionInfo":{ + "type":"structure", + "members":{ + "agentVersion":{"shape":"String"}, + "agentHash":{"shape":"String"}, + "dockerVersion":{"shape":"String"} + } + }, + "Volume":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "type":{"shape":"VolumeType"}, + "host":{"shape":"HostVolumeProperties"}, + "dockerVolumeConfiguration":{"shape":"DockerVolumeConfiguration"}, + "efsVolumeConfiguration":{"shape":"EFSVolumeConfiguration"}, + "fsxWindowsFileServerVolumeConfiguration":{"shape":"FSxWindowsFileServerVolumeConfiguration"} + } + }, + "VolumeFrom":{ + "type":"structure", + "members":{ + "sourceContainer":{"shape":"String"}, + "readOnly":{"shape":"Boolean"} + } + }, + "VolumeFromList":{ + "type":"list", + 
"member":{"shape":"VolumeFrom"} + }, + "VolumeList":{ + "type":"list", + "member":{"shape":"Volume"} + }, + "VolumeType":{ + "type":"string", + "enum":[ + "host", + "docker", + "efs", + "fsxWindowsFileServer" + ] + }, + "TaskIdentifier": { + "type": "structure", + "members": { + "taskArn": {"shape":"String"}, + "taskClusterArn": {"shape": "String"}, + "desiredStatus": {"shape":"String"} + } + }, + "TaskIdentifierList": { + "type": "list", + "member": {"shape": "TaskIdentifier"} + }, + "TaskManifestMessage": { + "type": "structure", + "members": { + "containerInstanceArn": {"shape":"String"}, + "clusterArn": {"shape":"String"}, + "tasks": {"shape": "TaskIdentifierList"}, + "generatedAt": {"shape": "Long"}, + "messageId": {"shape": "String"}, + "timeline": {"shape":"Long"} + } + }, + "TaskStopVerificationAck": { + "type": "structure", + "members": { + "stopTasks": {"shape": "TaskIdentifierList"}, + "generatedAt": {"shape": "Long"}, + "messageId": {"shape": "String"} + } + }, + "TaskStopVerificationMessage": { + "type": "structure", + "members": { + "stopCandidates": {"shape": "TaskIdentifierList"}, + "messageId": {"shape": "String"} + } + } + } +} diff --git a/agent/acs/model/ecsacs/api.go b/agent/acs/model/ecsacs/api.go new file mode 100644 index 00000000000..2d45dfc88a4 --- /dev/null +++ b/agent/acs/model/ecsacs/api.go @@ -0,0 +1,1829 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecsacs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/private/protocol" +) + +type ASMAuthData struct { + _ struct{} `type:"structure"` + + CredentialsParameter *string `locationName:"credentialsParameter" type:"string"` + + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation +func (s ASMAuthData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ASMAuthData) GoString() string { + return s.String() +} + +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type AckRequest struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s AckRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AckRequest) GoString() string { + return s.String() +} + +type Association struct { + _ struct{} `type:"structure"` + + Containers []*string `locationName:"containers" type:"list"` + + Content *EncodedString `locationName:"content" type:"structure"` + + Name *string `locationName:"name" type:"string"` + + Type *string `locationName:"type" type:"string" enum:"AssociationType"` +} + +// String returns the string representation +func (s Association) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Association) GoString() string { + return s.String() +} + +type AttachInstanceNetworkInterfacesInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` +} + +// String returns the string representation +func (s AttachInstanceNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstanceNetworkInterfacesInput) GoString() string { + return s.String() +} + +type 
AttachInstanceNetworkInterfacesMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` +} + +// String returns the string representation +func (s AttachInstanceNetworkInterfacesMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstanceNetworkInterfacesMessage) GoString() string { + return s.String() +} + +type AttachInstanceNetworkInterfacesOutput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s AttachInstanceNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstanceNetworkInterfacesOutput) GoString() string { + return s.String() +} + +type AttachTaskNetworkInterfacesInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + TaskArn *string `locationName:"taskArn" type:"string"` + + WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` +} 
+ +// String returns the string representation +func (s AttachTaskNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachTaskNetworkInterfacesInput) GoString() string { + return s.String() +} + +type AttachTaskNetworkInterfacesMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + TaskArn *string `locationName:"taskArn" type:"string"` + + WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` +} + +// String returns the string representation +func (s AttachTaskNetworkInterfacesMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachTaskNetworkInterfacesMessage) GoString() string { + return s.String() +} + +type AttachTaskNetworkInterfacesOutput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s AttachTaskNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachTaskNetworkInterfacesOutput) GoString() string { + return s.String() +} + +type BadRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BadRequestException) String() 
string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequestException) GoString() string { + return s.String() +} + +func newErrorBadRequestException(v protocol.ResponseMetadata) error { + return &BadRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BadRequestException) Code() string { + return "BadRequestException" +} + +// Message returns the exception's message. +func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CloseMessage struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s CloseMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloseMessage) GoString() string { + return s.String() +} + +type Container struct { + _ struct{} `type:"structure"` + + Command []*string `locationName:"command" type:"list"` + + ContainerArn *string `locationName:"containerArn" type:"string"` + + Cpu *int64 `locationName:"cpu" type:"integer"` + + DependsOn []*ContainerDependency `locationName:"dependsOn" type:"list"` + + DockerConfig *DockerConfig `locationName:"dockerConfig" type:"structure"` + + EntryPoint []*string `locationName:"entryPoint" type:"list"` + + Environment map[string]*string `locationName:"environment" type:"map"` + + EnvironmentFiles []*EnvironmentFile `locationName:"environmentFiles" type:"list"` + + Essential *bool `locationName:"essential" type:"boolean"` + + FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` + + HealthCheckType *string `locationName:"healthCheckType" type:"string" enum:"HealthCheckType"` + + Image *string `locationName:"image" type:"string"` + + Links []*string `locationName:"links" type:"list"` + + LogsAuthStrategy *string `locationName:"logsAuthStrategy" type:"string" enum:"AuthStrategy"` + + ManagedAgents []*ManagedAgent `locationName:"managedAgents" type:"list"` + + Memory *int64 `locationName:"memory" type:"integer"` + + MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` + + Name *string `locationName:"name" type:"string"` + + Overrides *string `locationName:"overrides" type:"string"` + + PortMappings []*PortMapping `locationName:"portMappings" type:"list"` + + RegistryAuthentication *RegistryAuthenticationData 
`locationName:"registryAuthentication" type:"structure"` + + Secrets []*Secret `locationName:"secrets" type:"list"` + + StartTimeout *int64 `locationName:"startTimeout" type:"integer"` + + StopTimeout *int64 `locationName:"stopTimeout" type:"integer"` + + VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` +} + +// String returns the string representation +func (s Container) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Container) GoString() string { + return s.String() +} + +type ContainerDependency struct { + _ struct{} `type:"structure"` + + Condition *string `locationName:"condition" type:"string" enum:"ContainerCondition"` + + ContainerName *string `locationName:"containerName" type:"string"` +} + +// String returns the string representation +func (s ContainerDependency) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDependency) GoString() string { + return s.String() +} + +type DockerConfig struct { + _ struct{} `type:"structure"` + + Config *string `locationName:"config" type:"string"` + + HostConfig *string `locationName:"hostConfig" type:"string"` + + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s DockerConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DockerConfig) GoString() string { + return s.String() +} + +type DockerVolumeConfiguration struct { + _ struct{} `type:"structure"` + + Autoprovision *bool `locationName:"autoprovision" type:"boolean"` + + Driver *string `locationName:"driver" type:"string"` + + DriverOpts map[string]*string `locationName:"driverOpts" type:"map"` + + Labels map[string]*string `locationName:"labels" type:"map"` + + Scope *string `locationName:"scope" type:"string" enum:"Scope"` +} + +// String returns the string representation +func (s 
DockerVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DockerVolumeConfiguration) GoString() string { + return s.String() +} + +type ECRAuthData struct { + _ struct{} `type:"structure"` + + EndpointOverride *string `locationName:"endpointOverride" type:"string"` + + Region *string `locationName:"region" type:"string"` + + RegistryId *string `locationName:"registryId" type:"string"` + + UseExecutionRole *bool `locationName:"useExecutionRole" type:"boolean"` +} + +// String returns the string representation +func (s ECRAuthData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ECRAuthData) GoString() string { + return s.String() +} + +type EFSAuthorizationConfig struct { + _ struct{} `type:"structure"` + + AccessPointId *string `locationName:"accessPointId" type:"string"` + + Iam *string `locationName:"iam" type:"string" enum:"EFSAuthorizationConfigIAM"` +} + +// String returns the string representation +func (s EFSAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSAuthorizationConfig) GoString() string { + return s.String() +} + +type EFSVolumeConfiguration struct { + _ struct{} `type:"structure"` + + AuthorizationConfig *EFSAuthorizationConfig `locationName:"authorizationConfig" type:"structure"` + + FileSystemId *string `locationName:"fileSystemId" type:"string"` + + RootDirectory *string `locationName:"rootDirectory" type:"string"` + + TransitEncryption *string `locationName:"transitEncryption" type:"string" enum:"EFSTransitEncryption"` + + TransitEncryptionPort *int64 `locationName:"transitEncryptionPort" type:"integer"` +} + +// String returns the string representation +func (s EFSVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSVolumeConfiguration) GoString() 
string { + return s.String() +} + +type ElasticNetworkInterface struct { + _ struct{} `type:"structure"` + + AttachmentArn *string `locationName:"attachmentArn" type:"string"` + + DomainName []*string `locationName:"domainName" type:"list"` + + DomainNameServers []*string `locationName:"domainNameServers" type:"list"` + + Ec2Id *string `locationName:"ec2Id" type:"string"` + + InterfaceAssociationProtocol *string `locationName:"interfaceAssociationProtocol" type:"string" enum:"NetworkInterfaceAssociationProtocol"` + + InterfaceVlanProperties *NetworkInterfaceVlanProperties `locationName:"interfaceVlanProperties" type:"structure"` + + Ipv4Addresses []*IPv4AddressAssignment `locationName:"ipv4Addresses" type:"list"` + + Ipv6Addresses []*IPv6AddressAssignment `locationName:"ipv6Addresses" type:"list"` + + MacAddress *string `locationName:"macAddress" type:"string"` + + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + SubnetGatewayIpv4Address *string `locationName:"subnetGatewayIpv4Address" type:"string"` +} + +// String returns the string representation +func (s ElasticNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticNetworkInterface) GoString() string { + return s.String() +} + +type EncodedString struct { + _ struct{} `type:"structure"` + + Encoding *string `locationName:"encoding" type:"string" enum:"Encoding"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s EncodedString) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncodedString) GoString() string { + return s.String() +} + +type EnvironmentFile struct { + _ struct{} `type:"structure"` + + Type *string `locationName:"type" type:"string" enum:"EnvironmentFileType"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s 
EnvironmentFile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentFile) GoString() string { + return s.String() +} + +type ErrorInput struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInput) GoString() string { + return s.String() +} + +type ErrorMessage struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ErrorMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorMessage) GoString() string { + return s.String() +} + +type ErrorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ErrorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorOutput) GoString() string { + return s.String() +} + +type FSxWindowsFileServerAuthorizationConfig struct { + _ struct{} `type:"structure"` + + CredentialsParameter *string `locationName:"credentialsParameter" type:"string"` + + Domain *string `locationName:"domain" type:"string"` +} + +// String returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) GoString() string { + return s.String() +} + +type FSxWindowsFileServerVolumeConfiguration struct { + _ struct{} `type:"structure"` + + AuthorizationConfig *FSxWindowsFileServerAuthorizationConfig `locationName:"authorizationConfig" type:"structure"` + + FileSystemId *string `locationName:"fileSystemId" type:"string"` + + 
RootDirectory *string `locationName:"rootDirectory" type:"string"` +} + +// String returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) GoString() string { + return s.String() +} + +type FirelensConfiguration struct { + _ struct{} `type:"structure"` + + Options map[string]*string `locationName:"options" type:"map"` + + Type *string `locationName:"type" type:"string" enum:"FirelensConfigurationType"` +} + +// String returns the string representation +func (s FirelensConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirelensConfiguration) GoString() string { + return s.String() +} + +type HeartbeatInput struct { + _ struct{} `type:"structure"` + + Healthy *bool `locationName:"healthy" type:"boolean"` +} + +// String returns the string representation +func (s HeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeartbeatInput) GoString() string { + return s.String() +} + +type HeartbeatMessage struct { + _ struct{} `type:"structure"` + + Healthy *bool `locationName:"healthy" type:"boolean"` +} + +// String returns the string representation +func (s HeartbeatMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeartbeatMessage) GoString() string { + return s.String() +} + +type HeartbeatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeartbeatOutput) GoString() string { + return s.String() +} + +type HostVolumeProperties struct { + _ struct{} `type:"structure"` + + SourcePath *string 
`locationName:"sourcePath" type:"string"` +} + +// String returns the string representation +func (s HostVolumeProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostVolumeProperties) GoString() string { + return s.String() +} + +type IAMRoleCredentials struct { + _ struct{} `type:"structure"` + + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + CredentialsId *string `locationName:"credentialsId" type:"string"` + + Expiration *string `locationName:"expiration" type:"string"` + + RoleArn *string `locationName:"roleArn" type:"string"` + + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + SessionToken *string `locationName:"sessionToken" type:"string"` +} + +// String returns the string representation +func (s IAMRoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IAMRoleCredentials) GoString() string { + return s.String() +} + +type IAMRoleCredentialsAckRequest struct { + _ struct{} `type:"structure"` + + CredentialsId *string `locationName:"credentialsId" type:"string"` + + Expiration *string `locationName:"expiration" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s IAMRoleCredentialsAckRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IAMRoleCredentialsAckRequest) GoString() string { + return s.String() +} + +type IAMRoleCredentialsMessage struct { + _ struct{} `type:"structure"` + + MessageId *string `locationName:"messageId" type:"string"` + + RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` + + RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"` + + TaskArn *string `locationName:"taskArn" type:"string"` +} + +// String returns the string representation 
+func (s IAMRoleCredentialsMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IAMRoleCredentialsMessage) GoString() string { + return s.String() +} + +type IPv4AddressAssignment struct { + _ struct{} `type:"structure"` + + Primary *bool `locationName:"primary" type:"boolean"` + + PrivateAddress *string `locationName:"privateAddress" type:"string"` +} + +// String returns the string representation +func (s IPv4AddressAssignment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPv4AddressAssignment) GoString() string { + return s.String() +} + +type IPv6AddressAssignment struct { + _ struct{} `type:"structure"` + + Address *string `locationName:"address" type:"string"` +} + +// String returns the string representation +func (s IPv6AddressAssignment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPv6AddressAssignment) GoString() string { + return s.String() +} + +type InactiveInstanceException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InactiveInstanceException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InactiveInstanceException) GoString() string { + return s.String() +} + +func newErrorInactiveInstanceException(v protocol.ResponseMetadata) error { + return &InactiveInstanceException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InactiveInstanceException) Code() string { + return "InactiveInstanceException" +} + +// Message returns the exception's message. 
+func (s *InactiveInstanceException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InactiveInstanceException) OrigErr() error { + return nil +} + +func (s *InactiveInstanceException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InactiveInstanceException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InactiveInstanceException) RequestID() string { + return s.RespMetadata.RequestID +} + +type InvalidClusterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidClusterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidClusterException) GoString() string { + return s.String() +} + +func newErrorInvalidClusterException(v protocol.ResponseMetadata) error { + return &InvalidClusterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClusterException) Code() string { + return "InvalidClusterException" +} + +// Message returns the exception's message. +func (s *InvalidClusterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClusterException) OrigErr() error { + return nil +} + +func (s *InvalidClusterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidClusterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClusterException) RequestID() string { + return s.RespMetadata.RequestID +} + +type InvalidInstanceException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidInstanceException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidInstanceException) GoString() string { + return s.String() +} + +func newErrorInvalidInstanceException(v protocol.ResponseMetadata) error { + return &InvalidInstanceException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidInstanceException) Code() string { + return "InvalidInstanceException" +} + +// Message returns the exception's message. +func (s *InvalidInstanceException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidInstanceException) OrigErr() error { + return nil +} + +func (s *InvalidInstanceException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidInstanceException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidInstanceException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ManagedAgent struct { + _ struct{} `type:"structure"` + + Name *string `locationName:"name" type:"string" enum:"ManagedAgentName"` + + Properties map[string]*string `locationName:"properties" type:"map"` +} + +// String returns the string representation +func (s ManagedAgent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedAgent) GoString() string { + return s.String() +} + +type MountPoint struct { + _ struct{} `type:"structure"` + + ContainerPath *string `locationName:"containerPath" type:"string"` + + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + SourceVolume *string `locationName:"sourceVolume" type:"string"` +} + +// String returns the string representation +func (s MountPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountPoint) GoString() string { + return s.String() +} + +type NackRequest struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s NackRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NackRequest) GoString() string { + return s.String() +} + +type NetworkInterfaceVlanProperties struct { + _ struct{} `type:"structure"` + + TrunkInterfaceMacAddress *string `locationName:"trunkInterfaceMacAddress" type:"string"` + + VlanId *string `locationName:"vlanId" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceVlanProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string 
representation +func (s NetworkInterfaceVlanProperties) GoString() string { + return s.String() +} + +type PayloadInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + SeqNum *int64 `locationName:"seqNum" type:"integer"` + + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s PayloadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PayloadInput) GoString() string { + return s.String() +} + +type PayloadMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + SeqNum *int64 `locationName:"seqNum" type:"integer"` + + Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s PayloadMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PayloadMessage) GoString() string { + return s.String() +} + +type PayloadOutput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s PayloadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PayloadOutput) GoString() string { + return s.String() +} + +type 
PerformUpdateInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` +} + +// String returns the string representation +func (s PerformUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PerformUpdateInput) GoString() string { + return s.String() +} + +type PerformUpdateMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` +} + +// String returns the string representation +func (s PerformUpdateMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PerformUpdateMessage) GoString() string { + return s.String() +} + +type PerformUpdateOutput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s PerformUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PerformUpdateOutput) GoString() string { + return s.String() +} + +type PollInput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + SendCredentials *bool `locationName:"sendCredentials" type:"boolean"` + 
+ SeqNum *int64 `locationName:"seqNum" type:"integer"` + + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s PollInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollInput) GoString() string { + return s.String() +} + +type PollOutput struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PollOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollOutput) GoString() string { + return s.String() +} + +type PollRequest struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + SendCredentials *bool `locationName:"sendCredentials" type:"boolean"` + + SeqNum *int64 `locationName:"seqNum" type:"integer"` + + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s PollRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PollRequest) GoString() string { + return s.String() +} + +type PortMapping struct { + _ struct{} `type:"structure"` + + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + HostPort *int64 `locationName:"hostPort" type:"integer"` + + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s PortMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortMapping) GoString() string { + return s.String() +} + +type ProxyConfiguration struct { + _ struct{} `type:"structure"` + + ContainerName *string `locationName:"containerName" 
type:"string"` + + Properties map[string]*string `locationName:"properties" type:"map"` + + Type *string `locationName:"type" type:"string" enum:"ProxyConfigurationType"` +} + +// String returns the string representation +func (s ProxyConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProxyConfiguration) GoString() string { + return s.String() +} + +type RefreshTaskIAMRoleCredentialsInput struct { + _ struct{} `type:"structure"` + + MessageId *string `locationName:"messageId" type:"string"` + + RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` + + RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"` + + TaskArn *string `locationName:"taskArn" type:"string"` +} + +// String returns the string representation +func (s RefreshTaskIAMRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTaskIAMRoleCredentialsInput) GoString() string { + return s.String() +} + +type RefreshTaskIAMRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + CredentialsId *string `locationName:"credentialsId" type:"string"` + + Expiration *string `locationName:"expiration" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s RefreshTaskIAMRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshTaskIAMRoleCredentialsOutput) GoString() string { + return s.String() +} + +type RegistryAuthenticationData struct { + _ struct{} `type:"structure"` + + AsmAuthData *ASMAuthData `locationName:"asmAuthData" type:"structure"` + + EcrAuthData *ECRAuthData `locationName:"ecrAuthData" type:"structure"` + + Type *string `locationName:"type" type:"string" enum:"AuthenticationType"` +} + +// String returns the string representation 
+func (s RegistryAuthenticationData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegistryAuthenticationData) GoString() string { + return s.String() +} + +type Secret struct { + _ struct{} `type:"structure"` + + ContainerPath *string `locationName:"containerPath" type:"string"` + + Name *string `locationName:"name" type:"string"` + + Provider *string `locationName:"provider" type:"string" enum:"SecretProvider"` + + Region *string `locationName:"region" type:"string"` + + Target *string `locationName:"target" type:"string" enum:"SecretTarget"` + + Type *string `locationName:"type" type:"string" enum:"SecretType"` + + ValueFrom *string `locationName:"valueFrom" type:"string"` +} + +// String returns the string representation +func (s Secret) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Secret) GoString() string { + return s.String() +} + +type ServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerException) GoString() string { + return s.String() +} + +func newErrorServerException(v protocol.ResponseMetadata) error { + return &ServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServerException) Code() string { + return "ServerException" +} + +// Message returns the exception's message. +func (s *ServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ServerException) OrigErr() error { + return nil +} + +func (s *ServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StageUpdateInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` +} + +// String returns the string representation +func (s StageUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageUpdateInput) GoString() string { + return s.String() +} + +type StageUpdateMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` +} + +// String returns the string representation +func (s StageUpdateMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageUpdateMessage) GoString() string { + return s.String() +} + +type StageUpdateOutput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string 
representation +func (s StageUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StageUpdateOutput) GoString() string { + return s.String() +} + +type Task struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + Associations []*Association `locationName:"associations" type:"list"` + + Containers []*Container `locationName:"containers" type:"list"` + + Cpu *float64 `locationName:"cpu" type:"double"` + + DesiredStatus *string `locationName:"desiredStatus" type:"string"` + + ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` + + ExecutionRoleCredentials *IAMRoleCredentials `locationName:"executionRoleCredentials" type:"structure"` + + Family *string `locationName:"family" type:"string"` + + IpcMode *string `locationName:"ipcMode" type:"string"` + + LaunchType *string `locationName:"launchType" type:"string"` + + Memory *int64 `locationName:"memory" type:"integer"` + + Overrides *string `locationName:"overrides" type:"string"` + + PidMode *string `locationName:"pidMode" type:"string"` + + ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` + + RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` + + TaskDefinitionAccountId *string `locationName:"taskDefinitionAccountId" type:"string"` + + Version *string `locationName:"version" type:"string"` + + Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Task) GoString() string { + return s.String() +} + +type TaskIdentifier struct { + _ struct{} `type:"structure"` + + DesiredStatus *string `locationName:"desiredStatus" type:"string"` + + TaskArn *string `locationName:"taskArn" type:"string"` + + TaskClusterArn *string 
`locationName:"taskClusterArn" type:"string"` +} + +// String returns the string representation +func (s TaskIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskIdentifier) GoString() string { + return s.String() +} + +type TaskManifestMessage struct { + _ struct{} `type:"structure"` + + ClusterArn *string `locationName:"clusterArn" type:"string"` + + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + Tasks []*TaskIdentifier `locationName:"tasks" type:"list"` + + Timeline *int64 `locationName:"timeline" type:"long"` +} + +// String returns the string representation +func (s TaskManifestMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskManifestMessage) GoString() string { + return s.String() +} + +type TaskStopVerificationAck struct { + _ struct{} `type:"structure"` + + GeneratedAt *int64 `locationName:"generatedAt" type:"long"` + + MessageId *string `locationName:"messageId" type:"string"` + + StopTasks []*TaskIdentifier `locationName:"stopTasks" type:"list"` +} + +// String returns the string representation +func (s TaskStopVerificationAck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskStopVerificationAck) GoString() string { + return s.String() +} + +type TaskStopVerificationMessage struct { + _ struct{} `type:"structure"` + + MessageId *string `locationName:"messageId" type:"string"` + + StopCandidates []*TaskIdentifier `locationName:"stopCandidates" type:"list"` +} + +// String returns the string representation +func (s TaskStopVerificationMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskStopVerificationMessage) GoString() 
string { + return s.String() +} + +type UpdateFailureInput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + MessageId *string `locationName:"messageId" type:"string"` + + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s UpdateFailureInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFailureInput) GoString() string { + return s.String() +} + +type UpdateFailureOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateFailureOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFailureOutput) GoString() string { + return s.String() +} + +type UpdateInfo struct { + _ struct{} `type:"structure"` + + Location *string `locationName:"location" type:"string"` + + Signature *string `locationName:"signature" type:"string"` +} + +// String returns the string representation +func (s UpdateInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInfo) GoString() string { + return s.String() +} + +type VersionInfo struct { + _ struct{} `type:"structure"` + + AgentHash *string `locationName:"agentHash" type:"string"` + + AgentVersion *string `locationName:"agentVersion" type:"string"` + + DockerVersion *string `locationName:"dockerVersion" type:"string"` +} + +// String returns the string representation +func (s VersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersionInfo) GoString() string { + return s.String() +} + +type Volume struct { + _ struct{} `type:"structure"` + + DockerVolumeConfiguration *DockerVolumeConfiguration 
`locationName:"dockerVolumeConfiguration" type:"structure"` + + EfsVolumeConfiguration *EFSVolumeConfiguration `locationName:"efsVolumeConfiguration" type:"structure"` + + FsxWindowsFileServerVolumeConfiguration *FSxWindowsFileServerVolumeConfiguration `locationName:"fsxWindowsFileServerVolumeConfiguration" type:"structure"` + + Host *HostVolumeProperties `locationName:"host" type:"structure"` + + Name *string `locationName:"name" type:"string"` + + Type *string `locationName:"type" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +type VolumeFrom struct { + _ struct{} `type:"structure"` + + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + SourceContainer *string `locationName:"sourceContainer" type:"string"` +} + +// String returns the string representation +func (s VolumeFrom) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeFrom) GoString() string { + return s.String() +} diff --git a/agent/acs/model/generate.go b/agent/acs/model/generate.go new file mode 100644 index 00000000000..6f3c7de1401 --- /dev/null +++ b/agent/acs/model/generate.go @@ -0,0 +1,3 @@ +package model + +//go:generate go run -tags=codegen ../../gogenerate/awssdk.go -typesOnly=true -copyright_file ../../../scripts/copyright_file diff --git a/agent/acs/update_handler/doc.go b/agent/acs/update_handler/doc.go new file mode 100644 index 00000000000..45929d4b18f --- /dev/null +++ b/agent/acs/update_handler/doc.go @@ -0,0 +1,22 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package updater handles requests to update the agent. +// +// Requests are assumed to be two-phase where the phases consist of a 'download' +// followed by an 'update'. Either of these steps may fail. +// The agent is responsible for, on download, correctly downloading the +// referenced file and validating it against the provided checksum. However, +// the actual 'update' component is handled by signaling a watching process via +// exit code. +package updater diff --git a/agent/acs/update_handler/mock/mock_io.go b/agent/acs/update_handler/mock/mock_io.go new file mode 100644 index 00000000000..f0d15848811 --- /dev/null +++ b/agent/acs/update_handler/mock/mock_io.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+
+package mock
+
+import "io"
+
+// nopRWCloser adapts an io.ReadWriter into an io.ReadWriteCloser by
+// embedding the ReadWriter (forwarding Read/Write to it) and adding a
+// no-op Close.
+type nopRWCloser struct {
+	io.ReadWriter
+}
+
+// Close always returns nil and releases nothing; it exists only to
+// satisfy io.Closer.
+func (*nopRWCloser) Close() error { return nil }
+
+// NopReadWriteCloser returns an io.ReadWriteCloser whose Read and Write
+// operate on r and whose Close is a no-op. Intended for tests that need
+// a closeable stream backed by an in-memory buffer.
+func NopReadWriteCloser(r io.ReadWriter) io.ReadWriteCloser {
+	return &nopRWCloser{r}
+}
diff --git a/agent/acs/update_handler/updater.go b/agent/acs/update_handler/updater.go
new file mode 100644
index 00000000000..bf8c621f7c2
--- /dev/null
+++ b/agent/acs/update_handler/updater.go
@@ -0,0 +1,272 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//	http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package updater
+
+import (
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
+	"github.com/aws/amazon-ecs-agent/agent/config"
+	"github.com/aws/amazon-ecs-agent/agent/data"
+	"github.com/aws/amazon-ecs-agent/agent/engine"
+	"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
+	"github.com/aws/amazon-ecs-agent/agent/httpclient"
+	"github.com/aws/amazon-ecs-agent/agent/sighandlers"
+	"github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes"
+	"github.com/aws/amazon-ecs-agent/agent/utils"
+	"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
+	"github.com/aws/amazon-ecs-agent/agent/wsclient"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/cihub/seelog"
+)
+
+// desiredImageFile is the filename, under config.UpdateDownloadDir, into
+// which download() records the basename of a successfully verified update
+// artifact. NOTE(review): presumably read by the external process that
+// applies the update after the agent exits (see package doc in doc.go) —
+// confirm against the host updater scripts.
+const desiredImageFile = "desired-image"
+
+// update describes metadata around an update 2-phase request
+type updater struct {
+	// stage tracks progress through the two-phase protocol; guarded by the
+	// embedded Mutex.
+	stage updateStage
+	// stageTime records when the current stage was entered; it is cleared
+	// in reset() but not otherwise read in this file.
+	stageTime time.Time
+	// downloadMessageID is the most recent message id seen for this update id
+	downloadMessageID string
+	// updateID is a unique identifier for this update used to determine if a
+	// new update request, even with a different message id, is a duplicate or
+	// not
+	updateID string
+	// acs is the ACS connection used to send Ack/Nack responses.
+	acs    wsclient.ClientServer
+	config *config.Config
+	// httpclient performs the artifact download, bounded by
+	// updateDownloadTimeout.
+	httpclient *http.Client
+
+	sync.Mutex
+}
+
+// updateStage enumerates progress through the two-phase update protocol.
+type updateStage int8
+
+const (
+	// updateNone: no update in flight.
+	updateNone updateStage = iota
+	// updateDownloading: a staged update is being downloaded and verified.
+	updateDownloading
+	// updateDownloaded: artifact verified; waiting for PerformUpdate.
+	updateDownloaded
+)
+
+const (
+	// updateDownloadTimeout bounds the HTTP GET of the update artifact.
+	updateDownloadTimeout = 15 * time.Minute
+)
+
+// AddAgentUpdateHandlers adds the needed update handlers to perform agent
+// updates
+// Both handlers share a single updater value so that staging and
+// performing coordinate through its mutex and stage field.
+func AddAgentUpdateHandlers(cs wsclient.ClientServer, cfg *config.Config, state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine) {
+	singleUpdater := &updater{
+		acs:        cs,
+		config:     cfg,
+		httpclient: httpclient.New(updateDownloadTimeout, false),
+	}
+	cs.AddRequestHandler(singleUpdater.stageUpdateHandler())
+	cs.AddRequestHandler(singleUpdater.performUpdateHandler(state, dataClient, taskEngine))
+}
+
+// stageUpdateHandler returns the handler for ACS StageUpdateMessage. It
+// validates the request, downloads the artifact and verifies its sha256
+// digest (synchronously, while holding u's lock), then acks on success or
+// nacks — and resets state — on any failure. A duplicate request for the
+// update already in flight is acked; a different new update nacks the
+// previous one and replaces it.
+func (u *updater) stageUpdateHandler() func(req *ecsacs.StageUpdateMessage) {
+	return func(req *ecsacs.StageUpdateMessage) {
+		u.Lock()
+		defer u.Unlock()
+
+		if req == nil || req.MessageId == nil {
+			seelog.Error("Nil request to stage update or missing MessageID")
+			return
+		}
+
+		// nack rejects this request and clears any in-flight update state.
+		nack := func(reason string) {
+			seelog.Errorf("Nacking StageUpdate; reason: %s", reason)
+			u.acs.MakeRequest(&ecsacs.NackRequest{
+				Cluster:           req.ClusterArn,
+				ContainerInstance: req.ContainerInstanceArn,
+				MessageId:         req.MessageId,
+				Reason:            aws.String(reason),
+			})
+			u.reset()
+		}
+
+		if !u.config.UpdatesEnabled.Enabled() {
+			nack("Updates are disabled")
+			return
+		}
+
+		if err := validateUpdateInfo(req.UpdateInfo); err != nil {
+			nack("Invalid update: " + err.Error())
+			return
+		}
+
+		// NOTE(review): the unguarded *req.UpdateInfo.Signature dereferences
+		// below rely on validateUpdateInfo (defined elsewhere in this
+		// package) rejecting a nil Signature — confirm.
+
+		seelog.Debug("Staging update", "update", req)
+
+		if u.stage != updateNone {
+			// The update's Signature doubles as its identity: a matching
+			// signature means this request duplicates the in-flight update.
+			if u.updateID != "" && u.updateID == *req.UpdateInfo.Signature {
+				seelog.Debug("Update already in progress, acking duplicate message", "id", u.updateID)
+				// Acking here is safe as any currently-downloading update will already be holding
+				// the update lock. A failed download will nack and clear state (while holding the
+				// update lock) before this code is reached, meaning that the above conditional will
+				// not evaluate true (no matching, in-progress update).
+				u.acs.MakeRequest(&ecsacs.AckRequest{
+					Cluster:           req.ClusterArn,
+					ContainerInstance: req.ContainerInstanceArn,
+					MessageId:         req.MessageId,
+				})
+				return
+			} else {
+				// Nack previous update
+				reason := "New update arrived: " + *req.MessageId
+				u.acs.MakeRequest(&ecsacs.NackRequest{
+					Cluster:           req.ClusterArn,
+					ContainerInstance: req.ContainerInstanceArn,
+					MessageId:         &u.downloadMessageID,
+					Reason:            &reason,
+				})
+			}
+		}
+		u.updateID = *req.UpdateInfo.Signature
+		u.stage = updateDownloading
+		u.stageTime = ttime.Now()
+		u.downloadMessageID = *req.MessageId
+
+		// The download runs synchronously under u's lock; duplicate
+		// StageUpdate messages block here until it finishes.
+		err := u.download(req.UpdateInfo)
+		if err != nil {
+			nack("Unable to download: " + err.Error())
+			return
+		}
+
+		u.stage = updateDownloaded
+
+		u.acs.MakeRequest(&ecsacs.AckRequest{
+			Cluster:           req.ClusterArn,
+			ContainerInstance: req.ContainerInstanceArn,
+			MessageId:         req.MessageId,
+		})
+	}
+}
+
+// removeFile, writeFile and createFile are package-level indirections over
+// os/ioutil — presumably so tests can stub filesystem access; confirm in
+// updater_test.go.
+var removeFile = os.Remove
+
+var writeFile = ioutil.WriteFile
+
+var createFile = func(name string) (io.ReadWriteCloser, error) {
+	return os.Create(name)
+}
+
+// download fetches the artifact at info.Location into a randomly named
+// ".ecs-update.tar" file under config.UpdateDownloadDir, computing its
+// sha256 as it streams. The hex digest must equal the whitespace-trimmed
+// info.Signature — i.e. the "signature" field is compared as a checksum
+// here, not verified as a cryptographic signature. On success the
+// artifact's basename is written to desiredImageFile; on any error the
+// partial file is removed.
+func (u *updater) download(info *ecsacs.UpdateInfo) (err error) {
+	if info == nil || info.Location == nil {
+		return errors.New("No location given")
+	}
+	if info.Signature == nil {
+		return errors.New("No signature given")
+	}
+	resp, err := u.httpclient.Get(*info.Location)
+	// resp can carry a body even when Get errors; always close it if present.
+	if resp != nil && resp.Body != nil {
+		defer resp.Body.Close()
+	}
+	if err != nil {
+		return err
+	}
+
+	outFileBasename := utils.RandHex() + ".ecs-update.tar"
+	outFilePath := filepath.Join(u.config.UpdateDownloadDir, outFileBasename)
+	outFile, err := createFile(outFilePath)
+	if err != nil {
+		return err
+	}
+	// The named return err lets this deferred cleanup observe the final
+	// outcome and delete the partial artifact on failure.
+	defer func() {
+		outFile.Close()
+		if err != nil {
+			removeFile(outFilePath)
+		}
+	}()
+
+	// Tee the body through the hash so the digest is computed while the
+	// artifact streams to disk.
+	hashsum := sha256.New()
+	bodyHashReader := io.TeeReader(resp.Body, hashsum)
+	_, err = io.Copy(outFile, bodyHashReader)
+	if err != nil {
+		return err
+	}
+	shasum := hashsum.Sum(nil)
+	shasumString := fmt.Sprintf("%x", shasum)
+
+	if shasumString != strings.TrimSpace(*info.Signature) {
+		return errors.New("Hashsum validation failed")
+	}
+
+	err = writeFile(filepath.Join(u.config.UpdateDownloadDir, desiredImageFile), []byte(outFileBasename+"\n"), 0644)
+	return err
+}
+
+// exit is os.Exit behind a package variable — presumably so tests can
+// intercept process termination; confirm in updater_test.go.
+var exit = os.Exit
+
+// performUpdateHandler returns the handler for ACS PerformUpdateMessage.
+// It nacks when updates are disabled or no update has been downloaded;
+// otherwise it acks, persists engine state via sighandlers.FinalSave, and
+// terminates the agent with exitcodes.ExitUpdate so the watching process
+// can apply the staged update (see package doc in doc.go).
+func (u *updater) performUpdateHandler(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine) func(req *ecsacs.PerformUpdateMessage) {
+	return func(req *ecsacs.PerformUpdateMessage) {
+		u.Lock()
+		defer u.Unlock()
+
+		seelog.Debug("Got perform update request")
+
+		if !u.config.UpdatesEnabled.Enabled() {
+			reason := "Updates are disabled"
+			seelog.Errorf("Nacking PerformUpdate; reason: %s", reason)
+			u.acs.MakeRequest(&ecsacs.NackRequest{
+				Cluster:           req.ClusterArn,
+				ContainerInstance: req.ContainerInstanceArn,
+				MessageId:         req.MessageId,
+				Reason:            aws.String(reason),
+			})
+			return
+		}
+
+		if u.stage != updateDownloaded {
+			seelog.Error("Nacking PerformUpdate; not downloaded")
+			reason := "Cannot perform update; update not downloaded"
+			u.acs.MakeRequest(&ecsacs.NackRequest{
+				Cluster:           req.ClusterArn,
+				ContainerInstance: req.ContainerInstanceArn,
+				MessageId:         req.MessageId,
+				Reason:            aws.String(reason),
+			})
+			return
+		}
+		// Ack before exiting; after exit(ExitUpdate) below this process is
+		// gone, so no further acknowledgement is possible.
+		u.acs.MakeRequest(&ecsacs.AckRequest{
+			Cluster:           req.ClusterArn,
+			ContainerInstance: req.ContainerInstanceArn,
+			MessageId:         req.MessageId,
+		})
+
+		err := sighandlers.FinalSave(state, dataClient, taskEngine)
+		if err != nil {
+			seelog.Critical("Error saving before update exit", "err", err)
+		} else {
+			seelog.Debug("Saved state!")
+		}
+		exit(exitcodes.ExitUpdate)
+	}
+}
+
+// reset clears all in-flight update state, returning the updater to
+// updateNone. Callers must hold u's lock (both handlers above do).
+func (u *updater) reset() {
+	u.updateID = ""
+	u.downloadMessageID = ""
+	u.stage = updateNone
+	u.stageTime = time.Time{}
+}
diff --git a/agent/acs/update_handler/updater_test.go b/agent/acs/update_handler/updater_test.go
new file mode 100644
index 00000000000..7a65735d200
--- /dev/null
+++ b/agent/acs/update_handler/updater_test.go
@@ -0,0 +1,489 @@
+// +build unit
+
+// Copyright Amazon.com Inc.
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package updater + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + mock_io "github.com/aws/amazon-ecs-agent/agent/acs/update_handler/mock" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + mock_http "github.com/aws/amazon-ecs-agent/agent/httpclient/mock" + mock_client "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func ptr(i interface{}) interface{} { + n := reflect.New(reflect.TypeOf(i)) + n.Elem().Set(reflect.ValueOf(i)) + return n.Interface() +} + +func mocks(t *testing.T, cfg *config.Config) (*updater, *gomock.Controller, *config.Config, *mock_client.MockClientServer, *mock_http.MockRoundTripper) { + if cfg == nil { + cfg = &config.Config{ + UpdatesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + UpdateDownloadDir: filepath.Clean("/tmp/test/"), + } + } + ctrl := gomock.NewController(t) + + mockacs := mock_client.NewMockClientServer(ctrl) + mockhttp := mock_http.NewMockRoundTripper(ctrl) + httpClient := httpclient.New(updateDownloadTimeout, 
false) + httpClient.Transport.(httpclient.OverridableTransport).SetTransport(mockhttp) + + u := &updater{ + acs: mockacs, + config: cfg, + httpclient: httpClient, + } + + return u, ctrl, cfg, mockacs, mockhttp +} + +var writtenFile bytes.Buffer + +func mockOS() func() { + oCreateFile := createFile + createFile = func(name string) (io.ReadWriteCloser, error) { + writtenFile.Reset() + return mock_io.NopReadWriteCloser(&writtenFile), nil + } + writeFile = func(filename string, data []byte, perm os.FileMode) error { + return nil + } + exit = func(code int) {} + + return func() { + createFile = oCreateFile + writeFile = ioutil.WriteFile + exit = os.Exit + } +} +func TestStageUpdateWithUpdatesDisabled(t *testing.T) { + u, ctrl, _, mockacs, _ := mocks(t, &config.Config{ + UpdatesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + }) + defer ctrl.Finish() + + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + Reason: ptr("Updates are disabled").(*string), + }}) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + +} + +func TestPerformUpdateWithUpdatesDisabled(t *testing.T) { + u, ctrl, cfg, mockacs, _ := mocks(t, &config.Config{ + UpdatesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + }) + defer ctrl.Finish() + + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + Reason: 
ptr("Updates are disabled").(*string), + }}) + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("c54518806ff4d14b680c35784113e1e7478491fe").(*string), + }, + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) +} + +func TestFullUpdateFlow(t *testing.T) { + // Test support for update via other regions' endpoints. + regions := map[string]string{ + "us-east-1": "s3.amazonaws.com", + "us-east-2": "s3-us-east-2.amazonaws.com", + "us-west-1": "s3-us-west-1.amazonaws.com", + } + + for region, host := range regions { + t.Run(region, func(t *testing.T) { + u, ctrl, cfg, mockacs, mockhttp := mocks(t, nil) + defer ctrl.Finish() + + defer mockOS()() + gomock.InOrder( + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://"+host+"/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + })), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + })), + ) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://" + host + "/amazon-ecs-agent/update.tar").(*string), + Signature: 
ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + + require.Equal(t, "update-tar-data", writtenFile.String(), "incorrect data written") + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://" + host + "/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("c54518806ff4d14b680c35784113e1e7478491fe").(*string), + }, + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) + }) + } +} + +type nackRequestMatcher struct { + *ecsacs.NackRequest +} + +func (m *nackRequestMatcher) Matches(nack interface{}) bool { + other := nack.(*ecsacs.NackRequest) + if m.Cluster != nil && *m.Cluster != *other.Cluster { + return false + } + if m.ContainerInstance != nil && *m.ContainerInstance != *other.ContainerInstance { + return false + } + if m.MessageId != nil && *m.MessageId != *other.MessageId { + return false + } + if m.Reason != nil && *m.Reason != *other.Reason { + return false + } + return true +} + +func TestMissingUpdateInfo(t *testing.T) { + u, ctrl, _, mockacs, _ := mocks(t, nil) + defer ctrl.Finish() + + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + }}) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + }) +} + +func (m *nackRequestMatcher) String() string { + return fmt.Sprintf("Nack request matcher %v", m.NackRequest) +} + +func TestUndownloadedUpdate(t *testing.T) { + u, ctrl, cfg, mockacs, _ := mocks(t, nil) + defer 
ctrl.Finish() + + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + }}) + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) +} + +func TestDuplicateUpdateMessagesWithSuccess(t *testing.T) { + u, ctrl, cfg, mockacs, mockhttp := mocks(t, nil) + defer ctrl.Finish() + + defer mockOS()() + gomock.InOrder( + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + })), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + })), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid3").(*string), + })), + ) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + + // Multiple requests to stage 
something with the same signature should still + // result in only one download / update procedure. + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + + require.Equal(t, "update-tar-data", writtenFile.String(), "incorrect data written") + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid3").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("c54518806ff4d14b680c35784113e1e7478491fe").(*string), + }, + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) +} + +func TestDuplicateUpdateMessagesWithFailure(t *testing.T) { + u, ctrl, cfg, mockacs, mockhttp := mocks(t, nil) + defer ctrl.Finish() + + gomock.InOrder( + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + Reason: ptr("Unable to download: test error").(*string), + })), + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + 
mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + })), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid3").(*string), + })), + ) + + createFile = func(name string) (closer io.ReadWriteCloser, e error) { + return nil, errors.New("test error") + } + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + + defer mockOS()() + + // Multiple requests to stage something with the same signature where the previous update failed + // should cause another update attempt to be started + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + }) + + require.Equal(t, "update-tar-data", writtenFile.String(), "incorrect data written") + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid3").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: 
ptr("c54518806ff4d14b680c35784113e1e7478491fe").(*string), + }, + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) +} + +func TestNewerUpdateMessages(t *testing.T) { + u, ctrl, cfg, mockacs, mockhttp := mocks(t, nil) + defer ctrl.Finish() + + defer mockOS()() + gomock.InOrder( + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("StageMID").(*string), + })), + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("StageMID").(*string), + Reason: ptr("New update arrived: StageMIDNew").(*string), + }}), + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/new.tar")).Return(mock_http.SuccessResponse("newer-update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("StageMIDNew").(*string), + })), + mockacs.EXPECT().MakeRequest(gomock.Eq(&ecsacs.AckRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + })), + ) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("StageMID").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("6caeef375a080e3241781725b357890758d94b15d7ce63f6b2ff1cb5589f2007").(*string), + }, + 
}) + + require.Equal(t, "update-tar-data", writtenFile.String(), "incorrect data written") + writtenFile.Reset() + + // Never perform, make sure a new hash results in a new stage + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("StageMIDNew").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/new.tar").(*string), + Signature: ptr("9c6ea7bd7d49f95b6d516517e453b965897109bf8a1d6ff3a6e57287049eb2de").(*string), + }, + }) + + require.Equal(t, "newer-update-tar-data", writtenFile.String(), "incorrect data written") + + taskEngine := engine.NewTaskEngine(cfg, nil, nil, nil, nil, nil, nil, nil, nil) + msg := &ecsacs.PerformUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("mid2").(*string), + UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("c54518806ff4d14b680c35784113e1e7478491fe").(*string), + }, + } + + u.performUpdateHandler(dockerstate.NewTaskEngineState(), data.NewNoopClient(), taskEngine)(msg) +} + +func TestValidationError(t *testing.T) { + u, ctrl, _, mockacs, mockhttp := mocks(t, nil) + defer ctrl.Finish() + + defer mockOS()() + gomock.InOrder( + mockhttp.EXPECT().RoundTrip(mock_http.NewHTTPSimpleMatcher("GET", "https://s3.amazonaws.com/amazon-ecs-agent/update.tar")).Return(mock_http.SuccessResponse("update-tar-data"), nil), + mockacs.EXPECT().MakeRequest(&nackRequestMatcher{&ecsacs.NackRequest{ + Cluster: ptr("cluster").(*string), + ContainerInstance: ptr("containerInstance").(*string), + MessageId: ptr("StageMID").(*string), + }}), + ) + + u.stageUpdateHandler()(&ecsacs.StageUpdateMessage{ + ClusterArn: ptr("cluster").(*string), + ContainerInstanceArn: ptr("containerInstance").(*string), + MessageId: ptr("StageMID").(*string), 
+ UpdateInfo: &ecsacs.UpdateInfo{ + Location: ptr("https://s3.amazonaws.com/amazon-ecs-agent/update.tar").(*string), + Signature: ptr("Invalid signature").(*string), + }, + }) + + assert.Equal(t, "update-tar-data", writtenFile.String(), "incorrect data written") +} diff --git a/agent/acs/update_handler/validations.go b/agent/acs/update_handler/validations.go new file mode 100644 index 00000000000..ab4f1e7fb63 --- /dev/null +++ b/agent/acs/update_handler/validations.go @@ -0,0 +1,34 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package updater + +import ( + "errors" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" +) + +func validateUpdateInfo(updateInfo *ecsacs.UpdateInfo) error { + if updateInfo == nil { + return errors.New("no update info given") + } + if updateInfo.Location == nil { + return errors.New("no location given") + } + if updateInfo.Signature == nil { + return errors.New("no signature given") + } + + return nil +} diff --git a/agent/agent.go b/agent/agent.go new file mode 100644 index 00000000000..0444729fe1f --- /dev/null +++ b/agent/agent.go @@ -0,0 +1,32 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package main + +import ( + "math/rand" + "os" + "time" + + "github.com/aws/amazon-ecs-agent/agent/app" + "github.com/aws/amazon-ecs-agent/agent/logger" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func main() { + logger.InitSeelog() + os.Exit(app.Run(os.Args[1:])) +} diff --git a/agent/api/appmesh/appmesh.go b/agent/api/appmesh/appmesh.go new file mode 100644 index 00000000000..d971b8bd6f9 --- /dev/null +++ b/agent/api/appmesh/appmesh.go @@ -0,0 +1,126 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package appmesh + +import ( + "fmt" + "strings" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" +) + +const ( + appMesh = "APPMESH" + splitter = "," + ignoredUID = "IgnoredUID" + ignoredGID = "IgnoredGID" + proxyIngressPort = "ProxyIngressPort" + proxyEgressPort = "ProxyEgressPort" + appPorts = "AppPorts" + egressIgnoredIPs = "EgressIgnoredIPs" + egressIgnoredPorts = "EgressIgnoredPorts" + taskMetadataEndpointIP = "169.254.170.2" + instanceMetadataEndpointIP = "169.254.169.254" +) + +// AppMesh contains information of app mesh config +type AppMesh struct { + // ContainerName is the proxy container name + ContainerName string + // IgnoredUID specifies egress traffic from the processes owned by the UID will be ignored + IgnoredUID string + // IgnoredGID specifies egress traffic from the processes owned by the GID will be ignored + IgnoredGID string + // ProxyIngressPort is the ingress port number that proxy is listening on + ProxyIngressPort string + // ProxyEgressPort is the egress port number that proxy is listening on + ProxyEgressPort string + // AppPorts is the list of ports that the application is listening on + AppPorts []string + // EgressIgnoredIPs is the list of IPs for which egress traffic will be ignored + EgressIgnoredIPs []string + // EgressIgnoredPorts is the list of ports for which egress traffic will be ignored + EgressIgnoredPorts []string +} + +// AppMeshFromACS validates proxy config if it is app mesh type and creates AppMesh object +func AppMeshFromACS(proxyConfig *ecsacs.ProxyConfiguration) (*AppMesh, error) { + + if *proxyConfig.Type != appMesh { + return nil, fmt.Errorf("agent does not support proxy type other than app mesh") + } + + return &AppMesh{ + ContainerName: aws.StringValue(proxyConfig.ContainerName), + IgnoredUID: aws.StringValue(proxyConfig.Properties[ignoredUID]), + IgnoredGID: aws.StringValue(proxyConfig.Properties[ignoredGID]), + ProxyIngressPort: 
aws.StringValue(proxyConfig.Properties[proxyIngressPort]), + ProxyEgressPort: aws.StringValue(proxyConfig.Properties[proxyEgressPort]), + AppPorts: buildAppPorts(proxyConfig), + EgressIgnoredIPs: buildEgressIgnoredIPs(proxyConfig), + EgressIgnoredPorts: buildEgressIgnoredPorts(proxyConfig), + }, nil +} + +// buildAppPorts creates app ports from proxy config +func buildAppPorts(proxyConfig *ecsacs.ProxyConfiguration) []string { + var inputAppPorts []string + if proxyConfig.Properties[appPorts] != nil && len(*proxyConfig.Properties[appPorts]) > 0 { + inputAppPorts = strings.Split(*proxyConfig.Properties[appPorts], splitter) + } + return inputAppPorts +} + +// buildEgressIgnoredIPs creates egress ignored IPs from proxy config +func buildEgressIgnoredIPs(proxyConfig *ecsacs.ProxyConfiguration) []string { + var inputEgressIgnoredIPs []string + if proxyConfig.Properties[egressIgnoredIPs] != nil && len(*proxyConfig.Properties[egressIgnoredIPs]) > 0 { + inputEgressIgnoredIPs = strings.Split(*proxyConfig.Properties[egressIgnoredIPs], splitter) + } + // append agent default egress ignored IPs + return appendDefaultEgressIgnoredIPs(inputEgressIgnoredIPs) +} + +// buildEgressIgnoredPorts creates egress ignored ports from proxy config +func buildEgressIgnoredPorts(proxyConfig *ecsacs.ProxyConfiguration) []string { + var inputEgressIgnoredPorts []string + if proxyConfig.Properties[egressIgnoredPorts] != nil && len(*proxyConfig.Properties[egressIgnoredPorts]) > 0 { + inputEgressIgnoredPorts = strings.Split(*proxyConfig.Properties[egressIgnoredPorts], splitter) + } + return inputEgressIgnoredPorts +} + +// appendDefaultEgressIgnoredIPs append task metadata endpoint ip and +// instance metadata ip address to egress ignored IPs if does not exist +func appendDefaultEgressIgnoredIPs(egressIgnoredIPs []string) []string { + hasTaskMetadataEndpointIP := false + hasInstanceMetadataEndpointIP := false + for _, egressIgnoredIP := range egressIgnoredIPs { + if 
strings.TrimSpace(egressIgnoredIP) == taskMetadataEndpointIP { + hasTaskMetadataEndpointIP = true + } + if strings.TrimSpace(egressIgnoredIP) == instanceMetadataEndpointIP { + hasInstanceMetadataEndpointIP = true + } + } + + if !hasTaskMetadataEndpointIP { + egressIgnoredIPs = append(egressIgnoredIPs, taskMetadataEndpointIP) + } + if !hasInstanceMetadataEndpointIP { + egressIgnoredIPs = append(egressIgnoredIPs, instanceMetadataEndpointIP) + } + + return egressIgnoredIPs +} diff --git a/agent/api/appmesh/appmesh_test.go b/agent/api/appmesh/appmesh_test.go new file mode 100644 index 00000000000..f8d708bcd72 --- /dev/null +++ b/agent/api/appmesh/appmesh_test.go @@ -0,0 +1,146 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package appmesh + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +const ( + mockEgressIgnoredIP1 = "128.0.0.1" + mockEgressIgnoredIP2 = "171.1.3.24" + mockAppPort1 = "8000" + mockAppPort2 = "8001" + mockEgressIgnoredPort1 = "13000" + mockEgressIgnoredPort2 = "13001" + mockIgnoredUID = "1337" + mockIgnoredGID = "2339" + mockProxyIngressPort = "9000" + mockProxyEgressPort = "9001" + mockAppPorts = mockAppPort1 + splitter + mockAppPort2 + mockEgressIgnoredIPs = mockEgressIgnoredIP1 + splitter + mockEgressIgnoredIP2 + mockEgressIgnoredPorts = mockEgressIgnoredPort1 + splitter + mockEgressIgnoredPort2 + mockContainerName = "testEnvoyContainer" +) + +func TestAppMeshFromACS(t *testing.T) { + testProxyConfig := prepareProxyConfig() + + appMesh, err := AppMeshFromACS(&testProxyConfig) + + assert.NoError(t, err) + assert.NotNil(t, appMesh) + assert.Equal(t, mockContainerName, appMesh.ContainerName) + assert.Equal(t, mockIgnoredUID, appMesh.IgnoredUID) + assert.Equal(t, mockIgnoredGID, appMesh.IgnoredGID) + assert.Equal(t, mockProxyEgressPort, appMesh.ProxyEgressPort) + assert.Equal(t, mockProxyIngressPort, appMesh.ProxyIngressPort) + assert.Equal(t, mockAppPort1, appMesh.AppPorts[0]) + assert.Equal(t, mockAppPort2, appMesh.AppPorts[1]) + assert.Equal(t, mockEgressIgnoredIP1, appMesh.EgressIgnoredIPs[0]) + assert.Equal(t, mockEgressIgnoredIP2, appMesh.EgressIgnoredIPs[1]) + assert.Equal(t, taskMetadataEndpointIP, appMesh.EgressIgnoredIPs[2]) + assert.Equal(t, instanceMetadataEndpointIP, appMesh.EgressIgnoredIPs[3]) + assert.Equal(t, mockEgressIgnoredPort1, appMesh.EgressIgnoredPorts[0]) + assert.Equal(t, mockEgressIgnoredPort2, appMesh.EgressIgnoredPorts[1]) +} + +func TestAppMeshFromACSContainsDefaultEgressIgnoredIP(t *testing.T) { + testProxyConfig := prepareProxyConfig() + egressIgnoredIPs := mockEgressIgnoredIPs + splitter + taskMetadataEndpointIP + 
splitter + instanceMetadataEndpointIP + testProxyConfig.Properties[egressIgnoredIPs] = aws.String(egressIgnoredIPs) + + appMesh, err := AppMeshFromACS(&testProxyConfig) + + assert.NoError(t, err) + assert.NotNil(t, appMesh) + assert.Equal(t, mockIgnoredUID, appMesh.IgnoredUID) + assert.Equal(t, mockIgnoredGID, appMesh.IgnoredGID) + assert.Equal(t, mockProxyEgressPort, appMesh.ProxyEgressPort) + assert.Equal(t, mockProxyIngressPort, appMesh.ProxyIngressPort) + assert.Equal(t, mockAppPort1, appMesh.AppPorts[0]) + assert.Equal(t, mockAppPort2, appMesh.AppPorts[1]) + assert.Equal(t, mockEgressIgnoredIP1, appMesh.EgressIgnoredIPs[0]) + assert.Equal(t, mockEgressIgnoredIP2, appMesh.EgressIgnoredIPs[1]) + assert.Equal(t, taskMetadataEndpointIP, appMesh.EgressIgnoredIPs[2]) + assert.Equal(t, instanceMetadataEndpointIP, appMesh.EgressIgnoredIPs[3]) + assert.Equal(t, mockEgressIgnoredPort1, appMesh.EgressIgnoredPorts[0]) + assert.Equal(t, mockEgressIgnoredPort2, appMesh.EgressIgnoredPorts[1]) +} + +func TestAppMeshFromACSNonAppMeshProxyInput(t *testing.T) { + someOtherProxyType := "fooProxy" + testProxyConfig := prepareProxyConfig() + testProxyConfig.Type = &someOtherProxyType + + _, err := AppMeshFromACS(&testProxyConfig) + + assert.Error(t, err) +} + +func TestAppMeshEmptyAppPorts(t *testing.T) { + emptyAppPorts := "" + testProxyConfig := prepareProxyConfig() + testProxyConfig.Properties[appPorts] = &emptyAppPorts + + appMesh, err := AppMeshFromACS(&testProxyConfig) + + assert.NoError(t, err) + assert.Equal(t, 0, len(appMesh.AppPorts)) +} + +func TestAppMeshEmptyIgnoredIPs(t *testing.T) { + emptyIgnoredIPs := "" + testProxyConfig := prepareProxyConfig() + testProxyConfig.Properties[egressIgnoredIPs] = &emptyIgnoredIPs + + appMesh, err := AppMeshFromACS(&testProxyConfig) + + assert.NoError(t, err) + assert.Equal(t, 2, len(appMesh.EgressIgnoredIPs)) +} + +func TestAppMeshEmptyIgnoredPorts(t *testing.T) { + emptyIgnoredPorts := "" + testProxyConfig := prepareProxyConfig() + 
testProxyConfig.Properties[egressIgnoredPorts] = &emptyIgnoredPorts + + appMesh, err := AppMeshFromACS(&testProxyConfig) + + assert.NoError(t, err) + assert.Equal(t, 0, len(appMesh.EgressIgnoredPorts)) +} + +func prepareProxyConfig() ecsacs.ProxyConfiguration { + + return ecsacs.ProxyConfiguration{ + Type: aws.String(appMesh), + Properties: map[string]*string{ + ignoredUID: aws.String(mockIgnoredUID), + ignoredGID: aws.String(mockIgnoredGID), + proxyIngressPort: aws.String(mockProxyIngressPort), + proxyEgressPort: aws.String(mockProxyEgressPort), + appPorts: aws.String(mockAppPorts), + egressIgnoredIPs: aws.String(mockEgressIgnoredIPs), + egressIgnoredPorts: aws.String(mockEgressIgnoredPorts), + }, + ContainerName: aws.String(mockContainerName), + } +} diff --git a/agent/api/container/container.go b/agent/api/container/container.go new file mode 100644 index 00000000000..662cca10520 --- /dev/null +++ b/agent/api/container/container.go @@ -0,0 +1,1326 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "encoding/json" + "fmt" + "strconv" + "sync" + "time" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/credentials" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + // defaultContainerSteadyStateStatus defines the container status at + // which the container is assumed to be in steady state. It is set + // to 'ContainerRunning' unless overridden + defaultContainerSteadyStateStatus = apicontainerstatus.ContainerRunning + + // awslogsAuthExecutionRole is the string value passed in the task payload + // that specifies that the log driver should be authenticated using the + // execution role + awslogsAuthExecutionRole = "ExecutionRole" + + // DockerHealthCheckType is the type of container health check provided by docker + DockerHealthCheckType = "docker" + + // AuthTypeECR is to use image pull auth over ECR + AuthTypeECR = "ecr" + + // AuthTypeASM is to use image pull auth over AWS Secrets Manager + AuthTypeASM = "asm" + + // MetadataURIEnvironmentVariableName defines the name of the environment + // variable in containers' config, which can be used by the containers to access the + // v3 metadata endpoint + MetadataURIEnvironmentVariableName = "ECS_CONTAINER_METADATA_URI" + + // MetadataURIEnvVarNameV4 defines the name of the environment + // variable in containers' config, which can be used by the containers to access the + // v4 metadata endpoint + MetadataURIEnvVarNameV4 = "ECS_CONTAINER_METADATA_URI_V4" + + // MetadataURIFormat defines the URI format for v4 metadata endpoint + MetadataURIFormatV4 = "http://169.254.170.2/v4/%s" + + // SecretProviderSSM is to show secret provider being 
SSM + SecretProviderSSM = "ssm" + + // SecretProviderASM is to show secret provider being ASM + SecretProviderASM = "asm" + + // SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE + SecretTypeEnv = "ENVIRONMENT_VARIABLE" + + // TargetLogDriver is to show secret target being "LOG_DRIVER", the default will be "CONTAINER" + SecretTargetLogDriver = "LOG_DRIVER" + + // neuronVisibleDevicesEnvVar is the env which indicates that the container wants to use inferentia devices. + neuronVisibleDevicesEnvVar = "AWS_NEURON_VISIBLE_DEVICES" +) + +var ( + // MetadataURIFormat defines the URI format for v3 metadata endpoint. Made as a var to be able to + // overwrite it in test. + MetadataURIFormat = "http://169.254.170.2/v3/%s" +) + +// DockerConfig represents additional metadata about a container to run. It's +// remodeled from the `ecsacs` api model file. Eventually it should not exist +// once this remodeling is refactored out. +type DockerConfig struct { + // Config is the configuration used to create container + Config *string `json:"config"` + // HostConfig is the configuration of container related to host resource + HostConfig *string `json:"hostConfig"` + // Version specifies the docker client API version to use + Version *string `json:"version"` +} + +// HealthStatus contains the health check result returned by docker +type HealthStatus struct { + // Status is the container health status + Status apicontainerstatus.ContainerHealthStatus `json:"status,omitempty"` + // Since is the timestamp when container health status changed + Since *time.Time `json:"statusSince,omitempty"` + // ExitCode is the exitcode of health check if failed + ExitCode int `json:"exitCode,omitempty"` + // Output is the output of health check + Output string `json:"output,omitempty"` +} + +type ManagedAgentState struct { + // ID of this managed agent state + ID string `json:"id,omitempty"` + // TODO: [ecs-exec] Change variable name from Status to KnownStatus in future PR to avoid noise + 
// Status is the managed agent health status + Status apicontainerstatus.ManagedAgentStatus `json:"status,omitempty"` + // SentStatus is the managed agent sent status + SentStatus apicontainerstatus.ManagedAgentStatus `json:"sentStatus,omitempty"` + // Reason is a placeholder for failure messaging + Reason string `json:"reason,omitempty"` + // LastStartedAt is the timestamp when the status last went from PENDING->RUNNING + LastStartedAt time.Time `json:"lastStartedAt,omitempty"` + // Metadata holds metadata about the managed agent + Metadata map[string]interface{} `json:"metadata,omitempty"` + // InitFailed indicates if exec agent initialization failed + InitFailed bool `json:"initFailed,omitempty"` +} + +type ManagedAgent struct { + ManagedAgentState + // Name is the name of this managed agent. This name is streamed down from ACS. + Name string `json:"name,omitempty"` + // Properties of this managed agent. Properties are streamed down from ACS. + Properties map[string]string `json:"properties,omitempty"` +} + +// Container is the internal representation of a container in the ECS agent +type Container struct { + // Name is the name of the container specified in the task definition + Name string + // RuntimeID is the docker id of the container + RuntimeID string + // TaskARNUnsafe is the task ARN of the task that the container belongs to. Access should be + // protected by lock i.e. via GetTaskARN and SetTaskARN. + TaskARNUnsafe string `json:"taskARN"` + // DependsOnUnsafe is the field which specifies the ordering for container startup and shutdown. 
+ DependsOnUnsafe []DependsOn `json:"dependsOn,omitempty"` + // ManagedAgentsUnsafe presently contains only the executeCommandAgent + ManagedAgentsUnsafe []ManagedAgent `json:"managedAgents,omitempty"` + // V3EndpointID is a container identifier used to construct v3 metadata endpoint; it's unique among + // all the containers managed by the agent + V3EndpointID string + // Image is the image name specified in the task definition + Image string + // ImageID is the local ID of the image used in the container + ImageID string + // ImageDigest is the sha-256 digest of the container image as pulled from the repository + ImageDigest string + // Command is the command to run in the container which is specified in the task definition + Command []string + // CPU is the cpu limitation of the container which is specified in the task definition + CPU uint `json:"Cpu"` + // GPUIDs is the list of GPU ids for a container + GPUIDs []string + // Memory is the memory limitation of the container which is specified in the task definition + Memory uint + // Links contains a list of containers to link, corresponding to docker option: --link + Links []string + // FirelensConfig contains configuration for a Firelens container + FirelensConfig *FirelensConfig `json:"firelensConfiguration"` + // VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from + VolumesFrom []VolumeFrom `json:"volumesFrom"` + // MountPoints contains a list of volume mount paths + MountPoints []MountPoint `json:"mountPoints"` + // Ports contains a list of ports binding configuration + Ports []PortBinding `json:"portMappings"` + // Secrets contains a list of secret + Secrets []Secret `json:"secrets"` + // Essential denotes whether the container is essential or not + Essential bool + // EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint + EntryPoint *[]string + // Environment is the environment variable set in the container + 
Environment map[string]string `json:"environment"` + // EnvironmentFiles is the list of environmentFile used to populate environment variables + EnvironmentFiles []EnvironmentFile `json:"environmentFiles"` + // Overrides contains the configuration to override of a container + Overrides ContainerOverrides `json:"overrides"` + // DockerConfig is the configuration used to create the container + DockerConfig DockerConfig `json:"dockerConfig"` + // RegistryAuthentication is the auth data used to pull image + RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"` + // HealthCheckType is the mechanism to use for the container health check + // currently it only supports 'DOCKER' + HealthCheckType string `json:"healthCheckType,omitempty"` + // Health contains the health check information of container health check + Health HealthStatus `json:"-"` + // LogsAuthStrategy specifies how the logs driver for the container will be + // authenticated + LogsAuthStrategy string + // StartTimeout specifies the time value after which if a container has a dependency + // on another container and the dependency conditions are 'SUCCESS', 'COMPLETE', 'HEALTHY', + // then that dependency will not be resolved. + StartTimeout uint + // StopTimeout specifies the time value to be passed as StopContainer api call + StopTimeout uint + + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex + + // DesiredStatusUnsafe represents the state where the container should go. Generally, + // the desired status is informed by the ECS backend as a result of either + // API calls made to ECS or decisions made by the ECS service scheduler, + // though the agent may also set the DesiredStatusUnsafe if a different "essential" + // container in the task exits. The DesiredStatus is almost always either + // ContainerRunning or ContainerStopped. + // NOTE: Do not access DesiredStatusUnsafe directly. 
Instead, use `GetDesiredStatus` + // and `SetDesiredStatus`. + // TODO DesiredStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON + // is handled properly so that the state storage continues to work. + DesiredStatusUnsafe apicontainerstatus.ContainerStatus `json:"desiredStatus"` + + // KnownStatusUnsafe represents the state where the container is. + // NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus` + // and `SetKnownStatus`. + // TODO KnownStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON + // is handled properly so that the state storage continues to work. + KnownStatusUnsafe apicontainerstatus.ContainerStatus `json:"KnownStatus"` + + // TransitionDependenciesMap is a map of the dependent container status to other + // dependencies that must be satisfied in order for this container to transition. + TransitionDependenciesMap TransitionDependenciesMap `json:"TransitionDependencySet"` + + // SteadyStateDependencies is a list of containers that must be in "steady state" before + // this one is created + // Note: Current logic requires that the containers specified here are run + // before this container can even be pulled. + // + // Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old + // state files. + SteadyStateDependencies []string `json:"RunDependencies"` + + // Type specifies the container type. Except the 'Normal' type, all other types + // are not directly specified by task definitions, but created by the agent. The + // JSON tag is retained as this field's previous name 'IsInternal' for maintaining + // backwards compatibility. 
Please see JSON parsing hooks for this type for more + // details + Type ContainerType `json:"IsInternal"` + + // AppliedStatus is the status that has been "applied" (e.g., we've called Pull, + // Create, Start, or Stop) but we don't yet know that the application was successful. + // No need to save it in the state file, as agent will synchronize the container status + // on restart and for some operation eg: pull, it has to be recalled again. + AppliedStatus apicontainerstatus.ContainerStatus `json:"-"` + // ApplyingError is an error that occurred trying to transition the container + // to its desired state. It is propagated to the backend in the form + // 'Name: ErrorString' as the 'reason' field. + ApplyingError *apierrors.DefaultNamedError + + // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS + // SubmitContainerStateChange API. + // TODO SentStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON is + // handled properly so that the state storage continues to work. + SentStatusUnsafe apicontainerstatus.ContainerStatus `json:"SentStatus"` + + // MetadataFileUpdated is set to true when we have completed updating the + // metadata file + MetadataFileUpdated bool `json:"metadataFileUpdated"` + + // KnownExitCodeUnsafe specifies the exit code for the container. + // It is exposed outside of the package so that it's marshalled/unmarshalled in + // the JSON body while saving the state. + // NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode` + // and `SetKnownExitCode`. + KnownExitCodeUnsafe *int `json:"KnownExitCode"` + + // KnownPortBindingsUnsafe is an array of port bindings for the container. + KnownPortBindingsUnsafe []PortBinding `json:"KnownPortBindings"` + + // VolumesUnsafe is an array of volume mounts in the container. 
+ VolumesUnsafe []types.MountPoint `json:"-"` + + // NetworkModeUnsafe is the network mode in which the container is started + NetworkModeUnsafe string `json:"-"` + + // NetworksUnsafe denotes the Docker Network Settings in the container. + NetworkSettingsUnsafe *types.NetworkSettings `json:"-"` + + // SteadyStateStatusUnsafe specifies the steady state status for the container + // If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though + // it's not only supposed to be set when the container is being created, it's + // exposed outside of the package so that it's marshalled/unmarshalled in the + // the JSON body while saving the state + SteadyStateStatusUnsafe *apicontainerstatus.ContainerStatus `json:"SteadyStateStatus,omitempty"` + + // ContainerArn is the Arn of this container. + ContainerArn string `json:"ContainerArn,omitempty"` + + // ContainerTornDownUnsafe is set to true when we have cleaned up this container. For now this is only used for the + // pause container + ContainerTornDownUnsafe bool `json:"containerTornDown"` + + createdAt time.Time + startedAt time.Time + finishedAt time.Time + + labels map[string]string +} + +type DependsOn struct { + ContainerName string `json:"containerName"` + Condition string `json:"condition"` +} + +// DockerContainer is a mapping between containers-as-docker-knows-them and +// containers-as-we-know-them. +// This is primarily used in DockerState, but lives here such that tasks and +// containers know how to convert themselves into Docker's desired config format +type DockerContainer struct { + DockerID string `json:"DockerId"` + DockerName string // needed for linking + + Container *Container +} + +type EnvironmentFile struct { + Value string `json:"value"` + Type string `json:"type"` +} + +// MountPoint describes the in-container location of a Volume and references +// that Volume by name. 
+type MountPoint struct { + SourceVolume string `json:"sourceVolume"` + ContainerPath string `json:"containerPath"` + ReadOnly bool `json:"readOnly"` +} + +// FirelensConfig describes the type and options of a Firelens container. +type FirelensConfig struct { + Type string `json:"type"` + Options map[string]string `json:"options"` +} + +// VolumeFrom is a volume which references another container as its source. +type VolumeFrom struct { + SourceContainer string `json:"sourceContainer"` + ReadOnly bool `json:"readOnly"` +} + +// Secret contains all essential attributes needed for ECS secrets vending as environment variables/tmpfs files +type Secret struct { + Name string `json:"name"` + ValueFrom string `json:"valueFrom"` + Region string `json:"region"` + ContainerPath string `json:"containerPath"` + Type string `json:"type"` + Provider string `json:"provider"` + Target string `json:"target"` +} + +// GetSecretResourceCacheKey returns the key required to access the secret +// from the ssmsecret resource +func (s *Secret) GetSecretResourceCacheKey() string { + return s.ValueFrom + "_" + s.Region +} + +// String returns a human readable string representation of DockerContainer +func (dc *DockerContainer) String() string { + if dc == nil { + return "nil" + } + return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String()) +} + +// NewContainerWithSteadyState creates a new Container object with the specified +// steady state. 
Containers that need the non default steady state set will +// use this method instead of setting it directly +func NewContainerWithSteadyState(steadyState apicontainerstatus.ContainerStatus) *Container { + steadyStateStatus := steadyState + return &Container{ + SteadyStateStatusUnsafe: &steadyStateStatus, + } +} + +// KnownTerminal returns true if the container's known status is STOPPED +func (c *Container) KnownTerminal() bool { + return c.GetKnownStatus().Terminal() +} + +// DesiredTerminal returns true if the container's desired status is STOPPED +func (c *Container) DesiredTerminal() bool { + return c.GetDesiredStatus().Terminal() +} + +// GetKnownStatus returns the known status of the container +func (c *Container) GetKnownStatus() apicontainerstatus.ContainerStatus { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.KnownStatusUnsafe +} + +// SetKnownStatus sets the known status of the container and update the container +// applied status +func (c *Container) SetKnownStatus(status apicontainerstatus.ContainerStatus) { + c.lock.Lock() + defer c.lock.Unlock() + + c.KnownStatusUnsafe = status + c.updateAppliedStatusUnsafe(status) +} + +// GetDesiredStatus gets the desired status of the container +func (c *Container) GetDesiredStatus() apicontainerstatus.ContainerStatus { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.DesiredStatusUnsafe +} + +// SetDesiredStatus sets the desired status of the container +func (c *Container) SetDesiredStatus(status apicontainerstatus.ContainerStatus) { + c.lock.Lock() + defer c.lock.Unlock() + + c.DesiredStatusUnsafe = status +} + +// GetSentStatus safely returns the SentStatusUnsafe of the container +func (c *Container) GetSentStatus() apicontainerstatus.ContainerStatus { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.SentStatusUnsafe +} + +// SetSentStatus safely sets the SentStatusUnsafe of the container +func (c *Container) SetSentStatus(status apicontainerstatus.ContainerStatus) { + c.lock.Lock() + defer 
c.lock.Unlock() + + c.SentStatusUnsafe = status +} + +// SetKnownExitCode sets exit code field in container struct +func (c *Container) SetKnownExitCode(i *int) { + c.lock.Lock() + defer c.lock.Unlock() + + c.KnownExitCodeUnsafe = i +} + +// GetKnownExitCode returns the container exit code +func (c *Container) GetKnownExitCode() *int { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.KnownExitCodeUnsafe +} + +// SetRegistryAuthCredentials sets the credentials for pulling image from ECR +func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) { + c.lock.Lock() + defer c.lock.Unlock() + + c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential) +} + +// ShouldPullWithExecutionRole returns whether this container has its own ECR credentials +func (c *Container) ShouldPullWithExecutionRole() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.RegistryAuthentication != nil && + c.RegistryAuthentication.Type == AuthTypeECR && + c.RegistryAuthentication.ECRAuthData != nil && + c.RegistryAuthentication.ECRAuthData.UseExecutionRole +} + +// String returns a human readable string representation of this object +func (c *Container) String() string { + ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image, + c.GetKnownStatus().String(), c.GetDesiredStatus().String()) + if c.GetKnownExitCode() != nil { + ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode()) + } + return ret +} + +// GetSteadyStateStatus returns the steady state status for the container. If +// Container.steadyState is not initialized, the default steady state status +// defined by `defaultContainerSteadyStateStatus` is returned. 
The 'pause' +// container's steady state differs from that of other containers, as the +// 'pause' container can reach its steady state once networking resources +// have been provisioned for it, which is done in the `ContainerResourcesProvisioned` +// state +func (c *Container) GetSteadyStateStatus() apicontainerstatus.ContainerStatus { + if c.SteadyStateStatusUnsafe == nil { + return defaultContainerSteadyStateStatus + } + return *c.SteadyStateStatusUnsafe +} + +// IsKnownSteadyState returns true if the `KnownState` of the container equals +// the `steadyState` defined for the container +func (c *Container) IsKnownSteadyState() bool { + knownStatus := c.GetKnownStatus() + return knownStatus == c.GetSteadyStateStatus() +} + +// GetNextKnownStateProgression returns the state that the container should +// progress to based on its `KnownState`. The progression is +// incremental until the container reaches its steady state. From then on, +// it transitions to `ContainerStopped`. +// +// For example: +// a. if the steady state of the container is defined as `ContainerRunning`, +// the progression is: +// Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie +// +// b. if the steady state of the container is defined as `ContainerResourcesProvisioned`, +// the progression is: +// Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie +// +// c. if the steady state of the container is defined as `ContainerCreated`, +// the progression is: +// Container: None -> Pulled -> Created* -> Stopped -> Zombie +func (c *Container) GetNextKnownStateProgression() apicontainerstatus.ContainerStatus { + if c.IsKnownSteadyState() { + return apicontainerstatus.ContainerStopped + } + + return c.GetKnownStatus() + 1 +} + +// IsInternal returns true if the container type is `ContainerCNIPause` +// or `ContainerNamespacePause`.
 It returns false otherwise +func (c *Container) IsInternal() bool { + return c.Type != ContainerNormal +} + +// IsRunning returns true if the container's known status is either RUNNING +// or RESOURCES_PROVISIONED. It returns false otherwise +func (c *Container) IsRunning() bool { + return c.GetKnownStatus().IsRunning() +} + +// IsMetadataFileUpdated returns true once the metadata file has been updated; +// after the metadata file is ready it will no longer change +func (c *Container) IsMetadataFileUpdated() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.MetadataFileUpdated +} + +// SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true +func (c *Container) SetMetadataFileUpdated() { + c.lock.Lock() + defer c.lock.Unlock() + + c.MetadataFileUpdated = true +} + +// IsEssential returns whether the container is an essential container or not +func (c *Container) IsEssential() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.Essential +} + +// AWSLogAuthExecutionRole returns true if the auth is by execution role +func (c *Container) AWSLogAuthExecutionRole() bool { + return c.LogsAuthStrategy == awslogsAuthExecutionRole +} + +// SetCreatedAt sets the timestamp for container's creation time +func (c *Container) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + c.lock.Lock() + defer c.lock.Unlock() + + c.createdAt = createdAt +} + +// SetStartedAt sets the timestamp for container's start time +func (c *Container) SetStartedAt(startedAt time.Time) { + if startedAt.IsZero() { + return + } + + c.lock.Lock() + defer c.lock.Unlock() + + c.startedAt = startedAt +} + +// SetFinishedAt sets the timestamp for container's stopped time +func (c *Container) SetFinishedAt(finishedAt time.Time) { + if finishedAt.IsZero() { + return + } + + c.lock.Lock() + defer c.lock.Unlock() + + c.finishedAt = finishedAt +} + +// GetCreatedAt returns the timestamp for container's creation time +func (c *Container) GetCreatedAt() 
time.Time { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.createdAt +} + +// GetStartedAt returns the timestamp for container's start time +func (c *Container) GetStartedAt() time.Time { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.startedAt +} + +// GetFinishedAt returns the timestamp for container's stopped time +func (c *Container) GetFinishedAt() time.Time { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.finishedAt +} + +// SetLabels sets the labels for a container +func (c *Container) SetLabels(labels map[string]string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.labels = labels +} + +// SetRuntimeID sets the DockerID for a container +func (c *Container) SetRuntimeID(RuntimeID string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.RuntimeID = RuntimeID +} + +// GetRuntimeID gets the DockerID for a container +func (c *Container) GetRuntimeID() string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.RuntimeID +} + +// SetImageDigest sets the ImageDigest for a container +func (c *Container) SetImageDigest(ImageDigest string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.ImageDigest = ImageDigest +} + +// GetImageDigest gets the ImageDigest for a container +func (c *Container) GetImageDigest() string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.ImageDigest +} + +// GetLabels gets the labels for a container +func (c *Container) GetLabels() map[string]string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.labels +} + +// SetKnownPortBindings sets the ports for a container +func (c *Container) SetKnownPortBindings(ports []PortBinding) { + c.lock.Lock() + defer c.lock.Unlock() + + c.KnownPortBindingsUnsafe = ports +} + +// GetKnownPortBindings gets the ports for a container +func (c *Container) GetKnownPortBindings() []PortBinding { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.KnownPortBindingsUnsafe +} + +// GetManagedAgents returns the managed agents configured for this container +func (c *Container) 
GetManagedAgents() []ManagedAgent { + c.lock.RLock() + defer c.lock.RUnlock() + return c.ManagedAgentsUnsafe +} + +// SetVolumes sets the volumes mounted in a container +func (c *Container) SetVolumes(volumes []types.MountPoint) { + c.lock.Lock() + defer c.lock.Unlock() + + c.VolumesUnsafe = volumes +} + +// GetVolumes returns the volumes mounted in a container +func (c *Container) GetVolumes() []types.MountPoint { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.VolumesUnsafe +} + +// SetNetworkSettings sets the networks field in a container +func (c *Container) SetNetworkSettings(networks *types.NetworkSettings) { + c.lock.Lock() + defer c.lock.Unlock() + + c.NetworkSettingsUnsafe = networks +} + +// GetNetworkSettings returns the networks field in a container +func (c *Container) GetNetworkSettings() *types.NetworkSettings { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.NetworkSettingsUnsafe +} + +// SetNetworkMode sets the network mode of the container +func (c *Container) SetNetworkMode(networkMode string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.NetworkModeUnsafe = networkMode +} + +// GetNetworkMode returns the network mode of the container +func (c *Container) GetNetworkMode() string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.NetworkModeUnsafe +} + +// HealthStatusShouldBeReported returns true if the health check is defined in +// the task definition +func (c *Container) HealthStatusShouldBeReported() bool { + return c.HealthCheckType == DockerHealthCheckType +} + +// SetHealthStatus sets the container health status +func (c *Container) SetHealthStatus(health HealthStatus) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.Health.Status == health.Status { + return + } + + c.Health.Status = health.Status + c.Health.Since = aws.Time(time.Now()) + c.Health.Output = health.Output + + // Set the health exit code if the health check failed + if c.Health.Status == apicontainerstatus.ContainerUnhealthy { + c.Health.ExitCode = 
health.ExitCode + } +} + +// GetHealthStatus returns the container health information +func (c *Container) GetHealthStatus() HealthStatus { + c.lock.RLock() + defer c.lock.RUnlock() + + // Copy the pointer to avoid race condition + copyHealth := c.Health + + if c.Health.Since != nil { + copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since)) + } + + return copyHealth +} + +// BuildContainerDependency adds a new dependency container and satisfied status +// to the dependent container +func (c *Container) BuildContainerDependency(contName string, + satisfiedStatus apicontainerstatus.ContainerStatus, + dependentStatus apicontainerstatus.ContainerStatus) { + contDep := ContainerDependency{ + ContainerName: contName, + SatisfiedStatus: satisfiedStatus, + } + if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { + c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} + } + deps := c.TransitionDependenciesMap[dependentStatus] + deps.ContainerDependencies = append(deps.ContainerDependencies, contDep) + c.TransitionDependenciesMap[dependentStatus] = deps +} + +// BuildResourceDependency adds a new resource dependency by taking in the required status +// of the resource that satisfies the dependency and the dependent container status, +// whose transition is dependent on the resource. 
+// example: if container's PULLED transition is dependent on volume resource's +// CREATED status, then RequiredStatus=VolumeCreated and dependentStatus=ContainerPulled +func (c *Container) BuildResourceDependency(resourceName string, + requiredStatus resourcestatus.ResourceStatus, + dependentStatus apicontainerstatus.ContainerStatus) { + + resourceDep := ResourceDependency{ + Name: resourceName, + RequiredStatus: requiredStatus, + } + if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { + c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} + } + deps := c.TransitionDependenciesMap[dependentStatus] + deps.ResourceDependencies = append(deps.ResourceDependencies, resourceDep) + c.TransitionDependenciesMap[dependentStatus] = deps +} + +// updateAppliedStatusUnsafe updates the container transitioning status +func (c *Container) updateAppliedStatusUnsafe(knownStatus apicontainerstatus.ContainerStatus) { + if c.AppliedStatus == apicontainerstatus.ContainerStatusNone { + return + } + + // Check if the container transition has already finished + if c.AppliedStatus <= knownStatus { + c.AppliedStatus = apicontainerstatus.ContainerStatusNone + } +} + +// SetAppliedStatus sets the applied status of container and returns whether +// the container is already in a transition +func (c *Container) SetAppliedStatus(status apicontainerstatus.ContainerStatus) bool { + c.lock.Lock() + defer c.lock.Unlock() + + if c.AppliedStatus != apicontainerstatus.ContainerStatusNone { + // return false to indicate the set operation failed + return false + } + + c.AppliedStatus = status + return true +} + +// GetAppliedStatus returns the transitioning status of container +func (c *Container) GetAppliedStatus() apicontainerstatus.ContainerStatus { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.AppliedStatus +} + +// ShouldPullWithASMAuth returns true if this container needs to retrieve +// private registry authentication data from ASM +func (c *Container) 
ShouldPullWithASMAuth() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.RegistryAuthentication != nil && + c.RegistryAuthentication.Type == AuthTypeASM && + c.RegistryAuthentication.ASMAuthData != nil +} + +// SetASMDockerAuthConfig add the docker auth config data to the +// RegistryAuthentication struct held by the container, this is then passed down +// to the docker client to pull the image +func (c *Container) SetASMDockerAuthConfig(dac types.AuthConfig) { + c.RegistryAuthentication.ASMAuthData.SetDockerAuthConfig(dac) +} + +// SetV3EndpointID sets the v3 endpoint id of container +func (c *Container) SetV3EndpointID(v3EndpointID string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.V3EndpointID = v3EndpointID +} + +// GetV3EndpointID returns the v3 endpoint id of container +func (c *Container) GetV3EndpointID() string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.V3EndpointID +} + +// InjectV3MetadataEndpoint injects the v3 metadata endpoint as an environment variable for a container +func (c *Container) InjectV3MetadataEndpoint() { + c.lock.Lock() + defer c.lock.Unlock() + + // don't assume that the environment variable map has been initialized by others + if c.Environment == nil { + c.Environment = make(map[string]string) + } + + c.Environment[MetadataURIEnvironmentVariableName] = + fmt.Sprintf(MetadataURIFormat, c.V3EndpointID) +} + +// InjectV4MetadataEndpoint injects the v4 metadata endpoint as an environment variable for a container +func (c *Container) InjectV4MetadataEndpoint() { + c.lock.Lock() + defer c.lock.Unlock() + + // don't assume that the environment variable map has been initialized by others + if c.Environment == nil { + c.Environment = make(map[string]string) + } + + c.Environment[MetadataURIEnvVarNameV4] = + fmt.Sprintf(MetadataURIFormatV4, c.V3EndpointID) +} + +// ShouldCreateWithSSMSecret returns true if this container needs to get secret +// value from SSM Parameter Store +func (c *Container) 
ShouldCreateWithSSMSecret() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + // Secrets field will be nil if there is no secrets for container + if c.Secrets == nil { + return false + } + + for _, secret := range c.Secrets { + if secret.Provider == SecretProviderSSM { + return true + } + } + return false +} + +// ShouldCreateWithASMSecret returns true if this container needs to get secret +// value from AWS Secrets Manager +func (c *Container) ShouldCreateWithASMSecret() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + // Secrets field will be nil if there is no secrets for container + if c.Secrets == nil { + return false + } + + for _, secret := range c.Secrets { + if secret.Provider == SecretProviderASM { + return true + } + } + return false +} + +// ShouldCreateWithEnvFiles returns true if this container needs to +// retrieve environment variable files +func (c *Container) ShouldCreateWithEnvFiles() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.EnvironmentFiles == nil { + return false + } + + return len(c.EnvironmentFiles) != 0 +} + +// MergeEnvironmentVariables appends additional envVarName:envVarValue pairs to +// the the container's environment values structure +func (c *Container) MergeEnvironmentVariables(envVars map[string]string) { + c.lock.Lock() + defer c.lock.Unlock() + + // don't assume that the environment variable map has been initialized by others + if c.Environment == nil { + c.Environment = make(map[string]string) + } + for k, v := range envVars { + c.Environment[k] = v + } +} + +// MergeEnvironmentVariablesFromEnvfiles appends environment variable pairs from +// the retrieved envfiles to the container's environment values list +// envvars from envfiles will have lower precedence than existing envvars +func (c *Container) MergeEnvironmentVariablesFromEnvfiles(envVarsList []map[string]string) error { + c.lock.Lock() + defer c.lock.Unlock() + + // create map if does not exist + if c.Environment == nil { + c.Environment = 
make(map[string]string) + } + + // envVarsList is a list of map, where each map is from an envfile + // iterate over this sequentially because the original order of the + // environment files give precedence to the environment variables + for _, envVars := range envVarsList { + for k, v := range envVars { + // existing environment variables have precedence over variables from envfile + // only set the env var if key does not already exist + if _, ok := c.Environment[k]; !ok { + c.Environment[k] = v + } + } + } + return nil +} + +// HasSecret returns whether a container has secret based on a certain condition. +func (c *Container) HasSecret(f func(s Secret) bool) bool { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.Secrets == nil { + return false + } + + for _, secret := range c.Secrets { + if f(secret) { + return true + } + } + + return false +} + +func (c *Container) GetStartTimeout() time.Duration { + c.lock.Lock() + defer c.lock.Unlock() + + return time.Duration(c.StartTimeout) * time.Second +} + +func (c *Container) GetStopTimeout() time.Duration { + c.lock.Lock() + defer c.lock.Unlock() + + return time.Duration(c.StopTimeout) * time.Second +} + +func (c *Container) GetDependsOn() []DependsOn { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.DependsOnUnsafe +} + +func (c *Container) SetDependsOn(dependsOn []DependsOn) { + c.lock.Lock() + defer c.lock.Unlock() + + c.DependsOnUnsafe = dependsOn +} + +// DependsOnContainer checks whether a container depends on another container. +func (c *Container) DependsOnContainer(name string) bool { + c.lock.RLock() + defer c.lock.RUnlock() + + for _, dependsOn := range c.DependsOnUnsafe { + if dependsOn.ContainerName == name { + return true + } + } + + return false +} + +// HasContainerDependencies checks whether a container has any container dependency. 
+func (c *Container) HasContainerDependencies() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return len(c.DependsOnUnsafe) != 0 +} + +// AddContainerDependency adds a container dependency to a container. +func (c *Container) AddContainerDependency(name string, condition string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.DependsOnUnsafe = append(c.DependsOnUnsafe, DependsOn{ + ContainerName: name, + Condition: condition, + }) +} + +// GetLogDriver returns the log driver used by the container. +func (c *Container) GetLogDriver() string { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.DockerConfig.HostConfig == nil { + return "" + } + + hostConfig := &dockercontainer.HostConfig{} + err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) + if err != nil { + seelog.Warnf("Encountered error when trying to get log driver for container %s: %v", c.RuntimeID, err) + return "" + } + + return hostConfig.LogConfig.Type +} + +// GetLogOptions gets the log 'options' map passed into the task definition. +// see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html +func (c *Container) GetLogOptions() map[string]string { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.DockerConfig.HostConfig == nil { + return map[string]string{} + } + + hostConfig := &dockercontainer.HostConfig{} + err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) + if err != nil { + seelog.Warnf("Encountered error when trying to get log configuration for container %s: %v", c.RuntimeID, err) + return map[string]string{} + } + + return hostConfig.LogConfig.Config +} + +// GetNetworkModeFromHostConfig returns the network mode used by the container from the host config . 
+func (c *Container) GetNetworkModeFromHostConfig() string { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.DockerConfig.HostConfig == nil { + return "" + } + + hostConfig := &dockercontainer.HostConfig{} + // TODO return error to differentiate between error and default mode . + err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) + if err != nil { + seelog.Warnf("Encountered error when trying to get network mode for container %s: %v", c.RuntimeID, err) + return "" + } + + return hostConfig.NetworkMode.NetworkName() +} + +// GetHostConfig returns the container's host config. +func (c *Container) GetHostConfig() *string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.DockerConfig.HostConfig +} + +// GetFirelensConfig returns the container's firelens config. +func (c *Container) GetFirelensConfig() *FirelensConfig { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.FirelensConfig +} + +// GetEnvironmentFiles returns the container's environment files. +func (c *Container) GetEnvironmentFiles() []EnvironmentFile { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.EnvironmentFiles +} + +// RequireNeuronRuntime checks if the container needs to use the neuron runtime. +func (c *Container) RequireNeuronRuntime() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + _, ok := c.Environment[neuronVisibleDevicesEnvVar] + return ok +} + +// SetTaskARN sets the task arn of the container. +func (c *Container) SetTaskARN(arn string) { + c.lock.Lock() + defer c.lock.Unlock() + + c.TaskARNUnsafe = arn +} + +// GetTaskARN returns the task arn of the container. +func (c *Container) GetTaskARN() string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.TaskARNUnsafe +} + +// HasNotAndWillNotStart returns true if the container has never started, and is not going to start in the future. +// This is true if the following are all true: +// 1. Container's known status is earlier than running; +// 2. Container's desired status is stopped; +// 3. 
Container is not in the middle a transition (indicated by applied status is none status). +func (c *Container) HasNotAndWillNotStart() bool { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.KnownStatusUnsafe < apicontainerstatus.ContainerRunning && + c.DesiredStatusUnsafe.Terminal() && + c.AppliedStatus == apicontainerstatus.ContainerStatusNone +} + +// GetManagedAgentByName retrieves the managed agent with the name specified and a boolean indicating whether an agent +// was found or not. +// note: a zero value for ManagedAgent if the name is not known to this container. +func (c *Container) GetManagedAgentByName(agentName string) (ManagedAgent, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + for _, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + return ma, true + } + } + return ManagedAgent{}, false +} + +// UpdateManagedAgentByName updates the state of the managed agent with the name specified. If the agent is not found, +// this method returns false. +func (c *Container) UpdateManagedAgentByName(agentName string, state ManagedAgentState) bool { + c.lock.Lock() + defer c.lock.Unlock() + for i, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + // It's necessary to clone the whole ManagedAgent struct + c.ManagedAgentsUnsafe[i] = ManagedAgent{ + Name: ma.Name, + Properties: ma.Properties, + ManagedAgentState: state, + } + return true + } + } + return false +} + +// UpdateManagedAgentStatus updates the status of the managed agent with the name specified. If the agent is not found, +// this method returns false. +func (c *Container) UpdateManagedAgentStatus(agentName string, status apicontainerstatus.ManagedAgentStatus) bool { + c.lock.Lock() + defer c.lock.Unlock() + for i, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + c.ManagedAgentsUnsafe[i].Status = status + return true + } + } + return false +} + +// UpdateManagedAgentSentStatus updates the sent status of the managed agent with the name specified. 
If the agent is not found, +// this method returns false. +func (c *Container) UpdateManagedAgentSentStatus(agentName string, status apicontainerstatus.ManagedAgentStatus) bool { + c.lock.Lock() + defer c.lock.Unlock() + for i, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + c.ManagedAgentsUnsafe[i].SentStatus = status + return true + } + } + return false +} + +func (c *Container) GetManagedAgentStatus(agentName string) apicontainerstatus.ManagedAgentStatus { + c.lock.RLock() + defer c.lock.RUnlock() + for i, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + return c.ManagedAgentsUnsafe[i].Status + } + } + // we shouldn't get here because we'll always have a valid ManagedAgentName + return apicontainerstatus.ManagedAgentStatusNone +} + +func (c *Container) GetManagedAgentSentStatus(agentName string) apicontainerstatus.ManagedAgentStatus { + c.lock.RLock() + defer c.lock.RUnlock() + for i, ma := range c.ManagedAgentsUnsafe { + if ma.Name == agentName { + return c.ManagedAgentsUnsafe[i].SentStatus + } + } + // we shouldn't get here because we'll always have a valid ManagedAgentName + return apicontainerstatus.ManagedAgentStatusNone +} + +func (c *Container) SetContainerTornDown(td bool) { + c.lock.Lock() + defer c.lock.Unlock() + c.ContainerTornDownUnsafe = td +} + +func (c *Container) IsContainerTornDown() bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.ContainerTornDownUnsafe +} diff --git a/agent/api/container/container_test.go b/agent/api/container/container_test.go new file mode 100644 index 00000000000..6bd0efa98bc --- /dev/null +++ b/agent/api/container/container_test.go @@ -0,0 +1,971 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "fmt" + "reflect" + "testing" + "time" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + + "github.com/aws/amazon-ecs-agent/agent/utils" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/stretchr/testify/assert" +) + +type configPair struct { + Container *Container + Config *dockercontainer.Config + HostConfig *dockercontainer.HostConfig +} + +func (pair configPair) Equal() bool { + conf := pair.Config + cont := pair.Container + hostConf := pair.HostConfig + + if (hostConf.Memory / 1024 / 1024) != int64(cont.Memory) { + return false + } + if hostConf.CPUShares != int64(cont.CPU) { + return false + } + if conf.Image != cont.Image { + return false + } + if cont.EntryPoint == nil && !utils.StrSliceEqual(conf.Entrypoint, []string{}) { + return false + } + if cont.EntryPoint != nil && !utils.StrSliceEqual(conf.Entrypoint, *cont.EntryPoint) { + return false + } + if !utils.StrSliceEqual(cont.Command, conf.Cmd) { + return false + } + // TODO, Volumes, VolumesFrom, ExposedPorts + + return true +} + +func TestGetSteadyStateStatusReturnsRunningByDefault(t *testing.T) { + container := &Container{} + assert.Equal(t, container.GetSteadyStateStatus(), apicontainerstatus.ContainerRunning) +} + +func TestIsKnownSteadyState(t *testing.T) { + // This creates a container with `iota` ContainerStatus (NONE) + container := &Container{} + assert.False(t, container.IsKnownSteadyState()) + // Transition container to PULLED, still not in steady state + 
container.SetKnownStatus(apicontainerstatus.ContainerPulled) + assert.False(t, container.IsKnownSteadyState()) + // Transition container to CREATED, still not in steady state + container.SetKnownStatus(apicontainerstatus.ContainerCreated) + assert.False(t, container.IsKnownSteadyState()) + // Transition container to RUNNING, now we're in steady state + container.SetKnownStatus(apicontainerstatus.ContainerRunning) + assert.True(t, container.IsKnownSteadyState()) + // Now, set steady state to RESOURCES_PROVISIONED + resourcesProvisioned := apicontainerstatus.ContainerResourcesProvisioned + container.SteadyStateStatusUnsafe = &resourcesProvisioned + // Container is not in steady state anymore + assert.False(t, container.IsKnownSteadyState()) + // Transition container to RESOURCES_PROVISIONED, we're in + // steady state again + container.SetKnownStatus(apicontainerstatus.ContainerResourcesProvisioned) + assert.True(t, container.IsKnownSteadyState()) +} + +func TestGetNextStateProgression(t *testing.T) { + // This creates a container with `iota` ContainerStatus (NONE) + container := &Container{} + // NONE should transition to PULLED + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerPulled) + container.SetKnownStatus(apicontainerstatus.ContainerPulled) + // PULLED should transition to CREATED + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerCreated) + container.SetKnownStatus(apicontainerstatus.ContainerCreated) + // CREATED should transition to RUNNING + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerRunning) + container.SetKnownStatus(apicontainerstatus.ContainerRunning) + // RUNNING should transition to STOPPED + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerStopped) + + resourcesProvisioned := apicontainerstatus.ContainerResourcesProvisioned + container.SteadyStateStatusUnsafe = &resourcesProvisioned + // Set 
steady state to RESOURCES_PROVISIONED + // RUNNING should transition to RESOURCES_PROVISIONED based on steady state + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerResourcesProvisioned) + container.SetKnownStatus(apicontainerstatus.ContainerResourcesProvisioned) + assert.Equal(t, container.GetNextKnownStateProgression(), apicontainerstatus.ContainerStopped) +} + +func TestIsInternal(t *testing.T) { + testCases := []struct { + container *Container + internal bool + }{ + {&Container{}, false}, + {&Container{Type: ContainerNormal}, false}, + {&Container{Type: ContainerCNIPause}, true}, + {&Container{Type: ContainerEmptyHostVolume}, true}, + {&Container{Type: ContainerNamespacePause}, true}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("IsInternal shoukd return %t for %s", tc.internal, tc.container.String()), + func(t *testing.T) { + assert.Equal(t, tc.internal, tc.container.IsInternal()) + }) + } +} + +// TestSetupExecutionRoleFlag tests whether or not the container appropriately +//sets the flag for using execution roles +func TestSetupExecutionRoleFlag(t *testing.T) { + testCases := []struct { + container *Container + result bool + msg string + }{ + {&Container{}, false, "the container does not use ECR, so it should not require credentials"}, + { + &Container{ + RegistryAuthentication: &RegistryAuthenticationData{Type: "non-ecr"}, + }, + false, + "the container does not use ECR, so it should not require credentials", + }, + { + &Container{ + RegistryAuthentication: &RegistryAuthenticationData{Type: "ecr"}, + }, + false, "the container uses ECR, but it does not require execution role credentials", + }, + { + &Container{ + RegistryAuthentication: &RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &ECRAuthData{ + UseExecutionRole: true, + }, + }, + }, + true, + "the container uses ECR and require execution role credentials", + }, + } + + for _, testCase := range testCases { + t.Run(fmt.Sprintf("Container: 
%s", testCase.container.String()), func(t *testing.T) { + assert.Equal(t, testCase.result, testCase.container.ShouldPullWithExecutionRole(), testCase.msg) + }) + } +} + +func TestSetHealthStatus(t *testing.T) { + container := Container{} + + // set the container status to be healthy + container.SetHealthStatus(HealthStatus{Status: apicontainerstatus.ContainerHealthy, Output: "test"}) + health := container.GetHealthStatus() + assert.Equal(t, health.Status, apicontainerstatus.ContainerHealthy) + assert.Equal(t, health.Output, "test") + assert.NotEmpty(t, health.Since) + + // set the health status again shouldn't update the timestamp + container.SetHealthStatus(HealthStatus{Status: apicontainerstatus.ContainerHealthy}) + health2 := container.GetHealthStatus() + assert.Equal(t, health2.Status, apicontainerstatus.ContainerHealthy) + assert.Equal(t, health2.Since, health.Since) + + // the sleep is to ensure the different of the two timestamp returned by time.Now() + // is big enough to pass asser.NotEqual + time.Sleep(10 * time.Millisecond) + // change the container health status + container.SetHealthStatus(HealthStatus{Status: apicontainerstatus.ContainerUnhealthy, ExitCode: 1}) + health3 := container.GetHealthStatus() + assert.Equal(t, health3.Status, apicontainerstatus.ContainerUnhealthy) + assert.Equal(t, health3.ExitCode, 1) + assert.NotEqual(t, health3.Since, health2.Since) +} + +func TestHealthStatusShouldBeReported(t *testing.T) { + container := Container{} + assert.False(t, container.HealthStatusShouldBeReported(), "Health status of container that does not have HealthCheckType set should not be reported") + container.HealthCheckType = DockerHealthCheckType + assert.True(t, container.HealthStatusShouldBeReported(), "Health status of container that has docker HealthCheckType set should be reported") + container.HealthCheckType = "unknown" + assert.False(t, container.HealthStatusShouldBeReported(), "Health status of container that has non-docker HealthCheckType set 
should not be reported") +} + +func TestBuildContainerDependency(t *testing.T) { + container := Container{TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]TransitionDependencySet)} + depContName := "dep" + container.BuildContainerDependency(depContName, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + assert.NotNil(t, container.TransitionDependenciesMap) + contDep := container.TransitionDependenciesMap[apicontainerstatus.ContainerRunning].ContainerDependencies + assert.Len(t, container.TransitionDependenciesMap, 1) + assert.Len(t, contDep, 1) + assert.Equal(t, contDep[0].ContainerName, depContName) + assert.Equal(t, contDep[0].SatisfiedStatus, apicontainerstatus.ContainerRunning) +} + +func TestBuildResourceDependency(t *testing.T) { + container := Container{TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]TransitionDependencySet)} + depResourceName := "cgroup" + + container.BuildResourceDependency(depResourceName, resourcestatus.ResourceStatus(1), apicontainerstatus.ContainerRunning) + + assert.NotNil(t, container.TransitionDependenciesMap) + resourceDep := container.TransitionDependenciesMap[apicontainerstatus.ContainerRunning].ResourceDependencies + assert.Len(t, container.TransitionDependenciesMap, 1) + assert.Len(t, resourceDep, 1) + assert.Equal(t, depResourceName, resourceDep[0].Name) + assert.Equal(t, resourcestatus.ResourceStatus(1), resourceDep[0].GetRequiredStatus()) +} + +func TestShouldPullWithASMAuth(t *testing.T) { + container := Container{ + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &ASMAuthData{ + CredentialsParameter: "secret-id", + Region: "region", + }, + }, + } + + assert.True(t, container.ShouldPullWithASMAuth()) +} + +func TestInjectV3MetadataEndpoint(t *testing.T) { + container := Container{ + V3EndpointID: "myV3EndpointID", + } + + container.InjectV3MetadataEndpoint() + + assert.NotNil(t, 
container.Environment) + assert.Equal(t, container.Environment[MetadataURIEnvironmentVariableName], + fmt.Sprintf(MetadataURIFormat, "myV3EndpointID")) +} + +func TestInjectV4MetadataEndpoint(t *testing.T) { + container := Container{ + V3EndpointID: "EndpointID", + } + container.InjectV4MetadataEndpoint() + + assert.NotNil(t, container.Environment) + assert.Equal(t, container.Environment[MetadataURIEnvVarNameV4], + fmt.Sprintf(MetadataURIFormatV4, "EndpointID")) +} + +func TestShouldCreateWithSSMSecret(t *testing.T) { + cases := []struct { + in Container + out bool + }{ + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: []Secret{ + Secret{ + Provider: "ssm", + Name: "secret", + ValueFrom: "/test/secretName", + }}, + }, true}, + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + }, false}, + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: []Secret{ + Secret{ + Provider: "asm", + Name: "secret", + ValueFrom: "/test/secretName", + }}, + }, false}, + } + + for _, test := range cases { + container := test.in + assert.Equal(t, test.out, container.ShouldCreateWithSSMSecret()) + } +} + +func TestMergeEnvironmentVariables(t *testing.T) { + cases := []struct { + Name string + InContainerEnvironment map[string]string + InEnvVarMap map[string]string + OutEnvVarMap map[string]string + }{ + { + Name: "merge single item", + InContainerEnvironment: map[string]string{ + "CONFIG1": "config1"}, + InEnvVarMap: map[string]string{ + "SECRET1": "secret1"}, + OutEnvVarMap: map[string]string{ + "CONFIG1": "config1", + "SECRET1": "secret1", + }, + }, + + { + Name: "merge single item to nil container env var map", + InContainerEnvironment: nil, + InEnvVarMap: map[string]string{ + "SECRET1": "secret1"}, + OutEnvVarMap: map[string]string{ + "SECRET1": "secret1", + }, + }, + + { + Name: "merge zero items to existing container env var map", + InContainerEnvironment: map[string]string{ + "CONFIG1": "config1"}, + InEnvVarMap: map[string]string{}, + 
OutEnvVarMap: map[string]string{ + "CONFIG1": "config1", + }, + }, + + { + Name: "merge nil to existing container env var map", + InContainerEnvironment: map[string]string{ + "CONFIG1": "config1"}, + InEnvVarMap: nil, + OutEnvVarMap: map[string]string{ + "CONFIG1": "config1", + }, + }, + + { + Name: "merge nil to nil container env var map", + InContainerEnvironment: nil, + InEnvVarMap: nil, + OutEnvVarMap: map[string]string{}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + container := Container{ + Environment: c.InContainerEnvironment, + } + + container.MergeEnvironmentVariables(c.InEnvVarMap) + mapEq := reflect.DeepEqual(c.OutEnvVarMap, container.Environment) + assert.True(t, mapEq) + }) + } +} + +func TestShouldCreateWithASMSecret(t *testing.T) { + cases := []struct { + in Container + out bool + }{ + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: []Secret{ + Secret{ + Provider: "asm", + Name: "secret", + ValueFrom: "/test/secretName", + }}, + }, true}, + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + }, false}, + {Container{ + Name: "myName", + Image: "image:tag", + Secrets: []Secret{ + Secret{ + Provider: "ssm", + Name: "secret", + ValueFrom: "/test/secretName", + }}, + }, false}, + } + + for _, test := range cases { + container := test.in + assert.Equal(t, test.out, container.ShouldCreateWithASMSecret()) + } +} + +func TestHasSecret(t *testing.T) { + isEnvOrLogDriverSecret := func(s Secret) bool { + return s.Type == SecretTypeEnv || s.Target == SecretTargetLogDriver + } + isSSMLogDriverSecret := func(s Secret) bool { + return s.Provider == SecretProviderSSM && s.Target == SecretTargetLogDriver + } + + testCases := []struct { + name string + f func(s Secret) bool + secrets []Secret + res bool + }{ + { + name: "test env secret", + f: isEnvOrLogDriverSecret, + secrets: []Secret{ + { + Provider: "asm", + Name: "secret", + Type: "ENVIRONMENT_VARIABLE", + ValueFrom: "/test/secretName", + }}, + 
res: true, + }, + { + name: "test no secret", + f: isEnvOrLogDriverSecret, + }, + { + name: "test mount point secret", + f: isEnvOrLogDriverSecret, + secrets: []Secret{ + { + Provider: "asm", + Name: "secret", + Type: "MOUNT_POINT", + ValueFrom: "/test/secretName", + }}, + res: false, + }, + { + name: "test log driver secret", + f: isEnvOrLogDriverSecret, + secrets: []Secret{ + { + Provider: "asm", + Name: "splunk-token", + ValueFrom: "/test/secretName", + Target: "LOG_DRIVER", + }}, + res: true, + }, + { + name: "test secret provider ssm", + f: isSSMLogDriverSecret, + secrets: []Secret{ + { + Name: "secret", + Provider: SecretProviderSSM, + Target: SecretTargetLogDriver, + }, + }, + res: true, + }, + { + name: "test wrong secret provider", + f: isSSMLogDriverSecret, + secrets: []Secret{ + { + Name: "secret", + Provider: "dummy", + Target: SecretTargetLogDriver, + }, + }, + res: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := &Container{ + Name: "c", + Secrets: tc.secrets, + } + + assert.Equal(t, tc.res, c.HasSecret(tc.f)) + }) + } +} + +func TestPerContainerTimeouts(t *testing.T) { + timeout := uint(10) + expectedTimeout := time.Duration(timeout) * time.Second + + container := Container{ + StartTimeout: timeout, + StopTimeout: timeout, + } + + assert.Equal(t, container.GetStartTimeout(), expectedTimeout) + assert.Equal(t, container.GetStopTimeout(), expectedTimeout) +} + +func TestSetRuntimeIDInContainer(t *testing.T) { + container := Container{} + container.SetRuntimeID("asdfghjkl1234") + assert.Equal(t, "asdfghjkl1234", container.RuntimeID) + assert.Equal(t, "asdfghjkl1234", container.GetRuntimeID()) +} + +func TestGetManagedAgents(t *testing.T) { + container := Container{} + assert.Nil(t, container.GetManagedAgents()) + + expectedManagedAgent := ManagedAgent{ + Name: "dummyAgent", + Properties: map[string]string{"test": "prop"}, + ManagedAgentState: ManagedAgentState{ + LastStartedAt: time.Now(), + Status: 
apicontainerstatus.ManagedAgentCreated, + }, + } + container.ManagedAgentsUnsafe = []ManagedAgent{expectedManagedAgent} + assert.Equal(t, expectedManagedAgent, container.GetManagedAgents()[0]) +} + +func TestGetManagedAgentStatus(t *testing.T) { + container := Container{} + assert.Equal(t, apicontainerstatus.ManagedAgentStatusNone, container.GetManagedAgentStatus("dummyAgent")) + + expectedManagedAgent := ManagedAgent{ + Name: "dummyAgent", + ManagedAgentState: ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentCreated, + }, + } + container.ManagedAgentsUnsafe = []ManagedAgent{expectedManagedAgent} + assert.Equal(t, apicontainerstatus.ManagedAgentCreated, container.GetManagedAgentStatus("dummyAgent")) +} + +func TestGetManagedAgentSentStatus(t *testing.T) { + container := Container{} + assert.Equal(t, apicontainerstatus.ManagedAgentStatusNone, container.GetManagedAgentSentStatus("dummyAgent")) + + expectedManagedAgent := ManagedAgent{ + Name: "dummyAgent", + ManagedAgentState: ManagedAgentState{ + SentStatus: apicontainerstatus.ManagedAgentCreated, + }, + } + container.ManagedAgentsUnsafe = []ManagedAgent{expectedManagedAgent} + assert.Equal(t, apicontainerstatus.ManagedAgentCreated, container.GetManagedAgentSentStatus("dummyAgent")) +} + +func TestDependsOnContainer(t *testing.T) { + testCases := []struct { + name string + container *Container + dependsOnName string + dependsOn bool + }{ + { + name: "test DependsOnContainer positive case", + container: &Container{ + Name: "container1", + DependsOnUnsafe: []DependsOn{ + { + ContainerName: "container2", + Condition: "START", + }, + }, + }, + dependsOnName: "container2", + dependsOn: true, + }, + { + name: "test DependsOnContainer negative case", + container: &Container{ + Name: "container1", + DependsOnUnsafe: []DependsOn{ + { + ContainerName: "container2", + Condition: "START", + }, + }, + }, + dependsOnName: "container0", + dependsOn: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, 
func(t *testing.T) { + assert.Equal(t, tc.dependsOn, tc.container.DependsOnContainer(tc.dependsOnName)) + }) + } +} + +func TestAddContainerDependency(t *testing.T) { + container := &Container{ + Name: "container1", + } + container.AddContainerDependency("container2", "START") + + assert.Contains(t, container.DependsOnUnsafe, DependsOn{ + ContainerName: "container2", + Condition: "START", + }) +} + +func TestGetLogDriver(t *testing.T) { + getContainer := func(hostConfig string) *Container { + c := &Container{ + Name: "c", + } + c.DockerConfig.HostConfig = &hostConfig + return c + } + + testCases := []struct { + name string + container *Container + logDriver string + }{ + { + name: "positive case", + container: getContainer(`{"LogConfig":{"Type":"logdriver"}}`), + logDriver: "logdriver", + }, + { + name: "negative case", + container: getContainer("invalid"), + logDriver: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.logDriver, tc.container.GetLogDriver()) + }) + } +} + +func TestGetNetworkModeFromHostConfig(t *testing.T) { + getContainer := func(hostConfig string) *Container { + c := &Container{ + Name: "c", + } + c.DockerConfig.HostConfig = &hostConfig + return c + } + + testCases := []struct { + name string + container *Container + expectedOutput string + }{ + { + name: "bridge mode", + container: getContainer("{\"NetworkMode\":\"bridge\"}"), + expectedOutput: "bridge", + }, + { + name: "invalid case", + container: getContainer("invalid"), + expectedOutput: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOutput, tc.container.GetNetworkModeFromHostConfig()) + }) + } +} + +func TestShouldCreateWithEnvfiles(t *testing.T) { + cases := []struct { + in Container + out bool + }{ + { + Container{ + Name: "containerName", + Image: "image:tag", + EnvironmentFiles: []EnvironmentFile{ + EnvironmentFile{ + Value: "s3://bucket/envfile", + Type: "s3", + 
}, + }, + }, true}, + { + Container{ + Name: "containerName", + Image: "image:tag", + EnvironmentFiles: nil, + }, false}, + } + + for _, test := range cases { + container := test.in + assert.Equal(t, test.out, container.ShouldCreateWithEnvFiles()) + } +} + +func TestMergeEnvironmentVariablesFromEnvfiles(t *testing.T) { + cases := []struct { + Name string + InContainerEnvironment map[string]string + InEnvVarList []map[string]string + OutEnvVarMap map[string]string + }{ + { + Name: "merge one item", + InContainerEnvironment: map[string]string{"key1": "value1"}, + InEnvVarList: []map[string]string{{"key2": "value2"}}, + OutEnvVarMap: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + Name: "merge single item to nil env var map", + InContainerEnvironment: nil, + InEnvVarList: []map[string]string{{"key": "value"}}, + OutEnvVarMap: map[string]string{"key": "value"}, + }, + { + Name: "merge one item key already exists", + InContainerEnvironment: map[string]string{"key1": "value1"}, + InEnvVarList: []map[string]string{{"key1": "value2"}}, + OutEnvVarMap: map[string]string{"key1": "value1"}, + }, + { + Name: "merge two items with same key", + InContainerEnvironment: map[string]string{"key1": "value1"}, + InEnvVarList: []map[string]string{ + {"key2": "value2"}, + {"key2": "value3"}, + }, + OutEnvVarMap: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + + for _, test := range cases { + t.Run(test.Name, func(t *testing.T) { + container := Container{ + Environment: test.InContainerEnvironment, + } + + container.MergeEnvironmentVariablesFromEnvfiles(test.InEnvVarList) + assert.True(t, reflect.DeepEqual(test.OutEnvVarMap, container.Environment)) + }) + } +} + +func TestRequireNeuronRuntime(t *testing.T) { + c := &Container{ + Environment: map[string]string{neuronVisibleDevicesEnvVar: "all"}, + } + assert.True(t, c.RequireNeuronRuntime()) +} + +func TestHasNotAndWillNotStart(t *testing.T) { + testCases := []struct { + name string + 
knownStatus apicontainerstatus.ContainerStatus + desiredStatus apicontainerstatus.ContainerStatus + appliedStatus apicontainerstatus.ContainerStatus + expected bool + }{ + { + name: "container has started", + knownStatus: apicontainerstatus.ContainerRunning, + desiredStatus: apicontainerstatus.ContainerRunning, + appliedStatus: apicontainerstatus.ContainerStatusNone, + }, + { + name: "container wants to start", + knownStatus: apicontainerstatus.ContainerCreated, + desiredStatus: apicontainerstatus.ContainerRunning, + appliedStatus: apicontainerstatus.ContainerStatusNone, + }, + { + name: "container in the middle of transition", + knownStatus: apicontainerstatus.ContainerCreated, + desiredStatus: apicontainerstatus.ContainerStopped, + appliedStatus: apicontainerstatus.ContainerRunning, + }, + { + name: "container has not and will not start", + knownStatus: apicontainerstatus.ContainerPulled, + desiredStatus: apicontainerstatus.ContainerStopped, + appliedStatus: apicontainerstatus.ContainerStatusNone, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cont := &Container{ + KnownStatusUnsafe: tc.knownStatus, + DesiredStatusUnsafe: tc.desiredStatus, + AppliedStatus: tc.appliedStatus, + } + assert.Equal(t, tc.expected, cont.HasNotAndWillNotStart()) + }) + } +} + +func TestUpdateManagedAgentByName(t *testing.T) { + const ( + dummyAgent = "dummyAgent" + testStatus = apicontainerstatus.ManagedAgentStopped + testReason = "reason" + ) + cases := []struct { + name string + agentName string + state ManagedAgentState + }{ + { + name: "test nonexistent managed agent", + agentName: "nonexistentAgent", + }, + { + name: "test managed agent with default (zero) state", + agentName: dummyAgent, + }, + { + name: "test managed agent with nil metadata", + agentName: dummyAgent, + state: ManagedAgentState{ + Status: testStatus, + Reason: testReason, + LastStartedAt: time.Time{}, + Metadata: nil, + }, + }, + { + name: "test managed agent 
with full state", + agentName: dummyAgent, + state: ManagedAgentState{ + Status: testStatus, + Reason: testReason, + LastStartedAt: time.Time{}, + }, + }, + } + for _, test := range cases { + t.Run(test.name, func(t *testing.T) { + c := &Container{} + + // Verify we don't have extraneous data on a blank Container + assert.Nil(t, c.GetManagedAgents()) + agent, ok := c.GetManagedAgentByName(test.agentName) + assert.False(t, ok) + assert.Equal(t, ManagedAgentState{}, agent.ManagedAgentState) + var expectAgentFound bool + // simulate we only have data for "dummyAgent" + if test.agentName == dummyAgent { + expectAgentFound = true + c.ManagedAgentsUnsafe = []ManagedAgent{ + { + Name: test.agentName, + ManagedAgentState: test.state, + }, + } + } + + // Verify that we can retrieve the correct data stored in container.ManagedAgentsUnsafe + agent, ok = c.GetManagedAgentByName(test.agentName) + assert.Equal(t, expectAgentFound, ok) + assert.Equal(t, test.state, agent.ManagedAgentState) + + var newState ManagedAgentState + // simulate we only replace data for "dummyAgent" + if test.agentName == dummyAgent { + newState = ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + Reason: "new reason", + LastStartedAt: time.Now(), + } + c.UpdateManagedAgentByName(test.agentName, newState) + } + agent, ok = c.GetManagedAgentByName(test.agentName) + assert.Equal(t, expectAgentFound, ok) + assert.Equal(t, newState, agent.ManagedAgentState) + }) + } +} + +func TestUpdateManagedAgentSentStatus(t *testing.T) { + const dummyAgent = "dummyAgent" + cases := []struct { + name string + agentName string + updateSentStatus bool + sentStatus apicontainerstatus.ManagedAgentStatus + expectedSentStatus apicontainerstatus.ManagedAgentStatus + }{ + { + name: "test nonexistent managed agent", + agentName: "nonexistentAgent", + expectedSentStatus: apicontainerstatus.ManagedAgentStatusNone, + }, + { + name: "test managed agent with default (zero) status", + agentName: dummyAgent, + 
expectedSentStatus: apicontainerstatus.ManagedAgentStatusNone, + }, + { + name: "test managed agent with custom status", + agentName: dummyAgent, + updateSentStatus: true, + sentStatus: apicontainerstatus.ManagedAgentRunning, + expectedSentStatus: apicontainerstatus.ManagedAgentRunning, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + c := &Container{ + ManagedAgentsUnsafe: []ManagedAgent{{ + Name: tc.agentName, + }}, + } + before, _ := c.GetManagedAgentByName(tc.agentName) + assert.Equal(t, apicontainerstatus.ManagedAgentStatusNone, before.SentStatus) + if tc.updateSentStatus { + c.UpdateManagedAgentSentStatus(tc.agentName, tc.sentStatus) + } + after, _ := c.GetManagedAgentByName(tc.agentName) + assert.Equal(t, tc.expectedSentStatus, after.SentStatus) + + }) + } +} diff --git a/agent/api/container/container_unix.go b/agent/api/container/container_unix.go new file mode 100644 index 00000000000..5c759925396 --- /dev/null +++ b/agent/api/container/container_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "github.com/pkg/errors" +) + +const ( + // DockerContainerMinimumMemoryInBytes is the minimum amount of + // memory to be allocated to a docker container + DockerContainerMinimumMemoryInBytes = 4 * 1024 * 1024 // 4MB +) + +// RequiresCredentialSpec checks if container needs a credentialspec resource +func (c *Container) RequiresCredentialSpec() bool { + return false +} + +// GetCredentialSpec is used to retrieve the current credentialspec resource +func (c *Container) GetCredentialSpec() (string, error) { + return "", errors.New("unsupported platform") +} diff --git a/agent/api/container/container_windows.go b/agent/api/container/container_windows.go new file mode 100644 index 00000000000..6f8dadb84f5 --- /dev/null +++ b/agent/api/container/container_windows.go @@ -0,0 +1,68 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "encoding/json" + "strings" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" +) + +const ( + // DockerContainerMinimumMemoryInBytes is the minimum amount of + // memory to be allocated to a docker container + DockerContainerMinimumMemoryInBytes = 256 * 1024 * 1024 // 256MB +) + +// RequiresCredentialSpec checks if container needs a credentialspec resource +func (c *Container) RequiresCredentialSpec() bool { + credSpec, err := c.getCredentialSpec() + if err != nil || credSpec == "" { + return false + } + + return true +} + +// GetCredentialSpec is used to retrieve the current credentialspec resource +func (c *Container) GetCredentialSpec() (string, error) { + return c.getCredentialSpec() +} + +func (c *Container) getCredentialSpec() (string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.DockerConfig.HostConfig == nil { + return "", errors.New("empty container hostConfig") + } + + hostConfig := &dockercontainer.HostConfig{} + err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) + if err != nil || len(hostConfig.SecurityOpt) == 0 { + return "", errors.New("unable to obtain security options from container hostConfig") + } + + for _, opt := range hostConfig.SecurityOpt { + if strings.HasPrefix(opt, "credentialspec") { + return opt, nil + } + } + + return "", errors.New("unable to obtain credentialspec") +} diff --git a/agent/api/container/container_windows_test.go b/agent/api/container/container_windows_test.go new file mode 100644 index 00000000000..db78310644f --- /dev/null +++ b/agent/api/container/container_windows_test.go @@ -0,0 +1,132 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRequiresCredentialSpec(t *testing.T) { + testCases := []struct { + name string + container *Container + expectedOutput bool + }{ + { + name: "hostconfig_nil", + container: &Container{}, + expectedOutput: false, + }, + { + name: "invalid_case", + container: getContainer("invalid"), + expectedOutput: false, + }, + { + name: "empty_sec_opt", + container: getContainer("{\"NetworkMode\":\"bridge\"}"), + expectedOutput: false, + }, + { + name: "missing_credentialspec", + container: getContainer("{\"SecurityOpt\": [\"invalid-sec-opt\"]}"), + expectedOutput: false, + }, + { + name: "valid_credentialspec_file", + container: getContainer("{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}"), + expectedOutput: true, + }, + { + name: "valid_credentialspec_s3", + container: getContainer("{\"SecurityOpt\": [\"credentialspec:arn:aws:s3:::${BucketName}/${ObjectName}\"]}"), + expectedOutput: true, + }, + { + name: "valid_credentialspec_ssm", + container: getContainer("{\"SecurityOpt\": [\"credentialspec:arn:aws:ssm:region:aws_account_id:parameter/parameter_name\"]}"), + expectedOutput: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOutput, tc.container.RequiresCredentialSpec()) + }) + } +} + +func TestGetCredentialSpecErr(t *testing.T) { + testCases := []struct { + name string + container *Container + expectedOutputString string + expectedErrorString string + }{ + { + name: "hostconfig_nil", + container: &Container{}, + 
expectedOutputString: "", + expectedErrorString: "empty container hostConfig", + }, + { + name: "invalid_case", + container: getContainer("invalid"), + expectedOutputString: "", + expectedErrorString: "unable to obtain security options from container hostConfig", + }, + { + name: "empty_sec_opt", + container: getContainer("{\"NetworkMode\":\"bridge\"}"), + expectedOutputString: "", + expectedErrorString: "unable to obtain security options from container hostConfig", + }, + { + name: "missing_credentialspec", + container: getContainer("{\"SecurityOpt\": [\"invalid-sec-opt\"]}"), + expectedOutputString: "", + expectedErrorString: "unable to obtain credentialspec", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + expectedOutputStr, err := tc.container.GetCredentialSpec() + assert.Equal(t, tc.expectedOutputString, expectedOutputStr) + assert.EqualError(t, err, tc.expectedErrorString) + }) + } +} + +func TestGetCredentialSpecHappyPath(t *testing.T) { + c := getContainer("{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}") + + expectedCredentialSpec := "credentialspec:file://gmsa_gmsa-acct.json" + + credentialspec, err := c.GetCredentialSpec() + assert.NoError(t, err) + assert.EqualValues(t, expectedCredentialSpec, credentialspec) +} + +func getContainer(hostConfig string) *Container { + c := &Container{ + Name: "c", + } + c.DockerConfig.HostConfig = &hostConfig + return c +} diff --git a/agent/api/container/containerevent.go b/agent/api/container/containerevent.go new file mode 100644 index 00000000000..19fc7133f67 --- /dev/null +++ b/agent/api/container/containerevent.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +// DockerEventType represents the type of docker events +type DockerEventType int + +const ( + // ContainerStatusEvent represents the container status change events from docker + // currently create, start, stop, die, restart and oom event will have this type + ContainerStatusEvent DockerEventType = iota + // ContainerHealthEvent represents the container health status event from docker + // "health_status: unhealthy" and "health_status: healthy" will have this type + ContainerHealthEvent +) + +func (eventType DockerEventType) String() string { + switch eventType { + case ContainerStatusEvent: + return "ContainerStatusChangeEvent" + case ContainerHealthEvent: + return "ContainerHealthChangeEvent" + default: + return "UNKNOWN" + } +} diff --git a/agent/api/container/containeroverrides.go b/agent/api/container/containeroverrides.go new file mode 100644 index 00000000000..487c4e8eaa4 --- /dev/null +++ b/agent/api/container/containeroverrides.go @@ -0,0 +1,75 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "encoding/json" + "errors" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +// ContainerOverrides are overrides applied to the container +type ContainerOverrides struct { + Command *[]string `json:"command"` +} + +// ContainerOverridesCopy is a type alias that doesn't have a custom unmarshaller so we +// can unmarshal ContainerOverrides data into something without recursing +type ContainerOverridesCopy ContainerOverrides + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded +// ContainerOverrides data +// This custom unmarshaller is needed because the json is sent to us as a string +// rather than a fully typed object. We support both formats in the hopes that +// one day everything will be fully typed +// Note: the `json:",string"` tag DOES NOT apply here; it DOES NOT work with +// struct types, only ints/floats/etc. We're basically doing that though +// We also intentionally fail if there are any keys we were unable to unmarshal +// into our struct +func (overrides *ContainerOverrides) UnmarshalJSON(b []byte) error { + regular := ContainerOverridesCopy{} + + // Try to do it the strongly typed way first + err := json.Unmarshal(b, &regular) + if err == nil { + err = utils.CompleteJsonUnmarshal(b, regular) + if err == nil { + *overrides = ContainerOverrides(regular) + return nil + } + err = apierrors.NewMultiError(errors.New("Error unmarshalling ContainerOverrides"), err) + } + + // Now the stringly typed way + var str string + err2 := json.Unmarshal(b, &str) + if err2 != nil { + return apierrors.NewMultiError(errors.New("Could not unmarshal ContainerOverrides into either an object or string respectively"), err, err2) + } + + // We have a string, let's try to unmarshal that into a typed object + err3 := json.Unmarshal([]byte(str), &regular) + if err3 == nil { + err3 = utils.CompleteJsonUnmarshal([]byte(str), regular) + if err3 == nil { + *overrides = 
ContainerOverrides(regular) + return nil + } + err3 = apierrors.NewMultiError(errors.New("Error unmarshalling ContainerOverrides"), err3) + } + + return apierrors.NewMultiError(errors.New("Could not unmarshal ContainerOverrides in any supported way"), err, err2, err3) +} diff --git a/agent/api/container/containeroverrides_test.go b/agent/api/container/containeroverrides_test.go new file mode 100644 index 00000000000..7c1ef734cc4 --- /dev/null +++ b/agent/api/container/containeroverrides_test.go @@ -0,0 +1,96 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "encoding/json" + "reflect" + "testing" +) + +type testContainerOverrides struct { + SomeContainerOverrides ContainerOverrides `json:"overrides"` +} + +type testContainerOverrideInput struct { + SomeContainerOverrides string `json:"overrides"` +} + +func TestUnmarshalContainerOverrides(t *testing.T) { + overrides := &ContainerOverrides{} + + err := json.Unmarshal([]byte(`{"command": ["sh", "-c", "sleep 300"]}`), &overrides) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(overrides.Command, &[]string{"sh", "-c", "sleep 300"}) { + t.Error("Unmarshalled wrong result", overrides.Command) + } + + var overrides3 testContainerOverrides + err = json.Unmarshal([]byte(`{"overrides":{"command":["sh", "-c", "sleep 15"]}}`), &overrides3) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(overrides3.SomeContainerOverrides.Command, &[]string{"sh", "-c", "sleep 15"}) { + t.Error("unmarshalled wrong result", overrides3) + } + + overrides2 := ContainerOverrides{Command: &[]string{"sh", "-c", "sleep 1"}} + + strOverrides, err := json.Marshal(overrides2) + if err != nil { + t.Error(err) + } + input := testContainerOverrideInput{SomeContainerOverrides: string(strOverrides)} + strInput, err := json.Marshal(input) + if err != nil { + t.Error(err) + } + + var overrides4 testContainerOverrides + err = json.Unmarshal(strInput, &overrides4) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(overrides4.SomeContainerOverrides.Command, &[]string{"sh", "-c", "sleep 1"}) { + t.Error("Unmarshalled wrong result", overrides4) + } + + // Test that marshalling an unknown key fails hard + var overrides5 testContainerOverrides + err = json.Unmarshal([]byte(`{"overrides":{"command":["ash","-c","sleep 1"],"containerPlanet":"mars"}}`), &overrides5) + if err == nil { + t.Error("No error on unknown json field containerPlanet") + } + + // Test the same for the string + err = 
json.Unmarshal([]byte(`{"overrides":"{\"command\":[\"ash\",\"-c\",\"sleep 1\"],\"containerPlanet\":\"mars\"}"}`), &overrides5) + if err == nil { + t.Error("No error for unknown json field in string, containerPlanet") + } + + // Now error cases + err = json.Unmarshal([]byte(`{"overrides":"a string that can't be json unmarshalled }{"}`), &overrides5) + if err == nil { + t.Error("No error when unmarshalling an invalid json string") + } + + err = json.Unmarshal([]byte(`{"overrides": ["must be a string or object"] }`), &overrides5) + if err == nil { + t.Error("No error when unmarshalling a really invalid json string") + } +} diff --git a/agent/api/container/containertype.go b/agent/api/container/containertype.go new file mode 100644 index 00000000000..9045b872aa8 --- /dev/null +++ b/agent/api/container/containertype.go @@ -0,0 +1,106 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "errors" + + "github.com/cihub/seelog" +) + +const ( + // ContainerNormal represents the container type for 'Normal' containers + // These are the ones specified in the task definition via container + // definitions + ContainerNormal ContainerType = iota + // ContainerEmptyHostVolume represents the internal container type + // for the empty volumes container + ContainerEmptyHostVolume + // ContainerCNIPause represents the internal container type for the + // pause container + ContainerCNIPause + + // ContainerNamespacePause represents the internal container type for + // sharing either PID or IPC resource namespaces. Regardless if one or + // both flags are used, only 1 of these containers need to be active + ContainerNamespacePause +) + +// ContainerType represents the type of the internal container created +type ContainerType int32 + +var stringToContainerType = map[string]ContainerType{ + "NORMAL": ContainerNormal, + "EMPTY_HOST_VOLUME": ContainerEmptyHostVolume, + "CNI_PAUSE": ContainerCNIPause, + "NAMESPACE_PAUSE": ContainerNamespacePause, +} + +// String converts the container type enum to a string +func (containerType ContainerType) String() string { + for str, contType := range stringToContainerType { + if contType == containerType { + return str + } + } + + return "NORMAL" +} + +// UnmarshalJSON decodes the container type field in the JSON encoded string +// into the ContainerType object +func (containerType *ContainerType) UnmarshalJSON(b []byte) error { + strType := string(b) + + switch strType { + case "null": + *containerType = ContainerNormal + seelog.Warn("Unmarshalled nil ContainerType as Normal") + return nil + // 'true' or 'false' for compatibility with state version <= 5 + case "true": + *containerType = ContainerEmptyHostVolume + return nil + case "false": + *containerType = ContainerNormal + return nil + } + + if len(strType) < 2 { + *containerType = ContainerNormal + return errors.New("invalid length set for 
ContainerType: " + string(b)) + } + if b[0] != '"' || b[len(b)-1] != '"' { + *containerType = ContainerNormal + return errors.New("invalid value set for ContainerType, must be a string or null; got " + string(b)) + } + strType = string(b[1 : len(b)-1]) + + contType, ok := stringToContainerType[strType] + if !ok { + *containerType = ContainerNormal + return errors.New("unrecognized ContainerType: " + strType) + } + *containerType = contType + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding a ContainerType object +func (containerType *ContainerType) MarshalJSON() ([]byte, error) { + if containerType == nil { + return []byte("null"), nil + } + + return []byte(`"` + containerType.String() + `"`), nil +} diff --git a/agent/api/container/containertype_test.go b/agent/api/container/containertype_test.go new file mode 100644 index 00000000000..12172645f3f --- /dev/null +++ b/agent/api/container/containertype_test.go @@ -0,0 +1,93 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +type containerTypeWrapper struct { + Type ContainerType `json:"IsInternal"` +} + +func TestMarshalContainerType(t *testing.T) { + testCases := []struct { + containerType containerTypeWrapper + encodedString string + }{ + {containerTypeWrapper{ContainerNormal}, `{"IsInternal":"NORMAL"}`}, + {containerTypeWrapper{ContainerEmptyHostVolume}, `{"IsInternal":"EMPTY_HOST_VOLUME"}`}, + {containerTypeWrapper{ContainerCNIPause}, `{"IsInternal":"CNI_PAUSE"}`}, + {containerTypeWrapper{ContainerNamespacePause}, `{"IsInternal":"NAMESPACE_PAUSE"}`}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s marshalled as %s", tc.containerType.Type.String(), tc.encodedString), + func(t *testing.T) { + contTypeWrapper := &tc.containerType + contTypeJSON, err := json.Marshal(contTypeWrapper) + assert.NoError(t, err) + assert.Equal(t, tc.encodedString, string(contTypeJSON)) + }) + } +} + +func TestUnmarhsalContainerType(t *testing.T) { + testCases := []struct { + containerType containerTypeWrapper + encodedString string + }{ + {containerTypeWrapper{ContainerNormal}, `{"IsInternal":"NORMAL"}`}, + {containerTypeWrapper{ContainerEmptyHostVolume}, `{"IsInternal":"EMPTY_HOST_VOLUME"}`}, + {containerTypeWrapper{ContainerCNIPause}, `{"IsInternal":"CNI_PAUSE"}`}, + {containerTypeWrapper{ContainerNamespacePause}, `{"IsInternal":"NAMESPACE_PAUSE"}`}, + {containerTypeWrapper{ContainerNormal}, `{"IsInternal":null}`}, + {containerTypeWrapper{ContainerNormal}, `{"IsInternal":false}`}, + {containerTypeWrapper{ContainerEmptyHostVolume}, `{"IsInternal":true}`}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s unmarshalled as %s", tc.encodedString, tc.containerType.Type.String()), + func(t *testing.T) { + var contTypeWrapper containerTypeWrapper + err := json.Unmarshal([]byte(tc.encodedString), &contTypeWrapper) + assert.NoError(t, err) + assert.Equal(t, 
tc.containerType.Type, contTypeWrapper.Type) + }) + } +} + +func TestUnmarhsalContainerTypeFailures(t *testing.T) { + testCases := []struct { + encodedString string + }{ + {`{"IsInternal":"foo"}`}, + {`{"IsInternal":""}`}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s unmarshalling", tc.encodedString), func(t *testing.T) { + var contTypeWrapper containerTypeWrapper + err := json.Unmarshal([]byte(tc.encodedString), &contTypeWrapper) + assert.Error(t, err) + assert.Equal(t, ContainerNormal, contTypeWrapper.Type) + }) + } +} diff --git a/agent/api/container/json.go b/agent/api/container/json.go new file mode 100644 index 00000000000..a80d9989753 --- /dev/null +++ b/agent/api/container/json.go @@ -0,0 +1,30 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "encoding/json" +) + +// In order to override the MarshalJSON method while still using Go's marshalling logic inside, an alias type +// is needed to avoid infinite recursion. +type jContainer Container + +// MarshalJSON wraps Go's marshalling logic with a necessary read lock. 
+func (c *Container) MarshalJSON() ([]byte, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + return json.Marshal((*jContainer)(c)) +} diff --git a/agent/api/container/port_binding.go b/agent/api/container/port_binding.go new file mode 100644 index 00000000000..c26f466af9b --- /dev/null +++ b/agent/api/container/port_binding.go @@ -0,0 +1,71 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "strconv" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/docker/go-connections/nat" +) + +const ( + // UnrecognizedTransportProtocolErrorName is an error where the protocol of the binding is invalid + UnrecognizedTransportProtocolErrorName = "UnrecognizedTransportProtocol" + // UnparseablePortErrorName is an error where the port configuration is invalid + UnparseablePortErrorName = "UnparsablePort" +) + +// PortBinding represents a port binding for a container +type PortBinding struct { + // ContainerPort is the port inside the container + ContainerPort uint16 + // HostPort is the port exposed on the host + HostPort uint16 + // BindIP is the IP address to which the port is bound + BindIP string `json:"BindIp"` + // Protocol is the protocol of the port + Protocol TransportProtocol +} + +// PortBindingFromDockerPortBinding constructs a PortBinding slice from a docker +// NetworkSettings.Ports map. 
+func PortBindingFromDockerPortBinding(dockerPortBindings nat.PortMap) ([]PortBinding, apierrors.NamedError) { + portBindings := make([]PortBinding, 0, len(dockerPortBindings)) + + for port, bindings := range dockerPortBindings { + containerPort, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, &apierrors.DefaultNamedError{Name: UnparseablePortErrorName, Err: "Error parsing docker port as int " + err.Error()} + } + protocol, err := NewTransportProtocol(port.Proto()) + if err != nil { + return nil, &apierrors.DefaultNamedError{Name: UnrecognizedTransportProtocolErrorName, Err: err.Error()} + } + + for _, binding := range bindings { + hostPort, err := strconv.Atoi(binding.HostPort) + if err != nil { + return nil, &apierrors.DefaultNamedError{Name: UnparseablePortErrorName, Err: "Error parsing port binding as int " + err.Error()} + } + portBindings = append(portBindings, PortBinding{ + ContainerPort: uint16(containerPort), + HostPort: uint16(hostPort), + BindIP: binding.HostIP, + Protocol: protocol, + }) + } + } + return portBindings, nil +} diff --git a/agent/api/container/port_binding_test.go b/agent/api/container/port_binding_test.go new file mode 100644 index 00000000000..556e5a04ecb --- /dev/null +++ b/agent/api/container/port_binding_test.go @@ -0,0 +1,128 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "reflect" + "testing" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/docker/go-connections/nat" +) + +func TestPortBindingFromDockerPortBinding(t *testing.T) { + pairs := []struct { + dockerPortBindings nat.PortMap + ecsPortBindings []PortBinding + }{ + { + nat.PortMap{ + nat.Port("53/udp"): []nat.PortBinding{ + {HostIP: "1.2.3.4", HostPort: "55"}, + }, + }, + []PortBinding{ + { + BindIP: "1.2.3.4", + HostPort: 55, + ContainerPort: 53, + Protocol: TransportProtocolUDP, + }, + }, + }, + { + nat.PortMap{ + nat.Port("80/tcp"): []nat.PortBinding{ + {HostIP: "2.3.4.5", HostPort: "8080"}, + {HostIP: "5.6.7.8", HostPort: "80"}, + }, + }, + []PortBinding{ + { + BindIP: "2.3.4.5", + HostPort: 8080, + ContainerPort: 80, + Protocol: TransportProtocolTCP, + }, + { + BindIP: "5.6.7.8", + HostPort: 80, + ContainerPort: 80, + Protocol: TransportProtocolTCP, + }, + }, + }, + } + + for i, pair := range pairs { + converted, err := PortBindingFromDockerPortBinding(pair.dockerPortBindings) + if err != nil { + t.Errorf("Error converting port binding pair #%v: %v", i, err) + } + if !reflect.DeepEqual(pair.ecsPortBindings, converted) { + t.Errorf("Converted bindings didn't match expected for #%v: expected %+v, actual %+v", i, pair.ecsPortBindings, converted) + } + } +} + +func TestPortBindingErrors(t *testing.T) { + badInputs := []struct { + dockerPortBindings nat.PortMap + errorName string + }{ + { + nat.PortMap{ + nat.Port("woof/tcp"): []nat.PortBinding{ + {HostIP: "2.3.4.5", HostPort: "8080"}, + {HostIP: "5.6.7.8", HostPort: "80"}, + }, + }, + UnparseablePortErrorName, + }, + { + nat.PortMap{ + nat.Port("80/tcp"): []nat.PortBinding{ + {HostIP: "2.3.4.5", HostPort: "8080"}, + {HostIP: "5.6.7.8", HostPort: "bark"}, + }, + }, + UnparseablePortErrorName, + }, + { + nat.PortMap{ + nat.Port("80/bark"): []nat.PortBinding{ + {HostIP: "2.3.4.5", HostPort: "8080"}, + {HostIP: "5.6.7.8", HostPort: "80"}, + }, + }, + 
UnrecognizedTransportProtocolErrorName, + }, + } + + for i, pair := range badInputs { + _, err := PortBindingFromDockerPortBinding(pair.dockerPortBindings) + if err == nil { + t.Errorf("Expected error converting port binding pair #%v", i) + } + namedErr, ok := err.(apierrors.NamedError) + if !ok { + t.Errorf("Expected err to implement NamedError") + } + if namedErr.ErrorName() != pair.errorName { + t.Errorf("Expected %s but was %s", pair.errorName, namedErr.ErrorName()) + } + } +} diff --git a/agent/api/container/registryauth.go b/agent/api/container/registryauth.go new file mode 100644 index 00000000000..25e54e0d8e3 --- /dev/null +++ b/agent/api/container/registryauth.go @@ -0,0 +1,105 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "sync" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + + "github.com/docker/docker/api/types" +) + +// RegistryAuthenticationData is the authentication data sent by the ECS backend. Currently, the only supported +// authentication data is for ECR. 
+type RegistryAuthenticationData struct { + Type string `json:"type"` + ECRAuthData *ECRAuthData `json:"ecrAuthData"` + ASMAuthData *ASMAuthData `json:"asmAuthData"` +} + +// ECRAuthData is the authentication details for ECR specifying the region, registryID, and possible endpoint override +type ECRAuthData struct { + EndpointOverride string `json:"endpointOverride"` + Region string `json:"region"` + RegistryID string `json:"registryId"` + UseExecutionRole bool `json:"useExecutionRole"` + pullCredentials credentials.IAMRoleCredentials + dockerAuthConfig types.AuthConfig + lock sync.RWMutex +} + +// ASMAuthData is the authentication data required for Docker private registry auth +type ASMAuthData struct { + // CredentialsParameter is set by ACS and specifies the name of the + // parameter to retrieve from ASM + CredentialsParameter string `json:"credentialsParameter"` + // Region is set by ACS and specifies the region to fetch the + // secret from + Region string `json:"region"` + // dockerAuthConfig gets populated during the ASM resource creation + // by the task engine + dockerAuthConfig types.AuthConfig + lock sync.RWMutex +} + +// GetPullCredentials returns the pull credentials in the auth +func (auth *ECRAuthData) GetPullCredentials() credentials.IAMRoleCredentials { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.pullCredentials +} + +// SetPullCredentials sets the credentials to pull from ECR in the auth +func (auth *ECRAuthData) SetPullCredentials(creds credentials.IAMRoleCredentials) { + auth.lock.Lock() + defer auth.lock.Unlock() + + auth.pullCredentials = creds +} + +// GetDockerAuthConfig returns the pull credentials in the auth +func (auth *ECRAuthData) GetDockerAuthConfig() types.AuthConfig { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.dockerAuthConfig +} + +// SetDockerAuthConfig sets the credentials to pull from ECR in the +// ecr auth data +func (auth *ECRAuthData) SetDockerAuthConfig(dac types.AuthConfig) { + 
auth.lock.Lock() + defer auth.lock.Unlock() + + auth.dockerAuthConfig = dac +} + +// GetDockerAuthConfig returns the pull credentials in the auth +func (auth *ASMAuthData) GetDockerAuthConfig() types.AuthConfig { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.dockerAuthConfig +} + +// SetDockerAuthConfig sets the credentials to pull from ECR in the +// auth +func (auth *ASMAuthData) SetDockerAuthConfig(dac types.AuthConfig) { + auth.lock.Lock() + defer auth.lock.Unlock() + + auth.dockerAuthConfig = dac +} diff --git a/agent/api/container/status/containerstatus.go b/agent/api/container/status/containerstatus.go new file mode 100644 index 00000000000..aa4ddea7c5b --- /dev/null +++ b/agent/api/container/status/containerstatus.go @@ -0,0 +1,194 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "errors" + "strings" +) + +const ( + // ContainerStatusNone is the zero state of a container; this container has not completed pull + ContainerStatusNone ContainerStatus = iota + // ContainerPulled represents a container which has had the image pulled + ContainerPulled + // ContainerCreated represents a container that has been created + ContainerCreated + // ContainerRunning represents a container that has started + ContainerRunning + // ContainerResourcesProvisioned represents a container that has completed provisioning all of its + // resources. 
Non-internal containers (containers present in the task definition) transition to + // this state without doing any additional work. However, containers that are added to a task + // by the ECS Agent would possibly need to perform additional actions before they can be + // considered "ready" and contribute to the progress of a task. For example, the "pause" container + // would be provisioned by invoking CNI plugins + ContainerResourcesProvisioned + // ContainerStopped represents a container that has stopped + ContainerStopped + // ContainerZombie is an "impossible" state that is used as the maximum + ContainerZombie +) + +const ( + // ContainerHealthUnknown is the initial status of container health + ContainerHealthUnknown ContainerHealthStatus = iota + // ContainerHealthy represents the status of container health check when returned healthy + ContainerHealthy + // ContainerUnhealthy represents the status of container health check when returned unhealthy + ContainerUnhealthy +) + +// ContainerStatus is an enumeration of valid states in the container lifecycle +type ContainerStatus int32 + +// ContainerHealthStatus is an enumeration of container health check status +type ContainerHealthStatus int32 + +var containerStatusMap = map[string]ContainerStatus{ + "NONE": ContainerStatusNone, + "PULLED": ContainerPulled, + "CREATED": ContainerCreated, + "RUNNING": ContainerRunning, + "RESOURCES_PROVISIONED": ContainerResourcesProvisioned, + "STOPPED": ContainerStopped, +} + +// BackendStatus returns the container health status recognized by backend +func (healthStatus ContainerHealthStatus) BackendStatus() string { + switch healthStatus { + case ContainerHealthy: + return "HEALTHY" + case ContainerUnhealthy: + return "UNHEALTHY" + default: + return "UNKNOWN" + } +} + +// String returns the readable description of the container health status +func (healthStatus ContainerHealthStatus) String() string { + return healthStatus.BackendStatus() +} + +// String returns a human 
readable string representation of this object +func (cs ContainerStatus) String() string { + for k, v := range containerStatusMap { + if v == cs { + return k + } + } + return "NONE" +} + +// ShouldReportToBackend returns true if the container status is recognized as a +// valid state by ECS. Note that not all container statuses are recognized by ECS +// or map to ECS states +func (cs *ContainerStatus) ShouldReportToBackend(steadyStateStatus ContainerStatus) bool { + return *cs == steadyStateStatus || *cs == ContainerStopped +} + +// BackendStatus maps the internal container status in the agent to that in the +// backend +func (cs *ContainerStatus) BackendStatus(steadyStateStatus ContainerStatus) ContainerStatus { + if *cs == steadyStateStatus { + return ContainerRunning + } + + if *cs == ContainerStopped { + return ContainerStopped + } + + return ContainerStatusNone +} + +// Terminal returns true if the container status is STOPPED +func (cs ContainerStatus) Terminal() bool { + return cs == ContainerStopped +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ContainerStatus data +func (cs *ContainerStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *cs = ContainerStatusNone + return nil + } + if b[0] != '"' || b[len(b)-1] != '"' { + *cs = ContainerStatusNone + return errors.New("container status unmarshal: status must be a string or null; Got " + string(b)) + } + strStatus := string(b[1 : len(b)-1]) + // 'UNKNOWN' and 'DEAD' for Compatibility with v1.0.0 state files + if strStatus == "UNKNOWN" { + *cs = ContainerStatusNone + return nil + } + if strStatus == "DEAD" { + *cs = ContainerStopped + return nil + } + + stat, ok := containerStatusMap[strStatus] + if !ok { + *cs = ContainerStatusNone + return errors.New("container status unmarshal: unrecognized status") + } + *cs = stat + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding the ContainerStatus type +func (cs *ContainerStatus) 
MarshalJSON() ([]byte, error) { + if cs == nil { + return nil, nil + } + return []byte(`"` + cs.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded container health data +func (healthStatus *ContainerHealthStatus) UnmarshalJSON(b []byte) error { + *healthStatus = ContainerHealthUnknown + + if strings.ToLower(string(b)) == "null" { + return nil + } + if b[0] != '"' || b[len(b)-1] != '"' { + return errors.New("container health status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := string(b[1 : len(b)-1]) + switch strStatus { + case "UNKNOWN": + // The health status is already set to ContainerHealthUnknown initially + case "HEALTHY": + *healthStatus = ContainerHealthy + case "UNHEALTHY": + *healthStatus = ContainerUnhealthy + default: + return errors.New("container health status unmarshal: unrecognized status: " + string(b)) + } + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding the ContainerHealthStatus type +func (healthStatus *ContainerHealthStatus) MarshalJSON() ([]byte, error) { + if healthStatus == nil { + return nil, nil + } + return []byte(`"` + healthStatus.String() + `"`), nil +} + +// IsRunning returns true if the container status is either RUNNING or RESOURCES_PROVISIONED +func (cs ContainerStatus) IsRunning() bool { + return cs == ContainerRunning || cs == ContainerResourcesProvisioned +} diff --git a/agent/api/container/status/containerstatus_test.go b/agent/api/container/status/containerstatus_test.go new file mode 100644 index 00000000000..1ee87b3ba14 --- /dev/null +++ b/agent/api/container/status/containerstatus_test.go @@ -0,0 +1,172 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
 A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShouldReportToBackend(t *testing.T) { + // ContainerStatusNone is not reported to backend + var containerStatus ContainerStatus + assert.False(t, containerStatus.ShouldReportToBackend(ContainerRunning)) + assert.False(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + + // ContainerPulled is not reported to backend + containerStatus = ContainerPulled + assert.False(t, containerStatus.ShouldReportToBackend(ContainerRunning)) + assert.False(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + + // ContainerCreated is not reported to backend + containerStatus = ContainerCreated + assert.False(t, containerStatus.ShouldReportToBackend(ContainerRunning)) + assert.False(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + + containerStatus = ContainerRunning + // ContainerRunning is reported to backend if the steady state is RUNNING as well + assert.True(t, containerStatus.ShouldReportToBackend(ContainerRunning)) + // ContainerRunning is not reported to backend if the steady state is RESOURCES_PROVISIONED + assert.False(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + + containerStatus = ContainerResourcesProvisioned + // ContainerResourcesProvisioned is reported to backend if the steady state + // is RESOURCES_PROVISIONED + assert.True(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + + // ContainerStopped is always reported to backend, regardless of steady state + containerStatus = ContainerStopped + 
assert.True(t, containerStatus.ShouldReportToBackend(ContainerRunning)) + assert.True(t, containerStatus.ShouldReportToBackend(ContainerResourcesProvisioned)) + +} + +func TestBackendStatus(t *testing.T) { + // BackendStatus is ContainerStatusNone when container status is ContainerStatusNone + var containerStatus ContainerStatus + assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone) + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone) + + // BackendStatus is still ContainerStatusNone when container status is ContainerPulled + containerStatus = ContainerPulled + assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone) + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone) + + // BackendStatus is still ContainerStatusNone when container status is ContainerCreated + containerStatus = ContainerCreated + assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone) + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone) + + containerStatus = ContainerRunning + // BackendStatus is ContainerRunning when container status is ContainerRunning + // and steady state is ContainerRunning + assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerRunning) + // BackendStatus is still ContainerStatusNone when container status is ContainerRunning + // and steady state is ContainerResourcesProvisioned + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone) + + containerStatus = ContainerResourcesProvisioned + // BackendStatus is still ContainerRunning when container status is ContainerResourcesProvisioned + // and steady state is ContainerResourcesProvisioned + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerRunning) + + // BackendStatus is ContainerStopped when 
container status is ContainerStopped + containerStatus = ContainerStopped + assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStopped) + assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStopped) +} + +type testContainerStatus struct { + SomeStatus ContainerStatus `json:"status"` +} + +func TestUnmarshalContainerStatus(t *testing.T) { + status := ContainerStatusNone + + err := json.Unmarshal([]byte(`"RUNNING"`), &status) + if err != nil { + t.Error(err) + } + if status != ContainerRunning { + t.Error("RUNNING should unmarshal to RUNNING, not " + status.String()) + } + + var test testContainerStatus + err = json.Unmarshal([]byte(`{"status":"STOPPED"}`), &test) + if err != nil { + t.Error(err) + } + if test.SomeStatus != ContainerStopped { + t.Error("STOPPED should unmarshal to STOPPED, not " + test.SomeStatus.String()) + } +} + +func TestHealthBackendStatus(t *testing.T) { + testCases := []struct { + status ContainerHealthStatus + result string + }{ + { + status: ContainerHealthy, + result: "HEALTHY", + }, + { + status: ContainerUnhealthy, + result: "UNHEALTHY", + }, { + status: ContainerHealthUnknown, + result: "UNKNOWN", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Status: %s BackendStatus: %s", tc.status, tc.result), func(t *testing.T) { + assert.Equal(t, tc.status.BackendStatus(), tc.result) + }) + } +} + +func TestUnmarshalContainerHealthStatus(t *testing.T) { + testCases := []struct { + Status ContainerHealthStatus + String string + }{ + { + Status: ContainerHealthUnknown, + String: `"UNKNOWN"`, + }, + { + Status: ContainerHealthy, + String: `"HEALTHY"`, + }, + { + Status: ContainerUnhealthy, + String: `"UNHEALTHY"`, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Status: %s, String value: %s", tc.Status, tc.String), func(t *testing.T) { + var status ContainerHealthStatus + err := json.Unmarshal([]byte(tc.String), &status) + assert.NoError(t, err) + 
 assert.Equal(t, status, tc.Status) + }) + } +} diff --git a/agent/api/container/status/managedagentstatus.go b/agent/api/container/status/managedagentstatus.go new file mode 100644 index 00000000000..ce36d7ff0e6 --- /dev/null +++ b/agent/api/container/status/managedagentstatus.go @@ -0,0 +1,109 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "errors" + "strings" +) + +const ( + // ManagedAgentStatusNone is the zero state of a managed agent + ManagedAgentStatusNone ManagedAgentStatus = iota + // ManagedAgentCreated represents a managed agent which has dependencies in place + ManagedAgentCreated + // ManagedAgentRunning represents a managed agent that has started successfully + ManagedAgentRunning + // ManagedAgentStopped represents a managed agent that has stopped + ManagedAgentStopped +) + +// ManagedAgentStatus is an enumeration of valid states in the managed agent lifecycle +type ManagedAgentStatus int32 + +var managedAgentStatusMap = map[string]ManagedAgentStatus{ + "NONE": ManagedAgentStatusNone, + "CREATED": ManagedAgentCreated, + "RUNNING": ManagedAgentRunning, + "STOPPED": ManagedAgentStopped, +} + +// String returns a human readable string representation of this object +func (mas ManagedAgentStatus) String() string { + for k, v := range managedAgentStatusMap { + if v == mas { + return k + } + } + return "NONE" +} + +// Terminal returns true if the managed agent status is STOPPED +func (mas 
 ManagedAgentStatus) Terminal() bool { + return mas == ManagedAgentStopped +} + +// IsRunning returns true if the managed agent status is RUNNING +func (mas ManagedAgentStatus) IsRunning() bool { + return mas == ManagedAgentRunning +} + +// BackendStatus maps the internal managedAgent status in the agent to that in the backend +// This exists to force the status to be one of PENDING|RUNNING|STOPPED +func (mas ManagedAgentStatus) BackendStatus() string { + switch mas { + case ManagedAgentRunning: + return mas.String() + case ManagedAgentStopped: + return mas.String() + } + return "PENDING" +} + +// ShouldReportToBackend returns true if the managedAgent status is recognized as a +// valid state by ECS. Note that not all managedAgent statuses are recognized by ECS +// or map to ECS states +// For now we expect PENDING/RUNNING/STOPPED as valid states +// we should only skip events that have ManagedAgentStatusNone +func (mas *ManagedAgentStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *mas = ManagedAgentStatusNone + return nil + } + if b[0] != '"' || b[len(b)-1] != '"' { + *mas = ManagedAgentStatusNone + return errors.New("managed agent status unmarshal: status must be a string or null; Got " + string(b)) + } + + stat, ok := managedAgentStatusMap[string(b[1:len(b)-1])] + if !ok { + *mas = ManagedAgentStatusNone + return errors.New("managed agent status unmarshal: unrecognized status") + } + *mas = stat + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding the ManagedAgentStatus type +func (mas *ManagedAgentStatus) MarshalJSON() ([]byte, error) { + if mas == nil { + return nil, nil + } + return []byte(`"` + mas.String() + `"`), nil +} diff --git 
 a/agent/api/container/status/managedagentstatus_test.go b/agent/api/container/status/managedagentstatus_test.go new file mode 100644 index 00000000000..1b1490de963 --- /dev/null +++ b/agent/api/container/status/managedagentstatus_test.go @@ -0,0 +1,95 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTerminal(t *testing.T) { + terminalStatus := ManagedAgentStopped + runningStatus := ManagedAgentRunning + assert.True(t, terminalStatus.Terminal()) + assert.False(t, runningStatus.Terminal()) +} + +func TestIsRunning(t *testing.T) { + terminalStatus := ManagedAgentStopped + runningStatus := ManagedAgentRunning + assert.True(t, runningStatus.IsRunning()) + assert.False(t, terminalStatus.IsRunning()) +} + +func TestShouldReportManagedAgentStatusToBackend(t *testing.T) { + // We should not report ManagedAgentStatusNone + var managedAgentStatus ManagedAgentStatus + assert.Equal(t, false, managedAgentStatus.ShouldReportToBackend()) + // We should report ManagedAgentCreated (ie PENDING) + managedAgentStatus = ManagedAgentCreated + assert.Equal(t, true, managedAgentStatus.ShouldReportToBackend()) + // We should report ManagedAgentRunning (ie RUNNING) + managedAgentStatus = ManagedAgentRunning + assert.Equal(t, true, managedAgentStatus.ShouldReportToBackend()) + // We should report ManagedAgentStopped (ie STOPPED) + 
managedAgentStatus = ManagedAgentStopped + assert.Equal(t, true, managedAgentStatus.ShouldReportToBackend()) +} + +func TestManagedAgentBackendStatus(t *testing.T) { + // BackendStatus is "PENDING" when managedAgent status is ManagedAgentStatusNone + var managedAgentStatus ManagedAgentStatus + assert.Equal(t, "PENDING", managedAgentStatus.BackendStatus()) + + managedAgentStatus = ManagedAgentCreated + // BackendStatus is "PENDING" when managedAgent status is ManagedAgentCreated + assert.Equal(t, "PENDING", managedAgentStatus.BackendStatus()) + + managedAgentStatus = ManagedAgentRunning + // BackendStatus is "RUNNING" when managedAgent status is ManagedAgentRunning + assert.Equal(t, "RUNNING", managedAgentStatus.BackendStatus()) + + // BackendStatus is "STOPPED" when managedAgent status is ManagedAgentStopped + managedAgentStatus = ManagedAgentStopped + assert.Equal(t, "STOPPED", managedAgentStatus.BackendStatus()) +} + +type testManagedAgentStatus struct { + SomeStatus ManagedAgentStatus `json:"status"` +} + +func TestUnmarshalManagedAgentStatus(t *testing.T) { + status := ManagedAgentStatusNone + + err := json.Unmarshal([]byte(`"RUNNING"`), &status) + if err != nil { + t.Error(err) + } + if status != ManagedAgentRunning { + t.Error("RUNNING should unmarshal to RUNNING, not " + status.String()) + } + + var test testManagedAgentStatus + err = json.Unmarshal([]byte(`{"status":"STOPPED"}`), &test) + if err != nil { + t.Error(err) + } + if test.SomeStatus != ManagedAgentStopped { + t.Error("STOPPED should unmarshal to STOPPED, not " + test.SomeStatus.String()) + } +} diff --git a/agent/api/container/transitiondependency.go b/agent/api/container/transitiondependency.go new file mode 100644 index 00000000000..42c9e4b82d6 --- /dev/null +++ b/agent/api/container/transitiondependency.go @@ -0,0 +1,96 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "encoding/json" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// TransitionDependencySet contains dependencies that impact transitions of +// containers. +type TransitionDependencySet struct { + // ContainerDependencies is the set of containers on which a transition is + // dependent. + ContainerDependencies []ContainerDependency `json:"ContainerDependencies"` + // ResourceDependencies is the set of resources on which a transition is + // dependent. + ResourceDependencies []ResourceDependency `json:"ResourceDependencies"` +} + +// ContainerDependency defines the relationship between a dependent container +// and its dependency. +type ContainerDependency struct { + // ContainerName defines the container on which a transition depends + ContainerName string `json:"ContainerName"` + // SatisfiedStatus defines the status that satisfies the dependency + SatisfiedStatus apicontainerstatus.ContainerStatus `json:"SatisfiedStatus"` + // DependentStatus defines the status that cannot be reached until the + // resource satisfies the dependency + DependentStatus apicontainerstatus.ContainerStatus `json:"DependentStatus,omitempty"` +} + +// ResourceDependency defines the relationship between a dependent container +// and its resource dependency. 
+type ResourceDependency struct { + // Name defines the Resource on which a transition depends + Name string `json:"Name"` + // RequiredStatus defines the status that satisfies the dependency + RequiredStatus resourcestatus.ResourceStatus `json:"RequiredStatus"` +} + +// GetRequiredStatus returns the required status for the dependency +func (rd *ResourceDependency) GetRequiredStatus() resourcestatus.ResourceStatus { + return rd.RequiredStatus +} + +// TransitionDependenciesMap is a map of the dependent container status to other +// dependencies that must be satisfied. +type TransitionDependenciesMap map[apicontainerstatus.ContainerStatus]TransitionDependencySet + +// UnmarshalJSON decodes the TransitionDependencySet tag in the JSON encoded string +// into the TransitionDependenciesMap object +func (td *TransitionDependenciesMap) UnmarshalJSON(b []byte) error { + depMap := make(map[apicontainerstatus.ContainerStatus]TransitionDependencySet) + err := json.Unmarshal(b, &depMap) + if err == nil { + *td = depMap + return nil + } + seelog.Debugf("Unmarshal 'TransitionDependencySet': %s, not a map: %v", string(b), err) + // Unmarshal to deprecated 'TransitionDependencySet' and then convert to a map + tdSet := TransitionDependencySet{} + if err := json.Unmarshal(b, &tdSet); err != nil { + return errors.Wrapf(err, + "Unmarshal 'TransitionDependencySet': does not comply with any of the dependency types") + } + for _, dep := range tdSet.ContainerDependencies { + dependentStatus := dep.DependentStatus + // no need for DependentStatus field anymore, since it becomes the map's key + dep.DependentStatus = 0 + if _, ok := depMap[dependentStatus]; !ok { + depMap[dependentStatus] = TransitionDependencySet{} + } + deps := depMap[dependentStatus] + deps.ContainerDependencies = append(deps.ContainerDependencies, dep) + depMap[dependentStatus] = deps + } + *td = depMap + return nil +} diff --git a/agent/api/container/transitiondependency_test.go 
b/agent/api/container/transitiondependency_test.go new file mode 100644 index 00000000000..15c5b0ff33f --- /dev/null +++ b/agent/api/container/transitiondependency_test.go @@ -0,0 +1,60 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "encoding/json" + "testing" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/stretchr/testify/assert" +) + +func TestUnmarshalOldTransitionDependencySet(t *testing.T) { + bytes := []byte(`{ + "ContainerDependencies": [ + { + "ContainerName": "container", + "SatisfiedStatus": "RUNNING", + "DependentStatus": "RUNNING" + } + ] + }`) + unmarshalledTdMap := TransitionDependenciesMap{} + err := json.Unmarshal(bytes, &unmarshalledTdMap) + assert.NoError(t, err) + assert.Len(t, unmarshalledTdMap, 1) + assert.NotNil(t, unmarshalledTdMap[apicontainerstatus.ContainerRunning]) + dep := unmarshalledTdMap[apicontainerstatus.ContainerRunning].ContainerDependencies + assert.Len(t, dep, 1) + assert.Equal(t, "container", dep[0].ContainerName) + assert.Equal(t, apicontainerstatus.ContainerRunning, dep[0].SatisfiedStatus) + assert.Equal(t, apicontainerstatus.ContainerStatusNone, dep[0].DependentStatus) +} + +func TestUnmarshalNewTransitionDependencySet(t *testing.T) { + bytes := []byte(`{"1":{"ContainerDependencies":[{"ContainerName":"container","SatisfiedStatus":"RUNNING"}]}}`) + unmarshalledTdMap := TransitionDependenciesMap{} + 
err := json.Unmarshal(bytes, &unmarshalledTdMap) + assert.NoError(t, err) + assert.Len(t, unmarshalledTdMap, 1) + assert.NotNil(t, unmarshalledTdMap[apicontainerstatus.ContainerPulled]) + dep := unmarshalledTdMap[apicontainerstatus.ContainerPulled].ContainerDependencies + assert.Len(t, dep, 1) + assert.Equal(t, "container", dep[0].ContainerName) + assert.Equal(t, apicontainerstatus.ContainerRunning, dep[0].SatisfiedStatus) + assert.Equal(t, apicontainerstatus.ContainerStatusNone, dep[0].DependentStatus) +} diff --git a/agent/api/container/transport.go b/agent/api/container/transport.go new file mode 100644 index 00000000000..f17d0010c1b --- /dev/null +++ b/agent/api/container/transport.go @@ -0,0 +1,91 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package container + +import ( + "errors" + "strings" + + "github.com/cihub/seelog" +) + +const ( + // TransportProtocolTCP represents TCP + TransportProtocolTCP TransportProtocol = iota + // TransportProtocolUDP represents UDP + TransportProtocolUDP + + tcp = "tcp" + udp = "udp" +) + +// TransportProtocol is an enumeration of valid transport protocols +type TransportProtocol int32 + +// NewTransportProtocol returns a TransportProtocol from a string in the task +func NewTransportProtocol(protocol string) (TransportProtocol, error) { + switch protocol { + case tcp: + return TransportProtocolTCP, nil + case udp: + return TransportProtocolUDP, nil + default: + return TransportProtocolTCP, errors.New(protocol + " is not a recognized transport protocol") + } +} + +// String converts TransportProtocol to a string +func (tp *TransportProtocol) String() string { + if tp == nil { + return tcp + } + switch *tp { + case TransportProtocolUDP: + return udp + case TransportProtocolTCP: + return tcp + default: + seelog.Critical("Unknown TransportProtocol type!") + return tcp + } +} + +// UnmarshalJSON for TransportProtocol determines whether to use TCP or UDP, +// setting TCP as the zero-value but treating other unrecognized values as +// errors +func (tp *TransportProtocol) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *tp = TransportProtocolTCP + seelog.Warn("Unmarshalled nil TransportProtocol as TCP") + return nil + } + switch string(b) { + case `"tcp"`: + *tp = TransportProtocolTCP + case `"udp"`: + *tp = TransportProtocolUDP + default: + *tp = TransportProtocolTCP + return errors.New("TransportProtocol must be \"tcp\" or \"udp\"; Got " + string(b)) + } + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding the TransportProtocol type +func (tp *TransportProtocol) MarshalJSON() ([]byte, error) { + if tp == nil { + return []byte("null"), nil + } + return []byte(`"` + tp.String() + `"`), nil +} diff --git 
a/agent/api/container/transport_test.go b/agent/api/container/transport_test.go new file mode 100644 index 00000000000..1cb3d1ba8c2 --- /dev/null +++ b/agent/api/container/transport_test.go @@ -0,0 +1,141 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package container + +import ( + "encoding/json" + "testing" +) + +func TestUnmarshalTransportProtocol_Null(t *testing.T) { + tp := TransportProtocolTCP + + err := json.Unmarshal([]byte("null"), &tp) + if err != nil { + t.Error(err) + } + if tp != TransportProtocolTCP { + t.Error("null TransportProtocol should be TransportProtocolTCP") + } +} + +func TestUnmarshalTransportProtocol_TCP(t *testing.T) { + tp := TransportProtocolTCP + + err := json.Unmarshal([]byte(`"tcp"`), &tp) + if err != nil { + t.Error(err) + } + if tp != TransportProtocolTCP { + t.Error("tcp TransportProtocol should be TransportProtocolTCP") + } +} + +func TestUnmarshalTransportProtocol_UDP(t *testing.T) { + tp := TransportProtocolTCP + + err := json.Unmarshal([]byte(`"udp"`), &tp) + if err != nil { + t.Error(err) + } + if tp != TransportProtocolUDP { + t.Error("udp TransportProtocol should be TransportProtocolUDP") + } +} + +func TestUnmarshalTransportProtocol_NullStruct(t *testing.T) { + tp := struct { + Field1 TransportProtocol + }{} + + err := json.Unmarshal([]byte(`{"Field1":null}`), &tp) + if err != nil { + t.Error(err) + } + if tp.Field1 != TransportProtocolTCP { + t.Error("null TransportProtocol 
should be TransportProtocolTCP") + } +} + +func TestMarshalTransportProtocol_Unset(t *testing.T) { + data := struct { + Field TransportProtocol + }{} + + json, err := json.Marshal(&data) + if err != nil { + t.Error(err) + } + if string(json) != `{"Field":"tcp"}` { + t.Error(string(json)) + } +} + +func TestMarshalTransportProtocol_TCP(t *testing.T) { + data := struct { + Field TransportProtocol + }{TransportProtocolTCP} + + json, err := json.Marshal(&data) + if err != nil { + t.Error(err) + } + if string(json) != `{"Field":"tcp"}` { + t.Error(string(json)) + } +} + +func TestMarshalTransportProtocol_UDP(t *testing.T) { + data := struct { + Field TransportProtocol + }{TransportProtocolUDP} + + json, err := json.Marshal(&data) + if err != nil { + t.Error(err) + } + if string(json) != `{"Field":"udp"}` { + t.Error(string(json)) + } +} + +func TestMarshalUnmarshalTransportProtocol(t *testing.T) { + data := struct { + Field1 TransportProtocol + Field2 TransportProtocol + }{TransportProtocolTCP, TransportProtocolUDP} + + jsonBytes, err := json.Marshal(&data) + if err != nil { + t.Error(err) + } + if string(jsonBytes) != `{"Field1":"tcp","Field2":"udp"}` { + t.Error(string(jsonBytes)) + } + + unmarshalTo := struct { + Field1 TransportProtocol + Field2 TransportProtocol + }{} + + err = json.Unmarshal(jsonBytes, &unmarshalTo) + if err != nil { + t.Error(err) + } + if unmarshalTo.Field1 != TransportProtocolTCP || unmarshalTo.Field2 != TransportProtocolUDP { + t.Errorf("Expected tcp for Field1 but was %x, expected udp for Field2 but was %x", unmarshalTo.Field1, unmarshalTo.Field2) + } +} diff --git a/agent/api/ecsclient/client.go b/agent/api/ecsclient/client.go new file mode 100644 index 00000000000..203ce0f6f6c --- /dev/null +++ b/agent/api/ecsclient/client.go @@ -0,0 +1,607 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecsclient + +import ( + "errors" + "fmt" + "runtime" + "strings" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/async" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/cihub/seelog" + "github.com/docker/docker/pkg/system" +) + +const ( + ecsMaxImageDigestLength = 255 + ecsMaxReasonLength = 255 + ecsMaxRuntimeIDLength = 255 + pollEndpointCacheSize = 1 + pollEndpointCacheTTL = 20 * time.Minute + roundtripTimeout = 5 * time.Second + azAttrName = "ecs.availability-zone" +) + +// APIECSClient implements ECSClient +type APIECSClient struct { + credentialProvider *credentials.Credentials + config *config.Config + standardClient api.ECSSDK + submitStateChangeClient api.ECSSubmitStateSDK + ec2metadata ec2.EC2MetadataClient + pollEndpoinCache async.Cache +} + +// NewECSClient creates a new ECSClient interface object +func NewECSClient( + credentialProvider *credentials.Credentials, + config *config.Config, + ec2MetadataClient ec2.EC2MetadataClient) api.ECSClient { + + var ecsConfig aws.Config + 
ecsConfig.Credentials = credentialProvider + ecsConfig.Region = &config.AWSRegion + ecsConfig.HTTPClient = httpclient.New(roundtripTimeout, config.AcceptInsecureCert) + if config.APIEndpoint != "" { + ecsConfig.Endpoint = &config.APIEndpoint + } + standardClient := ecs.New(session.New(&ecsConfig)) + submitStateChangeClient := newSubmitStateChangeClient(&ecsConfig) + pollEndpoinCache := async.NewLRUCache(pollEndpointCacheSize, pollEndpointCacheTTL) + return &APIECSClient{ + credentialProvider: credentialProvider, + config: config, + standardClient: standardClient, + submitStateChangeClient: submitStateChangeClient, + ec2metadata: ec2MetadataClient, + pollEndpoinCache: pollEndpoinCache, + } +} + +// SetSDK overrides the SDK to the given one. This is useful for injecting a +// test implementation +func (client *APIECSClient) SetSDK(sdk api.ECSSDK) { + client.standardClient = sdk +} + +// SetSubmitStateChangeSDK overrides the SDK to the given one. This is useful +// for injecting a test implementation +func (client *APIECSClient) SetSubmitStateChangeSDK(sdk api.ECSSubmitStateSDK) { + client.submitStateChangeClient = sdk +} + +// CreateCluster creates a cluster from a given name and returns its arn +func (client *APIECSClient) CreateCluster(clusterName string) (string, error) { + resp, err := client.standardClient.CreateCluster(&ecs.CreateClusterInput{ClusterName: &clusterName}) + if err != nil { + seelog.Criticalf("Could not create cluster: %v", err) + return "", err + } + seelog.Infof("Created a cluster named: %s", clusterName) + return *resp.Cluster.ClusterName, nil +} + +// RegisterContainerInstance calculates the appropriate resources, creates +// the default cluster if necessary, and returns the registered +// ContainerInstanceARN if successful. Supplying a non-empty container +// instance ARN allows a container instance to update its registered +// resources. 
+func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn string, attributes []*ecs.Attribute, + tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice, + outpostARN string) (string, string, error) { + + clusterRef := client.config.Cluster + // If our clusterRef is empty, we should try to create the default + if clusterRef == "" { + clusterRef = config.DefaultClusterName + defer func() { + // Update the config value to reflect the cluster we end up in + client.config.Cluster = clusterRef + }() + // Attempt to register without checking existence of the cluster so we don't require + // excess permissions in the case where the cluster already exists and is active + containerInstanceArn, availabilityzone, err := client.registerContainerInstance(clusterRef, + containerInstanceArn, attributes, tags, registrationToken, platformDevices, outpostARN) + if err == nil { + return containerInstanceArn, availabilityzone, nil + } + + // If trying to register fails because the default cluster doesn't exist, try to create the cluster before calling + // register again + if apierrors.IsClusterNotFoundError(err) { + clusterRef, err = client.CreateCluster(clusterRef) + if err != nil { + return "", "", err + } + } + } + return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken, + platformDevices, outpostARN) +} + +func (client *APIECSClient) registerContainerInstance(clusterRef string, containerInstanceArn string, + attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string, + platformDevices []*ecs.PlatformDevice, outpostARN string) (string, string, error) { + + registerRequest := ecs.RegisterContainerInstanceInput{Cluster: &clusterRef} + var registrationAttributes []*ecs.Attribute + if containerInstanceArn != "" { + // We are re-connecting a previously registered instance, restored from snapshot. 
+ registerRequest.ContainerInstanceArn = &containerInstanceArn + } else { + // This is a new instance, not previously registered. + // Custom attribute registration only happens on initial instance registration. + for _, attribute := range client.getCustomAttributes() { + seelog.Debugf("Added a new custom attribute %v=%v", + aws.StringValue(attribute.Name), + aws.StringValue(attribute.Value), + ) + registrationAttributes = append(registrationAttributes, attribute) + } + } + // Standard attributes are included with all registrations. + registrationAttributes = append(registrationAttributes, attributes...) + + // Add additional attributes such as the os type + registrationAttributes = append(registrationAttributes, client.getAdditionalAttributes()...) + registrationAttributes = append(registrationAttributes, client.getOutpostAttribute(outpostARN)...) + + registerRequest.Attributes = registrationAttributes + if len(tags) > 0 { + registerRequest.Tags = tags + } + registerRequest.PlatformDevices = platformDevices + registerRequest = client.setInstanceIdentity(registerRequest) + + resources, err := client.getResources() + if err != nil { + return "", "", err + } + + registerRequest.TotalResources = resources + + registerRequest.ClientToken = &registrationToken + resp, err := client.standardClient.RegisterContainerInstance(&registerRequest) + if err != nil { + seelog.Errorf("Unable to register as a container instance with ECS: %v", err) + return "", "", err + } + + var availabilityzone = "" + if resp != nil { + for _, attr := range resp.ContainerInstance.Attributes { + if aws.StringValue(attr.Name) == azAttrName { + availabilityzone = aws.StringValue(attr.Value) + break + } + } + } + + seelog.Info("Registered container instance with cluster!") + err = validateRegisteredAttributes(registerRequest.Attributes, resp.ContainerInstance.Attributes) + return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), availabilityzone, err + } + + func (client *APIECSClient) 
setInstanceIdentity(registerRequest ecs.RegisterContainerInstanceInput) ecs.RegisterContainerInstanceInput { + instanceIdentityDoc := "" + instanceIdentitySignature := "" + + if client.config.NoIID { + seelog.Info("Fetching Instance ID Document has been disabled") + registerRequest.InstanceIdentityDocument = &instanceIdentityDoc + registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature + return registerRequest + } + + iidRetrieved := true + instanceIdentityDoc, err := client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentResource) + if err != nil { + seelog.Errorf("Unable to get instance identity document: %v", err) + iidRetrieved = false + } + registerRequest.InstanceIdentityDocument = &instanceIdentityDoc + + if iidRetrieved { + instanceIdentitySignature, err = client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource) + if err != nil { + seelog.Errorf("Unable to get instance identity signature: %v", err) + } + } + + registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature + return registerRequest +} + +func attributesToMap(attributes []*ecs.Attribute) map[string]string { + attributeMap := make(map[string]string) + attribs := attributes + for _, attribute := range attribs { + attributeMap[aws.StringValue(attribute.Name)] = aws.StringValue(attribute.Value) + } + return attributeMap +} + +func findMissingAttributes(expectedAttributes, actualAttributes map[string]string) ([]string, error) { + missingAttributes := make([]string, 0) + var err error + for key, val := range expectedAttributes { + if actualAttributes[key] != val { + missingAttributes = append(missingAttributes, key) + } else { + seelog.Tracef("Response contained expected value for attribute %v", key) + } + } + if len(missingAttributes) > 0 { + err = apierrors.NewAttributeError("Attribute validation failed") + } + return missingAttributes, err +} + +func (client *APIECSClient) getResources() ([]*ecs.Resource, error) { + // 
Micro-optimization, the pointer to this is used multiple times below + integerStr := "INTEGER" + + cpu, mem := getCpuAndMemory() + remainingMem := mem - int64(client.config.ReservedMemory) + seelog.Infof("Remaining mem: %d", remainingMem) + if remainingMem < 0 { + return nil, fmt.Errorf( + "api register-container-instance: reserved memory is higher than available memory on the host, total memory: %d, reserved: %d", + mem, client.config.ReservedMemory) + } + + cpuResource := ecs.Resource{ + Name: utils.Strptr("CPU"), + Type: &integerStr, + IntegerValue: &cpu, + } + memResource := ecs.Resource{ + Name: utils.Strptr("MEMORY"), + Type: &integerStr, + IntegerValue: &remainingMem, + } + portResource := ecs.Resource{ + Name: utils.Strptr("PORTS"), + Type: utils.Strptr("STRINGSET"), + StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPorts), + } + udpPortResource := ecs.Resource{ + Name: utils.Strptr("PORTS_UDP"), + Type: utils.Strptr("STRINGSET"), + StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPortsUDP), + } + + return []*ecs.Resource{&cpuResource, &memResource, &portResource, &udpPortResource}, nil +} + +func getCpuAndMemory() (int64, int64) { + memInfo, err := system.ReadMemInfo() + mem := int64(0) + if err == nil { + mem = memInfo.MemTotal / 1024 / 1024 // MiB + } else { + seelog.Errorf("Unable to get memory info: %v", err) + } + + cpu := runtime.NumCPU() * 1024 + + return int64(cpu), mem +} + +func validateRegisteredAttributes(expectedAttributes, actualAttributes []*ecs.Attribute) error { + var err error + expectedAttributesMap := attributesToMap(expectedAttributes) + actualAttributesMap := attributesToMap(actualAttributes) + missingAttributes, err := findMissingAttributes(expectedAttributesMap, actualAttributesMap) + if err != nil { + msg := strings.Join(missingAttributes, ",") + seelog.Errorf("Error registering attributes: %v", msg) + } + return err +} + +func (client *APIECSClient) getAdditionalAttributes() 
[]*ecs.Attribute { + return []*ecs.Attribute{ + { + Name: aws.String("ecs.os-type"), + Value: aws.String(config.OSType), + }, + } +} + +func (client *APIECSClient) getOutpostAttribute(outpostARN string) []*ecs.Attribute { + if len(outpostARN) > 0 { + return []*ecs.Attribute{ + { + Name: aws.String("ecs.outpost-arn"), + Value: aws.String(outpostARN), + }, + } + } + return []*ecs.Attribute{} +} + +func (client *APIECSClient) getCustomAttributes() []*ecs.Attribute { + var attributes []*ecs.Attribute + for attribute, value := range client.config.InstanceAttributes { + attributes = append(attributes, &ecs.Attribute{ + Name: aws.String(attribute), + Value: aws.String(value), + }) + } + return attributes +} + +func (client *APIECSClient) SubmitTaskStateChange(change api.TaskStateChange) error { + // Submit attachment state change + if change.Attachment != nil { + var attachments []*ecs.AttachmentStateChange + + eniStatus := change.Attachment.Status.String() + attachments = []*ecs.AttachmentStateChange{ + { + AttachmentArn: aws.String(change.Attachment.AttachmentARN), + Status: aws.String(eniStatus), + }, + } + + _, err := client.submitStateChangeClient.SubmitTaskStateChange(&ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String(client.config.Cluster), + Task: aws.String(change.TaskARN), + Attachments: attachments, + }) + if err != nil { + seelog.Warnf("Could not submit an attachment state change: %v", err) + return err + } + + return nil + } + + status := change.Status.BackendStatus() + + req := ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String(client.config.Cluster), + Task: aws.String(change.TaskARN), + Status: aws.String(status), + Reason: aws.String(change.Reason), + PullStartedAt: change.PullStartedAt, + PullStoppedAt: change.PullStoppedAt, + ExecutionStoppedAt: change.ExecutionStoppedAt, + } + + for _, managedAgentEvent := range change.ManagedAgents { + if mgspl := client.buildManagedAgentStateChangePayload(managedAgentEvent); mgspl != nil { + req.ManagedAgents 
= append(req.ManagedAgents, mgspl) + } + } + + containerEvents := make([]*ecs.ContainerStateChange, len(change.Containers)) + for i, containerEvent := range change.Containers { + containerEvents[i] = client.buildContainerStateChangePayload(containerEvent) + } + + req.Containers = containerEvents + + _, err := client.submitStateChangeClient.SubmitTaskStateChange(&req) + if err != nil { + seelog.Warnf("Could not submit task state change: [%s]: %v", change.String(), err) + return err + } + + return nil +} + +func trimString(inputString string, maxLen int) string { + if len(inputString) > maxLen { + trimmed := inputString[0:maxLen] + return trimmed + } else { + return inputString + } +} + +func (client *APIECSClient) buildManagedAgentStateChangePayload(change api.ManagedAgentStateChange) *ecs.ManagedAgentStateChange { + if !change.Status.ShouldReportToBackend() { + seelog.Warnf("Not submitting unsupported managed agent state %s for container %s in task %s", + change.Status.String(), change.Container.Name, change.TaskArn) + return nil + } + var trimmedReason *string + if change.Reason != "" { + trimmedReason = aws.String(trimString(change.Reason, ecsMaxReasonLength)) + } + return &ecs.ManagedAgentStateChange{ + ManagedAgentName: aws.String(change.Name), + ContainerName: aws.String(change.Container.Name), + Status: aws.String(change.Status.String()), + Reason: trimmedReason, + } +} + +func (client *APIECSClient) buildContainerStateChangePayload(change api.ContainerStateChange) *ecs.ContainerStateChange { + statechange := &ecs.ContainerStateChange{ + ContainerName: aws.String(change.ContainerName), + } + if change.RuntimeID != "" { + trimmedRuntimeID := trimString(change.RuntimeID, ecsMaxRuntimeIDLength) + statechange.RuntimeId = aws.String(trimmedRuntimeID) + } + if change.Reason != "" { + trimmedReason := trimString(change.Reason, ecsMaxReasonLength) + statechange.Reason = aws.String(trimmedReason) + } + if change.ImageDigest != "" { + trimmedImageDigest := 
trimString(change.ImageDigest, ecsMaxImageDigestLength) + statechange.ImageDigest = aws.String(trimmedImageDigest) + } + status := change.Status + + if status != apicontainerstatus.ContainerStopped && status != apicontainerstatus.ContainerRunning { + seelog.Warnf("Not submitting unsupported upstream container state %s for container %s in task %s", + status.String(), change.ContainerName, change.TaskArn) + return nil + } + stat := change.Status.String() + if stat == "DEAD" { + stat = apicontainerstatus.ContainerStopped.String() + } + statechange.Status = aws.String(stat) + + if change.ExitCode != nil { + exitCode := int64(aws.IntValue(change.ExitCode)) + statechange.ExitCode = aws.Int64(exitCode) + } + networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings)) + for i, binding := range change.PortBindings { + hostPort := int64(binding.HostPort) + containerPort := int64(binding.ContainerPort) + bindIP := binding.BindIP + protocol := binding.Protocol.String() + + networkBindings[i] = &ecs.NetworkBinding{ + BindIP: aws.String(bindIP), + ContainerPort: aws.Int64(containerPort), + HostPort: aws.Int64(hostPort), + Protocol: aws.String(protocol), + } + } + statechange.NetworkBindings = networkBindings + + return statechange +} + +func (client *APIECSClient) SubmitContainerStateChange(change api.ContainerStateChange) error { + pl := client.buildContainerStateChangePayload(change) + if pl == nil { + return nil + } + _, err := client.submitStateChangeClient.SubmitContainerStateChange(&ecs.SubmitContainerStateChangeInput{ + Cluster: aws.String(client.config.Cluster), + ContainerName: aws.String(change.ContainerName), + ExitCode: pl.ExitCode, + ManagedAgents: pl.ManagedAgents, + NetworkBindings: pl.NetworkBindings, + Reason: pl.Reason, + RuntimeId: pl.RuntimeId, + Status: pl.Status, + Task: aws.String(change.TaskArn), + }) + if err != nil { + seelog.Warnf("Could not submit container state change: [%s]: %v", change.String(), err) + return err + } + return nil +} + 
+func (client *APIECSClient) SubmitAttachmentStateChange(change api.AttachmentStateChange) error { + attachmentStatus := change.Attachment.Status.String() + + req := ecs.SubmitAttachmentStateChangesInput{ + Cluster: &client.config.Cluster, + Attachments: []*ecs.AttachmentStateChange{ + { + AttachmentArn: aws.String(change.Attachment.AttachmentARN), + Status: aws.String(attachmentStatus), + }, + }, + } + + _, err := client.submitStateChangeClient.SubmitAttachmentStateChanges(&req) + if err != nil { + seelog.Warnf("Could not submit attachment state change [%s]: %v", change.String(), err) + return err + } + + return nil +} + +func (client *APIECSClient) DiscoverPollEndpoint(containerInstanceArn string) (string, error) { + resp, err := client.discoverPollEndpoint(containerInstanceArn) + if err != nil { + return "", err + } + + return aws.StringValue(resp.Endpoint), nil +} + +func (client *APIECSClient) DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error) { + resp, err := client.discoverPollEndpoint(containerInstanceArn) + if err != nil { + return "", err + } + if resp.TelemetryEndpoint == nil { + return "", errors.New("No telemetry endpoint returned; nil") + } + + return aws.StringValue(resp.TelemetryEndpoint), nil +} + +func (client *APIECSClient) discoverPollEndpoint(containerInstanceArn string) (*ecs.DiscoverPollEndpointOutput, error) { + // Try getting an entry from the cache + cachedEndpoint, found := client.pollEndpoinCache.Get(containerInstanceArn) + if found { + // Cache hit. Return the output. + if output, ok := cachedEndpoint.(*ecs.DiscoverPollEndpointOutput); ok { + return output, nil + } + } + + // Cache miss, invoke the ECS DiscoverPollEndpoint API. 
+ seelog.Debugf("Invoking DiscoverPollEndpoint for '%s'", containerInstanceArn) + output, err := client.standardClient.DiscoverPollEndpoint(&ecs.DiscoverPollEndpointInput{ + ContainerInstance: &containerInstanceArn, + Cluster: &client.config.Cluster, + }) + if err != nil { + return nil, err + } + + // Cache the response from ECS. + client.pollEndpoinCache.Set(containerInstanceArn, output) + return output, nil +} + +func (client *APIECSClient) GetResourceTags(resourceArn string) ([]*ecs.Tag, error) { + output, err := client.standardClient.ListTagsForResource(&ecs.ListTagsForResourceInput{ + ResourceArn: &resourceArn, + }) + if err != nil { + return nil, err + } + return output.Tags, nil +} + +func (client *APIECSClient) UpdateContainerInstancesState(instanceARN string, status string) error { + seelog.Debugf("Invoking UpdateContainerInstancesState, status='%s' instanceARN='%s'", status, instanceARN) + _, err := client.standardClient.UpdateContainerInstancesState(&ecs.UpdateContainerInstancesStateInput{ + ContainerInstances: []*string{aws.String(instanceARN)}, + Status: aws.String(status), + Cluster: &client.config.Cluster, + }) + return err +} diff --git a/agent/api/ecsclient/client_test.go b/agent/api/ecsclient/client_test.go new file mode 100644 index 00000000000..372288074f8 --- /dev/null +++ b/agent/api/ecsclient/client_test.go @@ -0,0 +1,1082 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecsclient + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/async" + mock_async "github.com/aws/amazon-ecs-agent/agent/async/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" +) + +const ( + configuredCluster = "mycluster" + iid = "instanceIdentityDocument" + iidSignature = "signature" + registrationToken = "clientToken" +) + +var ( + iidResponse = []byte(iid) + iidSignatureResponse = []byte(iidSignature) + containerInstanceTags = []*ecs.Tag{ + { + Key: aws.String("my_key1"), + Value: aws.String("my_val1"), + }, + { + Key: aws.String("my_key2"), + Value: aws.String("my_val2"), + }, + } + containerInstanceTagsMap = map[string]string{ + "my_key1": "my_val1", + "my_key2": "my_val2", + } + testManagedAgents = []*ecs.ManagedAgentStateChange{ + { + ManagedAgentName: aws.String("test_managed_agent"), + ContainerName: aws.String("test_container"), + Status: aws.String("RUNNING"), + Reason: aws.String("test_reason"), + }, + } +) + +func NewMockClient(ctrl *gomock.Controller, + ec2Metadata ec2.EC2MetadataClient, + additionalAttributes map[string]string) (api.ECSClient, *mock_api.MockECSSDK, *mock_api.MockECSSubmitStateSDK) { + + return 
NewMockClientWithConfig(ctrl, ec2Metadata, additionalAttributes, + &config.Config{ + Cluster: configuredCluster, + AWSRegion: "us-east-1", + InstanceAttributes: additionalAttributes, + }) +} + +func NewMockClientWithConfig(ctrl *gomock.Controller, + ec2Metadata ec2.EC2MetadataClient, + additionalAttributes map[string]string, + cfg *config.Config) (api.ECSClient, *mock_api.MockECSSDK, *mock_api.MockECSSubmitStateSDK) { + client := NewECSClient(credentials.AnonymousCredentials, cfg, ec2Metadata) + mockSDK := mock_api.NewMockECSSDK(ctrl) + mockSubmitStateSDK := mock_api.NewMockECSSubmitStateSDK(ctrl) + client.(*APIECSClient).SetSDK(mockSDK) + client.(*APIECSClient).SetSubmitStateChangeSDK(mockSubmitStateSDK) + return client, mockSDK, mockSubmitStateSDK +} + +type containerSubmitInputMatcher struct { + ecs.SubmitContainerStateChangeInput +} + +type taskSubmitInputMatcher struct { + ecs.SubmitTaskStateChangeInput +} + +func strptr(s string) *string { return &s } +func intptr(i int) *int { return &i } +func int64ptr(i *int) *int64 { + if i == nil { + return nil + } + j := int64(*i) + return &j +} +func equal(lhs, rhs interface{}) bool { + return reflect.DeepEqual(lhs, rhs) +} +func (lhs *containerSubmitInputMatcher) Matches(x interface{}) bool { + rhs := x.(*ecs.SubmitContainerStateChangeInput) + + return (equal(lhs.Cluster, rhs.Cluster) && + equal(lhs.ContainerName, rhs.ContainerName) && + equal(lhs.ExitCode, rhs.ExitCode) && + equal(lhs.NetworkBindings, rhs.NetworkBindings) && + equal(lhs.ManagedAgents, rhs.ManagedAgents) && + equal(lhs.Reason, rhs.Reason) && + equal(lhs.Status, rhs.Status) && + equal(lhs.Task, rhs.Task)) +} + +func (lhs *containerSubmitInputMatcher) String() string { + return fmt.Sprintf("%+v", *lhs) +} + +func (lhs *taskSubmitInputMatcher) Matches(x interface{}) bool { + rhs := x.(*ecs.SubmitTaskStateChangeInput) + + if !(equal(lhs.Cluster, rhs.Cluster) && + equal(lhs.Task, rhs.Task) && + equal(lhs.Status, rhs.Status) && + equal(lhs.Reason, 
rhs.Reason) && + equal(len(lhs.Attachments), len(rhs.Attachments))) { + return false + } + + if len(lhs.Attachments) != 0 { + for i := range lhs.Attachments { + if !(equal(lhs.Attachments[i].Status, rhs.Attachments[i].Status) && + equal(lhs.Attachments[i].AttachmentArn, rhs.Attachments[i].AttachmentArn)) { + return false + } + } + } + + if len(lhs.Containers) != 0 && !equal(lhs.Containers, rhs.Containers) { + return false + } + + return true +} + +func (lhs *taskSubmitInputMatcher) String() string { + return fmt.Sprintf("%+v", *lhs) +} + +func TestSubmitContainerStateChange(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mockSubmitStateClient.EXPECT().SubmitContainerStateChange(&containerSubmitInputMatcher{ + ecs.SubmitContainerStateChangeInput{ + Cluster: strptr(configuredCluster), + Task: strptr("arn"), + ContainerName: strptr("cont"), + RuntimeId: strptr("runtime id"), + Status: strptr("RUNNING"), + NetworkBindings: []*ecs.NetworkBinding{ + { + BindIP: strptr("1.2.3.4"), + ContainerPort: int64ptr(intptr(1)), + HostPort: int64ptr(intptr(2)), + Protocol: strptr("tcp"), + }, + { + BindIP: strptr("2.2.3.4"), + ContainerPort: int64ptr(intptr(3)), + HostPort: int64ptr(intptr(4)), + Protocol: strptr("udp"), + }, + }, + }, + }) + err := client.SubmitContainerStateChange(api.ContainerStateChange{ + TaskArn: "arn", + ContainerName: "cont", + RuntimeID: "runtime id", + Status: apicontainerstatus.ContainerRunning, + PortBindings: []apicontainer.PortBinding{ + { + BindIP: "1.2.3.4", + ContainerPort: 1, + HostPort: 2, + }, + { + BindIP: "2.2.3.4", + ContainerPort: 3, + HostPort: 4, + Protocol: apicontainer.TransportProtocolUDP, + }, + }, + }) + if err != nil { + t.Errorf("Unable to submit container state change: %v", err) + } +} + +func TestSubmitContainerStateChangeFull(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer 
mockCtrl.Finish() + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + exitCode := 20 + reason := "I exited" + + mockSubmitStateClient.EXPECT().SubmitContainerStateChange(&containerSubmitInputMatcher{ + ecs.SubmitContainerStateChangeInput{ + Cluster: strptr(configuredCluster), + Task: strptr("arn"), + ContainerName: strptr("cont"), + RuntimeId: strptr("runtime id"), + Status: strptr("STOPPED"), + ExitCode: int64ptr(&exitCode), + Reason: strptr(reason), + NetworkBindings: []*ecs.NetworkBinding{ + { + BindIP: strptr(""), + ContainerPort: int64ptr(intptr(0)), + HostPort: int64ptr(intptr(0)), + Protocol: strptr("tcp"), + }, + }, + }, + }) + err := client.SubmitContainerStateChange(api.ContainerStateChange{ + TaskArn: "arn", + ContainerName: "cont", + RuntimeID: "runtime id", + Status: apicontainerstatus.ContainerStopped, + ExitCode: &exitCode, + Reason: reason, + PortBindings: []apicontainer.PortBinding{ + {}, + }, + }) + if err != nil { + t.Errorf("Unable to submit container state change: %v", err) + } +} + +func TestSubmitContainerStateChangeReason(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + exitCode := 20 + reason := strings.Repeat("a", ecsMaxReasonLength) + + mockSubmitStateClient.EXPECT().SubmitContainerStateChange(&containerSubmitInputMatcher{ + ecs.SubmitContainerStateChangeInput{ + Cluster: strptr(configuredCluster), + Task: strptr("arn"), + ContainerName: strptr("cont"), + Status: strptr("STOPPED"), + ExitCode: int64ptr(&exitCode), + Reason: strptr(reason), + NetworkBindings: []*ecs.NetworkBinding{}, + }, + }) + err := client.SubmitContainerStateChange(api.ContainerStateChange{ + TaskArn: "arn", + ContainerName: "cont", + Status: apicontainerstatus.ContainerStopped, + ExitCode: &exitCode, + Reason: reason, + }) + if err != nil { + t.Fatal(err) + } +} + +func 
TestSubmitContainerStateChangeLongReason(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + exitCode := 20 + trimmedReason := strings.Repeat("a", ecsMaxReasonLength) + reason := strings.Repeat("a", ecsMaxReasonLength+1) + + mockSubmitStateClient.EXPECT().SubmitContainerStateChange(&containerSubmitInputMatcher{ + ecs.SubmitContainerStateChangeInput{ + Cluster: strptr(configuredCluster), + Task: strptr("arn"), + ContainerName: strptr("cont"), + Status: strptr("STOPPED"), + ExitCode: int64ptr(&exitCode), + Reason: strptr(trimmedReason), + NetworkBindings: []*ecs.NetworkBinding{}, + }, + }) + err := client.SubmitContainerStateChange(api.ContainerStateChange{ + TaskArn: "arn", + ContainerName: "cont", + Status: apicontainerstatus.ContainerStopped, + ExitCode: &exitCode, + Reason: reason, + }) + if err != nil { + t.Errorf("Unable to submit container state change: %v", err) + } +} + +func buildAttributeList(capabilities []string, attributes map[string]string) []*ecs.Attribute { + var rv []*ecs.Attribute + for _, capability := range capabilities { + rv = append(rv, &ecs.Attribute{Name: aws.String(capability)}) + } + for key, value := range attributes { + rv = append(rv, &ecs.Attribute{Name: aws.String(key), Value: aws.String(value)}) + } + return rv +} + +func TestReRegisterContainerInstance(t *testing.T) { + additionalAttributes := map[string]string{"my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + "attribute_name_with_no_value": "", + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + client, mc, _ := NewMockClient(mockCtrl, mockEC2Metadata, additionalAttributes) + + fakeCapabilities := []string{"capability1", "capability2"} + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + 
"ecs.availability-zone": "us-west-2b", + "ecs.outpost-arn": "test:arn:outpost", + } + for i := range fakeCapabilities { + expectedAttributes[fakeCapabilities[i]] = "" + } + capabilities := buildAttributeList(fakeCapabilities, nil) + + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + assert.Equal(t, "arn:test", *req.ContainerInstanceArn, "Wrong container instance ARN") + assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster") + assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token") + assert.Equal(t, iid, *req.InstanceIdentityDocument, "Wrong IID") + assert.Equal(t, iidSignature, *req.InstanceIdentityDocumentSignature, "Wrong IID sig") + assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources") + resource, ok := findResource(req.TotalResources, "PORTS_UDP") + assert.True(t, ok, `Could not find resource "PORTS_UDP"`) + assert.Equal(t, "STRINGSET", *resource.Type, `Wrong type for resource "PORTS_UDP"`) + // "ecs.os-type", ecs.outpost-arn and the 2 that we specified as additionalAttributes + assert.Equal(t, 4, len(req.Attributes), "Wrong number of Attributes") + reqAttributes := func() map[string]string { + rv := make(map[string]string, len(req.Attributes)) + for i := range req.Attributes { + rv[aws.StringValue(req.Attributes[i].Name)] = aws.StringValue(req.Attributes[i].Value) + } + return rv + }() + for k, v := range reqAttributes { + assert.Contains(t, expectedAttributes, k) + assert.Equal(t, expectedAttributes[k], v) + } + assert.Equal(t, len(containerInstanceTags), len(req.Tags), "Wrong number of tags") + reqTags := extractTagsMapFromRegisterContainerInstanceInput(req) + for k, v := range reqTags { + 
assert.Contains(t, containerInstanceTagsMap, k) + assert.Equal(t, containerInstanceTagsMap[k], v) + } + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(fakeCapabilities, expectedAttributes), + }}, + nil), + ) + + arn, availabilityzone, err := client.RegisterContainerInstance("arn:test", capabilities, + containerInstanceTags, registrationToken, nil, "test:arn:outpost") + + assert.NoError(t, err) + assert.Equal(t, "registerArn", arn) + assert.Equal(t, "us-west-2b", availabilityzone, "availabilityZone is incorrect") +} + +func TestRegisterContainerInstance(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + additionalAttributes := map[string]string{"my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + } + client, mc, _ := NewMockClient(mockCtrl, mockEC2Metadata, additionalAttributes) + + fakeCapabilities := []string{"capability1", "capability2"} + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + "my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + "ecs.availability-zone": "us-west-2b", + "ecs.outpost-arn": "test:arn:outpost", + } + capabilities := buildAttributeList(fakeCapabilities, nil) + platformDevices := []*ecs.PlatformDevice{ + { + Id: aws.String("id1"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id2"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id3"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + } + + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + 
mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + assert.Nil(t, req.ContainerInstanceArn) + assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster") + assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token") + assert.Equal(t, iid, *req.InstanceIdentityDocument, "Wrong IID") + assert.Equal(t, iidSignature, *req.InstanceIdentityDocumentSignature, "Wrong IID sig") + assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources") + resource, ok := findResource(req.TotalResources, "PORTS_UDP") + assert.True(t, ok, `Could not find resource "PORTS_UDP"`) + assert.Equal(t, "STRINGSET", *resource.Type, `Wrong type for resource "PORTS_UDP"`) + // 3 from expectedAttributes and 3 from additionalAttributes + assert.Equal(t, 6, len(req.Attributes), "Wrong number of Attributes") + for i := range req.Attributes { + if strings.Contains(*req.Attributes[i].Name, "capability") { + assert.Contains(t, fakeCapabilities, *req.Attributes[i].Name) + } else { + assert.Equal(t, expectedAttributes[*req.Attributes[i].Name], *req.Attributes[i].Value) + } + } + assert.Equal(t, len(containerInstanceTags), len(req.Tags), "Wrong number of tags") + assert.Equal(t, len(platformDevices), len(req.PlatformDevices), "Wrong number of devices") + reqTags := extractTagsMapFromRegisterContainerInstanceInput(req) + for k, v := range reqTags { + assert.Contains(t, containerInstanceTagsMap, k) + assert.Equal(t, containerInstanceTagsMap[k], v) + } + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(fakeCapabilities, expectedAttributes)}}, + nil), + ) + + arn, availabilityzone, err := client.RegisterContainerInstance("", capabilities, + containerInstanceTags, registrationToken, platformDevices, "test:arn:outpost") + assert.NoError(t, err) + assert.Equal(t, "registerArn", arn) + assert.Equal(t, 
"us-west-2b", availabilityzone) +} + +func TestRegisterContainerInstanceNoIID(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + additionalAttributes := map[string]string{"my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + } + client, mc, _ := NewMockClientWithConfig(mockCtrl, mockEC2Metadata, additionalAttributes, + &config.Config{ + Cluster: configuredCluster, + AWSRegion: "us-east-1", + InstanceAttributes: additionalAttributes, + NoIID: true, + }) + + fakeCapabilities := []string{"capability1", "capability2"} + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + "my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + "ecs.availability-zone": "us-west-2b", + "ecs.outpost-arn": "test:arn:outpost", + } + capabilities := buildAttributeList(fakeCapabilities, nil) + + gomock.InOrder( + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + assert.Nil(t, req.ContainerInstanceArn) + assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster") + assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token") + assert.Equal(t, "", *req.InstanceIdentityDocument, "Wrong IID") + assert.Equal(t, "", *req.InstanceIdentityDocumentSignature, "Wrong IID sig") + assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources") + resource, ok := findResource(req.TotalResources, "PORTS_UDP") + assert.True(t, ok, `Could not find resource "PORTS_UDP"`) + assert.Equal(t, "STRINGSET", *resource.Type, `Wrong type for resource "PORTS_UDP"`) + // 3 from expectedAttributes and 3 from additionalAttributes + assert.Equal(t, 6, len(req.Attributes), "Wrong number of Attributes") + for i := range req.Attributes { + if strings.Contains(*req.Attributes[i].Name, "capability") { + assert.Contains(t, fakeCapabilities, 
*req.Attributes[i].Name) + } else { + assert.Equal(t, expectedAttributes[*req.Attributes[i].Name], *req.Attributes[i].Value) + } + } + assert.Equal(t, len(containerInstanceTags), len(req.Tags), "Wrong number of tags") + reqTags := extractTagsMapFromRegisterContainerInstanceInput(req) + for k, v := range reqTags { + assert.Contains(t, containerInstanceTagsMap, k) + assert.Equal(t, containerInstanceTagsMap[k], v) + } + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(fakeCapabilities, expectedAttributes)}}, + nil), + ) + + arn, availabilityzone, err := client.RegisterContainerInstance("", capabilities, + containerInstanceTags, registrationToken, nil, "test:arn:outpost") + assert.NoError(t, err) + assert.Equal(t, "registerArn", arn) + assert.Equal(t, "us-west-2b", availabilityzone) +} + +// TestRegisterContainerInstanceWithNegativeResource tests the registeration should fail with negative resource +func TestRegisterContainerInstanceWithNegativeResource(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + _, mem := getCpuAndMemory() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + client := NewECSClient(credentials.AnonymousCredentials, + &config.Config{Cluster: configuredCluster, + AWSRegion: "us-east-1", + ReservedMemory: uint16(mem) + 1, + }, mockEC2Metadata) + mockSDK := mock_api.NewMockECSSDK(mockCtrl) + mockSubmitStateSDK := mock_api.NewMockECSSubmitStateSDK(mockCtrl) + client.(*APIECSClient).SetSDK(mockSDK) + client.(*APIECSClient).SetSubmitStateChangeSDK(mockSubmitStateSDK) + + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + ) + _, _, err := client.RegisterContainerInstance("", nil, 
nil, + "", nil, "") + assert.Error(t, err, "Register resource with negative value should cause registration fail") +} + +func TestRegisterContainerInstanceWithEmptyTags(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + client, mc, _ := NewMockClient(mockCtrl, mockEC2Metadata, nil) + + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + "my_custom_attribute": "Custom_Value1", + "my_other_custom_attribute": "Custom_Value2", + } + + fakeCapabilities := []string{"capability1", "capability2"} + + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + assert.Nil(t, req.Tags) + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(fakeCapabilities, expectedAttributes)}}, + nil), + ) + + _, _, err := client.RegisterContainerInstance("", nil, make([]*ecs.Tag, 0), + "", nil, "") + assert.NoError(t, err) +} + +func TestValidateRegisteredAttributes(t *testing.T) { + origAttributes := []*ecs.Attribute{ + {Name: aws.String("foo"), Value: aws.String("bar")}, + {Name: aws.String("baz"), Value: aws.String("quux")}, + {Name: aws.String("no_value"), Value: aws.String("")}, + } + actualAttributes := []*ecs.Attribute{ + {Name: aws.String("baz"), Value: aws.String("quux")}, + {Name: aws.String("foo"), Value: aws.String("bar")}, + {Name: aws.String("no_value"), Value: aws.String("")}, + {Name: aws.String("ecs.internal-attribute"), Value: aws.String("some text")}, + } + assert.NoError(t, validateRegisteredAttributes(origAttributes, actualAttributes)) + 
+ origAttributes = append(origAttributes, &ecs.Attribute{Name: aws.String("abc"), Value: aws.String("xyz")}) + assert.Error(t, validateRegisteredAttributes(origAttributes, actualAttributes)) +} + +func findResource(resources []*ecs.Resource, name string) (*ecs.Resource, bool) { + for _, resource := range resources { + if name == *resource.Name { + return resource, true + } + } + return nil, false +} + +func TestRegisterBlankCluster(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + + // Test the special 'empty cluster' behavior of creating 'default' + client := NewECSClient(credentials.AnonymousCredentials, + &config.Config{ + Cluster: "", + AWSRegion: "us-east-1", + }, + mockEC2Metadata) + mc := mock_api.NewMockECSSDK(mockCtrl) + client.(*APIECSClient).SetSDK(mc) + + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + } + defaultCluster := config.DefaultClusterName + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Return(nil, awserr.New("ClientException", "Cluster not found.", errors.New("Cluster not found."))), + mc.EXPECT().CreateCluster(&ecs.CreateClusterInput{ClusterName: &defaultCluster}).Return(&ecs.CreateClusterOutput{Cluster: &ecs.Cluster{ClusterName: &defaultCluster}}, nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + if *req.Cluster != config.DefaultClusterName { + t.Errorf("Wrong cluster: 
%v", *req.Cluster) + } + if *req.InstanceIdentityDocument != iid { + t.Errorf("Wrong IID: %v", *req.InstanceIdentityDocument) + } + if *req.InstanceIdentityDocumentSignature != iidSignature { + t.Errorf("Wrong IID sig: %v", *req.InstanceIdentityDocumentSignature) + } + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(nil, expectedAttributes)}}, + nil), + ) + + arn, availabilityzone, err := client.RegisterContainerInstance("", nil, nil, + "", nil, "") + if err != nil { + t.Errorf("Should not be an error: %v", err) + } + if arn != "registerArn" { + t.Errorf("Wrong arn: %v", arn) + } + if availabilityzone != "" { + t.Errorf("wrong availability zone: %v", availabilityzone) + } +} + +func TestRegisterBlankClusterNotCreatingClusterWhenErrorNotClusterNotFound(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(mockCtrl) + + // Test the special 'empty cluster' behavior of creating 'default' + client := NewECSClient(credentials.AnonymousCredentials, + &config.Config{ + Cluster: "", + AWSRegion: "us-east-1", + }, + mockEC2Metadata) + mc := mock_api.NewMockECSSDK(mockCtrl) + client.(*APIECSClient).SetSDK(mc) + + expectedAttributes := map[string]string{ + "ecs.os-type": config.OSType, + } + + gomock.InOrder( + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Return(nil, awserr.New("ClientException", "Invalid request.", errors.New("Invalid request."))), + mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil), + 
mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil), + mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) { + if *req.Cluster != config.DefaultClusterName { + t.Errorf("Wrong cluster: %v", *req.Cluster) + } + if *req.InstanceIdentityDocument != iid { + t.Errorf("Wrong IID: %v", *req.InstanceIdentityDocument) + } + if *req.InstanceIdentityDocumentSignature != iidSignature { + t.Errorf("Wrong IID sig: %v", *req.InstanceIdentityDocumentSignature) + } + }).Return(&ecs.RegisterContainerInstanceOutput{ + ContainerInstance: &ecs.ContainerInstance{ + ContainerInstanceArn: aws.String("registerArn"), + Attributes: buildAttributeList(nil, expectedAttributes)}}, + nil), + ) + + arn, _, err := client.RegisterContainerInstance("", nil, nil, "", + nil, "") + assert.NoError(t, err, "Should not return error") + assert.Equal(t, "registerArn", arn, "Wrong arn") +} + +func TestDiscoverTelemetryEndpoint(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + expectedEndpoint := "http://127.0.0.1" + mc.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(&ecs.DiscoverPollEndpointOutput{TelemetryEndpoint: &expectedEndpoint}, nil) + endpoint, err := client.DiscoverTelemetryEndpoint("containerInstance") + if err != nil { + t.Error("Error getting telemetry endpoint: ", err) + } + if expectedEndpoint != endpoint { + t.Errorf("Expected telemetry endpoint(%s) != endpoint(%s)", expectedEndpoint, endpoint) + } +} + +func TestDiscoverTelemetryEndpointError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mc.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(nil, fmt.Errorf("Error getting endpoint")) + _, err := client.DiscoverTelemetryEndpoint("containerInstance") + 
if err == nil { + t.Error("Expected error getting telemetry endpoint, didn't get any") + } +} + +func TestDiscoverNilTelemetryEndpoint(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + pollEndpoint := "http://127.0.0.1" + mc.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(&ecs.DiscoverPollEndpointOutput{Endpoint: &pollEndpoint}, nil) + _, err := client.DiscoverTelemetryEndpoint("containerInstance") + if err == nil { + t.Error("Expected error getting telemetry endpoint with old response") + } +} + +func TestUpdateContainerInstancesState(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + + instanceARN := "myInstanceARN" + status := "DRAINING" + mc.EXPECT().UpdateContainerInstancesState(&ecs.UpdateContainerInstancesStateInput{ + ContainerInstances: []*string{aws.String(instanceARN)}, + Status: aws.String(status), + Cluster: aws.String(configuredCluster), + }).Return(&ecs.UpdateContainerInstancesStateOutput{}, nil) + + err := client.UpdateContainerInstancesState(instanceARN, status) + assert.NoError(t, err, fmt.Sprintf("Unexpected error calling UpdateContainerInstancesState: %s", err)) +} + +func TestUpdateContainerInstancesStateError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + + instanceARN := "myInstanceARN" + status := "DRAINING" + mc.EXPECT().UpdateContainerInstancesState(&ecs.UpdateContainerInstancesStateInput{ + ContainerInstances: []*string{aws.String(instanceARN)}, + Status: aws.String(status), + Cluster: aws.String(configuredCluster), + }).Return(nil, fmt.Errorf("ERROR")) + + err := client.UpdateContainerInstancesState(instanceARN, status) + assert.Error(t, err, "Expected an error calling 
UpdateContainerInstancesState but got nil") +} + +func TestGetResourceTags(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + + instanceARN := "myInstanceARN" + mc.EXPECT().ListTagsForResource(&ecs.ListTagsForResourceInput{ + ResourceArn: aws.String(instanceARN), + }).Return(&ecs.ListTagsForResourceOutput{ + Tags: containerInstanceTags, + }, nil) + + _, err := client.GetResourceTags(instanceARN) + assert.NoError(t, err, fmt.Sprintf("Unexpected error calling GetResourceTags: %s", err)) +} + +func TestGetResourceTagsError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + client, mc, _ := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + + instanceARN := "myInstanceARN" + mc.EXPECT().ListTagsForResource(&ecs.ListTagsForResourceInput{ + ResourceArn: aws.String(instanceARN), + }).Return(nil, fmt.Errorf("ERROR")) + + _, err := client.GetResourceTags(instanceARN) + assert.Error(t, err, "Expected an error calling GetResourceTags but got nil") +} + +func TestDiscoverPollEndpointCacheHit(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockSDK := mock_api.NewMockECSSDK(mockCtrl) + pollEndpoinCache := mock_async.NewMockCache(mockCtrl) + client := &APIECSClient{ + credentialProvider: credentials.AnonymousCredentials, + config: &config.Config{ + Cluster: configuredCluster, + AWSRegion: "us-east-1", + }, + standardClient: mockSDK, + ec2metadata: ec2.NewBlackholeEC2MetadataClient(), + pollEndpoinCache: pollEndpoinCache, + } + + pollEndpoint := "http://127.0.0.1" + pollEndpoinCache.EXPECT().Get("containerInstance").Return( + &ecs.DiscoverPollEndpointOutput{ + Endpoint: aws.String(pollEndpoint), + }, true) + output, err := client.discoverPollEndpoint("containerInstance") + if err != nil { + t.Fatalf("Error in discoverPollEndpoint: %v", err) + } + if aws.StringValue(output.Endpoint) != 
pollEndpoint { + t.Errorf("Mismatch in poll endpoint: %s != %s", aws.StringValue(output.Endpoint), pollEndpoint) + } +} + +func TestDiscoverPollEndpointCacheMiss(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockSDK := mock_api.NewMockECSSDK(mockCtrl) + pollEndpoinCache := mock_async.NewMockCache(mockCtrl) + client := &APIECSClient{ + credentialProvider: credentials.AnonymousCredentials, + config: &config.Config{ + Cluster: configuredCluster, + AWSRegion: "us-east-1", + }, + standardClient: mockSDK, + ec2metadata: ec2.NewBlackholeEC2MetadataClient(), + pollEndpoinCache: pollEndpoinCache, + } + pollEndpoint := "http://127.0.0.1" + pollEndpointOutput := &ecs.DiscoverPollEndpointOutput{ + Endpoint: &pollEndpoint, + } + + gomock.InOrder( + pollEndpoinCache.EXPECT().Get("containerInstance").Return(nil, false), + mockSDK.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return(pollEndpointOutput, nil), + pollEndpoinCache.EXPECT().Set("containerInstance", pollEndpointOutput), + ) + + output, err := client.discoverPollEndpoint("containerInstance") + if err != nil { + t.Fatalf("Error in discoverPollEndpoint: %v", err) + } + if aws.StringValue(output.Endpoint) != pollEndpoint { + t.Errorf("Mismatch in poll endpoint: %s != %s", aws.StringValue(output.Endpoint), pollEndpoint) + } +} + +func TestDiscoverTelemetryEndpointAfterPollEndpointCacheHit(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockSDK := mock_api.NewMockECSSDK(mockCtrl) + pollEndpoinCache := async.NewLRUCache(1, 10*time.Minute) + client := &APIECSClient{ + credentialProvider: credentials.AnonymousCredentials, + config: &config.Config{ + Cluster: configuredCluster, + AWSRegion: "us-east-1", + }, + standardClient: mockSDK, + ec2metadata: ec2.NewBlackholeEC2MetadataClient(), + pollEndpoinCache: pollEndpoinCache, + } + + pollEndpoint := "http://127.0.0.1" + mockSDK.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return( + 
&ecs.DiscoverPollEndpointOutput{ + Endpoint: &pollEndpoint, + TelemetryEndpoint: &pollEndpoint, + }, nil) + endpoint, err := client.DiscoverPollEndpoint("containerInstance") + if err != nil { + t.Fatalf("Error in discoverPollEndpoint: %v", err) + } + if endpoint != pollEndpoint { + t.Errorf("Mismatch in poll endpoint: %s", endpoint) + } + telemetryEndpoint, err := client.DiscoverTelemetryEndpoint("containerInstance") + if err != nil { + t.Fatalf("Error in discoverTelemetryEndpoint: %v", err) + } + if telemetryEndpoint != pollEndpoint { + t.Errorf("Mismatch in poll endpoint: %s", endpoint) + } +} + +// TestSubmitTaskStateChangeWithAttachments tests the SubmitTaskStateChange API +// also send the Attachment Status +func TestSubmitTaskStateChangeWithAttachments(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mockSubmitStateClient.EXPECT().SubmitTaskStateChange(&taskSubmitInputMatcher{ + ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String(configuredCluster), + Task: aws.String("task_arn"), + Attachments: []*ecs.AttachmentStateChange{ + { + AttachmentArn: aws.String("eni_arn"), + Status: aws.String("ATTACHED"), + }, + }, + }, + }) + + err := client.SubmitTaskStateChange(api.TaskStateChange{ + TaskARN: "task_arn", + Attachment: &apieni.ENIAttachment{ + AttachmentARN: "eni_arn", + Status: apieni.ENIAttached, + }, + }) + assert.NoError(t, err, "Unable to submit task state change with attachments") +} + +func TestSubmitTaskStateChangeWithoutAttachments(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mockSubmitStateClient.EXPECT().SubmitTaskStateChange(&taskSubmitInputMatcher{ + ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String(configuredCluster), + Task: aws.String("task_arn"), + Reason: 
aws.String(""), + Status: aws.String("RUNNING"), + }, + }) + + err := client.SubmitTaskStateChange(api.TaskStateChange{ + TaskARN: "task_arn", + Status: apitaskstatus.TaskRunning, + }) + assert.NoError(t, err, "Unable to submit task state change with no attachments") +} + +func TestSubmitTaskStateChangeWithManagedAgents(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + client, _, mockSubmitStateClient := NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mockSubmitStateClient.EXPECT().SubmitTaskStateChange(&taskSubmitInputMatcher{ + ecs.SubmitTaskStateChangeInput{ + Cluster: aws.String(configuredCluster), + Task: aws.String("task_arn"), + Reason: aws.String(""), + Status: aws.String("RUNNING"), + ManagedAgents: testManagedAgents, + }, + }) + + err := client.SubmitTaskStateChange(api.TaskStateChange{ + TaskARN: "task_arn", + Status: apitaskstatus.TaskRunning, + }) + assert.NoError(t, err, "Unable to submit task state change with managed agents") +} + +// TestSubmitContainerStateChangeWhileTaskInPending tests the container state change was submitted +// when the task is still in pending state +func TestSubmitContainerStateChangeWhileTaskInPending(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + testCases := []struct { + taskStatus apitaskstatus.TaskStatus + }{ + { + apitaskstatus.TaskStatusNone, + }, + { + apitaskstatus.TaskPulled, + }, + { + apitaskstatus.TaskCreated, + }, + } + + taskStateChangePending := api.TaskStateChange{ + Status: apitaskstatus.TaskCreated, + TaskARN: "arn", + Containers: []api.ContainerStateChange{ + { + TaskArn: "arn", + ContainerName: "container", + RuntimeID: "runtimeid", + Status: apicontainerstatus.ContainerRunning, + }, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("TaskStatus: %s", tc.taskStatus.String()), func(t *testing.T) { + taskStateChangePending.Status = tc.taskStatus + client, _, mockSubmitStateClient := 
NewMockClient(mockCtrl, ec2.NewBlackholeEC2MetadataClient(), nil) + mockSubmitStateClient.EXPECT().SubmitTaskStateChange(&taskSubmitInputMatcher{ + ecs.SubmitTaskStateChangeInput{ + Cluster: strptr(configuredCluster), + Task: strptr("arn"), + Status: strptr("PENDING"), + Reason: strptr(""), + Containers: []*ecs.ContainerStateChange{ + { + ContainerName: strptr("container"), + RuntimeId: strptr("runtimeid"), + Status: strptr("RUNNING"), + NetworkBindings: []*ecs.NetworkBinding{}, + }, + }, + }, + }) + err := client.SubmitTaskStateChange(taskStateChangePending) + assert.NoError(t, err) + }) + } +} + +func extractTagsMapFromRegisterContainerInstanceInput(req *ecs.RegisterContainerInstanceInput) map[string]string { + tagsMap := make(map[string]string, len(req.Tags)) + for i := range req.Tags { + tagsMap[aws.StringValue(req.Tags[i].Key)] = aws.StringValue(req.Tags[i].Value) + } + return tagsMap +} diff --git a/agent/api/ecsclient/retry_handler.go b/agent/api/ecsclient/retry_handler.go new file mode 100644 index 00000000000..9ee49f5fc36 --- /dev/null +++ b/agent/api/ecsclient/retry_handler.go @@ -0,0 +1,85 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecsclient + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" +) + +const ( + // submitStateChangeInitialRetries is the initial set of retries where delay + // between retries grows exponentially at 2^n * 45+-15ms. This should be roughly + // 5 minutes of delay at the last growing retry. + // 5 min = 2^n * (30 + 15) ms + // n ~= 12.7 + // We round up to 13 (which really gives us ~6 minutes) for no + // particular reason. + // Because of jitter, this caps at ~8 minutes + submitStateChangeInitialRetries = 13 + + // submitStateChangeExtraRetries is the set of retries (at the max delay per + // retry of exactly 5 minutes) which should reach roughly 24 hours of elapsed + // time. + // baseTryTime = \sum_{i=0}^13{2^i * 45 ms} ~= 12 minutes + // 24 hours ~= 12 minutes + (n * 5 minutes) + // n ~= 285 + submitStateChangeExtraRetries = 285 +) + +// newSubmitStateChangeClient returns a client intended to be used for +// Submit*StateChange APIs which has the behavior of retrying the call on +// retriable errors for an extended period of time (roughly 24 hours). +func newSubmitStateChangeClient(awsConfig *aws.Config) *ecs.ECS { + sscConfig := awsConfig.Copy() + sscConfig.Retryer = &oneDayRetrier{} + client := ecs.New(session.New(sscConfig)) + return client +} + +// oneDayRetrier is a retrier for the AWS SDK that retries up to one day. +// Each retry will have an exponential backoff from 30ms to 5 minutes. Once the +// backoff has reached 5 minutes, it will not increase further. 
+// Conforms to the request.Retryer interface https://github.com/aws/aws-sdk-go/blob/v1.0.0/aws/request/retryer.go#L13 +type oneDayRetrier struct { + client.DefaultRetryer +} + +// MaxRetries returns the number of retries needed to retry for roughly a day +// for this Retrier +func (retrier *oneDayRetrier) MaxRetries() int { + return submitStateChangeExtraRetries + submitStateChangeInitialRetries +} + +// RetryRules returns the correct time to delay between retries for this +// instance of a retry. For the first 14 requests, it follows an exponential +// backoff between 30ms and 5 minutes. +// See the const comments for math on how this gets us to around 24 hours +// total. +func (retrier *oneDayRetrier) RetryRules(r *request.Request) time.Duration { + // This logic is the same as the default retrier, but duplicated here such + // that upstream changes do not invalidate the math done above. + if r.RetryCount <= submitStateChangeInitialRetries { + delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + return time.Duration(delay) * time.Millisecond + } + return 5 * time.Minute +} diff --git a/agent/api/ecsclient/retry_handler_test.go b/agent/api/ecsclient/retry_handler_test.go new file mode 100644 index 00000000000..c566bff8be7 --- /dev/null +++ b/agent/api/ecsclient/retry_handler_test.go @@ -0,0 +1,57 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecsclient + +import ( + "errors" + "net/http" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/defaults" +) + +func TestOneDayRetrier(t *testing.T) { + stateChangeClient := newSubmitStateChangeClient(defaults.Config()) + + request, _ := stateChangeClient.SubmitContainerStateChangeRequest(&ecs.SubmitContainerStateChangeInput{}) + + retrier := stateChangeClient.Retryer + + var totalDelay time.Duration + for retries := 0; retries < retrier.MaxRetries(); retries++ { + request.Error = errors.New("") + request.Retryable = aws.Bool(true) + request.HTTPResponse = &http.Response{StatusCode: 500} + if request.WillRetry() && request.IsErrorRetryable() { + totalDelay += retrier.RetryRules(request) + request.RetryCount++ + } + } + + request.Error = errors.New("") + request.Retryable = aws.Bool(true) + request.HTTPResponse = &http.Response{StatusCode: 500} + if request.WillRetry() { + t.Errorf("Expected request to not be retried after %v retries", retrier.MaxRetries()) + } + + if totalDelay > 25*time.Hour || totalDelay < 23*time.Hour { + t.Errorf("Expected accumulated retry delay to be roughly 24 hours; was %v", totalDelay) + } +} diff --git a/agent/api/eni/eni.go b/agent/api/eni/eni.go new file mode 100644 index 00000000000..6ba5a36a8dd --- /dev/null +++ b/agent/api/eni/eni.go @@ -0,0 +1,338 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eni + +import ( + "fmt" + "net" + "strings" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" + "github.com/pkg/errors" +) + +// ENI contains information of the eni +type ENI struct { + // ID is the id of eni + ID string `json:"ec2Id"` + // MacAddress is the mac address of the eni + MacAddress string + // IPV4Addresses is the ipv4 address associated with the eni + IPV4Addresses []*ENIIPV4Address + // IPV6Addresses is the ipv6 address associated with the eni + IPV6Addresses []*ENIIPV6Address + // SubnetGatewayIPV4Address is the IPv4 address of the subnet gateway of the ENI + SubnetGatewayIPV4Address string `json:",omitempty"` + // DomainNameServers specifies the nameserver IP addresses for the eni + DomainNameServers []string `json:",omitempty"` + // DomainNameSearchList specifies the search list for the domain + // name lookup, for the eni + DomainNameSearchList []string `json:",omitempty"` + // PrivateDNSName is the dns name assigned by the vpc to this eni + PrivateDNSName string `json:",omitempty"` + // InterfaceAssociationProtocol is the type of ENI, valid value: "default", "vlan" + InterfaceAssociationProtocol string `json:",omitempty"` + // InterfaceVlanProperties contains information for an interface + // that is supposed to be used as a VLAN device + InterfaceVlanProperties *InterfaceVlanProperties `json:",omitempty"` + + // Due to historical reasons, the IPv4 subnet prefix length is sent with IPv4 subnet gateway + // address instead of the ENI's IP addresses. However, CNI plugins and many OS APIs expect it + // the other way around. Instead of doing this conversion all the time for each CNI and TMDS + // request, compute it once on demand and cache it here. 
+ ipv4SubnetPrefixLength string + ipv4SubnetCIDRBlock string + ipv6SubnetCIDRBlock string +} + +// InterfaceVlanProperties contains information for an interface that +// is supposed to be used as a VLAN device +type InterfaceVlanProperties struct { + VlanID string + TrunkInterfaceMacAddress string +} + +const ( + // DefaultInterfaceAssociationProtocol represents the standard ENI type. + DefaultInterfaceAssociationProtocol = "default" + + // VLANInterfaceAssociationProtocol represents the ENI with trunking enabled. + VLANInterfaceAssociationProtocol = "vlan" + + // IPv6SubnetPrefixLength is the IPv6 global unicast address prefix length, consisting of + // global routing prefix and subnet ID lengths as specified in IPv6 addressing architecture + // (RFC 4291 section 2.5.4) and IPv6 Global Unicast Address Format (RFC 3587). + // The ACS ENI payload structure does not contain an IPv6 subnet prefix length because "/64" is + // the only allowed length per RFCs above, and the only one that VPC supports. + IPv6SubnetPrefixLength = "64" +) + +// GetIPV4Addresses returns the list of IPv4 addresses assigned to the ENI. +func (eni *ENI) GetIPV4Addresses() []string { + var addresses []string + for _, addr := range eni.IPV4Addresses { + addresses = append(addresses, addr.Address) + } + + return addresses +} + +// GetIPV6Addresses returns the list of IPv6 addresses assigned to the ENI. +func (eni *ENI) GetIPV6Addresses() []string { + var addresses []string + for _, addr := range eni.IPV6Addresses { + addresses = append(addresses, addr.Address) + } + + return addresses +} + +// GetPrimaryIPv4Address returns the primary IPv4 address assigned to the ENI. 
+func (eni *ENI) GetPrimaryIPv4Address() string { + var primaryAddr string + for _, addr := range eni.IPV4Addresses { + if addr.Primary { + primaryAddr = addr.Address + break + } + } + + return primaryAddr +} + +// GetPrimaryIPv4AddressWithPrefixLength returns the primary IPv4 address assigned to the ENI with +// its subnet prefix length. +func (eni *ENI) GetPrimaryIPv4AddressWithPrefixLength() string { + return eni.GetPrimaryIPv4Address() + "/" + eni.GetIPv4SubnetPrefixLength() +} + +// GetIPAddressesWithPrefixLength returns the list of all IP addresses assigned to the ENI with +// their subnet prefix length. +func (eni *ENI) GetIPAddressesWithPrefixLength() []string { + var addresses []string + for _, addr := range eni.IPV4Addresses { + addresses = append(addresses, addr.Address+"/"+eni.GetIPv4SubnetPrefixLength()) + } + for _, addr := range eni.IPV6Addresses { + addresses = append(addresses, addr.Address+"/"+IPv6SubnetPrefixLength) + } + + return addresses +} + +// GetIPv4SubnetPrefixLength returns the IPv4 prefix length of the ENI's subnet. +func (eni *ENI) GetIPv4SubnetPrefixLength() string { + if eni.ipv4SubnetPrefixLength == "" && eni.SubnetGatewayIPV4Address != "" { + eni.ipv4SubnetPrefixLength = strings.Split(eni.SubnetGatewayIPV4Address, "/")[1] + } + + return eni.ipv4SubnetPrefixLength +} + +// GetIPv4SubnetCIDRBlock returns the IPv4 CIDR block, if any, of the ENI's subnet. +func (eni *ENI) GetIPv4SubnetCIDRBlock() string { + if eni.ipv4SubnetCIDRBlock == "" && eni.SubnetGatewayIPV4Address != "" { + _, ipv4Net, err := net.ParseCIDR(eni.SubnetGatewayIPV4Address) + if err == nil { + eni.ipv4SubnetCIDRBlock = ipv4Net.String() + } + } + + return eni.ipv4SubnetCIDRBlock +} + +// GetIPv6SubnetCIDRBlock returns the IPv6 CIDR block, if any, of the ENI's subnet. 
+func (eni *ENI) GetIPv6SubnetCIDRBlock() string { + if eni.ipv6SubnetCIDRBlock == "" && len(eni.IPV6Addresses) > 0 { + ipv6Addr := eni.IPV6Addresses[0].Address + "/" + IPv6SubnetPrefixLength + _, ipv6Net, err := net.ParseCIDR(ipv6Addr) + if err == nil { + eni.ipv6SubnetCIDRBlock = ipv6Net.String() + } + } + + return eni.ipv6SubnetCIDRBlock +} + +// GetSubnetGatewayIPv4Address returns the subnet gateway IPv4 address for the ENI. +func (eni *ENI) GetSubnetGatewayIPv4Address() string { + var gwAddr string + if eni.SubnetGatewayIPV4Address != "" { + gwAddr = strings.Split(eni.SubnetGatewayIPV4Address, "/")[0] + } + + return gwAddr +} + +// GetHostname returns the hostname assigned to the ENI +func (eni *ENI) GetHostname() string { + return eni.PrivateDNSName +} + +// IsStandardENI returns true if the ENI is a standard/regular ENI. That is, if it +// has its association protocol as standard. To be backwards compatible, if the +// association protocol is not set for an ENI, it's considered a standard ENI as well. 
+func (eni *ENI) IsStandardENI() bool { + switch eni.InterfaceAssociationProtocol { + case "", DefaultInterfaceAssociationProtocol: + return true + case VLANInterfaceAssociationProtocol: + return false + default: + return false + } +} + +// String returns a human readable version of the ENI object +func (eni *ENI) String() string { + var ipv4Addresses []string + for _, addr := range eni.IPV4Addresses { + ipv4Addresses = append(ipv4Addresses, addr.Address) + } + var ipv6Addresses []string + for _, addr := range eni.IPV6Addresses { + ipv6Addresses = append(ipv6Addresses, addr.Address) + } + + eniString := "" + + if len(eni.InterfaceAssociationProtocol) == 0 { + eniString += fmt.Sprintf(" ,ENI type: [%s]", eni.InterfaceAssociationProtocol) + } + + if eni.InterfaceVlanProperties != nil { + eniString += fmt.Sprintf(" ,VLan ID: [%s], TrunkInterfaceMacAddress: [%s]", + eni.InterfaceVlanProperties.VlanID, eni.InterfaceVlanProperties.TrunkInterfaceMacAddress) + } + + return fmt.Sprintf( + "eni id:%s, mac: %s, hostname: %s, ipv4addresses: [%s], ipv6addresses: [%s], dns: [%s], dns search: [%s],"+ + " gateway ipv4: [%s][%s]", eni.ID, eni.MacAddress, eni.GetHostname(), strings.Join(ipv4Addresses, ","), + strings.Join(ipv6Addresses, ","), strings.Join(eni.DomainNameServers, ","), + strings.Join(eni.DomainNameSearchList, ","), eni.SubnetGatewayIPV4Address, eniString) +} + +// ENIIPV4Address is the ipv4 information of the eni +type ENIIPV4Address struct { + // Primary indicates whether the ip address is primary + Primary bool + // Address is the ipv4 address associated with eni + Address string +} + +// ENIIPV6Address is the ipv6 information of the eni +type ENIIPV6Address struct { + // Address is the ipv6 address associated with eni + Address string +} + +// ENIFromACS validates the given ACS ENI information and creates an ENI object from it. 
+func ENIFromACS(acsENI *ecsacs.ElasticNetworkInterface) (*ENI, error) { + err := ValidateTaskENI(acsENI) + if err != nil { + return nil, err + } + + var ipv4Addrs []*ENIIPV4Address + var ipv6Addrs []*ENIIPV6Address + + // Read IPv4 address information of the ENI. + for _, ec2Ipv4 := range acsENI.Ipv4Addresses { + ipv4Addrs = append(ipv4Addrs, &ENIIPV4Address{ + Primary: aws.BoolValue(ec2Ipv4.Primary), + Address: aws.StringValue(ec2Ipv4.PrivateAddress), + }) + } + + // Read IPv6 address information of the ENI. + for _, ec2Ipv6 := range acsENI.Ipv6Addresses { + ipv6Addrs = append(ipv6Addrs, &ENIIPV6Address{ + Address: aws.StringValue(ec2Ipv6.Address), + }) + } + + // Read ENI association properties. + var interfaceVlanProperties InterfaceVlanProperties + + if aws.StringValue(acsENI.InterfaceAssociationProtocol) == VLANInterfaceAssociationProtocol { + interfaceVlanProperties.TrunkInterfaceMacAddress = aws.StringValue(acsENI.InterfaceVlanProperties.TrunkInterfaceMacAddress) + interfaceVlanProperties.VlanID = aws.StringValue(acsENI.InterfaceVlanProperties.VlanId) + } + + eni := &ENI{ + ID: aws.StringValue(acsENI.Ec2Id), + MacAddress: aws.StringValue(acsENI.MacAddress), + IPV4Addresses: ipv4Addrs, + IPV6Addresses: ipv6Addrs, + SubnetGatewayIPV4Address: aws.StringValue(acsENI.SubnetGatewayIpv4Address), + PrivateDNSName: aws.StringValue(acsENI.PrivateDnsName), + InterfaceAssociationProtocol: aws.StringValue(acsENI.InterfaceAssociationProtocol), + InterfaceVlanProperties: &interfaceVlanProperties, + } + + for _, nameserverIP := range acsENI.DomainNameServers { + eni.DomainNameServers = append(eni.DomainNameServers, aws.StringValue(nameserverIP)) + } + for _, nameserverDomain := range acsENI.DomainName { + eni.DomainNameSearchList = append(eni.DomainNameSearchList, aws.StringValue(nameserverDomain)) + } + + return eni, nil +} + +// ValidateTaskENI validates the ENI information sent from ACS. 
+func ValidateTaskENI(acsENI *ecsacs.ElasticNetworkInterface) error { + // At least one IPv4 address should be associated with the ENI. + if len(acsENI.Ipv4Addresses) < 1 { + return errors.Errorf("eni message validation: no ipv4 addresses in the message") + } + + if acsENI.SubnetGatewayIpv4Address == nil { + return errors.Errorf("eni message validation: no subnet gateway ipv4 address in the message") + } + gwIPv4Addr := aws.StringValue(acsENI.SubnetGatewayIpv4Address) + s := strings.Split(gwIPv4Addr, "/") + if len(s) != 2 { + return errors.Errorf( + "eni message validation: invalid subnet gateway ipv4 address %s", gwIPv4Addr) + } + + if acsENI.MacAddress == nil { + return errors.Errorf("eni message validation: empty eni mac address in the message") + } + + if acsENI.Ec2Id == nil { + return errors.Errorf("eni message validation: empty eni id in the message") + } + + // The association protocol, if specified, must be a supported value. + if (acsENI.InterfaceAssociationProtocol != nil) && + (aws.StringValue(acsENI.InterfaceAssociationProtocol) != VLANInterfaceAssociationProtocol) && + (aws.StringValue(acsENI.InterfaceAssociationProtocol) != DefaultInterfaceAssociationProtocol) { + return errors.Errorf("invalid interface association protocol: %s", + aws.StringValue(acsENI.InterfaceAssociationProtocol)) + } + + // If the interface association protocol is vlan, InterfaceVlanProperties must be specified. 
+ if aws.StringValue(acsENI.InterfaceAssociationProtocol) == VLANInterfaceAssociationProtocol { + if acsENI.InterfaceVlanProperties == nil || + len(aws.StringValue(acsENI.InterfaceVlanProperties.VlanId)) == 0 || + len(aws.StringValue(acsENI.InterfaceVlanProperties.TrunkInterfaceMacAddress)) == 0 { + return errors.New("vlan interface properties missing") + } + } + + return nil +} diff --git a/agent/api/eni/eni_test.go b/agent/api/eni/eni_test.go new file mode 100644 index 00000000000..0a0364aba1c --- /dev/null +++ b/agent/api/eni/eni_test.go @@ -0,0 +1,250 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eni + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +const ( + defaultDNS = "169.254.169.253" + customDNS = "10.0.0.2" + customSearchDomain = "us-west-2.compute.internal" + + ipv4Addr = "1.2.3.4" + ipv4Gw = "1.2.3.1" + ipv4SubnetPrefixLength = "20" + ipv4Subnet = "1.2.0.0" + ipv4AddrWithPrefixLength = ipv4Addr + "/" + ipv4SubnetPrefixLength + ipv4GwWithPrefixLength = ipv4Gw + "/" + ipv4SubnetPrefixLength + ipv4SubnetCIDRBlock = ipv4Subnet + "/" + ipv4SubnetPrefixLength + ipv6Addr = "abcd:dcba:1234:4321::" + ipv6SubnetPrefixLength = "64" + ipv6SubnetCIDRBlock = ipv6Addr + "/" + ipv6SubnetPrefixLength + ipv6AddrWithPrefixLength = ipv6Addr + "/" + ipv6SubnetPrefixLength +) + +var ( + testENI = &ENI{ + ID: "eni-123", + InterfaceAssociationProtocol: DefaultInterfaceAssociationProtocol, + IPV4Addresses: []*ENIIPV4Address{ + { + Primary: true, + Address: ipv4Addr, + }, + }, + IPV6Addresses: []*ENIIPV6Address{ + { + Address: ipv6Addr, + }, + }, + SubnetGatewayIPV4Address: ipv4GwWithPrefixLength, + } +) + +func TestIsStandardENI(t *testing.T) { + testCases := []struct { + protocol string + isStandard bool + }{ + { + protocol: "", + isStandard: true, + }, + { + protocol: DefaultInterfaceAssociationProtocol, + isStandard: true, + }, + { + protocol: VLANInterfaceAssociationProtocol, + isStandard: false, + }, + { + protocol: "invalid", + isStandard: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.protocol, func(t *testing.T) { + eni := &ENI{ + InterfaceAssociationProtocol: tc.protocol, + } + assert.Equal(t, tc.isStandard, eni.IsStandardENI()) + }) + } +} + +func TestGetIPV4Addresses(t *testing.T) { + assert.Equal(t, []string{ipv4Addr}, testENI.GetIPV4Addresses()) +} + +func TestGetIPV6Addresses(t *testing.T) { + assert.Equal(t, []string{ipv6Addr}, testENI.GetIPV6Addresses()) +} + +func TestGetPrimaryIPv4Address(t *testing.T) { + 
assert.Equal(t, ipv4Addr, testENI.GetPrimaryIPv4Address()) +} + +func TestGetPrimaryIPv4AddressWithPrefixLength(t *testing.T) { + assert.Equal(t, ipv4AddrWithPrefixLength, testENI.GetPrimaryIPv4AddressWithPrefixLength()) +} + +func TestGetIPAddressesWithPrefixLength(t *testing.T) { + assert.Equal(t, []string{ipv4AddrWithPrefixLength, ipv6AddrWithPrefixLength}, testENI.GetIPAddressesWithPrefixLength()) +} + +func TestGetIPv4SubnetPrefixLength(t *testing.T) { + assert.Equal(t, ipv4SubnetPrefixLength, testENI.GetIPv4SubnetPrefixLength()) +} + +func TestGetIPv4SubnetCIDRBlock(t *testing.T) { + assert.Equal(t, ipv4SubnetCIDRBlock, testENI.GetIPv4SubnetCIDRBlock()) +} + +func TestGetIPv6SubnetCIDRBlock(t *testing.T) { + assert.Equal(t, ipv6SubnetCIDRBlock, testENI.GetIPv6SubnetCIDRBlock()) +} + +func TestGetSubnetGatewayIPv4Address(t *testing.T) { + assert.Equal(t, ipv4Gw, testENI.GetSubnetGatewayIPv4Address()) +} + +func TestENIToString(t *testing.T) { + expectedStr := `eni id:eni-123, mac: , hostname: , ipv4addresses: [1.2.3.4], ipv6addresses: [abcd:dcba:1234:4321::], dns: [], dns search: [], gateway ipv4: [1.2.3.1/20][]` + assert.Equal(t, expectedStr, testENI.String()) +} + +// TestENIFromACS tests the eni information was correctly read from the acs +func TestENIFromACS(t *testing.T) { + acsENI := getTestACSENI() + eni, err := ENIFromACS(acsENI) + assert.NoError(t, err) + assert.NotNil(t, eni) + assert.Equal(t, aws.StringValue(acsENI.Ec2Id), eni.ID) + assert.Len(t, eni.IPV4Addresses, 1) + assert.Len(t, eni.GetIPV4Addresses(), 1) + assert.Equal(t, aws.StringValue(acsENI.Ipv4Addresses[0].PrivateAddress), eni.IPV4Addresses[0].Address) + assert.Equal(t, aws.BoolValue(acsENI.Ipv4Addresses[0].Primary), eni.IPV4Addresses[0].Primary) + assert.Equal(t, aws.StringValue(acsENI.MacAddress), eni.MacAddress) + assert.Len(t, eni.IPV6Addresses, 1) + assert.Len(t, eni.GetIPV6Addresses(), 1) + assert.Equal(t, aws.StringValue(acsENI.Ipv6Addresses[0].Address), 
eni.IPV6Addresses[0].Address) + assert.Len(t, eni.DomainNameServers, 2) + assert.Equal(t, defaultDNS, eni.DomainNameServers[0]) + assert.Equal(t, customDNS, eni.DomainNameServers[1]) + assert.Len(t, eni.DomainNameSearchList, 1) + assert.Equal(t, customSearchDomain, eni.DomainNameSearchList[0]) + assert.Equal(t, aws.StringValue(acsENI.PrivateDnsName), eni.PrivateDNSName) +} + +// TestValidateENIFromACS tests the validation of enis from acs +func TestValidateENIFromACS(t *testing.T) { + acsENI := getTestACSENI() + err := ValidateTaskENI(acsENI) + assert.NoError(t, err) + + acsENI.Ipv6Addresses = nil + err = ValidateTaskENI(acsENI) + assert.NoError(t, err) + + acsENI.Ipv4Addresses = nil + err = ValidateTaskENI(acsENI) + assert.Error(t, err) +} + +func TestInvalidENIInterfaceVlanPropertyMissing(t *testing.T) { + acsENI := &ecsacs.ElasticNetworkInterface{ + InterfaceAssociationProtocol: aws.String(VLANInterfaceAssociationProtocol), + AttachmentArn: aws.String("arn"), + Ec2Id: aws.String("ec2id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + SubnetGatewayIpv4Address: aws.String(ipv4GwWithPrefixLength), + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + MacAddress: aws.String("mac"), + } + + err := ValidateTaskENI(acsENI) + assert.Error(t, err) + +} + +func TestInvalidENIInvalidInterfaceAssociationProtocol(t *testing.T) { + acsENI := &ecsacs.ElasticNetworkInterface{ + InterfaceAssociationProtocol: aws.String("no-eni"), + AttachmentArn: aws.String("arn"), + Ec2Id: aws.String("ec2id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String("ipv4"), + }, + }, + SubnetGatewayIpv4Address: aws.String(ipv4GwWithPrefixLength), + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String("ipv6"), + }, + }, + MacAddress: aws.String("mac"), + } + err := ValidateTaskENI(acsENI) + 
assert.Error(t, err) +} + +func TestInvalidSubnetGatewayAddress(t *testing.T) { + acsENI := getTestACSENI() + acsENI.SubnetGatewayIpv4Address = aws.String(ipv4Addr) + _, err := ENIFromACS(acsENI) + assert.Error(t, err) +} + +func getTestACSENI() *ecsacs.ElasticNetworkInterface { + return &ecsacs.ElasticNetworkInterface{ + AttachmentArn: aws.String("arn"), + Ec2Id: aws.String("ec2id"), + Ipv4Addresses: []*ecsacs.IPv4AddressAssignment{ + { + Primary: aws.Bool(true), + PrivateAddress: aws.String(ipv4Addr), + }, + }, + SubnetGatewayIpv4Address: aws.String(ipv4GwWithPrefixLength), + Ipv6Addresses: []*ecsacs.IPv6AddressAssignment{ + { + Address: aws.String(ipv6Addr)}, + }, + MacAddress: aws.String("mac"), + DomainNameServers: []*string{aws.String(defaultDNS), aws.String(customDNS)}, + DomainName: []*string{aws.String(customSearchDomain)}, + PrivateDnsName: aws.String("ip.region.compute.internal"), + } +} diff --git a/agent/api/eni/eniattachment.go b/agent/api/eni/eniattachment.go new file mode 100644 index 00000000000..82c0c38c9ac --- /dev/null +++ b/agent/api/eni/eniattachment.go @@ -0,0 +1,153 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eni + +import ( + "fmt" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + // ENIAttachmentTypeTaskENI represents the type of a task level eni + ENIAttachmentTypeTaskENI = "task-eni" + // ENIAttachmentTypeInstanceENI represents the type of an instance level eni + ENIAttachmentTypeInstanceENI = "instance-eni" +) + +// ENIAttachment contains the information of the eni attachment +type ENIAttachment struct { + // AttachmentType is the type of the eni attachment, can either be "task-eni" or "instance-eni" + AttachmentType string `json:"attachmentType"` + // TaskARN is the task identifier from ecs + TaskARN string `json:"taskArn"` + // AttachmentARN is the identifier for the eni attachment + AttachmentARN string `json:"attachmentArn"` + // AttachStatusSent indicates whether the attached status has been sent to backend + AttachStatusSent bool `json:"attachSent"` + // MACAddress is the mac address of eni + MACAddress string `json:"macAddress"` + // Status is the status of the eni: none/attached/detached + Status ENIAttachmentStatus `json:"status"` + // ExpiresAt is the timestamp past which the ENI Attachment is considered + // unsuccessful. The SubmitTaskStateChange API, with the attachment information + // should be invoked before this timestamp. 
+ ExpiresAt time.Time `json:"expiresAt"` + // ackTimer is used to register the expiration timeout callback for unsuccessful + // ENI attachments + ackTimer ttime.Timer + // guard protects access to fields of this struct + guard sync.RWMutex +} + +// StartTimer starts the ack timer to record the expiration of ENI attachment +func (eni *ENIAttachment) StartTimer(timeoutFunc func()) error { + eni.guard.Lock() + defer eni.guard.Unlock() + + if eni.ackTimer != nil { + // The timer has already been initialized, do nothing + return nil + } + now := time.Now() + duration := eni.ExpiresAt.Sub(now) + if duration <= 0 { + return errors.Errorf("eni attachment: timer expiration is in the past; expiration [%s] < now [%s]", + eni.ExpiresAt.String(), now.String()) + } + seelog.Infof("Starting ENI ack timer with duration=%s, %s", duration.String(), eni.stringUnsafe()) + eni.ackTimer = time.AfterFunc(duration, timeoutFunc) + return nil +} + +// Initialize initializes the fields that can't be populated from loading state file. +// Notably, this initializes the ack timer so that if we time out waiting for the eni to be attached, the attachment +// can be removed from state. +func (eni *ENIAttachment) Initialize(timeoutFunc func()) error { + eni.guard.Lock() + defer eni.guard.Unlock() + + if eni.AttachStatusSent { // eni attachment status has been sent, no need to start ack timer. 
+ return nil + } + + now := time.Now() + duration := eni.ExpiresAt.Sub(now) + if duration <= 0 { + return errors.New("ENI attachment has already expired") + } + + seelog.Infof("Starting ENI ack timer with duration=%s, %s", duration.String(), eni.stringUnsafe()) + eni.ackTimer = time.AfterFunc(duration, timeoutFunc) + return nil +} + +// IsSent checks if the eni attached status has been sent +func (eni *ENIAttachment) IsSent() bool { + eni.guard.RLock() + defer eni.guard.RUnlock() + + return eni.AttachStatusSent +} + +// SetSentStatus marks the eni attached status has been sent +func (eni *ENIAttachment) SetSentStatus() { + eni.guard.Lock() + defer eni.guard.Unlock() + + eni.AttachStatusSent = true +} + +// StopAckTimer stops the ack timer set on the ENI attachment +func (eni *ENIAttachment) StopAckTimer() { + eni.guard.Lock() + defer eni.guard.Unlock() + + eni.ackTimer.Stop() +} + +// HasExpired returns true if the ENI attachment object has exceeded the +// threshold for notifying the backend of the attachment +func (eni *ENIAttachment) HasExpired() bool { + eni.guard.RLock() + defer eni.guard.RUnlock() + + return time.Now().After(eni.ExpiresAt) +} + +// String returns a string representation of the ENI Attachment +func (eni *ENIAttachment) String() string { + eni.guard.RLock() + defer eni.guard.RUnlock() + + return eni.stringUnsafe() +} + +// stringUnsafe returns a string representation of the ENI Attachment +func (eni *ENIAttachment) stringUnsafe() string { + // skip TaskArn field for instance level eni attachment since it won't have a task arn + if eni.AttachmentType == ENIAttachmentTypeInstanceENI { + return fmt.Sprintf( + "ENI Attachment: attachment=%s attachmentType=%s attachmentSent=%t mac=%s status=%s expiresAt=%s", + eni.AttachmentARN, eni.AttachmentType, eni.AttachStatusSent, eni.MACAddress, eni.Status.String(), eni.ExpiresAt.Format(time.RFC3339)) + } + + return fmt.Sprintf( + "ENI Attachment: task=%s attachment=%s attachmentType=%s attachmentSent=%t 
mac=%s status=%s expiresAt=%s", + eni.TaskARN, eni.AttachmentARN, eni.AttachmentType, eni.AttachStatusSent, eni.MACAddress, eni.Status.String(), eni.ExpiresAt.Format(time.RFC3339)) +} diff --git a/agent/api/eni/eniattachment_test.go b/agent/api/eni/eniattachment_test.go new file mode 100644 index 00000000000..c0eef37f7af --- /dev/null +++ b/agent/api/eni/eniattachment_test.go @@ -0,0 +1,171 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eni + +import ( + "encoding/json" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + taskARN = "t1" + attachmentARN = "att1" + mac = "mac1" + attachSent = true + attachmentType = "eni" +) + +func TestMarshalUnmarshal(t *testing.T) { + expiresAt := time.Now() + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + AttachStatusSent: attachSent, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: expiresAt, + } + bytes, err := json.Marshal(attachment) + assert.NoError(t, err) + var unmarshalledAttachment ENIAttachment + err = json.Unmarshal(bytes, &unmarshalledAttachment) + assert.NoError(t, err) + assert.Equal(t, attachment.TaskARN, unmarshalledAttachment.TaskARN) + assert.Equal(t, attachment.AttachmentARN, unmarshalledAttachment.AttachmentARN) + assert.Equal(t, attachment.AttachStatusSent, unmarshalledAttachment.AttachStatusSent) + assert.Equal(t, attachment.MACAddress, unmarshalledAttachment.MACAddress) + 
assert.Equal(t, attachment.Status, unmarshalledAttachment.Status) + + expectedExpiresAtUTC, err := time.Parse(time.RFC3339, attachment.ExpiresAt.Format(time.RFC3339)) + assert.NoError(t, err) + unmarshalledExpiresAtUTC, err := time.Parse(time.RFC3339, unmarshalledAttachment.ExpiresAt.Format(time.RFC3339)) + assert.NoError(t, err) + assert.Equal(t, expectedExpiresAtUTC, unmarshalledExpiresAtUTC) +} + +func TestMarshalUnmarshalWithAttachmentType(t *testing.T) { + expiresAt := time.Now() + attachment := &ENIAttachment{ + AttachmentType: attachmentType, + TaskARN: taskARN, + AttachmentARN: attachmentARN, + AttachStatusSent: attachSent, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: expiresAt, + } + bytes, err := json.Marshal(attachment) + assert.NoError(t, err) + var unmarshalledAttachment ENIAttachment + err = json.Unmarshal(bytes, &unmarshalledAttachment) + assert.NoError(t, err) + assert.Equal(t, attachment.AttachmentType, unmarshalledAttachment.AttachmentType) + assert.Equal(t, attachment.TaskARN, unmarshalledAttachment.TaskARN) + assert.Equal(t, attachment.AttachmentARN, unmarshalledAttachment.AttachmentARN) + assert.Equal(t, attachment.AttachStatusSent, unmarshalledAttachment.AttachStatusSent) + assert.Equal(t, attachment.MACAddress, unmarshalledAttachment.MACAddress) + assert.Equal(t, attachment.Status, unmarshalledAttachment.Status) + + expectedExpiresAtUTC, err := time.Parse(time.RFC3339, attachment.ExpiresAt.Format(time.RFC3339)) + assert.NoError(t, err) + unmarshalledExpiresAtUTC, err := time.Parse(time.RFC3339, unmarshalledAttachment.ExpiresAt.Format(time.RFC3339)) + assert.NoError(t, err) + assert.Equal(t, expectedExpiresAtUTC, unmarshalledExpiresAtUTC) +} + +func TestStartTimerErrorWhenExpiresAtIsInThePast(t *testing.T) { + expiresAt := time.Now().Unix() - 1 + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + AttachStatusSent: attachSent, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: 
time.Unix(expiresAt, 0), + } + assert.Error(t, attachment.StartTimer(func() {})) +} + +func TestHasExpired(t *testing.T) { + for _, tc := range []struct { + expiresAt int64 + expected bool + name string + }{ + {time.Now().Unix() - 1, true, "expiresAt in past returns true"}, + {time.Now().Unix() + 10, false, "expiresAt in future returns false"}, + } { + t.Run(tc.name, func(t *testing.T) { + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + AttachStatusSent: attachSent, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: time.Unix(tc.expiresAt, 0), + } + assert.Equal(t, tc.expected, attachment.HasExpired()) + }) + } +} + +func TestInitialize(t *testing.T) { + var wg sync.WaitGroup + wg.Add(1) + timeoutFunc := func() { + wg.Done() + } + + expiresAt := time.Now().Unix() + 1 + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: time.Unix(expiresAt, 0), + } + assert.NoError(t, attachment.Initialize(timeoutFunc)) + wg.Wait() +} + +func TestInitializeExpired(t *testing.T) { + expiresAt := time.Now().Unix() - 1 + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: time.Unix(expiresAt, 0), + } + assert.Error(t, attachment.Initialize(func() {})) +} + +func TestInitializeExpiredButAlreadySent(t *testing.T) { + expiresAt := time.Now().Unix() - 1 + attachment := &ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: attachmentARN, + AttachStatusSent: attachSent, + MACAddress: mac, + Status: ENIAttachmentNone, + ExpiresAt: time.Unix(expiresAt, 0), + } + assert.NoError(t, attachment.Initialize(func() {})) +} diff --git a/agent/api/eni/enistatus.go b/agent/api/eni/enistatus.go new file mode 100644 index 00000000000..27ac58b4ef4 --- /dev/null +++ b/agent/api/eni/enistatus.go @@ -0,0 +1,47 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eni + +const ( + // ENIAttachmentNone is the zero state of a task when the attach message is received from ACS + ENIAttachmentNone ENIAttachmentStatus = iota + // ENIAttached represents that an eni has shown up on the host + ENIAttached + // ENIDetached represents that an eni has been actually detached from the host + ENIDetached +) + +// ENIAttachmentStatus is an enumeration type for eni attachment state +type ENIAttachmentStatus int32 + +var eniAttachmentStatusMap = map[string]ENIAttachmentStatus{ + "NONE": ENIAttachmentNone, + "ATTACHED": ENIAttached, + "DETACHED": ENIDetached, +} + +// String returns the string value of the eni attachment status +func (eni *ENIAttachmentStatus) String() string { + for k, v := range eniAttachmentStatusMap { + if v == *eni { + return k + } + } + return "NONE" +} + +// ShouldSend returns whether the status should be sent to backend +func (eni *ENIAttachmentStatus) ShouldSend() bool { + return *eni == ENIAttached +} diff --git a/agent/api/eni/enistatus_test.go b/agent/api/eni/enistatus_test.go new file mode 100644 index 00000000000..e90d114d291 --- /dev/null +++ b/agent/api/eni/enistatus_test.go @@ -0,0 +1,78 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eni + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusToString(t *testing.T) { + testCases := []struct { + status ENIAttachmentStatus + str string + }{ + { + status: ENIAttachmentNone, + str: "NONE", + }, + { + status: ENIAttached, + str: "ATTACHED", + }, + { + status: ENIDetached, + str: "DETACHED", + }, + } + + for _, tc := range testCases { + t.Run(tc.str, func(t *testing.T) { + assert.Equal(t, tc.str, (&tc.status).String()) + }) + } +} + +func TestShouldSend(t *testing.T) { + testCases := []struct { + name string + status ENIAttachmentStatus + shouldSend bool + }{ + { + name: "NONE", + status: ENIAttachmentNone, + shouldSend: false, + }, + { + name: "ATTACHED", + status: ENIAttached, + shouldSend: true, + }, + { + name: "DETACHED", + status: ENIDetached, + shouldSend: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.shouldSend, (&tc.status).ShouldSend()) + }) + } +} diff --git a/agent/api/errors/error_types.go b/agent/api/errors/error_types.go new file mode 100644 index 00000000000..87a76db3348 --- /dev/null +++ b/agent/api/errors/error_types.go @@ -0,0 +1,111 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package errors + +import ( + "fmt" + "strings" +) + +// NamedError defines an interface that wraps error and add additional 'ErrorName' method +type NamedError interface { + error + ErrorName() string +} + +// Retriable defines an interface for retriable methods +type Retriable interface { + // Retry returns true if the operation can be retried + Retry() bool +} + +// DefaultRetriable implements the Retriable interface with a boolean to +// indicate if retry should occur +type DefaultRetriable struct { + retry bool +} + +// Retry returns true if the operation can be retried +func (dr DefaultRetriable) Retry() bool { + return dr.retry +} + +// NewRetriable creates a new DefaultRetriable object +func NewRetriable(retry bool) Retriable { + return DefaultRetriable{ + retry: retry, + } +} + +// RetriableError defines an interface for a retriable error +type RetriableError interface { + Retriable + error +} + +// DefaultRetriableError is used to wrap a retriable error +type DefaultRetriableError struct { + Retriable + error +} + +// NewRetriableError creates a new DefaultRetriableError object +func NewRetriableError(retriable Retriable, err error) RetriableError { + return &DefaultRetriableError{ + retriable, + err, + } +} + +// AttributeError defines an error type to indicate an error with an ECS +// attribute +type AttributeError struct { + err string +} + +// Error returns the error string for AttributeError +func (e AttributeError) Error() string { + return e.err +} + +// NewAttributeError creates a new AttributeError object +func NewAttributeError(err string) AttributeError { + return AttributeError{err} +} + +// MultiErr wraps multiple errors +type MultiErr struct { + errors []error +} + +// Error returns the error string for MultiErr 
+func (me MultiErr) Error() string { + ret := make([]string, len(me.errors)+1) + ret[0] = "Multiple error:" + for ndx, err := range me.errors { + ret[ndx+1] = fmt.Sprintf("\t%d: %s", ndx, err.Error()) + } + return strings.Join(ret, "\n") +} + +// NewMultiError creates a new MultiErr object +func NewMultiError(errs ...error) error { + errors := make([]error, 0, len(errs)) + for _, err := range errs { + if err != nil { + errors = append(errors, err) + } + } + return MultiErr{errors} +} diff --git a/agent/api/errors/errors.go b/agent/api/errors/errors.go new file mode 100644 index 00000000000..90ca1f77036 --- /dev/null +++ b/agent/api/errors/errors.go @@ -0,0 +1,129 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package errors + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// InstanceTypeChangedErrorMessage is the error message to print for the +// instance type changed error when registering a container instance +const InstanceTypeChangedErrorMessage = "Container instance type changes are not supported." + +const ClusterNotFoundErrorMessage = "Cluster not found." 
+ +// IsInstanceTypeChangedError returns true if the error when +// registering the container instance is because of instance type being +// changed +func IsInstanceTypeChangedError(err error) bool { + if awserr, ok := err.(awserr.Error); ok { + return strings.Contains(awserr.Message(), InstanceTypeChangedErrorMessage) + } + return false +} + +func IsClusterNotFoundError(err error) bool { + if awserr, ok := err.(awserr.Error); ok { + return strings.Contains(awserr.Message(), ClusterNotFoundErrorMessage) + } + return false +} + +// BadVolumeError represents an error caused by bad volume +type BadVolumeError struct { + Msg string +} + +func (err *BadVolumeError) Error() string { return err.Msg } + +// ErrorName returns name of the BadVolumeError +func (err *BadVolumeError) ErrorName() string { return "InvalidVolumeError" } + +// Retry implements Retriable interface +func (err *BadVolumeError) Retry() bool { return false } + +// DefaultNamedError is a wrapper type for 'error' which adds an optional name and provides a symmetric +// marshal/unmarshal +type DefaultNamedError struct { + Err string `json:"error"` + Name string `json:"name"` +} + +// Error implements error +func (err *DefaultNamedError) Error() string { + if err.Name == "" { + return "UnknownError: " + err.Err + } + return err.Name + ": " + err.Err +} + +// ErrorName implements NamedError +func (err *DefaultNamedError) ErrorName() string { + return err.Name +} + +// NewNamedError creates a NamedError. 
+func NewNamedError(err error) *DefaultNamedError { + if namedErr, ok := err.(NamedError); ok { + return &DefaultNamedError{Err: namedErr.Error(), Name: namedErr.ErrorName()} + } + return &DefaultNamedError{Err: err.Error()} +} + +// HostConfigError represents an error caused by host configuration +type HostConfigError struct { + Msg string +} + +// Error returns the error as a string +func (err *HostConfigError) Error() string { return err.Msg } + +// ErrorName returns the name of the error +func (err *HostConfigError) ErrorName() string { return "HostConfigError" } + +// DockerClientConfigError represents the error caused by docker client +type DockerClientConfigError struct { + Msg string +} + +// Error returns the error as a string +func (err *DockerClientConfigError) Error() string { return err.Msg } + +// ErrorName returns the name of the error +func (err *DockerClientConfigError) ErrorName() string { return "DockerClientConfigError" } + +// ResourceInitError is a task error for which a required resource cannot +// be initialized +type ResourceInitError struct { + taskARN string + origErr error +} + +// NewResourceInitError creates an error for resource initialize failure +func NewResourceInitError(taskARN string, origErr error) *ResourceInitError { + return &ResourceInitError{taskARN, origErr} +} + +// Error returns the error as a string +func (err *ResourceInitError) Error() string { + return fmt.Sprintf("resource cannot be initialized for task %s: %v", err.taskARN, err.origErr) +} + +// ErrorName is the name of the error +func (err *ResourceInitError) ErrorName() string { + return "ResourceInitializationError" +} diff --git a/agent/api/generate_mocks.go b/agent/api/generate_mocks.go new file mode 100644 index 00000000000..c19794a9d82 --- /dev/null +++ b/agent/api/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package api + +//go:generate mockgen -destination=mocks/api_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/api ECSSDK,ECSSubmitStateSDK,ECSClient diff --git a/agent/api/interface.go b/agent/api/interface.go new file mode 100644 index 00000000000..b053b282a4a --- /dev/null +++ b/agent/api/interface.go @@ -0,0 +1,71 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package api + +import "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + +// ECSClient is an interface over the ECSSDK interface which abstracts away some +// details around constructing the request and reading the response down to the +// parts the agent cares about. +// For example, the ever-present 'Cluster' member is abstracted out so that it +// may be configured once and used throughout transparently. 
+type ECSClient interface { + // RegisterContainerInstance calculates the appropriate resources, creates + // the default cluster if necessary, and returns the registered + // ContainerInstanceARN if successful. Supplying a non-empty container + // instance ARN allows a container instance to update its registered + // resources. + RegisterContainerInstance(existingContainerInstanceArn string, + attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice, + outpostARN string) (string, string, error) + // SubmitTaskStateChange sends a state change and returns an error + // indicating if it was submitted + SubmitTaskStateChange(change TaskStateChange) error + // SubmitContainerStateChange sends a state change and returns an error + // indicating if it was submitted + SubmitContainerStateChange(change ContainerStateChange) error + // SubmitAttachmentStateChange sends an attachment state change and returns an error + // indicating if it was submitted + SubmitAttachmentStateChange(change AttachmentStateChange) error + // DiscoverPollEndpoint takes a ContainerInstanceARN and returns the + // endpoint at which this Agent should contact ACS + DiscoverPollEndpoint(containerInstanceArn string) (string, error) + // DiscoverTelemetryEndpoint takes a ContainerInstanceARN and returns the + // endpoint at which this Agent should contact Telemetry Service + DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error) + // GetResourceTags retrieves the Tags associated with a certain resource + GetResourceTags(resourceArn string) ([]*ecs.Tag, error) + // UpdateContainerInstancesState updates the given container Instance ID with + // the given status. Only valid statuses are ACTIVE and DRAINING. + UpdateContainerInstancesState(instanceARN, status string) error +} + +// ECSSDK is an interface that specifies the subset of the AWS Go SDK's ECS +// client that the Agent uses. 
This interface is meant to allow injecting a +// mock for testing. +type ECSSDK interface { + CreateCluster(*ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) + RegisterContainerInstance(*ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error) + DiscoverPollEndpoint(*ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error) + ListTagsForResource(*ecs.ListTagsForResourceInput) (*ecs.ListTagsForResourceOutput, error) + UpdateContainerInstancesState(input *ecs.UpdateContainerInstancesStateInput) (*ecs.UpdateContainerInstancesStateOutput, error) +} + +// ECSSubmitStateSDK is an interface with customized ecs client that +// implements the SubmitTaskStateChange and SubmitContainerStateChange +type ECSSubmitStateSDK interface { + SubmitContainerStateChange(*ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error) + SubmitTaskStateChange(*ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error) + SubmitAttachmentStateChanges(*ecs.SubmitAttachmentStateChangesInput) (*ecs.SubmitAttachmentStateChangesOutput, error) +} diff --git a/agent/api/mocks/api_mocks.go b/agent/api/mocks/api_mocks.go new file mode 100644 index 00000000000..cc1eb7c1a00 --- /dev/null +++ b/agent/api/mocks/api_mocks.go @@ -0,0 +1,333 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/aws/amazon-ecs-agent/agent/api (interfaces: ECSSDK,ECSSubmitStateSDK,ECSClient) + +// Package mock_api is a generated GoMock package. +package mock_api + +import ( + reflect "reflect" + + api "github.com/aws/amazon-ecs-agent/agent/api" + ecs "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + gomock "github.com/golang/mock/gomock" +) + +// MockECSSDK is a mock of ECSSDK interface +type MockECSSDK struct { + ctrl *gomock.Controller + recorder *MockECSSDKMockRecorder +} + +// MockECSSDKMockRecorder is the mock recorder for MockECSSDK +type MockECSSDKMockRecorder struct { + mock *MockECSSDK +} + +// NewMockECSSDK creates a new mock instance +func NewMockECSSDK(ctrl *gomock.Controller) *MockECSSDK { + mock := &MockECSSDK{ctrl: ctrl} + mock.recorder = &MockECSSDKMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECSSDK) EXPECT() *MockECSSDKMockRecorder { + return m.recorder +} + +// CreateCluster mocks base method +func (m *MockECSSDK) CreateCluster(arg0 *ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCluster", arg0) + ret0, _ := ret[0].(*ecs.CreateClusterOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster +func (mr *MockECSSDKMockRecorder) CreateCluster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockECSSDK)(nil).CreateCluster), arg0) +} + +// DiscoverPollEndpoint mocks base method +func (m *MockECSSDK) DiscoverPollEndpoint(arg0 *ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DiscoverPollEndpoint", arg0) + ret0, _ := ret[0].(*ecs.DiscoverPollEndpointOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DiscoverPollEndpoint indicates an 
expected call of DiscoverPollEndpoint +func (mr *MockECSSDKMockRecorder) DiscoverPollEndpoint(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiscoverPollEndpoint", reflect.TypeOf((*MockECSSDK)(nil).DiscoverPollEndpoint), arg0) +} + +// ListTagsForResource mocks base method +func (m *MockECSSDK) ListTagsForResource(arg0 *ecs.ListTagsForResourceInput) (*ecs.ListTagsForResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTagsForResource", arg0) + ret0, _ := ret[0].(*ecs.ListTagsForResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTagsForResource indicates an expected call of ListTagsForResource +func (mr *MockECSSDKMockRecorder) ListTagsForResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResource", reflect.TypeOf((*MockECSSDK)(nil).ListTagsForResource), arg0) +} + +// RegisterContainerInstance mocks base method +func (m *MockECSSDK) RegisterContainerInstance(arg0 *ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterContainerInstance", arg0) + ret0, _ := ret[0].(*ecs.RegisterContainerInstanceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterContainerInstance indicates an expected call of RegisterContainerInstance +func (mr *MockECSSDKMockRecorder) RegisterContainerInstance(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterContainerInstance", reflect.TypeOf((*MockECSSDK)(nil).RegisterContainerInstance), arg0) +} + +// UpdateContainerInstancesState mocks base method +func (m *MockECSSDK) UpdateContainerInstancesState(arg0 *ecs.UpdateContainerInstancesStateInput) (*ecs.UpdateContainerInstancesStateOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateContainerInstancesState", 
arg0) + ret0, _ := ret[0].(*ecs.UpdateContainerInstancesStateOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateContainerInstancesState indicates an expected call of UpdateContainerInstancesState +func (mr *MockECSSDKMockRecorder) UpdateContainerInstancesState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateContainerInstancesState", reflect.TypeOf((*MockECSSDK)(nil).UpdateContainerInstancesState), arg0) +} + +// MockECSSubmitStateSDK is a mock of ECSSubmitStateSDK interface +type MockECSSubmitStateSDK struct { + ctrl *gomock.Controller + recorder *MockECSSubmitStateSDKMockRecorder +} + +// MockECSSubmitStateSDKMockRecorder is the mock recorder for MockECSSubmitStateSDK +type MockECSSubmitStateSDKMockRecorder struct { + mock *MockECSSubmitStateSDK +} + +// NewMockECSSubmitStateSDK creates a new mock instance +func NewMockECSSubmitStateSDK(ctrl *gomock.Controller) *MockECSSubmitStateSDK { + mock := &MockECSSubmitStateSDK{ctrl: ctrl} + mock.recorder = &MockECSSubmitStateSDKMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECSSubmitStateSDK) EXPECT() *MockECSSubmitStateSDKMockRecorder { + return m.recorder +} + +// SubmitAttachmentStateChanges mocks base method +func (m *MockECSSubmitStateSDK) SubmitAttachmentStateChanges(arg0 *ecs.SubmitAttachmentStateChangesInput) (*ecs.SubmitAttachmentStateChangesOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitAttachmentStateChanges", arg0) + ret0, _ := ret[0].(*ecs.SubmitAttachmentStateChangesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitAttachmentStateChanges indicates an expected call of SubmitAttachmentStateChanges +func (mr *MockECSSubmitStateSDKMockRecorder) SubmitAttachmentStateChanges(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"SubmitAttachmentStateChanges", reflect.TypeOf((*MockECSSubmitStateSDK)(nil).SubmitAttachmentStateChanges), arg0) +} + +// SubmitContainerStateChange mocks base method +func (m *MockECSSubmitStateSDK) SubmitContainerStateChange(arg0 *ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitContainerStateChange", arg0) + ret0, _ := ret[0].(*ecs.SubmitContainerStateChangeOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitContainerStateChange indicates an expected call of SubmitContainerStateChange +func (mr *MockECSSubmitStateSDKMockRecorder) SubmitContainerStateChange(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitContainerStateChange", reflect.TypeOf((*MockECSSubmitStateSDK)(nil).SubmitContainerStateChange), arg0) +} + +// SubmitTaskStateChange mocks base method +func (m *MockECSSubmitStateSDK) SubmitTaskStateChange(arg0 *ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitTaskStateChange", arg0) + ret0, _ := ret[0].(*ecs.SubmitTaskStateChangeOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitTaskStateChange indicates an expected call of SubmitTaskStateChange +func (mr *MockECSSubmitStateSDKMockRecorder) SubmitTaskStateChange(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitTaskStateChange", reflect.TypeOf((*MockECSSubmitStateSDK)(nil).SubmitTaskStateChange), arg0) +} + +// MockECSClient is a mock of ECSClient interface +type MockECSClient struct { + ctrl *gomock.Controller + recorder *MockECSClientMockRecorder +} + +// MockECSClientMockRecorder is the mock recorder for MockECSClient +type MockECSClientMockRecorder struct { + mock *MockECSClient +} + +// NewMockECSClient creates a new mock instance +func NewMockECSClient(ctrl 
*gomock.Controller) *MockECSClient { + mock := &MockECSClient{ctrl: ctrl} + mock.recorder = &MockECSClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECSClient) EXPECT() *MockECSClientMockRecorder { + return m.recorder +} + +// DiscoverPollEndpoint mocks base method +func (m *MockECSClient) DiscoverPollEndpoint(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DiscoverPollEndpoint", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DiscoverPollEndpoint indicates an expected call of DiscoverPollEndpoint +func (mr *MockECSClientMockRecorder) DiscoverPollEndpoint(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiscoverPollEndpoint", reflect.TypeOf((*MockECSClient)(nil).DiscoverPollEndpoint), arg0) +} + +// DiscoverTelemetryEndpoint mocks base method +func (m *MockECSClient) DiscoverTelemetryEndpoint(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DiscoverTelemetryEndpoint", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DiscoverTelemetryEndpoint indicates an expected call of DiscoverTelemetryEndpoint +func (mr *MockECSClientMockRecorder) DiscoverTelemetryEndpoint(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiscoverTelemetryEndpoint", reflect.TypeOf((*MockECSClient)(nil).DiscoverTelemetryEndpoint), arg0) +} + +// GetResourceTags mocks base method +func (m *MockECSClient) GetResourceTags(arg0 string) ([]*ecs.Tag, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourceTags", arg0) + ret0, _ := ret[0].([]*ecs.Tag) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResourceTags indicates an expected call of GetResourceTags +func (mr *MockECSClientMockRecorder) GetResourceTags(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourceTags", reflect.TypeOf((*MockECSClient)(nil).GetResourceTags), arg0) +} + +// RegisterContainerInstance mocks base method +func (m *MockECSClient) RegisterContainerInstance(arg0 string, arg1 []*ecs.Attribute, arg2 []*ecs.Tag, arg3 string, arg4 []*ecs.PlatformDevice, arg5 string) (string, string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterContainerInstance", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// RegisterContainerInstance indicates an expected call of RegisterContainerInstance +func (mr *MockECSClientMockRecorder) RegisterContainerInstance(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterContainerInstance", reflect.TypeOf((*MockECSClient)(nil).RegisterContainerInstance), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// SubmitAttachmentStateChange mocks base method +func (m *MockECSClient) SubmitAttachmentStateChange(arg0 api.AttachmentStateChange) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitAttachmentStateChange", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitAttachmentStateChange indicates an expected call of SubmitAttachmentStateChange +func (mr *MockECSClientMockRecorder) SubmitAttachmentStateChange(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAttachmentStateChange", reflect.TypeOf((*MockECSClient)(nil).SubmitAttachmentStateChange), arg0) +} + +// SubmitContainerStateChange mocks base method +func (m *MockECSClient) SubmitContainerStateChange(arg0 api.ContainerStateChange) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitContainerStateChange", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// 
SubmitContainerStateChange indicates an expected call of SubmitContainerStateChange +func (mr *MockECSClientMockRecorder) SubmitContainerStateChange(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitContainerStateChange", reflect.TypeOf((*MockECSClient)(nil).SubmitContainerStateChange), arg0) +} + +// SubmitTaskStateChange mocks base method +func (m *MockECSClient) SubmitTaskStateChange(arg0 api.TaskStateChange) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitTaskStateChange", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitTaskStateChange indicates an expected call of SubmitTaskStateChange +func (mr *MockECSClientMockRecorder) SubmitTaskStateChange(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitTaskStateChange", reflect.TypeOf((*MockECSClient)(nil).SubmitTaskStateChange), arg0) +} + +// UpdateContainerInstancesState mocks base method +func (m *MockECSClient) UpdateContainerInstancesState(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateContainerInstancesState", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateContainerInstancesState indicates an expected call of UpdateContainerInstancesState +func (mr *MockECSClientMockRecorder) UpdateContainerInstancesState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateContainerInstancesState", reflect.TypeOf((*MockECSClient)(nil).UpdateContainerInstancesState), arg0, arg1) +} diff --git a/agent/api/statechange.go b/agent/api/statechange.go new file mode 100644 index 00000000000..cee70577b63 --- /dev/null +++ b/agent/api/statechange.go @@ -0,0 +1,323 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package api + +import ( + "fmt" + "strconv" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/pkg/errors" + + "github.com/aws/aws-sdk-go/aws" +) + +// ContainerStateChange represents a state change that needs to be sent to the +// SubmitContainerStateChange API +type ContainerStateChange struct { + // TaskArn is the unique identifier for the task + TaskArn string + // RuntimeID is the dockerID of the container + RuntimeID string + // ContainerName is the name of the container + ContainerName string + // Status is the status to send + Status apicontainerstatus.ContainerStatus + // ImageDigest is the sha-256 digest of the container image as pulled from the repository + ImageDigest string + // Reason may contain details of why the container stopped + Reason string + // ExitCode is the exit code of the container, if available + ExitCode *int + // PortBindings are the details of the host ports picked for the specified + // container ports + PortBindings []apicontainer.PortBinding + // Container is a pointer to the container involved in the state change that gives the event handler a hook into + // storing what status was sent. 
This is used to ensure the same event is handled only once. + Container *apicontainer.Container +} + +type ManagedAgentStateChange struct { + // TaskArn is the unique identifier for the task + TaskArn string + // Name is the name of the managed agent + Name string + // Container is a pointer to the container involved in the state change that gives the event handler a hook into + // storing what status was sent. This is used to ensure the same event is handled only once. + Container *apicontainer.Container + // Status is the status of the managed agent + Status apicontainerstatus.ManagedAgentStatus + // Reason indicates an error in a managed agent state change + Reason string +} + +// TaskStateChange represents a state change that needs to be sent to the +// SubmitTaskStateChange API +type TaskStateChange struct { + // Attachment is the eni attachment object to send + Attachment *apieni.ENIAttachment + // TaskArn is the unique identifier for the task + TaskARN string + // Status is the status to send + Status apitaskstatus.TaskStatus + // Reason may contain details of why the task stopped + Reason string + // Containers holds the events generated by containers owned by this task + Containers []ContainerStateChange + // ManagedAgents contain the name and status of Agents running inside the container + ManagedAgents []ManagedAgentStateChange + // PullStartedAt is the timestamp when the task starts pulling + PullStartedAt *time.Time + // PullStoppedAt is the timestamp when the task finished pulling + PullStoppedAt *time.Time + // ExecutionStoppedAt is the timestamp when the essential container stopped + ExecutionStoppedAt *time.Time + // Task is a pointer to the task involved in the state change that gives the event handler a hook into storing + // what status was sent. This is used to ensure the same event is handled only once. 
+ Task *apitask.Task +} + +// AttachmentStateChange represents a state change that needs to be sent to the +// SubmitAttachmentStateChanges API +type AttachmentStateChange struct { + // Attachment is the eni attachment object to send + Attachment *apieni.ENIAttachment +} + +// NewTaskStateChangeEvent creates a new task state change event +// returns error if the state change doesn't need to be sent to the ECS backend. +func NewTaskStateChangeEvent(task *apitask.Task, reason string) (TaskStateChange, error) { + var event TaskStateChange + taskKnownStatus := task.GetKnownStatus() + if !taskKnownStatus.BackendRecognized() { + return event, errors.Errorf( + "create task state change event api: status not recognized by ECS: %v", + taskKnownStatus) + } + if task.GetSentStatus() >= taskKnownStatus { + return event, errors.Errorf( + "create task state change event api: status [%s] already sent", + taskKnownStatus.String()) + } + + event = TaskStateChange{ + TaskARN: task.Arn, + Status: taskKnownStatus, + Reason: reason, + Task: task, + } + + event.SetTaskTimestamps() + + return event, nil +} + +// NewContainerStateChangeEvent creates a new container state change event +// returns error if the state change doesn't need to be sent to the ECS backend. 
+func NewContainerStateChangeEvent(task *apitask.Task, cont *apicontainer.Container, reason string) (ContainerStateChange, error) { + event, err := newUncheckedContainerStateChangeEvent(task, cont, reason) + if err != nil { + return event, err + } + contKnownStatus := cont.GetKnownStatus() + if !contKnownStatus.ShouldReportToBackend(cont.GetSteadyStateStatus()) { + return event, errors.Errorf( + "create container state change event api: status not recognized by ECS: %v", + contKnownStatus) + } + if cont.GetSentStatus() >= contKnownStatus { + return event, errors.Errorf( + "create container state change event api: status [%s] already sent for container %s, task %s", + contKnownStatus.String(), cont.Name, task.Arn) + } + if reason == "" && cont.ApplyingError != nil { + reason = cont.ApplyingError.Error() + event.Reason = reason + } + return event, nil +} + +func newUncheckedContainerStateChangeEvent(task *apitask.Task, cont *apicontainer.Container, reason string) (ContainerStateChange, error) { + var event ContainerStateChange + if cont.IsInternal() { + return event, errors.Errorf( + "create container state change event api: internal container: %s", + cont.Name) + } + contKnownStatus := cont.GetKnownStatus() + event = ContainerStateChange{ + TaskArn: task.Arn, + ContainerName: cont.Name, + RuntimeID: cont.GetRuntimeID(), + Status: contKnownStatus.BackendStatus(cont.GetSteadyStateStatus()), + ExitCode: cont.GetKnownExitCode(), + PortBindings: cont.GetKnownPortBindings(), + ImageDigest: cont.GetImageDigest(), + Reason: reason, + Container: cont, + } + return event, nil +} + +// NewManagedAgentChangeEvent creates a new managedAgent change event to convey managed agent state changes +// returns error if the state change doesn't need to be sent to the ECS backend. 
+func NewManagedAgentChangeEvent(task *apitask.Task, cont *apicontainer.Container, managedAgentName string, reason string) (ManagedAgentStateChange, error) { + var event = ManagedAgentStateChange{} + managedAgent, ok := cont.GetManagedAgentByName(managedAgentName) + if !ok { + return event, errors.Errorf("No ExecuteCommandAgent available in container: %v", cont.Name) + } + if !managedAgent.Status.ShouldReportToBackend() { + return event, errors.Errorf("create managed agent state change event: status not recognized by ECS: %v", managedAgent.Status) + } + + event = ManagedAgentStateChange{ + TaskArn: task.Arn, + Name: managedAgent.Name, + Container: cont, + Status: managedAgent.Status, + Reason: reason, + } + + return event, nil +} + +// NewAttachmentStateChangeEvent creates a new attachment state change event +func NewAttachmentStateChangeEvent(eniAttachment *apieni.ENIAttachment) AttachmentStateChange { + return AttachmentStateChange{ + Attachment: eniAttachment, + } +} + +// String returns a human readable string representation of this object +func (c *ContainerStateChange) String() string { + res := fmt.Sprintf("%s %s -> %s", c.TaskArn, c.ContainerName, c.Status.String()) + if c.ExitCode != nil { + res += ", Exit " + strconv.Itoa(*c.ExitCode) + ", " + } + if c.Reason != "" { + res += ", Reason " + c.Reason + } + if len(c.PortBindings) != 0 { + res += fmt.Sprintf(", Ports %v", c.PortBindings) + } + if c.Container != nil { + res += ", Known Sent: " + c.Container.GetSentStatus().String() + } + return res +} + +// String returns a human readable string representation of ManagedAgentStateChange +func (m *ManagedAgentStateChange) String() string { + res := fmt.Sprintf("%s %s %s -> %s", m.TaskArn, m.Container.Name, m.Name, m.Status.String()) + if m.Reason != "" { + res += ", Reason " + m.Reason + } + return res +} + +// SetTaskTimestamps adds the timestamp information of task into the event +// to be sent by SubmitTaskStateChange +func (change *TaskStateChange) 
SetTaskTimestamps() { + if change.Task == nil { + return + } + + // Send the task timestamp if set + if timestamp := change.Task.GetPullStartedAt(); !timestamp.IsZero() { + change.PullStartedAt = aws.Time(timestamp.UTC()) + } + if timestamp := change.Task.GetPullStoppedAt(); !timestamp.IsZero() { + change.PullStoppedAt = aws.Time(timestamp.UTC()) + } + if timestamp := change.Task.GetExecutionStoppedAt(); !timestamp.IsZero() { + change.ExecutionStoppedAt = aws.Time(timestamp.UTC()) + } +} + +// ShouldBeReported checks if the statechange should be reported to backend +func (change *TaskStateChange) ShouldBeReported() bool { + // Events that should be reported: + // 1. Normal task state change: RUNNING/STOPPED + // 2. Container state change, with task status in CREATED/RUNNING/STOPPED + // The task timestamp will be sent in both of the event type + // TODO Move the Attachment statechange check into this method + if change.Status == apitaskstatus.TaskRunning || change.Status == apitaskstatus.TaskStopped { + return true + } + + if len(change.Containers) != 0 { + return true + } + + return false +} + +// String returns a human readable string representation of this object +func (change *TaskStateChange) String() string { + res := fmt.Sprintf("%s -> %s", change.TaskARN, change.Status.String()) + if change.Task != nil { + res += fmt.Sprintf(", Known Sent: %s, PullStartedAt: %s, PullStoppedAt: %s, ExecutionStoppedAt: %s", + change.Task.GetSentStatus().String(), + change.Task.GetPullStartedAt(), + change.Task.GetPullStoppedAt(), + change.Task.GetExecutionStoppedAt()) + } + if change.Attachment != nil { + res += ", " + change.Attachment.String() + } + for _, containerChange := range change.Containers { + res += ", container change: " + containerChange.String() + } + for _, managedAgentChange := range change.ManagedAgents { + res += ", managed agent: " + managedAgentChange.String() + } + + return res +} + +// String returns a human readable string representation of this object 
+func (change *AttachmentStateChange) String() string { + if change.Attachment != nil { + return fmt.Sprintf("%s -> %s, %s", change.Attachment.AttachmentARN, change.Attachment.Status.String(), + change.Attachment.String()) + } + + return "" +} + +// GetEventType returns an enum identifying the event type +func (ContainerStateChange) GetEventType() statechange.EventType { + return statechange.ContainerEvent +} + +func (ms ManagedAgentStateChange) GetEventType() statechange.EventType { + return statechange.ManagedAgentEvent +} + +// GetEventType returns an enum identifying the event type +func (ts TaskStateChange) GetEventType() statechange.EventType { + return statechange.TaskEvent +} + +// GetEventType returns an enum identifying the event type +func (AttachmentStateChange) GetEventType() statechange.EventType { + return statechange.AttachmentEvent +} diff --git a/agent/api/statechange_test.go b/agent/api/statechange_test.go new file mode 100644 index 00000000000..e134260ab36 --- /dev/null +++ b/agent/api/statechange_test.go @@ -0,0 +1,267 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package api + +import ( + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + execcmd "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + + "github.com/stretchr/testify/assert" +) + +func TestShouldBeReported(t *testing.T) { + cases := []struct { + status apitaskstatus.TaskStatus + containerChange []ContainerStateChange + result bool + }{ + { // Normal task state change to running + status: apitaskstatus.TaskRunning, + result: true, + }, + { // Normal task state change to stopped + status: apitaskstatus.TaskStopped, + result: true, + }, + { // Container changed while task is not in steady state + status: apitaskstatus.TaskCreated, + containerChange: []ContainerStateChange{ + {TaskArn: "taskarn"}, + }, + result: true, + }, + { // No container change and task status not recognized + status: apitaskstatus.TaskCreated, + result: false, + }, + } + + for _, tc := range cases { + t.Run(fmt.Sprintf("task change status: %s, container change: %t", tc.status, len(tc.containerChange) > 0), + func(t *testing.T) { + taskChange := TaskStateChange{ + Status: tc.status, + Containers: tc.containerChange, + } + + assert.Equal(t, tc.result, taskChange.ShouldBeReported()) + }) + } +} + +func TestSetTaskTimestamps(t *testing.T) { + t1 := time.Now() + t2 := t1.Add(time.Second) + t3 := t2.Add(time.Second) + + change := &TaskStateChange{ + Task: &apitask.Task{ + PullStartedAtUnsafe: t1, + PullStoppedAtUnsafe: t2, + ExecutionStoppedAtUnsafe: t3, + }, + } + + change.SetTaskTimestamps() + assert.Equal(t, t1.UTC().String(), change.PullStartedAt.String()) + assert.Equal(t, t2.UTC().String(), change.PullStoppedAt.String()) + assert.Equal(t, t3.UTC().String(), change.ExecutionStoppedAt.String()) +} + +func 
TestSetContainerRuntimeID(t *testing.T) { + task := &apitask.Task{} + steadyStateStatus := apicontainerstatus.ContainerRunning + Containers := []*apicontainer.Container{ + { + RuntimeID: "222", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + SentStatusUnsafe: apicontainerstatus.ContainerStatusNone, + Type: apicontainer.ContainerNormal, + SteadyStateStatusUnsafe: &steadyStateStatus, + }, + } + + task.Containers = Containers + resp, ok := NewContainerStateChangeEvent(task, task.Containers[0], "") + + assert.NoError(t, ok, "error create newContainerStateChangeEvent") + assert.Equal(t, "222", resp.RuntimeID) +} + +func TestSetImageDigest(t *testing.T) { + task := &apitask.Task{} + steadyStateStatus := apicontainerstatus.ContainerRunning + Containers := []*apicontainer.Container{ + { + ImageDigest: "sha256:d1c14fcf2e9476ed58ebc4251b211f403f271e96b6c3d9ada0f1c5454ca4d230", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + SentStatusUnsafe: apicontainerstatus.ContainerStatusNone, + Type: apicontainer.ContainerNormal, + SteadyStateStatusUnsafe: &steadyStateStatus, + }, + } + + task.Containers = Containers + resp, ok := NewContainerStateChangeEvent(task, task.Containers[0], "") + + assert.NoError(t, ok, "error create newContainerStateChangeEvent") + assert.Equal(t, "sha256:d1c14fcf2e9476ed58ebc4251b211f403f271e96b6c3d9ada0f1c5454ca4d230", resp.ImageDigest) +} + +func TestNewUncheckedContainerStateChangeEvent(t *testing.T) { + tests := []struct { + name string + containerType apicontainer.ContainerType + }{ + { + name: "internal container", + containerType: apicontainer.ContainerEmptyHostVolume, + }, + { + name: "normal container", + containerType: apicontainer.ContainerNormal, + }, + } + steadyStateStatus := apicontainerstatus.ContainerRunning + exitCode := 1 + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := &apitask.Task{ + Arn: "arn:123", + Containers: []*apicontainer.Container{ + { + Name: "c1", + RuntimeID: "222", + 
KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + SentStatusUnsafe: apicontainerstatus.ContainerStatusNone, + Type: tc.containerType, + SteadyStateStatusUnsafe: &steadyStateStatus, + KnownExitCodeUnsafe: &exitCode, + KnownPortBindingsUnsafe: []apicontainer.PortBinding{{ + ContainerPort: 1, + HostPort: 2, + BindIP: "1.2.3.4", + Protocol: 3, + }}, + ImageDigest: "image", + }, + }} + + expectedEvent := ContainerStateChange{ + TaskArn: "arn:123", + ContainerName: "c1", + RuntimeID: "222", + Status: apicontainerstatus.ContainerRunning, + ExitCode: &exitCode, + PortBindings: []apicontainer.PortBinding{{ + ContainerPort: 1, + HostPort: 2, + BindIP: "1.2.3.4", + Protocol: 3, + }}, + ImageDigest: "image", + Reason: "reason", + Container: task.Containers[0], + } + + event, err := newUncheckedContainerStateChangeEvent(task, task.Containers[0], "reason") + if tc.containerType == apicontainer.ContainerNormal { + assert.NoError(t, err) + assert.Equal(t, expectedEvent, event) + } else { + assert.Error(t, err) + } + }) + } +} + +func TestNewManagedAgentChangeEvent(t *testing.T) { + tests := []struct { + name string + managedAgents []apicontainer.ManagedAgent + expectError bool + }{ + { + name: "no managed agents", + expectError: true, + }, + { + name: "non-existent managed agent", + expectError: true, + managedAgents: []apicontainer.ManagedAgent{{ + Name: "nonExistentAgent", + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentStatusNone, + Reason: "reason", + }}}, + }, + { + name: "with managed agents that should NOT be reported", + managedAgents: []apicontainer.ManagedAgent{{ + Name: "dummyAgent", + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentStatusNone, + Reason: "reason", + }}}, + expectError: true, + }, + { + name: "with managed agents that should be reported", + managedAgents: []apicontainer.ManagedAgent{{ + Name: execcmd.ExecuteCommandAgentName, + ManagedAgentState: 
apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + Reason: "reason", + }}}, + expectError: false, + }, + } + for _, tc := range tests { + testContainer := apicontainer.Container{ + Name: "c1", + ManagedAgentsUnsafe: tc.managedAgents, + } + testContainers := []*apicontainer.Container{&testContainer} + t.Run(tc.name, func(t *testing.T) { + + task := &apitask.Task{ + Arn: "arn:123", + Containers: testContainers, + } + expectedEvent := ManagedAgentStateChange{ + TaskArn: "arn:123", + Name: execcmd.ExecuteCommandAgentName, + Container: &testContainer, + Status: apicontainerstatus.ManagedAgentRunning, + Reason: "test", + } + + event, err := NewManagedAgentChangeEvent(task, task.Containers[0], execcmd.ExecuteCommandAgentName, "test") + if tc.expectError { + assert.Error(t, err) + } else { + assert.Equal(t, expectedEvent, event) + } + }) + } +} diff --git a/agent/api/task/association.go b/agent/api/task/association.go new file mode 100644 index 00000000000..4f403bd2ae8 --- /dev/null +++ b/agent/api/task/association.go @@ -0,0 +1,38 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +// Association is a definition of a device (or other potential things) that is +// associated with a task. +type Association struct { + // Containers are the container names that can use this association. + Containers []string `json:"containers"` + // Content describes this association. 
+ Content EncodedString `json:"content"` + // Name is a unique identifier of the association within a task. + Name string `json:"name"` + // Type specifies the type of this association. + Type string `json:"type"` +} + +// EncodedString is used to describe an association, the consumer of the association +// data should be responsible to decode it. We don't model it in Agent, or check the +// value of the encoded string, we just pass whatever we get from ACS, because +// we don't want Agent to be coupled with a specific association. +type EncodedString struct { + // Encoding is the encoding type of the value. + Encoding string `json:"encoding"` + // Value is the content of the encoded string. + Value string `json:"value"` +} diff --git a/agent/api/task/association_test.go b/agent/api/task/association_test.go new file mode 100644 index 00000000000..4c0525bf7f9 --- /dev/null +++ b/agent/api/task/association_test.go @@ -0,0 +1,100 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGPUAssociationMarshal(t *testing.T) { + expectedAssociationMap := map[string]interface{}{ + "containers": []interface{}{"container1"}, + "content": map[string]interface{}{ + "encoding": "base64", + "value": "val", + }, + "name": "gpu1", + "type": "gpu", + } + + association := Association{ + Containers: []string{"container1"}, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + } + + associationJSON, err := json.Marshal(association) + assert.NoError(t, err) + + associationMap := make(map[string]interface{}) + json.Unmarshal(associationJSON, &associationMap) + assert.Equal(t, expectedAssociationMap, associationMap) +} + +func TestEIAssociationMarshal(t *testing.T) { + expectedAssociationMap := map[string]interface{}{ + "containers": []interface{}{"container1"}, + "content": map[string]interface{}{ + "encoding": "base64", + "value": "val", + }, + "name": "dev1", + "type": "elastic-inference", + } + + association := Association{ + Containers: []string{"container1"}, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "dev1", + Type: "elastic-inference", + } + + associationJSON, err := json.Marshal(association) + assert.NoError(t, err) + + associationMap := make(map[string]interface{}) + json.Unmarshal(associationJSON, &associationMap) + assert.Equal(t, expectedAssociationMap, associationMap) +} + +func TestEncodedStringMarshal(t *testing.T) { + expectedEncodedStringMap := map[string]interface{}{ + "encoding": "base64", + "value": "val", + } + + encodedString := EncodedString{ + Encoding: "base64", + Value: "val", + } + + encodedStringJSON, err := json.Marshal(encodedString) + assert.NoError(t, err) + + encodedStringMap := make(map[string]interface{}) + json.Unmarshal(encodedStringJSON, &encodedStringMap) + assert.Equal(t, expectedEncodedStringMap, encodedStringMap) +} diff --git 
a/agent/api/task/json.go b/agent/api/task/json.go new file mode 100644 index 00000000000..1c408ba6eac --- /dev/null +++ b/agent/api/task/json.go @@ -0,0 +1,30 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "encoding/json" +) + +// In order to override the MarshalJSON method while still using Go's marshalling logic inside, an alias type +// is needed to avoid infinite recursion. +type jTask Task + +// MarshalJSON wraps Go's marshalling logic with a necessary read lock. +func (task *Task) MarshalJSON() ([]byte, error) { + task.lock.RLock() + defer task.lock.RUnlock() + + return json.Marshal((*jTask)(task)) +} diff --git a/agent/api/task/status/statusmapping.go b/agent/api/task/status/statusmapping.go new file mode 100644 index 00000000000..1a75085eb94 --- /dev/null +++ b/agent/api/task/status/statusmapping.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package status + +import ( + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" +) + +// MapContainerToTaskStatus maps the container status to the corresponding task status. The +// transition map is illustrated below. +// +// Container: None -> Pulled -> Created -> Running -> Provisioned -> Stopped -> Zombie +// +// Task : None -> Created -> Running -> Stopped +func MapContainerToTaskStatus(knownState apicontainerstatus.ContainerStatus, steadyState apicontainerstatus.ContainerStatus) TaskStatus { + switch knownState { + case apicontainerstatus.ContainerStatusNone: + return TaskStatusNone + case steadyState: + return TaskRunning + case apicontainerstatus.ContainerCreated: + return TaskCreated + case apicontainerstatus.ContainerStopped: + return TaskStopped + } + + if knownState == apicontainerstatus.ContainerRunning && steadyState == apicontainerstatus.ContainerResourcesProvisioned { + return TaskCreated + } + + return TaskStatusNone +} + +// MapTaskToContainerStatus maps the task status to the corresponding container status +func MapTaskToContainerStatus(desiredState TaskStatus, steadyState apicontainerstatus.ContainerStatus) apicontainerstatus.ContainerStatus { + switch desiredState { + case TaskStatusNone: + return apicontainerstatus.ContainerStatusNone + case TaskCreated: + return apicontainerstatus.ContainerCreated + case TaskRunning: + return steadyState + case TaskStopped: + return apicontainerstatus.ContainerStopped + } + return apicontainerstatus.ContainerStatusNone +} diff --git a/agent/api/task/status/statusmapping_test.go b/agent/api/task/status/statusmapping_test.go new file mode 100644 index 00000000000..52fdc474416 --- /dev/null +++ b/agent/api/task/status/statusmapping_test.go @@ -0,0 +1,58 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "testing" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/stretchr/testify/assert" +) + +func TestTaskStatus(t *testing.T) { + // Effectively set containerStatus := ContainerStatusNone, we expect the task state + // to be TaskStatusNone + var containerStatus apicontainerstatus.ContainerStatus + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerRunning), TaskStatusNone) + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskStatusNone) + + // When container state is PULLED, Task state is still NONE + containerStatus = apicontainerstatus.ContainerPulled + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerRunning), TaskStatusNone) + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskStatusNone) + + // When container state is CREATED, Task state is CREATED as well + containerStatus = apicontainerstatus.ContainerCreated + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerRunning), TaskCreated) + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskCreated) + + containerStatus = apicontainerstatus.ContainerRunning + // When container state is RUNNING and steadyState is RUNNING, Task state is RUNNING as well + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerRunning), TaskRunning) + // When container state is 
RUNNING and steadyState is RESOURCES_PROVISIONED, Task state + // still CREATED + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskCreated) + + containerStatus = apicontainerstatus.ContainerResourcesProvisioned + // When container state is RESOURCES_PROVISIONED and steadyState is RESOURCES_PROVISIONED, + // Task state is RUNNING + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskRunning) + + // When container state is STOPPED, Task state is STOPPED as well + containerStatus = apicontainerstatus.ContainerStopped + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerRunning), TaskStopped) + assert.Equal(t, MapContainerToTaskStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), TaskStopped) +} diff --git a/agent/api/task/status/taskstatus.go b/agent/api/task/status/taskstatus.go new file mode 100644 index 00000000000..04f44ed30fa --- /dev/null +++ b/agent/api/task/status/taskstatus.go @@ -0,0 +1,123 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package status + +import ( + "errors" + "strings" +) + +const ( + // TaskStatusNone is the zero state of a task; this task has been received but no further progress has completed + TaskStatusNone TaskStatus = iota + // TaskPulled represents a task which has had all its container images pulled, but not all have yet progressed passed pull + TaskPulled + // TaskCreated represents a task which has had all its containers created + TaskCreated + // TaskRunning represents a task which has had all its containers started + TaskRunning + // TaskStopped represents a task in which all containers are stopped + TaskStopped + // TaskZombie is an "impossible" state that is used as the maximum + TaskZombie + // TaskStoppedString represents task stopped status string + TaskStoppedString = "STOPPED" + // TaskRunningString represents task running status string + TaskRunningString = "RUNNING" + //TaskCreatedString represents task created status string + TaskCreatedString = "CREATED" + // TaskNoneString represents task none status string + TaskNoneString = "NONE" +) + +// TaskStatus is an enumeration of valid states in the task lifecycle +type TaskStatus int32 + +var taskStatusMap = map[string]TaskStatus{ + TaskNoneString: TaskStatusNone, + TaskCreatedString: TaskCreated, + TaskRunningString: TaskRunning, + TaskStoppedString: TaskStopped, +} + +// String returns a human readable string representation of this object +func (ts TaskStatus) String() string { + for k, v := range taskStatusMap { + if v == ts { + return k + } + } + return "NONE" +} + +// BackendStatus maps the internal task status in the agent to that in the backend +func (ts *TaskStatus) BackendStatus() string { + switch *ts { + case TaskRunning: + fallthrough + case TaskStopped: + return ts.String() + } + return "PENDING" +} + +// BackendRecognized returns true if the task status is recognized as a valid state +// by ECS. 
Note that not all task statuses are recognized by ECS or map to ECS +// states +func (ts *TaskStatus) BackendRecognized() bool { + return *ts == TaskRunning || *ts == TaskStopped +} + +// Terminal returns true if the Task status is STOPPED +func (ts TaskStatus) Terminal() bool { + return ts == TaskStopped +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded TaskStatus data +func (ts *TaskStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *ts = TaskStatusNone + return nil + } + if b[0] != '"' || b[len(b)-1] != '"' { + *ts = TaskStatusNone + return errors.New("TaskStatus must be a string or null") + } + strStatus := string(b[1 : len(b)-1]) + // 'UNKNOWN' and 'DEAD' for Compatibility with v1.0.0 state files + if strStatus == "UNKNOWN" { + *ts = TaskStatusNone + return nil + } + if strStatus == "DEAD" { + *ts = TaskStopped + return nil + } + + stat, ok := taskStatusMap[strStatus] + if !ok { + *ts = TaskStatusNone + return errors.New("Unrecognized TaskStatus") + } + *ts = stat + return nil +} + +// MarshalJSON overrides the logic for JSON-encoding the TaskStatus type +func (ts *TaskStatus) MarshalJSON() ([]byte, error) { + if ts == nil { + return nil, nil + } + return []byte(`"` + ts.String() + `"`), nil +} diff --git a/agent/api/task/status/taskstatus_test.go b/agent/api/task/status/taskstatus_test.go new file mode 100644 index 00000000000..5692b0c569c --- /dev/null +++ b/agent/api/task/status/taskstatus_test.go @@ -0,0 +1,46 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package status + +import ( + "encoding/json" + "testing" +) + +type testTaskStatus struct { + SomeStatus TaskStatus `json:"status"` +} + +func TestUnmarshalTaskStatus(t *testing.T) { + status := TaskStatusNone + + err := json.Unmarshal([]byte(`"RUNNING"`), &status) + if err != nil { + t.Error(err) + } + if status != TaskRunning { + t.Error("RUNNING should unmarshal to RUNNING, not " + status.String()) + } + + var test testTaskStatus + err = json.Unmarshal([]byte(`{"status":"STOPPED"}`), &test) + if err != nil { + t.Error(err) + } + if test.SomeStatus != TaskStopped { + t.Error("STOPPED should unmarshal to STOPPED, not " + test.SomeStatus.String()) + } +} diff --git a/agent/api/task/task.go b/agent/api/task/task.go new file mode 100644 index 00000000000..5f91d302af9 --- /dev/null +++ b/agent/api/task/task.go @@ -0,0 +1,2709 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/nat" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apiappmesh "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + "github.com/aws/amazon-ecs-agent/agent/taskresource/envFiles" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/cihub/seelog" + "github.com/containernetworking/cni/libcni" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" +) + +const ( + // 
NetworkPauseContainerName is the internal name for the pause container + NetworkPauseContainerName = "~internal~ecs~pause" + + // NamespacePauseContainerName is the internal name for the IPC resource namespace and/or + // PID namespace sharing pause container + NamespacePauseContainerName = "~internal~ecs~pause~namespace" + + emptyHostVolumeName = "~internal~ecs-emptyvolume-source" + + // awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment + // variable in containers' config, which will be used by the AWS SDK to fetch + // credentials. + awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + + NvidiaVisibleDevicesEnvVar = "NVIDIA_VISIBLE_DEVICES" + GPUAssociationType = "gpu" + + // neuronRuntime is the name of the neuron docker runtime. + neuronRuntime = "neuron" + + ContainerOrderingCreateCondition = "CREATE" + ContainerOrderingStartCondition = "START" + + arnResourceDelimiter = "/" + // networkModeNone specifies the string used to define the `none` docker networking mode + networkModeNone = "none" + // dockerMappingContainerPrefix specifies the prefix string used for setting the + // container's option (network, ipc, or pid) to that of another existing container + dockerMappingContainerPrefix = "container:" + + // awslogsCredsEndpointOpt is the awslogs option that is used to pass in an + // http endpoint for authentication + awslogsCredsEndpointOpt = "awslogs-credentials-endpoint" + // These contants identify the docker flag options + pidModeHost = "host" + pidModeTask = "task" + ipcModeHost = "host" + ipcModeTask = "task" + ipcModeSharable = "shareable" + ipcModeNone = "none" + + // firelensConfigBindFormatFluentd and firelensConfigBindFormatFluentbit specify the format of the firelens + // config file bind mount for fluentd and fluentbit firelens container respectively. + // First placeholder is host data dir, second placeholder is taskID. 
+ firelensConfigBindFormatFluentd = "%s/data/firelens/%s/config/fluent.conf:/fluentd/etc/fluent.conf" + firelensConfigBindFormatFluentbit = "%s/data/firelens/%s/config/fluent.conf:/fluent-bit/etc/fluent-bit.conf" + + // firelensS3ConfigBindFormat specifies the format of the bind mount for the firelens config file downloaded from S3. + // First placeholder is host data dir, second placeholder is taskID, third placeholder is the s3 config path inside + // the firelens container. + firelensS3ConfigBindFormat = "%s/data/firelens/%s/config/external.conf:%s" + + // firelensSocketBindFormat specifies the format for firelens container's socket directory bind mount. + // First placeholder is host data dir, second placeholder is taskID. + firelensSocketBindFormat = "%s/data/firelens/%s/socket/:/var/run/" + // firelensDriverName is the log driver name for containers that want to use the firelens container to send logs. + firelensDriverName = "awsfirelens" + + // firelensConfigVarFmt specifies the format for firelens config variable name. The first placeholder + // is option name. The second placeholder is the index of the container in the task's container list, appended + // for the purpose of avoiding config vars from different containers within a task collide (note: can't append + // container name here because it may contain hyphen which will break the config var resolution (see PR 2164 for + // details), and can't append container ID either because we need the config var in PostUnmarshalTask, which is + // before all the containers being created). + firelensConfigVarFmt = "%s_%d" + // firelensConfigVarPlaceholderFmtFluentd and firelensConfigVarPlaceholderFmtFluentbit specify the config var + // placeholder format expected by fluentd and fluentbit respectively. + firelensConfigVarPlaceholderFmtFluentd = "\"#{ENV['%s']}\"" + firelensConfigVarPlaceholderFmtFluentbit = "${%s}" + + // awsExecutionEnvKey is the key of the env specifying the execution environment. 
+ awsExecutionEnvKey = "AWS_EXECUTION_ENV" + // ec2ExecutionEnv specifies the ec2 execution environment. + ec2ExecutionEnv = "AWS_ECS_EC2" + + // specifies bridge type mode for a task + BridgeNetworkMode = "bridge" + + // specifies awsvpc type mode for a task + AWSVPCNetworkMode = "awsvpc" + + // disableIPv6SysctlKey specifies the setting that controls whether ipv6 is disabled. + disableIPv6SysctlKey = "net.ipv6.conf.all.disable_ipv6" + // sysctlValueOff specifies the value to use to turn off a sysctl setting. + sysctlValueOff = "0" +) + +// TaskOverrides are the overrides applied to a task +type TaskOverrides struct{} + +// Task is the internal representation of a task in the ECS agent +type Task struct { + // Arn is the unique identifier for the task + Arn string + // Overrides are the overrides applied to a task + Overrides TaskOverrides `json:"-"` + // Family is the name of the task definition family + Family string + // Version is the version of the task definition + Version string + // Containers are the containers for the task + Containers []*apicontainer.Container + // Associations are the available associations for the task. + Associations []Association `json:"associations"` + // ResourcesMapUnsafe is the map of resource type to corresponding resources + ResourcesMapUnsafe resourcetype.ResourcesMap `json:"resources"` + // Volumes are the volumes for the task + Volumes []TaskVolume `json:"volumes"` + // CPU is a task-level limit for compute resources. A value of 1 means that + // the task may access 100% of 1 vCPU on the instance + CPU float64 `json:"Cpu,omitempty"` + // Memory is a task-level limit for memory resources in bytes + Memory int64 `json:"Memory,omitempty"` + // DesiredStatusUnsafe represents the state where the task should go. Generally, + // the desired status is informed by the ECS backend as a result of either + // API calls made to ECS or decisions made by the ECS service scheduler. 
+ // The DesiredStatusUnsafe is almost always either apitaskstatus.TaskRunning or apitaskstatus.TaskStopped. + // NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`, + // `UpdateDesiredStatus`, `SetDesiredStatus`, and `SetDesiredStatus`. + // TODO DesiredStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON + // is handled properly so that the state storage continues to work. + DesiredStatusUnsafe apitaskstatus.TaskStatus `json:"DesiredStatus"` + + // KnownStatusUnsafe represents the state where the task is. This is generally + // the minimum of equivalent status types for the containers in the task; + // if one container is at ContainerRunning and another is at ContainerPulled, + // the task KnownStatusUnsafe would be TaskPulled. + // NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`, + // and `GetKnownStatus`. + // TODO KnownStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON + // is handled properly so that the state storage continues to work. + KnownStatusUnsafe apitaskstatus.TaskStatus `json:"KnownStatus"` + // KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated. + // NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`. 
+ KnownStatusTimeUnsafe time.Time `json:"KnownTime"` + + // PullStartedAtUnsafe is the timestamp when the task start pulling the first container, + // it won't be set if the pull never happens + PullStartedAtUnsafe time.Time `json:"PullStartedAt"` + // PullStoppedAtUnsafe is the timestamp when the task finished pulling the last container, + // it won't be set if the pull never happens + PullStoppedAtUnsafe time.Time `json:"PullStoppedAt"` + // ExecutionStoppedAtUnsafe is the timestamp when the task desired status moved to stopped, + // which is when the any of the essential containers stopped + ExecutionStoppedAtUnsafe time.Time `json:"ExecutionStoppedAt"` + + // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API. + // TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters. + // TODO SentStatusUnsafe should probably be private with appropriately written + // setter/getter. When this is done, we need to ensure that the UnmarshalJSON + // is handled properly so that the state storage continues to work. + SentStatusUnsafe apitaskstatus.TaskStatus `json:"SentStatus"` + + StartSequenceNumber int64 + StopSequenceNumber int64 + + // ExecutionCredentialsID is the ID of credentials that are used by agent to + // perform some action at the task level, such as pulling image from ECR + ExecutionCredentialsID string `json:"executionCredentialsID"` + + // credentialsID is used to set the CredentialsId field for the + // IAMRoleCredentials object associated with the task. This id can be + // used to look up the credentials for task in the credentials manager + credentialsID string + credentialsRelativeURIUnsafe string + + // ENIs is the list of Elastic Network Interfaces assigned to this task. The + // TaskENIs type is helpful when decoding state files which might have stored + // ENIs as a single ENI object instead of a list. 
+ ENIs TaskENIs `json:"ENI"` + + // AppMesh is the service mesh specified by the task + AppMesh *apiappmesh.AppMesh + + // MemoryCPULimitsEnabled to determine if task supports CPU, memory limits + MemoryCPULimitsEnabled bool `json:"MemoryCPULimitsEnabled,omitempty"` + + // PlatformFields consists of fields specific to linux/windows for a task + PlatformFields PlatformFields `json:"PlatformFields,omitempty"` + + // terminalReason should be used when we explicitly move a task to stopped. + // This ensures the task object carries some context for why it was explicitly + // stoppped. + terminalReason string + terminalReasonOnce sync.Once + + // PIDMode is used to determine how PID namespaces are organized between + // containers of the Task + PIDMode string `json:"PidMode,omitempty"` + + // IPCMode is used to determine how IPC resources should be shared among + // containers of the Task + IPCMode string `json:"IpcMode,omitempty"` + + // NvidiaRuntime is the runtime to pass Nvidia GPU devices to containers + NvidiaRuntime string `json:"NvidiaRuntime,omitempty"` + + // LocalIPAddressUnsafe stores the local IP address allocated to the bridge that connects the task network + // namespace and the host network namespace, for tasks in awsvpc network mode (tasks in other network mode won't + // have a value for this). This field should be accessed via GetLocalIPAddress and SetLocalIPAddress. + LocalIPAddressUnsafe string `json:"LocalIPAddress,omitempty"` + + // LaunchType is the launch type of this task. 
+ LaunchType string `json:"LaunchType,omitempty"` + + // lock is for protecting all fields in the task struct + lock sync.RWMutex +} + +// TaskFromACS translates ecsacs.Task to apitask.Task by first marshaling the received +// ecsacs.Task to json and unmarshaling it as apitask.Task +func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) { + data, err := jsonutil.BuildJSON(acsTask) + if err != nil { + return nil, err + } + task := &Task{} + if err := json.Unmarshal(data, task); err != nil { + return nil, err + } + if task.GetDesiredStatus() == apitaskstatus.TaskRunning && envelope.SeqNum != nil { + task.StartSequenceNumber = *envelope.SeqNum + } else if task.GetDesiredStatus() == apitaskstatus.TaskStopped && envelope.SeqNum != nil { + task.StopSequenceNumber = *envelope.SeqNum + } + + // Overrides the container command if it's set + for _, container := range task.Containers { + if (container.Overrides != apicontainer.ContainerOverrides{}) && container.Overrides.Command != nil { + container.Command = *container.Overrides.Command + } + container.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + } + + //initialize resources map for task + task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + return task, nil +} + +func (task *Task) initializeVolumes(cfg *config.Config, dockerClient dockerapi.DockerClient, ctx context.Context) error { + err := task.initializeDockerLocalVolumes(dockerClient, ctx) + if err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + err = task.initializeDockerVolumes(cfg.SharedVolumeMatchFullConfig.Enabled(), dockerClient, ctx) + if err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + err = task.initializeEFSVolumes(cfg, dockerClient, ctx) + if err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + return nil +} + +// PostUnmarshalTask is run after a task has been unmarshalled, 
but before it has been +// run. It is possible it will be subsequently called after that and should be +// able to handle such an occurrence appropriately (e.g. behave idempotently). +func (task *Task) PostUnmarshalTask(cfg *config.Config, + credentialsManager credentials.Manager, resourceFields *taskresource.ResourceFields, + dockerClient dockerapi.DockerClient, ctx context.Context, options ...Option) error { + // TODO, add rudimentary plugin support and call any plugins that want to + // hook into this + task.adjustForPlatform(cfg) + if task.MemoryCPULimitsEnabled { + if err := task.initializeCgroupResourceSpec(cfg.CgroupPath, cfg.CgroupCPUPeriod, resourceFields); err != nil { + seelog.Errorf("Task [%s]: could not intialize resource: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + } + + if err := task.initializeContainerOrderingForVolumes(); err != nil { + seelog.Errorf("Task [%s]: could not initialize volumes dependency for container: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + + if err := task.initializeContainerOrderingForLinks(); err != nil { + seelog.Errorf("Task [%s]: could not initialize links dependency for container: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + + task.initSecretResources(credentialsManager, resourceFields) + + task.initializeCredentialsEndpoint(credentialsManager) + + // NOTE: initializeVolumes needs to be after initializeCredentialsEndpoint, because EFS volume might + // need the credentials endpoint constructed by it. 
+ if err := task.initializeVolumes(cfg, dockerClient, ctx); err != nil { + return err + } + + if err := task.addGPUResource(cfg); err != nil { + seelog.Errorf("Task [%s]: could not initialize GPU associations: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + + task.initializeContainersV3MetadataEndpoint(utils.NewDynamicUUIDProvider()) + task.initializeContainersV4MetadataEndpoint(utils.NewDynamicUUIDProvider()) + if err := task.addNetworkResourceProvisioningDependency(cfg); err != nil { + seelog.Errorf("Task [%s]: could not provision network resource: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + // Adds necessary Pause containers for sharing PID or IPC namespaces + task.addNamespaceSharingProvisioningDependency(cfg) + + if err := task.applyFirelensSetup(cfg, resourceFields, credentialsManager); err != nil { + return err + } + + if task.requiresCredentialSpecResource() { + if err := task.initializeCredentialSpecResource(cfg, credentialsManager, resourceFields); err != nil { + seelog.Errorf("Task [%s]: could not initialize credentialspec resource: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + } + + if err := task.initializeEnvfilesResource(cfg, credentialsManager); err != nil { + seelog.Errorf("Task [%s]: could not initialize environment files resource: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + task.populateTaskARN() + + // fsxWindowsFileserver is the product type -- it is technically "agnostic" ie it should apply to both Windows and Linux tasks + if task.requiresFSxWindowsFileServerResource() { + if err := task.initializeFSxWindowsFileServerResource(cfg, credentialsManager, resourceFields); err != nil { + seelog.Errorf("Task [%s]: could not initialize FSx for Windows File Server resource: %v", task.Arn, err) + return apierrors.NewResourceInitError(task.Arn, err) + } + } + + for _, opt := range options { + if err := opt(task); 
err != nil { + seelog.Errorf("Task [%s]: could not apply task option: %v", task.Arn, err) + return err + } + } + return nil +} + +// populateTaskARN populates the arn of the task to the containers. +func (task *Task) populateTaskARN() { + for _, c := range task.Containers { + c.SetTaskARN(task.Arn) + } +} + +func (task *Task) initSecretResources(credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) { + if task.requiresASMDockerAuthData() { + task.initializeASMAuthResource(credentialsManager, resourceFields) + } + + if task.requiresSSMSecret() { + task.initializeSSMSecretResource(credentialsManager, resourceFields) + } + + if task.requiresASMSecret() { + task.initializeASMSecretResource(credentialsManager, resourceFields) + } +} + +func (task *Task) applyFirelensSetup(cfg *config.Config, resourceFields *taskresource.ResourceFields, + credentialsManager credentials.Manager) error { + firelensContainer := task.GetFirelensContainer() + if firelensContainer != nil { + if err := task.initializeFirelensResource(cfg, resourceFields, firelensContainer, credentialsManager); err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + if err := task.addFirelensContainerDependency(); err != nil { + return errors.New("unable to add firelens container dependency") + } + } + + return nil +} + +func (task *Task) addGPUResource(cfg *config.Config) error { + if cfg.GPUSupportEnabled { + for _, association := range task.Associations { + // One GPU can be associated with only one container + // That is why validating if association.Containers is of length 1 + if association.Type == GPUAssociationType { + if len(association.Containers) != 1 { + return fmt.Errorf("could not associate multiple containers to GPU %s", association.Name) + } + + container, ok := task.ContainerByName(association.Containers[0]) + if !ok { + return fmt.Errorf("could not find container with name %s for associating GPU %s", + association.Containers[0], 
association.Name) + } + container.GPUIDs = append(container.GPUIDs, association.Name) + } + } + task.populateGPUEnvironmentVariables() + task.NvidiaRuntime = cfg.NvidiaRuntime + } + return nil +} + +func (task *Task) isGPUEnabled() bool { + for _, association := range task.Associations { + if association.Type == GPUAssociationType { + return true + } + } + return false +} + +func (task *Task) populateGPUEnvironmentVariables() { + for _, container := range task.Containers { + if len(container.GPUIDs) > 0 { + gpuList := strings.Join(container.GPUIDs, ",") + envVars := make(map[string]string) + envVars[NvidiaVisibleDevicesEnvVar] = gpuList + container.MergeEnvironmentVariables(envVars) + } + } +} + +func (task *Task) shouldRequireNvidiaRuntime(container *apicontainer.Container) bool { + _, ok := container.Environment[NvidiaVisibleDevicesEnvVar] + return ok +} + +func (task *Task) initializeDockerLocalVolumes(dockerClient dockerapi.DockerClient, ctx context.Context) error { + var requiredLocalVolumes []string + for _, container := range task.Containers { + for _, mountPoint := range container.MountPoints { + vol, ok := task.HostVolumeByName(mountPoint.SourceVolume) + if !ok { + continue + } + if localVolume, ok := vol.(*taskresourcevolume.LocalDockerVolume); ok { + localVolume.HostPath = task.volumeName(mountPoint.SourceVolume) + container.BuildResourceDependency(mountPoint.SourceVolume, + resourcestatus.ResourceStatus(taskresourcevolume.VolumeCreated), + apicontainerstatus.ContainerPulled) + requiredLocalVolumes = append(requiredLocalVolumes, mountPoint.SourceVolume) + } + } + } + + if len(requiredLocalVolumes) == 0 { + // No need to create the auxiliary local driver volumes + return nil + } + + // if we have required local volumes, create one with default local drive + for _, volumeName := range requiredLocalVolumes { + vol, _ := task.HostVolumeByName(volumeName) + // BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide + scope := 
taskresourcevolume.TaskScope + localVolume, err := taskresourcevolume.NewVolumeResource(ctx, volumeName, HostVolumeType, + vol.Source(), scope, false, + taskresourcevolume.DockerLocalVolumeDriver, + make(map[string]string), make(map[string]string), dockerClient) + + if err != nil { + return err + } + + task.AddResource(resourcetype.DockerVolumeKey, localVolume) + } + return nil +} + +func (task *Task) volumeName(name string) string { + return "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex() +} + +// initializeDockerVolumes checks the volume resource in the task to determine if the agent +// should create the volume before creating the container +func (task *Task) initializeDockerVolumes(sharedVolumeMatchFullConfig bool, dockerClient dockerapi.DockerClient, ctx context.Context) error { + for i, vol := range task.Volumes { + // No need to do this for non-docker volume, eg: host bind/empty volume + if vol.Type != DockerVolumeType { + continue + } + + dockerVolume, ok := vol.Volume.(*taskresourcevolume.DockerVolumeConfig) + if !ok { + return errors.New("task volume: volume configuration does not match the type 'docker'") + } + // Agent needs to create task-scoped volume + if dockerVolume.Scope == taskresourcevolume.TaskScope { + if err := task.addTaskScopedVolumes(ctx, dockerClient, &task.Volumes[i]); err != nil { + return err + } + } else { + // Agent needs to create shared volume if that's auto provisioned + if err := task.addSharedVolumes(sharedVolumeMatchFullConfig, ctx, dockerClient, &task.Volumes[i]); err != nil { + return err + } + } + } + return nil +} + +// initializeEFSVolumes inspects the volume definitions in the task definition. +// If it finds EFS volumes in the task definition, then it converts it to a docker +// volume definition. 
+func (task *Task) initializeEFSVolumes(cfg *config.Config, dockerClient dockerapi.DockerClient, ctx context.Context) error { + for i, vol := range task.Volumes { + // No need to do this for non-efs volume, eg: host bind/empty volume + if vol.Type != EFSVolumeType { + continue + } + + efsvol, ok := vol.Volume.(*taskresourcevolume.EFSVolumeConfig) + if !ok { + return errors.New("task volume: volume configuration does not match the type 'efs'") + } + + err := task.addEFSVolumes(ctx, cfg, dockerClient, &task.Volumes[i], efsvol) + if err != nil { + return err + } + } + return nil +} + +// addEFSVolumes converts the EFS task definition into an internal docker 'local' volume +// mounted with NFS struct and updates container dependency +func (task *Task) addEFSVolumes( + ctx context.Context, + cfg *config.Config, + dockerClient dockerapi.DockerClient, + vol *TaskVolume, + efsvol *taskresourcevolume.EFSVolumeConfig, +) error { + driverOpts := taskresourcevolume.GetDriverOptions(cfg, efsvol, task.GetCredentialsRelativeURI()) + driverName := getEFSVolumeDriverName(cfg) + volumeResource, err := taskresourcevolume.NewVolumeResource( + ctx, + vol.Name, + EFSVolumeType, + task.volumeName(vol.Name), + "task", + false, + driverName, + driverOpts, + map[string]string{}, + dockerClient, + ) + if err != nil { + return err + } + + vol.Volume = &volumeResource.VolumeConfig + task.AddResource(resourcetype.DockerVolumeKey, volumeResource) + task.updateContainerVolumeDependency(vol.Name) + return nil +} + +// addTaskScopedVolumes adds the task scoped volume into task resources and updates container dependency +func (task *Task) addTaskScopedVolumes(ctx context.Context, dockerClient dockerapi.DockerClient, + vol *TaskVolume) error { + + volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig) + volumeResource, err := taskresourcevolume.NewVolumeResource( + ctx, + vol.Name, + DockerVolumeType, + task.volumeName(vol.Name), + volumeConfig.Scope, volumeConfig.Autoprovision, + 
volumeConfig.Driver, volumeConfig.DriverOpts, + volumeConfig.Labels, dockerClient) + if err != nil { + return err + } + + vol.Volume = &volumeResource.VolumeConfig + task.AddResource(resourcetype.DockerVolumeKey, volumeResource) + task.updateContainerVolumeDependency(vol.Name) + return nil +} + +// addSharedVolumes adds shared volume into task resources and updates container dependency +func (task *Task) addSharedVolumes(SharedVolumeMatchFullConfig bool, ctx context.Context, dockerClient dockerapi.DockerClient, + vol *TaskVolume) error { + + volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig) + volumeConfig.DockerVolumeName = vol.Name + // if autoprovision == true, we will auto-provision the volume if it does not exist already + // else the named volume must exist + if !volumeConfig.Autoprovision { + volumeMetadata := dockerClient.InspectVolume(ctx, vol.Name, dockerclient.InspectVolumeTimeout) + if volumeMetadata.Error != nil { + return errors.Wrapf(volumeMetadata.Error, "initialize volume: volume detection failed, volume '%s' does not exist and autoprovision is set to false", vol.Name) + } + return nil + } + + // at this point we know autoprovision = true + // check if the volume configuration matches the one exists on the instance + volumeMetadata := dockerClient.InspectVolume(ctx, volumeConfig.DockerVolumeName, dockerclient.InspectVolumeTimeout) + if volumeMetadata.Error != nil { + // Inspect the volume timed out, fail the task + if _, ok := volumeMetadata.Error.(*dockerapi.DockerTimeoutError); ok { + return volumeMetadata.Error + } + + seelog.Infof("initialize volume: Task [%s]: non-autoprovisioned volume not found, adding to task resource %q", task.Arn, vol.Name) + // this resource should be created by agent + volumeResource, err := taskresourcevolume.NewVolumeResource( + ctx, + vol.Name, + DockerVolumeType, + vol.Name, + volumeConfig.Scope, volumeConfig.Autoprovision, + volumeConfig.Driver, volumeConfig.DriverOpts, + volumeConfig.Labels, 
dockerClient) + if err != nil { + return err + } + + task.AddResource(resourcetype.DockerVolumeKey, volumeResource) + task.updateContainerVolumeDependency(vol.Name) + return nil + } + + seelog.Infof("initialize volume: Task [%s]: volume [%s] already exists", task.Arn, volumeConfig.DockerVolumeName) + if !SharedVolumeMatchFullConfig { + seelog.Infof("initialize volume: Task [%s]: ECS_SHARED_VOLUME_MATCH_FULL_CONFIG is set to false and volume with name [%s] is found", task.Arn, volumeConfig.DockerVolumeName) + return nil + } + + // validate all the volume metadata fields match to the configuration + if len(volumeMetadata.DockerVolume.Labels) == 0 && len(volumeMetadata.DockerVolume.Labels) == len(volumeConfig.Labels) { + seelog.Infof("labels are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName) + } else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) { + return errors.Errorf("intialize volume: non-autoprovisioned volume does not match existing volume labels: existing: %v, expected: %v", + volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) + } + + if len(volumeMetadata.DockerVolume.Options) == 0 && len(volumeMetadata.DockerVolume.Options) == len(volumeConfig.DriverOpts) { + seelog.Infof("driver options are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName) + } else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) { + return errors.Errorf("initialize volume: non-autoprovisioned volume does not match existing volume options: existing: %v, expected: %v", + volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) + } + // Right now we are not adding shared, autoprovision = true volume to task as resource if it already exists (i.e. when this task didn't create the volume). + // if we need to change that, make a call to task.AddResource here. 
+ return nil +} + +// updateContainerVolumeDependency adds the volume resource to container dependency +func (task *Task) updateContainerVolumeDependency(name string) { + // Find all the container that depends on the volume + for _, container := range task.Containers { + for _, mountpoint := range container.MountPoints { + if mountpoint.SourceVolume == name { + container.BuildResourceDependency(name, + resourcestatus.ResourceCreated, + apicontainerstatus.ContainerPulled) + } + } + } +} + +// initializeCredentialsEndpoint sets the credentials endpoint for all containers in a task if needed. +func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) { + id := task.GetCredentialsID() + if id == "" { + // No credentials set for the task. Do not inject the endpoint environment variable. + return + } + taskCredentials, ok := credentialsManager.GetTaskCredentials(id) + if !ok { + // Task has credentials id set, but credentials manager is unaware of + // the id. This should never happen as the payload handler sets + // credentialsId for the task after adding credentials to the + // credentials manager + seelog.Errorf("Unable to get credentials for task: %s", task.Arn) + return + } + + credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI() + for _, container := range task.Containers { + // container.Environment map would not be initialized if there are + // no environment variables to be set or overridden in the container + // config. 
Check if that's the case and initialize if needed + if container.Environment == nil { + container.Environment = make(map[string]string) + } + container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI + } + + task.SetCredentialsRelativeURI(credentialsEndpointRelativeURI) +} + +// initializeContainersV3MetadataEndpoint generates a v3 endpoint id for each container, constructs the +// v3 metadata endpoint, and injects it as an environment variable +func (task *Task) initializeContainersV3MetadataEndpoint(uuidProvider utils.UUIDProvider) { + for _, container := range task.Containers { + v3EndpointID := container.GetV3EndpointID() + if v3EndpointID == "" { // if container's v3 endpoint has not been set + container.SetV3EndpointID(uuidProvider.New()) + } + + container.InjectV3MetadataEndpoint() + } +} + +// initializeContainersV4MetadataEndpoint generates a v4 endpoint id which we reuse the v3 container id +// (they are the same) for each container, constructs the v4 metadata endpoint, +// and injects it as an environment variable +func (task *Task) initializeContainersV4MetadataEndpoint(uuidProvider utils.UUIDProvider) { + for _, container := range task.Containers { + v3EndpointID := container.GetV3EndpointID() + if v3EndpointID == "" { // if container's v3 endpoint has not been set + container.SetV3EndpointID(uuidProvider.New()) + } + + container.InjectV4MetadataEndpoint() + } +} + +// requiresASMDockerAuthData returns true if at least one container in the task +// needs to retrieve private registry authentication data from ASM +func (task *Task) requiresASMDockerAuthData() bool { + for _, container := range task.Containers { + if container.ShouldPullWithASMAuth() { + return true + } + } + return false +} + +// initializeASMAuthResource builds the resource dependency map for the ASM auth resource +func (task *Task) initializeASMAuthResource(credentialsManager credentials.Manager, + resourceFields 
*taskresource.ResourceFields) { + asmAuthResource := asmauth.NewASMAuthResource(task.Arn, task.getAllASMAuthDataRequirements(), + task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator) + task.AddResource(asmauth.ResourceName, asmAuthResource) + for _, container := range task.Containers { + if container.ShouldPullWithASMAuth() { + container.BuildResourceDependency(asmAuthResource.GetName(), + resourcestatus.ResourceStatus(asmauth.ASMAuthStatusCreated), + apicontainerstatus.ContainerPulled) + } + } +} + +func (task *Task) getAllASMAuthDataRequirements() []*apicontainer.ASMAuthData { + var reqs []*apicontainer.ASMAuthData + for _, container := range task.Containers { + if container.ShouldPullWithASMAuth() { + reqs = append(reqs, container.RegistryAuthentication.ASMAuthData) + } + } + return reqs +} + +// requiresSSMSecret returns true if at least one container in the task +// needs to retrieve secret from SSM parameter +func (task *Task) requiresSSMSecret() bool { + for _, container := range task.Containers { + if container.ShouldCreateWithSSMSecret() { + return true + } + } + return false +} + +// initializeSSMSecretResource builds the resource dependency map for the SSM ssmsecret resource +func (task *Task) initializeSSMSecretResource(credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) { + ssmSecretResource := ssmsecret.NewSSMSecretResource(task.Arn, task.getAllSSMSecretRequirements(), + task.ExecutionCredentialsID, credentialsManager, resourceFields.SSMClientCreator) + task.AddResource(ssmsecret.ResourceName, ssmSecretResource) + + // for every container that needs ssm secret vending as env, it needs to wait all secrets got retrieved + for _, container := range task.Containers { + if container.ShouldCreateWithSSMSecret() { + container.BuildResourceDependency(ssmSecretResource.GetName(), + resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated), + apicontainerstatus.ContainerCreated) + } + + // Firelens 
container needs to depend on secret if other containers use secret log options. + if container.GetFirelensConfig() != nil && task.firelensDependsOnSecretResource(apicontainer.SecretProviderSSM) { + container.BuildResourceDependency(ssmSecretResource.GetName(), + resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated), + apicontainerstatus.ContainerCreated) + } + } +} + +// firelensDependsOnSecretResource checks whether the firelens container needs to depend on a secret resource of +// a certain provider type. +func (task *Task) firelensDependsOnSecretResource(secretProvider string) bool { + isLogDriverSecretWithGivenProvider := func(s apicontainer.Secret) bool { + return s.Provider == secretProvider && s.Target == apicontainer.SecretTargetLogDriver + } + for _, container := range task.Containers { + if container.GetLogDriver() == firelensDriverName && container.HasSecret(isLogDriverSecretWithGivenProvider) { + return true + } + } + return false +} + +// getAllSSMSecretRequirements stores all secrets in a map whose key is region and value is all +// secrets in that region +func (task *Task) getAllSSMSecretRequirements() map[string][]apicontainer.Secret { + reqs := make(map[string][]apicontainer.Secret) + + for _, container := range task.Containers { + for _, secret := range container.Secrets { + if secret.Provider == apicontainer.SecretProviderSSM { + if _, ok := reqs[secret.Region]; !ok { + reqs[secret.Region] = []apicontainer.Secret{} + } + + reqs[secret.Region] = append(reqs[secret.Region], secret) + } + } + } + return reqs +} + +// requiresASMSecret returns true if at least one container in the task +// needs to retrieve secret from AWS Secrets Manager +func (task *Task) requiresASMSecret() bool { + for _, container := range task.Containers { + if container.ShouldCreateWithASMSecret() { + return true + } + } + return false +} + +// initializeASMSecretResource builds the resource dependency map for the asmsecret resource +func (task *Task) 
initializeASMSecretResource(credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) { + asmSecretResource := asmsecret.NewASMSecretResource(task.Arn, task.getAllASMSecretRequirements(), + task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator) + task.AddResource(asmsecret.ResourceName, asmSecretResource) + + // for every container that needs asm secret vending as envvar, it needs to wait all secrets got retrieved + for _, container := range task.Containers { + if container.ShouldCreateWithASMSecret() { + container.BuildResourceDependency(asmSecretResource.GetName(), + resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated), + apicontainerstatus.ContainerCreated) + } + + // Firelens container needs to depend on secret if other containers use secret log options. + if container.GetFirelensConfig() != nil && task.firelensDependsOnSecretResource(apicontainer.SecretProviderASM) { + container.BuildResourceDependency(asmSecretResource.GetName(), + resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated), + apicontainerstatus.ContainerCreated) + } + } +} + +// getAllASMSecretRequirements stores secrets in a task in a map +func (task *Task) getAllASMSecretRequirements() map[string]apicontainer.Secret { + reqs := make(map[string]apicontainer.Secret) + + for _, container := range task.Containers { + for _, secret := range container.Secrets { + if secret.Provider == apicontainer.SecretProviderASM { + secretKey := secret.GetSecretResourceCacheKey() + if _, ok := reqs[secretKey]; !ok { + reqs[secretKey] = secret + } + } + } + } + return reqs +} + +// GetFirelensContainer returns the firelens container in the task, if there is one. +func (task *Task) GetFirelensContainer() *apicontainer.Container { + for _, container := range task.Containers { + if container.GetFirelensConfig() != nil { // This is a firelens container. 
+ return container + } + } + return nil +} + +// initializeFirelensResource initializes the firelens task resource and adds it as a dependency of the +// firelens container. +func (task *Task) initializeFirelensResource(config *config.Config, resourceFields *taskresource.ResourceFields, + firelensContainer *apicontainer.Container, credentialsManager credentials.Manager) error { + if firelensContainer.GetFirelensConfig() == nil { + return errors.New("firelens container config doesn't exist") + } + + containerToLogOptions := make(map[string]map[string]string) + // Collect plain text log options. + if err := task.collectFirelensLogOptions(containerToLogOptions); err != nil { + return errors.Wrap(err, "unable to initialize firelens resource") + } + + // Collect secret log options. + if err := task.collectFirelensLogEnvOptions(containerToLogOptions, firelensContainer.FirelensConfig.Type); err != nil { + return errors.Wrap(err, "unable to initialize firelens resource") + } + + for _, container := range task.Containers { + firelensConfig := container.GetFirelensConfig() + if firelensConfig != nil { + var ec2InstanceID string + if container.Environment != nil && container.Environment[awsExecutionEnvKey] == ec2ExecutionEnv { + ec2InstanceID = resourceFields.EC2InstanceID + } + + var networkMode string + if task.IsNetworkModeAWSVPC() { + networkMode = AWSVPCNetworkMode + } else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == BridgeNetworkMode { + networkMode = BridgeNetworkMode + } else { + networkMode = container.GetNetworkModeFromHostConfig() + } + firelensResource, err := firelens.NewFirelensResource(config.Cluster, task.Arn, task.Family+":"+task.Version, + ec2InstanceID, config.DataDir, firelensConfig.Type, config.AWSRegion, networkMode, firelensConfig.Options, containerToLogOptions, + credentialsManager, task.ExecutionCredentialsID) + if err != nil { + return errors.Wrap(err, "unable to initialize firelens resource") + } 
+ task.AddResource(firelens.ResourceName, firelensResource) + container.BuildResourceDependency(firelensResource.GetName(), resourcestatus.ResourceCreated, + apicontainerstatus.ContainerCreated) + return nil + } + } + + return errors.New("unable to initialize firelens resource because there's no firelens container") +} + +// addFirelensContainerDependency adds a START dependency between each container using awsfirelens log driver +// and the firelens container. +func (task *Task) addFirelensContainerDependency() error { + var firelensContainer *apicontainer.Container + for _, container := range task.Containers { + if container.GetFirelensConfig() != nil { + firelensContainer = container + } + } + + if firelensContainer == nil { + return errors.New("unable to add firelens container dependency because there's no firelens container") + } + + if firelensContainer.HasContainerDependencies() { + // If firelens container has any container dependency, we don't add internal container dependency that depends + // on it in order to be safe (otherwise we need to deal with circular dependency). + seelog.Warnf("Not adding container dependency to let firelens container %s start first, because it has dependency on other containers.", firelensContainer.Name) + return nil + } + + for _, container := range task.Containers { + containerHostConfig := container.GetHostConfig() + if containerHostConfig == nil { + continue + } + + // Firelens container itself could be using awsfirelens log driver. Don't add container dependency in this case. 
+ if container.Name == firelensContainer.Name { + continue + } + + hostConfig := &dockercontainer.HostConfig{} + if err := json.Unmarshal([]byte(*containerHostConfig), hostConfig); err != nil { + return errors.Wrapf(err, "unable to decode host config of container %s", container.Name) + } + + if hostConfig.LogConfig.Type == firelensDriverName { + // If there's no dependency between the app container and the firelens container, make firelens container + // start first to be the default behavior by adding a START container dependency. + if !container.DependsOnContainer(firelensContainer.Name) { + seelog.Infof("Adding a START container dependency on firelens container %s for container %s", + firelensContainer.Name, container.Name) + container.AddContainerDependency(firelensContainer.Name, ContainerOrderingStartCondition) + } + } + } + + return nil +} + +// collectFirelensLogOptions collects the log options for all the containers that use the firelens container +// as the log driver. +// containerToLogOptions is a nested map. Top level key is the container name. Second level is a map storing +// the log option key and value of the container. +func (task *Task) collectFirelensLogOptions(containerToLogOptions map[string]map[string]string) error { + for _, container := range task.Containers { + if container.DockerConfig.HostConfig == nil { + continue + } + + hostConfig := &dockercontainer.HostConfig{} + if err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig); err != nil { + return errors.Wrapf(err, "unable to decode host config of container %s", container.Name) + } + + if hostConfig.LogConfig.Type == firelensDriverName { + if containerToLogOptions[container.Name] == nil { + containerToLogOptions[container.Name] = make(map[string]string) + } + for k, v := range hostConfig.LogConfig.Config { + containerToLogOptions[container.Name][k] = v + } + } + } + + return nil +} + +// collectFirelensLogEnvOptions collects all the log secret options. 
Each secret log option will have a value +// of a config file variable (e.g. "${config_var_name}") and we will pass the secret value as env to the firelens +// container and it will resolve the config file variable from the env. +// Each config variable name has a format of log-option-key_container-name. We need the container name because options +// from different containers using awsfirelens log driver in a task will be presented in the same firelens config file. +func (task *Task) collectFirelensLogEnvOptions(containerToLogOptions map[string]map[string]string, firelensConfigType string) error { + placeholderFmt := "" + switch firelensConfigType { + case firelens.FirelensConfigTypeFluentd: + placeholderFmt = firelensConfigVarPlaceholderFmtFluentd + case firelens.FirelensConfigTypeFluentbit: + placeholderFmt = firelensConfigVarPlaceholderFmtFluentbit + default: + return errors.Errorf("unsupported firelens config type %s", firelensConfigType) + } + + for _, container := range task.Containers { + for _, secret := range container.Secrets { + if secret.Target == apicontainer.SecretTargetLogDriver { + if containerToLogOptions[container.Name] == nil { + containerToLogOptions[container.Name] = make(map[string]string) + } + + idx := task.GetContainerIndex(container.Name) + if idx < 0 { + return errors.Errorf("can't find container %s in task %s", container.Name, task.Arn) + } + containerToLogOptions[container.Name][secret.Name] = fmt.Sprintf(placeholderFmt, + fmt.Sprintf(firelensConfigVarFmt, secret.Name, idx)) + } + } + } + return nil +} + +// AddFirelensContainerBindMounts adds config file bind mount and socket directory bind mount to the firelens +// container's host config. 
+func (task *Task) AddFirelensContainerBindMounts(firelensConfig *apicontainer.FirelensConfig, hostConfig *dockercontainer.HostConfig, + config *config.Config) *apierrors.HostConfigError { + taskID, err := task.GetID() + if err != nil { + return &apierrors.HostConfigError{Msg: err.Error()} + } + + var configBind, s3ConfigBind, socketBind string + switch firelensConfig.Type { + case firelens.FirelensConfigTypeFluentd: + configBind = fmt.Sprintf(firelensConfigBindFormatFluentd, config.DataDirOnHost, taskID) + s3ConfigBind = fmt.Sprintf(firelensS3ConfigBindFormat, config.DataDirOnHost, taskID, firelens.S3ConfigPathFluentd) + case firelens.FirelensConfigTypeFluentbit: + configBind = fmt.Sprintf(firelensConfigBindFormatFluentbit, config.DataDirOnHost, taskID) + s3ConfigBind = fmt.Sprintf(firelensS3ConfigBindFormat, config.DataDirOnHost, taskID, firelens.S3ConfigPathFluentbit) + default: + return &apierrors.HostConfigError{Msg: fmt.Sprintf("encounter invalid firelens configuration type %s", + firelensConfig.Type)} + } + socketBind = fmt.Sprintf(firelensSocketBindFormat, config.DataDirOnHost, taskID) + + hostConfig.Binds = append(hostConfig.Binds, configBind, socketBind) + + // Add the s3 config bind mount if firelens container is using a config file from S3. + if firelensConfig.Options != nil && firelensConfig.Options[firelens.ExternalConfigTypeOption] == firelens.ExternalConfigTypeS3 { + hostConfig.Binds = append(hostConfig.Binds, s3ConfigBind) + } + return nil +} + +// BuildCNIConfig builds a list of CNI network configurations for the task. +// If includeIPAMConfig is set to true, the list also includes the bridge IPAM configuration. 
+func (task *Task) BuildCNIConfig(includeIPAMConfig bool, cniConfig *ecscni.Config) (*ecscni.Config, error) { + if !task.IsNetworkModeAWSVPC() { + return nil, errors.New("task config: task network mode is not AWSVPC") + } + + var netconf *libcni.NetworkConfig + var ifName string + var err error + + // Build a CNI network configuration for each ENI. + for _, eni := range task.ENIs { + switch eni.InterfaceAssociationProtocol { + // If the association protocol is set to "default" or unset (to preserve backwards + // compatibility), consider it a "standard" ENI attachment. + case "", apieni.DefaultInterfaceAssociationProtocol: + cniConfig.ID = eni.MacAddress + ifName, netconf, err = ecscni.NewENINetworkConfig(eni, cniConfig) + case apieni.VLANInterfaceAssociationProtocol: + cniConfig.ID = eni.MacAddress + ifName, netconf, err = ecscni.NewBranchENINetworkConfig(eni, cniConfig) + default: + err = errors.Errorf("task config: unknown interface association type: %s", + eni.InterfaceAssociationProtocol) + } + + if err != nil { + return nil, err + } + + cniConfig.NetworkConfigs = append(cniConfig.NetworkConfigs, &ecscni.NetworkConfig{ + IfName: ifName, + CNINetworkConfig: netconf, + }) + } + + // Build the bridge CNI network configuration. + // All AWSVPC tasks have a bridge network. + ifName, netconf, err = ecscni.NewBridgeNetworkConfig(cniConfig, includeIPAMConfig) + if err != nil { + return nil, err + } + cniConfig.NetworkConfigs = append(cniConfig.NetworkConfigs, &ecscni.NetworkConfig{ + IfName: ifName, + CNINetworkConfig: netconf, + }) + + // Build a CNI network configuration for AppMesh if enabled. 
+ appMeshConfig := task.GetAppMesh() + if appMeshConfig != nil { + ifName, netconf, err = ecscni.NewAppMeshConfig(appMeshConfig, cniConfig) + if err != nil { + return nil, err + } + cniConfig.NetworkConfigs = append(cniConfig.NetworkConfigs, &ecscni.NetworkConfig{ + IfName: ifName, + CNINetworkConfig: netconf, + }) + } + + return cniConfig, nil +} + +// IsNetworkModeAWSVPC checks if the task is configured to use the AWSVPC task networking feature. +func (task *Task) IsNetworkModeAWSVPC() bool { + return len(task.ENIs) > 0 +} + +func (task *Task) addNetworkResourceProvisioningDependency(cfg *config.Config) error { + if !task.IsNetworkModeAWSVPC() { + return nil + } + pauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerResourcesProvisioned) + pauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + pauseContainer.Name = NetworkPauseContainerName + pauseContainer.Image = fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag) + pauseContainer.Essential = true + pauseContainer.Type = apicontainer.ContainerCNIPause + + // Set pauseContainer user the same as proxy container user when image name is not DefaultPauseContainerImageName + if task.GetAppMesh() != nil && cfg.PauseContainerImageName != config.DefaultPauseContainerImageName { + appMeshConfig := task.GetAppMesh() + + // Validation is done when registering task to make sure there is one container name matching + for _, container := range task.Containers { + if container.Name != appMeshConfig.ContainerName { + continue + } + + if container.DockerConfig.Config == nil { + return errors.Errorf("user needs to be specified for proxy container") + } + containerConfig := &dockercontainer.Config{} + if err := json.Unmarshal([]byte(aws.StringValue(container.DockerConfig.Config)), &containerConfig); err != nil { + return errors.Errorf("unable to decode given docker config: %s", err.Error()) + } + + if 
containerConfig.User == "" { + return errors.Errorf("user needs to be specified for proxy container") + } + + pauseConfig := dockercontainer.Config{ + User: containerConfig.User, + Image: fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag), + } + + bytes, err := json.Marshal(pauseConfig) + if err != nil { + return errors.Errorf("Error json marshaling pause config: %s", err) + } + serializedConfig := string(bytes) + pauseContainer.DockerConfig = apicontainer.DockerConfig{ + Config: &serializedConfig, + } + break + } + } + + task.Containers = append(task.Containers, pauseContainer) + + for _, container := range task.Containers { + if container.IsInternal() { + continue + } + container.BuildContainerDependency(NetworkPauseContainerName, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled) + pauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped) + } + + for _, resource := range task.GetResources() { + if resource.DependOnTaskNetwork() { + seelog.Debugf("Task [%s]: adding network pause container dependency to resource [%s]", task.Arn, resource.GetName()) + resource.BuildContainerDependency(NetworkPauseContainerName, apicontainerstatus.ContainerResourcesProvisioned, resourcestatus.ResourceStatus(taskresourcevolume.VolumeCreated)) + } + } + return nil +} + +func (task *Task) addNamespaceSharingProvisioningDependency(cfg *config.Config) { + // Pause container does not need to be created if no namespace sharing will be done at task level + if task.getIPCMode() != ipcModeTask && task.getPIDMode() != pidModeTask { + return + } + namespacePauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerRunning) + namespacePauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + namespacePauseContainer.Name = NamespacePauseContainerName + 
namespacePauseContainer.Image = fmt.Sprintf("%s:%s", config.DefaultPauseContainerImageName, config.DefaultPauseContainerTag) + namespacePauseContainer.Essential = true + namespacePauseContainer.Type = apicontainer.ContainerNamespacePause + task.Containers = append(task.Containers, namespacePauseContainer) + + for _, container := range task.Containers { + if container.IsInternal() { + continue + } + container.BuildContainerDependency(NamespacePauseContainerName, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled) + namespacePauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped) + } +} + +// ContainerByName returns the *Container for the given name +func (task *Task) ContainerByName(name string) (*apicontainer.Container, bool) { + for _, container := range task.Containers { + if container.Name == name { + return container, true + } + } + return nil, false +} + +// HostVolumeByName returns the task Volume for the given a volume name in that +// task. The second return value indicates the presence of that volume +func (task *Task) HostVolumeByName(name string) (taskresourcevolume.Volume, bool) { + for _, v := range task.Volumes { + if v.Name == name { + return v.Volume, true + } + } + return nil, false +} + +// UpdateMountPoints updates the mount points of volumes that were created +// without specifying a host path. This is used as part of the empty host +// volume feature. 
+func (task *Task) UpdateMountPoints(cont *apicontainer.Container, vols []types.MountPoint) { + for _, mountPoint := range cont.MountPoints { + containerPath := utils.GetCanonicalPath(mountPoint.ContainerPath) + for _, vol := range vols { + if strings.Compare(vol.Destination, containerPath) == 0 || + // /path/ -> /path or \path\ -> \path + strings.Compare(vol.Destination, strings.TrimRight(containerPath, string(filepath.Separator))) == 0 { + if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists { + if empty, ok := hostVolume.(*taskresourcevolume.LocalDockerVolume); ok { + empty.HostPath = vol.Source + } + } + } + } + } +} + +// updateTaskKnownStatus updates the given task's status based on its container's status. +// It updates to the minimum of all containers no matter what +// It returns a TaskStatus indicating what change occurred or TaskStatusNone if +// there was no change +// Invariant: task known status is the minimum of container known status +func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) { + seelog.Debugf("api/task: Updating task's known status, task: %s", task.String()) + // Set to a large 'impossible' status that can't be the min + containerEarliestKnownStatus := apicontainerstatus.ContainerZombie + var earliestKnownStatusContainer *apicontainer.Container + essentialContainerStopped := false + for _, container := range task.Containers { + containerKnownStatus := container.GetKnownStatus() + if containerKnownStatus == apicontainerstatus.ContainerStopped && container.Essential { + essentialContainerStopped = true + } + if containerKnownStatus < containerEarliestKnownStatus { + containerEarliestKnownStatus = containerKnownStatus + earliestKnownStatusContainer = container + } + } + if earliestKnownStatusContainer == nil { + seelog.Criticalf( + "Impossible state found while updating tasks's known status, earliest state recorded as %s for task [%v]", + containerEarliestKnownStatus.String(), task) + 
return apitaskstatus.TaskStatusNone + } + seelog.Debugf("api/task: Container with earliest known container is [%s] for task: %s", + earliestKnownStatusContainer.String(), task.String()) + // If the essential container is stopped while other containers may be running + // don't update the task status until the other containers are stopped. + if earliestKnownStatusContainer.IsKnownSteadyState() && essentialContainerStopped { + seelog.Debugf( + "Essential container is stopped while other containers are running, not updating task status for task: %s", + task.String()) + return apitaskstatus.TaskStatusNone + } + // We can't rely on earliest container known status alone for determining if the + // task state needs to be updated as containers can have different steady states + // defined. Instead we should get the task status for all containers' known + // statuses and compute the min of this + earliestKnownTaskStatus := task.getEarliestKnownTaskStatusForContainers() + if task.GetKnownStatus() < earliestKnownTaskStatus { + seelog.Infof("api/task: Updating task's known status to: %s, task: %s", + earliestKnownTaskStatus.String(), task.String()) + task.SetKnownStatus(earliestKnownTaskStatus) + return task.GetKnownStatus() + } + return apitaskstatus.TaskStatusNone +} + +// getEarliestKnownTaskStatusForContainers gets the lowest (earliest) task status +// based on the known statuses of all containers in the task +func (task *Task) getEarliestKnownTaskStatusForContainers() apitaskstatus.TaskStatus { + if len(task.Containers) == 0 { + seelog.Criticalf("No containers in the task: %s", task.String()) + return apitaskstatus.TaskStatusNone + } + // Set earliest container status to an impossible to reach 'high' task status + earliest := apitaskstatus.TaskZombie + for _, container := range task.Containers { + containerTaskStatus := apitaskstatus.MapContainerToTaskStatus(container.GetKnownStatus(), container.GetSteadyStateStatus()) + if containerTaskStatus < earliest { + earliest = 
containerTaskStatus + } + } + + return earliest +} + +// DockerConfig converts the given container in this task to the format of +// the Docker SDK 'Config' struct +func (task *Task) DockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) { + return task.dockerConfig(container, apiVersion) +} + +func (task *Task) dockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) { + dockerEnv := make([]string, 0, len(container.Environment)) + for envKey, envVal := range container.Environment { + dockerEnv = append(dockerEnv, envKey+"="+envVal) + } + + var entryPoint []string + if container.EntryPoint != nil { + entryPoint = *container.EntryPoint + } + + containerConfig := &dockercontainer.Config{ + Image: container.Image, + Cmd: container.Command, + Entrypoint: entryPoint, + ExposedPorts: task.dockerExposedPorts(container), + Env: dockerEnv, + } + + if container.DockerConfig.Config != nil { + if err := json.Unmarshal([]byte(aws.StringValue(container.DockerConfig.Config)), &containerConfig); err != nil { + return nil, &apierrors.DockerClientConfigError{Msg: "Unable decode given docker config: " + err.Error()} + } + } + if container.HealthCheckType == apicontainer.DockerHealthCheckType && containerConfig.Healthcheck == nil { + return nil, &apierrors.DockerClientConfigError{ + Msg: "docker health check is nil while container health check type is DOCKER"} + } + + if containerConfig.Labels == nil { + containerConfig.Labels = make(map[string]string) + } + + if container.Type == apicontainer.ContainerCNIPause { + // apply hostname to pause container's docker config + return task.applyENIHostname(containerConfig), nil + } + + return containerConfig, nil +} + +func (task *Task) dockerExposedPorts(container *apicontainer.Container) nat.PortSet { + dockerExposedPorts := make(map[nat.Port]struct{}) + + 
for _, portBinding := range container.Ports { + dockerPort := nat.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String()) + dockerExposedPorts[dockerPort] = struct{}{} + } + return dockerExposedPorts +} + +// DockerHostConfig construct the configuration recognized by docker +func (task *Task) DockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion, cfg *config.Config) (*dockercontainer.HostConfig, *apierrors.HostConfigError) { + return task.dockerHostConfig(container, dockerContainerMap, apiVersion, cfg) +} + +// ApplyExecutionRoleLogsAuth will check whether the task has execution role +// credentials, and add the genereated credentials endpoint to the associated HostConfig +func (task *Task) ApplyExecutionRoleLogsAuth(hostConfig *dockercontainer.HostConfig, credentialsManager credentials.Manager) *apierrors.HostConfigError { + id := task.GetExecutionCredentialsID() + if id == "" { + // No execution credentials set for the task. Do not inject the endpoint environment variable. + return &apierrors.HostConfigError{Msg: "No execution credentials set for the task"} + } + + executionRoleCredentials, ok := credentialsManager.GetTaskCredentials(id) + if !ok { + // Task has credentials id set, but credentials manager is unaware of + // the id. 
This should never happen as the payload handler sets + // credentialsId for the task after adding credentials to the + // credentials manager + return &apierrors.HostConfigError{Msg: "Unable to get execution role credentials for task"} + } + credentialsEndpointRelativeURI := executionRoleCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI() + if hostConfig.LogConfig.Config == nil { + hostConfig.LogConfig.Config = map[string]string{} + } + hostConfig.LogConfig.Config[awslogsCredsEndpointOpt] = credentialsEndpointRelativeURI + return nil +} + +func (task *Task) dockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion, cfg *config.Config) (*dockercontainer.HostConfig, *apierrors.HostConfigError) { + dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap) + if err != nil { + return nil, &apierrors.HostConfigError{Msg: err.Error()} + } + + dockerPortMap := task.dockerPortMap(container) + + volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap) + if err != nil { + return nil, &apierrors.HostConfigError{Msg: err.Error()} + } + + binds, err := task.dockerHostBinds(container) + if err != nil { + return nil, &apierrors.HostConfigError{Msg: err.Error()} + } + + resources := task.getDockerResources(container) + + // Populate hostConfig + hostConfig := &dockercontainer.HostConfig{ + Links: dockerLinkArr, + Binds: binds, + PortBindings: dockerPortMap, + VolumesFrom: volumesFrom, + Resources: resources, + } + + if err := task.overrideContainerRuntime(container, hostConfig, cfg); err != nil { + return nil, err + } + + if container.DockerConfig.HostConfig != nil { + err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig) + if err != nil { + return nil, &apierrors.HostConfigError{Msg: "Unable to decode given host config: " + err.Error()} + } + } + + if err := task.platformHostConfigOverride(hostConfig); err != nil { + 
return nil, &apierrors.HostConfigError{Msg: err.Error()} + } + + // Determine if network mode should be overridden and override it if needed + ok, networkMode := task.shouldOverrideNetworkMode(container, dockerContainerMap) + if ok { + hostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode) + // Override 'awsvpc' parameters if needed + if container.Type == apicontainer.ContainerCNIPause { + // apply ExtraHosts to HostConfig for pause container + if hosts := task.generateENIExtraHosts(); hosts != nil { + hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, hosts...) + } + + if task.shouldEnableIPv6() { + // By default, the disable ipv6 setting is turned on, so need to turn it off to enable it. + enableIPv6SysctlSetting(hostConfig) + } + + // Override the DNS settings for the pause container if ENI has custom + // DNS settings + return task.overrideDNS(hostConfig), nil + } + } + + ok, pidMode := task.shouldOverridePIDMode(container, dockerContainerMap) + if ok { + hostConfig.PidMode = dockercontainer.PidMode(pidMode) + } + + ok, ipcMode := task.shouldOverrideIPCMode(container, dockerContainerMap) + if ok { + hostConfig.IpcMode = dockercontainer.IpcMode(ipcMode) + } + + return hostConfig, nil +} + +// overrideContainerRuntime overrides the runtime for the container in host config if needed. 
+func (task *Task) overrideContainerRuntime(container *apicontainer.Container, hostCfg *dockercontainer.HostConfig, + cfg *config.Config) *apierrors.HostConfigError { + if task.isGPUEnabled() && task.shouldRequireNvidiaRuntime(container) { + if task.NvidiaRuntime == "" { + return &apierrors.HostConfigError{Msg: "Runtime is not set for GPU containers"} + } + seelog.Debugf("Setting runtime as %s for container %s", task.NvidiaRuntime, container.Name) + hostCfg.Runtime = task.NvidiaRuntime + } + + if cfg.InferentiaSupportEnabled && container.RequireNeuronRuntime() { + seelog.Debugf("Setting runtime as %s for container %s", neuronRuntime, container.Name) + hostCfg.Runtime = neuronRuntime + } + return nil +} + +// Requires an *apicontainer.Container and returns the Resources for the HostConfig struct +func (task *Task) getDockerResources(container *apicontainer.Container) dockercontainer.Resources { + // Convert MB to B and set Memory + dockerMem := int64(container.Memory * 1024 * 1024) + if dockerMem != 0 && dockerMem < apicontainer.DockerContainerMinimumMemoryInBytes { + seelog.Warnf("Task %s container %s memory setting is too low, increasing to %d bytes", + task.Arn, container.Name, apicontainer.DockerContainerMinimumMemoryInBytes) + dockerMem = apicontainer.DockerContainerMinimumMemoryInBytes + } + // Set CPUShares + cpuShare := task.dockerCPUShares(container.CPU) + resources := dockercontainer.Resources{ + Memory: dockerMem, + CPUShares: cpuShare, + } + return resources +} + +// shouldOverrideNetworkMode returns true if the network mode of the container needs +// to be overridden. It also returns the override string in this case. It returns +// false otherwise +func (task *Task) shouldOverrideNetworkMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) { + // TODO. 
We can do an early return here by determining which kind of task it is + // Example: Does this task have ENIs in its payload, what is its networking mode etc + if container.IsInternal() { + // If it's an internal container, set the network mode to none. + // Currently, internal containers are either for creating empty host + // volumes or for creating the 'pause' container. Both of these + // only need the network mode to be set to "none" + return true, networkModeNone + } + + // For other types of containers, determine if the container map contains + // a pause container. Since a pause container is only added to the task + // when using non docker daemon supported network modes, its existence + // indicates the need to configure the network mode outside of supported + // network drivers + if !task.IsNetworkModeAWSVPC() { + return false, "" + } + + pauseContName := "" + for _, cont := range task.Containers { + if cont.Type == apicontainer.ContainerCNIPause { + pauseContName = cont.Name + break + } + } + if pauseContName == "" { + seelog.Critical("Pause container required, but not found in the task: %s", task.String()) + return false, "" + } + pauseContainer, ok := dockerContainerMap[pauseContName] + if !ok || pauseContainer == nil { + // This should never be the case and implies a code-bug. + seelog.Criticalf("Pause container required, but not found in container map for container: [%s] in task: %s", + container.String(), task.String()) + return false, "" + } + return true, dockerMappingContainerPrefix + pauseContainer.DockerID +} + +// overrideDNS overrides a container's host config if the following conditions are +// true: +// 1. Task has an ENI associated with it +// 2. 
ENI has custom DNS IPs and search list associated with it +// This should only be done for the pause container as other containers inherit +// /etc/resolv.conf of this container (they share the network namespace) +func (task *Task) overrideDNS(hostConfig *dockercontainer.HostConfig) *dockercontainer.HostConfig { + eni := task.GetPrimaryENI() + if eni == nil { + return hostConfig + } + + hostConfig.DNS = eni.DomainNameServers + hostConfig.DNSSearch = eni.DomainNameSearchList + + return hostConfig +} + +// applyENIHostname adds the hostname provided by the ENI message to the +// container's docker config. At the time of implmentation, we are only using it +// to configure the pause container for awsvpc tasks +func (task *Task) applyENIHostname(dockerConfig *dockercontainer.Config) *dockercontainer.Config { + eni := task.GetPrimaryENI() + if eni == nil { + return dockerConfig + } + + hostname := eni.GetHostname() + if hostname == "" { + return dockerConfig + } + + dockerConfig.Hostname = hostname + return dockerConfig +} + +// generateENIExtraHosts returns a slice of strings of the form "hostname:ip" +// that is generated using the hostname and ip addresses allocated to the ENI +func (task *Task) generateENIExtraHosts() []string { + eni := task.GetPrimaryENI() + if eni == nil { + return nil + } + + hostname := eni.GetHostname() + if hostname == "" { + return nil + } + + extraHosts := []string{} + + for _, ip := range eni.GetIPV4Addresses() { + host := fmt.Sprintf("%s:%s", hostname, ip) + extraHosts = append(extraHosts, host) + } + return extraHosts +} + +func (task *Task) shouldEnableIPv6() bool { + eni := task.GetPrimaryENI() + if eni == nil { + return false + } + return len(eni.GetIPV6Addresses()) > 0 +} + +// shouldOverridePIDMode returns true if the PIDMode of the container needs +// to be overridden. It also returns the override string in this case. 
It returns +// false otherwise +func (task *Task) shouldOverridePIDMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) { + // If the container is an internal container (ContainerEmptyHostVolume, + // ContainerCNIPause, or ContainerNamespacePause), then PID namespace for + // the container itself should be private (default Docker option) + if container.IsInternal() { + return false, "" + } + + switch task.getPIDMode() { + case pidModeHost: + return true, pidModeHost + + case pidModeTask: + pauseCont, ok := task.ContainerByName(NamespacePauseContainerName) + if !ok { + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) + task.SetDesiredStatus(apitaskstatus.TaskStopped) + return false, "" + } + pauseDockerID, ok := dockerContainerMap[pauseCont.Name] + if !ok || pauseDockerID == nil { + // Docker container shouldn't be nil or not exist if the Container definition within task exists; implies code-bug + seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) + task.SetDesiredStatus(apitaskstatus.TaskStopped) + return false, "" + } + return true, dockerMappingContainerPrefix + pauseDockerID.DockerID + + // If PIDMode is not Host or Task, then no need to override + default: + return false, "" + } +} + +// shouldOverrideIPCMode returns true if the IPCMode of the container needs +// to be overridden. It also returns the override string in this case. It returns +// false otherwise +func (task *Task) shouldOverrideIPCMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) { + // All internal containers do not need the same IPCMode. The NamespaceContainerPause + // needs to be "shareable" if ipcMode is "task". 
All other internal containers should + // defer to the Docker daemon default option (either shareable or private depending on + // version and configuration) + if container.IsInternal() { + if container.Type == apicontainer.ContainerNamespacePause { + // Setting NamespaceContainerPause to be sharable with other containers + if task.getIPCMode() == ipcModeTask { + return true, ipcModeSharable + } + } + // Defaulting to Docker daemon default option + return false, "" + } + + switch task.getIPCMode() { + // No IPCMode provided in Task Definition, no need to override + case "": + return false, "" + + // IPCMode is none - container will have own private namespace with /dev/shm not mounted + case ipcModeNone: + return true, ipcModeNone + + case ipcModeHost: + return true, ipcModeHost + + case ipcModeTask: + pauseCont, ok := task.ContainerByName(NamespacePauseContainerName) + if !ok { + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) + task.SetDesiredStatus(apitaskstatus.TaskStopped) + return false, "" + } + pauseDockerID, ok := dockerContainerMap[pauseCont.Name] + if !ok || pauseDockerID == nil { + // Docker container shouldn't be nill or not exist if the Container definition within task exists; implies code-bug + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) + task.SetDesiredStatus(apitaskstatus.TaskStopped) + return false, "" + } + return true, dockerMappingContainerPrefix + pauseDockerID.DockerID + + default: + return false, "" + } +} + +func (task *Task) initializeContainerOrderingForVolumes() error { + for _, container := range task.Containers { + if len(container.VolumesFrom) > 0 { + for _, volume := range container.VolumesFrom { + if _, ok := task.ContainerByName(volume.SourceContainer); !ok { + return fmt.Errorf("could not find container with name %s", volume.SourceContainer) + } + dependOn := 
apicontainer.DependsOn{ContainerName: volume.SourceContainer, Condition: ContainerOrderingCreateCondition} + container.SetDependsOn(append(container.GetDependsOn(), dependOn)) + } + } + } + return nil +} + +func (task *Task) initializeContainerOrderingForLinks() error { + for _, container := range task.Containers { + if len(container.Links) > 0 { + for _, link := range container.Links { + linkParts := strings.Split(link, ":") + if len(linkParts) > 2 { + return fmt.Errorf("Invalid link format") + } + linkName := linkParts[0] + if _, ok := task.ContainerByName(linkName); !ok { + return fmt.Errorf("could not find container with name %s", linkName) + } + dependOn := apicontainer.DependsOn{ContainerName: linkName, Condition: ContainerOrderingStartCondition} + container.SetDependsOn(append(container.GetDependsOn(), dependOn)) + } + } + } + return nil +} + +func (task *Task) dockerLinks(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) { + dockerLinkArr := make([]string, len(container.Links)) + for i, link := range container.Links { + linkParts := strings.Split(link, ":") + if len(linkParts) > 2 { + return []string{}, errors.New("Invalid link format") + } + linkName := linkParts[0] + var linkAlias string + + if len(linkParts) == 2 { + linkAlias = linkParts[1] + } else { + seelog.Warnf("Link name [%s] found with no linkalias for container: [%s] in task: [%s]", + linkName, container.String(), task.String()) + linkAlias = linkName + } + + targetContainer, ok := dockerContainerMap[linkName] + if !ok { + return []string{}, errors.New("Link target not available: " + linkName) + } + dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias + } + return dockerLinkArr, nil +} + +func (task *Task) dockerPortMap(container *apicontainer.Container) nat.PortMap { + dockerPortMap := nat.PortMap{} + + for _, portBinding := range container.Ports { + dockerPort := nat.Port(strconv.Itoa(int(portBinding.ContainerPort)) + 
"/" + portBinding.Protocol.String()) + currentMappings, existing := dockerPortMap[dockerPort] + if existing { + dockerPortMap[dockerPort] = append(currentMappings, nat.PortBinding{HostPort: strconv.Itoa(int(portBinding.HostPort))}) + } else { + dockerPortMap[dockerPort] = []nat.PortBinding{{HostPort: strconv.Itoa(int(portBinding.HostPort))}} + } + } + return dockerPortMap +} + +func (task *Task) dockerVolumesFrom(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) { + volumesFrom := make([]string, len(container.VolumesFrom)) + for i, volume := range container.VolumesFrom { + targetContainer, ok := dockerContainerMap[volume.SourceContainer] + if !ok { + return []string{}, errors.New("Volume target not available: " + volume.SourceContainer) + } + if volume.ReadOnly { + volumesFrom[i] = targetContainer.DockerName + ":ro" + } else { + volumesFrom[i] = targetContainer.DockerName + } + } + return volumesFrom, nil +} + +func (task *Task) dockerHostBinds(container *apicontainer.Container) ([]string, error) { + if container.Name == emptyHostVolumeName { + // emptyHostVolumes are handled as a special case in config, not + // hostConfig + return []string{}, nil + } + + binds := make([]string, len(container.MountPoints)) + for i, mountPoint := range container.MountPoints { + hv, ok := task.HostVolumeByName(mountPoint.SourceVolume) + if !ok { + return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume) + } + + if hv.Source() == "" || mountPoint.ContainerPath == "" { + seelog.Errorf( + "Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s] in task: [%s]", + container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath, task.String()) + return []string{}, errors.Errorf("Unable to resolve volume mounts; invalid path: %s %s; %s -> %s", + container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath) + } + + bind := hv.Source() + 
":" + mountPoint.ContainerPath + if mountPoint.ReadOnly { + bind += ":ro" + } + binds[i] = bind + } + + return binds, nil +} + +// UpdateStatus updates a task's known and desired statuses to be compatible +// with all of its containers +// It will return a bool indicating if there was a change +func (task *Task) UpdateStatus() bool { + change := task.updateTaskKnownStatus() + // DesiredStatus can change based on a new known status + task.UpdateDesiredStatus() + return change != apitaskstatus.TaskStatusNone +} + +// UpdateDesiredStatus sets the known status of the task +func (task *Task) UpdateDesiredStatus() { + task.lock.Lock() + defer task.lock.Unlock() + task.updateTaskDesiredStatusUnsafe() + task.updateContainerDesiredStatusUnsafe(task.DesiredStatusUnsafe) + task.updateResourceDesiredStatusUnsafe(task.DesiredStatusUnsafe) +} + +// updateTaskDesiredStatusUnsafe determines what status the task should properly be at based on the containers' statuses +// Invariant: task desired status must be stopped if any essential container is stopped +func (task *Task) updateTaskDesiredStatusUnsafe() { + seelog.Debugf("Updating task: [%s]", task.stringUnsafe()) + + // A task's desired status is stopped if any essential container is stopped + // Otherwise, the task's desired status is unchanged (typically running, but no need to change) + for _, cont := range task.Containers { + if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) { + seelog.Infof("api/task: Updating task desired status to stopped because of container: [%s]; task: [%s]", + cont.Name, task.stringUnsafe()) + task.DesiredStatusUnsafe = apitaskstatus.TaskStopped + } + } +} + +// updateContainerDesiredStatusUnsafe sets all container's desired status's to the +// task's desired status +// Invariant: container desired status is <= task desired status converted to container status +// Note: task desired status and container desired status is typically only RUNNING or STOPPED +func (task *Task) 
updateContainerDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) { + for _, container := range task.Containers { + taskDesiredStatusToContainerStatus := apitaskstatus.MapTaskToContainerStatus(taskDesiredStatus, container.GetSteadyStateStatus()) + if container.GetDesiredStatus() < taskDesiredStatusToContainerStatus { + container.SetDesiredStatus(taskDesiredStatusToContainerStatus) + } + } +} + +// updateResourceDesiredStatusUnsafe sets all resources' desired status depending on the +// task's desired status +// TODO: Create a mapping of resource status to the corresponding task status and use it here +func (task *Task) updateResourceDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) { + resources := task.getResourcesUnsafe() + for _, r := range resources { + if taskDesiredStatus == apitaskstatus.TaskRunning { + if r.GetDesiredStatus() < r.SteadyState() { + r.SetDesiredStatus(r.SteadyState()) + } + } else { + if r.GetDesiredStatus() < r.TerminalStatus() { + r.SetDesiredStatus(r.TerminalStatus()) + } + } + } +} + +// SetKnownStatus sets the known status of the task +func (task *Task) SetKnownStatus(status apitaskstatus.TaskStatus) { + task.setKnownStatus(status) + task.updateKnownStatusTime() +} + +func (task *Task) setKnownStatus(status apitaskstatus.TaskStatus) { + task.lock.Lock() + defer task.lock.Unlock() + + task.KnownStatusUnsafe = status +} + +func (task *Task) updateKnownStatusTime() { + task.lock.Lock() + defer task.lock.Unlock() + + task.KnownStatusTimeUnsafe = ttime.Now() +} + +// GetKnownStatus gets the KnownStatus of the task +func (task *Task) GetKnownStatus() apitaskstatus.TaskStatus { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.KnownStatusUnsafe +} + +// GetKnownStatusTime gets the KnownStatusTime of the task +func (task *Task) GetKnownStatusTime() time.Time { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.KnownStatusTimeUnsafe +} + +// SetCredentialsID sets the credentials ID for the 
task +func (task *Task) SetCredentialsID(id string) { + task.lock.Lock() + defer task.lock.Unlock() + + task.credentialsID = id +} + +// GetCredentialsID gets the credentials ID for the task +func (task *Task) GetCredentialsID() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.credentialsID +} + +// SetCredentialsRelativeURI sets the credentials relative uri for the task +func (task *Task) SetCredentialsRelativeURI(uri string) { + task.lock.Lock() + defer task.lock.Unlock() + + task.credentialsRelativeURIUnsafe = uri +} + +// GetCredentialsRelativeURI returns the credentials relative uri for the task +func (task *Task) GetCredentialsRelativeURI() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.credentialsRelativeURIUnsafe +} + +// SetExecutionRoleCredentialsID sets the ID for the task execution role credentials +func (task *Task) SetExecutionRoleCredentialsID(id string) { + task.lock.Lock() + defer task.lock.Unlock() + + task.ExecutionCredentialsID = id +} + +// GetExecutionCredentialsID gets the credentials ID for the task +func (task *Task) GetExecutionCredentialsID() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.ExecutionCredentialsID +} + +// GetDesiredStatus gets the desired status of the task +func (task *Task) GetDesiredStatus() apitaskstatus.TaskStatus { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.DesiredStatusUnsafe +} + +// SetDesiredStatus sets the desired status of the task +func (task *Task) SetDesiredStatus(status apitaskstatus.TaskStatus) { + task.lock.Lock() + defer task.lock.Unlock() + + task.DesiredStatusUnsafe = status +} + +// GetSentStatus safely returns the SentStatus of the task +func (task *Task) GetSentStatus() apitaskstatus.TaskStatus { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.SentStatusUnsafe +} + +// SetSentStatus safely sets the SentStatus of the task +func (task *Task) SetSentStatus(status apitaskstatus.TaskStatus) { + 
task.lock.Lock() + defer task.lock.Unlock() + + task.SentStatusUnsafe = status +} + +// AddTaskENI adds ENI information to the task. +func (task *Task) AddTaskENI(eni *apieni.ENI) { + task.lock.Lock() + defer task.lock.Unlock() + + if task.ENIs == nil { + task.ENIs = make([]*apieni.ENI, 0) + } + task.ENIs = append(task.ENIs, eni) +} + +// GetTaskENIs returns the list of ENIs for the task. +func (task *Task) GetTaskENIs() []*apieni.ENI { + // TODO: what's the point of locking if we are returning a pointer? + task.lock.RLock() + defer task.lock.RUnlock() + + return task.ENIs +} + +// GetPrimaryENI returns the primary ENI of the task. Since ACS can potentially send +// multiple ENIs to the agent, the first ENI in the list is considered as the primary ENI. +func (task *Task) GetPrimaryENI() *apieni.ENI { + task.lock.RLock() + defer task.lock.RUnlock() + + if len(task.ENIs) == 0 { + return nil + } + return task.ENIs[0] +} + +// SetAppMesh sets the app mesh config of the task +func (task *Task) SetAppMesh(appMesh *apiappmesh.AppMesh) { + task.lock.Lock() + defer task.lock.Unlock() + + task.AppMesh = appMesh +} + +// GetAppMesh returns the app mesh config of the task +func (task *Task) GetAppMesh() *apiappmesh.AppMesh { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.AppMesh +} + +// GetStopSequenceNumber returns the stop sequence number of a task +func (task *Task) GetStopSequenceNumber() int64 { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.StopSequenceNumber +} + +// SetStopSequenceNumber sets the stop seqence number of a task +func (task *Task) SetStopSequenceNumber(seqnum int64) { + task.lock.Lock() + defer task.lock.Unlock() + + task.StopSequenceNumber = seqnum +} + +// SetPullStartedAt sets the task pullstartedat timestamp and returns whether +// this field was updated or not +func (task *Task) SetPullStartedAt(timestamp time.Time) bool { + task.lock.Lock() + defer task.lock.Unlock() + + // Only set this field if it is not set + if 
task.PullStartedAtUnsafe.IsZero() { + task.PullStartedAtUnsafe = timestamp + return true + } + return false +} + +// GetPullStartedAt returns the PullStartedAt timestamp +func (task *Task) GetPullStartedAt() time.Time { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.PullStartedAtUnsafe +} + +// SetPullStoppedAt sets the task pullstoppedat timestamp +func (task *Task) SetPullStoppedAt(timestamp time.Time) { + task.lock.Lock() + defer task.lock.Unlock() + + task.PullStoppedAtUnsafe = timestamp +} + +// GetPullStoppedAt returns the PullStoppedAt timestamp +func (task *Task) GetPullStoppedAt() time.Time { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.PullStoppedAtUnsafe +} + +// SetExecutionStoppedAt sets the ExecutionStoppedAt timestamp of the task +func (task *Task) SetExecutionStoppedAt(timestamp time.Time) bool { + task.lock.Lock() + defer task.lock.Unlock() + + if task.ExecutionStoppedAtUnsafe.IsZero() { + task.ExecutionStoppedAtUnsafe = timestamp + return true + } + return false +} + +// GetExecutionStoppedAt returns the task executionStoppedAt timestamp +func (task *Task) GetExecutionStoppedAt() time.Time { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.ExecutionStoppedAtUnsafe +} + +// String returns a human readable string representation of this object +func (task *Task) String() string { + return task.stringUnsafe() +} + +// stringUnsafe returns a human readable string representation of this object +func (task *Task) stringUnsafe() string { + return fmt.Sprintf("%s:%s %s, TaskStatus: (%s->%s) N Containers: %d, N ENIs %d", + task.Family, task.Version, task.Arn, + task.KnownStatusUnsafe.String(), task.DesiredStatusUnsafe.String(), + len(task.Containers), len(task.ENIs)) +} + +// GetID is used to retrieve the taskID from taskARN +// Reference: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-ecs +func (task *Task) GetID() (string, error) { + // Parse taskARN + parsedARN, err 
:= arn.Parse(task.Arn) + if err != nil { + return "", errors.Wrapf(err, "task get-id: malformed taskARN: %s", task.Arn) + } + + // Get task resource section + resource := parsedARN.Resource + + if !strings.Contains(resource, arnResourceDelimiter) { + return "", errors.Errorf("task get-id: malformed task resource: %s", resource) + } + + resourceSplit := strings.Split(resource, arnResourceDelimiter) + return resourceSplit[len(resourceSplit)-1], nil +} + +// RecordExecutionStoppedAt checks if this is an essential container stopped +// and sets the task executionStoppedAt timestamps +func (task *Task) RecordExecutionStoppedAt(container *apicontainer.Container) { + if !container.Essential { + return + } + if container.GetKnownStatus() != apicontainerstatus.ContainerStopped { + return + } + // If the essential container is stopped, set the ExecutionStoppedAt timestamp + now := time.Now() + ok := task.SetExecutionStoppedAt(now) + if !ok { + // ExecutionStoppedAt was already recorded. Nothing left to do here + return + } + seelog.Infof("Task [%s]: recording execution stopped time. Essential container [%s] stopped at: %s", + task.Arn, container.Name, now.String()) +} + +// GetResources returns the list of task resources from ResourcesMap +func (task *Task) GetResources() []taskresource.TaskResource { + task.lock.RLock() + defer task.lock.RUnlock() + return task.getResourcesUnsafe() +} + +// getResourcesUnsafe returns the list of task resources from ResourcesMap +func (task *Task) getResourcesUnsafe() []taskresource.TaskResource { + var resourceList []taskresource.TaskResource + for _, resources := range task.ResourcesMapUnsafe { + resourceList = append(resourceList, resources...) 
+ } + return resourceList +} + +// AddResource adds a resource to ResourcesMap +func (task *Task) AddResource(resourceType string, resource taskresource.TaskResource) { + task.lock.Lock() + defer task.lock.Unlock() + task.ResourcesMapUnsafe[resourceType] = append(task.ResourcesMapUnsafe[resourceType], resource) +} + +// SetTerminalReason sets the terminalReason string and this can only be set +// once per the task's lifecycle. This field does not accept updates. +func (task *Task) SetTerminalReason(reason string) { + seelog.Infof("Task [%s]: attempting to set terminal reason for task [%s]", task.Arn, reason) + task.terminalReasonOnce.Do(func() { + seelog.Infof("Task [%s]: setting terminal reason for task [%s]", task.Arn, reason) + + // Converts the first letter of terminal reason into capital letter + words := strings.Fields(reason) + words[0] = strings.Title(words[0]) + task.terminalReason = strings.Join(words, " ") + }) +} + +// GetTerminalReason retrieves the terminalReason string +func (task *Task) GetTerminalReason() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.terminalReason +} + +// PopulateASMAuthData sets docker auth credentials for a container +func (task *Task) PopulateASMAuthData(container *apicontainer.Container) error { + secretID := container.RegistryAuthentication.ASMAuthData.CredentialsParameter + resource, ok := task.getASMAuthResource() + if !ok { + return errors.New("task auth data: unable to fetch ASM resource") + } + // This will cause a panic if the resource is not of ASMAuthResource type. 
+ // But, it's better to panic as we should have never reached condition + // unless we released an agent without any testing around that code path + asmResource := resource[0].(*asmauth.ASMAuthResource) + dac, ok := asmResource.GetASMDockerAuthConfig(secretID) + if !ok { + return errors.Errorf("task auth data: unable to fetch docker auth config [%s]", secretID) + } + container.SetASMDockerAuthConfig(dac) + return nil +} + +func (task *Task) getASMAuthResource() ([]taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + res, ok := task.ResourcesMapUnsafe[asmauth.ResourceName] + return res, ok +} + +// getSSMSecretsResource retrieves ssmsecret resource from resource map +func (task *Task) getSSMSecretsResource() ([]taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + res, ok := task.ResourcesMapUnsafe[ssmsecret.ResourceName] + return res, ok +} + +// PopulateSecrets appends secrets to container's env var map and hostconfig section +func (task *Task) PopulateSecrets(hostConfig *dockercontainer.HostConfig, container *apicontainer.Container) *apierrors.DockerClientConfigError { + var ssmRes *ssmsecret.SSMSecretResource + var asmRes *asmsecret.ASMSecretResource + + if container.ShouldCreateWithSSMSecret() { + resource, ok := task.getSSMSecretsResource() + if !ok { + return &apierrors.DockerClientConfigError{Msg: "task secret data: unable to fetch SSM Secrets resource"} + } + ssmRes = resource[0].(*ssmsecret.SSMSecretResource) + } + + if container.ShouldCreateWithASMSecret() { + resource, ok := task.getASMSecretsResource() + if !ok { + return &apierrors.DockerClientConfigError{Msg: "task secret data: unable to fetch ASM Secrets resource"} + } + asmRes = resource[0].(*asmsecret.ASMSecretResource) + } + + populateContainerSecrets(hostConfig, container, ssmRes, asmRes) + return nil +} + +func populateContainerSecrets(hostConfig *dockercontainer.HostConfig, container *apicontainer.Container, + ssmRes 
*ssmsecret.SSMSecretResource, asmRes *asmsecret.ASMSecretResource) { + envVars := make(map[string]string) + + logDriverTokenName := "" + logDriverTokenSecretValue := "" + + for _, secret := range container.Secrets { + secretVal := "" + + if secret.Provider == apicontainer.SecretProviderSSM { + k := secret.GetSecretResourceCacheKey() + if secretValue, ok := ssmRes.GetCachedSecretValue(k); ok { + secretVal = secretValue + } + } + + if secret.Provider == apicontainer.SecretProviderASM { + k := secret.GetSecretResourceCacheKey() + if secretValue, ok := asmRes.GetCachedSecretValue(k); ok { + secretVal = secretValue + } + } + + if secret.Type == apicontainer.SecretTypeEnv { + envVars[secret.Name] = secretVal + continue + } + + if secret.Target == apicontainer.SecretTargetLogDriver { + // Log driver secrets for container using awsfirelens log driver won't be saved in log config and passed to + // Docker here. They will only be used to configure the firelens container. + if container.GetLogDriver() == firelensDriverName { + continue + } + + logDriverTokenName = secret.Name + logDriverTokenSecretValue = secretVal + + // Check if all the name and secret value for the log driver do exist + // And add the secret value for this log driver into container's HostConfig + if hostConfig.LogConfig.Type != "" && logDriverTokenName != "" && logDriverTokenSecretValue != "" { + if hostConfig.LogConfig.Config == nil { + hostConfig.LogConfig.Config = map[string]string{} + } + hostConfig.LogConfig.Config[logDriverTokenName] = logDriverTokenSecretValue + } + } + } + + container.MergeEnvironmentVariables(envVars) +} + +// PopulateSecretLogOptionsToFirelensContainer collects secret log option values for awsfirelens log driver from task +// resource and specifies them as envs of firelens container. Firelens container will use the envs to resolve config +// file variables constructed for secret log options when loading the config file. 
+func (task *Task) PopulateSecretLogOptionsToFirelensContainer(firelensContainer *apicontainer.Container) *apierrors.DockerClientConfigError { + firelensENVs := make(map[string]string) + + var ssmRes *ssmsecret.SSMSecretResource + var asmRes *asmsecret.ASMSecretResource + + resource, ok := task.getSSMSecretsResource() + if ok { + ssmRes = resource[0].(*ssmsecret.SSMSecretResource) + } + + resource, ok = task.getASMSecretsResource() + if ok { + asmRes = resource[0].(*asmsecret.ASMSecretResource) + } + + for _, container := range task.Containers { + if container.GetLogDriver() != firelensDriverName { + continue + } + + logDriverSecretData, err := collectLogDriverSecretData(container.Secrets, ssmRes, asmRes) + if err != nil { + return &apierrors.DockerClientConfigError{ + Msg: fmt.Sprintf("unable to generate config to create firelens container: %v", err), + } + } + + idx := task.GetContainerIndex(container.Name) + if idx < 0 { + return &apierrors.DockerClientConfigError{ + Msg: fmt.Sprintf("unable to generate config to create firelens container because container %s is not found in task", container.Name), + } + } + for key, value := range logDriverSecretData { + envKey := fmt.Sprintf(firelensConfigVarFmt, key, idx) + firelensENVs[envKey] = value + } + } + + firelensContainer.MergeEnvironmentVariables(firelensENVs) + return nil +} + +// collectLogDriverSecretData collects all the secret values for log driver secrets. 
+func collectLogDriverSecretData(secrets []apicontainer.Secret, ssmRes *ssmsecret.SSMSecretResource, + asmRes *asmsecret.ASMSecretResource) (map[string]string, error) { + secretData := make(map[string]string) + for _, secret := range secrets { + if secret.Target != apicontainer.SecretTargetLogDriver { + continue + } + + secretVal := "" + cacheKey := secret.GetSecretResourceCacheKey() + if secret.Provider == apicontainer.SecretProviderSSM { + if ssmRes == nil { + return nil, errors.Errorf("missing secret value for secret %s", secret.Name) + } + + if secretValue, ok := ssmRes.GetCachedSecretValue(cacheKey); ok { + secretVal = secretValue + } + } else if secret.Provider == apicontainer.SecretProviderASM { + if asmRes == nil { + return nil, errors.Errorf("missing secret value for secret %s", secret.Name) + } + + if secretValue, ok := asmRes.GetCachedSecretValue(cacheKey); ok { + secretVal = secretValue + } + } + + secretData[secret.Name] = secretVal + } + + return secretData, nil +} + +// getASMSecretsResource retrieves asmsecret resource from resource map +func (task *Task) getASMSecretsResource() ([]taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + res, ok := task.ResourcesMapUnsafe[asmsecret.ResourceName] + return res, ok +} + +// InitializeResources initializes the required field in the task on agent restart +// Some of the fields in task aren't saved in the agent state file, agent needs +// to initialize these fields before processing the task, eg: docker client in resource +func (task *Task) InitializeResources(resourceFields *taskresource.ResourceFields) { + task.lock.Lock() + defer task.lock.Unlock() + + for _, resources := range task.ResourcesMapUnsafe { + for _, resource := range resources { + resource.Initialize(resourceFields, task.KnownStatusUnsafe, task.DesiredStatusUnsafe) + } + } +} + +// Retrieves a Task's PIDMode +func (task *Task) getPIDMode() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return 
task.PIDMode +} + +// Retrieves a Task's IPCMode +func (task *Task) getIPCMode() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.IPCMode +} + +// AssociationsByTypeAndContainer gets a list of names of all the associations associated with a container and of a +// certain type +func (task *Task) AssociationsByTypeAndContainer(associationType, containerName string) []string { + task.lock.RLock() + defer task.lock.RUnlock() + + var associationNames []string + for _, association := range task.Associations { + if association.Type == associationType { + for _, associatedContainerName := range association.Containers { + if associatedContainerName == containerName { + associationNames = append(associationNames, association.Name) + } + } + } + } + + return associationNames +} + +// AssociationByTypeAndName gets an association of a certain type and name +func (task *Task) AssociationByTypeAndName(associationType, associationName string) (*Association, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + for _, association := range task.Associations { + if association.Type == associationType && association.Name == associationName { + return &association, true + } + } + + return nil, false +} + +// GetContainerIndex returns the index of the container in the container list. This doesn't count internal container. 
+func (task *Task) GetContainerIndex(containerName string) int { + task.lock.RLock() + defer task.lock.RUnlock() + + idx := 0 + for _, container := range task.Containers { + if container.IsInternal() { + continue + } + if container.Name == containerName { + return idx + } + idx++ + } + return -1 +} + +func (task *Task) initializeEnvfilesResource(config *config.Config, credentialsManager credentials.Manager) error { + + for _, container := range task.Containers { + if container.ShouldCreateWithEnvFiles() { + envfileResource, err := envFiles.NewEnvironmentFileResource(config.Cluster, task.Arn, config.AWSRegion, config.DataDir, + container.Name, container.EnvironmentFiles, credentialsManager, task.ExecutionCredentialsID) + if err != nil { + return errors.Wrapf(err, "unable to initialize envfiles resource for container %s", container.Name) + } + task.AddResource(envFiles.ResourceName, envfileResource) + container.BuildResourceDependency(envfileResource.GetName(), resourcestatus.ResourceCreated, apicontainerstatus.ContainerCreated) + } + } + + return nil +} + +func (task *Task) getEnvfilesResource(containerName string) (taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + resources, ok := task.ResourcesMapUnsafe[envFiles.ResourceName] + if !ok { + return nil, false + } + + for _, resource := range resources { + envfileResource := resource.(*envFiles.EnvironmentFileResource) + if envfileResource.GetContainerName() == containerName { + return envfileResource, true + } + } + + // was not able to retrieve envfile resource for specified container name + return nil, false +} + +// MergeEnvVarsFromEnvfiles should be called when creating a container - +// this method reads the environment variables specified in the environment files +// that was downloaded to disk and merges it with existing environment variables +func (task *Task) MergeEnvVarsFromEnvfiles(container *apicontainer.Container) *apierrors.ResourceInitError { + var envfileResource 
*envFiles.EnvironmentFileResource + resource, ok := task.getEnvfilesResource(container.Name) + if !ok { + err := errors.New(fmt.Sprintf("task environment files: unable to retrieve environment files resource for container %s", container.Name)) + return apierrors.NewResourceInitError(task.Arn, err) + } + envfileResource = resource.(*envFiles.EnvironmentFileResource) + + envVarsList, err := envfileResource.ReadEnvVarsFromEnvfiles() + if err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + + err = container.MergeEnvironmentVariablesFromEnvfiles(envVarsList) + if err != nil { + return apierrors.NewResourceInitError(task.Arn, err) + } + + return nil +} + +// GetLocalIPAddress returns the local IP address of the task. +func (task *Task) GetLocalIPAddress() string { + task.lock.RLock() + defer task.lock.RUnlock() + + return task.LocalIPAddressUnsafe +} + +// SetLocalIPAddress sets the local IP address of the task. +func (task *Task) SetLocalIPAddress(addr string) { + task.lock.Lock() + defer task.lock.Unlock() + + task.LocalIPAddressUnsafe = addr +} diff --git a/agent/api/task/task_enis.go b/agent/api/task/task_enis.go new file mode 100644 index 00000000000..6eb717718ef --- /dev/null +++ b/agent/api/task/task_enis.go @@ -0,0 +1,54 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "encoding/json" + + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" +) + +// TaskENIs type enumerates the list of ENI objects as a type. It is used for +// implementing a custom unmarshaler. The unmarshaler is capable of unmarshaling +// both a list of ENI objects into the TaskENIs type (the new scheme) or +// a single ENI object into the TaskENIs type (the old scheme before this object +// was introduced). +// +// The 'task' package is deemed to be a better home for this than the 'eni' package +// since this is only required for unmarshaling 'Task' object. None of the +// functionality/types in the 'eni' package themselves have any dependencies on this +// type. +type TaskENIs []*apieni.ENI + +func (taskENIs *TaskENIs) UnmarshalJSON(b []byte) error { + var enis []*apieni.ENI + // Try to unmarshal this as a list of ENI objects. + err := json.Unmarshal(b, &enis) + if err == nil { + // Successfully decoded the byte slice as a list of ENI objects, we're done. + *taskENIs = enis + return nil + } + // There was an error unmarshaling the byte slice as a list of ENIs. This is the case + // where we're restoring from a previous version of the state file, where the ENI is + // being stored as a standalone object and not as a list. + var eni apieni.ENI + err = json.Unmarshal(b, &eni) + if err != nil { + return err + } + enis = []*apieni.ENI{&eni} + *taskENIs = enis + return nil +} diff --git a/agent/api/task/task_linux.go b/agent/api/task/task_linux.go new file mode 100644 index 00000000000..9cc247b5ec3 --- /dev/null +++ b/agent/api/task/task_linux.go @@ -0,0 +1,253 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "path/filepath" + "time" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types" + "github.com/cihub/seelog" + dockercontainer "github.com/docker/docker/api/types/container" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +const ( + // With a 100ms CPU period, we can express 0.01 vCPU to 10 vCPUs + maxTaskVCPULimit = 10 + // Reference: http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html + minimumCPUShare = 2 + + minimumCPUPercent = 0 + bytesPerMegabyte = 1024 * 1024 +) + +// PlatformFields consists of fields specific to Linux for a task +type PlatformFields struct{} + +func (task *Task) adjustForPlatform(cfg *config.Config) { + task.lock.Lock() + defer task.lock.Unlock() + task.MemoryCPULimitsEnabled = cfg.TaskCPUMemLimit.Enabled() +} + +func (task *Task) initializeCgroupResourceSpec(cgroupPath string, cGroupCPUPeriod time.Duration, resourceFields *taskresource.ResourceFields) error { + cgroupRoot, err := task.BuildCgroupRoot() + if err != nil { + return errors.Wrapf(err, "cgroup resource: unable to determine cgroup root for task") + } + resSpec, err := task.BuildLinuxResourceSpec(cGroupCPUPeriod) + if err != nil { + return errors.Wrapf(err, "cgroup resource: unable to build resource spec for task") + } + cgroupResource := 
cgroup.NewCgroupResource(task.Arn, resourceFields.Control, + resourceFields.IOUtil, cgroupRoot, cgroupPath, resSpec) + task.AddResource(resourcetype.CgroupKey, cgroupResource) + for _, container := range task.Containers { + container.BuildResourceDependency(cgroupResource.GetName(), + resourcestatus.ResourceStatus(cgroup.CgroupCreated), + apicontainerstatus.ContainerPulled) + } + return nil +} + +// BuildCgroupRoot helps build the task cgroup prefix +// Example: /ecs/task-id +func (task *Task) BuildCgroupRoot() (string, error) { + taskID, err := task.GetID() + if err != nil { + return "", errors.Wrapf(err, "task build cgroup root: unable to get task-id from task ARN: %s", task.Arn) + } + + return filepath.Join(config.DefaultTaskCgroupPrefix, taskID), nil +} + +// BuildLinuxResourceSpec returns a linuxResources object for the task cgroup +func (task *Task) BuildLinuxResourceSpec(cGroupCPUPeriod time.Duration) (specs.LinuxResources, error) { + linuxResourceSpec := specs.LinuxResources{} + + // If task level CPU limits are requested, set CPU quota + CPU period + // Else set CPU shares + if task.CPU > 0 { + linuxCPUSpec, err := task.buildExplicitLinuxCPUSpec(cGroupCPUPeriod) + if err != nil { + return specs.LinuxResources{}, err + } + linuxResourceSpec.CPU = &linuxCPUSpec + } else { + linuxCPUSpec := task.buildImplicitLinuxCPUSpec() + linuxResourceSpec.CPU = &linuxCPUSpec + } + + // Validate and build task memory spec + // NOTE: task memory specifications are optional + if task.Memory > 0 { + linuxMemorySpec, err := task.buildLinuxMemorySpec() + if err != nil { + return specs.LinuxResources{}, err + } + linuxResourceSpec.Memory = &linuxMemorySpec + } + + return linuxResourceSpec, nil +} + +// buildExplicitLinuxCPUSpec builds CPU spec when task CPU limits are +// explicitly requested +func (task *Task) buildExplicitLinuxCPUSpec(cGroupCPUPeriod time.Duration) (specs.LinuxCPU, error) { + if task.CPU > maxTaskVCPULimit { + return specs.LinuxCPU{}, + errors.Errorf("task CPU 
spec builder: unsupported CPU limits, requested=%f, max-supported=%d", + task.CPU, maxTaskVCPULimit) + } + taskCPUPeriod := uint64(cGroupCPUPeriod / time.Microsecond) + taskCPUQuota := int64(task.CPU * float64(taskCPUPeriod)) + + // TODO: DefaultCPUPeriod only permits 10VCPUs. + // Adaptive calculation of CPUPeriod required for further support + // (samuelkarp) The largest available EC2 instance in terms of CPU count is a x1.32xlarge, + // with 128 vCPUs. If we assume a fixed evaluation period of 100ms (100000us), + // we'd need a quota of 12800000us, which is longer than the maximum of 1000000. + // For 128 vCPUs, we'd probably need something like a 1ms (1000us - the minimum) + // evaluation period, an 128000us quota in order to stay within the min/max limits. + return specs.LinuxCPU{ + Quota: &taskCPUQuota, + Period: &taskCPUPeriod, + }, nil +} + +// buildImplicitLinuxCPUSpec builds the implicit task CPU spec when +// task CPU and memory limit feature is enabled +func (task *Task) buildImplicitLinuxCPUSpec() specs.LinuxCPU { + // If task level CPU limits are missing, + // aggregate container CPU shares when present + var taskCPUShares uint64 + for _, container := range task.Containers { + if container.CPU > 0 { + taskCPUShares += uint64(container.CPU) + } + } + + // If there are no CPU limits at task or container level, + // default task CPU shares + if taskCPUShares == 0 { + // Set default CPU shares + taskCPUShares = minimumCPUShare + } + + return specs.LinuxCPU{ + Shares: &taskCPUShares, + } +} + +// buildLinuxMemorySpec validates and builds the task memory spec +func (task *Task) buildLinuxMemorySpec() (specs.LinuxMemory, error) { + // If task memory limit is not present, cgroup parent memory is not set + // If task memory limit is set, ensure that no container + // of this task has a greater request + for _, container := range task.Containers { + containerMemoryLimit := int64(container.Memory) + if containerMemoryLimit > task.Memory { + return 
specs.LinuxMemory{}, + errors.Errorf("task memory spec builder: container memory limit(%d) greater than task memory limit(%d)", + containerMemoryLimit, task.Memory) + } + } + + // Kernel expects memory to be expressed in bytes + memoryBytes := task.Memory * bytesPerMegabyte + return specs.LinuxMemory{ + Limit: &memoryBytes, + }, nil +} + +// platformHostConfigOverride to override platform specific feature sets +func (task *Task) platformHostConfigOverride(hostConfig *dockercontainer.HostConfig) error { + // Override cgroup parent + return task.overrideCgroupParent(hostConfig) +} + +// overrideCgroupParent updates hostconfig with cgroup parent when task cgroups +// are enabled +func (task *Task) overrideCgroupParent(hostConfig *dockercontainer.HostConfig) error { + task.lock.RLock() + defer task.lock.RUnlock() + if task.MemoryCPULimitsEnabled { + cgroupRoot, err := task.BuildCgroupRoot() + if err != nil { + return errors.Wrapf(err, "task cgroup override: unable to obtain cgroup root for task: %s", task.Arn) + } + hostConfig.CgroupParent = cgroupRoot + } + return nil +} + +// dockerCPUShares converts containerCPU shares if needed as per the logic stated below: +// Docker silently converts 0 to 1024 CPU shares, which is probably not what we +// want. Instead, we convert 0 to 2 to be closer to expected behavior. The +// reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not Docker's). 
+func (task *Task) dockerCPUShares(containerCPU uint) int64 { + if containerCPU <= 1 { + seelog.Debugf( + "Converting CPU shares to allowed minimum of 2 for task arn: [%s] and cpu shares: %d", + task.Arn, containerCPU) + return 2 + } + return int64(containerCPU) +} + +// requiresCredentialSpecResource returns true if at least one container in the task +// needs a valid credentialspec resource +func (task *Task) requiresCredentialSpecResource() bool { + return false +} + +// initializeCredentialSpecResource builds the resource dependency map for the credentialspec resource +func (task *Task) initializeCredentialSpecResource(config *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + return errors.New("task credentialspec is only supported on windows") +} + +// GetCredentialSpecResource retrieves credentialspec resource from resource map +func (task *Task) GetCredentialSpecResource() ([]taskresource.TaskResource, bool) { + return []taskresource.TaskResource{}, false +} + +func enableIPv6SysctlSetting(hostConfig *dockercontainer.HostConfig) { + if hostConfig.Sysctls == nil { + hostConfig.Sysctls = make(map[string]string) + } + hostConfig.Sysctls[disableIPv6SysctlKey] = sysctlValueOff +} + +// requiresFSxWindowsFileServerResource returns true if at least one volume in the task +// is of type 'fsxWindowsFileServer' +func (task *Task) requiresFSxWindowsFileServerResource() bool { + return false +} + +// initializeFSxWindowsFileServerResource builds the resource dependency map for the fsxwindowsfileserver resource +func (task *Task) initializeFSxWindowsFileServerResource(cfg *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + return errors.New("task with FSx for Windows File Server volumes is only supported on Windows container instance") +} diff --git a/agent/api/task/task_linux_test.go b/agent/api/task/task_linux_test.go new file mode 100644 index 
00000000000..cfefd55c3c3 --- /dev/null +++ b/agent/api/task/task_linux_test.go @@ -0,0 +1,1205 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "encoding/json" + "testing" + "time" + + apiappmesh "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/mock_control" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + "github.com/golang/mock/gomock" + + "github.com/aws/aws-sdk-go/aws" + dockercontainer "github.com/docker/docker/api/types/container" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + validTaskArn = "arn:aws:ecs:region:account-id:task/task-id" + invalidTaskArn = "invalid:task::arn" + + expectedCgroupRoot = "/ecs/task-id" + + 
taskVCPULimit = 2.0 + taskMemoryLimit = 512 + minDockerClientAPIVersion = dockerclient.Version_1_17 + + proxyName = "envoy" + + testCluster = "testCluster" + testDataDir = "testDataDir" + testDataDirOnHost = "testDataDirOnHost" + testInstanceID = "testInstanceID" + testTaskDefFamily = "testFamily" + testTaskDefVersion = "1" + testRegion = "testRegion" + testExecutionCredentialsID = "testExecutionCredentialsID" + + defaultCPUPeriod = 100 * time.Millisecond // 100ms +) + +func TestAddNetworkResourceProvisioningDependencyNop(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + }, + } + testTask.addNetworkResourceProvisioningDependency(nil) + assert.Equal(t, 1, len(testTask.Containers)) +} + +func TestAddNetworkResourceProvisioningDependencyWithENI(t *testing.T) { + testTask := &Task{ + ENIs: []*apieni.ENI{{}}, + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + cfg := &config.Config{ + PauseContainerImageName: "pause-container-image-name", + PauseContainerTag: "pause-container-tag", + } + testTask.addNetworkResourceProvisioningDependency(cfg) + assert.Equal(t, 2, len(testTask.Containers), + "addNetworkResourceProvisioningDependency should add another container") + pauseContainer, ok := testTask.ContainerByName(NetworkPauseContainerName) + require.True(t, ok, "Expected to find pause container") + assert.Equal(t, apicontainer.ContainerCNIPause, pauseContainer.Type, "pause container should have correct type") + assert.True(t, pauseContainer.Essential, "pause container should be essential") + assert.Equal(t, cfg.PauseContainerImageName+":"+cfg.PauseContainerTag, pauseContainer.Image, + "pause container should use configured image") +} + +func TestAddNetworkResourceProvisioningDependencyWithAppMesh(t *testing.T) { + pauseConfig := dockercontainer.Config{ + User: "1337:35", + } + + bytes, _ 
:= json.Marshal(pauseConfig) + serializedConfig := string(bytes) + + testTask := &Task{ + AppMesh: &apiappmesh.AppMesh{ + ContainerName: proxyName, + }, + ENIs: []*apieni.ENI{{}}, + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + { + Name: proxyName, + DockerConfig: apicontainer.DockerConfig{ + Config: &serializedConfig, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + cfg := &config.Config{ + PauseContainerImageName: "pause-container-image-name", + PauseContainerTag: "pause-container-tag", + } + + testTask.addNetworkResourceProvisioningDependency(cfg) + assert.Equal(t, 3, len(testTask.Containers), + "addNetworkResourceProvisioningDependency should add another container") + pauseContainer, ok := testTask.ContainerByName(NetworkPauseContainerName) + require.True(t, ok, "Expected to find pause container") + containerConfig := &dockercontainer.Config{} + json.Unmarshal([]byte(aws.StringValue(pauseContainer.DockerConfig.Config)), &containerConfig) + assert.Equal(t, "1337:35", containerConfig.User, "pause container should have correct user") + assert.Equal(t, apicontainer.ContainerCNIPause, pauseContainer.Type, "pause container should have correct type") + assert.True(t, pauseContainer.Essential, "pause container should be essential") + assert.Equal(t, cfg.PauseContainerImageName+":"+cfg.PauseContainerTag, pauseContainer.Image, + "pause container should use configured image") +} + +func TestAddNetworkResourceProvisioningDependencyWithAppMeshDefaultImage(t *testing.T) { + pauseConfig := dockercontainer.Config{ + User: "1337:35", + } + + bytes, _ := json.Marshal(pauseConfig) + serializedConfig := string(bytes) + + testTask := &Task{ + AppMesh: &apiappmesh.AppMesh{ + ContainerName: proxyName, + }, + ENIs: []*apieni.ENI{{}}, + Containers: []*apicontainer.Container{ 
+ { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + { + Name: proxyName, + DockerConfig: apicontainer.DockerConfig{ + Config: &serializedConfig, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + cfg := &config.Config{ + PauseContainerImageName: "", + PauseContainerTag: "pause-container-tag", + } + testTask.addNetworkResourceProvisioningDependency(cfg) + assert.Equal(t, 3, len(testTask.Containers), + "addNetworkResourceProvisioningDependency should add another container") + pauseContainer, ok := testTask.ContainerByName(NetworkPauseContainerName) + require.True(t, ok, "Expected to find pause container") + assert.Equal(t, apicontainer.DockerConfig{}, pauseContainer.DockerConfig, "pause container should not have user") + assert.Equal(t, apicontainer.ContainerCNIPause, pauseContainer.Type, "pause container should have correct type") + assert.True(t, pauseContainer.Essential, "pause container should be essential") + assert.Equal(t, cfg.PauseContainerImageName+":"+cfg.PauseContainerTag, pauseContainer.Image, + "pause container should use configured image") +} + +func TestAddNetworkResourceProvisioningDependencyWithAppMeshError(t *testing.T) { + testTask := &Task{ + AppMesh: &apiappmesh.AppMesh{ + ContainerName: proxyName, + }, + ENIs: []*apieni.ENI{{}}, + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + { + Name: proxyName, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + cfg := &config.Config{ + PauseContainerImageName: "pause-container-image-name", + PauseContainerTag: "pause-container-tag", + } + err := testTask.addNetworkResourceProvisioningDependency(cfg) + assert.Error(t, err, + 
"addNetworkResourceProvisioningDependency should throw error when no user in proxy container") +} + +// TestBuildCgroupRootHappyPath builds cgroup root from valid taskARN +func TestBuildCgroupRootHappyPath(t *testing.T) { + task := Task{ + Arn: validTaskArn, + } + + cgroupRoot, err := task.BuildCgroupRoot() + + assert.NoError(t, err) + assert.Equal(t, expectedCgroupRoot, cgroupRoot) +} + +// TestBuildCgroupRootErrorPath validates the cgroup path build error path +func TestBuildCgroupRootErrorPath(t *testing.T) { + task := Task{ + Arn: invalidTaskArn, + } + + cgroupRoot, err := task.BuildCgroupRoot() + + assert.Error(t, err) + assert.Empty(t, cgroupRoot) +} + +// TestBuildLinuxResourceSpecCPUMem validates the linux resource spec builder +func TestBuildLinuxResourceSpecCPUMem(t *testing.T) { + taskMemoryLimit := int64(taskMemoryLimit) + + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: taskMemoryLimit, + } + + expectedTaskCPUPeriod := uint64(defaultCPUPeriod / time.Microsecond) + expectedTaskCPUQuota := int64(taskVCPULimit * float64(expectedTaskCPUPeriod)) + expectedTaskMemory := taskMemoryLimit * bytesPerMegabyte + expectedLinuxResourceSpec := specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Quota: &expectedTaskCPUQuota, + Period: &expectedTaskCPUPeriod, + }, + Memory: &specs.LinuxMemory{ + Limit: &expectedTaskMemory, + }, + } + + linuxResourceSpec, err := task.BuildLinuxResourceSpec(defaultCPUPeriod) + + assert.NoError(t, err) + assert.EqualValues(t, expectedLinuxResourceSpec, linuxResourceSpec) +} + +// TestBuildLinuxResourceSpecCPU validates the linux resource spec builder +func TestBuildLinuxResourceSpecCPU(t *testing.T) { + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + } + + expectedTaskCPUPeriod := uint64(defaultCPUPeriod / time.Microsecond) + expectedTaskCPUQuota := int64(taskVCPULimit * float64(expectedTaskCPUPeriod)) + expectedLinuxResourceSpec := specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Quota: 
&expectedTaskCPUQuota, + Period: &expectedTaskCPUPeriod, + }, + } + + linuxResourceSpec, err := task.BuildLinuxResourceSpec(defaultCPUPeriod) + + assert.NoError(t, err) + assert.EqualValues(t, expectedLinuxResourceSpec, linuxResourceSpec) +} + +// TestBuildLinuxResourceSpecWithoutTaskCPULimits validates behavior of CPU Shares +func TestBuildLinuxResourceSpecWithoutTaskCPULimits(t *testing.T) { + task := &Task{ + Arn: validTaskArn, + Containers: []*apicontainer.Container{ + { + Name: "C1", + }, + }, + } + expectedCPUShares := uint64(minimumCPUShare) + expectedLinuxResourceSpec := specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Shares: &expectedCPUShares, + }, + } + + linuxResourceSpec, err := task.BuildLinuxResourceSpec(defaultCPUPeriod) + + assert.NoError(t, err) + assert.EqualValues(t, expectedLinuxResourceSpec, linuxResourceSpec) +} + +// TestBuildLinuxResourceSpecWithoutTaskCPUWithContainerCPULimits validates behavior of CPU Shares +func TestBuildLinuxResourceSpecWithoutTaskCPUWithContainerCPULimits(t *testing.T) { + task := &Task{ + Arn: validTaskArn, + Containers: []*apicontainer.Container{ + { + Name: "C1", + CPU: uint(512), + }, + }, + } + expectedCPUShares := uint64(512) + expectedLinuxResourceSpec := specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Shares: &expectedCPUShares, + }, + } + + linuxResourceSpec, err := task.BuildLinuxResourceSpec(defaultCPUPeriod) + + assert.NoError(t, err) + assert.EqualValues(t, expectedLinuxResourceSpec, linuxResourceSpec) +} + +// TestBuildLinuxResourceSpecInvalidMem validates the linux resource spec builder +func TestBuildLinuxResourceSpecInvalidMem(t *testing.T) { + taskMemoryLimit := int64(taskMemoryLimit) + + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: taskMemoryLimit, + Containers: []*apicontainer.Container{ + { + Name: "C1", + Memory: uint(2048), + }, + }, + } + + expectedLinuxResourceSpec := specs.LinuxResources{} + linuxResourceSpec, err := 
task.BuildLinuxResourceSpec(defaultCPUPeriod) + + assert.Error(t, err) + assert.EqualValues(t, expectedLinuxResourceSpec, linuxResourceSpec) +} + +// TestOverrideCgroupParent validates the cgroup parent override +func TestOverrideCgroupParentHappyPath(t *testing.T) { + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: int64(taskMemoryLimit), + MemoryCPULimitsEnabled: true, + } + + hostConfig := &dockercontainer.HostConfig{} + + assert.NoError(t, task.overrideCgroupParent(hostConfig)) + assert.NotEmpty(t, hostConfig) + assert.Equal(t, expectedCgroupRoot, hostConfig.CgroupParent) +} + +// TestOverrideCgroupParentErrorPath validates the error path for +// cgroup parent update +func TestOverrideCgroupParentErrorPath(t *testing.T) { + task := &Task{ + Arn: invalidTaskArn, + CPU: float64(taskVCPULimit), + Memory: int64(taskMemoryLimit), + MemoryCPULimitsEnabled: true, + } + + hostConfig := &dockercontainer.HostConfig{} + + assert.Error(t, task.overrideCgroupParent(hostConfig)) + assert.Empty(t, hostConfig.CgroupParent) +} + +// TestPlatformHostConfigOverride validates the platform host config overrides +func TestPlatformHostConfigOverride(t *testing.T) { + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: int64(taskMemoryLimit), + MemoryCPULimitsEnabled: true, + } + + hostConfig := &dockercontainer.HostConfig{} + + assert.NoError(t, task.platformHostConfigOverride(hostConfig)) + assert.NotEmpty(t, hostConfig) + assert.Equal(t, expectedCgroupRoot, hostConfig.CgroupParent) +} + +// TestPlatformHostConfigOverride validates the platform host config overrides +func TestPlatformHostConfigOverrideErrorPath(t *testing.T) { + task := &Task{ + Arn: invalidTaskArn, + CPU: float64(taskVCPULimit), + Memory: int64(taskMemoryLimit), + MemoryCPULimitsEnabled: true, + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + }, + } + + dockerHostConfig, err := task.DockerHostConfig(task.Containers[0], dockerMap(task), 
defaultDockerClientAPIVersion, + &config.Config{}) + assert.Error(t, err) + assert.Empty(t, dockerHostConfig) +} + +func TestDockerHostConfigRawConfigMerging(t *testing.T) { + // Use a struct that will marshal to the actual message we expect; not + // dockercontainer.HostConfig which will include a lot of zero values. + rawHostConfigInput := struct { + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` + }{ + Privileged: true, + SecurityOpt: []string{"foo", "bar"}, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + Image: "image", + CPU: 50, + Memory: 100, + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "c2"}}, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + { + Name: "c2", + }, + }, + } + + hostConfig, configErr := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), + minDockerClientAPIVersion, &config.Config{}) + assert.Nil(t, configErr) + + expected := dockercontainer.HostConfig{ + Privileged: true, + SecurityOpt: []string{"foo", "bar"}, + VolumesFrom: []string{"dockername-c2"}, + Resources: dockercontainer.Resources{ + // Convert MB to B and set Memory + Memory: int64(100 * 1024 * 1024), + CPUShares: 50, + CPUPercent: minimumCPUPercent, + }, + } + assertSetStructFieldsEqual(t, expected, *hostConfig) +} + +func TestInitCgroupResourceSpecHappyPath(t *testing.T) { + taskMemoryLimit := int64(taskMemoryLimit) + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: taskMemoryLimit, + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: 
make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + MemoryCPULimitsEnabled: true, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockControl := mock_control.NewMockControl(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + assert.NoError(t, task.initializeCgroupResourceSpec("cgroupPath", defaultCPUPeriod, &taskresource.ResourceFields{ + Control: mockControl, + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + IOUtil: mockIO, + }, + })) + assert.Equal(t, 1, len(task.GetResources())) + assert.Equal(t, 1, len(task.Containers[0].TransitionDependenciesMap)) +} + +func TestInitCgroupResourceSpecInvalidARN(t *testing.T) { + task := &Task{ + Arn: "arn", // malformed arn + Family: "testFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + MemoryCPULimitsEnabled: true, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Error(t, task.initializeCgroupResourceSpec("", time.Millisecond, nil)) + assert.Equal(t, 0, len(task.GetResources())) + assert.Equal(t, 0, len(task.Containers[0].TransitionDependenciesMap)) +} + +func TestInitCgroupResourceSpecInvalidMem(t *testing.T) { + taskMemoryLimit := int64(taskMemoryLimit) + task := &Task{ + Arn: validTaskArn, + CPU: float64(taskVCPULimit), + Memory: taskMemoryLimit, + Containers: []*apicontainer.Container{ + { + Name: "C1", + Memory: uint(2048), // container memory > task memory + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + MemoryCPULimitsEnabled: true, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Error(t, task.initializeCgroupResourceSpec("", time.Millisecond, nil)) + assert.Equal(t, 0, 
len(task.GetResources())) + assert.Equal(t, 0, len(task.Containers[0].TransitionDependenciesMap)) +} + +func TestPostUnmarshalWithCPULimitsFail(t *testing.T) { + task := &Task{ + Arn: "arn", // malformed arn + Family: "testFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + cfg := config.Config{ + TaskCPUMemLimit: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}, + } + assert.Error(t, task.PostUnmarshalTask(&cfg, nil, nil, nil, nil)) + assert.Equal(t, 0, len(task.GetResources())) + assert.Equal(t, 0, len(task.Containers[0].TransitionDependenciesMap)) +} + +func TestPostUnmarshalWithFirelensContainer(t *testing.T) { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["config-file-type"] = "file" + task.Containers[1].FirelensConfig.Options["config-file-value"] = "/tmp/file" + + resourceFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + EC2InstanceID: testInstanceID, + }, + } + cfg := &config.Config{ + DataDir: testDataDir, + Cluster: testCluster, + AWSRegion: testRegion, + } + assert.NoError(t, task.PostUnmarshalTask(cfg, nil, resourceFields, nil, nil)) + resources := task.GetResources() + assert.Len(t, resources, 2) + assert.Len(t, task.Containers[1].TransitionDependenciesMap, 1) + assert.Len(t, task.Containers[1].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies, 2) + var firelensResource *firelens.FirelensResource + var secretResource *ssmsecret.SSMSecretResource + for _, resource := range resources { + if resource.GetName() == firelens.ResourceName { + firelensResource = resource.(*firelens.FirelensResource) + } else if resource.GetName() == ssmsecret.ResourceName { + secretResource = resource.(*ssmsecret.SSMSecretResource) + 
} + } + + assert.NotNil(t, firelensResource) + assert.NotNil(t, secretResource) + + assert.Equal(t, testCluster, firelensResource.GetCluster()) + assert.Equal(t, validTaskArn, firelensResource.GetTaskARN()) + assert.Equal(t, testTaskDefFamily+":"+testTaskDefVersion, firelensResource.GetTaskDefinition()) + assert.Equal(t, testInstanceID, firelensResource.GetEC2InstanceID()) + assert.Equal(t, testDataDir+"/firelens/task-id", firelensResource.GetResourceDir()) + assert.Equal(t, testRegion, firelensResource.GetRegion()) + assert.Equal(t, testExecutionCredentialsID, firelensResource.GetExecutionCredentialsID()) + assert.Equal(t, "file", firelensResource.GetExternalConfigType()) + assert.Equal(t, "/tmp/file", firelensResource.GetExternalConfigValue()) + assert.NotNil(t, firelensResource.GetContainerToLogOptions()) + assert.Equal(t, "value1", firelensResource.GetContainerToLogOptions()["logsender"]["key1"]) + assert.Equal(t, "value2", firelensResource.GetContainerToLogOptions()["logsender"]["key2"]) + assert.Contains(t, task.Containers[0].DependsOnUnsafe, apicontainer.DependsOn{ + ContainerName: task.Containers[1].Name, + Condition: ContainerOrderingStartCondition, + }) +} + +func TestPostUnmarshalWithFirelensContainerError(t *testing.T) { + task := getFirelensTask(t) + task.Containers[0].DockerConfig.HostConfig = strptr(string("invalid")) + + resourceFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + EC2InstanceID: testInstanceID, + }, + } + cfg := &config.Config{ + DataDir: testDataDir, + Cluster: testCluster, + } + assert.Error(t, task.PostUnmarshalTask(cfg, nil, resourceFields, nil, nil)) +} + +func TestGetFirelensContainer(t *testing.T) { + firelensContainer := &apicontainer.Container{ + Name: "c", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: firelens.FirelensConfigTypeFluentd, + }, + } + + testCases := []struct { + name string + task *Task + firelensContainer *apicontainer.Container + }{ + { + name: "task 
has firelens container", + task: &Task{ + Containers: []*apicontainer.Container{ + firelensContainer, + }, + }, + firelensContainer: firelensContainer, + }, + { + name: "task doesn't have firelens container", + task: &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c", + }, + }, + }, + firelensContainer: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.firelensContainer, tc.task.GetFirelensContainer()) + }) + } +} + +func TestInitializeFirelensResource(t *testing.T) { + cfg := &config.Config{ + DataDir: testDataDir, + Cluster: testCluster, + AWSRegion: testRegion, + } + resourceFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + EC2InstanceID: testInstanceID, + }, + } + + testCases := []struct { + name string + task *Task + shouldFail bool + shouldHaveInstanceID bool + shouldDisableMetadata bool + expectedLogOptions map[string]map[string]string + }{ + { + name: "test initialize firelens resource fluentd", + task: getFirelensTask(t), + shouldHaveInstanceID: true, + expectedLogOptions: map[string]map[string]string{ + "logsender": { + "key1": "value1", + "key2": "value2", + "secret-name": "\"#{ENV['secret-name_0']}\"", + }, + }, + }, + { + name: "test initialize firelens resource fluentbit", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Type = firelens.FirelensConfigTypeFluentbit + return task + }(), + shouldHaveInstanceID: true, + expectedLogOptions: map[string]map[string]string{ + "logsender": { + "key1": "value1", + "key2": "value2", + "secret-name": "${secret-name_0}", + }, + }, + }, + { + name: "test initialize firelens resource without ec2 instance id", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].Environment = nil + return task + }(), + expectedLogOptions: map[string]map[string]string{ + "logsender": { + "key1": "value1", + "key2": "value2", + "secret-name": 
"\"#{ENV['secret-name_0']}\"", + }, + }, + }, + { + name: "test initialize firelens resource disables ecs log metadata", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["enable-ecs-log-metadata"] = "false" + return task + }(), + shouldHaveInstanceID: true, + shouldDisableMetadata: true, + expectedLogOptions: map[string]map[string]string{ + "logsender": { + "key1": "value1", + "key2": "value2", + "secret-name": "\"#{ENV['secret-name_0']}\"", + }, + }, + }, + { + name: "test initialize firelens resource invalid host config", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[0].DockerConfig.HostConfig = strptr(string("invalid")) + return task + }(), + shouldFail: true, + }, + { + name: "test initialize firelens resource no firelens container", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig = nil + return task + }(), + shouldFail: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.task.initializeFirelensResource(cfg, resourceFields, tc.task.Containers[1], nil) + if tc.shouldFail { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + resources := tc.task.GetResources() + assert.Equal(t, 1, len(resources)) + assert.Equal(t, 1, len(tc.task.Containers[1].TransitionDependenciesMap)) + + firelensResource := resources[0].(*firelens.FirelensResource) + assert.Equal(t, testCluster, firelensResource.GetCluster()) + assert.Equal(t, validTaskArn, firelensResource.GetTaskARN()) + assert.Equal(t, testTaskDefFamily+":"+testTaskDefVersion, firelensResource.GetTaskDefinition()) + assert.Equal(t, testDataDir+"/firelens/task-id", firelensResource.GetResourceDir()) + assert.Equal(t, testRegion, firelensResource.GetRegion()) + assert.Equal(t, testExecutionCredentialsID, firelensResource.GetExecutionCredentialsID()) + assert.NotNil(t, firelensResource.GetContainerToLogOptions()) + assert.Equal(t, tc.expectedLogOptions, 
firelensResource.GetContainerToLogOptions()) + assert.Equal(t, !tc.shouldDisableMetadata, firelensResource.GetECSMetadataEnabled()) + + if tc.shouldHaveInstanceID { + assert.Equal(t, testInstanceID, firelensResource.GetEC2InstanceID()) + } else { + assert.Empty(t, firelensResource.GetEC2InstanceID()) + } + } + }) + } +} + +func TestInitializeFirelensResourceWithExternalConfig(t *testing.T) { + cfg := &config.Config{ + DataDir: testDataDir, + Cluster: testCluster, + AWSRegion: testRegion, + } + resourceFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + EC2InstanceID: testInstanceID, + }, + } + + testCases := []struct { + name string + task *Task + shouldFail bool + expectedConfigType string + expectedConfigValue string + }{ + { + name: "test initialize firelens resource with external config type file", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["config-file-type"] = "file" + task.Containers[1].FirelensConfig.Options["config-file-value"] = "/tmp/file" + return task + }(), + expectedConfigType: "file", + expectedConfigValue: "/tmp/file", + }, + { + name: "test initialize firelens resource with external config type arn", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["config-file-type"] = "s3" + task.Containers[1].FirelensConfig.Options["config-file-value"] = "arn:aws:s3:::bucket/key" + return task + }(), + expectedConfigType: "s3", + expectedConfigValue: "arn:aws:s3:::bucket/key", + }, + { + name: "test initialize firelens resource missing config value", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["config-file-type"] = "s3" + return task + }(), + shouldFail: true, + }, + { + name: "test initialize firelens resource invalid firelens config type", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Type = "invalid" + return task + 
}(), + shouldFail: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.task.initializeFirelensResource(cfg, resourceFields, tc.task.Containers[1], nil) + if tc.shouldFail { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + resources := tc.task.GetResources() + assert.Equal(t, 1, len(resources)) + assert.Equal(t, 1, len(tc.task.Containers[1].TransitionDependenciesMap)) + + firelensResource := resources[0].(*firelens.FirelensResource) + assert.Equal(t, tc.expectedConfigType, firelensResource.GetExternalConfigType()) + assert.Equal(t, tc.expectedConfigValue, firelensResource.GetExternalConfigValue()) + } + }) + } +} + +func TestCollectFirelensLogOptions(t *testing.T) { + task := getFirelensTask(t) + + containerToLogOptions := make(map[string]map[string]string) + err := task.collectFirelensLogOptions(containerToLogOptions) + assert.NoError(t, err) + assert.Equal(t, "value1", containerToLogOptions["logsender"]["key1"]) + assert.Equal(t, "value2", containerToLogOptions["logsender"]["key2"]) +} + +func TestCollectFirelensLogOptionsInvalidOptions(t *testing.T) { + task := getFirelensTask(t) + task.Containers[0].DockerConfig.HostConfig = strptr(string("invalid")) + + containerToLogOptions := make(map[string]map[string]string) + err := task.collectFirelensLogOptions(containerToLogOptions) + assert.Error(t, err) +} + +func TestCollectFirelensLogEnvOptions(t *testing.T) { + task := getFirelensTask(t) + + containerToLogOptions := make(map[string]map[string]string) + err := task.collectFirelensLogEnvOptions(containerToLogOptions, "fluentd") + assert.NoError(t, err) + assert.Equal(t, "\"#{ENV['secret-name_0']}\"", containerToLogOptions["logsender"]["secret-name"]) +} + +func TestAddFirelensContainerDependency(t *testing.T) { + testCases := []struct { + name string + task *Task + shouldAddDependency bool + }{ + { + name: "test adding firelens container dependency", + task: getFirelensTask(t), + shouldAddDependency: true, 
+ }, + { + name: "test not adding firelens container dependency case 1", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[0].FirelensConfig = task.Containers[1].FirelensConfig + task.Containers = task.Containers[:1] + return task + }(), + shouldAddDependency: false, + }, + { + name: "test not adding firelens container dependency case 2", + task: func() *Task { + task := getFirelensTask(t) + task.Containers = append(task.Containers, &apicontainer.Container{ + Name: "container2", + }) + task.Containers[1].DependsOnUnsafe = append(task.Containers[1].DependsOnUnsafe, apicontainer.DependsOn{ + ContainerName: "container2", + Condition: ContainerOrderingStartCondition, + }) + return task + }(), + shouldAddDependency: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.task.addFirelensContainerDependency() + assert.NoError(t, err) + + if tc.shouldAddDependency { + assert.Equal(t, 1, len(tc.task.Containers[0].DependsOnUnsafe)) + assert.Equal(t, tc.task.Containers[1].Name, tc.task.Containers[0].DependsOnUnsafe[0].ContainerName) + assert.Equal(t, ContainerOrderingStartCondition, tc.task.Containers[0].DependsOnUnsafe[0].Condition) + } else { + assert.Empty(t, tc.task.Containers[0].DependsOnUnsafe) + } + }) + } +} + +func TestAddFirelensContainerBindMounts(t *testing.T) { + cfg := &config.Config{ + DataDirOnHost: testDataDirOnHost, + } + + testCases := []struct { + name string + task *Task + hostCfg *dockercontainer.HostConfig + cfg *config.Config + shouldFail bool + expectedBindMounts []string + }{ + { + name: "test add bind mounts for fluentd firelens container", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Options["config-file-type"] = "s3" + task.Containers[1].FirelensConfig.Options["config-file-value"] = "arn:aws:s3:::bucket/key" + return task + }(), + hostCfg: &dockercontainer.HostConfig{}, + cfg: cfg, + shouldFail: false, + expectedBindMounts: []string{ + 
"testDataDirOnHost/data/firelens/task-id/config/fluent.conf:/fluentd/etc/fluent.conf", + "testDataDirOnHost/data/firelens/task-id/socket/:/var/run/", + "testDataDirOnHost/data/firelens/task-id/config/external.conf:/fluentd/etc/external.conf", + }, + }, + { + name: "test add bind mounts for fluentbit firelens container", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Type = firelens.FirelensConfigTypeFluentbit + task.Containers[1].FirelensConfig.Options["config-file-type"] = "s3" + task.Containers[1].FirelensConfig.Options["config-file-value"] = "arn:aws:s3:::bucket/key" + return task + }(), + hostCfg: &dockercontainer.HostConfig{}, + cfg: cfg, + shouldFail: false, + expectedBindMounts: []string{ + "testDataDirOnHost/data/firelens/task-id/config/fluent.conf:/fluent-bit/etc/fluent-bit.conf", + "testDataDirOnHost/data/firelens/task-id/socket/:/var/run/", + "testDataDirOnHost/data/firelens/task-id/config/external.conf:/fluent-bit/etc/external.conf", + }, + }, + { + name: "test add bind mounts invalid firelens configuration type", + task: func() *Task { + task := getFirelensTask(t) + task.Containers[1].FirelensConfig.Type = "invalid" + return task + }(), + hostCfg: &dockercontainer.HostConfig{}, + cfg: cfg, + shouldFail: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.task.AddFirelensContainerBindMounts(tc.task.Containers[1].FirelensConfig, tc.hostCfg, tc.cfg) + if tc.shouldFail { + // assert.Error doesn't work with *apierrors.HostConfigError. 
+ assert.NotNil(t, err) + } else { + assert.Nil(t, err) + assert.Equal(t, tc.expectedBindMounts, tc.hostCfg.Binds) + } + }) + } +} + +func TestFirelensDependsOnSecretResource(t *testing.T) { + testCases := []struct { + name string + provider string + task *Task + res bool + }{ + { + name: "depends on ssm", + provider: apicontainer.SecretProviderSSM, + task: getFirelensTask(t), + res: true, + }, + { + name: "depends on asm", + provider: apicontainer.SecretProviderASM, + task: func() *Task { + task := getFirelensTask(t) + task.Containers[0].Secrets[0].Provider = apicontainer.SecretProviderASM + return task + }(), + res: true, + }, + { + name: "no dependency", + provider: apicontainer.SecretProviderSSM, + task: func() *Task { + task := getFirelensTask(t) + task.Containers[0].Secrets = []apicontainer.Secret{} + return task + }(), + res: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.res, tc.task.firelensDependsOnSecretResource(tc.provider)) + }) + } +} + +func TestPopulateSecretLogOptionsToFirelensContainer(t *testing.T) { + task := getFirelensTask(t) + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue("secret-value-from_us-west-2", "secret-val") + task.AddResource(ssmsecret.ResourceName, ssmRes) + + assert.Nil(t, task.PopulateSecretLogOptionsToFirelensContainer(task.Containers[1])) + assert.Len(t, task.Containers[1].Environment, 2) + assert.Equal(t, "secret-val", task.Containers[1].Environment["secret-name_0"]) +} + +func TestCollectLogDriverSecretData(t *testing.T) { + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue("secret-value-from_us-west-2", "secret-val") + + asmRes := &asmsecret.ASMSecretResource{} + asmRes.SetCachedSecretValue("secret-value-from-asm_us-west-2", "secret-val-asm") + + secrets := []apicontainer.Secret{ + { + Name: "secret-name", + Provider: apicontainer.SecretProviderSSM, + Target: apicontainer.SecretTargetLogDriver, + ValueFrom: 
"secret-value-from", + Region: "us-west-2", + }, + { + Name: "secret-name-asm", + Provider: apicontainer.SecretProviderASM, + Target: apicontainer.SecretTargetLogDriver, + ValueFrom: "secret-value-from-asm", + Region: "us-west-2", + }, + } + + secretData, err := collectLogDriverSecretData(secrets, ssmRes, asmRes) + assert.NoError(t, err) + assert.Len(t, secretData, 2) + assert.Equal(t, "secret-val", secretData["secret-name"]) + assert.Equal(t, "secret-val-asm", secretData["secret-name-asm"]) +} + +// getFirelensTask returns a sample firelens task. +func getFirelensTask(t *testing.T) *Task { + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: firelensDriverName, + Config: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + return &Task{ + Arn: validTaskArn, + Family: testTaskDefFamily, + Version: testTaskDefVersion, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + ExecutionCredentialsID: testExecutionCredentialsID, + Containers: []*apicontainer.Container{ + { + Name: "logsender", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + Secrets: []apicontainer.Secret{ + { + Name: "secret-name", + ValueFrom: "secret-value-from", + Region: "us-west-2", + + Target: apicontainer.SecretTargetLogDriver, + Provider: apicontainer.SecretProviderSSM, + }, + }, + NetworkModeUnsafe: BridgeNetworkMode, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + { + Name: "firelenscontainer", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: firelens.FirelensConfigTypeFluentd, + Options: map[string]string{ + "enable-ecs-log-metadata": "true", + }, + }, + Environment: map[string]string{ + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + }, + NetworkModeUnsafe: BridgeNetworkMode, + TransitionDependenciesMap: 
make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } +} + +func TestEnableIPv6SysctlSetting(t *testing.T) { + hostConfig := &dockercontainer.HostConfig{} + enableIPv6SysctlSetting(hostConfig) + require.NotNil(t, hostConfig.Sysctls) + assert.Equal(t, sysctlValueOff, hostConfig.Sysctls[disableIPv6SysctlKey]) +} diff --git a/agent/api/task/task_option.go b/agent/api/task/task_option.go new file mode 100644 index 00000000000..b604e9707a6 --- /dev/null +++ b/agent/api/task/task_option.go @@ -0,0 +1,18 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +// Option defines a function that can be executed in some task API methods in order to perform custom configurations +// of the task +type Option func(*Task) error diff --git a/agent/api/task/task_test.go b/agent/api/task/task_test.go new file mode 100644 index 00000000000..aaba0658e41 --- /dev/null +++ b/agent/api/task/task_test.go @@ -0,0 +1,3559 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/asm" + mock_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ssm_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/service/secretsmanager" + + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + "github.com/aws/amazon-ecs-agent/agent/taskresource/envFiles" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + dockercontainer 
"github.com/docker/docker/api/types/container" + "github.com/docker/go-units" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDockerConfigPortBinding(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Ports: []apicontainer.PortBinding{{10, 10, "", apicontainer.TransportProtocolTCP}, {20, 20, "", apicontainer.TransportProtocolUDP}}, + }, + }, + } + + config, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err != nil { + t.Error(err) + } + + _, ok := config.ExposedPorts["10/tcp"] + if !ok { + t.Fatal("Could not get exposed ports 10/tcp") + } + _, ok = config.ExposedPorts["20/udp"] + if !ok { + t.Fatal("Could not get exposed ports 20/udp") + } +} + +func TestDockerHostConfigCPUShareZero(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: 0, + }, + }, + } + + hostconfig, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + if err != nil { + t.Error(err) + } + if runtime.GOOS == "windows" { + if hostconfig.CPUShares != 0 { + // CPUShares will always be 0 on windows + t.Error("CPU shares expected to be 0 on windows") + } + } else if hostconfig.CPUShares != 2 { + t.Error("CPU shares of 0 did not get changed to 2") + } +} + +func TestDockerHostConfigCPUShareMinimum(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: 1, + }, + }, + } + + hostconfig, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + if err != nil { + t.Error(err) + } + + if runtime.GOOS == "windows" { + if hostconfig.CPUShares != 0 { + // CPUShares will always be 0 on windows + t.Error("CPU shares expected to be 0 on windows") + } + } else if hostconfig.CPUShares != 2 { + t.Error("CPU 
shares of 0 did not get changed to 2") + } +} + +func TestDockerHostConfigCPUShareUnchanged(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: 100, + }, + }, + } + + hostconfig, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + if err != nil { + t.Error(err) + } + + if runtime.GOOS == "windows" { + if hostconfig.CPUShares != 0 { + // CPUShares will always be 0 on windows + t.Error("CPU shares expected to be 0 on windows") + } + } else if hostconfig.CPUShares != 100 { + t.Error("CPU shares unexpectedly changed") + } +} + +func TestDockerHostConfigPortBinding(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Ports: []apicontainer.PortBinding{{10, 10, "", apicontainer.TransportProtocolTCP}, {20, 20, "", apicontainer.TransportProtocolUDP}}, + }, + }, + } + + config, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + bindings, ok := config.PortBindings["10/tcp"] + assert.True(t, ok, "Could not get port bindings") + assert.Equal(t, 1, len(bindings), "Wrong number of bindings") + assert.Equal(t, "10", bindings[0].HostPort, "Wrong hostport") + + bindings, ok = config.PortBindings["20/udp"] + assert.True(t, ok, "Could not get port bindings") + assert.Equal(t, 1, len(bindings), "Wrong number of bindings") + assert.Equal(t, "20", bindings[0].HostPort, "Wrong hostport") +} + +func TestDockerHostConfigVolumesFrom(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + { + Name: "c2", + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "c1"}}, + }, + }, + } + + config, err := testTask.DockerHostConfig(testTask.Containers[1], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + if 
!reflect.DeepEqual(config.VolumesFrom, []string{"dockername-c1"}) { + t.Error("Expected volumesFrom to be resolved, was: ", config.VolumesFrom) + } +} + +func TestDockerHostConfigRawConfig(t *testing.T) { + rawHostConfigInput := dockercontainer.HostConfig{ + Privileged: true, + ReadonlyRootfs: true, + DNS: []string{"dns1, dns2"}, + DNSSearch: []string{"dns.search"}, + ExtraHosts: []string{"extra:hosts"}, + SecurityOpt: []string{"foo", "bar"}, + Resources: dockercontainer.Resources{ + CPUShares: 2, + Ulimits: []*units.Ulimit{{Name: "ulimit name", Soft: 10, Hard: 100}}, + }, + LogConfig: dockercontainer.LogConfig{ + Type: "foo", + Config: map[string]string{"foo": "bar"}, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + } + + config, configErr := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, configErr) + + expectedOutput := rawHostConfigInput + expectedOutput.CPUPercent = minimumCPUPercent + if runtime.GOOS == "windows" { + // CPUShares will always be 0 on windows + expectedOutput.CPUShares = 0 + } + assertSetStructFieldsEqual(t, expectedOutput, *config) +} + +func TestDockerHostConfigPauseContainer(t *testing.T) { + testTask := &Task{ + ENIs: []*apieni.ENI{ + { + ID: "eniID", + }, + }, + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + { + Name: NetworkPauseContainerName, + Type: apicontainer.ContainerCNIPause, + }, + }, + } + + customContainer := testTask.Containers[0] + pauseContainer := testTask.Containers[1] + // Verify that the network mode is set to "container:" + // for a non pause container + 
cfg, err := testTask.DockerHostConfig(customContainer, dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, "container:"+dockerIDPrefix+NetworkPauseContainerName, string(cfg.NetworkMode)) + + // Verify that the network mode is not set to "none" for the + // empty volume container + cfg, err = testTask.DockerHostConfig(testTask.Containers[1], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, networkModeNone, string(cfg.NetworkMode)) + + // Verify that the network mode is set to "none" for the pause container + cfg, err = testTask.DockerHostConfig(pauseContainer, dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, networkModeNone, string(cfg.NetworkMode)) + + // Verify that overridden DNS settings are set for the pause container + // and not set for non pause containers + testTask.ENIs[0].DomainNameServers = []string{"169.254.169.253"} + testTask.ENIs[0].DomainNameSearchList = []string{"us-west-2.compute.internal"} + + // DNS overrides are only applied to the pause container. 
Verify that the non-pause + // container contains no overrides + cfg, err = testTask.DockerHostConfig(customContainer, dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, 0, len(cfg.DNS)) + assert.Equal(t, 0, len(cfg.DNSSearch)) + + // Verify DNS settings are overridden for the pause container + cfg, err = testTask.DockerHostConfig(pauseContainer, dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, []string{"169.254.169.253"}, cfg.DNS) + assert.Equal(t, []string{"us-west-2.compute.internal"}, cfg.DNSSearch) + + // Verify eni ExtraHosts added to HostConfig for pause container + ipaddr := &apieni.ENIIPV4Address{Primary: true, Address: "10.0.1.1"} + testTask.ENIs[0].IPV4Addresses = []*apieni.ENIIPV4Address{ipaddr} + testTask.ENIs[0].PrivateDNSName = "eni.ip.region.compute.internal" + + testTask.ENIs[0].IPV6Addresses = []*apieni.ENIIPV6Address{{Address: ipv6}} + cfg, err = testTask.DockerHostConfig(pauseContainer, dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, []string{"eni.ip.region.compute.internal:10.0.1.1"}, cfg.ExtraHosts) + + // Verify ipv6 setting is enabled. 
+ if runtime.GOOS == "linux" { + require.NotNil(t, cfg.Sysctls) + assert.Equal(t, sysctlValueOff, cfg.Sysctls[disableIPv6SysctlKey]) + } + + // Verify eni Hostname is added to DockerConfig for pause container + dockerconfig, dockerConfigErr := testTask.DockerConfig(pauseContainer, defaultDockerClientAPIVersion) + assert.Nil(t, dockerConfigErr) + assert.Equal(t, "eni.ip.region.compute.internal", dockerconfig.Hostname) + +} + +func TestBadDockerHostConfigRawConfig(t *testing.T) { + for _, badHostConfig := range []string{"malformed", `{"Privileged": "wrongType"}`} { + testTask := Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(badHostConfig), + }, + }, + }, + } + _, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(&testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Error(t, err) + } +} + +func TestDockerConfigRawConfig(t *testing.T) { + rawConfigInput := dockercontainer.Config{ + Hostname: "hostname", + Domainname: "domainname", + NetworkDisabled: true, + WorkingDir: "workdir", + User: "user", + } + + rawConfig, err := json.Marshal(&rawConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + Config: strptr(string(rawConfig)), + }, + }, + }, + } + + config, configErr := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if configErr != nil { + t.Fatal(configErr) + } + + expectedOutput := rawConfigInput + + assertSetStructFieldsEqual(t, expectedOutput, *config) +} + +func TestDockerConfigRawConfigNilLabel(t *testing.T) { + rawConfig, err := json.Marshal(&struct{ 
Labels map[string]string }{nil}) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + Config: strptr(string(rawConfig)), + }, + }, + }, + } + + _, configErr := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if configErr != nil { + t.Fatal(configErr) + } +} + +func TestDockerConfigRawConfigMerging(t *testing.T) { + // Use a struct that will marshal to the actual message we expect; not + // dockercontainer.Config which will include a lot of zero values. + rawConfigInput := struct { + User string `json:"User,omitempty" yaml:"User,omitempty"` + }{ + User: "user", + } + + rawConfig, err := json.Marshal(&rawConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + Image: "image", + CPU: 50, + Memory: 1000, + DockerConfig: apicontainer.DockerConfig{ + Config: strptr(string(rawConfig)), + }, + }, + }, + } + + config, configErr := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if configErr != nil { + t.Fatal(configErr) + } + + expected := dockercontainer.Config{ + Image: "image", + User: "user", + } + + assertSetStructFieldsEqual(t, expected, *config) +} + +func TestBadDockerConfigRawConfig(t *testing.T) { + for _, badConfig := range []string{"malformed", `{"Labels": "wrongType"}`} { + testTask := Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + Config: strptr(badConfig), + }, + }, + }, + } + _, err 
:= testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err == nil { + t.Fatal("Expected error, was none for: " + badConfig) + } + } +} + +func TestGetCredentialsEndpointWhenCredentialsAreSet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + credentialsIDInTask := "credsid" + task := Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Environment: make(map[string]string), + }, + { + Name: "c2", + Environment: make(map[string]string), + }}, + credentialsID: credentialsIDInTask, + } + + taskCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"}, + } + credentialsManager.EXPECT().GetTaskCredentials(credentialsIDInTask).Return(taskCredentials, true) + task.initializeCredentialsEndpoint(credentialsManager) + + // Test if all containers in the task have the environment variable for + // credentials endpoint set correctly. 
+ for _, container := range task.Containers { + env := container.Environment + _, exists := env[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] + if !exists { + t.Errorf("'%s' environment variable not set for container '%s', env: %v", awsSDKCredentialsRelativeURIPathEnvironmentVariableName, container.Name, env) + } + } +} + +func TestGetCredentialsEndpointWhenCredentialsAreNotSet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + task := Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Environment: make(map[string]string), + }, + { + Name: "c2", + Environment: make(map[string]string), + }}, + } + + task.initializeCredentialsEndpoint(credentialsManager) + + for _, container := range task.Containers { + env := container.Environment + _, exists := env[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] + if exists { + t.Errorf("'%s' environment variable should not be set for container '%s'", awsSDKCredentialsRelativeURIPathEnvironmentVariableName, container.Name) + } + } +} + +func TestGetDockerResources(t *testing.T) { + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: uint(10), + Memory: uint(256), + }, + }, + } + resources := testTask.getDockerResources(testTask.Containers[0]) + assert.Equal(t, int64(10), resources.CPUShares, "Wrong number of CPUShares") + assert.Equal(t, int64(268435456), resources.Memory, "Wrong amount of memory") +} + +func TestGetDockerResourcesCPUTooLow(t *testing.T) { + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: uint(0), + Memory: uint(256), + }, + }, + } + resources := 
testTask.getDockerResources(testTask.Containers[0]) + assert.Equal(t, int64(268435456), resources.Memory, "Wrong amount of memory") + + // Minimum requirement of 2 CPU Shares + if resources.CPUShares != 2 { + t.Error("CPU shares of 0 did not get changed to 2") + } +} + +func TestGetDockerResourcesMemoryTooLow(t *testing.T) { + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: uint(10), + Memory: uint(1), + }, + }, + } + resources := testTask.getDockerResources(testTask.Containers[0]) + assert.Equal(t, int64(10), resources.CPUShares, "Wrong number of CPUShares") + assert.Equal(t, int64(apicontainer.DockerContainerMinimumMemoryInBytes), resources.Memory, + "Wrong amount of memory") +} + +func TestGetDockerResourcesUnspecifiedMemory(t *testing.T) { + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: uint(10), + }, + }, + } + resources := testTask.getDockerResources(testTask.Containers[0]) + assert.Equal(t, int64(10), resources.CPUShares, "Wrong number of CPUShares") + assert.Equal(t, int64(0), resources.Memory, "Wrong amount of memory") +} + +func TestPostUnmarshalTaskWithDockerVolumes(t *testing.T) { + autoprovision := true + ctrl := gomock.NewController(t) + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{DockerVolume: &types.Volume{}}) + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName1"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("/some/path"), + 
SourceVolume: strptr("dockervolume"), + }, + }, + }, + }, + Volumes: []*ecsacs.Volume{ + { + Name: strptr("dockervolume"), + Type: strptr("docker"), + DockerVolumeConfiguration: &ecsacs.DockerVolumeConfiguration{ + Autoprovision: &autoprovision, + Scope: strptr("shared"), + Driver: strptr("local"), + DriverOpts: make(map[string]*string), + Labels: nil, + }, + }, + }, + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 1, len(task.Containers)) // before PostUnmarshalTask + cfg := config.Config{} + task.PostUnmarshalTask(&cfg, nil, nil, dockerClient, nil) + assert.Equal(t, 1, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + assert.Equal(t, 1, len(task.Volumes), "Should have 1 volume") + taskVol := task.Volumes[0] + assert.Equal(t, "dockervolume", taskVol.Name) + assert.Equal(t, DockerVolumeType, taskVol.Type) +} + +// Test that the PostUnmarshal function properly changes EfsVolumeConfiguration +// task definitions into a dockerVolumeConfiguration task resource. 
+func TestPostUnmarshalTaskWithEFSVolumes(t *testing.T) { + taskFromACS := getACSEFSTask() + seqNum := int64(42) + + testCases := map[string]string{ + "us-west-2": "fs-12345.efs.us-west-2.amazonaws.com", + "cn-north-1": "fs-12345.efs.cn-north-1.amazonaws.com.cn", + "us-iso-east-1": "fs-12345.efs.us-iso-east-1.c2s.ic.gov", + "not-a-region": "fs-12345.efs.not-a-region.amazonaws.com", + } + for region, expectedHostname := range testCases { + t.Run(region, func(t *testing.T) { + task, err := TaskFromACS(taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 1, len(task.Containers)) // before PostUnmarshalTask + cfg := config.Config{} + cfg.AWSRegion = region + require.NoError(t, task.PostUnmarshalTask(&cfg, nil, nil, nil, nil)) + assert.Equal(t, 1, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + assert.Equal(t, 1, len(task.Volumes), "Should have 1 volume") + taskVol := task.Volumes[0] + assert.Equal(t, "efsvolume", taskVol.Name) + assert.Equal(t, "efs", taskVol.Type) + + resources := task.GetResources() + assert.Len(t, resources, 1) + vol, ok := resources[0].(*taskresourcevolume.VolumeResource) + require.True(t, ok) + dockerVolName := vol.VolumeConfig.DockerVolumeName + b, err := json.Marshal(resources[0]) + require.NoError(t, err) + assert.JSONEq(t, fmt.Sprintf(`{ + "name": "efsvolume", + "dockerVolumeConfiguration": { + "scope": "task", + "autoprovision": false, + "mountPoint": "", + "driver": "local", + "driverOpts": { + "device": ":/tmp", + "o": "addr=%s,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport", + "type": "nfs" + }, + "labels": {}, + "dockerVolumeName": "%s" + }, + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "NONE", + "knownStatus": "NONE" + }`, expectedHostname, dockerVolName), string(b)) + }) + } +} + +func TestPostUnmarshalTaskWithEFSVolumesThatUseECSVolumePlugin(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + testCreds := getACSIAMRoleCredentials() + credentialsIDInTask := aws.StringValue(testCreds.CredentialsId) + credentialsManager := mock_credentials.NewMockManager(ctrl) + taskCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: credentialsIDInTask}, + } + expectedEndpoint := "/v2/credentials/" + credentialsIDInTask + credentialsManager.EXPECT().GetTaskCredentials(credentialsIDInTask).Return(taskCredentials, true) + + taskFromACS := getACSEFSTask() + taskFromACS.RoleCredentials = testCreds + seqNum := int64(42) + task, err := TaskFromACS(taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 1, len(task.Containers)) + task.SetCredentialsID(aws.StringValue(testCreds.CredentialsId)) + + cfg := &config.Config{} + cfg.VolumePluginCapabilities = []string{"efsAuth"} + require.NoError(t, task.PostUnmarshalTask(cfg, credentialsManager, nil, nil, nil)) + assert.Equal(t, 1, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + assert.Equal(t, 1, len(task.Volumes), "Should have 1 volume") + taskVol := task.Volumes[0] + assert.Equal(t, "efsvolume", taskVol.Name) + assert.Equal(t, "efs", taskVol.Type) + + resources := task.GetResources() + assert.Len(t, resources, 1) + vol, ok := resources[0].(*taskresourcevolume.VolumeResource) + require.True(t, ok) + dockerVolName := vol.VolumeConfig.DockerVolumeName + b, err := json.Marshal(resources[0]) + require.NoError(t, err) + assert.JSONEq(t, fmt.Sprintf(`{ + "name": "efsvolume", + "dockerVolumeConfiguration": { + "scope": "task", + "autoprovision": false, + "mountPoint": "", + "driver": "amazon-ecs-volume-plugin", + "driverOpts": { + "device": "fs-12345:/tmp", + "o": "tls,tlsport=12345,iam,awscredsuri=%s,accesspoint=fsap-123", + "type": "efs" + }, + "labels": {}, + "dockerVolumeName": "%s" + }, + "createdAt": 
"0001-01-01T00:00:00Z", + "desiredStatus": "NONE", + "knownStatus": "NONE" + }`, expectedEndpoint, dockerVolName), string(b)) +} + +func TestInitializeContainersV3MetadataEndpoint(t *testing.T) { + task := Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Environment: make(map[string]string), + }, + }, + } + container := task.Containers[0] + + task.initializeContainersV3MetadataEndpoint(utils.NewStaticUUIDProvider("new-uuid")) + + // Test if the v3 endpoint id is set and the endpoint is injected to env + assert.Equal(t, container.GetV3EndpointID(), "new-uuid") + assert.Equal(t, container.Environment[apicontainer.MetadataURIEnvironmentVariableName], + fmt.Sprintf(apicontainer.MetadataURIFormat, "new-uuid")) +} + +func TestInitializeContainersV4MetadataEndpoint(t *testing.T) { + task := Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Environment: make(map[string]string), + }, + }, + } + container := task.Containers[0] + + task.initializeContainersV4MetadataEndpoint(utils.NewStaticUUIDProvider("new-uuid")) + + // Test if the v3 endpoint id is set and the endpoint is injected to env + assert.Equal(t, container.GetV3EndpointID(), "new-uuid") + assert.Equal(t, container.Environment[apicontainer.MetadataURIEnvVarNameV4], + fmt.Sprintf(apicontainer.MetadataURIFormatV4, "new-uuid")) +} + +func TestPostUnmarshalTaskWithLocalVolumes(t *testing.T) { + // Constants used here are defined in task_unix_test.go and task_windows_test.go + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName1"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("/path1/"), + SourceVolume: strptr("localvol1"), + }, + }, + }, + { + Name: strptr("myName2"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("/path2/"), + SourceVolume: strptr("localvol2"), + }, + }, + }, + }, + Volumes: 
[]*ecsacs.Volume{ + { + Name: strptr("localvol1"), + Type: strptr("host"), + Host: &ecsacs.HostVolumeProperties{}, + }, + { + Name: strptr("localvol2"), + Type: strptr("host"), + Host: &ecsacs.HostVolumeProperties{}, + }, + }, + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 2, len(task.Containers)) // before PostUnmarshalTask + cfg := config.Config{} + task.PostUnmarshalTask(&cfg, nil, nil, nil, nil) + + assert.Equal(t, 2, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + taskResources := task.getResourcesUnsafe() + assert.Equal(t, 2, len(taskResources), "Should have created 2 volume resources") + + for _, r := range taskResources { + vol := r.(*taskresourcevolume.VolumeResource) + assert.Equal(t, "task", vol.VolumeConfig.Scope) + assert.Equal(t, false, vol.VolumeConfig.Autoprovision) + assert.Equal(t, "local", vol.VolumeConfig.Driver) + assert.Equal(t, 0, len(vol.VolumeConfig.DriverOpts)) + assert.Equal(t, 0, len(vol.VolumeConfig.Labels)) + } + +} + +// Slice of structs for Table Driven testing for sharing PID and IPC resources +var namespaceTests = []struct { + PIDMode string + IPCMode string + ShouldProvision bool +}{ + {"", "none", false}, + {"", "", false}, + {"host", "host", false}, + {"task", "task", true}, + {"host", "task", true}, + {"task", "host", true}, + {"", "task", true}, + {"task", "none", true}, + {"host", "none", false}, + {"host", "", false}, + {"", "host", false}, +} + +// Testing the Getter functions for IPCMode and PIDMode +func TestGetPIDAndIPCFromTask(t *testing.T) { + for _, aTest := range namespaceTests { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + { + Name: "c2", + }, + }, + PIDMode: aTest.PIDMode, + IPCMode: aTest.IPCMode, + } + assert.Equal(t, aTest.PIDMode, testTask.getPIDMode()) + assert.Equal(t, aTest.IPCMode, 
testTask.getIPCMode()) + } +} + +// Tests if NamespacePauseContainer was provisioned in PostUnmarshalTask +func TestPostUnmarshalTaskWithPIDSharing(t *testing.T) { + for _, aTest := range namespaceTests { + testTaskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + PidMode: strptr(aTest.PIDMode), + IpcMode: strptr(aTest.IPCMode), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("container1"), + }, + { + Name: strptr("container2"), + }, + }, + } + + seqNum := int64(42) + task, err := TaskFromACS(&testTaskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, aTest.PIDMode, task.getPIDMode()) + assert.Equal(t, aTest.IPCMode, task.getIPCMode()) + assert.Equal(t, 2, len(task.Containers)) // before PostUnmarshalTask + cfg := config.Config{} + task.PostUnmarshalTask(&cfg, nil, nil, nil, nil) + if aTest.ShouldProvision { + assert.Equal(t, 3, len(task.Containers), "Namespace Pause Container should be created.") + } else { + assert.Equal(t, 2, len(task.Containers), "Namespace Pause Container should NOT be created.") + } + } +} + +func TestNamespaceProvisionDependencyAndHostConfig(t *testing.T) { + for _, aTest := range namespaceTests { + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + PidMode: strptr(aTest.PIDMode), + IpcMode: strptr(aTest.IPCMode), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("container1"), + }, + { + Name: strptr("container2"), + }, + }, + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, aTest.PIDMode, task.getPIDMode()) + assert.Equal(t, aTest.IPCMode, task.getIPCMode()) + assert.Equal(t, 2, len(task.Containers)) // before PostUnmarshalTask + cfg := 
config.Config{} + task.PostUnmarshalTask(&cfg, nil, nil, nil, nil) + if !aTest.ShouldProvision { + assert.Equal(t, 2, len(task.Containers), "Namespace Pause Container should NOT be created.") + docMaps := dockerMap(task) + for _, container := range task.Containers { + //configure HostConfig for each container + dockHostCfg, err := task.DockerHostConfig(container, docMaps, defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + assert.Equal(t, task.IPCMode, string(dockHostCfg.IpcMode)) + assert.Equal(t, task.PIDMode, string(dockHostCfg.PidMode)) + switch aTest.IPCMode { + case "host": + assert.True(t, dockHostCfg.IpcMode.IsHost()) + case "none": + assert.True(t, dockHostCfg.IpcMode.IsNone()) + case "": + assert.True(t, dockHostCfg.IpcMode.IsEmpty()) + } + switch aTest.PIDMode { + case "host": + assert.True(t, dockHostCfg.PidMode.IsHost()) + case "": + assert.Equal(t, "", string(dockHostCfg.PidMode)) + } + } + } else { + assert.Equal(t, 3, len(task.Containers), "Namespace Pause Container should be created.") + + namespacePause, ok := task.ContainerByName(NamespacePauseContainerName) + if !ok { + t.Fatal("Namespace Pause Container not found") + } + + docMaps := dockerMap(task) + dockerName := docMaps[namespacePause.Name].DockerID + + for _, container := range task.Containers { + //configure HostConfig for each container + dockHostCfg, err := task.DockerHostConfig(container, docMaps, defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + if namespacePause == container { + // Expected behavior for IPCMode="task" is "shareable" + if aTest.IPCMode == "task" { + assert.True(t, dockHostCfg.IpcMode.IsShareable()) + } else { + assert.True(t, dockHostCfg.IpcMode.IsEmpty()) + } + // Expected behavior for any PIDMode is "" + assert.Equal(t, "", string(dockHostCfg.PidMode)) + } else { + switch aTest.IPCMode { + case "task": + assert.True(t, dockHostCfg.IpcMode.IsContainer()) + assert.Equal(t, dockerName, dockHostCfg.IpcMode.Container()) + case 
"host": + assert.True(t, dockHostCfg.IpcMode.IsHost()) + } + + switch aTest.PIDMode { + case "task": + assert.True(t, dockHostCfg.PidMode.IsContainer()) + assert.Equal(t, dockerName, dockHostCfg.PidMode.Container()) + case "host": + assert.True(t, dockHostCfg.PidMode.IsHost()) + } + } + } + } + } +} + +func TestAddNamespaceSharingProvisioningDependency(t *testing.T) { + for _, aTest := range namespaceTests { + testTask := &Task{ + PIDMode: aTest.PIDMode, + IPCMode: aTest.IPCMode, + Containers: []*apicontainer.Container{ + { + Name: "c1", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + { + Name: "c2", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + cfg := &config.Config{ + PauseContainerImageName: "pause-container-image-name", + PauseContainerTag: "pause-container-tag", + } + testTask.addNamespaceSharingProvisioningDependency(cfg) + if aTest.ShouldProvision { + pauseContainer, ok := testTask.ContainerByName(NamespacePauseContainerName) + require.True(t, ok, "Expected to find pause container") + assert.True(t, pauseContainer.Essential, "Pause Container should be essential") + assert.Equal(t, len(testTask.Containers), 3) + for _, cont := range testTask.Containers { + // Check if dependency to Pause container was set + if cont.Name == NamespacePauseContainerName { + continue + } + found := false + for _, contDep := range cont.TransitionDependenciesMap[apicontainerstatus.ContainerPulled].ContainerDependencies { + if contDep.ContainerName == NamespacePauseContainerName && contDep.SatisfiedStatus == apicontainerstatus.ContainerRunning { + found = true + } + } + assert.True(t, found, "Dependency should have been found") + } + } else { + assert.Equal(t, len(testTask.Containers), 2, "Pause Container should not have been added") + } + + } +} + +func TestTaskFromACS(t *testing.T) { + intptr := func(i int64) *int64 { + return &i 
+ } + boolptr := func(b bool) *bool { + return &b + } + floatptr := func(f float64) *float64 { + return &f + } + // Testing type conversions, bleh. At least the type conversion itself + // doesn't look this messy. + taskFromAcs := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName"), + Cpu: intptr(10), + Command: []*string{strptr("command"), strptr("command2")}, + EntryPoint: []*string{strptr("sh"), strptr("-c")}, + Environment: map[string]*string{"key": strptr("value")}, + Essential: boolptr(true), + Image: strptr("image:tag"), + Links: []*string{strptr("link1"), strptr("link2")}, + Memory: intptr(100), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("/container/path"), + ReadOnly: boolptr(true), + SourceVolume: strptr("sourceVolume"), + }, + }, + Overrides: strptr(`{"command":["a","b","c"]}`), + PortMappings: []*ecsacs.PortMapping{ + { + HostPort: intptr(800), + ContainerPort: intptr(900), + Protocol: strptr("udp"), + }, + }, + VolumesFrom: []*ecsacs.VolumeFrom{ + { + ReadOnly: boolptr(true), + SourceContainer: strptr("volumeLink"), + }, + }, + DockerConfig: &ecsacs.DockerConfig{ + Config: strptr("config json"), + HostConfig: strptr("hostconfig json"), + Version: strptr("version string"), + }, + Secrets: []*ecsacs.Secret{ + { + Name: strptr("secret"), + ValueFrom: strptr("/test/secret"), + Provider: strptr("ssm"), + Region: strptr("us-west-2"), + }, + }, + EnvironmentFiles: []*ecsacs.EnvironmentFile{ + { + Value: strptr("s3://bucketName/envFile"), + Type: strptr("s3"), + }, + }, + }, + }, + Volumes: []*ecsacs.Volume{ + { + Name: strptr("volName"), + Type: strptr("host"), + Host: &ecsacs.HostVolumeProperties{ + SourcePath: strptr("/host/path"), + }, + }, + }, + Associations: []*ecsacs.Association{ + { + Containers: []*string{ + strptr("myName"), + }, + Content: &ecsacs.EncodedString{ + Encoding: 
strptr("base64"), + Value: strptr("val"), + }, + Name: strptr("gpu1"), + Type: strptr("gpu"), + }, + { + Containers: []*string{ + strptr("myName"), + }, + Content: &ecsacs.EncodedString{ + Encoding: strptr("base64"), + Value: strptr("val"), + }, + Name: strptr("dev1"), + Type: strptr("elastic-inference"), + }, + }, + RoleCredentials: getACSIAMRoleCredentials(), + Cpu: floatptr(2.0), + Memory: intptr(512), + } + expectedTask := &Task{ + Arn: "myArn", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "myName", + Image: "image:tag", + Command: []string{"a", "b", "c"}, + Links: []string{"link1", "link2"}, + EntryPoint: &[]string{"sh", "-c"}, + Essential: true, + Environment: map[string]string{"key": "value"}, + CPU: 10, + Memory: 100, + MountPoints: []apicontainer.MountPoint{ + { + ContainerPath: "/container/path", + ReadOnly: true, + SourceVolume: "sourceVolume", + }, + }, + Overrides: apicontainer.ContainerOverrides{ + Command: &[]string{"a", "b", "c"}, + }, + Ports: []apicontainer.PortBinding{ + { + HostPort: 800, + ContainerPort: 900, + Protocol: apicontainer.TransportProtocolUDP, + }, + }, + VolumesFrom: []apicontainer.VolumeFrom{ + { + ReadOnly: true, + SourceContainer: "volumeLink", + }, + }, + DockerConfig: apicontainer.DockerConfig{ + Config: strptr("config json"), + HostConfig: strptr("hostconfig json"), + Version: strptr("version string"), + }, + Secrets: []apicontainer.Secret{ + { + Name: "secret", + ValueFrom: "/test/secret", + Provider: "ssm", + Region: "us-west-2", + }, + }, + EnvironmentFiles: []apicontainer.EnvironmentFile{ + { + Value: "s3://bucketName/envFile", + Type: "s3", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "volName", + Type: "host", + Volume: &taskresourcevolume.FSHostVolume{ + FSSourcePath: "/host/path", + }, + }, + }, + 
Associations: []Association{ + { + Containers: []string{ + "myName", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + { + Containers: []string{ + "myName", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "dev1", + Type: "elastic-inference", + }, + }, + StartSequenceNumber: 42, + CPU: 2.0, + Memory: 512, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + + seqNum := int64(42) + task, err := TaskFromACS(&taskFromAcs, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + + assert.NoError(t, err) + assert.EqualValues(t, expectedTask, task) +} + +func TestTaskUpdateKnownStatusHappyPath(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + } + + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskCreated, newStatus, "task status should depend on the earlist container status") + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus(), "task status should depend on the earlist container status") +} + +// TestTaskUpdateKnownStatusNotChangeToRunningWithEssentialContainerStopped tests when there is one essential +// container is stopped while the other containers are running, the task status shouldn't be changed to running +func TestTaskUpdateKnownStatusNotChangeToRunningWithEssentialContainerStopped(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskCreated, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + Essential: true, + }, + { + KnownStatusUnsafe: 
apicontainerstatus.ContainerRunning, + }, + }, + } + + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskStatusNone, newStatus, "task status should not move to running if essential container is stopped") + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus(), "task status should not move to running if essential container is stopped") +} + +// TestTaskUpdateKnownStatusToPendingWithEssentialContainerStopped tests when there is one essential container +// is stopped while other container status are prior to Running, the task status should be updated. +func TestTaskUpdateKnownStatusToPendingWithEssentialContainerStopped(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + }, + }, + } + + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskCreated, newStatus) + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus()) +} + +// TestTaskUpdateKnownStatusToPendingWithEssentialContainerStoppedWhenSteadyStateIsResourcesProvisioned +// tests when there is one essential container is stopped while other container status are prior to +// ResourcesProvisioned, the task status should be updated. 
+func TestTaskUpdateKnownStatusToPendingWithEssentialContainerStoppedWhenSteadyStateIsResourcesProvisioned(t *testing.T) { + resourcesProvisioned := apicontainerstatus.ContainerResourcesProvisioned + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + Essential: true, + SteadyStateStatusUnsafe: &resourcesProvisioned, + }, + }, + } + + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskCreated, newStatus) + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus()) +} + +// TestGetEarliestTaskStatusForContainersEmptyTask verifies that +// `getEarliestKnownTaskStatusForContainers` returns TaskStatusNone when invoked on +// a task with no containers +func TestGetEarliestTaskStatusForContainersEmptyTask(t *testing.T) { + testTask := &Task{} + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskStatusNone) +} + +// TestGetEarliestTaskStatusForContainersWhenKnownStatusIsNotSetForContainers verifies that +// `getEarliestKnownTaskStatusForContainers` returns TaskStatusNone when invoked on +// a task with containers that do not have the `KnownStatusUnsafe` field set +func TestGetEarliestTaskStatusForContainersWhenKnownStatusIsNotSetForContainers(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + {}, + {}, + }, + } + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskStatusNone) +} + +func TestGetEarliestTaskStatusForContainersWhenSteadyStateIsRunning(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + 
KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + } + + // Since a container is still in CREATED state, the earliest known status + // for the task based on its container statuses must be `apitaskstatus.TaskCreated` + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskCreated) + // Ensure that both containers are RUNNING, which means that the earliest known status + // for the task based on its container statuses must be `apitaskstatus.TaskRunning` + testTask.Containers[0].SetKnownStatus(apicontainerstatus.ContainerRunning) + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskRunning) +} + +func TestGetEarliestTaskStatusForContainersWhenSteadyStateIsResourceProvisioned(t *testing.T) { + resourcesProvisioned := apicontainerstatus.ContainerResourcesProvisioned + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + SteadyStateStatusUnsafe: &resourcesProvisioned, + }, + }, + } + + // Since a container is still in CREATED state, the earliest known status + // for the task based on its container statuses must be `apitaskstatus.TaskCreated` + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskCreated) + testTask.Containers[0].SetKnownStatus(apicontainerstatus.ContainerRunning) + // Even if all containers transition to RUNNING, the earliest known status + // for the task based on its container statuses would still be `apitaskstatus.TaskCreated` + // as one of the containers has RESOURCES_PROVISIONED as its steady state + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskCreated) + // All of the 
containers in the task have reached their steady state. Ensure + // that the earliest known status for the task based on its container states + // is now `apitaskstatus.TaskRunning` + testTask.Containers[2].SetKnownStatus(apicontainerstatus.ContainerResourcesProvisioned) + assert.Equal(t, testTask.getEarliestKnownTaskStatusForContainers(), apitaskstatus.TaskRunning) +} + +func TestTaskUpdateKnownStatusChecksSteadyStateWhenSetToRunning(t *testing.T) { + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + } + + // One of the containers is in CREATED state, expect task to be updated + // to apitaskstatus.TaskCreated + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskCreated, newStatus, "Incorrect status returned: %s", newStatus.String()) + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus()) + + // All of the containers are in RUNNING state, expect task to be updated + // to apitaskstatus.TaskRunning + testTask.Containers[0].SetKnownStatus(apicontainerstatus.ContainerRunning) + newStatus = testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskRunning, newStatus, "Incorrect status returned: %s", newStatus.String()) + assert.Equal(t, apitaskstatus.TaskRunning, testTask.GetKnownStatus()) +} + +func TestTaskUpdateKnownStatusChecksSteadyStateWhenSetToResourceProvisioned(t *testing.T) { + resourcesProvisioned := apicontainerstatus.ContainerResourcesProvisioned + testTask := &Task{ + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + Containers: []*apicontainer.Container{ + { + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + Essential: true, + }, + { + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + Essential: true, + }, + { + 
KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + Essential: true, + SteadyStateStatusUnsafe: &resourcesProvisioned, + }, + }, + } + + // One of the containers is in CREATED state, expect task to be updated + // to apitaskstatus.TaskCreated + newStatus := testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskCreated, newStatus, "Incorrect status returned: %s", newStatus.String()) + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus()) + + // All of the containers are in RUNNING state, but one of the containers + // has its steady state set to RESOURCES_PROVISIONED, do not expect task to be + // updated to apitaskstatus.TaskRunning + testTask.Containers[0].SetKnownStatus(apicontainerstatus.ContainerRunning) + newStatus = testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskStatusNone, newStatus, "Incorrect status returned: %s", newStatus.String()) + assert.Equal(t, apitaskstatus.TaskCreated, testTask.GetKnownStatus()) + + // All of the containers have reached their steady states, expect the task + // to be updated to `apitaskstatus.TaskRunning` + testTask.Containers[2].SetKnownStatus(apicontainerstatus.ContainerResourcesProvisioned) + newStatus = testTask.updateTaskKnownStatus() + assert.Equal(t, apitaskstatus.TaskRunning, newStatus, "Incorrect status returned: %s", newStatus.String()) + assert.Equal(t, apitaskstatus.TaskRunning, testTask.GetKnownStatus()) +} + +func assertSetStructFieldsEqual(t *testing.T, expected, actual interface{}) { + for i := 0; i < reflect.TypeOf(expected).NumField(); i++ { + expectedValue := reflect.ValueOf(expected).Field(i) + // All the values we actually expect to see are valid and non-nil + if !expectedValue.IsValid() || ((expectedValue.Kind() == reflect.Map || expectedValue.Kind() == reflect.Slice) && expectedValue.IsNil()) { + continue + } + expectedVal := expectedValue.Interface() + actualVal := reflect.ValueOf(actual).Field(i).Interface() + if !reflect.DeepEqual(expectedVal, 
actualVal) { + t.Fatalf("Field %v did not match: %v != %v", reflect.TypeOf(expected).Field(i).Name, expectedVal, actualVal) + } + } +} + +// TestGetIDErrorPaths performs table tests on GetID with erroneous taskARNs +func TestGetIDErrorPaths(t *testing.T) { + testCases := []struct { + arn string + name string + }{ + {"", "EmptyString"}, + {"invalidArn", "InvalidARN"}, + {"arn:aws:ecs:region:account-id:task:task-id", "IncorrectSections"}, + {"arn:aws:ecs:region:account-id:task", "IncorrectResourceSections"}, + } + + task := Task{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + task.Arn = tc.arn + taskID, err := task.GetID() + assert.Error(t, err, "GetID should return an error") + assert.Empty(t, taskID, "ID should be empty") + }) + } +} + +// TestGetIDHappyPath validates the happy path of GetID +func TestGetIDHappyPath(t *testing.T) { + taskNormalARN := Task{ + Arn: "arn:aws:ecs:region:account-id:task/task-id", + } + taskLongARN := Task{ + Arn: "arn:aws:ecs:region:account-id:task/cluster-name/task-id", + } + + taskID, err := taskNormalARN.GetID() + assert.NoError(t, err) + assert.Equal(t, "task-id", taskID) + + taskID, err = taskLongARN.GetID() + assert.NoError(t, err) + assert.Equal(t, "task-id", taskID) + +} + +// TestTaskGetPrimaryENI tests the eni can be correctly acquired by calling GetTaskPrimaryENI +func TestTaskGetPrimaryENI(t *testing.T) { + enisOfTask := []*apieni.ENI{ + { + ID: "id", + }, + } + testTask := &Task{ + ENIs: enisOfTask, + } + + eni := testTask.GetPrimaryENI() + assert.NotNil(t, eni) + assert.Equal(t, "id", eni.ID) + + testTask.ENIs = nil + eni = testTask.GetPrimaryENI() + assert.Nil(t, eni) +} + +// TestTaskFromACSWithOverrides tests the container command is overridden correctly +func TestTaskFromACSWithOverrides(t *testing.T) { + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: 
strptr("myName1"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("volumeContainerPath1"), + SourceVolume: strptr("volumeName1"), + }, + }, + Overrides: strptr(`{"command": ["foo", "bar"]}`), + }, + { + Name: strptr("myName2"), + Command: []*string{strptr("command")}, + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("volumeContainerPath2"), + SourceVolume: strptr("volumeName2"), + }, + }, + }, + }, + } + + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 2, len(task.Containers)) // before PostUnmarshalTask + + assert.Equal(t, task.Containers[0].Command[0], "foo") + assert.Equal(t, task.Containers[0].Command[1], "bar") + assert.Equal(t, task.Containers[1].Command[0], "command") +} + +// TestSetPullStartedAt tests the task SetPullStartedAt +func TestSetPullStartedAt(t *testing.T) { + testTask := &Task{} + + t1 := time.Now() + t2 := t1.Add(1 * time.Second) + + testTask.SetPullStartedAt(t1) + assert.Equal(t, t1, testTask.GetPullStartedAt(), "first set of pullStartedAt should succeed") + + testTask.SetPullStartedAt(t2) + assert.Equal(t, t1, testTask.GetPullStartedAt(), "second set of pullStartedAt should have no impact") +} + +// TestSetExecutionStoppedAt tests the task SetExecutionStoppedAt +func TestSetExecutionStoppedAt(t *testing.T) { + testTask := &Task{} + + t1 := time.Now() + t2 := t1.Add(1 * time.Second) + + testTask.SetExecutionStoppedAt(t1) + assert.Equal(t, t1, testTask.GetExecutionStoppedAt(), "first set of executionStoppedAt should succeed") + + testTask.SetExecutionStoppedAt(t2) + assert.Equal(t, t1, testTask.GetExecutionStoppedAt(), "second set of executionStoppedAt should have no impact") +} + +func TestApplyExecutionRoleLogsAuthSet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + credentialsIDInTask := 
"credsid" + expectedEndpoint := "/v2/credentials/" + credentialsIDInTask + + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: "foo", + Config: map[string]string{"foo": "bar"}, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + task := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "testFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + ExecutionCredentialsID: credentialsIDInTask, + } + + taskCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"}, + } + credentialsManager.EXPECT().GetTaskCredentials(credentialsIDInTask).Return(taskCredentials, true) + task.initializeCredentialsEndpoint(credentialsManager) + + cfg, err := task.DockerHostConfig(task.Containers[0], dockerMap(task), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + err = task.ApplyExecutionRoleLogsAuth(cfg, credentialsManager) + assert.Nil(t, err) + + endpoint, ok := cfg.LogConfig.Config["awslogs-credentials-endpoint"] + assert.True(t, ok) + assert.Equal(t, expectedEndpoint, endpoint) +} + +func TestApplyExecutionRoleLogsAuthNoConfigInHostConfig(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + credentialsIDInTask := "credsid" + expectedEndpoint := "/v2/credentials/" + credentialsIDInTask + + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: "foo", + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + task := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "testFamily", + Version: "1", 
+ Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + ExecutionCredentialsID: credentialsIDInTask, + } + + taskCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"}, + } + credentialsManager.EXPECT().GetTaskCredentials(credentialsIDInTask).Return(taskCredentials, true) + task.initializeCredentialsEndpoint(credentialsManager) + + cfg, err := task.DockerHostConfig(task.Containers[0], dockerMap(task), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + err = task.ApplyExecutionRoleLogsAuth(cfg, credentialsManager) + assert.Nil(t, err) + + endpoint, ok := cfg.LogConfig.Config["awslogs-credentials-endpoint"] + assert.True(t, ok) + assert.Equal(t, expectedEndpoint, endpoint) +} + +func TestApplyExecutionRoleLogsAuthFailEmptyCredentialsID(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: "foo", + Config: map[string]string{"foo": "bar"}, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + task := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "testFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + } + + task.initializeCredentialsEndpoint(credentialsManager) + + cfg, err := task.DockerHostConfig(task.Containers[0], dockerMap(task), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + err = task.ApplyExecutionRoleLogsAuth(cfg, credentialsManager) + assert.Error(t, err) +} + +func 
TestApplyExecutionRoleLogsAuthFailNoCredentialsForTask(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + + credentialsIDInTask := "credsid" + + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: "foo", + Config: map[string]string{"foo": "bar"}, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + task := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "testFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + ExecutionCredentialsID: credentialsIDInTask, + } + + credentialsManager.EXPECT().GetTaskCredentials(credentialsIDInTask).Return(credentials.TaskIAMRoleCredentials{}, false) + task.initializeCredentialsEndpoint(credentialsManager) + + cfg, err := task.DockerHostConfig(task.Containers[0], dockerMap(task), defaultDockerClientAPIVersion, &config.Config{}) + assert.Error(t, err) + + err = task.ApplyExecutionRoleLogsAuth(cfg, credentialsManager) + assert.Error(t, err) +} + +// TestSetMinimumMemoryLimit ensures that we set the correct minimum memory limit when the limit is too low +func TestSetMinimumMemoryLimit(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + Memory: uint(1), + }, + }, + } + + hostconfig, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Nil(t, err) + + assert.Equal(t, int64(apicontainer.DockerContainerMinimumMemoryInBytes), hostconfig.Memory) + + hostconfig, err = testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), dockerclient.Version_1_18, + &config.Config{}) + assert.Nil(t, err) + + assert.Equal(t, 
int64(apicontainer.DockerContainerMinimumMemoryInBytes), hostconfig.Memory) +} + +// TestContainerHealthConfig tests that we set the correct container health check config +func TestContainerHealthConfig(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + HealthCheckType: apicontainer.DockerHealthCheckType, + DockerConfig: apicontainer.DockerConfig{ + Config: aws.String(`{ + "HealthCheck":{ + "Test":["command"], + "Interval":5000000000, + "Timeout":4000000000, + "StartPeriod":60000000000, + "Retries":5} + }`), + }, + }, + }, + } + + config, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + assert.Nil(t, err) + require.NotNil(t, config.Healthcheck, "health config was not set in docker config") + assert.Equal(t, config.Healthcheck.Test, []string{"command"}) + assert.Equal(t, config.Healthcheck.Retries, 5) + assert.Equal(t, config.Healthcheck.Interval, 5*time.Second) + assert.Equal(t, config.Healthcheck.Timeout, 4*time.Second) + assert.Equal(t, config.Healthcheck.StartPeriod, 1*time.Minute) +} + +func TestRecordExecutionStoppedAt(t *testing.T) { + testCases := []struct { + essential bool + status apicontainerstatus.ContainerStatus + executionStoppedAtSet bool + msg string + }{ + { + essential: true, + status: apicontainerstatus.ContainerStopped, + executionStoppedAtSet: true, + msg: "essential container stopped should have executionStoppedAt set", + }, + { + essential: false, + status: apicontainerstatus.ContainerStopped, + executionStoppedAtSet: false, + msg: "non essential container stopped should not cause executionStoppedAt set", + }, + { + essential: true, + status: apicontainerstatus.ContainerRunning, + executionStoppedAtSet: false, + msg: "essential non-stop status change should not cause executionStoppedAt set", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Container status: %s, essential: %v, executionStoppedAt should be set: %v", tc.status, tc.essential, 
tc.executionStoppedAtSet), func(t *testing.T) { + task := &Task{} + task.RecordExecutionStoppedAt(&apicontainer.Container{ + Essential: tc.essential, + KnownStatusUnsafe: tc.status, + }) + assert.Equal(t, !tc.executionStoppedAtSet, task.GetExecutionStoppedAt().IsZero(), tc.msg) + }) + } +} + +func TestMarshalUnmarshalTaskASMResource(t *testing.T) { + + expectedCredentialsParameter := "secret-id" + expectedRegion := "us-west-2" + expectedExecutionCredentialsID := "credsid" + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &apicontainer.ASMAuthData{ + CredentialsParameter: expectedCredentialsParameter, + Region: expectedRegion, + }, + }, + }, + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + credentialsManager := mock_credentials.NewMockManager(ctrl) + + // create asm auth resource + res := asmauth.NewASMAuthResource( + task.Arn, + task.getAllASMAuthDataRequirements(), + expectedExecutionCredentialsID, + credentialsManager, + asmClientCreator) + res.SetKnownStatus(resourcestatus.ResourceRemoved) + + // add asm auth resource to task + task.AddResource(asmauth.ResourceName, res) + + // validate asm auth resource + resource, ok := task.getASMAuthResource() + assert.True(t, ok) + asmResource := resource[0].(*asmauth.ASMAuthResource) + req := asmResource.GetRequiredASMResources() + + assert.Equal(t, resourcestatus.ResourceRemoved, asmResource.GetKnownStatus()) + assert.Equal(t, expectedExecutionCredentialsID, asmResource.GetExecutionCredentialsID()) + assert.Equal(t, expectedCredentialsParameter, req[0].CredentialsParameter) + assert.Equal(t, expectedRegion, req[0].Region) + + // marshal and unmarshal task + marshal, err := json.Marshal(task) + 
assert.NoError(t, err) + + var otask Task + err = json.Unmarshal(marshal, &otask) + assert.NoError(t, err) + + // validate asm auth resource + oresource, ok := otask.getASMAuthResource() + assert.True(t, ok) + oasmResource := oresource[0].(*asmauth.ASMAuthResource) + oreq := oasmResource.GetRequiredASMResources() + + assert.Equal(t, resourcestatus.ResourceRemoved, oasmResource.GetKnownStatus()) + assert.Equal(t, expectedExecutionCredentialsID, oasmResource.GetExecutionCredentialsID()) + assert.Equal(t, expectedCredentialsParameter, oreq[0].CredentialsParameter) + assert.Equal(t, expectedRegion, oreq[0].Region) +} + +func TestSetTerminalReason(t *testing.T) { + + expectedTerminalReason := "Failed to provision resource" + overrideTerminalReason := "should not override terminal reason" + + task := &Task{} + + // set terminal reason string + task.SetTerminalReason(expectedTerminalReason) + assert.Equal(t, expectedTerminalReason, task.GetTerminalReason()) + + // try to override terminal reason string, should not overwrite + task.SetTerminalReason(overrideTerminalReason) + assert.Equal(t, expectedTerminalReason, task.GetTerminalReason()) +} + +func TestPopulateASMAuthData(t *testing.T) { + expectedUsername := "username" + expectedPassword := "password" + + credentialsParameter := "secret-id" + region := "us-west-2" + + credentialsID := "execution role" + accessKeyID := "akid" + secretAccessKey := "sakid" + sessionToken := "token" + executionRoleCredentials := credentials.IAMRoleCredentials{ + CredentialsID: credentialsID, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &apicontainer.ASMAuthData{ + CredentialsParameter: credentialsParameter, + Region: region, + }, + }, + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: 
make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + // returned asm data + asmAuthDataBytes, _ := json.Marshal(&asm.AuthDataValue{ + Username: aws.String(expectedUsername), + Password: aws.String(expectedPassword), + }) + asmAuthDataVal := string(asmAuthDataBytes) + asmSecretValue := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmAuthDataVal), + } + + // create asm auth resource + asmRes := asmauth.NewASMAuthResource( + task.Arn, + task.getAllASMAuthDataRequirements(), + credentialsID, + credentialsManager, + asmClientCreator) + + // add asm auth resource to task + task.AddResource(asmauth.ResourceName, asmRes) + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return( + credentials.TaskIAMRoleCredentials{ + ARN: "", + IAMRoleCredentials: executionRoleCredentials, + }, true), + asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Return(asmSecretValue, nil), + ) + + // create resource + err := asmRes.Create() + require.NoError(t, err) + + err = task.PopulateASMAuthData(container) + assert.NoError(t, err) + + dac := container.RegistryAuthentication.ASMAuthData.GetDockerAuthConfig() + assert.Equal(t, expectedUsername, dac.Username) + assert.Equal(t, expectedPassword, dac.Password) +} + +func TestPopulateASMAuthDataNoASMResource(t *testing.T) { + + credentialsParameter := "secret-id" + region := "us-west-2" + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &apicontainer.ASMAuthData{ + 
CredentialsParameter: credentialsParameter, + Region: region, + }, + }, + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + // asm resource not added to task, call returns error + err := task.PopulateASMAuthData(container) + assert.Error(t, err) + +} + +func TestPopulateASMAuthDataNoDockerAuthConfig(t *testing.T) { + credentialsParameter := "secret-id" + region := "us-west-2" + credentialsID := "execution role" + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &apicontainer.ASMAuthData{ + CredentialsParameter: credentialsParameter, + Region: region, + }, + }, + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + + // create asm auth resource + asmRes := asmauth.NewASMAuthResource( + task.Arn, + task.getAllASMAuthDataRequirements(), + credentialsID, + credentialsManager, + asmClientCreator) + + // add asm auth resource to task + task.AddResource(asmauth.ResourceName, asmRes) + + // asm resource does not return docker auth config, call returns error + err := task.PopulateASMAuthData(container) + assert.Error(t, err) +} + +func TestPostUnmarshalTaskASMDockerAuth(t *testing.T) { + credentialsParameter := "secret-id" + region := "us-west-2" + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: &apicontainer.ASMAuthData{ + CredentialsParameter: credentialsParameter, + Region: region, + }, + }, + 
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: testTaskARN, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{} + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + CredentialsManager: credentialsManager, + }, + } + + err := task.PostUnmarshalTask(cfg, credentialsManager, resFields, nil, nil) + assert.NoError(t, err) +} + +func TestPostUnmarshalTaskSecret(t *testing.T) { + secret := apicontainer.Secret{ + Provider: "ssm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: testTaskARN, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{} + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + } + + err := task.PostUnmarshalTask(cfg, credentialsManager, resFields, nil, nil) + assert.NoError(t, err) +} + +func TestPostUnmarshalTaskASMSecret(t *testing.T) { + secret := apicontainer.Secret{ + Provider: 
"asm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: testTaskARN, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{} + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + CredentialsManager: credentialsManager, + }, + } + + resourceDep := apicontainer.ResourceDependency{ + Name: asmsecret.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated), + } + + err := task.PostUnmarshalTask(cfg, credentialsManager, resFields, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, 1, len(task.ResourcesMapUnsafe)) + assert.Equal(t, resourceDep, task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) +} + +func TestGetAllSSMSecretRequirements(t *testing.T) { + regionWest := "us-west-2" + regionEast := "us-east-1" + + secret1 := apicontainer.Secret{ + Provider: "ssm", + Name: "secret1", + Region: regionWest, + ValueFrom: "/test/secretName1", + } + + secret2 := apicontainer.Secret{ + Provider: "asm", + Name: "secret2", + Region: regionWest, + ValueFrom: "/test/secretName2", + } + + secret3 := apicontainer.Secret{ + Provider: "ssm", + Name: "secret3", + Region: regionEast, + ValueFrom: "/test/secretName3", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret1, 
secret2, secret3}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + reqs := task.getAllSSMSecretRequirements() + assert.Equal(t, secret1, reqs[regionWest][0]) + assert.Equal(t, 1, len(reqs[regionWest])) +} + +func TestInitializeAndGetSSMSecretResource(t *testing.T) { + secret := apicontainer.Secret{ + Provider: "ssm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + } + + task.initializeSSMSecretResource(credentialsManager, resFields) + + resourceDep := apicontainer.ResourceDependency{ + Name: ssmsecret.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated), + } + + assert.Equal(t, resourceDep, 
task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) + assert.Equal(t, 0, len(task.Containers[1].TransitionDependenciesMap)) + + _, ok := task.getSSMSecretsResource() + assert.True(t, ok) +} + +func TestRequiresSSMSecret(t *testing.T) { + secret := apicontainer.Secret{ + Provider: "ssm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + assert.Equal(t, true, task.requiresSSMSecret()) +} + +func TestRequiresSSMSecretNoSecret(t *testing.T) { + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + assert.Equal(t, false, task.requiresSSMSecret()) +} + +func TestRequiresASMSecret(t *testing.T) { + secret := apicontainer.Secret{ + Provider: "asm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container 
:= &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + assert.True(t, task.requiresASMSecret()) +} + +func TestRequiresASMSecretNoSecret(t *testing.T) { + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + assert.False(t, task.requiresASMSecret()) +} + +func TestGetAllASMSecretRequirements(t *testing.T) { + regionWest := "us-west-2" + regionEast := "us-east-1" + + secret1 := apicontainer.Secret{ + Provider: "asm", + Name: "secret1", + Region: regionWest, + ValueFrom: "/test/secretName1", + } + + secret2 := apicontainer.Secret{ + Provider: "asm", + Name: "secret2", + Region: regionWest, + ValueFrom: "/test/secretName2", + } + + secret3 := apicontainer.Secret{ + Provider: "ssm", + Name: "secret3", + Region: regionEast, + ValueFrom: "/test/secretName3", + } + + secret4 := apicontainer.Secret{ + Provider: "asm", + Name: "secret4", + Region: regionWest, + ValueFrom: 
"/test/secretName1", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret1, secret2, secret3, secret4}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + reqs := task.getAllASMSecretRequirements() + assert.Equal(t, secret1, reqs["/test/secretName1_us-west-2"]) + assert.Equal(t, secret2, reqs["/test/secretName2_us-west-2"]) + assert.Equal(t, 2, len(reqs)) +} + +func TestInitializeAndGetASMSecretResource(t *testing.T) { + secret := apicontainer.Secret{ + Provider: "asm", + Name: "secret", + Region: "us-west-2", + ValueFrom: "/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + container1 := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: nil, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + CredentialsManager: credentialsManager, + }, + } + + task.initializeASMSecretResource(credentialsManager, resFields) + + resourceDep := apicontainer.ResourceDependency{ + Name: 
asmsecret.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated), + } + + assert.Equal(t, resourceDep, task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) + assert.Equal(t, 0, len(task.Containers[1].TransitionDependenciesMap)) + + _, ok := task.getASMSecretsResource() + assert.True(t, ok) +} + +func TestPopulateSecrets(t *testing.T) { + secret1 := apicontainer.Secret{ + Provider: "ssm", + Name: "secret1", + Region: "us-west-2", + Type: "ENVIRONMENT_VARIABLE", + ValueFrom: "/test/secretName", + } + + secret2 := apicontainer.Secret{ + Provider: "asm", + Name: "secret2", + Region: "us-west-2", + Type: "ENVIRONMENT_VARIABLE", + ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName", + } + + secret3 := apicontainer.Secret{ + Provider: "ssm", + Name: "splunk-token", + Region: "us-west-1", + Target: "LOG_DRIVER", + ValueFrom: "/test/secretName1", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret1, secret2, secret3}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + hostConfig := &dockercontainer.HostConfig{} + logDriverName := "splunk" + hostConfig.LogConfig.Type = logDriverName + configMap := map[string]string{ + "splunk-option": "option", + } + hostConfig.LogConfig.Config = configMap + + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue(secretKeyWest1, "secretValue1") + + asmRes := &asmsecret.ASMSecretResource{} + asmRes.SetCachedSecretValue(asmSecretKeyWest1, "secretValue2") + + ssmRes.SetCachedSecretValue(secKeyLogDriver, "secretValue3") + + task.AddResource(ssmsecret.ResourceName, ssmRes) + task.AddResource(asmsecret.ResourceName, asmRes) 
+ + task.PopulateSecrets(hostConfig, container) + assert.Equal(t, "secretValue1", container.Environment["secret1"]) + assert.Equal(t, "secretValue2", container.Environment["secret2"]) + assert.Equal(t, "", container.Environment["secret3"]) + assert.Equal(t, "secretValue3", hostConfig.LogConfig.Config["splunk-token"]) + assert.Equal(t, "option", hostConfig.LogConfig.Config["splunk-option"]) +} + +func TestPopulateSecretsNoConfigInHostConfig(t *testing.T) { + secret1 := apicontainer.Secret{ + Provider: "ssm", + Name: "splunk-token", + Region: "us-west-1", + Target: "LOG_DRIVER", + ValueFrom: "/test/secretName1", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret1}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + hostConfig := &dockercontainer.HostConfig{} + logDriverName := "splunk" + hostConfig.LogConfig.Type = logDriverName + + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue(secKeyLogDriver, "secretValue1") + task.AddResource(ssmsecret.ResourceName, ssmRes) + task.PopulateSecrets(hostConfig, container) + assert.Equal(t, "secretValue1", hostConfig.LogConfig.Config["splunk-token"]) +} + +func TestPopulateSecretsAsEnvOnlySSM(t *testing.T) { + secret1 := apicontainer.Secret{ + Provider: "asm", + Name: "secret1", + Region: "us-west-2", + Type: "MOUNT_POINT", + ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName", + } + + secret2 := apicontainer.Secret{ + Provider: "asm", + Name: "secret2", + Region: "us-west-1", + ValueFrom: "/test/secretName1", + Target: "LOG_DRIVER", + } + + secret3 := apicontainer.Secret{ + Provider: "ssm", + Name: "secret3", + Region: "us-west-2", + Type: "ENVIRONMENT_VARIABLE", + ValueFrom: 
"/test/secretName", + } + + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + Secrets: []apicontainer.Secret{secret1, secret2, secret3}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + asmRes := &asmsecret.ASMSecretResource{} + asmRes.SetCachedSecretValue(asmSecretKeyWest1, "secretValue1") + asmRes.SetCachedSecretValue(secKeyLogDriver, "secretValue2") + + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue(secretKeyWest1, "secretValue3") + + task.AddResource(ssmsecret.ResourceName, ssmRes) + task.AddResource(asmsecret.ResourceName, asmRes) + + hostConfig := &dockercontainer.HostConfig{} + + task.PopulateSecrets(hostConfig, container) + + assert.Equal(t, "secretValue3", container.Environment["secret3"]) + assert.Equal(t, 1, len(container.Environment)) +} + +func TestAddGPUResource(t *testing.T) { + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + } + + container1 := &apicontainer.Container{ + Name: "myName1", + Image: "image:tag", + } + + association := []Association{ + { + Containers: []string{ + "myName", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + { + Containers: []string{ + "myName", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu2", + Type: "gpu", + }, + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + Associations: association, + } + + cfg := &config.Config{GPUSupportEnabled: true} + err := task.addGPUResource(cfg) + + assert.Equal(t, []string{"gpu1", "gpu2"}, container.GPUIDs) + assert.Equal(t, []string(nil), container1.GPUIDs) + 
assert.NoError(t, err) +} + +func TestAddGPUResourceWithInvalidContainer(t *testing.T) { + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + } + + association := []Association{ + { + Containers: []string{ + "myName1", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + Associations: association, + } + cfg := &config.Config{GPUSupportEnabled: true} + err := task.addGPUResource(cfg) + assert.Error(t, err) +} + +func TestPopulateGPUEnvironmentVariables(t *testing.T) { + container := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + GPUIDs: []string{"gpu1", "gpu2"}, + } + + container1 := &apicontainer.Container{ + Name: "myName1", + Image: "image:tag", + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container, container1}, + } + + task.populateGPUEnvironmentVariables() + + environment := make(map[string]string) + environment[NvidiaVisibleDevicesEnvVar] = "gpu1,gpu2" + + assert.Equal(t, environment, container.Environment) + assert.Equal(t, map[string]string(nil), container1.Environment) +} + +func TestDockerHostConfigNvidiaRuntime(t *testing.T) { + testTask := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{ + { + Name: "myName1", + Image: "image:tag", + GPUIDs: []string{"gpu1"}, + }, + }, + Associations: []Association{ + { + Containers: []string{ + "myName1", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + }, + NvidiaRuntime: config.DefaultNvidiaRuntime, + } + + cfg := &config.Config{GPUSupportEnabled: true, NvidiaRuntime: config.DefaultNvidiaRuntime} + testTask.addGPUResource(cfg) + dockerHostConfig, _ := 
testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Equal(t, testTask.NvidiaRuntime, dockerHostConfig.Runtime) +} + +func TestDockerHostConfigRuntimeWithoutGPU(t *testing.T) { + testTask := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{ + { + Name: "myName1", + Image: "image:tag", + }, + }, + } + + cfg := &config.Config{GPUSupportEnabled: true} + testTask.addGPUResource(cfg) + dockerHostConfig, _ := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Equal(t, "", dockerHostConfig.Runtime) +} + +func TestDockerHostConfigNoNvidiaRuntime(t *testing.T) { + testTask := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{ + { + Name: "myName1", + Image: "image:tag", + GPUIDs: []string{"gpu1"}, + }, + }, + Associations: []Association{ + { + Containers: []string{ + "myName1", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + }, + } + + cfg := &config.Config{GPUSupportEnabled: true} + testTask.addGPUResource(cfg) + _, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{}) + assert.Error(t, err) +} + +func TestDockerHostConfigNeuronRuntime(t *testing.T) { + testTask := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{ + { + Name: "myName1", + Image: "image:tag", + Environment: map[string]string{ + "AWS_NEURON_VISIBLE_DEVICES": "all", + }, + }, + }, + } + + dockerHostConfig, _ := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), defaultDockerClientAPIVersion, + &config.Config{InferentiaSupportEnabled: true}) + assert.Equal(t, neuronRuntime, dockerHostConfig.Runtime) +} + +func TestAssociationsByTypeAndContainer(t *testing.T) { + associationType := "elastic-inference" + container1 := &apicontainer.Container{ + Name: 
"containerName1", + } + container2 := &apicontainer.Container{ + Name: "containerName2", + } + association1 := Association{ + Containers: []string{container1.Name}, + Type: associationType, + Name: "dev1", + } + association2 := Association{ + Containers: []string{container1.Name, container2.Name}, + Type: associationType, + Name: "dev2", + } + task := &Task{ + Associations: []Association{association1, association2}, + } + + // container 1 is associated with association 1 and association 2 + assert.Equal(t, task.AssociationsByTypeAndContainer(associationType, container1.Name), + []string{association1.Name, association2.Name}) + // container 2 is associated with association 2 + assert.Equal(t, task.AssociationsByTypeAndContainer(associationType, container2.Name), + []string{association2.Name}) +} + +func TestAssociationByTypeAndName(t *testing.T) { + association1 := Association{ + Type: "elastic-inference", + Name: "dev1", + } + association2 := Association{ + Type: "other-type", + Name: "dev2", + } + task := &Task{ + Associations: []Association{association1, association2}, + } + + // positive cases + association, ok := task.AssociationByTypeAndName("elastic-inference", "dev1") + assert.Equal(t, *association, association1) + association, ok = task.AssociationByTypeAndName("other-type", "dev2") + assert.Equal(t, *association, association2) + + // negative cases + association, ok = task.AssociationByTypeAndName("elastic-inference", "dev2") + assert.False(t, ok) + association, ok = task.AssociationByTypeAndName("other-type", "dev1") + assert.False(t, ok) +} + +func TestTaskGPUEnabled(t *testing.T) { + testTask := &Task{ + Associations: []Association{ + { + Containers: []string{ + "myName1", + }, + Content: EncodedString{ + Encoding: "base64", + Value: "val", + }, + Name: "gpu1", + Type: "gpu", + }, + }, + } + + assert.True(t, testTask.isGPUEnabled()) +} + +func TestTaskGPUDisabled(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: 
"myName1", + }, + }, + } + assert.False(t, testTask.isGPUEnabled()) +} + +func TestInitializeContainerOrderingWithLinksAndVolumesFrom(t *testing.T) { + containerWithOnlyVolume := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "myName1"}}, + } + + containerWithOnlyLink := &apicontainer.Container{ + Name: "myName1", + Image: "image:tag", + Links: []string{"myName"}, + } + + containerWithBothVolumeAndLink := &apicontainer.Container{ + Name: "myName2", + Image: "image:tag", + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "myName"}}, + Links: []string{"myName1"}, + } + + containerWithNoVolumeOrLink := &apicontainer.Container{ + Name: "myName3", + Image: "image:tag", + } + + task := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{containerWithOnlyVolume, containerWithOnlyLink, + containerWithBothVolumeAndLink, containerWithNoVolumeOrLink}, + } + + err := task.initializeContainerOrderingForVolumes() + assert.NoError(t, err) + err = task.initializeContainerOrderingForLinks() + assert.NoError(t, err) + + containerResultWithVolume := task.Containers[0] + assert.Equal(t, "myName1", containerResultWithVolume.DependsOnUnsafe[0].ContainerName) + assert.Equal(t, ContainerOrderingCreateCondition, containerResultWithVolume.DependsOnUnsafe[0].Condition) + + containerResultWithLink := task.Containers[1] + assert.Equal(t, "myName", containerResultWithLink.DependsOnUnsafe[0].ContainerName) + assert.Equal(t, ContainerOrderingStartCondition, containerResultWithLink.DependsOnUnsafe[0].Condition) + + containerResultWithBothVolumeAndLink := task.Containers[2] + assert.Equal(t, "myName", containerResultWithBothVolumeAndLink.DependsOnUnsafe[0].ContainerName) + assert.Equal(t, ContainerOrderingCreateCondition, containerResultWithBothVolumeAndLink.DependsOnUnsafe[0].Condition) + assert.Equal(t, "myName1", 
containerResultWithBothVolumeAndLink.DependsOnUnsafe[1].ContainerName) + assert.Equal(t, ContainerOrderingStartCondition, containerResultWithBothVolumeAndLink.DependsOnUnsafe[1].Condition) + + containerResultWithNoVolumeOrLink := task.Containers[3] + assert.Equal(t, 0, len(containerResultWithNoVolumeOrLink.DependsOnUnsafe)) +} + +func TestInitializeContainerOrderingWithError(t *testing.T) { + containerWithVolumeError := &apicontainer.Container{ + Name: "myName", + Image: "image:tag", + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "dummyContainer"}}, + } + + containerWithLinkError1 := &apicontainer.Container{ + Name: "myName1", + Image: "image:tag", + Links: []string{"dummyContainer"}, + } + + containerWithLinkError2 := &apicontainer.Container{ + Name: "myName2", + Image: "image:tag", + Links: []string{"myName:link1:link2"}, + } + + task1 := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{containerWithVolumeError, containerWithLinkError1}, + } + + task2 := &Task{ + Arn: "test", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{containerWithVolumeError, containerWithLinkError2}, + } + + errVolume1 := task1.initializeContainerOrderingForVolumes() + assert.Error(t, errVolume1) + errLink1 := task1.initializeContainerOrderingForLinks() + assert.Error(t, errLink1) + + errVolume2 := task2.initializeContainerOrderingForVolumes() + assert.Error(t, errVolume2) + errLink2 := task2.initializeContainerOrderingForLinks() + assert.Error(t, errLink2) +} + +func TestTaskFromACSPerContainerTimeouts(t *testing.T) { + modelTimeout := int64(10) + expectedTimeout := uint(modelTimeout) + + taskFromACS := ecsacs.Task{ + Containers: []*ecsacs.Container{ + { + StartTimeout: aws.Int64(modelTimeout), + StopTimeout: aws.Int64(modelTimeout), + }, + }, + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, 
&ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + + assert.Equal(t, task.Containers[0].StartTimeout, expectedTimeout) + assert.Equal(t, task.Containers[0].StopTimeout, expectedTimeout) +} + +func TestGetContainerIndex(t *testing.T) { + task := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + }, + { + Name: "p", + Type: apicontainer.ContainerCNIPause, + }, + { + Name: "c2", + }, + }, + } + + assert.Equal(t, 0, task.GetContainerIndex("c1")) + assert.Equal(t, 1, task.GetContainerIndex("c2")) + assert.Equal(t, -1, task.GetContainerIndex("p")) +} + +func TestBuildCNIConfigRegularENIWithAppMesh(t *testing.T) { + for _, blockIMDS := range []bool{true, false} { + t.Run(fmt.Sprintf("When BlockInstanceMetadata is %t", blockIMDS), func(t *testing.T) { + testTask := &Task{} + testTask.AddTaskENI(getTestENI()) + testTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + cniConfig, err := testTask.BuildCNIConfig(true, &ecscni.Config{ + BlockInstanceMetadata: blockIMDS, + }) + assert.NoError(t, err) + // We expect 3 NetworkConfig objects in the cni Config wrapper object: + // ENI, Bridge and Appmesh + require.Len(t, cniConfig.NetworkConfigs, 3) + // The first one should be for the ENI. + var eniConfig ecscni.ENIConfig + err = json.Unmarshal(cniConfig.NetworkConfigs[0].CNINetworkConfig.Bytes, &eniConfig) + require.NoError(t, err) + assert.Equal(t, mac, eniConfig.MACAddress, eniConfig) + assert.Equal(t, []string{ipv4 + ipv4Block, ipv6 + ipv6Block}, eniConfig.IPAddresses) + assert.Equal(t, []string{ipv4Gateway}, eniConfig.GatewayIPAddresses) + assert.Equal(t, blockIMDS, eniConfig.BlockInstanceMetadata) + // The second one should be for the Bridge. 
+ var bridgeConfig ecscni.BridgeConfig + err = json.Unmarshal(cniConfig.NetworkConfigs[1].CNINetworkConfig.Bytes, &bridgeConfig) + require.NoError(t, err) + assert.Equal(t, "ecs-bridge", bridgeConfig.BridgeName) + // The third one should be for Appmesh. + var appMeshConfig ecscni.AppMeshConfig + err = json.Unmarshal(cniConfig.NetworkConfigs[2].CNINetworkConfig.Bytes, &appMeshConfig) + require.NoError(t, err) + assert.Equal(t, ignoredUID, appMeshConfig.IgnoredUID) + assert.Equal(t, proxyIngressPort, appMeshConfig.ProxyIngressPort) + assert.Equal(t, proxyEgressPort, appMeshConfig.ProxyEgressPort) + assert.Equal(t, appPort, appMeshConfig.AppPorts[0]) + assert.Equal(t, egressIgnoredIP, appMeshConfig.EgressIgnoredIPs[0]) + }) + } +} + +func TestBuildCNIConfigTrunkBranchENI(t *testing.T) { + for _, blockIMDS := range []bool{true, false} { + t.Run(fmt.Sprintf("When BlockInstanceMetadata is %t", blockIMDS), func(t *testing.T) { + testTask := &Task{} + testTask.AddTaskENI(&apieni.ENI{ + ID: "TestBuildCNIConfigTrunkBranchENI", + MacAddress: mac, + InterfaceAssociationProtocol: apieni.VLANInterfaceAssociationProtocol, + InterfaceVlanProperties: &apieni.InterfaceVlanProperties{ + VlanID: "1234", + TrunkInterfaceMacAddress: "macTrunk", + }, + SubnetGatewayIPV4Address: ipv4Gateway + ipv4Block, + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Primary: true, + Address: ipv4, + }, + }, + IPV6Addresses: []*apieni.ENIIPV6Address{ + { + Address: ipv6, + }, + }, + }) + + cniConfig, err := testTask.BuildCNIConfig(true, &ecscni.Config{ + BlockInstanceMetadata: blockIMDS, + }) + assert.NoError(t, err) + // We expect 2 NetworkConfig objects in the cni Config wrapper object: + // Branch ENI and Bridge. + require.Len(t, cniConfig.NetworkConfigs, 2) + // The first one should be for the ENI. 
+ var eniConfig ecscni.BranchENIConfig + err = json.Unmarshal(cniConfig.NetworkConfigs[0].CNINetworkConfig.Bytes, &eniConfig) + require.NoError(t, err) + assert.Equal(t, mac, eniConfig.BranchMACAddress, eniConfig) + assert.Equal(t, "macTrunk", eniConfig.TrunkMACAddress, eniConfig) + assert.Equal(t, "1234", eniConfig.BranchVlanID) + assert.Equal(t, []string{ipv4 + ipv4Block, ipv6 + ipv6Block}, eniConfig.IPAddresses) + assert.Equal(t, []string{ipv4Gateway}, eniConfig.GatewayIPAddresses) + assert.Equal(t, blockIMDS, eniConfig.BlockInstanceMetadata) + // The second one should be for the Bridge. + var bridgeConfig ecscni.BridgeConfig + err = json.Unmarshal(cniConfig.NetworkConfigs[1].CNINetworkConfig.Bytes, &bridgeConfig) + require.NoError(t, err) + assert.Equal(t, "ecs-bridge", bridgeConfig.BridgeName) + }) + } +} + +func TestPostUnmarshalTaskEnvfiles(t *testing.T) { + envfile := apicontainer.EnvironmentFile{ + Value: "s3://bucket/envfile", + Type: "s3", + } + + container := &apicontainer.Container{ + Name: "containerName", + Image: "image:tag", + EnvironmentFiles: []apicontainer.EnvironmentFile{envfile}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: testTaskARN, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{} + credentialsManager := mock_credentials.NewMockManager(ctrl) + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + CredentialsManager: credentialsManager, + }, + } + + resourceDep := apicontainer.ResourceDependency{ + Name: envFiles.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(envFiles.EnvFileCreated), + } + + err := task.PostUnmarshalTask(cfg, credentialsManager, resFields, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, 1, 
len(task.ResourcesMapUnsafe)) + assert.Equal(t, resourceDep, + task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) +} + +func TestInitializeAndGetEnvfilesResource(t *testing.T) { + envfile := apicontainer.EnvironmentFile{ + Value: "s3://bucket/envfile", + Type: "s3", + } + + container := &apicontainer.Container{ + Name: "containerName", + Image: "image:tag", + EnvironmentFiles: []apicontainer.EnvironmentFile{envfile}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "testArn", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{ + DataDir: "/ecs/data", + } + credentialsManager := mock_credentials.NewMockManager(ctrl) + + task.initializeEnvfilesResource(cfg, credentialsManager) + + resourceDep := apicontainer.ResourceDependency{ + Name: envFiles.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(envFiles.EnvFileCreated), + } + + assert.Equal(t, resourceDep, + task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) + + _, ok := task.getEnvfilesResource("containerName") + assert.True(t, ok) +} + +func TestRequiresEnvfiles(t *testing.T) { + envfile := apicontainer.EnvironmentFile{ + Value: "s3://bucket/envfile", + Type: "s3", + } + + container := &apicontainer.Container{ + Name: "containerName", + Image: "image:tag", + EnvironmentFiles: []apicontainer.EnvironmentFile{envfile}, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &Task{ + Arn: "testArn", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{container}, + } + + assert.Equal(t, true, requireEnvfiles(task)) +} + +func 
requireEnvfiles(task *Task) bool { + for _, container := range task.Containers { + if container.ShouldCreateWithEnvFiles() { + return true + } + } + return false +} + +func TestPopulateTaskARN(t *testing.T) { + task := &Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{ + { + Name: "test", + }, + }, + } + task.populateTaskARN() + assert.Equal(t, task.Arn, task.Containers[0].GetTaskARN()) +} + +func TestShouldEnableIPv6(t *testing.T) { + task := &Task{ + ENIs: []*apieni.ENI{getTestENI()}, + } + assert.True(t, task.shouldEnableIPv6()) + task.ENIs[0].IPV6Addresses = nil + assert.False(t, task.shouldEnableIPv6()) +} + +func getTestENI() *apieni.ENI { + return &apieni.ENI{ + ID: "test", + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Primary: true, + Address: ipv4, + }, + }, + MacAddress: mac, + IPV6Addresses: []*apieni.ENIIPV6Address{ + { + Address: ipv6, + }, + }, + SubnetGatewayIPV4Address: ipv4Gateway + ipv4Block, + } +} + +func TestPostUnmarshalTaskWithOptions(t *testing.T) { + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + numCalls := 0 + opt := func(optTask *Task) error { + assert.Equal(t, task, optTask) + numCalls++ + return nil + } + task.PostUnmarshalTask(&config.Config{}, nil, nil, nil, nil, opt, opt) + assert.Equal(t, 2, numCalls) +} diff --git a/agent/api/task/task_test_utils.go b/agent/api/task/task_test_utils.go new file mode 100644 index 00000000000..a4bfda48f84 --- /dev/null +++ b/agent/api/task/task_test_utils.go @@ -0,0 +1,146 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + + "github.com/aws/aws-sdk-go/aws" +) + +const ( + dockerIDPrefix = "dockerid-" + secretKeyWest1 = "/test/secretName_us-west-2" + secKeyLogDriver = "/test/secretName1_us-west-1" + asmSecretKeyWest1 = "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName_us-west-2" + ipv4 = "10.0.0.2" + ipv4Block = "/24" + ipv4Gateway = "10.0.0.1" + mac = "1.2.3.4" + ipv6 = "f0:234:23" + ipv6Block = "/64" + ignoredUID = "1337" + proxyIngressPort = "15000" + proxyEgressPort = "15001" + appPort = "9000" + egressIgnoredIP = "169.254.169.254" + testTaskARN = "arn:aws:ecs:us-west-2:1234567890:task/test-cluster/abc" +) + +var defaultDockerClientAPIVersion = dockerclient.Version_1_17 + +func strptr(s string) *string { return &s } + +func dockerMap(task *Task) map[string]*apicontainer.DockerContainer { + m := make(map[string]*apicontainer.DockerContainer) + for _, container := range task.Containers { + m[container.Name] = &apicontainer.DockerContainer{DockerID: dockerIDPrefix + container.Name, DockerName: "dockername-" + container.Name, Container: container} + } + return m +} + +func getACSIAMRoleCredentials() 
*ecsacs.IAMRoleCredentials { + testTime := ttime.Now().Truncate(1 * time.Second).Format(time.RFC3339) + return &ecsacs.IAMRoleCredentials{ + CredentialsId: strptr("credsId"), + AccessKeyId: strptr("keyId"), + Expiration: strptr(testTime), + RoleArn: strptr("roleArn"), + SecretAccessKey: strptr("OhhSecret"), + SessionToken: strptr("sessionToken"), + } +} + +func getACSEFSTask() *ecsacs.Task { + return &ecsacs.Task{ + Arn: strptr(testTaskARN), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName1"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("/some/path"), + SourceVolume: strptr("efsvolume"), + }, + }, + }, + }, + Volumes: []*ecsacs.Volume{ + { + Name: strptr("efsvolume"), + Type: strptr("efs"), + EfsVolumeConfiguration: &ecsacs.EFSVolumeConfiguration{ + AuthorizationConfig: &ecsacs.EFSAuthorizationConfig{ + Iam: strptr("ENABLED"), + AccessPointId: strptr("fsap-123"), + }, + FileSystemId: strptr("fs-12345"), + RootDirectory: strptr("/tmp"), + TransitEncryption: strptr("ENABLED"), + TransitEncryptionPort: aws.Int64(12345), + }, + }, + }, + } +} + +func getEFSTask() *Task { + return &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "efs-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "efs-volume-test", + Type: "efs", + Volume: getEFSVolumeConfig(), + }, + }, + } +} + +func getEFSVolumeConfig() *taskresourcevolume.EFSVolumeConfig { + return &taskresourcevolume.EFSVolumeConfig{ + AuthConfig: taskresourcevolume.EFSAuthConfig{ + Iam: "ENABLED", + AccessPointId: "fsap-123", + }, + FileSystemID: "fs-12345", + RootDirectory: "/my/root/dir", + TransitEncryption: 
"ENABLED", + TransitEncryptionPort: 12345, + } +} diff --git a/agent/api/task/task_unsupported.go b/agent/api/task/task_unsupported.go new file mode 100644 index 00000000000..be2eac955de --- /dev/null +++ b/agent/api/task/task_unsupported.go @@ -0,0 +1,92 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/cihub/seelog" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" +) + +const ( + minimumCPUPercent = 0 +) + +// PlatformFields consists of fields specific to Linux for a task +type PlatformFields struct{} + +func (task *Task) adjustForPlatform(cfg *config.Config) { + task.lock.Lock() + defer task.lock.Unlock() + task.MemoryCPULimitsEnabled = cfg.TaskCPUMemLimit.Enabled() +} + +func (task *Task) initializeCgroupResourceSpec(cgroupPath string, cGroupCPUPeriod time.Duration, resourceFields *taskresource.ResourceFields) error { + return nil +} + +func (task *Task) platformHostConfigOverride(hostConfig *dockercontainer.HostConfig) error { + return nil +} + +// duplicate of dockerCPUShares in task_linux.go +func (task *Task) dockerCPUShares(containerCPU uint) int64 { + if containerCPU <= 1 { + seelog.Debugf( + "Converting CPU shares to allowed minimum of 2 for task arn: [%s] 
and cpu shares: %d", + task.Arn, containerCPU) + return 2 + } + return int64(containerCPU) +} + +// requiresCredentialSpecResource returns true if at least one container in the task +// needs a valid credentialspec resource +func (task *Task) requiresCredentialSpecResource() bool { + return false +} + +// initializeCredentialSpecResource builds the resource dependency map for the credentialspec resource +func (task *Task) initializeCredentialSpecResource(config *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + return errors.New("task credentialspec is only supported on windows") +} + +// GetCredentialSpecResource retrieves credentialspec resource from resource map +func (task *Task) GetCredentialSpecResource() ([]taskresource.TaskResource, bool) { + return []taskresource.TaskResource{}, false +} + +func enableIPv6SysctlSetting(hostConfig *dockercontainer.HostConfig) { + return +} + +// requiresFSxWindowsFileServerResource returns true if at least one volume in the task +// is of type 'fsxWindowsFileServer' +func (task *Task) requiresFSxWindowsFileServerResource() bool { + return false +} + +// initializeFSxWindowsFileServerResource builds the resource dependency map for the fsxwindowsfileserver resource +func (task *Task) initializeFSxWindowsFileServerResource(cfg *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + return errors.New("task with FSx for Windows File Server volumes is only supported on Windows container instance") +} diff --git a/agent/api/task/task_windows.go b/agent/api/task/task_windows.go new file mode 100644 index 00000000000..efd19de3629 --- /dev/null +++ b/agent/api/task/task_windows.go @@ -0,0 +1,248 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "errors" + "runtime" + "time" + + "github.com/aws/amazon-ecs-agent/agent/utils" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + "github.com/aws/amazon-ecs-agent/agent/taskresource/fsxwindowsfileserver" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/cihub/seelog" + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + // cpuSharesPerCore represents the cpu shares of a cpu core in docker + cpuSharesPerCore = 1024 + percentageFactor = 100 + minimumCPUPercent = 1 +) + +// PlatformFields consists of fields specific to Windows for a task +type PlatformFields struct { + // CpuUnbounded determines whether a mix of unbounded and bounded CPU tasks + // are allowed to run in the instance + CpuUnbounded config.BooleanDefaultFalse `json:"cpuUnbounded"` + // MemoryUnbounded determines whether a mix of unbounded and bounded Memory tasks + // are allowed to run in the instance + MemoryUnbounded config.BooleanDefaultFalse `json:"memoryUnbounded"` +} + +var cpuShareScaleFactor = runtime.NumCPU() * cpuSharesPerCore + +// adjustForPlatform makes 
Windows-specific changes to the task after unmarshal +func (task *Task) adjustForPlatform(cfg *config.Config) { + task.downcaseAllVolumePaths() + platformFields := PlatformFields{ + CpuUnbounded: cfg.PlatformVariables.CPUUnbounded, + MemoryUnbounded: cfg.PlatformVariables.MemoryUnbounded, + } + task.PlatformFields = platformFields +} + +// downcaseAllVolumePaths forces all volume paths (host path and container path) +// to be lower-case. This is to account for Windows case-insensitivity and the +// case-sensitive string comparison that takes place elsewhere in the code. +func (task *Task) downcaseAllVolumePaths() { + for _, volume := range task.Volumes { + if hostVol, ok := volume.Volume.(*taskresourcevolume.FSHostVolume); ok { + hostVol.FSSourcePath = utils.GetCanonicalPath(hostVol.FSSourcePath) + } + } + for _, container := range task.Containers { + for i, mountPoint := range container.MountPoints { + // container.MountPoints is a slice of values, not a slice of pointers so + // we need to mutate the actual value instead of the copied value + container.MountPoints[i].ContainerPath = utils.GetCanonicalPath(mountPoint.ContainerPath) + } + } +} + +// platformHostConfigOverride provides an entry point to set up default HostConfig options to be +// passed to Docker API. +func (task *Task) platformHostConfigOverride(hostConfig *dockercontainer.HostConfig) error { + // Convert the CPUShares to CPUPercent + hostConfig.CPUPercent = hostConfig.CPUShares * percentageFactor / int64(cpuShareScaleFactor) + if hostConfig.CPUPercent == 0 && hostConfig.CPUShares != 0 { + // if CPU percent is too low, we set it to the minimum(linux and some windows tasks). + // if the CPU is explicitly set to zero or not set at all, and CPU unbounded + // tasks are allowed for windows, let CPU percent be zero. 
+ // this is a workaround to allow CPU unbounded tasks(https://github.com/aws/amazon-ecs-agent/issues/1127) + hostConfig.CPUPercent = minimumCPUPercent + } + hostConfig.CPUShares = 0 + + if hostConfig.Memory <= 0 && task.PlatformFields.MemoryUnbounded.Enabled() { + // As of version 17.06.2-ee-6 of docker. MemoryReservation is not supported on windows. This ensures that + // this parameter is not passed, allowing to launch a container without a hard limit. + hostConfig.MemoryReservation = 0 + } + + return nil +} + +// dockerCPUShares converts containerCPU shares if needed as per the logic stated below: +// Docker silently converts 0 to 1024 CPU shares, which is probably not what we +// want. Instead, we convert 0 to 2 to be closer to expected behavior. The +// reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not Docker's). +func (task *Task) dockerCPUShares(containerCPU uint) int64 { + if containerCPU <= 1 && !task.PlatformFields.CpuUnbounded.Enabled() { + seelog.Debugf( + "Converting CPU shares to allowed minimum of 2 for task arn: [%s] and cpu shares: %d", + task.Arn, containerCPU) + return 2 + } + return int64(containerCPU) +} + +func (task *Task) initializeCgroupResourceSpec(cgroupPath string, cGroupCPUPeriod time.Duration, resourceFields *taskresource.ResourceFields) error { + return errors.New("unsupported platform") +} + +// requiresCredentialSpecResource returns true if at least one container in the task +// needs a valid credentialspec resource +func (task *Task) requiresCredentialSpecResource() bool { + for _, container := range task.Containers { + if container.RequiresCredentialSpec() { + return true + } + } + return false +} + +// initializeCredentialSpecResource builds the resource dependency map for the credentialspec resource +func (task *Task) initializeCredentialSpecResource(config *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + credentialspecResource, err := 
credentialspec.NewCredentialSpecResource(task.Arn, config.AWSRegion, task.getAllCredentialSpecRequirements(), + task.ExecutionCredentialsID, credentialsManager, resourceFields.SSMClientCreator, resourceFields.S3ClientCreator) + if err != nil { + return err + } + + task.AddResource(credentialspec.ResourceName, credentialspecResource) + + // for every container that needs credential spec vending, it needs to wait for all credential spec resources + for _, container := range task.Containers { + if container.RequiresCredentialSpec() { + container.BuildResourceDependency(credentialspecResource.GetName(), + resourcestatus.ResourceStatus(credentialspec.CredentialSpecCreated), + apicontainerstatus.ContainerCreated) + } + } + + return nil +} + +// getAllCredentialSpecRequirements is used to build all the credential spec requirements for the task +func (task *Task) getAllCredentialSpecRequirements() []string { + reqs := []string{} + + for _, container := range task.Containers { + credentialSpec, err := container.GetCredentialSpec() + if err == nil && credentialSpec != "" && !utils.StrSliceContains(reqs, credentialSpec) { + reqs = append(reqs, credentialSpec) + } + } + + return reqs +} + +// GetCredentialSpecResource retrieves credentialspec resource from resource map +func (task *Task) GetCredentialSpecResource() ([]taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + res, ok := task.ResourcesMapUnsafe[credentialspec.ResourceName] + return res, ok +} + +func enableIPv6SysctlSetting(hostConfig *dockercontainer.HostConfig) { + return +} + +// requiresFSxWindowsFileServerResource returns true if at least one volume in the task +// is of type 'fsxWindowsFileServer' +func (task *Task) requiresFSxWindowsFileServerResource() bool { + for _, volume := range task.Volumes { + if volume.Type == FSxWindowsFileServerVolumeType { + return true + } + } + return false +} + +// initializeFSxWindowsFileServerResource builds the resource dependency map for the 
fsxwindowsfileserver resource +func (task *Task) initializeFSxWindowsFileServerResource(cfg *config.Config, credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields) error { + for i, vol := range task.Volumes { + if vol.Type != FSxWindowsFileServerVolumeType { + continue + } + + fsxWindowsFileServerVol, ok := vol.Volume.(*fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig) + if !ok { + return errors.New("task volume: volume configuration does not match the type 'fsxWindowsFileServer'") + } + + err := task.addFSxWindowsFileServerResource(cfg, credentialsManager, resourceFields, &task.Volumes[i], fsxWindowsFileServerVol) + if err != nil { + return err + } + } + return nil +} + +// addFSxWindowsFileServerResource creates a fsxwindowsfileserver resource for every fsxwindowsfileserver task volume +// and updates container dependency +func (task *Task) addFSxWindowsFileServerResource( + cfg *config.Config, + credentialsManager credentials.Manager, + resourceFields *taskresource.ResourceFields, + vol *TaskVolume, + fsxWindowsFileServerVol *fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig, +) error { + fsxwindowsfileserverResource, err := fsxwindowsfileserver.NewFSxWindowsFileServerResource( + task.Arn, + cfg.AWSRegion, + vol.Name, + FSxWindowsFileServerVolumeType, + fsxWindowsFileServerVol, + task.ExecutionCredentialsID, + credentialsManager, + resourceFields.SSMClientCreator, + resourceFields.ASMClientCreator, + resourceFields.FSxClientCreator) + if err != nil { + return err + } + + vol.Volume = &fsxwindowsfileserverResource.VolumeConfig + task.AddResource(resourcetype.FSxWindowsFileServerKey, fsxwindowsfileserverResource) + task.updateContainerVolumeDependency(vol.Name) + + return nil +} diff --git a/agent/api/task/task_windows_test.go b/agent/api/task/task_windows_test.go new file mode 100644 index 00000000000..9f5e6e13af5 --- /dev/null +++ b/agent/api/task/task_windows_test.go @@ -0,0 +1,725 @@ +// +build windows,unit + +// 
Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "encoding/json" + "fmt" + "runtime" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + "github.com/aws/amazon-ecs-agent/agent/taskresource/fsxwindowsfileserver" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/golang/mock/gomock" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + mock_asm_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_fsx_factory "github.com/aws/amazon-ecs-agent/agent/fsx/factory/mocks" + mock_s3_factory "github.com/aws/amazon-ecs-agent/agent/s3/factory/mocks" + mock_ssm_factory 
"github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" +) + +const ( + minDockerClientAPIVersion = dockerclient.Version_1_24 + + nonZeroMemoryReservationValue = 1 + expectedMemoryReservationValue = 0 +) + +func TestPostUnmarshalWindowsCanonicalPaths(t *testing.T) { + // Testing type conversions, bleh. At least the type conversion itself + // doesn't look this messy. + taskFromAcs := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr(`C:/Container/Path`), + SourceVolume: strptr("sourceVolume"), + }, + }, + }, + }, + Volumes: []*ecsacs.Volume{ + { + Name: strptr("sourceVolume"), + Type: strptr("host"), + Host: &ecsacs.HostVolumeProperties{ + SourcePath: strptr(`C:/Host/path`), + }, + }, + }, + } + expectedTask := &Task{ + Arn: "myArn", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "myName", + TaskARNUnsafe: "myArn", + MountPoints: []apicontainer.MountPoint{ + { + ContainerPath: `c:\container\path`, + SourceVolume: "sourceVolume", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "sourceVolume", + Type: "host", + Volume: &taskresourcevolume.FSHostVolume{ + FSSourcePath: `c:\host\path`, + }, + }, + }, + StartSequenceNumber: 42, + } + + seqNum := int64(42) + task, err := TaskFromACS(&taskFromAcs, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + cfg := config.Config{TaskCPUMemLimit: config.BooleanDefaultTrue{Value: config.ExplicitlyDisabled}} + task.PostUnmarshalTask(&cfg, nil, nil, nil, nil) + + for _, container := range task.Containers { // remove v3 endpoint from each container because it's randomly 
generated + removeV3andV4EndpointConfig(container) + } + assert.Equal(t, expectedTask.Containers, task.Containers, "Containers should be equal") + assert.Equal(t, expectedTask.Volumes, task.Volumes, "Volumes should be equal") +} + +// removeV3andV4EndpointConfig removes the v3 and v4 endpoint ids and the injected env for a container +// so that checking all other fields can be easier +func removeV3andV4EndpointConfig(container *apicontainer.Container) { + container.SetV3EndpointID("") + if container.Environment != nil { + delete(container.Environment, apicontainer.MetadataURIEnvironmentVariableName) + delete(container.Environment, apicontainer.MetadataURIEnvVarNameV4) + } + if len(container.Environment) == 0 { + container.Environment = nil + } +} + +func TestWindowsPlatformHostConfigOverride(t *testing.T) { + // Testing Windows platform override for HostConfig. + // Expects CPUShares to be converted to CPUPercent (floored at minimumCPUPercent) and CPUShares to be reset to 0 + + task := &Task{} + + hostConfig := &dockercontainer.HostConfig{Resources: dockercontainer.Resources{CPUShares: int64(1 * cpuSharesPerCore)}} + + task.platformHostConfigOverride(hostConfig) + assert.Equal(t, int64(1*cpuSharesPerCore*percentageFactor)/int64(cpuShareScaleFactor), hostConfig.CPUPercent) + assert.Equal(t, int64(0), hostConfig.CPUShares) + + hostConfig = &dockercontainer.HostConfig{Resources: dockercontainer.Resources{CPUShares: 10}} + task.platformHostConfigOverride(hostConfig) + assert.Equal(t, int64(minimumCPUPercent), hostConfig.CPUPercent) + assert.Empty(t, hostConfig.CPUShares) +} + +func TestDockerHostConfigRawConfigMerging(t *testing.T) { + // Use a struct that will marshal to the actual message we expect; not + // dockercontainer.HostConfig which will include a lot of zero values. 
+ rawHostConfigInput := struct { + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` + }{ + Privileged: true, + SecurityOpt: []string{"foo", "bar"}, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + Image: "image", + CPU: 10, + Memory: 100, + VolumesFrom: []apicontainer.VolumeFrom{{SourceContainer: "c2"}}, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + { + Name: "c2", + }, + }, + } + + hostConfig, configErr := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), + minDockerClientAPIVersion, &config.Config{}) + assert.Nil(t, configErr) + + expected := dockercontainer.HostConfig{ + Resources: dockercontainer.Resources{ + // Convert MB to B and set Memory + Memory: apicontainer.DockerContainerMinimumMemoryInBytes, + CPUPercent: minimumCPUPercent, + }, + Privileged: true, + SecurityOpt: []string{"foo", "bar"}, + VolumesFrom: []string{"dockername-c2"}, + } + + assert.Nil(t, expected.MemorySwappiness, "Expected default memorySwappiness to be nil") + assertSetStructFieldsEqual(t, expected, *hostConfig) +} + +func TestCPUPercentBasedOnUnboundedEnabled(t *testing.T) { + cpuShareScaleFactor := runtime.NumCPU() * cpuSharesPerCore + testcases := []struct { + cpu int64 + cpuUnbounded config.BooleanDefaultFalse + cpuPercent int64 + }{ + { + cpu: 0, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + cpuPercent: 0, + }, + { + cpu: 1, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + cpuPercent: 1, + }, + { + cpu: 0, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + 
cpuPercent: 1, + }, + { + cpu: 1, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + cpuPercent: 1, + }, + { + cpu: 100, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + cpuPercent: 100 * percentageFactor / int64(cpuShareScaleFactor), + }, + { + cpu: 100, + cpuUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + cpuPercent: 100 * percentageFactor / int64(cpuShareScaleFactor), + }, + } + for _, tc := range testcases { + t.Run(fmt.Sprintf("container cpu-%d,cpu unbounded tasks enabled- %t,expected cpu percent-%d", + tc.cpu, tc.cpuUnbounded.Enabled(), tc.cpuPercent), func(t *testing.T) { + testTask := &Task{ + Containers: []*apicontainer.Container{ + { + Name: "c1", + CPU: uint(tc.cpu), + }, + }, + PlatformFields: PlatformFields{ + CpuUnbounded: tc.cpuUnbounded, + }, + } + + hostconfig, err := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), + minDockerClientAPIVersion, &config.Config{}) + assert.Nil(t, err) + assert.Empty(t, hostconfig.CPUShares) + assert.Equal(t, tc.cpuPercent, hostconfig.CPUPercent) + }) + } +} + +func TestWindowsMemoryReservationOption(t *testing.T) { + // Testing sending a task to windows overriding MemoryReservation value + rawHostConfigInput := dockercontainer.HostConfig{ + Resources: dockercontainer.Resources{ + MemoryReservation: nonZeroMemoryReservationValue, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + if err != nil { + t.Fatal(err) + } + + testTask := &Task{ + Arn: "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: strptr(string(rawHostConfig)), + }, + }, + }, + PlatformFields: PlatformFields{ + MemoryUnbounded: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + }, + } + + // With MemoryUnbounded set to false, 
MemoryReservation is not overridden + cfg, configErr := testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), + defaultDockerClientAPIVersion, &config.Config{}) + + assert.Nil(t, configErr) + assert.EqualValues(t, nonZeroMemoryReservationValue, cfg.MemoryReservation) + + // With MemoryUnbounded set to true, tasks with no memory hard limit will have their memory reservation set to zero + testTask.PlatformFields.MemoryUnbounded = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg, configErr = testTask.DockerHostConfig(testTask.Containers[0], dockerMap(testTask), + defaultDockerClientAPIVersion, &config.Config{}) + + assert.Nil(t, configErr) + assert.EqualValues(t, expectedMemoryReservationValue, cfg.MemoryReservation) +} + +func TestGetCanonicalPath(t *testing.T) { + testcases := []struct { + name string + path string + expectedResult string + }{ + { + name: "folderPath", + path: `C:\myFile`, + expectedResult: `c:\myfile`, + }, + { + name: "drivePath", + path: `D:`, + expectedResult: `d:`, + }, + { + name: "pipePath", + path: `\\.\pipe\docker_engine`, + expectedResult: `\\.\pipe\docker_engine`, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + result := utils.GetCanonicalPath(tc.path) + assert.Equal(t, result, tc.expectedResult) + }) + } +} + +func TestRequiresCredentialSpecResource(t *testing.T) { + container1 := &apicontainer.Container{} + task1 := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{container1}, + } + + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + container2 := &apicontainer.Container{} + container2.DockerConfig.HostConfig = &hostConfig + task2 := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{container2}, + } + + testCases := []struct { + name string + task *Task + expectedOutput bool + }{ + { + name: "missing_credentialspec", + task: task1, + expectedOutput: false, + }, + { + name: "valid_credentialspec", + task: 
task2, + expectedOutput: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOutput, tc.task.requiresCredentialSpecResource()) + }) + } + +} + +func TestGetAllCredentialSpecRequirements(t *testing.T) { + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + container := &apicontainer.Container{} + container.DockerConfig.HostConfig = &hostConfig + + task := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{container}, + } + + allCredSpecReq := task.getAllCredentialSpecRequirements() + + credentialspec := "credentialspec:file://gmsa_gmsa-acct.json" + expectedCredSpecReq := []string{credentialspec} + + assert.EqualValues(t, expectedCredSpecReq, allCredSpecReq) +} + +func TestGetAllCredentialSpecRequirementsWithMultipleContainersUsingSameSpec(t *testing.T) { + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + c1 := &apicontainer.Container{} + c1.DockerConfig.HostConfig = &hostConfig + + c2 := &apicontainer.Container{} + c2.DockerConfig.HostConfig = &hostConfig + + task := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{c1, c2}, + } + + allCredSpecReq := task.getAllCredentialSpecRequirements() + + credentialspec := "credentialspec:file://gmsa_gmsa-acct.json" + expectedCredSpecReq := []string{credentialspec} + + assert.Equal(t, len(expectedCredSpecReq), len(allCredSpecReq)) + assert.EqualValues(t, expectedCredSpecReq, allCredSpecReq) +} + +func TestGetAllCredentialSpecRequirementsWithMultipleContainers(t *testing.T) { + hostConfig1 := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct-1.json\"]}" + hostConfig2 := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct-2.json\"]}" + + c1 := &apicontainer.Container{} + c1.DockerConfig.HostConfig = &hostConfig1 + + c2 := &apicontainer.Container{} + c2.DockerConfig.HostConfig = &hostConfig1 + + c3 := &apicontainer.Container{} + c3.DockerConfig.HostConfig = 
&hostConfig2 + + task := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{c1, c2, c3}, + } + + allCredSpecReq := task.getAllCredentialSpecRequirements() + + credentialspec1 := "credentialspec:file://gmsa_gmsa-acct-1.json" + credentialspec2 := "credentialspec:file://gmsa_gmsa-acct-2.json" + + expectedCredSpecReq := []string{credentialspec1, credentialspec2} + + assert.EqualValues(t, expectedCredSpecReq, allCredSpecReq) +} + +func TestInitializeAndGetCredentialSpecResource(t *testing.T) { + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + container := &apicontainer.Container{ + Name: "myName", + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + container.DockerConfig.HostConfig = &hostConfig + + task := &Task{ + Arn: "test", + Containers: []*apicontainer.Container{container}, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{ + AWSRegion: "test-aws-region", + } + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + } + + task.initializeCredentialSpecResource(cfg, credentialsManager, resFields) + + resourceDep := apicontainer.ResourceDependency{ + Name: credentialspec.ResourceName, + RequiredStatus: resourcestatus.ResourceStatus(credentialspec.CredentialSpecCreated), + } + + assert.Equal(t, resourceDep, task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0]) + + _, ok := task.GetCredentialSpecResource() + assert.True(t, ok) +} + +func 
TestGetCredentialSpecResource(t *testing.T) { + credentialspecResource := &credentialspec.CredentialSpecResource{} + task := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + task.AddResource(credentialspec.ResourceName, credentialspecResource) + + credentialspecTaskResource, ok := task.GetCredentialSpecResource() + assert.True(t, ok) + assert.NotEmpty(t, credentialspecTaskResource) +} + +func TestRequiresFSxWindowsFileServerResource(t *testing.T) { + task1 := &Task{ + Arn: "test1", + Volumes: []TaskVolume{ + { + Name: "fsxWindowsFileServerVolume", + Type: "fsxWindowsFileServer", + Volume: &fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig{ + FileSystemID: "fs-12345678", + RootDirectory: "root", + AuthConfig: fsxwindowsfileserver.FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn", + Domain: "test", + }, + }, + }, + }, + } + + task2 := &Task{ + Arn: "test2", + Volumes: []TaskVolume{}, + } + + testCases := []struct { + name string + task *Task + expectedOutput bool + }{ + { + name: "valid_fsxwindowsfileserver", + task: task1, + expectedOutput: true, + }, + { + name: "missing_fsxwindowsfileserver", + task: task2, + expectedOutput: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOutput, tc.task.requiresFSxWindowsFileServerResource()) + }) + } +} + +func TestInitializeAndAddFSxWindowsFileServerResource(t *testing.T) { + task := &Task{ + Arn: "test1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Volumes: []TaskVolume{ + { + Name: "fsxWindowsFileServerVolume", + Type: "fsxWindowsFileServer", + Volume: &fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig{ + FileSystemID: "fs-12345678", + RootDirectory: "root", + AuthConfig: fsxwindowsfileserver.FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn", + Domain: "test", + }, + }, + }, + }, + Containers: []*apicontainer.Container{ + { + Name: "myName", + MountPoints: 
[]apicontainer.MountPoint{ + { + SourceVolume: "fsxWindowsFileServerVolume", + ContainerPath: "/test", + ReadOnly: false, + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := &config.Config{ + AWSRegion: "test-aws-region", + } + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + SSMClientCreator: ssmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: credentialsManager, + }, + } + + task.initializeFSxWindowsFileServerResource(cfg, credentialsManager, resFields) + + assert.Equal(t, 1, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + assert.Equal(t, 1, len(task.Volumes), "Should have 1 volume") + assert.Equal(t, 1, len(task.Containers[0].TransitionDependenciesMap), "Should have 1 container volume dependency") + taskVol := task.Volumes[0] + assert.Equal(t, "fsxWindowsFileServerVolume", taskVol.Name) + assert.Equal(t, FSxWindowsFileServerVolumeType, taskVol.Type) + + resources := task.GetResources() + assert.Len(t, resources, 1) + _, ok := resources[0].(*fsxwindowsfileserver.FSxWindowsFileServerResource) + require.True(t, ok) +} + +func TestPostUnmarshalTaskWithFSxWindowsFileServerVolumes(t *testing.T) { + taskFromACS := ecsacs.Task{ + Arn: strptr("myArn"), + DesiredStatus: strptr("RUNNING"), + Family: strptr("myFamily"), + Version: strptr("1"), + Containers: []*ecsacs.Container{ + { + Name: strptr("myName1"), + MountPoints: []*ecsacs.MountPoint{ + { + ContainerPath: strptr("\\some\\path"), + 
SourceVolume: strptr("fsxWindowsFileServerVolume"), + }, + }, + }, + }, + Volumes: []*ecsacs.Volume{ + { + Name: strptr("fsxWindowsFileServerVolume"), + Type: strptr("fsxWindowsFileServer"), + FsxWindowsFileServerVolumeConfiguration: &ecsacs.FSxWindowsFileServerVolumeConfiguration{ + AuthorizationConfig: &ecsacs.FSxWindowsFileServerAuthorizationConfig{ + CredentialsParameter: strptr("arn"), + Domain: strptr("test"), + }, + FileSystemId: strptr("fs-12345678"), + RootDirectory: strptr("test"), + }, + }, + }, + } + seqNum := int64(42) + task, err := TaskFromACS(&taskFromACS, &ecsacs.PayloadMessage{SeqNum: &seqNum}) + assert.Nil(t, err, "Should be able to handle acs task") + assert.Equal(t, 1, len(task.Containers)) // before PostUnmarshalTask + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cfg := config.Config{ + AWSRegion: "test-aws-region", + } + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + + resFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + ASMClientCreator: asmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: credentialsManager, + }, + } + + task.PostUnmarshalTask(&cfg, credentialsManager, resFields, nil, nil) + assert.Equal(t, 1, len(task.Containers), "Should match the number of containers as before PostUnmarshalTask") + assert.Equal(t, 1, len(task.Volumes), "Should have 1 volume") + taskVol := task.Volumes[0] + assert.Equal(t, "fsxWindowsFileServerVolume", taskVol.Name) + assert.Equal(t, FSxWindowsFileServerVolumeType, taskVol.Type) + + resources := task.GetResources() + assert.Len(t, resources, 1) + _, ok := resources[0].(*fsxwindowsfileserver.FSxWindowsFileServerResource) + require.True(t, ok) + b, err := 
json.Marshal(resources[0]) + require.NoError(t, err) + assert.JSONEq(t, fmt.Sprintf(`{ + "name": "fsxWindowsFileServerVolume", + "taskARN": "myArn", + "executionCredentialsID": "", + "fsxWindowsFileServerVolumeConfiguration": { + "fileSystemId": "fs-12345678", + "rootDirectory": "test", + "authorizationConfig": { + "credentialsParameter": "arn", + "domain": "test" + }, + "fsxWindowsFileServerHostPath": "" + }, + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "NONE", + "knownStatus": "NONE" + }`), string(b)) +} diff --git a/agent/api/task/taskvolume.go b/agent/api/task/taskvolume.go new file mode 100644 index 00000000000..6bdbfe5dd84 --- /dev/null +++ b/agent/api/task/taskvolume.go @@ -0,0 +1,204 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "encoding/json" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/fsxwindowsfileserver" + taskresourcetypes "github.com/aws/amazon-ecs-agent/agent/taskresource/types" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + HostVolumeType = "host" + DockerVolumeType = "docker" + EFSVolumeType = "efs" + FSxWindowsFileServerVolumeType = "fsxWindowsFileServer" +) + +// TaskVolume is a definition of all the volumes available for containers to +// reference within a task. It must be named. +type TaskVolume struct { + Type string `json:"type"` + Name string `json:"name"` + Volume taskresourcevolume.Volume +} + +// UnmarshalJSON for TaskVolume determines the name and volume type, and +// unmarshals it into the appropriate HostVolume fulfilling interfaces +func (tv *TaskVolume) UnmarshalJSON(b []byte) error { + // Format: {name: volumeName, host: HostVolume, dockerVolumeConfiguration {}} + intermediate := make(map[string]json.RawMessage) + if err := json.Unmarshal(b, &intermediate); err != nil { + return err + } + name, ok := intermediate["name"] + if !ok { + return errors.New("invalid Volume; must include a name") + } + if err := json.Unmarshal(name, &tv.Name); err != nil { + return err + } + + volumeType, ok := intermediate["type"] + if !ok { + volumeType = []byte(`"host"`) + seelog.Infof("Unmarshal task volume: volume type not specified, default to host") + } + if err := json.Unmarshal(volumeType, &tv.Type); err != nil { + return err + } + + switch tv.Type { + case HostVolumeType: + return tv.unmarshalHostVolume(intermediate["host"]) + case DockerVolumeType: + return tv.unmarshalDockerVolume(intermediate["dockerVolumeConfiguration"]) + case EFSVolumeType: + return tv.unmarshalEFSVolume(intermediate["efsVolumeConfiguration"]) + case 
FSxWindowsFileServerVolumeType: + return tv.unmarshalFSxWindowsFileServerVolume(intermediate["fsxWindowsFileServerVolumeConfiguration"]) + default: + return errors.Errorf("unrecognized volume type: %q", tv.Type) + } +} + +// MarshalJSON overrides the logic for JSON-encoding a TaskVolume object +func (tv *TaskVolume) MarshalJSON() ([]byte, error) { + result := make(map[string]interface{}) + + if len(tv.Type) == 0 { + tv.Type = HostVolumeType + } + + result["name"] = tv.Name + result["type"] = tv.Type + + switch tv.Type { + case DockerVolumeType: + result["dockerVolumeConfiguration"] = tv.Volume + case HostVolumeType: + result["host"] = tv.Volume + case EFSVolumeType: + result["efsVolumeConfiguration"] = tv.Volume + case FSxWindowsFileServerVolumeType: + result["fsxWindowsFileServerVolumeConfiguration"] = tv.Volume + default: + return nil, errors.Errorf("unrecognized volume type: %q", tv.Type) + } + + return json.Marshal(result) +} + +func (tv *TaskVolume) unmarshalDockerVolume(data json.RawMessage) error { + if data == nil { + return errors.New("invalid volume: empty volume configuration") + } + var dockerVolumeConfig taskresourcevolume.DockerVolumeConfig + err := json.Unmarshal(data, &dockerVolumeConfig) + if err != nil { + return err + } + + tv.Volume = &dockerVolumeConfig + return nil +} + +func (tv *TaskVolume) unmarshalEFSVolume(data json.RawMessage) error { + if data == nil { + return errors.New("invalid volume: empty volume configuration") + } + var efsVolumeConfig taskresourcevolume.EFSVolumeConfig + err := json.Unmarshal(data, &efsVolumeConfig) + if err != nil { + return err + } + + tv.Volume = &efsVolumeConfig + return nil +} + +func (tv *TaskVolume) unmarshalFSxWindowsFileServerVolume(data json.RawMessage) error { + if data == nil { + return errors.New("invalid volume: empty volume configuration") + } + var fsxWindowsFileServerVolumeConfig fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig + err := json.Unmarshal(data, 
&fsxWindowsFileServerVolumeConfig) + if err != nil { + return err + } + + tv.Volume = &fsxWindowsFileServerVolumeConfig + return nil +} + +func (tv *TaskVolume) unmarshalHostVolume(data json.RawMessage) error { + if data == nil { + return errors.New("invalid volume: empty volume configuration") + } + + // Default to trying to unmarshal it as a FSHostVolume + var hostvolume taskresourcevolume.FSHostVolume + err := json.Unmarshal(data, &hostvolume) + if err != nil { + return err + } + if hostvolume.FSSourcePath == "" { + // If the FSSourcePath is empty, that must mean it was not an + // FSHostVolume (empty path is invalid for that type). + // Unmarshal it as local docker volume. + localVolume := &taskresourcevolume.LocalDockerVolume{} + json.Unmarshal(data, localVolume) + tv.Volume = localVolume + } else { + tv.Volume = &hostvolume + } + return nil +} + +// getEFSVolumeDriverName returns the driver name for creating the EFS volume. +func getEFSVolumeDriverName(cfg *config.Config) string { + if taskresourcevolume.UseECSVolumePlugin(cfg) { + return taskresourcevolume.ECSVolumePlugin + } + return taskresourcevolume.DockerLocalDriverName +} + +// getDockerVolumeResource retrieves docker volume resource from task resource map. +func (task *Task) getDockerVolumeResource() ([]taskresource.TaskResource, bool) { + task.lock.RLock() + defer task.lock.RUnlock() + + res, ok := task.ResourcesMapUnsafe[taskresourcetypes.DockerVolumeKey] + return res, ok +} + +// SetPausePIDInVolumeResources sets the pause container pid field in each volume resource. 
func (task *Task) SetPausePIDInVolumeResources(pid string) {
	// Look up the task's docker-volume resources; no-op when the task has none.
	resources, ok := task.getDockerVolumeResource()
	if !ok {
		return
	}

	for _, res := range resources {
		// NOTE(review): unchecked type assertion — panics if anything other
		// than a *VolumeResource is stored under the docker-volume key;
		// presumably the resource map only ever holds that type here — confirm
		// at the registration site.
		volRes := res.(*taskresourcevolume.VolumeResource)
		volRes.SetPauseContainerPID(pid)
	}
}
diff --git a/agent/api/task/taskvolume_test.go b/agent/api/task/taskvolume_test.go
new file mode 100644
index 00000000000..436891213ed
--- /dev/null
+++ b/agent/api/task/taskvolume_test.go
@@ -0,0 +1,686 @@
+// +build unit
+
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//	http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+ +package task + +import ( + "encoding/json" + "fmt" + "runtime" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMarshalUnmarshalOldTaskVolumes(t *testing.T) { + taskData := `{"volumes":[{"host":{"sourcePath":"/path"},"name":"1"},{"host":{"hostpath":""}, "name":"2"}]}` + + var out Task + err := json.Unmarshal([]byte(taskData), &out) + require.NoError(t, err, "Could not unmarshal task") + require.Len(t, out.Volumes, 2, "Incorrect number of volumes") + + var v1, v2 TaskVolume + + for _, v := range out.Volumes { + switch v.Name { + case "1": + v1 = v + case "2": + v2 = v + } + } + + _, ok := v1.Volume.(*taskresourcevolume.FSHostVolume) + assert.True(t, ok, "Expected v1 to be host volume") + assert.Equal(t, "/path", v1.Volume.(*taskresourcevolume.FSHostVolume).FSSourcePath, "Unmarshaled v2 didn't match marshalled v2") + _, ok = v2.Volume.(*taskresourcevolume.LocalDockerVolume) + assert.True(t, ok, "Expected v2 to be local empty volume") + assert.Equal(t, "", v2.Volume.Source(), "Expected v2 to have 'sourcepath' work correctly") +} + +func TestMarshalTaskVolumesEFS(t *testing.T) { + task := &Task{ + Arn: "test", + Volumes: []TaskVolume{ + { + Name: "1", + Type: EFSVolumeType, + Volume: &taskresourcevolume.EFSVolumeConfig{ + AuthConfig: taskresourcevolume.EFSAuthConfig{ + AccessPointId: "fsap-123", + Iam: "ENABLED", + }, + 
FileSystemID: "fs-12345", + RootDirectory: "/tmp", + TransitEncryption: "ENABLED", + TransitEncryptionPort: 23456, + }, + }, + }, + } + + marshal, err := json.Marshal(task) + require.NoError(t, err, "Could not marshal task") + expectedTaskDef := `{ + "Arn": "test", + "Family": "", + "Version": "", + "Containers": null, + "associations": null, + "resources": null, + "volumes": [ + { + "efsVolumeConfiguration": { + "authorizationConfig": { + "accessPointId": "fsap-123", + "iam": "ENABLED" + }, + "fileSystemId": "fs-12345", + "rootDirectory": "/tmp", + "transitEncryption": "ENABLED", + "transitEncryptionPort": 23456, + "dockerVolumeName": "" + }, + "name": "1", + "type": "efs" + } + ], + "DesiredStatus": "NONE", + "KnownStatus": "NONE", + "KnownTime": "0001-01-01T00:00:00Z", + "PullStartedAt": "0001-01-01T00:00:00Z", + "PullStoppedAt": "0001-01-01T00:00:00Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "NONE", + "StartSequenceNumber": 0, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "PlatformFields": %s + }` + if runtime.GOOS == "windows" { + // windows task defs have a special 'cpu/memory unbounded' field added. 
+ // see https://github.com/aws/amazon-ecs-agent/pull/1227 + expectedTaskDef = fmt.Sprintf(expectedTaskDef, `{"cpuUnbounded": null, "memoryUnbounded": null}`) + } else { + expectedTaskDef = fmt.Sprintf(expectedTaskDef, "{}") + } + require.JSONEq(t, expectedTaskDef, string(marshal)) +} + +func TestUnmarshalTaskVolumesEFS(t *testing.T) { + taskDef := []byte(`{ + "Arn": "test", + "Family": "", + "Version": "", + "Containers": null, + "associations": null, + "resources": null, + "volumes": [ + { + "efsVolumeConfiguration": { + "authorizationConfig": { + "accessPointId": "fsap-123", + "iam": "ENABLED" + }, + "fileSystemId": "fs-12345", + "rootDirectory": "/tmp", + "transitEncryption": "ENABLED", + "transitEncryptionPort": 23456, + "dockerVolumeName": "" + }, + "name": "1", + "type": "efs" + } + ], + "DesiredStatus": "NONE", + "KnownStatus": "NONE", + "KnownTime": "0001-01-01T00:00:00Z", + "PullStartedAt": "0001-01-01T00:00:00Z", + "PullStoppedAt": "0001-01-01T00:00:00Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "NONE", + "StartSequenceNumber": 0, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "PlatformFields": {} + }`) + var task Task + err := json.Unmarshal(taskDef, &task) + require.NoError(t, err, "Could not unmarshal task") + + require.Len(t, task.Volumes, 1) + assert.Equal(t, "efs", task.Volumes[0].Type) + assert.Equal(t, "1", task.Volumes[0].Name) + efsConfig, ok := task.Volumes[0].Volume.(*taskresourcevolume.EFSVolumeConfig) + require.True(t, ok) + assert.Equal(t, "fsap-123", efsConfig.AuthConfig.AccessPointId) + assert.Equal(t, "ENABLED", efsConfig.AuthConfig.Iam) + assert.Equal(t, "fs-12345", efsConfig.FileSystemID) + assert.Equal(t, "/tmp", efsConfig.RootDirectory) + assert.Equal(t, "ENABLED", efsConfig.TransitEncryption) + assert.Equal(t, int64(23456), efsConfig.TransitEncryptionPort) +} + +func TestMarshalUnmarshalTaskVolumes(t *testing.T) { + task := &Task{ + Arn: "test", + Volumes: 
[]TaskVolume{ + {Name: "1", Type: HostVolumeType, Volume: &taskresourcevolume.LocalDockerVolume{}}, + {Name: "2", Type: HostVolumeType, Volume: &taskresourcevolume.FSHostVolume{FSSourcePath: "/path"}}, + {Name: "3", Type: DockerVolumeType, Volume: &taskresourcevolume.DockerVolumeConfig{Scope: "task", Driver: "local"}}, + {Name: "4", Type: EFSVolumeType, Volume: &taskresourcevolume.EFSVolumeConfig{FileSystemID: "fs-12345", RootDirectory: "/tmp"}}, + }, + } + + marshal, err := json.Marshal(task) + require.NoError(t, err, "Could not marshal task") + + var out Task + err = json.Unmarshal(marshal, &out) + require.NoError(t, err, "Could not unmarshal task") + require.Len(t, out.Volumes, 4, "Incorrect number of volumes") + + var v1, v2, v3, v4 TaskVolume + + for _, v := range out.Volumes { + switch v.Name { + case "1": + v1 = v + case "2": + v2 = v + case "3": + v3 = v + case "4": + v4 = v + } + } + + _, ok := v1.Volume.(*taskresourcevolume.LocalDockerVolume) + assert.True(t, ok, "Expected v1 to be local empty volume") + assert.Equal(t, "", v1.Volume.Source(), "Expected v2 to have 'sourcepath' work correctly") + _, ok = v2.Volume.(*taskresourcevolume.FSHostVolume) + assert.True(t, ok, "Expected v2 to be host volume") + assert.Equal(t, "/path", v2.Volume.(*taskresourcevolume.FSHostVolume).FSSourcePath, "Unmarshaled v2 didn't match marshalled v2") + + dockerVolume, ok := v3.Volume.(*taskresourcevolume.DockerVolumeConfig) + assert.True(t, ok, "incorrect DockerVolumeConfig type") + assert.Equal(t, "task", dockerVolume.Scope) + assert.Equal(t, "local", dockerVolume.Driver) + + efsVolume, ok := v4.Volume.(*taskresourcevolume.EFSVolumeConfig) + assert.True(t, ok, "incorrect EFSVolumeConfig type") + assert.Equal(t, "fs-12345", efsVolume.FileSystemID) + assert.Equal(t, "/tmp", efsVolume.RootDirectory) +} + +func TestInitializeLocalDockerVolume(t *testing.T) { + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: 
[]*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "empty-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "empty-volume-test", + Type: "docker", + Volume: &taskresourcevolume.LocalDockerVolume{}, + }, + }, + } + + testTask.initializeDockerLocalVolumes(nil, nil) + + assert.Len(t, testTask.ResourcesMapUnsafe, 1, "expect the resource map has an empty volume resource") + assert.Len(t, testTask.Containers[0].TransitionDependenciesMap, 1, "expect a volume resource as the container dependency") +} + +func TestInitializeSharedProvisionedVolume(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: false, + }, + }, + }, + } + + // Expect the volume already exists on the instance + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{}) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + + assert.NoError(t, err) + assert.Len(t, testTask.ResourcesMapUnsafe, 0, "no volume resource should be provisioned by agent") + assert.Len(t, 
testTask.Containers[0].TransitionDependenciesMap, 0, "resource already exists") +} + +func TestInitializeEFSVolume_UseLocalVolumeDriver(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := getEFSTask() + + cfg := &config.Config{ + AWSRegion: "us-west-1", + } + err := testTask.initializeEFSVolumes(cfg, dockerClient, nil) + + assert.NoError(t, err) + assert.Len(t, testTask.Volumes, 1) + + dockervol, ok := testTask.Volumes[0].Volume.(*taskresourcevolume.DockerVolumeConfig) + assert.True(t, ok) + assert.Equal(t, "local", dockervol.Driver) + assert.Equal(t, "nfs", dockervol.DriverOpts["type"]) + assert.Equal(t, "addr=fs-12345.efs.us-west-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport", dockervol.DriverOpts["o"]) + assert.Equal(t, ":/my/root/dir", dockervol.DriverOpts["device"]) +} + +func TestInitializeEFSVolume_UseECSVolumePlugin(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := getEFSTask() + testTask.credentialsRelativeURIUnsafe = "/v2/creds-id" + + cfg := &config.Config{ + VolumePluginCapabilities: []string{"efsAuth"}, + } + err := testTask.initializeEFSVolumes(cfg, dockerClient, nil) + + require.NoError(t, err) + assert.Len(t, testTask.Volumes, 1) + + dockervol, ok := testTask.Volumes[0].Volume.(*taskresourcevolume.DockerVolumeConfig) + require.True(t, ok) + assert.Equal(t, "efs", dockervol.DriverOpts["type"]) + assert.Equal(t, "tls,tlsport=12345,iam,awscredsuri=/v2/creds-id,accesspoint=fsap-123", dockervol.DriverOpts["o"]) + assert.Equal(t, "fs-12345:/my/root/dir", dockervol.DriverOpts["device"]) +} + +func TestInitializeEFSVolume_WrongVolumeConfig(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := getEFSTask() + 
testTask.Volumes[0].Volume = &taskresourcevolume.DockerVolumeConfig{} + + cfg := &config.Config{ + AWSRegion: "us-west-1", + } + err := testTask.initializeEFSVolumes(cfg, dockerClient, nil) + + assert.Error(t, err) +} + +func TestInitializeEFSVolume_WrongVolumeType(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := getEFSTask() + testTask.Volumes[0].Type = "docker" + + cfg := &config.Config{ + AWSRegion: "us-west-1", + } + err := testTask.initializeEFSVolumes(cfg, dockerClient, nil) + + assert.NoError(t, err) + err = testTask.initializeDockerVolumes(true, dockerClient, nil) + assert.Error(t, err) +} + +func TestInitializeSharedProvisionedVolumeError(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: false, + }, + }, + }, + } + + // Expect the volume does not exists on the instance + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{Error: errors.New("volume not exist")}) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + assert.Error(t, err, "volume not found for auto-provisioned resource should cause task to fail") +} + +func 
TestInitializeSharedNonProvisionedVolume(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: false, + Labels: map[string]string{"test": "test"}, + }, + }, + }, + } + + // Expect the volume already exists on the instance + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{ + Labels: map[string]string{"test": "test"}, + }, + }) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + + assert.NoError(t, err) + assert.Len(t, testTask.ResourcesMapUnsafe, 0, "no volume resource should be provisioned by agent") + assert.Len(t, testTask.Containers[0].TransitionDependenciesMap, 0, "resource already exists") +} + +func TestInitializeSharedNonProvisionedVolumeValidateNameOnly(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled} + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + 
}, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: true, + DriverOpts: map[string]string{"type": "tmpfs"}, + Labels: map[string]string{"domain": "test"}, + }, + }, + }, + } + + // Expect the volume already exists on the instance + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{ + Options: map[string]string{}, + Labels: nil, + }, + }) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + + assert.NoError(t, err) + assert.Len(t, testTask.ResourcesMapUnsafe, 0, "no volume resource should be provisioned by agent") + assert.Len(t, testTask.Containers[0].TransitionDependenciesMap, 0, "resource already exists") +} + +func TestInitializeSharedAutoprovisionVolumeNotFoundError(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: true, + }, + }, + }, + } + + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{Error: errors.New("not 
found")}) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + assert.NoError(t, err) + assert.Len(t, testTask.ResourcesMapUnsafe, 1, "volume resource should be provisioned by agent") + assert.Len(t, testTask.Containers[0].TransitionDependenciesMap, 1, "volume resource should be in the container dependency map") +} + +func TestInitializeSharedAutoprovisionVolumeNotMatchError(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: true, + }, + }, + }, + } + + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{ + Labels: map[string]string{"test": "test"}, + }, + }) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + assert.Error(t, err, "volume resource details not match should cause task fail") +} + +func TestInitializeSharedAutoprovisionVolumeTimeout(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + dockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: 
[]*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "shared-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "shared-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "shared", + Autoprovision: true, + }, + }, + }, + } + + dockerClient.EXPECT().InspectVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.SDKVolumeResponse{ + Error: &dockerapi.DockerTimeoutError{}, + }) + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), dockerClient, nil) + assert.Error(t, err, "volume resource details not match should cause task fail") +} + +func TestInitializeTaskVolume(t *testing.T) { + sharedVolumeMatchFullConfig := config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + testTask := &Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + Containers: []*apicontainer.Container{ + { + MountPoints: []apicontainer.MountPoint{ + { + SourceVolume: "task-volume-test", + ContainerPath: "/ecs", + }, + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + Volumes: []TaskVolume{ + { + Name: "task-volume-test", + Type: "docker", + Volume: &taskresourcevolume.DockerVolumeConfig{ + Scope: "task", + }, + }, + }, + } + + err := testTask.initializeDockerVolumes(sharedVolumeMatchFullConfig.Enabled(), nil, nil) + assert.NoError(t, err) + assert.Len(t, testTask.ResourcesMapUnsafe, 1, "expect the resource map has an empty volume resource") + assert.Len(t, testTask.Containers[0].TransitionDependenciesMap, 1, "expect a volume resource as the container dependency") +} + +func TestGetEFSVolumeDriverName(t *testing.T) { + cfg := &config.Config{ + VolumePluginCapabilities: []string{"efsAuth"}, + } + assert.Equal(t, 
"amazon-ecs-volume-plugin", getEFSVolumeDriverName(cfg)) + assert.Equal(t, "local", getEFSVolumeDriverName(&config.Config{})) +} + +func TestSetPausePIDInVolumeResources(t *testing.T) { + task := getEFSTask() + volRes := &taskresourcevolume.VolumeResource{} + task.AddResource("dockerVolume", volRes) + task.SetPausePIDInVolumeResources("pid") + assert.Equal(t, "pid", volRes.GetPauseContainerPID()) +} diff --git a/agent/api/task/taskvolume_windows_test.go b/agent/api/task/taskvolume_windows_test.go new file mode 100644 index 00000000000..c9117fc9a73 --- /dev/null +++ b/agent/api/task/taskvolume_windows_test.go @@ -0,0 +1,139 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/taskresource/fsxwindowsfileserver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMarshalTaskVolumeFSxWindowsFileServer(t *testing.T) { + task := &Task{ + Arn: "test", + Volumes: []TaskVolume{ + { + Name: "1", + Type: FSxWindowsFileServerVolumeType, + Volume: &fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig{ + FileSystemID: "fs-12345678", + RootDirectory: "/test", + AuthConfig: fsxwindowsfileserver.FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn", + Domain: "test", + }, + }, + }, + }, + } + + marshal, err := json.Marshal(task) + require.NoError(t, err, "Could not marshal task") + expectedTaskDef := `{ + "Arn": "test", + "Family": "", + "Version": "", + "Containers": null, + "associations": null, + "resources": null, + "volumes": [ + { + "fsxWindowsFileServerVolumeConfiguration": { + "authorizationConfig": { + "credentialsParameter": "arn", + "domain": "test" + }, + "fileSystemId": "fs-12345678", + "rootDirectory": "/test", + "fsxWindowsFileServerHostPath": "" + }, + "name": "1", + "type": "fsxWindowsFileServer" + } + ], + "DesiredStatus": "NONE", + "KnownStatus": "NONE", + "KnownTime": "0001-01-01T00:00:00Z", + "PullStartedAt": "0001-01-01T00:00:00Z", + "PullStoppedAt": "0001-01-01T00:00:00Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "NONE", + "StartSequenceNumber": 0, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "PlatformFields": %s + }` + expectedTaskDef = fmt.Sprintf(expectedTaskDef, `{"cpuUnbounded": null, "memoryUnbounded": null}`) + require.JSONEq(t, expectedTaskDef, string(marshal)) +} + +func TestUnmarshalTaskVolumeFSxWindowsFileServer(t *testing.T) { + taskDef := []byte(`{ + "Arn": "test", + "Family": "", + "Version": "", + "Containers": null, + "associations": null, + "resources": null, + "volumes": [ + { + 
"fsxWindowsFileServerVolumeConfiguration": { + "authorizationConfig": { + "credentialsParameter": "arn", + "domain": "test" + }, + "fileSystemId": "fs-12345678", + "rootDirectory": "/test", + "fsxWindowsFileServerHostPath": "" + }, + "name": "1", + "type": "fsxWindowsFileServer" + } + ], + "DesiredStatus": "NONE", + "KnownStatus": "NONE", + "KnownTime": "0001-01-01T00:00:00Z", + "PullStartedAt": "0001-01-01T00:00:00Z", + "PullStoppedAt": "0001-01-01T00:00:00Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "NONE", + "StartSequenceNumber": 0, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "PlatformFields": {} + }`) + var task Task + err := json.Unmarshal(taskDef, &task) + require.NoError(t, err, "Could not unmarshal task") + + require.Len(t, task.Volumes, 1) + assert.Equal(t, "fsxWindowsFileServer", task.Volumes[0].Type) + assert.Equal(t, "1", task.Volumes[0].Name) + fsxWindowsFileServerConfig, ok := task.Volumes[0].Volume.(*fsxwindowsfileserver.FSxWindowsFileServerVolumeConfig) + assert.True(t, ok) + assert.Equal(t, "fs-12345678", fsxWindowsFileServerConfig.FileSystemID) + assert.Equal(t, "/test", fsxWindowsFileServerConfig.RootDirectory) + assert.Equal(t, "arn", fsxWindowsFileServerConfig.AuthConfig.CredentialsParameter) + assert.Equal(t, "test", fsxWindowsFileServerConfig.AuthConfig.Domain) +} diff --git a/agent/api/task/types_unmarshal_test.go b/agent/api/task/types_unmarshal_test.go new file mode 100644 index 00000000000..35a0b0e966e --- /dev/null +++ b/agent/api/task/types_unmarshal_test.go @@ -0,0 +1,72 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +import ( + "encoding/json" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" +) + +func TestVolumesFromUnmarshal(t *testing.T) { + var vols []apicontainer.VolumeFrom + err := json.Unmarshal([]byte(`[{"sourceContainer":"c1"},{"sourceContainer":"c2","readOnly":true}]`), &vols) + if err != nil { + t.Fatal("Unable to unmarshal json") + } + if (vols[0] != apicontainer.VolumeFrom{SourceContainer: "c1", ReadOnly: false}) { + t.Error("VolumeFrom 1 didn't match expected output") + } + if (vols[1] != apicontainer.VolumeFrom{SourceContainer: "c2", ReadOnly: true}) { + t.Error("VolumeFrom 2 didn't match expected output") + } +} + +func TestEmptyHostVolumeUnmarshal(t *testing.T) { + var task Task + err := json.Unmarshal([]byte(`{"volumes":[{"name":"test","type": "host","host":{}}]}`), &task) + if err != nil { + t.Fatal("Could not unmarshal: ", err) + } + if task.Volumes[0].Name != "test" { + t.Error("Wrong name") + } + if fs, ok := task.Volumes[0].Volume.(*taskresourcevolume.LocalDockerVolume); !ok { + t.Error("Wrong type") + if fs.Source() != "" { + t.Error("Should default to empty string") + } + } +} + +func TestHostHostVolumeUnmarshal(t *testing.T) { + var task Task + err := json.Unmarshal([]byte(`{"volumes":[{"name":"test","type": "host","host":{"sourcePath":"/path"}}]}`), &task) + if err != nil { + t.Fatal("Could not unmarshal: ", err) + } + if task.Volumes[0].Name != "test" { + t.Error("Wrong name") + } + fsv, ok := task.Volumes[0].Volume.(*taskresourcevolume.FSHostVolume) + if !ok { + t.Error("Wrong type") + } else if fsv.Source() != "/path" { + t.Error("Wrong host path: ", fsv.Source()) + } +} diff --git 
a/agent/api/task/util.go b/agent/api/task/util.go new file mode 100644 index 00000000000..534f752495d --- /dev/null +++ b/agent/api/task/util.go @@ -0,0 +1,23 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package task + +// RemoveFromTaskArray removes the element at ndx from an array of task +// pointers, arr. If the ndx is out of bounds, it returns arr unchanged. +func RemoveFromTaskArray(arr []*Task, ndx int) []*Task { + if ndx < 0 || ndx >= len(arr) { + return arr + } + return append(arr[0:ndx], arr[ndx+1:]...) +} diff --git a/agent/api/task/util_test.go b/agent/api/task/util_test.go new file mode 100644 index 00000000000..102a8237413 --- /dev/null +++ b/agent/api/task/util_test.go @@ -0,0 +1,59 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package task + +import ( + "fmt" + "reflect" + "testing" +) + +func taskN(n int) *Task { + return &Task{ + Arn: fmt.Sprint(n), + } +} + +func TestRemoveFromTaskArray(t *testing.T) { + arr := []*Task{taskN(1), taskN(2), taskN(3)} + + arr2 := RemoveFromTaskArray(arr, -1) + if !reflect.DeepEqual(arr, arr2) { + t.Error("Index -1 out of bounds altered arr") + } + + arr2 = RemoveFromTaskArray(arr, 3) + if !reflect.DeepEqual(arr, arr2) { + t.Error("Index 3 out of bounds altered arr") + } + + arr = RemoveFromTaskArray(arr, 2) + if !reflect.DeepEqual(arr, []*Task{taskN(1), taskN(2)}) { + t.Error("Last element not removed") + } + arr = RemoveFromTaskArray(arr, 0) + if !reflect.DeepEqual(arr, []*Task{taskN(2)}) { + t.Error("First element not removed") + } + arr = RemoveFromTaskArray(arr, 0) + if !reflect.DeepEqual(arr, []*Task{}) { + t.Error("First element not removed") + } + arr = RemoveFromTaskArray(arr, 0) + if !reflect.DeepEqual(arr, []*Task{}) { + t.Error("Removing from empty arr changed it") + } +} diff --git a/agent/api/testutils/container_equal.go b/agent/api/testutils/container_equal.go new file mode 100644 index 00000000000..fad345ecf32 --- /dev/null +++ b/agent/api/testutils/container_equal.go @@ -0,0 +1,105 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package testutils contains files that are used in tests but not elsewhere and thus can +// be excluded from the final executable. 
Including them in a different package +// allows this to happen +package testutils + +import ( + "reflect" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +// ContainersEqual determines if this container is equal to another container. +// This is not exact equality, but logical equality. +// TODO: use reflection along with `equal:"unordered"` annotations on slices to +// replace this verbose code (low priority, but would be fun) +func ContainersEqual(lhs, rhs *apicontainer.Container) bool { + if lhs == rhs { + return true + } + if lhs.Name != rhs.Name || lhs.Image != rhs.Image { + return false + } + + if !utils.StrSliceEqual(lhs.Command, rhs.Command) { + return false + } + if lhs.CPU != rhs.CPU || lhs.Memory != rhs.Memory { + return false + } + // Order doesn't matter + if !utils.SlicesDeepEqual(lhs.Links, rhs.Links) { + return false + } + if !utils.SlicesDeepEqual(lhs.VolumesFrom, rhs.VolumesFrom) { + return false + } + if !utils.SlicesDeepEqual(lhs.MountPoints, rhs.MountPoints) { + return false + } + if !utils.SlicesDeepEqual(lhs.Ports, rhs.Ports) { + return false + } + if !utils.SlicesDeepEqual(lhs.KnownPortBindingsUnsafe, rhs.KnownPortBindingsUnsafe) { + return false + } + if lhs.Essential != rhs.Essential { + return false + } + if lhs.EntryPoint == nil || rhs.EntryPoint == nil { + if lhs.EntryPoint != rhs.EntryPoint { + return false + } + // both nil + } else { + if !utils.StrSliceEqual(*lhs.EntryPoint, *rhs.EntryPoint) { + return false + } + } + if !reflect.DeepEqual(lhs.Environment, rhs.Environment) { + return false + } + if !ContainerOverridesEqual(lhs.Overrides, rhs.Overrides) { + return false + } + if lhs.DesiredStatusUnsafe != rhs.DesiredStatusUnsafe || lhs.KnownStatusUnsafe != rhs.KnownStatusUnsafe { + return false + } + if lhs.AppliedStatus != rhs.AppliedStatus { + return false + } + if !reflect.DeepEqual(lhs.GetKnownExitCode(), rhs.GetKnownExitCode()) { + return false + } + + 
return true +} + +// ContainerOverridesEqual determines if two container overrides are equal +func ContainerOverridesEqual(lhs, rhs apicontainer.ContainerOverrides) bool { + if lhs.Command == nil || rhs.Command == nil { + if lhs.Command != rhs.Command { + return false + } + } else { + if !utils.StrSliceEqual(*lhs.Command, *rhs.Command) { + return false + } + } + + return true +} diff --git a/agent/api/testutils/container_equal_test.go b/agent/api/testutils/container_equal_test.go new file mode 100644 index 00000000000..97ebed76d0d --- /dev/null +++ b/agent/api/testutils/container_equal_test.go @@ -0,0 +1,90 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package testutils + +import ( + "fmt" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +func TestContainerEqual(t *testing.T) { + + exitCodeContainer := func(p *int) apicontainer.Container { + c := apicontainer.Container{} + c.SetKnownExitCode(p) + return c + } + + testCases := []struct { + lhs apicontainer.Container + rhs apicontainer.Container + shouldBeEqual bool + }{ + // Equal Pairs + {apicontainer.Container{Name: "name"}, apicontainer.Container{Name: "name"}, true}, + {apicontainer.Container{Image: "nginx"}, apicontainer.Container{Image: "nginx"}, true}, + {apicontainer.Container{Command: []string{"c"}}, apicontainer.Container{Command: []string{"c"}}, true}, + {apicontainer.Container{CPU: 1}, apicontainer.Container{CPU: 1}, true}, + {apicontainer.Container{Memory: 1}, apicontainer.Container{Memory: 1}, true}, + {apicontainer.Container{Links: []string{"1", "2"}}, apicontainer.Container{Links: []string{"1", "2"}}, true}, + {apicontainer.Container{Links: []string{"1", "2"}}, apicontainer.Container{Links: []string{"2", "1"}}, true}, + {apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolTCP}}}, apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolTCP}}}, true}, + {apicontainer.Container{Essential: true}, apicontainer.Container{Essential: true}, true}, + {apicontainer.Container{EntryPoint: nil}, apicontainer.Container{EntryPoint: nil}, true}, + {apicontainer.Container{EntryPoint: &[]string{"1", "2"}}, apicontainer.Container{EntryPoint: &[]string{"1", "2"}}, true}, + {apicontainer.Container{Environment: map[string]string{}}, apicontainer.Container{Environment: map[string]string{}}, true}, + {apicontainer.Container{Environment: map[string]string{"a": "b", "c": "d"}}, 
apicontainer.Container{Environment: map[string]string{"c": "d", "a": "b"}}, true}, + {apicontainer.Container{DesiredStatusUnsafe: apicontainerstatus.ContainerRunning}, apicontainer.Container{DesiredStatusUnsafe: apicontainerstatus.ContainerRunning}, true}, + {apicontainer.Container{AppliedStatus: apicontainerstatus.ContainerRunning}, apicontainer.Container{AppliedStatus: apicontainerstatus.ContainerRunning}, true}, + {apicontainer.Container{KnownStatusUnsafe: apicontainerstatus.ContainerRunning}, apicontainer.Container{KnownStatusUnsafe: apicontainerstatus.ContainerRunning}, true}, + {exitCodeContainer(aws.Int(1)), exitCodeContainer(aws.Int(1)), true}, + {exitCodeContainer(nil), exitCodeContainer(nil), true}, + // Unequal Pairs + {apicontainer.Container{Name: "name"}, apicontainer.Container{Name: "名前"}, false}, + {apicontainer.Container{Image: "nginx"}, apicontainer.Container{Image: "えんじんえっくす"}, false}, + {apicontainer.Container{Command: []string{"c"}}, apicontainer.Container{Command: []string{"し"}}, false}, + {apicontainer.Container{Command: []string{"c", "b"}}, apicontainer.Container{Command: []string{"b", "c"}}, false}, + {apicontainer.Container{CPU: 1}, apicontainer.Container{CPU: 2e2}, false}, + {apicontainer.Container{Memory: 1}, apicontainer.Container{Memory: 2e2}, false}, + {apicontainer.Container{Links: []string{"1", "2"}}, apicontainer.Container{Links: []string{"1", "二"}}, false}, + {apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolTCP}}}, apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "二", apicontainer.TransportProtocolTCP}}}, false}, + {apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolTCP}}}, apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 22, "1", apicontainer.TransportProtocolTCP}}}, false}, + {apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolTCP}}}, 
apicontainer.Container{Ports: []apicontainer.PortBinding{{1, 2, "1", apicontainer.TransportProtocolUDP}}}, false}, + {apicontainer.Container{Essential: true}, apicontainer.Container{Essential: false}, false}, + {apicontainer.Container{EntryPoint: nil}, apicontainer.Container{EntryPoint: &[]string{"nonnil"}}, false}, + {apicontainer.Container{EntryPoint: &[]string{"1", "2"}}, apicontainer.Container{EntryPoint: &[]string{"2", "1"}}, false}, + {apicontainer.Container{EntryPoint: &[]string{"1", "2"}}, apicontainer.Container{EntryPoint: &[]string{"1", "二"}}, false}, + {apicontainer.Container{Environment: map[string]string{"a": "b", "c": "d"}}, apicontainer.Container{Environment: map[string]string{"し": "d", "a": "b"}}, false}, + {apicontainer.Container{DesiredStatusUnsafe: apicontainerstatus.ContainerRunning}, apicontainer.Container{DesiredStatusUnsafe: apicontainerstatus.ContainerStopped}, false}, + {apicontainer.Container{AppliedStatus: apicontainerstatus.ContainerRunning}, apicontainer.Container{AppliedStatus: apicontainerstatus.ContainerStopped}, false}, + {apicontainer.Container{KnownStatusUnsafe: apicontainerstatus.ContainerRunning}, apicontainer.Container{KnownStatusUnsafe: apicontainerstatus.ContainerStopped}, false}, + {exitCodeContainer(aws.Int(0)), exitCodeContainer(aws.Int(42)), false}, + {exitCodeContainer(nil), exitCodeContainer(aws.Int(12)), false}, + } + + for index, tc := range testCases { + t.Run(fmt.Sprintf("index %d expected %t", index, tc.shouldBeEqual), func(t *testing.T) { + assert.Equal(t, ContainersEqual(&tc.lhs, &tc.rhs), tc.shouldBeEqual, "ContainersEqual not working as expected. Check index failure.") + // Symetric + assert.Equal(t, ContainersEqual(&tc.rhs, &tc.lhs), tc.shouldBeEqual, "Symetric equality check failed. 
Check index failure.") + }) + } +} diff --git a/agent/api/testutils/task_equal.go b/agent/api/testutils/task_equal.go new file mode 100644 index 00000000000..22e47eefb02 --- /dev/null +++ b/agent/api/testutils/task_equal.go @@ -0,0 +1,51 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package testutils + +import ( + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" +) + +// TasksEqual determines if the lhs and rhs tasks are equal for the definition +// of having the same family, version, statuses, and equal containers. +func TasksEqual(lhs, rhs *apitask.Task) bool { + if lhs == rhs { + return true + } + if lhs.Arn != rhs.Arn { + return false + } + + if lhs.Family != rhs.Family || lhs.Version != rhs.Version { + return false + } + + for _, left := range lhs.Containers { + right, ok := rhs.ContainerByName(left.Name) + if !ok { + return false + } + if !ContainersEqual(left, right) { + return false + } + } + + if lhs.DesiredStatusUnsafe != rhs.DesiredStatusUnsafe { + return false + } + if lhs.GetKnownStatus() != rhs.GetKnownStatus() { + return false + } + return true +} diff --git a/agent/api/testutils/task_equal_test.go b/agent/api/testutils/task_equal_test.go new file mode 100644 index 00000000000..15a62258043 --- /dev/null +++ b/agent/api/testutils/task_equal_test.go @@ -0,0 +1,59 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package testutils + +import ( + "fmt" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/stretchr/testify/assert" +) + +func TestTaskEqual(t *testing.T) { + + testCases := []struct { + rhs apitask.Task + lhs apitask.Task + shouldBeEqual bool + }{ + // Equal Pairs + {apitask.Task{Arn: "a"}, apitask.Task{Arn: "a"}, true}, + {apitask.Task{Family: "a"}, apitask.Task{Family: "a"}, true}, + {apitask.Task{Version: "a"}, apitask.Task{Version: "a"}, true}, + {apitask.Task{Containers: []*apicontainer.Container{{Name: "a"}}}, apitask.Task{Containers: []*apicontainer.Container{{Name: "a"}}}, true}, + {apitask.Task{DesiredStatusUnsafe: apitaskstatus.TaskRunning}, apitask.Task{DesiredStatusUnsafe: apitaskstatus.TaskRunning}, true}, + {apitask.Task{KnownStatusUnsafe: apitaskstatus.TaskRunning}, apitask.Task{KnownStatusUnsafe: apitaskstatus.TaskRunning}, true}, + + // Unequal Pairs + {apitask.Task{Arn: "a"}, apitask.Task{Arn: "あ"}, false}, + {apitask.Task{Family: "a"}, apitask.Task{Family: "あ"}, false}, + {apitask.Task{Version: "a"}, apitask.Task{Version: "あ"}, false}, + {apitask.Task{Containers: []*apicontainer.Container{{Name: "a"}}}, apitask.Task{Containers: []*apicontainer.Container{{Name: "あ"}}}, false}, + {apitask.Task{DesiredStatusUnsafe: apitaskstatus.TaskRunning}, 
apitask.Task{DesiredStatusUnsafe: apitaskstatus.TaskStopped}, false}, + {apitask.Task{KnownStatusUnsafe: apitaskstatus.TaskRunning}, apitask.Task{KnownStatusUnsafe: apitaskstatus.TaskStopped}, false}, + } + + for index, tc := range testCases { + t.Run(fmt.Sprintf("index %d expected %t", index, tc.shouldBeEqual), func(t *testing.T) { + assert.Equal(t, TasksEqual(&tc.lhs, &tc.rhs), tc.shouldBeEqual, "TasksEqual not working as expected. Check index failure.") + // Symetric + assert.Equal(t, TasksEqual(&tc.rhs, &tc.lhs), tc.shouldBeEqual, "Symetric equality check failed. Check index failure.") + }) + } +} diff --git a/agent/app/agent.go b/agent/app/agent.go new file mode 100644 index 00000000000..cdf869c0d53 --- /dev/null +++ b/agent/app/agent.go @@ -0,0 +1,813 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/metrics" + + acshandler "github.com/aws/amazon-ecs-agent/agent/acs/handler" + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/api/ecsclient" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/app/factory" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/eni/pause" + "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper" + "github.com/aws/amazon-ecs-agent/agent/eventhandler" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/handlers" + "github.com/aws/amazon-ecs-agent/agent/sighandlers" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + tcshandler "github.com/aws/amazon-ecs-agent/agent/tcs/handler" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/aws-sdk-go/aws" + aws_credentials 
"github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/cihub/seelog" + "github.com/pborman/uuid" +) + +const ( + containerChangeEventStreamName = "ContainerChange" + deregisterContainerInstanceEventStreamName = "DeregisterContainerInstance" + clusterMismatchErrorFormat = "Data mismatch; saved cluster '%v' does not match configured cluster '%v'. Perhaps you want to delete the configured checkpoint file?" + instanceIDMismatchErrorFormat = "Data mismatch; saved InstanceID '%s' does not match current InstanceID '%s'. Overwriting old datafile" + instanceTypeMismatchErrorFormat = "The current instance type does not match the registered instance type. Please revert the instance type change, or alternatively launch a new instance: %v" + + vpcIDAttributeName = "ecs.vpc-id" + subnetIDAttributeName = "ecs.subnet-id" +) + +var ( + instanceNotLaunchedInVPCError = errors.New("instance not launched in VPC") +) + +// agent interface is used by the app runner to interact with the ecsAgent +// object. Its purpose is to mostly demonstrate how to interact with the +// ecsAgent type. +type agent interface { + // printECSAttributes prints the Agent's capabilities based on + // its environment + printECSAttributes() int + // startWindowsService starts the agent as a Windows Service + startWindowsService() int + // start starts the Agent execution + start() int + // setTerminationHandler sets the termination handler + setTerminationHandler(sighandlers.TerminationHandler) +} + +// ecsAgent wraps all the entities needed to start the ECS Agent execution. 
+// after creating it via +// the newAgent() method +type ecsAgent struct { + ctx context.Context + cancel context.CancelFunc + ec2MetadataClient ec2.EC2MetadataClient + ec2Client ec2.Client + cfg *config.Config + dataClient data.Client + dockerClient dockerapi.DockerClient + containerInstanceARN string + credentialProvider *aws_credentials.Credentials + stateManagerFactory factory.StateManager + saveableOptionFactory factory.SaveableOption + pauseLoader pause.Loader + udevMonitor udevwrapper.Udev + cniClient ecscni.CNIClient + vpc string + subnet string + mac string + metadataManager containermetadata.Manager + terminationHandler sighandlers.TerminationHandler + mobyPlugins mobypkgwrapper.Plugins + resourceFields *taskresource.ResourceFields + availabilityZone string + latestSeqNumberTaskManifest *int64 +} + +// newAgent returns a new ecsAgent object, but does not start anything +func newAgent(blackholeEC2Metadata bool, acceptInsecureCert *bool) (agent, error) { + ctx, cancel := context.WithCancel(context.Background()) + ec2MetadataClient := ec2.NewEC2MetadataClient(nil) + if blackholeEC2Metadata { + ec2MetadataClient = ec2.NewBlackholeEC2MetadataClient() + } + + seelog.Info("Loading configuration") + cfg, err := config.NewConfig(ec2MetadataClient) + if err != nil { + // All required config values can be inferred from EC2 Metadata, + // so this error could be transient. + seelog.Criticalf("Error loading config: %v", err) + cancel() + return nil, err + } + cfg.AcceptInsecureCert = aws.BoolValue(acceptInsecureCert) + if cfg.AcceptInsecureCert { + seelog.Warn("SSL certificate verification disabled. 
This is not recommended.") + } + seelog.Infof("Amazon ECS agent Version: %s, Commit: %s", version.Version, version.GitShortHash) + seelog.Debugf("Loaded config: %s", cfg.String()) + + ec2Client := ec2.NewClientImpl(cfg.AWSRegion) + dockerClient, err := dockerapi.NewDockerGoClient(sdkclientfactory.NewFactory(ctx, cfg.DockerEndpoint), cfg, ctx) + + if err != nil { + // This is also non terminal in the current config + seelog.Criticalf("Error creating Docker client: %v", err) + cancel() + return nil, err + } + + var dataClient data.Client + if cfg.Checkpoint.Enabled() { + dataClient, err = data.New(cfg.DataDir) + if err != nil { + seelog.Criticalf("Error creating data client: %v", err) + cancel() + return nil, err + } + } else { + dataClient = data.NewNoopClient() + } + + var metadataManager containermetadata.Manager + if cfg.ContainerMetadataEnabled.Enabled() { + // We use the default API client for the metadata inspect call. This version has some information + // missing which means if we need those fields later we will need to change this client to + // the appropriate version + metadataManager = containermetadata.NewManager(dockerClient, cfg) + } + + initialSeqNumber := int64(-1) + return &ecsAgent{ + ctx: ctx, + cancel: cancel, + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + cfg: cfg, + dockerClient: dockerClient, + dataClient: dataClient, + // We instantiate our own credentialProvider for use in acs/tcs. This tries + // to mimic roughly the way it's instantiated by the SDK for a default + // session. 
+ credentialProvider: defaults.CredChain(defaults.Config(), defaults.Handlers()), + stateManagerFactory: factory.NewStateManager(), + saveableOptionFactory: factory.NewSaveableOption(), + pauseLoader: pause.New(), + cniClient: ecscni.NewClient(cfg.CNIPluginsPath), + metadataManager: metadataManager, + terminationHandler: sighandlers.StartDefaultTerminationHandler, + mobyPlugins: mobypkgwrapper.NewPlugins(), + latestSeqNumberTaskManifest: &initialSeqNumber, + }, nil +} + +// printECSAttributes prints the Agent's ECS Attributes based on its +// environment +func (agent *ecsAgent) printECSAttributes() int { + capabilities, err := agent.capabilities() + if err != nil { + seelog.Warnf("Unable to obtain capabilities: %v", err) + return exitcodes.ExitError + } + for _, attr := range capabilities { + fmt.Printf("%s\t%s\n", aws.StringValue(attr.Name), aws.StringValue(attr.Value)) + } + return exitcodes.ExitSuccess +} + +func (agent *ecsAgent) setTerminationHandler(handler sighandlers.TerminationHandler) { + agent.terminationHandler = handler +} + +// start starts the ECS Agent +func (agent *ecsAgent) start() int { + sighandlers.StartDebugHandler() + + containerChangeEventStream := eventstream.NewEventStream(containerChangeEventStreamName, agent.ctx) + credentialsManager := credentials.NewManager() + state := dockerstate.NewTaskEngineState() + imageManager := engine.NewImageManager(agent.cfg, agent.dockerClient, state) + client := ecsclient.NewECSClient(agent.credentialProvider, agent.cfg, agent.ec2MetadataClient) + + agent.initializeResourceFields(credentialsManager) + return agent.doStart(containerChangeEventStream, credentialsManager, state, imageManager, client, execcmd.NewManager()) +} + +// doStart is the worker invoked by start for starting the ECS Agent. 
This involves +// initializing the docker task engine, state saver, image manager, credentials +// manager, poll and telemetry sessions, api handler etc +func (agent *ecsAgent) doStart(containerChangeEventStream *eventstream.EventStream, + credentialsManager credentials.Manager, + state dockerstate.TaskEngineState, + imageManager engine.ImageManager, + client api.ECSClient, + execCmdMgr execcmd.Manager) int { + // check docker version >= 1.9.0, exit agent if older + if exitcode, ok := agent.verifyRequiredDockerVersion(); !ok { + return exitcode + } + + // Conditionally create '/ecs' cgroup root + if agent.cfg.TaskCPUMemLimit.Enabled() { + if err := agent.cgroupInit(); err != nil { + seelog.Criticalf("Unable to initialize cgroup root for ECS: %v", err) + return exitcodes.ExitTerminal + } + } + if agent.cfg.GPUSupportEnabled { + err := agent.initializeGPUManager() + if err != nil { + seelog.Criticalf("Could not initialize Nvidia GPU Manager: %v", err) + return exitcodes.ExitError + } + } + + // Create the task engine + taskEngine, currentEC2InstanceID, err := agent.newTaskEngine(containerChangeEventStream, + credentialsManager, state, imageManager, execCmdMgr) + if err != nil { + seelog.Criticalf("Unable to initialize new task engine: %v", err) + return exitcodes.ExitTerminal + } + agent.initMetricsEngine() + + loadPauseErr := agent.loadPauseContainer() + if loadPauseErr != nil { + seelog.Errorf("Failed to load pause container: %v", loadPauseErr) + } + + var vpcSubnetAttributes []*ecs.Attribute + // Check if Task ENI is enabled + if agent.cfg.TaskENIEnabled.Enabled() { + // check pause container image load + if loadPauseErr != nil { + if pause.IsNoSuchFileError(loadPauseErr) || pause.UnsupportedPlatform(loadPauseErr) { + return exitcodes.ExitTerminal + } else { + return exitcodes.ExitError + } + } + + err, terminal := agent.initializeTaskENIDependencies(state, taskEngine) + switch err { + case nil: + // No error, we can proceed with the rest of initialization + // 
Set vpc and subnet id attributes + vpcSubnetAttributes = agent.constructVPCSubnetAttributes() + case instanceNotLaunchedInVPCError: + // We have ascertained that the EC2 Instance is not running in a VPC + // No need to stop the ECS Agent in this case; all we need to do is + // to not update the config to disable the TaskENIEnabled flag and + // move on + seelog.Warnf("Unable to detect VPC ID for the Instance, disabling Task ENI capability: %v", err) + agent.cfg.TaskENIEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled} + default: + // Encountered an error initializing dependencies for dealing with + // ENIs for Tasks. Exit with the appropriate error code + seelog.Criticalf("Unable to initialize Task ENI dependencies: %v", err) + if terminal { + return exitcodes.ExitTerminal + } + return exitcodes.ExitError + } + } + + // Register the container instance + err = agent.registerContainerInstance(client, vpcSubnetAttributes) + if err != nil { + if isTransient(err) { + return exitcodes.ExitError + } + return exitcodes.ExitTerminal + } + + // Add container instance ARN to metadata manager + if agent.cfg.ContainerMetadataEnabled.Enabled() { + agent.metadataManager.SetContainerInstanceARN(agent.containerInstanceARN) + agent.metadataManager.SetAvailabilityZone(agent.availabilityZone) + agent.metadataManager.SetHostPrivateIPv4Address(agent.getHostPrivateIPv4AddressFromEC2Metadata()) + agent.metadataManager.SetHostPublicIPv4Address(agent.getHostPublicIPv4AddressFromEC2Metadata()) + } + + if agent.cfg.Checkpoint.Enabled() { + agent.saveMetadata(data.AgentVersionKey, version.Version) + agent.saveMetadata(data.AvailabilityZoneKey, agent.availabilityZone) + agent.saveMetadata(data.ClusterNameKey, agent.cfg.Cluster) + agent.saveMetadata(data.ContainerInstanceARNKey, agent.containerInstanceARN) + agent.saveMetadata(data.EC2InstanceIDKey, currentEC2InstanceID) + } + + // Begin listening to the docker daemon and saving changes + 
taskEngine.SetDataClient(agent.dataClient) + imageManager.SetDataClient(agent.dataClient) + taskEngine.MustInit(agent.ctx) + + // Start back ground routines, including the telemetry session + deregisterInstanceEventStream := eventstream.NewEventStream( + deregisterContainerInstanceEventStreamName, agent.ctx) + deregisterInstanceEventStream.StartListening() + taskHandler := eventhandler.NewTaskHandler(agent.ctx, agent.dataClient, state, client) + attachmentEventHandler := eventhandler.NewAttachmentEventHandler(agent.ctx, agent.dataClient, client) + agent.startAsyncRoutines(containerChangeEventStream, credentialsManager, imageManager, + taskEngine, deregisterInstanceEventStream, client, taskHandler, attachmentEventHandler, state) + + // Start the acs session, which should block doStart + return agent.startACSSession(credentialsManager, taskEngine, + deregisterInstanceEventStream, client, state, taskHandler) +} + +// newTaskEngine creates a new docker task engine object. It tries to load the +// local state if needed, else initializes a new one +func (agent *ecsAgent) newTaskEngine(containerChangeEventStream *eventstream.EventStream, + credentialsManager credentials.Manager, + state dockerstate.TaskEngineState, + imageManager engine.ImageManager, + execCmdMgr execcmd.Manager) (engine.TaskEngine, string, error) { + + containerChangeEventStream.StartListening() + + if !agent.cfg.Checkpoint.Enabled() { + seelog.Info("Checkpointing not enabled; a new container instance will be created each time the agent is run") + return engine.NewTaskEngine(agent.cfg, agent.dockerClient, credentialsManager, + containerChangeEventStream, imageManager, state, + agent.metadataManager, agent.resourceFields, execCmdMgr), "", nil + } + + savedData, err := agent.loadData(containerChangeEventStream, credentialsManager, state, imageManager, execCmdMgr) + if err != nil { + seelog.Criticalf("Error loading previously saved state: %v", err) + return nil, "", err + } + + err = 
agent.checkCompatibility(savedData.taskEngine) + if err != nil { + seelog.Criticalf("Error checking compatibility with previously saved state: %v", err) + return nil, "", err + } + + currentEC2InstanceID := agent.getEC2InstanceID() + if savedData.ec2InstanceID != "" && savedData.ec2InstanceID != currentEC2InstanceID { + seelog.Warnf(instanceIDMismatchErrorFormat, + savedData.ec2InstanceID, currentEC2InstanceID) + + // Reset agent state as a new container instance + state.Reset() + // Reset taskEngine; all the other values are still default + return engine.NewTaskEngine(agent.cfg, agent.dockerClient, credentialsManager, + containerChangeEventStream, imageManager, state, agent.metadataManager, + agent.resourceFields, execCmdMgr), currentEC2InstanceID, nil + } + + if savedData.cluster != "" { + if err := agent.setClusterInConfig(savedData.cluster); err != nil { + return nil, "", err + } + } + + // Use the values we loaded if there's no issue + agent.containerInstanceARN = savedData.containerInstanceARN + agent.availabilityZone = savedData.availabilityZone + agent.latestSeqNumberTaskManifest = &savedData.latestTaskManifestSeqNum + + return savedData.taskEngine, currentEC2InstanceID, nil +} + +func (agent *ecsAgent) initMetricsEngine() { + // In case of a panic during set-up, we will recover quietly and resume + // normal Agent execution. + defer func() { + if r := recover(); r != nil { + seelog.Errorf("MetricsEngine Set-up panicked. Recovering quietly: %s", r) + } + }() + + // We init the global MetricsEngine before we publish metrics + metrics.MustInit(agent.cfg) + metrics.PublishMetrics() +} + +// setClusterInConfig sets the cluster name in the config object based on +// previous state. 
It returns an error if there's a mismatch between the +// the current cluster name with what's restored from the cluster state +func (agent *ecsAgent) setClusterInConfig(previousCluster string) error { + // TODO Handle default cluster in a sane and unified way across the codebase + configuredCluster := agent.cfg.Cluster + if configuredCluster == "" { + seelog.Debug("Setting cluster to default; none configured") + configuredCluster = config.DefaultClusterName + } + if previousCluster != configuredCluster { + err := clusterMismatchError{ + fmt.Errorf(clusterMismatchErrorFormat, previousCluster, configuredCluster), + } + seelog.Criticalf("%v", err) + return err + } + agent.cfg.Cluster = previousCluster + seelog.Infof("Restored cluster '%s'", agent.cfg.Cluster) + + return nil +} + +// getEC2InstanceID gets the EC2 instance ID from the metadata service +func (agent *ecsAgent) getEC2InstanceID() string { + instanceID, err := agent.ec2MetadataClient.InstanceID() + if err != nil { + seelog.Warnf( + "Unable to access EC2 Metadata service to determine EC2 ID: %v", err) + return "" + } + return instanceID +} + +// getoutpostARN gets the Outpost ARN from the metadata service +func (agent *ecsAgent) getoutpostARN() string { + outpostARN, err := agent.ec2MetadataClient.OutpostARN() + if err == nil { + seelog.Infof( + "Outpost ARN from EC2 Metadata: %s", outpostARN) + return outpostARN + } + return "" +} + +// newStateManager creates a new state manager object for the task engine. 
+// Rest of the parameters are pointers and it's expected that all of these +// will be backfilled when state manager's Load() method is invoked +func (agent *ecsAgent) newStateManager( + taskEngine engine.TaskEngine, + cluster *string, + containerInstanceArn *string, + savedInstanceID *string, + availabilityZone *string, latestSeqNumberTaskManifest *int64) (statemanager.StateManager, error) { + if !agent.cfg.Checkpoint.Enabled() { + return statemanager.NewNoopStateManager(), nil + } + + return agent.stateManagerFactory.NewStateManager(agent.cfg, + // This is for making testing easier as we can mock this + agent.saveableOptionFactory.AddSaveable("TaskEngine", taskEngine), + agent.saveableOptionFactory.AddSaveable("ContainerInstanceArn", + containerInstanceArn), + agent.saveableOptionFactory.AddSaveable("Cluster", cluster), + agent.saveableOptionFactory.AddSaveable("EC2InstanceID", savedInstanceID), + agent.saveableOptionFactory.AddSaveable("availabilityZone", availabilityZone), + agent.saveableOptionFactory.AddSaveable("latestSeqNumberTaskManifest", latestSeqNumberTaskManifest), + ) +} + +// constructVPCSubnetAttributes returns vpc and subnet IDs of the instance as +// an attribute list +func (agent *ecsAgent) constructVPCSubnetAttributes() []*ecs.Attribute { + return []*ecs.Attribute{ + { + Name: aws.String(vpcIDAttributeName), + Value: aws.String(agent.vpc), + }, + { + Name: aws.String(subnetIDAttributeName), + Value: aws.String(agent.subnet), + }, + } +} + +// registerContainerInstance registers the container instance ID for the ECS Agent +func (agent *ecsAgent) registerContainerInstance( + client api.ECSClient, + additionalAttributes []*ecs.Attribute) error { + // Preflight request to make sure they're good + if preflightCreds, err := agent.credentialProvider.Get(); err != nil || preflightCreds.AccessKeyID == "" { + seelog.Errorf("Error getting valid credentials: %s", err) + } + + agentCapabilities, err := agent.capabilities() + if err != nil { + return err + } 
+ capabilities := append(agentCapabilities, additionalAttributes...) + + // Get the tags of this container instance defined in config file + tags := utils.MapToTags(agent.cfg.ContainerInstanceTags) + if agent.cfg.ContainerInstancePropagateTagsFrom == config.ContainerInstancePropagateTagsFromEC2InstanceType { + ec2Tags, err := agent.getContainerInstanceTagsFromEC2API() + // If we are unable to call the API, we should not treat it as a transient error, + // because we've already retried several times, we may throttle the API if we + // keep retrying. + if err != nil { + return err + } + seelog.Infof("Retrieved Tags from EC2 DescribeTags API:\n%v", ec2Tags) + tags = mergeTags(tags, ec2Tags) + } + + platformDevices := agent.getPlatformDevices() + + outpostARN := agent.getoutpostARN() + + if agent.containerInstanceARN != "" { + seelog.Infof("Restored from checkpoint file. I am running as '%s' in cluster '%s'", agent.containerInstanceARN, agent.cfg.Cluster) + return agent.reregisterContainerInstance(client, capabilities, tags, uuid.New(), platformDevices, outpostARN) + } + + seelog.Info("Registering Instance with ECS") + containerInstanceArn, availabilityZone, err := client.RegisterContainerInstance("", + capabilities, tags, uuid.New(), platformDevices, outpostARN) + if err != nil { + seelog.Errorf("Error registering: %v", err) + if retriable, ok := err.(apierrors.Retriable); ok && !retriable.Retry() { + return err + } + if utils.IsAWSErrorCodeEqual(err, ecs.ErrCodeInvalidParameterException) { + seelog.Critical("Instance registration attempt with an invalid parameter") + return err + } + if _, ok := err.(apierrors.AttributeError); ok { + seelog.Critical("Instance registration attempt with an invalid attribute") + return err + } + return transientError{err} + } + seelog.Infof("Registration completed successfully. 
I am running as '%s' in cluster '%s'", containerInstanceArn, agent.cfg.Cluster) + agent.containerInstanceARN = containerInstanceArn + agent.availabilityZone = availabilityZone + return nil +} + +// reregisterContainerInstance registers a container instance that has already been +// registered with ECS. This is for cases where the ECS Agent is being restored +// from a check point. +func (agent *ecsAgent) reregisterContainerInstance(client api.ECSClient, capabilities []*ecs.Attribute, + tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice, outpostARN string) error { + _, availabilityZone, err := client.RegisterContainerInstance(agent.containerInstanceARN, capabilities, tags, + registrationToken, platformDevices, outpostARN) + + //set az to agent + agent.availabilityZone = availabilityZone + + if err == nil { + return nil + } + seelog.Errorf("Error re-registering: %v", err) + if apierrors.IsInstanceTypeChangedError(err) { + seelog.Criticalf(instanceTypeMismatchErrorFormat, err) + return err + } + if _, ok := err.(apierrors.AttributeError); ok { + seelog.Critical("Instance re-registration attempt with an invalid attribute") + return err + } + return transientError{err} +} + +// startAsyncRoutines starts all of the background methods +func (agent *ecsAgent) startAsyncRoutines( + containerChangeEventStream *eventstream.EventStream, + credentialsManager credentials.Manager, + imageManager engine.ImageManager, + taskEngine engine.TaskEngine, + deregisterInstanceEventStream *eventstream.EventStream, + client api.ECSClient, + taskHandler *eventhandler.TaskHandler, + attachmentEventHandler *eventhandler.AttachmentEventHandler, + state dockerstate.TaskEngineState) { + + // Start of the periodic image cleanup process + if !agent.cfg.ImageCleanupDisabled.Enabled() { + go imageManager.StartImageCleanupProcess(agent.ctx) + } + + // Start automatic spot instance draining poller routine + if agent.cfg.SpotInstanceDrainingEnabled.Enabled() { + go 
agent.startSpotInstanceDrainingPoller(agent.ctx, client) + } + + go agent.terminationHandler(state, agent.dataClient, taskEngine, agent.cancel) + + // Agent introspection api + go handlers.ServeIntrospectionHTTPEndpoint(agent.ctx, &agent.containerInstanceARN, taskEngine, agent.cfg) + + statsEngine := stats.NewDockerStatsEngine(agent.cfg, agent.dockerClient, containerChangeEventStream) + + // Start serving the endpoint to fetch IAM Role credentials and other task metadata + if agent.cfg.TaskMetadataAZDisabled { + // send empty availability zone + go handlers.ServeTaskHTTPEndpoint(agent.ctx, credentialsManager, state, client, agent.containerInstanceARN, agent.cfg, statsEngine, "") + } else { + go handlers.ServeTaskHTTPEndpoint(agent.ctx, credentialsManager, state, client, agent.containerInstanceARN, agent.cfg, statsEngine, agent.availabilityZone) + } + + // Start sending events to the backend + go eventhandler.HandleEngineEvents(agent.ctx, taskEngine, client, taskHandler, attachmentEventHandler) + + telemetrySessionParams := tcshandler.TelemetrySessionParams{ + Ctx: agent.ctx, + CredentialProvider: agent.credentialProvider, + Cfg: agent.cfg, + ContainerInstanceArn: agent.containerInstanceARN, + DeregisterInstanceEventStream: deregisterInstanceEventStream, + ECSClient: client, + TaskEngine: taskEngine, + StatsEngine: statsEngine, + } + + // Start metrics session in a go routine + go tcshandler.StartMetricsSession(&telemetrySessionParams) +} + +func (agent *ecsAgent) startSpotInstanceDrainingPoller(ctx context.Context, client api.ECSClient) { + for !agent.spotInstanceDrainingPoller(client) { + select { + case <-ctx.Done(): + return + default: + time.Sleep(time.Second) + } + } +} + +// spotInstanceDrainingPoller returns true if spot instance interruption has been +// set AND the container instance state is successfully updated to DRAINING. 
+func (agent *ecsAgent) spotInstanceDrainingPoller(client api.ECSClient) bool { + // this endpoint 404s unless a interruption has been set, so expect failure in most cases. + resp, err := agent.ec2MetadataClient.SpotInstanceAction() + if err == nil { + type InstanceAction struct { + Time string + Action string + } + ia := InstanceAction{} + + err := json.Unmarshal([]byte(resp), &ia) + if err != nil { + seelog.Errorf("Invalid response from /spot/instance-action endpoint: %s Error: %s", resp, err) + return false + } + + switch ia.Action { + case "hibernate", "terminate", "stop": + default: + seelog.Errorf("Invalid response from /spot/instance-action endpoint: %s, Error: unrecognized action (%s)", resp, ia.Action) + return false + } + + seelog.Infof("Received a spot interruption (%s) scheduled for %s, setting state to DRAINING", ia.Action, ia.Time) + err = client.UpdateContainerInstancesState(agent.containerInstanceARN, "DRAINING") + if err != nil { + seelog.Errorf("Error setting instance [ARN: %s] state to DRAINING: %s", agent.containerInstanceARN, err) + } else { + return true + } + } + return false +} + +// startACSSession starts a session with ECS's Agent Communication service. 
This +// is a blocking call and only returns when the handler returns +func (agent *ecsAgent) startACSSession( + credentialsManager credentials.Manager, + taskEngine engine.TaskEngine, + deregisterInstanceEventStream *eventstream.EventStream, + client api.ECSClient, + state dockerstate.TaskEngineState, + taskHandler *eventhandler.TaskHandler) int { + + acsSession := acshandler.NewSession( + agent.ctx, + agent.cfg, + deregisterInstanceEventStream, + agent.containerInstanceARN, + agent.credentialProvider, + client, + state, + agent.dataClient, + taskEngine, + credentialsManager, + taskHandler, + agent.latestSeqNumberTaskManifest, + ) + seelog.Info("Beginning Polling for updates") + err := acsSession.Start() + if err != nil { + seelog.Criticalf("Unretriable error starting communicating with ACS: %v", err) + return exitcodes.ExitTerminal + } + return exitcodes.ExitSuccess +} + +// validateRequiredVersion validates docker version. +// Minimum docker version supported is 1.9.0, maps to api version 1.21 +// see https://docs.docker.com/develop/sdk/#api-version-matrix +func (agent *ecsAgent) verifyRequiredDockerVersion() (int, bool) { + supportedVersions := agent.dockerClient.SupportedVersions() + if len(supportedVersions) == 0 { + seelog.Critical("Could not get supported docker versions.") + return exitcodes.ExitError, false + } + + // if api version 1.21 is supported, it means docker version is at least 1.9.0 + for _, version := range supportedVersions { + if version == dockerclient.Version_1_21 { + return -1, true + } + } + + // api 1.21 is not supported, docker version is older than 1.9.0 + seelog.Criticalf("Required minimum docker API verion %s is not supported", + dockerclient.Version_1_21) + return exitcodes.ExitTerminal, false +} + +// getContainerInstanceTagsFromEC2API will retrieve the tags of this instance remotely. +func (agent *ecsAgent) getContainerInstanceTagsFromEC2API() ([]*ecs.Tag, error) { + // Get instance ID from ec2 metadata client. 
+ instanceID, err := agent.ec2MetadataClient.InstanceID() + if err != nil { + return nil, err + } + + return agent.ec2Client.DescribeECSTagsForInstance(instanceID) +} + +// mergeTags will merge the local tags and ec2 tags, for the overlap part, ec2 tags +// will be overridden by local tags. +func mergeTags(localTags []*ecs.Tag, ec2Tags []*ecs.Tag) []*ecs.Tag { + tagsMap := make(map[string]string) + + for _, ec2Tag := range ec2Tags { + tagsMap[aws.StringValue(ec2Tag.Key)] = aws.StringValue(ec2Tag.Value) + } + + for _, localTag := range localTags { + tagsMap[aws.StringValue(localTag.Key)] = aws.StringValue(localTag.Value) + } + + return utils.MapToTags(tagsMap) +} + +// getHostPrivateIPv4AddressFromEC2Metadata will retrieve the PrivateIPAddress (IPv4) of this +// instance throught the EC2 API +func (agent *ecsAgent) getHostPrivateIPv4AddressFromEC2Metadata() string { + // Get instance private IP from ec2 metadata client. + hostPrivateIPv4Address, err := agent.ec2MetadataClient.PrivateIPv4Address() + if err != nil { + seelog.Errorf("Unable to retrieve Host Instance PrivateIPv4 Address: %v", err) + return "" + } + return hostPrivateIPv4Address +} + +// getHostPublicIPv4AddressFromEC2Metadata will retrieve the PublicIPAddress (IPv4) of this +// instance through the EC2 API +func (agent *ecsAgent) getHostPublicIPv4AddressFromEC2Metadata() string { + // Get instance public IP from ec2 metadata client. 
+ hostPublicIPv4Address, err := agent.ec2MetadataClient.PublicIPv4Address() + if err != nil { + seelog.Errorf("Unable to retrieve Host Instance PublicIPv4 Address: %v", err) + return "" + } + return hostPublicIPv4Address +} + +func (agent *ecsAgent) saveMetadata(key, val string) { + err := agent.dataClient.SaveMetadata(key, val) + if err != nil { + seelog.Errorf("Failed to save agent metadata to disk (key: [%s], value: [%s]): %v", key, val, err) + } +} diff --git a/agent/app/agent_capability.go b/agent/app/agent_capability.go new file mode 100644 index 00000000000..69adf24f6c1 --- /dev/null +++ b/agent/app/agent_capability.go @@ -0,0 +1,501 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + // capabilityPrefix is deprecated. For new capabilities, use attributePrefix. + capabilityPrefix = "com.amazonaws.ecs.capability." + attributePrefix = "ecs.capability." 
+ capabilityTaskIAMRole = "task-iam-role" + capabilityTaskIAMRoleNetHost = "task-iam-role-network-host" + taskENIAttributeSuffix = "task-eni" + taskENIIPv6AttributeSuffix = "task-eni.ipv6" + taskENIBlockInstanceMetadataAttributeSuffix = "task-eni-block-instance-metadata" + appMeshAttributeSuffix = "aws-appmesh" + cniPluginVersionSuffix = "cni-plugin-version" + capabilityTaskCPUMemLimit = "task-cpu-mem-limit" + capabilityDockerPluginInfix = "docker-plugin." + attributeSeparator = "." + capabilityPrivateRegistryAuthASM = "private-registry-authentication.secretsmanager" + capabilitySecretEnvSSM = "secrets.ssm.environment-variables" + capabilitySecretEnvASM = "secrets.asm.environment-variables" + capabilitySecretLogDriverSSM = "secrets.ssm.bootstrap.log-driver" + capabilitySecretLogDriverASM = "secrets.asm.bootstrap.log-driver" + capabiltyPIDAndIPCNamespaceSharing = "pid-ipc-namespace-sharing" + capabilityNvidiaDriverVersionInfix = "nvidia-driver-version." + capabilityECREndpoint = "ecr-endpoint" + capabilityContainerOrdering = "container-ordering" + taskEIAAttributeSuffix = "task-eia" + taskEIAWithOptimizedCPU = "task-eia.optimized-cpu" + taskENITrunkingAttributeSuffix = "task-eni-trunking" + branchCNIPluginVersionSuffix = "branch-cni-plugin-version" + capabilityFirelensFluentd = "firelens.fluentd" + capabilityFirelensFluentbit = "firelens.fluentbit" + capabilityFirelensLoggingDriver = "logging-driver.awsfirelens" + capabilityFirelensConfigFile = "firelens.options.config.file" + capabilityFirelensConfigS3 = "firelens.options.config.s3" + capabilityFullTaskSync = "full-sync" + capabilityGMSA = "gmsa" + capabilityEFS = "efs" + capabilityEFSAuth = "efsAuth" + capabilityEnvFilesS3 = "env-files.s3" + capabilityFSxWindowsFileServer = "fsxWindowsFileServer" + capabilityExec = "execute-command" + capabilityDepsRootDir = "/managed-agents" + capabilityExecBinRelativePath = "bin" + capabilityExecConfigRelativePath = "config" + capabilityExecCertsRelativePath = "certs" +) + +var 
( + nameOnlyAttributes = []string{ + // ecs agent version 1.19.0 supports private registry authentication using + // aws secrets manager + capabilityPrivateRegistryAuthASM, + // ecs agent version 1.22.0 supports ecs secrets integrating with aws systems manager + capabilitySecretEnvSSM, + // ecs agent version 1.27.0 supports ecs secrets for logging drivers + capabilitySecretLogDriverSSM, + // support ecr endpoint override + capabilityECREndpoint, + // ecs agent version 1.23.0 supports ecs secrets integrating with aws secrets manager + capabilitySecretEnvASM, + // ecs agent version 1.27.0 supports ecs secrets for logging drivers + capabilitySecretLogDriverASM, + // support container ordering in agent + capabilityContainerOrdering, + // support full task sync + capabilityFullTaskSync, + // ecs agent version 1.39.0 supports bulk loading env vars through environmentFiles in S3 + capabilityEnvFilesS3, + } + capabilityExecRequiredBinaries = []string{ + "amazon-ssm-agent", + "ssm-agent-worker", + "ssm-session-worker", + } + capabilityExecRequiredCerts = []string{ + "tls-ca-bundle.pem", + } + // use empty struct as value type to simulate set + capabilityExecInvalidSsmVersions = map[string]struct{}{} + + pathExists = defaultPathExists + getSubDirectories = defaultGetSubDirectories +) + +// capabilities returns the supported capabilities of this agent / docker-client pair. 
+// Currently, the following capabilities are possible: +// +// com.amazonaws.ecs.capability.privileged-container +// com.amazonaws.ecs.capability.docker-remote-api.1.17 +// com.amazonaws.ecs.capability.docker-remote-api.1.18 +// com.amazonaws.ecs.capability.docker-remote-api.1.19 +// com.amazonaws.ecs.capability.docker-remote-api.1.20 +// com.amazonaws.ecs.capability.logging-driver.json-file +// com.amazonaws.ecs.capability.logging-driver.syslog +// com.amazonaws.ecs.capability.logging-driver.fluentd +// com.amazonaws.ecs.capability.logging-driver.journald +// com.amazonaws.ecs.capability.logging-driver.gelf +// com.amazonaws.ecs.capability.logging-driver.none +// com.amazonaws.ecs.capability.selinux +// com.amazonaws.ecs.capability.apparmor +// com.amazonaws.ecs.capability.ecr-auth +// com.amazonaws.ecs.capability.task-iam-role +// com.amazonaws.ecs.capability.task-iam-role-network-host +// ecs.capability.docker-volume-driver.${driverName} +// ecs.capability.task-eni +// ecs.capability.task-eni-block-instance-metadata +// ecs.capability.execution-role-ecr-pull +// ecs.capability.execution-role-awslogs +// ecs.capability.container-health-check +// ecs.capability.private-registry-authentication.secretsmanager +// ecs.capability.secrets.ssm.environment-variables +// ecs.capability.secrets.ssm.bootstrap.log-driver +// ecs.capability.pid-ipc-namespace-sharing +// ecs.capability.ecr-endpoint +// ecs.capability.secrets.asm.environment-variables +// ecs.capability.secrets.asm.bootstrap.log-driver +// ecs.capability.aws-appmesh +// ecs.capability.task-eia +// ecs.capability.task-eni-trunking +// ecs.capability.task-eia.optimized-cpu +// ecs.capability.firelens.fluentd +// ecs.capability.firelens.fluentbit +// ecs.capability.efs +// com.amazonaws.ecs.capability.logging-driver.awsfirelens +// ecs.capability.firelens.options.config.file +// ecs.capability.firelens.options.config.s3 +// ecs.capability.full-sync +// ecs.capability.gmsa +// ecs.capability.efsAuth +// 
ecs.capability.env-files.s3 +// ecs.capability.fsxWindowsFileServer +// ecs.capability.execute-command +func (agent *ecsAgent) capabilities() ([]*ecs.Attribute, error) { + var capabilities []*ecs.Attribute + + for _, cap := range nameOnlyAttributes { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+cap) + } + + if !agent.cfg.PrivilegedDisabled.Enabled() { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"privileged-container") + } + + supportedVersions := make(map[dockerclient.DockerVersion]bool) + // Determine API versions to report as supported. Supported versions are also used for capability-enablement, except + // logging drivers. + for _, version := range agent.dockerClient.SupportedVersions() { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"docker-remote-api."+string(version)) + supportedVersions[version] = true + } + + capabilities = agent.appendLoggingDriverCapabilities(capabilities) + + if agent.cfg.SELinuxCapable.Enabled() { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"selinux") + } + if agent.cfg.AppArmorCapable.Enabled() { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"apparmor") + } + + capabilities = agent.appendTaskIamRoleCapabilities(capabilities, supportedVersions) + + capabilities, err := agent.appendTaskCPUMemLimitCapabilities(capabilities, supportedVersions) + if err != nil { + return nil, err + } + + capabilities = agent.appendTaskENICapabilities(capabilities) + capabilities = agent.appendENITrunkingCapabilities(capabilities) + capabilities = agent.appendDockerDependentCapabilities(capabilities, supportedVersions) + + // TODO: gate this on docker api version when ecs supported docker includes + // credentials endpoint feature from upstream docker + if agent.cfg.OverrideAWSLogsExecutionRole.Enabled() { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"execution-role-awslogs") + } + + capabilities = 
agent.appendVolumeDriverCapabilities(capabilities) + + if agent.cfg.GPUSupportEnabled { + capabilities = agent.appendNvidiaDriverVersionAttribute(capabilities) + } + + // ecs agent version 1.22.0 supports sharing PID namespaces and IPC resource namespaces + // with host EC2 instance and among containers within the task + capabilities = agent.appendPIDAndIPCNamespaceSharingCapabilities(capabilities) + + // ecs agent version 1.26.0 supports aws-appmesh cni plugin + capabilities = agent.appendAppMeshCapabilities(capabilities) + + // support elastic inference in agent + capabilities = agent.appendTaskEIACapabilities(capabilities) + + // support aws router capabilities for fluentd + capabilities = agent.appendFirelensFluentdCapabilities(capabilities) + + // support aws router capabilities for fluentbit + capabilities = agent.appendFirelensFluentbitCapabilities(capabilities) + + // support aws router capabilities for log driver router + capabilities = agent.appendFirelensLoggingDriverCapabilities(capabilities) + + // support efs on ecs capabilities + capabilities = agent.appendEFSCapabilities(capabilities) + + // support external firelens config + capabilities = agent.appendFirelensConfigCapabilities(capabilities) + + // support GMSA capabilities + capabilities = agent.appendGMSACapabilities(capabilities) + + // support efs auth on ecs capabilities + for _, cap := range agent.cfg.VolumePluginCapabilities { + capabilities = agent.appendEFSVolumePluginCapabilities(capabilities, cap) + } + + // support fsxWindowsFileServer on ecs capabilities + capabilities = agent.appendFSxWindowsFileServerCapabilities(capabilities) + // add ecs-exec capabilities if applicable + capabilities, err = agent.appendExecCapabilities(capabilities) + if err != nil { + return nil, err + } + + return capabilities, nil +} + +func (agent *ecsAgent) appendDockerDependentCapabilities(capabilities []*ecs.Attribute, + supportedVersions map[dockerclient.DockerVersion]bool) []*ecs.Attribute { + if _, ok := 
supportedVersions[dockerclient.Version_1_19]; ok { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"ecr-auth") + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"execution-role-ecr-pull") + } + + if _, ok := supportedVersions[dockerclient.Version_1_24]; ok && !agent.cfg.DisableDockerHealthCheck.Enabled() { + // Docker health check was added in API 1.24 + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+"container-health-check") + } + return capabilities +} + +func (agent *ecsAgent) appendLoggingDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + knownVersions := make(map[dockerclient.DockerVersion]struct{}) + // Determine known API versions. Known versions are used exclusively for logging-driver enablement, since none of + // the structural API elements change. + for _, version := range agent.dockerClient.KnownVersions() { + knownVersions[version] = struct{}{} + } + + for _, loggingDriver := range agent.cfg.AvailableLoggingDrivers { + requiredVersion := dockerclient.LoggingDriverMinimumVersion[loggingDriver] + if _, ok := knownVersions[requiredVersion]; ok { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+"logging-driver."+string(loggingDriver)) + } + } + return capabilities +} + +func (agent *ecsAgent) appendTaskIamRoleCapabilities(capabilities []*ecs.Attribute, supportedVersions map[dockerclient.DockerVersion]bool) []*ecs.Attribute { + if agent.cfg.TaskIAMRoleEnabled.Enabled() { + // The "task-iam-role" capability is supported for docker v1.7.x onwards + // Refer https://github.com/docker/docker/blob/master/docs/reference/api/docker_remote_api.md + // to lookup the table of docker supportedVersions to API supportedVersions + if _, ok := supportedVersions[dockerclient.Version_1_19]; ok { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+capabilityTaskIAMRole) + } else { + seelog.Warn("Task IAM Role not enabled due to unsuppported Docker 
version") + } + } + + if agent.cfg.TaskIAMRoleEnabledForNetworkHost { + // The "task-iam-role-network-host" capability is supported for docker v1.7.x onwards + if _, ok := supportedVersions[dockerclient.Version_1_19]; ok { + capabilities = appendNameOnlyAttribute(capabilities, capabilityPrefix+capabilityTaskIAMRoleNetHost) + } else { + seelog.Warn("Task IAM Role for Host Network not enabled due to unsuppported Docker version") + } + } + return capabilities +} + +func (agent *ecsAgent) appendTaskCPUMemLimitCapabilities(capabilities []*ecs.Attribute, supportedVersions map[dockerclient.DockerVersion]bool) ([]*ecs.Attribute, error) { + if agent.cfg.TaskCPUMemLimit.Enabled() { + if _, ok := supportedVersions[dockerclient.Version_1_22]; ok { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityTaskCPUMemLimit) + } else if agent.cfg.TaskCPUMemLimit.Value == config.ExplicitlyEnabled { + // explicitly enabled -- return an error because we cannot fulfil an explicit request + return nil, errors.New("engine: Task CPU + Mem limit cannot be enabled due to unsupported Docker version") + } else { + // implicitly enabled -- don't register the capability, but degrade gracefully + seelog.Warn("Task CPU + Mem Limit disabled due to unsupported Docker version. 
API version 1.22 or greater is required.") + agent.cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + } + } + return capabilities, nil +} + +func (agent *ecsAgent) appendTaskENICapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + if agent.cfg.TaskENIEnabled.Enabled() { + // The assumption here is that all of the dependencies for supporting the + // Task ENI in the Agent have already been validated prior to the invocation of + // the `agent.capabilities()` call + capabilities = append(capabilities, &ecs.Attribute{ + Name: aws.String(attributePrefix + taskENIAttributeSuffix), + }) + capabilities = agent.appendIPv6Capability(capabilities) + taskENIVersionAttribute, err := agent.getTaskENIPluginVersionAttribute() + if err != nil { + return capabilities + } + capabilities = append(capabilities, taskENIVersionAttribute) + + // We only care about AWSVPCBlockInstanceMetdata if Task ENI is enabled + if agent.cfg.AWSVPCBlockInstanceMetdata.Enabled() { + // If the Block Instance Metadata flag is set for AWS VPC networking mode, register a capability + // indicating the same + capabilities = append(capabilities, &ecs.Attribute{ + Name: aws.String(attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix), + }) + } + } + + return capabilities +} + +func (agent *ecsAgent) appendExecCapabilities(capabilities []*ecs.Attribute) ([]*ecs.Attribute, error) { + // for an instance to be exec-enabled, it needs resources needed by SSM (binaries, configuration files and certs) + // the following bind mounts are defined in ecs-init and added to the ecs-agent container + + capabilityExecRootDir := filepath.Join(capabilityDepsRootDir, capabilityExec) + binDir := filepath.Join(capabilityExecRootDir, capabilityExecBinRelativePath) + configDir := filepath.Join(capabilityExecRootDir, capabilityExecConfigRelativePath) + certsDir := filepath.Join(capabilityExecRootDir, capabilityExecCertsRelativePath) + + // top-level folders, /bin, /config, /certs + dependencies := 
map[string][]string{ + binDir: []string{}, + configDir: []string{}, + certsDir: capabilityExecRequiredCerts, + } + if exists, err := dependenciesExist(dependencies); err != nil || !exists { + return capabilities, err + } + + // ssm binaries are stored in /bin//, 1 version is downloaded by ami builder for ECS instances + binDependencies := map[string][]string{} + // child folders named by version inside binDir, e.g. 3.0.236.0 + binFolders, err := getSubDirectories(binDir) + if err != nil { + return capabilities, err + } + // use raw string for regular expression to avoid escaping backslash (\) + var validSsmVersion = regexp.MustCompile(`^\d+(\.\d+)*$`) + for _, binFolder := range binFolders { + if matched := validSsmVersion.Match([]byte(binFolder)); !matched { + continue + } + if _, found := capabilityExecInvalidSsmVersions[binFolder]; found { + continue + } + + // check for the same set of binaries for all versions for now + // change this if future versions of ssm agent require different binaries + versionSubDirectory := filepath.Join(binDir, binFolder) + binDependencies[versionSubDirectory] = capabilityExecRequiredBinaries + } + if len(binDependencies) < 1 { + return capabilities, nil + } + if exists, err := checkAnyValidDependency(binDependencies); err != nil || !exists { + return capabilities, err + } + + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityExec), nil +} + +func defaultGetSubDirectories(path string) ([]string, error) { + var subDirectories []string + + fileInfos, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + for _, fileInfo := range fileInfos { + if fileInfo.IsDir() { + subDirectories = append(subDirectories, fileInfo.Name()) + } + } + return subDirectories, nil +} + +func dependenciesExist(dependencies map[string][]string) (bool, error) { + for directory, files := range dependencies { + if exists, err := pathExists(directory, true); err != nil || !exists { + return false, err + } + + for _, filename := 
range files { + path := filepath.Join(directory, filename) + if exists, err := pathExists(path, false); err != nil || !exists { + return false, err + } + } + } + return true, nil +} + +func checkAnyValidDependency(dependencies map[string][]string) (bool, error) { + var validDependencies = 0 + for directory, files := range dependencies { + filesValid := true + if exists, err := pathExists(directory, true); err != nil || !exists { + continue + } + + for _, filename := range files { + path := filepath.Join(directory, filename) + if exists, err := pathExists(path, false); err != nil || !exists { + filesValid = false + } + } + + if filesValid { + validDependencies++ + } + } + if validDependencies >= 1 { + return true, nil + } + return false, fmt.Errorf("no valid dependencies") +} + +func defaultPathExists(path string, shouldBeDirectory bool) (bool, error) { + fileInfo, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + isDirectory := fileInfo.IsDir() + return (isDirectory && shouldBeDirectory) || (!isDirectory && !shouldBeDirectory), nil +} + +// getTaskENIPluginVersionAttribute returns the version information of the ECS +// CNI plugins. It just executes the ENI plugin as the assumption is that these +// plugins are packaged with the ECS Agent, which means all of the other plugins +// should also emit the same version information. 
Also, the version information +// doesn't contribute to placement decisions and just serves as additional +// debugging information +func (agent *ecsAgent) getTaskENIPluginVersionAttribute() (*ecs.Attribute, error) { + version, err := agent.cniClient.Version(ecscni.ECSENIPluginName) + if err != nil { + seelog.Warnf( + "Unable to determine the version of the plugin '%s': %v", + ecscni.ECSENIPluginName, err) + return nil, err + } + + return &ecs.Attribute{ + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String(version), + }, nil +} + +func appendNameOnlyAttribute(attributes []*ecs.Attribute, name string) []*ecs.Attribute { + return append(attributes, &ecs.Attribute{ + Name: aws.String(name), + }) +} diff --git a/agent/app/agent_capability_test.go b/agent/app/agent_capability_test.go new file mode 100644 index 00000000000..55b0baa9a82 --- /dev/null +++ b/agent/app/agent_capability_test.go @@ -0,0 +1,999 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + + app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks" + mock_pause "github.com/aws/amazon-ecs-agent/agent/eni/pause/mocks" + mock_mobypkgwrapper "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper/mocks" + + "github.com/aws/aws-sdk-go/aws" + aws_credentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testTempDirPrefix = "agent-capability-test-" +) + +func init() { + mockPathExists(false) +} + +func mockPathExists(shouldExist bool) { + pathExists = func(path string, shouldBeDirectory bool) (bool, error) { + return shouldExist, nil + } +} + +func TestCapabilities(t *testing.T) { + mockPathExists(true) + defer mockPathExists(false) + getSubDirectories = func(path string) ([]string, error) { + // appendExecCapabilities() requires at least 1 version to exist + return []string{"3.0.236.0"}, nil + } + defer func() { + getSubDirectories = defaultGetSubDirectories + }() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + AvailableLoggingDrivers: []dockerclient.LoggingDriver{ + dockerclient.JSONFileDriver, + dockerclient.SyslogDriver, + 
dockerclient.JournaldDriver, + dockerclient.GelfDriver, + dockerclient.FluentdDriver, + }, + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + SELinuxCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AppArmorCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AWSVPCBlockInstanceMetdata: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskCleanupWaitDuration: config.DefaultConfig().TaskCleanupWaitDuration, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + // Scan() and ListPluginsWithFilters() are tested with + // AnyTimes() because they are not called in windows. + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + dockerclient.Version_1_19, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedNameOnlyCapabilities := []string{ + capabilityPrefix + "privileged-container", + capabilityPrefix + "docker-remote-api.1.17", + capabilityPrefix + "docker-remote-api.1.18", + capabilityPrefix + "logging-driver.json-file", + capabilityPrefix + "logging-driver.syslog", + capabilityPrefix + "logging-driver.journald", + capabilityPrefix + "selinux", + capabilityPrefix + "apparmor", + attributePrefix + "docker-plugin.local", + attributePrefix + taskENIAttributeSuffix, + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + 
capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabilityFullTaskSync, + attributePrefix + capabilityEnvFilesS3, + attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix, + attributePrefix + capabilityExec, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedNameOnlyCapabilities { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + }...) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestCapabilitiesECR(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + conf := &config.Config{} + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client := mock_dockerapi.NewMockDockerClient(ctrl) + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_19, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + 
mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + pauseLoader: mockPauseLoader, + dockerClient: client, + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["com.amazonaws.ecs.capability.ecr-auth"] + assert.True(t, ok, "Could not find ECR capability when expected; got capabilities %v", capabilities) + + _, ok = capMap["ecs.capability.execution-role-ecr-pull"] + assert.True(t, ok, "Could not find ECR execution pull capability when expected; got capabilities %v", capabilities) +} + +func TestCapabilitiesTaskIAMRoleForSupportedDockerVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conf := &config.Config{ + TaskIAMRoleEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client := mock_dockerapi.NewMockDockerClient(ctrl) + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_19, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + 
pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + ok := capMap["com.amazonaws.ecs.capability.task-iam-role"] + assert.True(t, ok, "Could not find iam capability when expected; got capabilities %v", capabilities) +} + +func TestCapabilitiesTaskIAMRoleForUnSupportedDockerVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + conf := &config.Config{ + TaskIAMRoleEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_18, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["com.amazonaws.ecs.capability.task-iam-role"] + assert.False(t, ok, "task-iam-role capability set for unsupported docker version") +} + +func TestCapabilitiesTaskIAMRoleNetworkHostForSupportedDockerVersion(t 
*testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + conf := &config.Config{ + TaskIAMRoleEnabledForNetworkHost: true, + } + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_19, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["com.amazonaws.ecs.capability.task-iam-role-network-host"] + assert.True(t, ok, "Could not find iam capability when expected; got capabilities %v", capabilities) +} + +func TestCapabilitiesTaskIAMRoleNetworkHostForUnSupportedDockerVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + conf := &config.Config{ + TaskIAMRoleEnabledForNetworkHost: true, + } + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_18, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + 
client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["com.amazonaws.ecs.capability.task-iam-role-network-host"] + assert.False(t, ok, "task-iam-role capability set for unsupported docker version") +} + +func TestAWSVPCBlockInstanceMetadataWhenTaskENIIsDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + AvailableLoggingDrivers: []dockerclient.LoggingDriver{ + dockerclient.JSONFileDriver, + }, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + AWSVPCBlockInstanceMetdata: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + 
dockerclient.Version_1_19, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "privileged-container", + capabilityPrefix + "docker-remote-api.1.17", + capabilityPrefix + "docker-remote-api.1.18", + capabilityPrefix + "logging-driver.json-file", + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } + + for _, capability := range capabilities { + if aws.StringValue(capability.Name) == "ecs.capability.task-eni-block-instance-metadata" { + t.Errorf("%s capability found when Task ENI is disabled in the config", taskENIBlockInstanceMetadataAttributeSuffix) + } + } +} + +func TestCapabilitiesExecutionRoleAWSLogs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + conf := &config.Config{ + OverrideAWSLogsExecutionRole: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + mockMobyPlugins := 
mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }) + client.EXPECT().KnownVersions().Return(nil) + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", errors.New("some error happened")) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + require.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["ecs.capability.execution-role-awslogs"] + assert.True(t, ok, "Could not find AWSLogs execution role capability when expected; got capabilities %v", capabilities) +} + +func TestCapabilitiesTaskResourceLimit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + conf := &config.Config{TaskCPUMemLimit: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}} + + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_22} + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + 
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + expectedCapability := attributePrefix + capabilityTaskCPUMemLimit + + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap[expectedCapability] + assert.True(t, ok, "Should contain: "+expectedCapability) + assert.True(t, agent.cfg.TaskCPUMemLimit.Enabled(), "TaskCPUMemLimit should remain true") +} + +func TestCapabilitesTaskResourceLimitDisabledByMissingDockerVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + conf := &config.Config{TaskCPUMemLimit: config.BooleanDefaultTrue{Value: config.NotSet}} + + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_19} + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + 
dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + unexpectedCapability := attributePrefix + capabilityTaskCPUMemLimit + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap[unexpectedCapability] + + assert.False(t, ok, "Docker 1.22 is required for task resource limits. Should be disabled") + assert.False(t, conf.TaskCPUMemLimit.Enabled(), "TaskCPUMemLimit should be made false when we can't find the right docker.") +} + +func TestCapabilitesTaskResourceLimitErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + conf := &config.Config{TaskCPUMemLimit: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}} + + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_19} + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + pauseLoader: mockPauseLoader, + dockerClient: client, + } + + capabilities, err := agent.capabilities() + assert.Nil(t, capabilities) + assert.Error(t, err, "An error should be thrown when TaskCPUMemLimit is explicitly enabled") +} + +func TestCapabilitiesContainerHealth(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_24, + }) + 
client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &config.Config{}, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + require.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + _, ok := capMap["ecs.capability.container-health-check"] + assert.True(t, ok, "Could not find container health check capability when expected; got capabilities %v", capabilities) +} + +func TestCapabilitiesContainerHealthDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_24, + }) + client.EXPECT().KnownVersions().Return(nil) + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil) + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &config.Config{DisableDockerHealthCheck: 
config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}}, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + require.NoError(t, err) + + capMap := make(map[string]bool) + for _, capability := range capabilities { + capMap[aws.StringValue(capability.Name)] = true + } + + assert.NotContains(t, "ecs.capability.container-health-check", "Find container health check capability unexpected when it is disabled") +} + +func TestCapabilitesListPluginsErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_19} + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return(nil, errors.New("listPlugins error happened")), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &config.Config{}, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + require.NoError(t, err) + + for _, capability := range capabilities { + if strings.HasPrefix(aws.StringValue(capability.Name), "ecs.capability.docker-volume-driver") { + assert.Equal(t, aws.StringValue(capability.Name), "ecs.capability.docker-volume-driver.local") + } + } +} + +func TestCapabilitesScanPluginsErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) 
+ defer ctrl.Finish() + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_19} + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return(nil, errors.New("Scan plugins error happened")), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &config.Config{}, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + require.NoError(t, err) + + for _, capability := range capabilities { + if strings.HasPrefix(aws.StringValue(capability.Name), "ecs.capability.docker-volume-driver") { + assert.Equal(t, aws.StringValue(capability.Name), "ecs.capability.docker-volume-driver.local") + } + } +} + +func TestCapabilitiesExecuteCommand(t *testing.T) { + execCapability := ecs.Attribute{ + Name: aws.String(attributePrefix + capabilityExec), + } + testCases := []struct { + name string + pathExists func(string, bool) (bool, error) + getSubDirectories func(path string) ([]string, error) + invalidSsmVersions map[string]struct{} + shouldHaveExecCapability bool + }{ + { + name: "execute-command capability should not be added if any required file is not found", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return false, nil }, + getSubDirectories: func(path string) ([]string, error) { return []string{"3.0.236.0"}, nil }, + shouldHaveExecCapability: false, + 
}, + { + name: "execute-command capability should not be added if no ssm versions are found", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return true, nil }, + getSubDirectories: func(path string) ([]string, error) { return nil, nil }, + shouldHaveExecCapability: false, + }, + { + name: "execute-command capability should not be added if no valid ssm versions are found", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return true, nil }, + getSubDirectories: func(path string) ([]string, error) { return []string{"2.0.0.0", "some_folder"}, nil }, + invalidSsmVersions: map[string]struct{}{"2.0.0.0": struct{}{}}, + shouldHaveExecCapability: false, + }, + { + name: "execute-command capability should not be added if there are directroies exist but have no valid ssm version exist", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return true, nil }, + getSubDirectories: func(path string) ([]string, error) { return []string{"3.0.236.0", "3.1.23.0"}, nil }, + invalidSsmVersions: map[string]struct{}{"3.0.236.0": struct{}{}, "3.1.23.0": struct{}{}}, + shouldHaveExecCapability: false, + }, + { + name: "execute-command capability should be added if requirements are met", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return true, nil }, + getSubDirectories: func(path string) ([]string, error) { return []string{"3.0.236.0"}, nil }, + shouldHaveExecCapability: true, + }, + { + name: "execute-command capability should be added if have valid ssm version exists", + pathExists: func(path string, shouldBeDirectory bool) (bool, error) { return true, nil }, + getSubDirectories: func(path string) ([]string, error) { return []string{"3.0.236.0", "3.1.23.0"}, nil }, + shouldHaveExecCapability: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pathExists = tc.pathExists + getSubDirectories = tc.getSubDirectories + oCapabilityExecInvalidSsmVersions := 
capabilityExecInvalidSsmVersions + capabilityExecInvalidSsmVersions = tc.invalidSsmVersions + defer func() { + mockPathExists(false) + getSubDirectories = defaultGetSubDirectories + capabilityExecInvalidSsmVersions = oCapabilityExecInvalidSsmVersions + }() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + client := mock_dockerapi.NewMockDockerClient(ctrl) + versionList := []dockerclient.DockerVersion{dockerclient.Version_1_19} + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + gomock.InOrder( + client.EXPECT().SupportedVersions().Return(versionList), + client.EXPECT().KnownVersions().Return(versionList), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return(nil, errors.New("Scan plugins error happened")), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &config.Config{}, + dockerClient: client, + pauseLoader: mockPauseLoader, + mobyPlugins: mockMobyPlugins, + } + + capabilities, err := agent.capabilities() + if err != nil { + t.Fatal(err) + } + + if tc.shouldHaveExecCapability { + assert.Contains(t, capabilities, &execCapability) + } else { + assert.NotContains(t, capabilities, &execCapability) + } + }) + } +} + +func TestDefaultGetSubDirectories(t *testing.T) { + rootDir, err := ioutil.TempDir(os.TempDir(), testTempDirPrefix) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + subDir, err := ioutil.TempDir(rootDir, "dir") + if err != nil { + t.Fatal(err) + } + _, err = ioutil.TempFile(rootDir, "file") + if err != nil { + t.Fatal(err) + } + notExistingPath := filepath.Join(rootDir, "not-existing") + + testCases := []struct { + name string + path string 
+ expectedResult []string + shouldFail bool + }{ + { + name: "return names of child folders if path exists", + path: rootDir, + expectedResult: []string{filepath.Base(subDir)}, + shouldFail: false, + }, + { + name: "return error if path does not exist", + path: notExistingPath, + expectedResult: nil, + shouldFail: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + subDirectories, err := defaultGetSubDirectories(tc.path) + if tc.shouldFail { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // actual result should have the same elements, don't need to be in same order + assert.Subset(t, tc.expectedResult, subDirectories) + assert.Subset(t, subDirectories, tc.expectedResult) + } + }) + } +} + +func TestDefaultPathExistsd(t *testing.T) { + rootDir, err := ioutil.TempDir(os.TempDir(), testTempDirPrefix) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + file, err := ioutil.TempFile(rootDir, "file") + if err != nil { + t.Fatal(err) + } + notExistingPath := filepath.Join(rootDir, "not-existing") + testCases := []struct { + name string + path string + shouldBeDirectory bool + expected bool + }{ + { + name: "return false if directory does not exist", + path: notExistingPath, + shouldBeDirectory: true, + expected: false, + }, + { + name: "return false if false does not exist", + path: notExistingPath, + shouldBeDirectory: false, + expected: false, + }, + { + name: "if directory exists, return shouldBeDirectory", + path: rootDir, + shouldBeDirectory: true, + expected: true, + }, + { + name: "if directory exists, return shouldBeDirectory", + path: rootDir, + shouldBeDirectory: false, + expected: false, + }, + { + name: "if file exists, return !shouldBeDirectory", + path: file.Name(), + shouldBeDirectory: false, + expected: true, + }, + { + name: "if file exists, return !shouldBeDirectory", + path: file.Name(), + shouldBeDirectory: true, + expected: false, + }, + } + for _, tc := range testCases { + 
t.Run(tc.name, func(t *testing.T) { + result, err := defaultPathExists(tc.path, tc.shouldBeDirectory) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, result, tc.expected) + }) + } +} diff --git a/agent/app/agent_capability_unix.go b/agent/app/agent_capability_unix.go new file mode 100644 index 00000000000..9effa91a872 --- /dev/null +++ b/agent/app/agent_capability_unix.go @@ -0,0 +1,188 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "strings" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" +) + +const ( + AVX = "avx" + AVX2 = "avx2" + SSE41 = "sse4_1" + SSE42 = "sse4_2" + CpuInfoPath = "/proc/cpuinfo" +) + +func (agent *ecsAgent) appendVolumeDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + // "local" is default docker driver + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+volume.DockerLocalVolumeDriver) + + // for non-standardized plugins, call docker pkg's plugins.Scan() + nonStandardizedPlugins, err := 
agent.mobyPlugins.Scan() + if err != nil { + seelog.Warnf("Scanning plugins failed: %v", err) + // do not return yet, we need the list of plugins below. range handles nil slice. + } + + for _, pluginName := range nonStandardizedPlugins { + // Replace the ':' to '.' in the plugin name for attributes + capabilities = appendNameOnlyAttribute(capabilities, + attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1)) + } + + // for standardized plugins, call docker's plugin ls API + pluginEnabled := true + volumeDriverType := []string{dockerapi.VolumeDriverType} + standardizedPlugins, err := agent.dockerClient.ListPluginsWithFilters(agent.ctx, pluginEnabled, volumeDriverType, dockerclient.ListPluginsTimeout) + if err != nil { + seelog.Warnf("Listing plugins with filters enabled=%t, capabilities=%v failed: %v", pluginEnabled, volumeDriverType, err) + return capabilities + } + + // For plugin with default tag latest, register two attributes with and without the latest tag + // as the tag is optional and can be added by docker or customer + for _, pluginName := range standardizedPlugins { + names := strings.Split(pluginName, config.DockerTagSeparator) + if len(names) > 1 && names[len(names)-1] == config.DefaultDockerTag { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+strings.Join(names[:len(names)-1], attributeSeparator)) + } + + capabilities = appendNameOnlyAttribute(capabilities, + attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1)) + } + return capabilities +} + +func (agent *ecsAgent) appendNvidiaDriverVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute { + if agent.resourceFields != nil && agent.resourceFields.NvidiaGPUManager != nil { + driverVersion := agent.resourceFields.NvidiaGPUManager.GetDriverVersion() + if driverVersion != "" { + capabilities = 
appendNameOnlyAttribute(capabilities, attributePrefix+capabilityNvidiaDriverVersionInfix+driverVersion) + } + } + return capabilities +} + +func (agent *ecsAgent) appendENITrunkingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + if !agent.cfg.ENITrunkingEnabled.Enabled() { + return capabilities + } + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+taskENITrunkingAttributeSuffix) + return agent.appendBranchENIPluginVersionAttribute(capabilities) +} + +func (agent *ecsAgent) appendBranchENIPluginVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute { + version, err := agent.cniClient.Version(ecscni.ECSBranchENIPluginName) + if err != nil { + seelog.Warnf( + "Unable to determine the version of the plugin '%s': %v", + ecscni.ECSBranchENIPluginName, err) + return capabilities + } + + return append(capabilities, &ecs.Attribute{ + Name: aws.String(attributePrefix + branchCNIPluginVersionSuffix), + Value: aws.String(version), + }) +} + +func (agent *ecsAgent) appendPIDAndIPCNamespaceSharingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + isLoaded, err := agent.pauseLoader.IsLoaded(agent.dockerClient) + if !isLoaded || err != nil { + seelog.Warnf("Pause container is not loaded, did not append PID and IPC capabilities: %v", err) + return capabilities + } + return appendNameOnlyAttribute(capabilities, attributePrefix+capabiltyPIDAndIPCNamespaceSharing) +} + +func (agent *ecsAgent) appendAppMeshCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+appMeshAttributeSuffix) +} + +func (agent *ecsAgent) appendTaskEIACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+taskEIAAttributeSuffix) + + eiaRequiredFlags := []string{AVX, AVX2, SSE41, SSE42} + cpuInfo, err := utils.ReadCPUInfo(CpuInfoPath) + if err != nil { + seelog.Warnf("Unable to read cpuinfo: %v", err) + 
return capabilities + } + + flagMap := utils.GetCPUFlags(cpuInfo) + missingFlags := []string{} + for _, requiredFlag := range eiaRequiredFlags { + if _, ok := flagMap[requiredFlag]; !ok { + missingFlags = append(missingFlags, requiredFlag) + } + } + + if len(missingFlags) > 0 { + seelog.Infof("Missing cpu flags for EIA support: %v", strings.Join(missingFlags, ",")) + return capabilities + } + + return appendNameOnlyAttribute(capabilities, attributePrefix+taskEIAWithOptimizedCPU) +} + +func (agent *ecsAgent) appendFirelensFluentdCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityFirelensFluentd) +} + +func (agent *ecsAgent) appendFirelensFluentbitCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityFirelensFluentbit) +} + +func (agent *ecsAgent) appendEFSCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityEFS) +} + +func (agent *ecsAgent) appendEFSVolumePluginCapabilities(capabilities []*ecs.Attribute, pluginCapability string) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+pluginCapability) +} + +func (agent *ecsAgent) appendFirelensLoggingDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, capabilityPrefix+capabilityFirelensLoggingDriver) +} + +func (agent *ecsAgent) appendFirelensConfigCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityFirelensConfigFile) + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityFirelensConfigS3) +} + +func (agent *ecsAgent) appendGMSACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendIPv6Capability(capabilities 
[]*ecs.Attribute) []*ecs.Attribute { + return appendNameOnlyAttribute(capabilities, attributePrefix+taskENIIPv6AttributeSuffix) +} + +func (agent *ecsAgent) appendFSxWindowsFileServerCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} diff --git a/agent/app/agent_capability_unix_test.go b/agent/app/agent_capability_unix_test.go new file mode 100644 index 00000000000..53c822fd95f --- /dev/null +++ b/agent/app/agent_capability_unix_test.go @@ -0,0 +1,872 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "context" + "errors" + "os" + "path/filepath" + "testing" + + mock_pause "github.com/aws/amazon-ecs-agent/agent/eni/pause/mocks" + + app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks" + "github.com/aws/amazon-ecs-agent/agent/gpu" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/utils" + mock_mobypkgwrapper "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper/mocks" + "github.com/aws/aws-sdk-go/aws" + aws_credentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func init() { + mockPathExists(false) +} + +func TestVolumeDriverCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + AvailableLoggingDrivers: []dockerclient.LoggingDriver{ + dockerclient.JSONFileDriver, + dockerclient.SyslogDriver, + dockerclient.JournaldDriver, + dockerclient.GelfDriver, + dockerclient.FluentdDriver, + }, + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + SELinuxCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AppArmorCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + 
AWSVPCBlockInstanceMetdata: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskCleanupWaitDuration: config.DefaultConfig().TaskCleanupWaitDuration, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + dockerclient.Version_1_19, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + mockMobyPlugins.EXPECT().Scan().Return([]string{"fancyvolumedriver"}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return( + []string{"coolvolumedriver", "volumedriver:latest"}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "privileged-container", + capabilityPrefix + "docker-remote-api.1.17", + capabilityPrefix + "docker-remote-api.1.18", + capabilityPrefix + "logging-driver.json-file", + capabilityPrefix + "logging-driver.syslog", + capabilityPrefix + "logging-driver.journald", + capabilityPrefix + "selinux", + capabilityPrefix + "apparmor", + attributePrefix + "docker-plugin.local", + attributePrefix + "docker-plugin.fancyvolumedriver", + attributePrefix + "docker-plugin.coolvolumedriver", + attributePrefix + "docker-plugin.volumedriver", + attributePrefix + "docker-plugin.volumedriver.latest", + attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + }...) 
+ + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestNvidiaDriverCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + GPUSupportEnabled: true, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + // nvidia driver version capability + attributePrefix + "nvidia-driver-version.396.44", + } + + var expectedCapabilities 
[]*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + resourceFields: &taskresource.ResourceFields{ + NvidiaGPUManager: &gpu.NvidiaGPUManager{ + DriverVersion: "396.44", + }, + }, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestEmptyNvidiaDriverCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + GPUSupportEnabled: true, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + 
attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + resourceFields: &taskresource.ResourceFields{ + NvidiaGPUManager: &gpu.NvidiaGPUManager{ + DriverVersion: "", + }, + }, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestENITrunkingCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ENITrunkingEnabled: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + 
dockerclient.Version_1_17, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + cniClient.EXPECT().Version(ecscni.ECSBranchENIPluginName).Return("v2", nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + taskENIAttributeSuffix, + attributePrefix + taskENIIPv6AttributeSuffix, + attributePrefix + taskENITrunkingAttributeSuffix, + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + // linux specific capabilities + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + { + Name: aws.String(attributePrefix + branchCNIPluginVersionSuffix), + Value: aws.String("v2"), + }, + }...) 
+ + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } + +} + +func TestNoENITrunkingCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ENITrunkingEnabled: config.BooleanDefaultTrue{Value: config.ExplicitlyDisabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix 
+ taskENIAttributeSuffix, + attributePrefix + taskENIIPv6AttributeSuffix, + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + } + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + // linux specific capabilities + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + }...) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestPIDAndIPCNamespaceSharingCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), 
+ mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabilityFullTaskSync, + attributePrefix + capabilityEnvFilesS3, + attributePrefix + capabiltyPIDAndIPCNamespaceSharing, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestPIDAndIPCNamespaceSharingCapabilitiesNoPauseContainer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: 
config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, errors.New("mock error")) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabilityFullTaskSync, + attributePrefix + capabilityEnvFilesS3, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestAppMeshCapabilitiesUnix(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabilityFullTaskSync, + attributePrefix + capabilityEnvFilesS3, + attributePrefix + capabiltyPIDAndIPCNamespaceSharing, + attributePrefix + appMeshAttributeSuffix, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: 
aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestTaskEIACapabilitiesNoOptimizedCPU(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + utils.OpenFile = func(path string) (*os.File, error) { + return os.Open(filepath.Join(".", "testdata", "test_cpu_info_fail")) + } + defer resetOpenFile() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + assert.Contains(t, capabilities, &ecs.Attribute{Name: aws.String(attributePrefix + taskEIAAttributeSuffix)}) + assert.NotContains(t, 
capabilities, &ecs.Attribute{Name: aws.String(attributePrefix + taskEIAWithOptimizedCPU)}) +} + +func TestTaskEIACapabilitiesWithOptimizedCPU(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + utils.OpenFile = func(path string) (*os.File, error) { + return os.Open(filepath.Join(".", "testdata", "test_cpu_info")) + } + defer resetOpenFile() + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + assert.Contains(t, capabilities, &ecs.Attribute{Name: aws.String(attributePrefix + taskEIAWithOptimizedCPU)}) +} + +func resetOpenFile() { + utils.OpenFile = os.Open +} + +func TestCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := 
mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + VolumePluginCapabilities: []string{capabilityEFSAuth}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "docker-remote-api.1.17", + attributePrefix + "docker-plugin.local", + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabiltyPIDAndIPCNamespaceSharing, + attributePrefix + appMeshAttributeSuffix, + attributePrefix + taskEIAAttributeSuffix, + attributePrefix + capabilityFirelensFluentd, + attributePrefix + capabilityFirelensFluentbit, + attributePrefix + capabilityEFS, + attributePrefix + capabilityEFSAuth, + capabilityPrefix + capabilityFirelensLoggingDriver, + attributePrefix + capabilityEnvFilesS3, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel 
async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestFirelensConfigCapabilitiesUnix(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + conf := &config.Config{ + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil) + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + }), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + client.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + assert.Contains(t, capabilities, &ecs.Attribute{Name: aws.String(attributePrefix + capabilityFirelensConfigFile)}) + assert.Contains(t, 
capabilities, &ecs.Attribute{Name: aws.String(attributePrefix + capabilityFirelensConfigS3)}) +} + +func TestAppendGMSACapabilities(t *testing.T) { + var inputCapabilities []*ecs.Attribute + + agent := &ecsAgent{} + + capabilities := agent.appendGMSACapabilities(inputCapabilities) + assert.Equal(t, len(inputCapabilities), len(capabilities)) + assert.EqualValues(t, capabilities, inputCapabilities) +} + +func TestAppendFSxWindowsFileServerCapabilities(t *testing.T) { + var inputCapabilities []*ecs.Attribute + + agent := &ecsAgent{} + + capabilities := agent.appendFSxWindowsFileServerCapabilities(inputCapabilities) + assert.Equal(t, len(inputCapabilities), len(capabilities)) + assert.EqualValues(t, capabilities, inputCapabilities) +} diff --git a/agent/app/agent_capability_unspecified.go b/agent/app/agent_capability_unspecified.go new file mode 100644 index 00000000000..401cb26f31e --- /dev/null +++ b/agent/app/agent_capability_unspecified.go @@ -0,0 +1,123 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "strings" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/cihub/seelog" +) + +func (agent *ecsAgent) appendVolumeDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + // "local" is default docker driver + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+volume.DockerLocalVolumeDriver) + + // for non-standardized plugins, call docker pkg's plugins.Scan() + nonStandardizedPlugins, err := agent.mobyPlugins.Scan() + if err != nil { + seelog.Warnf("Scanning plugins failed: %v", err) + // do not return yet, we need the list of plugins below. range handles nil slice. + } + + for _, pluginName := range nonStandardizedPlugins { + // Replace the ':' to '.' 
in the plugin name for attributes + capabilities = appendNameOnlyAttribute(capabilities, + attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1)) + } + + // for standardized plugins, call docker's plugin ls API + pluginEnabled := true + volumeDriverType := []string{dockerapi.VolumeDriverType} + standardizedPlugins, err := agent.dockerClient.ListPluginsWithFilters(agent.ctx, pluginEnabled, volumeDriverType, dockerclient.ListPluginsTimeout) + if err != nil { + seelog.Warnf("Listing plugins with filters enabled=%t, capabilities=%v failed: %v", pluginEnabled, volumeDriverType, err) + return capabilities + } + + // For plugin with default tag latest, register two attributes with and without the latest tag + // as the tag is optional and can be added by docker or customer + for _, pluginName := range standardizedPlugins { + names := strings.Split(pluginName, config.DockerTagSeparator) + if len(names) > 1 && names[len(names)-1] == config.DefaultDockerTag { + capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+strings.Join(names[:len(names)-1], attributeSeparator)) + } + + capabilities = appendNameOnlyAttribute(capabilities, + attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1)) + } + return capabilities +} + +func (agent *ecsAgent) appendNvidiaDriverVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendENITrunkingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendPIDAndIPCNamespaceSharingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendAppMeshCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) 
appendTaskEIACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensFluentdCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensFluentbitCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendEFSCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensLoggingDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensConfigCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendGMSACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendEFSVolumePluginCapabilities(capabilities []*ecs.Attribute, pluginCapability string) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendIPv6Capability(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFSxWindowsFileServerCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} diff --git a/agent/app/agent_capability_windows.go b/agent/app/agent_capability_windows.go new file mode 100644 index 00000000000..3aa419367c5 --- /dev/null +++ b/agent/app/agent_capability_windows.go @@ -0,0 +1,90 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" +) + +func (agent *ecsAgent) appendVolumeDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + // "local" is default docker driver + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+volume.DockerLocalVolumeDriver) +} + +func (agent *ecsAgent) appendNvidiaDriverVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendENITrunkingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendPIDAndIPCNamespaceSharingCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendAppMeshCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendTaskEIACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensFluentdCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensFluentbitCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendEFSCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensLoggingDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFirelensConfigCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func 
(agent *ecsAgent) appendGMSACapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + if agent.cfg.GMSACapable { + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityGMSA) + } + + return capabilities +} + +func (agent *ecsAgent) appendEFSVolumePluginCapabilities(capabilities []*ecs.Attribute, pluginCapability string) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendIPv6Capability(capabilities []*ecs.Attribute) []*ecs.Attribute { + return capabilities +} + +func (agent *ecsAgent) appendFSxWindowsFileServerCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute { + if agent.cfg.FSxWindowsFileServerCapable { + return appendNameOnlyAttribute(capabilities, attributePrefix+capabilityFSxWindowsFileServer) + } + + return capabilities +} diff --git a/agent/app/agent_capability_windows_test.go b/agent/app/agent_capability_windows_test.go new file mode 100644 index 00000000000..450ecf3238d --- /dev/null +++ b/agent/app/agent_capability_windows_test.go @@ -0,0 +1,310 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "context" + "testing" + + app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks" + mock_mobypkgwrapper "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper/mocks" + + "github.com/aws/aws-sdk-go/aws" + aws_credentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func init() { + mockPathExists(false) +} + +func TestVolumeDriverCapabilitiesWindows(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + conf := &config.Config{ + AvailableLoggingDrivers: []dockerclient.LoggingDriver{ + dockerclient.JSONFileDriver, + dockerclient.SyslogDriver, + dockerclient.JournaldDriver, + dockerclient.GelfDriver, + dockerclient.FluentdDriver, + }, + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + SELinuxCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AppArmorCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AWSVPCBlockInstanceMetdata: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskCleanupWaitDuration: config.DefaultConfig().TaskCleanupWaitDuration, + } + + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + 
dockerclient.Version_1_18, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + dockerclient.Version_1_19, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "privileged-container", + capabilityPrefix + "docker-remote-api.1.17", + capabilityPrefix + "docker-remote-api.1.18", + capabilityPrefix + "logging-driver.json-file", + capabilityPrefix + "logging-driver.syslog", + capabilityPrefix + "logging-driver.journald", + capabilityPrefix + "selinux", + capabilityPrefix + "apparmor", + attributePrefix + "docker-plugin.local", + attributePrefix + taskENIAttributeSuffix, + attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix, + } + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + }...) 
+ + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } + +} + +// Test the list of capabilities supported in windows +func TestSupportedCapabilitiesWindows(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_dockerapi.NewMockDockerClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + conf := &config.Config{ + AvailableLoggingDrivers: []dockerclient.LoggingDriver{ + dockerclient.JSONFileDriver, + dockerclient.SyslogDriver, + dockerclient.JournaldDriver, + dockerclient.GelfDriver, + dockerclient.FluentdDriver, + }, + PrivilegedDisabled: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + SELinuxCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AppArmorCapable: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskENIEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + AWSVPCBlockInstanceMetdata: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + TaskCleanupWaitDuration: config.DefaultConfig().TaskCleanupWaitDuration, + } + + gomock.InOrder( + client.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + }), + client.EXPECT().KnownVersions().Return([]dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + 
dockerclient.Version_1_19, + }), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + ) + + expectedCapabilityNames := []string{ + capabilityPrefix + "privileged-container", + capabilityPrefix + "docker-remote-api.1.17", + capabilityPrefix + "docker-remote-api.1.18", + capabilityPrefix + "logging-driver.json-file", + capabilityPrefix + "logging-driver.syslog", + capabilityPrefix + "logging-driver.journald", + capabilityPrefix + "selinux", + capabilityPrefix + "apparmor", + attributePrefix + "docker-plugin.local", + attributePrefix + taskENIAttributeSuffix, + attributePrefix + capabilityPrivateRegistryAuthASM, + attributePrefix + capabilitySecretEnvSSM, + attributePrefix + capabilitySecretLogDriverSSM, + attributePrefix + capabilityECREndpoint, + attributePrefix + capabilitySecretEnvASM, + attributePrefix + capabilitySecretLogDriverASM, + attributePrefix + capabilityContainerOrdering, + attributePrefix + capabilityFullTaskSync, + attributePrefix + capabilityEnvFilesS3, + attributePrefix + taskENIBlockInstanceMetadataAttributeSuffix} + + var expectedCapabilities []*ecs.Attribute + for _, name := range expectedCapabilityNames { + expectedCapabilities = append(expectedCapabilities, + &ecs.Attribute{Name: aws.String(name)}) + } + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + cniPluginVersionSuffix), + Value: aws.String("v1"), + }, + }...) 
+ + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: conf, + dockerClient: client, + cniClient: cniClient, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + capabilities, err := agent.capabilities() + assert.NoError(t, err) + + assert.Equal(t, len(expectedCapabilities), len(capabilities)) + for _, expected := range expectedCapabilities { + assert.Contains(t, capabilities, &ecs.Attribute{ + Name: expected.Name, + Value: expected.Value, + }) + } +} + +func TestAppendGMSACapabilities(t *testing.T) { + var inputCapabilities []*ecs.Attribute + var expectedCapabilities []*ecs.Attribute + + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + capabilityGMSA), + }, + }...) + + agent := &ecsAgent{ + cfg: &config.Config{ + GMSACapable: true, + }, + } + + capabilities := agent.appendGMSACapabilities(inputCapabilities) + + assert.Equal(t, len(expectedCapabilities), len(capabilities)) + for i, expected := range expectedCapabilities { + assert.Equal(t, aws.StringValue(expected.Name), aws.StringValue(capabilities[i].Name)) + assert.Equal(t, aws.StringValue(expected.Value), aws.StringValue(capabilities[i].Value)) + } +} + +func TestAppendGMSACapabilitiesFalse(t *testing.T) { + var inputCapabilities []*ecs.Attribute + var expectedCapabilities []*ecs.Attribute + + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{}...) 
+ + agent := &ecsAgent{ + cfg: &config.Config{ + GMSACapable: false, + }, + } + + capabilities := agent.appendGMSACapabilities(inputCapabilities) + + assert.Equal(t, len(expectedCapabilities), len(capabilities)) +} + +func TestAppendFSxWindowsFileServerCapabilities(t *testing.T) { + var inputCapabilities []*ecs.Attribute + var expectedCapabilities []*ecs.Attribute + + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{ + { + Name: aws.String(attributePrefix + capabilityFSxWindowsFileServer), + }, + }...) + + agent := &ecsAgent{ + cfg: &config.Config{ + FSxWindowsFileServerCapable: true, + }, + } + + capabilities := agent.appendFSxWindowsFileServerCapabilities(inputCapabilities) + + assert.Equal(t, len(expectedCapabilities), len(capabilities)) + for i, expected := range expectedCapabilities { + assert.Equal(t, aws.StringValue(expected.Name), aws.StringValue(capabilities[i].Name)) + assert.Equal(t, aws.StringValue(expected.Value), aws.StringValue(capabilities[i].Value)) + } +} + +func TestAppendFSxWindowsFileServerCapabilitiesFalse(t *testing.T) { + var inputCapabilities []*ecs.Attribute + var expectedCapabilities []*ecs.Attribute + + expectedCapabilities = append(expectedCapabilities, + []*ecs.Attribute{}...) + + agent := &ecsAgent{ + cfg: &config.Config{ + FSxWindowsFileServerCapable: false, + }, + } + + capabilities := agent.appendFSxWindowsFileServerCapabilities(inputCapabilities) + + assert.Equal(t, len(expectedCapabilities), len(capabilities)) +} diff --git a/agent/app/agent_compatibility_linux.go b/agent/app/agent_compatibility_linux.go new file mode 100644 index 00000000000..f5e92c4a90f --- /dev/null +++ b/agent/app/agent_compatibility_linux.go @@ -0,0 +1,63 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// checkCompatibility (for linux) ensures that the running task engine is capable for running with the TaskCPUMemLimit +// flag enabled. It will disable the feature if it determines that it is incompatible and the feature isn't explicitly +// enabled. +func (agent *ecsAgent) checkCompatibility(engine engine.TaskEngine) error { + + // We don't need to do these checks if CPUMemLimit is disabled + if !agent.cfg.TaskCPUMemLimit.Enabled() { + return nil + } + + tasks, err := engine.ListTasks() + if err != nil { + return err + } + + // Loop over each task to determine if the state is compatible with the running version of agent and its options + compatible := true + for _, task := range tasks { + if !task.MemoryCPULimitsEnabled { + seelog.Warnf("App: task from state file is not compatible with TaskCPUMemLimit setting, because it is not using ecs' cgroup hierarchy: %s", task.Arn) + compatible = false + break + } + } + + if compatible { + return nil + } + + if agent.cfg.TaskCPUMemLimit.Value == config.ExplicitlyEnabled { + return errors.New("App: unable to load old tasks because TaskCPUMemLimits setting is incompatible with old state.") + } + + seelog.Warn("App: disabling TaskCPUMemLimit.") + agent.cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + + return nil +} diff --git a/agent/app/agent_compatibility_linux_test.go b/agent/app/agent_compatibility_linux_test.go new file mode 100644 index 00000000000..8d028bcbdfb --- 
/dev/null +++ b/agent/app/agent_compatibility_linux_test.go @@ -0,0 +1,159 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "context" + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + mock_statemanager "github.com/aws/amazon-ecs-agent/agent/statemanager/mocks" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + mockPathExists(false) +} + +func TestCompatibilityEnabledSuccess(t *testing.T) { + ctrl, creds, _, images, _, _, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + stateManager := mock_statemanager.NewMockStateManager(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.TaskCPUMemLimit = config.BooleanDefaultTrue{Value: config.NotSet} + + agent := &ecsAgent{ + cfg: &cfg, + dataClient: data.NewNoopClient(), + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + ec2MetadataClient: ec2.NewBlackholeEC2MetadataClient(), + } + + gomock.InOrder( + 
saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(), + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil), + stateManager.EXPECT().Load().AnyTimes(), + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + containerChangeEventStream := eventstream.NewEventStream("events", ctx) + _, _, err := agent.newTaskEngine(containerChangeEventStream, creds, dockerstate.NewTaskEngineState(), images, execCmdMgr) + + assert.NoError(t, err) + assert.True(t, cfg.TaskCPUMemLimit.Enabled()) +} + +func TestCompatibilityNotSetFail(t *testing.T) { + ctrl, creds, _, images, _, _, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + stateManager := mock_statemanager.NewMockStateManager(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.TaskCPUMemLimit = config.BooleanDefaultTrue{Value: config.NotSet} + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + populateBoltDB(dataClient, t) + + // Put a bad task in previously saved state. 
+ for _, task := range getTaskListWithOneBadTask() { + require.NoError(t, dataClient.SaveTask(task)) + } + + agent := &ecsAgent{ + cfg: &cfg, + dataClient: dataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + ec2MetadataClient: ec2.NewBlackholeEC2MetadataClient(), + } + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(), + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil), + stateManager.EXPECT().Load().AnyTimes(), + ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + containerChangeEventStream := eventstream.NewEventStream("events", ctx) + _, _, err := agent.newTaskEngine(containerChangeEventStream, creds, dockerstate.NewTaskEngineState(), images, execCmdMgr) + + assert.NoError(t, err) + assert.False(t, cfg.TaskCPUMemLimit.Enabled()) +} + +func TestCompatibilityExplicitlyEnabledFail(t *testing.T) { + ctrl, creds, _, images, _, _, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + stateManager := mock_statemanager.NewMockStateManager(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.TaskCPUMemLimit = config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled} + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + populateBoltDB(dataClient, t) + + // Put a bad task in previously saved state. 
+ for _, task := range getTaskListWithOneBadTask() { + require.NoError(t, dataClient.SaveTask(task)) + } + + agent := &ecsAgent{ + cfg: &cfg, + dataClient: dataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + ec2MetadataClient: ec2.NewBlackholeEC2MetadataClient(), + } + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(), + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil), + stateManager.EXPECT().Load().AnyTimes(), + ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + containerChangeEventStream := eventstream.NewEventStream("events", ctx) + _, _, err := agent.newTaskEngine(containerChangeEventStream, creds, dockerstate.NewTaskEngineState(), images, execCmdMgr) + + assert.Error(t, err) +} + +func getTaskListWithOneBadTask() []*apitask.Task { + badTask := &apitask.Task{ + Arn: testTaskARN, + } + return []*apitask.Task{badTask} +} diff --git a/agent/app/agent_compatibility_unspecified.go b/agent/app/agent_compatibility_unspecified.go new file mode 100644 index 00000000000..63975a94aff --- /dev/null +++ b/agent/app/agent_compatibility_unspecified.go @@ -0,0 +1,22 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import "github.com/aws/amazon-ecs-agent/agent/engine" + +func (agent *ecsAgent) checkCompatibility(engine engine.TaskEngine) error { + return nil +} diff --git a/agent/app/agent_integ_test.go b/agent/app/agent_integ_test.go new file mode 100644 index 00000000000..a9ac2b27479 --- /dev/null +++ b/agent/app/agent_integ_test.go @@ -0,0 +1,36 @@ +// +build integration +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "os" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +func TestNewAgent(t *testing.T) { + os.Setenv("AWS_DEFAULT_REGION", "us-west-2") + defer os.Unsetenv("AWS_DEFAULT_REGION") + + agent, err := newAgent(true, aws.Bool(true)) + + assert.NoError(t, err) + // printECSAttributes should ensure that agent's cfg is set with + // valid values and not panic + assert.Equal(t, exitcodes.ExitSuccess, agent.printECSAttributes()) +} diff --git a/agent/app/agent_test.go b/agent/app/agent_test.go new file mode 100644 index 00000000000..379d4b385f3 --- /dev/null +++ b/agent/app/agent_test.go @@ -0,0 +1,1450 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "sort" + "sync" + "testing" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + mock_factory "github.com/aws/amazon-ecs-agent/agent/app/factory/mocks" + app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + mock_containermetadata "github.com/aws/amazon-ecs-agent/agent/containermetadata/mocks" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_execcmdagent "github.com/aws/amazon-ecs-agent/agent/engine/execcmd/mocks" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + mock_pause "github.com/aws/amazon-ecs-agent/agent/eni/pause/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + mock_statemanager "github.com/aws/amazon-ecs-agent/agent/statemanager/mocks" + 
mock_mobypkgwrapper "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper/mocks" + "github.com/aws/amazon-ecs-agent/agent/version" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + aws_credentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + clusterName = "some-cluster" + containerInstanceARN = "container-instance1" + availabilityZone = "us-west-2b" + hostPrivateIPv4Address = "127.0.0.1" + hostPublicIPv4Address = "127.0.0.1" + instanceID = "i-123" +) + +var apiVersions = []dockerclient.DockerVersion{ + dockerclient.Version_1_21, + dockerclient.Version_1_22, + dockerclient.Version_1_23} +var capabilities []*ecs.Attribute + +func setup(t *testing.T) (*gomock.Controller, + *mock_credentials.MockManager, + *mock_dockerstate.MockTaskEngineState, + *mock_engine.MockImageManager, + *mock_api.MockECSClient, + *mock_dockerapi.MockDockerClient, + *mock_factory.MockStateManager, + *mock_factory.MockSaveableOption, + *mock_execcmdagent.MockManager) { + + ctrl := gomock.NewController(t) + + return ctrl, + mock_credentials.NewMockManager(ctrl), + mock_dockerstate.NewMockTaskEngineState(ctrl), + mock_engine.NewMockImageManager(ctrl), + mock_api.NewMockECSClient(ctrl), + mock_dockerapi.NewMockDockerClient(ctrl), + mock_factory.NewMockStateManager(ctrl), + mock_factory.NewMockSaveableOption(ctrl), + mock_execcmdagent.NewMockManager(ctrl) +} + +func TestDoStartMinimumSupportedDockerVersionTerminal(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + oldAPIVersions := []dockerclient.DockerVersion{ + dockerclient.Version_1_18, + dockerclient.Version_1_19, + dockerclient.Version_1_20} + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(oldAPIVersions), + ) + + cfg := getTestConfig() 
+ cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: dockerClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + } + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitTerminal, exitCode) +} + +func TestDoStartMinimumSupportedDockerVersionError(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return([]dockerclient.DockerVersion{}), + ) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: dockerClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + } + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitError, exitCode) +} + +func TestDoStartNewTaskEngineError(t *testing.T) { + ctrl, credentialsManager, _, imageManager, client, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(apiVersions), + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", 
gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + + // An error in creating the state manager should result in an + // error from newTaskEngine as well + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return( + nil, errors.New("error")), + ) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + dockerClient: dockerClient, + ec2MetadataClient: ec2MetadataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + } + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitTerminal, exitCode) +} + +func TestDoStartRegisterContainerInstanceErrorTerminal(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, 
nil).AnyTimes() + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(apiVersions), + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", apierrors.NewAttributeError("error")), + ) + + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + mobyPlugins: mockMobyPlugins, + ec2MetadataClient: mockEC2Metadata, + } + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitTerminal, exitCode) +} + +func TestDoStartRegisterContainerInstanceErrorNonTerminal(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), 
gomock.Any()).Return(nil, nil).AnyTimes() + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(apiVersions), + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", errors.New("error")), + ) + + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + ec2MetadataClient: mockEC2Metadata, + } + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitError, exitCode) +} + +func TestDoStartHappyPath(t *testing.T) { + ctrl, credentialsManager, _, imageManager, client, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any()). 
+ Return(statemanager.NewNoopStateManager(), nil).AnyTimes() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2MetadataClient.EXPECT().PrivateIPv4Address().Return(hostPrivateIPv4Address, nil) + ec2MetadataClient.EXPECT().PublicIPv4Address().Return(hostPublicIPv4Address, nil) + ec2MetadataClient.EXPECT().OutpostARN().Return("", nil) + ec2MetadataClient.EXPECT().InstanceID().Return(instanceID, nil) + + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + containermetadata := mock_containermetadata.NewMockManager(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.ListContainersResponse{}).AnyTimes() + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until acs session has bee started + discoverEndpointsInvoked.Done() + }).Return("poll-endpoint", nil) + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("acs-endpoint", nil).AnyTimes() + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until telemetry session has bee started + discoverEndpointsInvoked.Done() + }).Return("telemetry-endpoint", nil) + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return( + "tele-endpoint", nil).AnyTimes() + + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(apiVersions), + 
mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return(containerInstanceARN, availabilityZone, nil), + containermetadata.EXPECT().SetContainerInstanceARN(containerInstanceARN), + containermetadata.EXPECT().SetAvailabilityZone(availabilityZone), + containermetadata.EXPECT().SetHostPrivateIPv4Address(hostPrivateIPv4Address), + containermetadata.EXPECT().SetHostPublicIPv4Address(hostPublicIPv4Address), + imageManager.EXPECT().SetDataClient(gomock.Any()), + dockerClient.EXPECT().ContainerEvents(gomock.Any()), + ) + + cfg := getTestConfig() + cfg.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + // Cancel the context to cancel async routines + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: dockerClient, + dataClient: dataClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + metadataManager: containermetadata, + terminationHandler: func(taskEngineState dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: saveableOptionFactory, + } + + var agentW 
sync.WaitGroup + agentW.Add(1) + go func() { + agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, client, execCmdMgr) + agentW.Done() + }() + + discoverEndpointsInvoked.Wait() + cancel() + agentW.Wait() + + assertMetadata(t, data.AgentVersionKey, version.Version, dataClient) + assertMetadata(t, data.AvailabilityZoneKey, availabilityZone, dataClient) + assertMetadata(t, data.ClusterNameKey, clusterName, dataClient) + assertMetadata(t, data.ContainerInstanceARNKey, containerInstanceARN, dataClient) + assertMetadata(t, data.EC2InstanceIDKey, instanceID, dataClient) +} + +func assertMetadata(t *testing.T, key, expectedVal string, dataClient data.Client) { + val, err := dataClient.GetMetadata(key) + require.NoError(t, err) + assert.Equal(t, expectedVal, val) +} + +func TestNewTaskEngineRestoreFromCheckpointNoEC2InstanceIDToLoadHappyPath(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + expectedInstanceID := "inst-1" + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousContainerInstanceARN, ok := saveable.(*string) + assert.True(t, ok) + *previousContainerInstanceARN = "prev-container-inst" + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", 
gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + statemanager.NewNoopStateManager(), nil), + ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil), + ) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: saveableOptionFactory, + } + + _, instanceID, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.NoError(t, err) + assert.Equal(t, expectedInstanceID, instanceID) + assert.Equal(t, "prev-container-inst", agent.containerInstanceARN) +} + +func TestNewTaskEngineRestoreFromCheckpointPreviousEC2InstanceIDLoadedHappyPath(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + expectedInstanceID := "inst-1" + + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousContainerInstanceARN, ok := saveable.(*string) + 
assert.True(t, ok) + *previousContainerInstanceARN = "prev-container-inst" + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousEC2InstanceID, ok := saveable.(*string) + assert.True(t, ok) + *previousEC2InstanceID = "inst-2" + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousAZ, ok := saveable.(*string) + assert.True(t, ok) + *previousAZ = "us-west-2b" + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + statemanager.NewNoopStateManager(), nil), + ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil), + ) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: saveableOptionFactory, + } + + _, instanceID, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.NoError(t, err) + assert.Equal(t, expectedInstanceID, instanceID) + assert.NotEqual(t, "prev-container-inst", agent.containerInstanceARN) + assert.NotEqual(t, "us-west-2b", agent.availabilityZone) +} + +func TestNewTaskEngineRestoreFromCheckpointClusterIDMismatch(t *testing.T) { + ctrl, 
credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.Cluster = "default" + ec2InstanceID := "inst-1" + + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousContainerInstanceARN, ok := saveable.(*string) + assert.True(t, ok) + *previousContainerInstanceARN = ec2InstanceID + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Do( + func(name string, saveable statemanager.Saveable) { + previousCluster, ok := saveable.(*string) + assert.True(t, ok) + *previousCluster = clusterName + }).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + statemanager.NewNoopStateManager(), nil), + ec2MetadataClient.EXPECT().InstanceID().Return(ec2InstanceID, nil), + ) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + stateManagerFactory: stateManagerFactory, + 
ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: saveableOptionFactory, + } + + _, _, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.Error(t, err) + assert.IsType(t, clusterMismatchError{}, err) +} + +func TestNewTaskEngineRestoreFromCheckpointNewStateManagerError(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + nil, errors.New("error")), + ) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: saveableOptionFactory, + } 
+ + _, _, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestNewTaskEngineRestoreFromCheckpointStateLoadError(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + stateManager := mock_statemanager.NewMockStateManager(ctrl) + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + gomock.InOrder( + saveableOptionFactory.EXPECT().AddSaveable("TaskEngine", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil), + saveableOptionFactory.EXPECT().AddSaveable("latestSeqNumberTaskManifest", gomock.Any()).Return(nil), + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(stateManager, nil), + stateManager.EXPECT().Load().Return(errors.New("error")), + ) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + saveableOptionFactory: 
saveableOptionFactory, + } + + _, _, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestNewTaskEngineRestoreFromCheckpoint(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.Cluster = testCluster + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + ec2MetadataClient.EXPECT().InstanceID().Return(testEC2InstanceID, nil) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + // Populate boldtb with test data. + populateBoltDB(dataClient, t) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + dockerClient: dockerClient, + stateManagerFactory: stateManagerFactory, + ec2MetadataClient: ec2MetadataClient, + pauseLoader: mockPauseLoader, + saveableOptionFactory: saveableOptionFactory, + } + + state := dockerstate.NewTaskEngineState() + _, instanceID, err := agent.newTaskEngine(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, execCmdMgr) + assert.NoError(t, err) + assert.Equal(t, testEC2InstanceID, instanceID) + + require.NotNil(t, agent.latestSeqNumberTaskManifest) + s := &savedData{ + availabilityZone: agent.availabilityZone, + cluster: cfg.Cluster, + containerInstanceARN: agent.containerInstanceARN, + ec2InstanceID: instanceID, + latestTaskManifestSeqNum: *agent.latestSeqNumberTaskManifest, + } + checkLoadedData(state, s, t) +} + +func TestSetClusterInConfigMismatch(t *testing.T) { + clusterNamesInConfig := []string{"", 
"foo"} + for _, clusterNameInConfig := range clusterNamesInConfig { + t.Run(fmt.Sprintf("cluster in config is '%s'", clusterNameInConfig), func(t *testing.T) { + cfg := getTestConfig() + cfg.Cluster = "" + agent := &ecsAgent{cfg: &cfg} + err := agent.setClusterInConfig("bar") + assert.Error(t, err) + }) + } +} + +func TestSetClusterInConfig(t *testing.T) { + cfg := getTestConfig() + cfg.Cluster = clusterName + agent := &ecsAgent{cfg: &cfg} + err := agent.setClusterInConfig(clusterName) + assert.NoError(t, err) +} + +func TestGetEC2InstanceIDIIDError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + agent := &ecsAgent{ec2MetadataClient: ec2MetadataClient} + + ec2MetadataClient.EXPECT().InstanceID().Return("", errors.New("error")) + assert.Equal(t, "", agent.getEC2InstanceID()) +} + +func TestGetOupostIDError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + agent := &ecsAgent{ec2MetadataClient: ec2MetadataClient} + + ec2MetadataClient.EXPECT().OutpostARN().Return("", errors.New("error")) + assert.Equal(t, "", agent.getoutpostARN()) +} + +func TestReregisterContainerInstanceHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + 
mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return(containerInstanceARN, availabilityZone, nil), + ) + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: mockDockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + ec2MetadataClient: mockEC2Metadata, + } + agent.containerInstanceARN = containerInstanceARN + agent.availabilityZone = availabilityZone + + err := agent.registerContainerInstance(client, nil) + assert.NoError(t, err) +} + +func TestReregisterContainerInstanceInstanceTypeChanged(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + 
mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return("", "", awserr.New("", + apierrors.InstanceTypeChangedErrorMessage, errors.New(""))), + ) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: mockDockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + ec2MetadataClient: mockEC2Metadata, + mobyPlugins: mockMobyPlugins, + } + agent.containerInstanceARN = containerInstanceARN + agent.availabilityZone = availabilityZone + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestReregisterContainerInstanceAttributeError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + + gomock.InOrder( + 
mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return("", "", apierrors.NewAttributeError("error")), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + ec2MetadataClient: mockEC2Metadata, + dockerClient: mockDockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + agent.containerInstanceARN = containerInstanceARN + agent.availabilityZone = availabilityZone + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestReregisterContainerInstanceNonTerminalError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + gomock.InOrder( 
+ mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return("", "", errors.New("error")), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: mockDockerClient, + ec2MetadataClient: mockEC2Metadata, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + agent.containerInstanceARN = containerInstanceARN + agent.availabilityZone = availabilityZone + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.True(t, isTransient(err)) +} + +func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + 
gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return(containerInstanceARN, availabilityZone, nil), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: mockDockerClient, + ec2MetadataClient: mockEC2Metadata, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + err := agent.registerContainerInstance(client, nil) + assert.NoError(t, err) + assert.Equal(t, containerInstanceARN, agent.containerInstanceARN) + assert.Equal(t, availabilityZone, agent.availabilityZone) +} + +func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetCanRetryError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, 
nil).AnyTimes() + retriableError := apierrors.NewRetriableError(apierrors.NewRetriable(true), errors.New("error")) + gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", retriableError), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: mockDockerClient, + ec2MetadataClient: mockEC2Metadata, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.True(t, isTransient(err)) +} + +func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetCannotRetryError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, 
nil).AnyTimes() + cannotRetryError := apierrors.NewRetriableError(apierrors.NewRetriable(false), errors.New("error")) + gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", cannotRetryError), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + ec2MetadataClient: mockEC2Metadata, + dockerClient: mockDockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetAttributeError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + client := mock_api.NewMockECSClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, 
nil).AnyTimes() + gomock.InOrder( + mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + mockDockerClient.EXPECT().SupportedVersions().Return(nil), + mockDockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", apierrors.NewAttributeError("error")), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + cfg.Cluster = clusterName + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + ec2MetadataClient: mockEC2Metadata, + dockerClient: mockDockerClient, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + mobyPlugins: mockMobyPlugins, + } + + err := agent.registerContainerInstance(client, nil) + assert.Error(t, err) + assert.False(t, isTransient(err)) +} + +func TestRegisterContainerInstanceInvalidParameterTerminalError(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockEC2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(false, nil).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + gomock.InOrder( + dockerClient.EXPECT().SupportedVersions().Return(apiVersions), + 
mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).AnyTimes().Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return("", "", awserr.New("InvalidParameterException", "", nil)), + ) + mockEC2Metadata.EXPECT().OutpostARN().Return("", nil) + + cfg := getTestConfig() + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + ec2MetadataClient: mockEC2Metadata, + cfg: &cfg, + pauseLoader: mockPauseLoader, + credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + mobyPlugins: mockMobyPlugins, + } + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitTerminal, exitCode) +} +func TestMergeTags(t *testing.T) { + ec2Key := "ec2Key" + ec2Value := "ec2Value" + localKey := "localKey" + localValue := "localValue" + commonKey := "commonKey" + commonKeyEC2Value := "commonEC2Value" + commonKeyLocalValue := "commonKeyLocalValue" + + localTags := []*ecs.Tag{ + { + Key: aws.String(localKey), + Value: aws.String(localValue), + }, + { + Key: aws.String(commonKey), + Value: aws.String(commonKeyLocalValue), + }, + } + + ec2Tags := []*ecs.Tag{ + { + Key: aws.String(ec2Key), + Value: aws.String(ec2Value), + }, + { + Key: aws.String(commonKey), + Value: aws.String(commonKeyEC2Value), + }, + } + + mergedTags := mergeTags(localTags, ec2Tags) + + assert.Equal(t, len(mergedTags), 3) + sort.Slice(mergedTags, func(i, j int) 
bool { + return aws.StringValue(mergedTags[i].Key) < aws.StringValue(mergedTags[j].Key) + }) + + assert.Equal(t, commonKey, aws.StringValue(mergedTags[0].Key)) + assert.Equal(t, commonKeyLocalValue, aws.StringValue(mergedTags[0].Value)) + assert.Equal(t, ec2Key, aws.StringValue(mergedTags[1].Key)) + assert.Equal(t, ec2Value, aws.StringValue(mergedTags[1].Value)) + assert.Equal(t, localKey, aws.StringValue(mergedTags[2].Key)) + assert.Equal(t, localValue, aws.StringValue(mergedTags[2].Value)) +} + +func TestGetContainerInstanceTagsFromEC2API(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + instanceID := "iid" + tags := []*ecs.Tag{ + { + Key: aws.String("key"), + Value: aws.String("value"), + }, + } + gomock.InOrder( + ec2MetadataClient.EXPECT().InstanceID().Return(instanceID, nil), + ec2Client.EXPECT().DescribeECSTagsForInstance(instanceID).Return(tags, nil), + ) + + resTags, err := agent.getContainerInstanceTagsFromEC2API() + assert.NoError(t, err) + assert.Equal(t, tags, resTags) +} + +func TestGetContainerInstanceTagsFromEC2APIFailToDescribeECSTagsForInstance(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + instanceID := "iid" + gomock.InOrder( + ec2MetadataClient.EXPECT().InstanceID().Return(instanceID, nil), + ec2Client.EXPECT().DescribeECSTagsForInstance(instanceID).Return(nil, errors.New("error")), + ) + + resTags, err := agent.getContainerInstanceTagsFromEC2API() + assert.Error(t, err) + assert.Nil(t, resTags) +} + +func TestGetHostPrivateIPv4AddressFromEC2Metadata(t *testing.T) { + ctrl := gomock.NewController(t) 
+ defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + ec2MetadataClient.EXPECT().PrivateIPv4Address().Return(hostPrivateIPv4Address, nil) + + assert.Equal(t, hostPrivateIPv4Address, agent.getHostPrivateIPv4AddressFromEC2Metadata()) +} + +func TestGetHostPrivateIPv4AddressFromEC2MetadataFailWithError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + ec2MetadataClient.EXPECT().PrivateIPv4Address().Return("", errors.New("Unable to get IP Address")) + + assert.Empty(t, agent.getHostPrivateIPv4AddressFromEC2Metadata()) +} + +func TestGetHostPublicIPv4AddressFromEC2Metadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + ec2MetadataClient.EXPECT().PublicIPv4Address().Return(hostPublicIPv4Address, nil) + + assert.Equal(t, hostPublicIPv4Address, agent.getHostPublicIPv4AddressFromEC2Metadata()) +} + +func TestGetHostPublicIPv4AddressFromEC2MetadataFailWithError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + } + ec2MetadataClient.EXPECT().PublicIPv4Address().Return("", errors.New("Unable to get IP Address")) + + assert.Empty(t, agent.getHostPublicIPv4AddressFromEC2Metadata()) +} + +func TestSpotInstanceActionCheck_Sunny(t *testing.T) { + tests 
:= []struct { + jsonresp string + }{ + {jsonresp: `{"action": "terminate", "time": "2017-09-18T08:22:00Z"}`}, + {jsonresp: `{"action": "hibernate", "time": "2017-09-18T08:22:00Z"}`}, + {jsonresp: `{"action": "stop", "time": "2017-09-18T08:22:00Z"}`}, + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + for _, test := range tests { + myARN := "myARN" + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + containerInstanceARN: myARN, + } + ec2MetadataClient.EXPECT().SpotInstanceAction().Return(test.jsonresp, nil) + ecsClient.EXPECT().UpdateContainerInstancesState(myARN, "DRAINING").Return(nil) + + assert.True(t, agent.spotInstanceDrainingPoller(ecsClient)) + } +} + +func TestSpotInstanceActionCheck_Fail(t *testing.T) { + tests := []struct { + jsonresp string + }{ + {jsonresp: `{"action": "terminate" "time": "2017-09-18T08:22:00Z"}`}, // invalid json + {jsonresp: ``}, // empty json + {jsonresp: `{"action": "flip!", "time": "2017-09-18T08:22:00Z"}`}, // invalid action + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + for _, test := range tests { + myARN := "myARN" + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + containerInstanceARN: myARN, + } + ec2MetadataClient.EXPECT().SpotInstanceAction().Return(test.jsonresp, nil) + // Container state should NOT be updated because the termination time field is empty. 
+ ecsClient.EXPECT().UpdateContainerInstancesState(gomock.Any(), gomock.Any()).Times(0) + + assert.False(t, agent.spotInstanceDrainingPoller(ecsClient)) + } +} + +func TestSpotInstanceActionCheck_NoInstanceActionYet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + ec2Client := mock_ec2.NewMockClient(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + myARN := "myARN" + agent := &ecsAgent{ + ec2MetadataClient: ec2MetadataClient, + ec2Client: ec2Client, + containerInstanceARN: myARN, + } + ec2MetadataClient.EXPECT().SpotInstanceAction().Return("", fmt.Errorf("404")) + + // Container state should NOT be updated because there is no termination time. + ecsClient.EXPECT().UpdateContainerInstancesState(gomock.Any(), gomock.Any()).Times(0) + + assert.False(t, agent.spotInstanceDrainingPoller(ecsClient)) +} + +func TestSaveMetadata(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + agent := &ecsAgent{ + dataClient: dataClient, + } + agent.saveMetadata(data.AvailabilityZoneKey, availabilityZone) + az, err := dataClient.GetMetadata(data.AvailabilityZoneKey) + require.NoError(t, err) + assert.Equal(t, availabilityZone, az) +} + +func getTestConfig() config.Config { + cfg := config.DefaultConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + return cfg +} + +func newTestDataClient(t *testing.T) (data.Client, func()) { + testDir, err := ioutil.TempDir("", "agent_app_unit_test") + require.NoError(t, err) + + testClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} diff --git a/agent/app/agent_unix.go b/agent/app/agent_unix.go new file mode 100644 index 00000000000..42c947015f4 --- /dev/null +++ b/agent/app/agent_unix.go @@ -0,0 +1,255 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "fmt" + "os" + + asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper" + "github.com/aws/amazon-ecs-agent/agent/eni/watcher" + "github.com/aws/amazon-ecs-agent/agent/gpu" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// initPID defines the process identifier for the init process +const initPID = 1 + +// awsVPCCNIPlugins is a list of CNI plugins required by the ECS Agent +// to configure the ENI for a task +var awsVPCCNIPlugins = []string{ecscni.ECSENIPluginName, + ecscni.ECSBridgePluginName, + ecscni.ECSIPAMPluginName, + ecscni.ECSAppMeshPluginName, + ecscni.ECSBranchENIPluginName, +} + +// startWindowsService is not supported on Linux +func (agent *ecsAgent) 
startWindowsService() int { + seelog.Error("Windows Services are not supported on Linux") + return 1 +} + +var getPid = os.Getpid + +// initializeTaskENIDependencies initializes all of the dependencies required by +// the Agent to support the 'awsvpc' networking mode. A non nil error is returned +// if an error is encountered during this process. An additional boolean flag to +// indicate if this error is considered terminal is also returned +func (agent *ecsAgent) initializeTaskENIDependencies(state dockerstate.TaskEngineState, taskEngine engine.TaskEngine) (error, bool) { + // Check if the Agent process's pid == 1, which means it's running without an init system + if getPid() == initPID { + // This is a terminal error. Bad things happen with invoking the + // the ENI plugin when there's no init process in the pid namespace. + // Specifically, the DHClient processes that are started as children + // of the Agent will not be reaped leading to the ENI device + // disappearing until the Agent is killed. + return errors.New("agent is not started with an init system"), true + } + + // Set VPC and Subnet IDs for the instance + if err, ok := agent.setVPCSubnet(); err != nil { + return err, ok + } + + // Validate that the CNI plugins exist in the expected path and that + // they possess the right capabilities + if err := agent.verifyCNIPluginsCapabilities(); err != nil { + // An error here is terminal as it means that the plugins + // do not support the ENI capability + return err, true + } + + if err := agent.startUdevWatcher(state, taskEngine.StateChangeEvents()); err != nil { + // If udev watcher was not initialized in this run because of the udev socket + // file not being available etc, the Agent might be able to retry and succeed + // on the next run. 
Hence, returning a false here for terminal bool + return err, false + } + + return nil, false +} + +// setVPCSubnet sets the vpc and subnet ids for the agent by querying the +// instance metadata service +func (agent *ecsAgent) setVPCSubnet() (error, bool) { + mac, err := agent.ec2MetadataClient.PrimaryENIMAC() + if err != nil { + return fmt.Errorf("unable to get mac address of instance's primary ENI from instance metadata: %v", err), false + } + + vpcID, err := agent.ec2MetadataClient.VPCID(mac) + if err != nil { + if isInstanceLaunchedInVPC(err) { + return fmt.Errorf("unable to get vpc id from instance metadata: %v", err), true + } + return instanceNotLaunchedInVPCError, false + } + + subnetID, err := agent.ec2MetadataClient.SubnetID(mac) + if err != nil { + return fmt.Errorf("unable to get subnet id from instance metadata: %v", err), false + } + agent.vpc = vpcID + agent.subnet = subnetID + agent.mac = mac + return nil, false +} + +// isInstanceLaunchedInVPC returns false when the awserr returned is an EC2MetadataError +// when querying the vpc id from instance metadata +func isInstanceLaunchedInVPC(err error) bool { + if aerr, ok := err.(awserr.Error); ok && + aerr.Code() == "EC2MetadataError" { + return false + } + return true +} + +// verifyCNIPluginsCapabilities returns an error if there's an error querying +// capabilities or if the required capability is absent from the capabilities +// of the following plugins: +// a. ecs-eni +// b. ecs-bridge +// c. ecs-ipam +// d. aws-appmesh +// e. 
vpc-branch-eni +func (agent *ecsAgent) verifyCNIPluginsCapabilities() error { + // Check if we can get capabilities from each plugin + for _, plugin := range awsVPCCNIPlugins { + // skip verifying branch cni plugin if eni trunking is not enabled + if plugin == ecscni.ECSBranchENIPluginName && agent.cfg != nil && !agent.cfg.ENITrunkingEnabled.Enabled() { + continue + } + + capabilities, err := agent.cniClient.Capabilities(plugin) + if err != nil { + return err + } + // appmesh plugin is not needed for awsvpc networking capability + if plugin == ecscni.ECSAppMeshPluginName { + continue + } + if !contains(capabilities, ecscni.CapabilityAWSVPCNetworkingMode) { + return errors.Errorf("plugin '%s' doesn't support the capability: %s", + plugin, ecscni.CapabilityAWSVPCNetworkingMode) + } + } + + return nil +} + +// startUdevWatcher starts the udev monitor and the watcher for receiving +// notifications from the monitor +func (agent *ecsAgent) startUdevWatcher(state dockerstate.TaskEngineState, stateChangeEvents chan<- statechange.Event) error { + seelog.Debug("Setting up ENI Watcher") + if agent.udevMonitor == nil { + monitor, err := udevwrapper.New() + if err != nil { + return errors.Wrapf(err, "unable to create udev monitor") + } + agent.udevMonitor = monitor + + // Create Watcher + eniWatcher := watcher.New(agent.ctx, agent.mac, agent.udevMonitor, state, stateChangeEvents) + if err := eniWatcher.Init(); err != nil { + return errors.Wrapf(err, "unable to initialize eni watcher") + } + go eniWatcher.Start() + } + return nil +} + +func contains(capabilities []string, capability string) bool { + for _, cap := range capabilities { + if cap == capability { + return true + } + } + + return false +} + +// initializeResourceFields exists mainly for testing doStart() to use mock Control +// object +func (agent *ecsAgent) initializeResourceFields(credentialsManager credentials.Manager) { + agent.resourceFields = &taskresource.ResourceFields{ + Control: cgroup.New(), + 
ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + IOUtil: ioutilwrapper.NewIOUtil(), + ASMClientCreator: asmfactory.NewClientCreator(), + SSMClientCreator: ssmfactory.NewSSMClientCreator(), + CredentialsManager: credentialsManager, + EC2InstanceID: agent.getEC2InstanceID(), + }, + Ctx: agent.ctx, + DockerClient: agent.dockerClient, + NvidiaGPUManager: gpu.NewNvidiaGPUManager(), + } +} + +func (agent *ecsAgent) cgroupInit() error { + err := agent.resourceFields.Control.Init() + // When task CPU and memory limits are enabled, all tasks are placed + // under the '/ecs' cgroup root. + if err == nil { + return nil + } + if agent.cfg.TaskCPUMemLimit.Value == config.ExplicitlyEnabled { + return errors.Wrapf(err, "unable to setup '/ecs' cgroup") + } + seelog.Warnf("Disabling TaskCPUMemLimit because agent is unabled to setup '/ecs' cgroup: %v", err) + agent.cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + return nil +} + +func (agent *ecsAgent) initializeGPUManager() error { + if agent.resourceFields != nil && agent.resourceFields.NvidiaGPUManager != nil { + return agent.resourceFields.NvidiaGPUManager.Initialize() + } + return nil +} + +func (agent *ecsAgent) getPlatformDevices() []*ecs.PlatformDevice { + if agent.cfg.GPUSupportEnabled { + if agent.resourceFields != nil && agent.resourceFields.NvidiaGPUManager != nil { + return agent.resourceFields.NvidiaGPUManager.GetDevices() + } + } + return nil +} + +func (agent *ecsAgent) loadPauseContainer() error { + // Load the pause container's image from the 'disk' + _, err := agent.pauseLoader.LoadImage(agent.ctx, agent.cfg, agent.dockerClient) + + return err +} diff --git a/agent/app/agent_unix_test.go b/agent/app/agent_unix_test.go new file mode 100644 index 00000000000..4feebf54ce0 --- /dev/null +++ b/agent/app/agent_unix_test.go @@ -0,0 +1,740 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "context" + "errors" + "os" + "sync" + "testing" + + app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + mock_pause "github.com/aws/amazon-ecs-agent/agent/eni/pause/mocks" + mock_udev "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + mock_gpu "github.com/aws/amazon-ecs-agent/agent/gpu/mocks" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/mock_control" + mock_mobypkgwrapper "github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper/mocks" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + 
"github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + mac = "01:23:45:67:89:ab" + vpcID = "vpc-1234" + subnetID = "subnet-1234" +) + +func resetGetpid() { + getPid = os.Getpid +} + +func TestDoStartTaskENIHappyPath(t *testing.T) { + ctrl, credentialsManager, _, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + cniCapabilities := []string{ecscni.CapabilityAWSVPCNetworkingMode} + containerChangeEvents := make(chan dockerapi.DockerContainerChangeEvent) + monitoShutdownEvents := make(chan bool) + + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockUdevMonitor := mock_udev.NewMockUdev(ctrl) + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + + // These calls are expected to happen, but cannot be ordered as they are + // invoked via go routines, which will lead to occasional test failues + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.ListContainersResponse{}).AnyTimes() + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until acs session has bee started + discoverEndpointsInvoked.Done() + }).Return("poll-endpoint", nil) + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("acs-endpoint", nil).AnyTimes() + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until telemetry 
session has bee started + discoverEndpointsInvoked.Done() + }).Return("telemetry-endpoint", nil) + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return( + "tele-endpoint", nil).AnyTimes() + mockMetadata.EXPECT().OutpostARN().Return("", nil) + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil).AnyTimes() + mockUdevMonitor.EXPECT().Monitor(gomock.Any()).Return(monitoShutdownEvents).AnyTimes() + + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return(vpcID, nil), + mockMetadata.EXPECT().SubnetID(mac).Return(subnetID, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSBridgePluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSIPAMPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSAppMeshPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSBranchENIPluginName).Return(cniCapabilities, nil), + mockCredentialsProvider.EXPECT().Retrieve().Return(credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + cniClient.EXPECT().Version(ecscni.ECSENIPluginName).Return("v1", nil), + cniClient.EXPECT().Version(ecscni.ECSBranchENIPluginName).Return("v2", nil), + mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Do( + func(x interface{}, attributes []*ecs.Attribute, y interface{}, z interface{}, w interface{}, + outpostARN interface{}) { + vpcFound := false + 
subnetFound := false + for _, attribute := range attributes { + if aws.StringValue(attribute.Name) == vpcIDAttributeName && + aws.StringValue(attribute.Value) == vpcID { + vpcFound = true + } + if aws.StringValue(attribute.Name) == subnetIDAttributeName && + aws.StringValue(attribute.Value) == subnetID { + subnetFound = true + } + } + assert.True(t, vpcFound) + assert.True(t, subnetFound) + }).Return("arn", "", nil), + imageManager.EXPECT().SetDataClient(gomock.Any()), + dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil), + ) + + cfg := getTestConfig() + cfg.TaskENIEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.ENITrunkingEnabled = config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + dataClient: data.NewNoopClient(), + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + udevMonitor: mockUdevMonitor, + cniClient: cniClient, + ec2MetadataClient: mockMetadata, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + mobyPlugins: mockMobyPlugins, + } + + getPid = func() int { + return 10 + } + defer resetGetpid() + + var agentW sync.WaitGroup + agentW.Add(1) + go func() { + agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, client, execCmdMgr) + agentW.Done() + }() + + // Wait for both DiscoverPollEndpointInput and DiscoverTelemetryEndpoint to be + // invoked. 
These are used as proxies to indicate that acs and tcs handlers' + // NewSession call has been invoked + discoverEndpointsInvoked.Wait() + cancel() + agentW.Wait() +} + +func TestSetVPCSubnetHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return(vpcID, nil), + mockMetadata.EXPECT().SubnetID(mac).Return(subnetID, nil), + ) + + agent := &ecsAgent{ec2MetadataClient: mockMetadata} + err, ok := agent.setVPCSubnet() + assert.NoError(t, err) + assert.False(t, ok) + assert.Equal(t, vpcID, agent.vpc) + assert.Equal(t, subnetID, agent.subnet) +} + +func TestSetVPCSubnetClassicEC2(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return("", awserr.New("EC2MetadataError", "failed to make EC2Metadata request", nil)), + ) + agent := &ecsAgent{ec2MetadataClient: mockMetadata} + err, ok := agent.setVPCSubnet() + assert.Error(t, err) + assert.Equal(t, instanceNotLaunchedInVPCError, err) + assert.False(t, ok) + assert.Equal(t, "", agent.vpc) + assert.Equal(t, "", agent.subnet) +} + +func TestSetVPCSubnetPrimaryENIMACError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockMetadata.EXPECT().PrimaryENIMAC().Return("", errors.New("error")) + agent := &ecsAgent{ec2MetadataClient: mockMetadata} + err, ok := agent.setVPCSubnet() + assert.Error(t, err) + assert.False(t, ok) + assert.Equal(t, "", agent.vpc) + assert.Equal(t, "", agent.subnet) +} + +func TestSetVPCSubnetVPCIDError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + 
gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return("", errors.New("error")), + ) + agent := &ecsAgent{ec2MetadataClient: mockMetadata} + err, ok := agent.setVPCSubnet() + assert.Error(t, err) + assert.True(t, ok) + assert.Equal(t, "", agent.vpc) + assert.Equal(t, "", agent.subnet) +} + +func TestSetVPCSubnetSubnetIDError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return(vpcID, nil), + mockMetadata.EXPECT().SubnetID(mac).Return("", errors.New("error")), + ) + agent := &ecsAgent{ec2MetadataClient: mockMetadata} + err, ok := agent.setVPCSubnet() + assert.Error(t, err) + assert.False(t, ok) + assert.Equal(t, "", agent.vpc) + assert.Equal(t, "", agent.subnet) +} + +func TestQueryCNIPluginsCapabilitiesHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cniCapabilities := []string{ecscni.CapabilityAWSVPCNetworkingMode} + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + gomock.InOrder( + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSBridgePluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSIPAMPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSAppMeshPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSBranchENIPluginName).Return(cniCapabilities, nil), + ) + agent := &ecsAgent{ + cniClient: cniClient, + cfg: &config.Config{ + ENITrunkingEnabled: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}, + }, + } + assert.NoError(t, agent.verifyCNIPluginsCapabilities()) +} + +func TestQueryCNIPluginsCapabilitiesEmptyCapabilityListFromPlugin(t *testing.T) { + ctrl := gomock.NewController(t) + defer 
ctrl.Finish() + + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return([]string{}, nil) + + agent := &ecsAgent{ + cniClient: cniClient, + } + + assert.Error(t, agent.verifyCNIPluginsCapabilities()) +} + +func TestQueryCNIPluginsCapabilitiesMissAppMesh(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cniCapabilities := []string{ecscni.CapabilityAWSVPCNetworkingMode} + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + gomock.InOrder( + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSBridgePluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSIPAMPluginName).Return(cniCapabilities, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSAppMeshPluginName).Return(nil, errors.New("error")), + ) + cfg := getTestConfig() + agent := &ecsAgent{ + cniClient: cniClient, + cfg: &cfg, + } + assert.Error(t, agent.verifyCNIPluginsCapabilities()) +} + +func TestQueryCNIPluginsCapabilitiesErrorGettingCapabilitiesFromPlugin(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return(nil, errors.New("error")) + + agent := &ecsAgent{ + cniClient: cniClient, + } + + assert.Error(t, agent.verifyCNIPluginsCapabilities()) +} + +func setupMocksForInitializeTaskENIDependencies(t *testing.T) (*gomock.Controller, + *mock_dockerstate.MockTaskEngineState, + *mock_engine.MockTaskEngine) { + ctrl := gomock.NewController(t) + + return ctrl, + mock_dockerstate.NewMockTaskEngineState(ctrl), + mock_engine.NewMockTaskEngine(ctrl) +} + +func TestInitializeTaskENIDependenciesNoInit(t *testing.T) { + ctrl, state, taskEngine := setupMocksForInitializeTaskENIDependencies(t) + defer ctrl.Finish() + + agent := &ecsAgent{} + + getPid = func() int { + return 1 + } + defer resetGetpid() + + 
err, ok := agent.initializeTaskENIDependencies(state, taskEngine) + assert.Error(t, err) + assert.True(t, ok) +} + +func TestInitializeTaskENIDependenciesSetVPCSubnetError(t *testing.T) { + ctrl, state, taskEngine := setupMocksForInitializeTaskENIDependencies(t) + defer ctrl.Finish() + + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return("", errors.New("error")), + ) + agent := &ecsAgent{ + ec2MetadataClient: mockMetadata, + } + getPid = func() int { + return 10 + } + defer resetGetpid() + + err, ok := agent.initializeTaskENIDependencies(state, taskEngine) + assert.Error(t, err) + assert.False(t, ok) +} + +func TestInitializeTaskENIDependenciesQueryCNICapabilitiesError(t *testing.T) { + ctrl, state, taskEngine := setupMocksForInitializeTaskENIDependencies(t) + defer ctrl.Finish() + + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + gomock.InOrder( + mockMetadata.EXPECT().PrimaryENIMAC().Return(mac, nil), + mockMetadata.EXPECT().VPCID(mac).Return(vpcID, nil), + mockMetadata.EXPECT().SubnetID(mac).Return(subnetID, nil), + cniClient.EXPECT().Capabilities(ecscni.ECSENIPluginName).Return([]string{}, nil), + ) + agent := &ecsAgent{ + ec2MetadataClient: mockMetadata, + cniClient: cniClient, + } + + getPid = func() int { + return 10 + } + defer resetGetpid() + + err, ok := agent.initializeTaskENIDependencies(state, taskEngine) + assert.Error(t, err) + assert.True(t, ok) +} + +// TODO: At some point in the future, enisetup.New() will be refactored to be +// platform independent and we would be able to wrap it in a factory interface +// so that we can mock the factory and test the initialization code path for +// cases where udev monitor initialization fails as well + +func TestDoStartCgroupInitHappyPath(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + 
mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockControl := mock_control.NewMockControl(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + containerChangeEvents := make(chan dockerapi.DockerContainerChangeEvent) + + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + ec2MetadataClient.EXPECT().OutpostARN().Return("", nil) + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil).AnyTimes() + + gomock.InOrder( + mockControl.EXPECT().Init().Return(nil), + mockCredentialsProvider.EXPECT().Retrieve().Return(credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return([]string{}, nil), + client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return("arn", "", nil), + imageManager.EXPECT().SetDataClient(gomock.Any()), + dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil), + state.EXPECT().AllImageStates().Return(nil), + state.EXPECT().AllENIAttachments().Return(nil), + state.EXPECT().AllTasks().Return(nil), + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until acs session has been started + 
discoverEndpointsInvoked.Done() + }).Return("poll-endpoint", nil), + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("acs-endpoint", nil).AnyTimes(), + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until telemetry session has been started + discoverEndpointsInvoked.Done() + }).Return("telemetry-endpoint", nil), + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return( + "tele-endpoint", nil).AnyTimes(), + dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.ListContainersResponse{}).AnyTimes(), + ) + + cfg := config.DefaultConfig() + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + pauseLoader: mockPauseLoader, + dockerClient: dockerClient, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + mobyPlugins: mockMobyPlugins, + ec2MetadataClient: ec2MetadataClient, + resourceFields: &taskresource.ResourceFields{ + Control: mockControl, + }, + } + + var agentW sync.WaitGroup + agentW.Add(1) + go func() { + agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + agentW.Done() + }() + + // Wait for both DiscoverPollEndpoint and DiscoverTelemetryEndpoint to be + // invoked. 
These are used as proxies to indicate that acs and tcs handlers' + // NewSession call has been invoked + + discoverEndpointsInvoked.Wait() + cancel() + agentW.Wait() +} + +func TestDoStartCgroupInitErrorPath(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockControl := mock_control.NewMockControl(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil).AnyTimes() + + mockControl.EXPECT().Init().Return(errors.New("test error")) + + cfg := getTestConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + resourceFields: &taskresource.ResourceFields{ + Control: mockControl, + }, + } + + status := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + + assert.Equal(t, exitcodes.ExitTerminal, status) +} + +func TestDoStartGPUManagerHappyPath(t *testing.T) { + ctrl, 
credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockGPUManager := mock_gpu.NewMockGPUManager(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + ec2MetadataClient := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + + devices := []*ecs.PlatformDevice{ + { + Id: aws.String("id1"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id2"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id3"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + } + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + containerChangeEvents := make(chan dockerapi.DockerContainerChangeEvent) + + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + ec2MetadataClient.EXPECT().OutpostARN().Return("", nil) + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil).AnyTimes() + + gomock.InOrder( + mockGPUManager.EXPECT().Initialize().Return(nil), + mockCredentialsProvider.EXPECT().Retrieve().Return(credentials.Value{}, nil), + dockerClient.EXPECT().SupportedVersions().Return(nil), + dockerClient.EXPECT().KnownVersions().Return(nil), + mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil), + dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return([]string{}, nil), + mockGPUManager.EXPECT().GetDriverVersion().Return("396.44"), + mockGPUManager.EXPECT().GetDevices().Return(devices), + 
client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), devices, gomock.Any()).Return("arn", "", nil), + imageManager.EXPECT().SetDataClient(gomock.Any()), + dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil), + state.EXPECT().AllImageStates().Return(nil), + state.EXPECT().AllENIAttachments().Return(nil), + state.EXPECT().AllTasks().Return(nil), + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until acs session has been started + discoverEndpointsInvoked.Done() + }).Return("poll-endpoint", nil), + client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("acs-endpoint", nil).AnyTimes(), + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Do(func(x interface{}) { + // Ensures that the test waits until telemetry session has been started + discoverEndpointsInvoked.Done() + }).Return("telemetry-endpoint", nil), + client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return( + "tele-endpoint", nil).AnyTimes(), + dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.ListContainersResponse{}).AnyTimes(), + ) + + cfg := getTestConfig() + cfg.GPUSupportEnabled = true + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + mobyPlugins: mockMobyPlugins, + ec2MetadataClient: ec2MetadataClient, + resourceFields: &taskresource.ResourceFields{ + NvidiaGPUManager: mockGPUManager, + }, + } + + var agentW sync.WaitGroup + agentW.Add(1) + go func() { + agent.doStart(eventstream.NewEventStream("events", ctx), + 
credentialsManager, state, imageManager, client, execCmdMgr) + agentW.Done() + }() + + // Wait for both DiscoverPollEndpointInput and DiscoverTelemetryEndpoint to be + // invoked. These are used as proxies to indicate that acs and tcs handlers' + // NewSession call has been invoked + + discoverEndpointsInvoked.Wait() + cancel() + agentW.Wait() +} + +func TestDoStartGPUManagerInitError(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockGPUManager := mock_gpu.NewMockGPUManager(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + mockGPUManager.EXPECT().Initialize().Return(errors.New("init error")) + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + mockPauseLoader.EXPECT().IsLoaded(gomock.Any()).Return(true, nil).AnyTimes() + + cfg := getTestConfig() + cfg.GPUSupportEnabled = true + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + resourceFields: &taskresource.ResourceFields{ + NvidiaGPUManager: mockGPUManager, + }, + } + + status := agent.doStart(eventstream.NewEventStream("events", ctx), + 
credentialsManager, state, imageManager, client, execCmdMgr) + + assert.Equal(t, exitcodes.ExitError, status) +} + +func TestDoStartTaskENIPauseError(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, _, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + mockCredentialsProvider := app_mocks.NewMockProvider(ctrl) + mockPauseLoader := mock_pause.NewMockLoader(ctrl) + mockMetadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl) + + var discoverEndpointsInvoked sync.WaitGroup + discoverEndpointsInvoked.Add(2) + + // These calls are expected to happen, but cannot be ordered as they are + // invoked via go routines, which will lead to occasional test failures + mockCredentialsProvider.EXPECT().IsExpired().Return(false).AnyTimes() + dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes() + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.ListContainersResponse{}).AnyTimes() + imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1) + mockPauseLoader.EXPECT().LoadImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error")).AnyTimes() + + cfg := getTestConfig() + cfg.TaskENIEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.ENITrunkingEnabled = config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled} + ctx, _ := context.WithCancel(context.TODO()) + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + credentialProvider: credentials.NewCredentials(mockCredentialsProvider), + dockerClient: dockerClient, + pauseLoader: mockPauseLoader, + cniClient: cniClient, + ec2MetadataClient: mockMetadata, + terminationHandler: func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + }, + 
mobyPlugins: mockMobyPlugins, + } + + status := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + + assert.Equal(t, exitcodes.ExitError, status) +} diff --git a/agent/app/agent_unspecified.go b/agent/app/agent_unspecified.go new file mode 100644 index 00000000000..77241fc5f72 --- /dev/null +++ b/agent/app/agent_unspecified.go @@ -0,0 +1,55 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "errors" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/cihub/seelog" +) + +func (agent *ecsAgent) initializeTaskENIDependencies(state dockerstate.TaskEngineState, taskEngine engine.TaskEngine) (error, bool) { + return errors.New("unsupported platform"), true +} + +// startWindowsService is not supported on non windows platforms +func (agent *ecsAgent) startWindowsService() int { + seelog.Error("Windows Services are not supported on unspecified platforms") + return 1 +} + +func (agent *ecsAgent) initializeResourceFields(credentialsManager credentials.Manager) { +} + +func (agent *ecsAgent) cgroupInit() error { + return nil +} + +func (agent *ecsAgent) initializeGPUManager() error { + return nil +} + +func (agent *ecsAgent) getPlatformDevices() []*ecs.PlatformDevice { + return nil +} + +func (agent *ecsAgent) loadPauseContainer() error { + return nil +} diff --git a/agent/app/agent_windows.go b/agent/app/agent_windows.go new file mode 100644 index 00000000000..5eb316926e6 --- /dev/null +++ b/agent/app/agent_windows.go @@ -0,0 +1,287 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "context" + "errors" + "sync" + "time" + + fsxfactory "github.com/aws/amazon-ecs-agent/agent/fsx/factory" + + asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + s3factory "github.com/aws/amazon-ecs-agent/agent/s3/factory" + "github.com/aws/amazon-ecs-agent/agent/sighandlers" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + + "github.com/cihub/seelog" + "golang.org/x/sys/windows/svc" +) + +const ( + //EcsSvcName is the name of the service + EcsSvcName = "AmazonECS" +) + +func (agent *ecsAgent) initializeTaskENIDependencies(state dockerstate.TaskEngineState, taskEngine engine.TaskEngine) (error, bool) { + return errors.New("unsupported platform"), true +} + +// startWindowsService runs the ECS agent as a Windows Service +func (agent *ecsAgent) startWindowsService() int { + svc.Run(EcsSvcName, newHandler(agent)) + return 0 +} + +// handler implements https://godoc.org/golang.org/x/sys/windows/svc#Handler +type handler struct { + ecsAgent agent +} + +func newHandler(agent agent) *handler { + return &handler{agent} +} + +// Execute implements https://godoc.org/golang.org/x/sys/windows/svc#Handler +// The basic way that this implementation works is through two channels (representing the requests from Windows and the +// responses we're sending to Windows) and two goroutines (one for message processing with Windows and the other to +// actually run the agent). 
Once we've set everything up and started both goroutines, we wait for either one to exit +// (the Windows goroutine will exit based on messages from Windows while the agent goroutine exits if the agent exits) +// and then cancel the other. Once everything has stopped running, this function returns and the process exits. +func (h *handler) Execute(args []string, requests <-chan svc.ChangeRequest, responses chan<- svc.Status) (bool, uint32) { + defer seelog.Flush() + // channels for communication between goroutines + ctx, cancel := context.WithCancel(context.Background()) + agentDone := make(chan struct{}) + windowsDone := make(chan struct{}) + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + defer close(windowsDone) + defer wg.Done() + h.handleWindowsRequests(ctx, requests, responses) + }() + + var agentExitCode uint32 + go func() { + defer close(agentDone) + defer wg.Done() + agentExitCode = h.runAgent(ctx) + }() + + // Wait until one of the goroutines is either told to stop or fails spectacularly. Under normal conditions we will + // be waiting here for a long time. + select { + case <-windowsDone: + // Service was told to stop by the Windows API. This happens either through manual intervention (i.e., + // "Stop-Service AmazonECS") or through system shutdown. Regardless, this is a normal exit event and not an + // error. + seelog.Info("Received normal signal from Windows to exit") + case <-agentDone: + // This means that the agent stopped on its own. This is where it's appropriate to light the event log on fire + // and set off all the alarms. + seelog.Errorf("Exiting %d", agentExitCode) + } + cancel() + wg.Wait() + seelog.Infof("Bye Bye! Exiting with %d", agentExitCode) + return true, agentExitCode +} + +// handleWindowsRequests is a loop intended to run in a goroutine. It handles bidirectional communication with the +// Windows service manager. 
This function works by pretty much immediately moving to running and then waiting for a +// stop or shut down message from Windows or to be canceled (which could happen if the agent exits by itself and the +// calling function cancels the context). +func (h *handler) handleWindowsRequests(ctx context.Context, requests <-chan svc.ChangeRequest, responses chan<- svc.Status) { + // Immediately tell Windows that we are pending service start. + responses <- svc.Status{State: svc.StartPending} + seelog.Info("Starting Windows service") + + // TODO: Pre-start hooks go here (unclear if we need any yet) + + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms682108(v=vs.85).aspx + // Not sure if a better link exists to describe what these values mean + accepts := svc.AcceptStop | svc.AcceptShutdown + + // Announce that we are running and we accept the above-mentioned commands + responses <- svc.Status{State: svc.Running, Accepts: accepts} + + defer func() { + // Announce that we are stopping + seelog.Info("Stopping Windows service") + responses <- svc.Status{State: svc.StopPending} + }() + + for { + select { + case <-ctx.Done(): + return + case r := <-requests: + switch r.Cmd { + case svc.Interrogate: + // Our status doesn't change unless we are told to stop or shutdown + responses <- r.CurrentStatus + case svc.Stop, svc.Shutdown: + return + default: + continue + } + } + } +} + +// runAgent runs the ECS agent inside a goroutine and waits to be told to exit. +func (h *handler) runAgent(ctx context.Context) uint32 { + agentCtx, cancel := context.WithCancel(ctx) + indicator := newTermHandlerIndicator() + + terminationHandler := func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + // We're using a custom indicator to record that the handler is scheduled to be executed (has been invoked) and + // to determine whether it should run (we skip when the agent engine has already exited). 
After recording to + // the indicator that the handler has been invoked, we wait on the context. When we wake up, we determine + // whether to execute or not based on whether the agent is still running. + defer indicator.finish() + indicator.setInvoked() + <-agentCtx.Done() + if !indicator.isAgentRunning() { + return + } + + seelog.Info("Termination handler received signal to stop") + err := sighandlers.FinalSave(state, dataClient, taskEngine) + if err != nil { + seelog.Criticalf("Error saving state before final shutdown: %v", err) + } + } + h.ecsAgent.setTerminationHandler(terminationHandler) + + go func() { + defer cancel() + exitCode := h.ecsAgent.start() // should block forever, unless there is an error + + if exitCode == exitcodes.ExitTerminal { + seelog.Critical("Terminal exit code received from agent. Windows SCM will not restart the AmazonECS service.") + // We override the exit code to 0 here so that Windows does not treat this as a restartable failure even + // when "sc.exe failureflag" is set. + exitCode = 0 + } + + indicator.agentStopped(exitCode) + }() + + sleepCtx(agentCtx, time.Minute) // give the agent a minute to start and invoke terminationHandler + + // wait for the termination handler to run. Once the termination handler runs, we can safely exit. If the agent + // exits by itself, the termination handler doesn't need to do anything and skips. If the agent exits before the + // termination handler is invoked, we can exit immediately. 
+ return indicator.wait() +} + +// sleepCtx provides a cancelable sleep +func sleepCtx(ctx context.Context, duration time.Duration) { + derivedCtx, _ := context.WithDeadline(ctx, time.Now().Add(duration)) + <-derivedCtx.Done() +} + +type termHandlerIndicator struct { + mu sync.Mutex + agentRunning bool + exitCode uint32 + handlerInvoked bool + handlerDone chan struct{} +} + +func newTermHandlerIndicator() *termHandlerIndicator { + return &termHandlerIndicator{ + agentRunning: true, + handlerInvoked: false, + handlerDone: make(chan struct{}), + } +} + +func (t *termHandlerIndicator) isAgentRunning() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.agentRunning +} + +func (t *termHandlerIndicator) agentStopped(exitCode int) { + t.mu.Lock() + defer t.mu.Unlock() + t.agentRunning = false + t.exitCode = uint32(exitCode) +} + +func (t *termHandlerIndicator) finish() { + close(t.handlerDone) +} + +func (t *termHandlerIndicator) setInvoked() { + t.mu.Lock() + defer t.mu.Unlock() + t.handlerInvoked = true +} + +func (t *termHandlerIndicator) wait() uint32 { + t.mu.Lock() + invoked := t.handlerInvoked + t.mu.Unlock() + if invoked { + <-t.handlerDone + } + t.mu.Lock() + defer t.mu.Unlock() + return t.exitCode +} + +func (agent *ecsAgent) initializeResourceFields(credentialsManager credentials.Manager) { + agent.resourceFields = &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmfactory.NewClientCreator(), + SSMClientCreator: ssmfactory.NewSSMClientCreator(), + FSxClientCreator: fsxfactory.NewFSxClientCreator(), + CredentialsManager: credentialsManager, + }, + Ctx: agent.ctx, + DockerClient: agent.dockerClient, + S3ClientCreator: s3factory.NewS3ClientCreator(), + } +} + +func (agent *ecsAgent) cgroupInit() error { + return errors.New("unsupported platform") +} + +func (agent *ecsAgent) initializeGPUManager() error { + return nil +} + +func (agent *ecsAgent) getPlatformDevices() []*ecs.PlatformDevice { + return 
nil +} + +func (agent *ecsAgent) loadPauseContainer() error { + return nil +} diff --git a/agent/app/agent_windows_test.go b/agent/app/agent_windows_test.go new file mode 100644 index 00000000000..05c27ae6bcb --- /dev/null +++ b/agent/app/agent_windows_test.go @@ -0,0 +1,313 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/sighandlers" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + statemanager_mocks "github.com/aws/amazon-ecs-agent/agent/statemanager/mocks" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows/svc" +) + +type mockAgent struct { + startFunc func() int + terminationHandler sighandlers.TerminationHandler +} + +func (m *mockAgent) start() int { + return m.startFunc() +} +func (m *mockAgent) setTerminationHandler(handler sighandlers.TerminationHandler) { + m.terminationHandler = handler +} +func (m *mockAgent) printECSAttributes() int { return 0 } +func (m *mockAgent) 
startWindowsService() int { return 0 } + +func TestHandler_RunAgent_StartExitImmediately(t *testing.T) { + // register some mocks, but nothing should get called on any of them + ctrl := gomock.NewController(t) + _ = statemanager_mocks.NewMockStateManager(ctrl) + _ = mock_engine.NewMockTaskEngine(ctrl) + defer ctrl.Finish() + + wg := sync.WaitGroup{} + wg.Add(1) + startFunc := func() int { + // startFunc doesn't block, nothing is called + wg.Done() + return 0 + } + agent := &mockAgent{startFunc: startFunc} + handler := &handler{agent} + go handler.runAgent(context.TODO()) + wg.Wait() + assert.NotNil(t, agent.terminationHandler) +} + +func TestHandler_RunAgent_NoSaveWithNoTerminationHandler(t *testing.T) { + // register some mocks, but nothing should get called on any of them + ctrl := gomock.NewController(t) + _ = statemanager_mocks.NewMockStateManager(ctrl) + _ = mock_engine.NewMockTaskEngine(ctrl) + defer ctrl.Finish() + + done := make(chan struct{}) + startFunc := func() int { + <-done // block until after the test ends so that we can test that runAgent returns when cancelled + return 0 + } + agent := &mockAgent{startFunc: startFunc} + handler := &handler{agent} + ctx, cancel := context.WithCancel(context.TODO()) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + handler.runAgent(ctx) + wg.Done() + }() + cancel() + wg.Wait() + assert.NotNil(t, agent.terminationHandler) +} + +func TestHandler_RunAgent_ForceSaveWithTerminationHandler(t *testing.T) { + ctrl := gomock.NewController(t) + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngineState := mock_dockerstate.NewMockTaskEngineState(ctrl) + defer ctrl.Finish() + dataClient := data.NewNoopClient() + + taskEngine.EXPECT().Disable() + taskEngineState.EXPECT().AllTasks().Return(nil) + taskEngineState.EXPECT().AllImageStates().Return(nil) + taskEngineState.EXPECT().AllENIAttachments().Return(nil) + taskEngineState.EXPECT().GetAllContainerIDs().Return([]string{"test-container"}) + 
taskEngineState.EXPECT().ContainerByID("test-container").Return(nil, false) + + agent := &mockAgent{} + + ctx, cancel := context.WithCancel(context.TODO()) + done := make(chan struct{}) + defer func() { done <- struct{}{} }() + startFunc := func() int { + go agent.terminationHandler(taskEngineState, dataClient, taskEngine, cancel) + <-done // block until after the test ends so that we can test that runAgent returns when cancelled + return 0 + } + agent.startFunc = startFunc + handler := &handler{agent} + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + handler.runAgent(ctx) + wg.Done() + }() + time.Sleep(time.Second) // give startFunc enough time to actually call the termination handler + cancel() + wg.Wait() +} + +func TestHandler_HandleWindowsRequests_StopService(t *testing.T) { + requests := make(chan svc.ChangeRequest) + responses := make(chan svc.Status) + + handler := &handler{} + wg := sync.WaitGroup{} + wg.Add(2) + + go func() { + handler.handleWindowsRequests(context.TODO(), requests, responses) + wg.Done() + }() + + go func() { + resp := <-responses + assert.Equal(t, svc.StartPending, resp.State, "Send StartPending immediately") + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after StartPending") + assert.Equal(t, svc.AcceptStop|svc.AcceptShutdown, resp.Accepts, "Accept stop & shutdown") + requests <- svc.ChangeRequest{Cmd: svc.Interrogate, CurrentStatus: svc.Status{State: svc.Running}} + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after Interrogate") + requests <- svc.ChangeRequest{Cmd: svc.Stop} + resp = <-responses + assert.Equal(t, svc.StopPending, resp.State, "Send StopPending after Stop") + wg.Done() + }() + + wg.Wait() +} + +func TestHandler_HandleWindowsRequests_Cancel(t *testing.T) { + requests := make(chan svc.ChangeRequest) + responses := make(chan svc.Status) + + handler := &handler{} + ctx, cancel := context.WithCancel(context.TODO()) + wg := sync.WaitGroup{} + wg.Add(2) + + go 
func() { + handler.handleWindowsRequests(ctx, requests, responses) + wg.Done() + }() + + go func() { + resp := <-responses + assert.Equal(t, svc.StartPending, resp.State, "Send StartPending immediately") + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after StartPending") + assert.Equal(t, svc.AcceptStop|svc.AcceptShutdown, resp.Accepts, "Accept stop & shutdown") + requests <- svc.ChangeRequest{Cmd: svc.Interrogate, CurrentStatus: svc.Status{State: svc.Running}} + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after Interrogate") + cancel() + resp = <-responses + assert.Equal(t, svc.StopPending, resp.State, "Send StopPending after Cancel") + wg.Done() + }() + + wg.Wait() +} + +func TestHandler_Execute_WindowsStops(t *testing.T) { + ctrl := gomock.NewController(t) + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + taskEngineState := mock_dockerstate.NewMockTaskEngineState(ctrl) + defer ctrl.Finish() + dataClient := data.NewNoopClient() + + taskEngine.EXPECT().Disable() + taskEngineState.EXPECT().AllTasks().Return(nil) + taskEngineState.EXPECT().AllImageStates().Return(nil) + taskEngineState.EXPECT().AllENIAttachments().Return(nil) + taskEngineState.EXPECT().GetAllContainerIDs().Return([]string{"test-container"}) + taskEngineState.EXPECT().ContainerByID("test-container").Return(nil, false) + + agent := &mockAgent{} + + _, cancel := context.WithCancel(context.TODO()) + done := make(chan struct{}) + defer func() { done <- struct{}{} }() + startFunc := func() int { + go agent.terminationHandler(taskEngineState, dataClient, taskEngine, cancel) + <-done // block until after the test ends so that we can test that Execute returns when Stopped + return 0 + } + agent.startFunc = startFunc + handler := &handler{agent} + requests := make(chan svc.ChangeRequest) + responses := make(chan svc.Status) + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + handler.Execute(nil, requests, responses) + wg.Done() + }() + + go 
func() { + resp := <-responses + assert.Equal(t, svc.StartPending, resp.State, "Send StartPending immediately") + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after StartPending") + assert.Equal(t, svc.AcceptStop|svc.AcceptShutdown, resp.Accepts, "Accept stop & shutdown") + time.Sleep(time.Second) // let it run for a second + requests <- svc.ChangeRequest{Cmd: svc.Shutdown} + resp = <-responses + assert.Equal(t, svc.StopPending, resp.State, "Send StopPending after Shutdown") + wg.Done() + }() + + wg.Wait() +} + +func TestHandler_Execute_AgentStops(t *testing.T) { + agent := &mockAgent{} + + ctx, cancel := context.WithCancel(context.TODO()) + startFunc := func() int { + <-ctx.Done() + return 0 + } + agent.startFunc = startFunc + handler := &handler{agent} + requests := make(chan svc.ChangeRequest) + responses := make(chan svc.Status) + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + handler.Execute(nil, requests, responses) + wg.Done() + }() + + go func() { + resp := <-responses + assert.Equal(t, svc.StartPending, resp.State, "Send StartPending immediately") + resp = <-responses + assert.Equal(t, svc.Running, resp.State, "Send Running after StartPending") + assert.Equal(t, svc.AcceptStop|svc.AcceptShutdown, resp.Accepts, "Accept stop & shutdown") + time.Sleep(time.Second) // let it run for a second + cancel() + resp = <-responses + assert.Equal(t, svc.StopPending, resp.State, "Send StopPending after agent goroutine stops") + wg.Done() + }() + + wg.Wait() +} + +func TestDoStartTaskLimitsFail(t *testing.T) { + ctrl, credentialsManager, state, imageManager, client, + dockerClient, stateManagerFactory, saveableOptionFactory, execCmdMgr := setup(t) + defer ctrl.Finish() + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer 
cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dockerClient: dockerClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: saveableOptionFactory, + ec2MetadataClient: ec2.NewBlackholeEC2MetadataClient(), + } + + dockerClient.EXPECT().SupportedVersions().Return(apiVersions) + + exitCode := agent.doStart(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, client, execCmdMgr) + assert.Equal(t, exitcodes.ExitTerminal, exitCode) +} diff --git a/agent/app/args/flag.go b/agent/app/args/flag.go new file mode 100644 index 00000000000..6aedaf5916b --- /dev/null +++ b/agent/app/args/flag.go @@ -0,0 +1,93 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package args + +import ( + "flag" +) + +const ( + versionUsage = "Print the agent version information and exit" + logLevelUsage = "Loglevel overrides loglevel-driver and loglevel-on-instance and sets the same level for both on-instance and driver logging: [||||]" + driverLogLevelUsage = "Loglevel for docker logging driver: [||||]" + instanceLogLevelUsage = "Loglevel for Agent on-instance log file: [|||||]" + ecsAttributesUsage = "Print the Agent's ECS Attributes based on its environment" + acceptInsecureCertUsage = "Disable SSL certificate verification. We do not recommend setting this option" + licenseUsage = "Print the LICENSE and NOTICE files and exit" + blacholeEC2MetadataUsage = "Blackhole the EC2 Metadata requests. 
Setting this option can cause the ECS Agent to fail to work properly. We do not recommend setting this option" + windowsServiceUsage = "Run the ECS agent as a Windows Service" + healthcheckServiceUsage = "Run the agent healthcheck" + + versionFlagName = "version" + logLevelFlagName = "loglevel" + driverLogLevelFlagName = "loglevel-driver" + instanceLogLevelFlagName = "loglevel-on-instance" + ecsAttributesFlagName = "ecs-attributes" + acceptInsecureCertFlagName = "k" + licenseFlagName = "license" + blackholeEC2MetadataFlagName = "blackhole-ec2-metadata" + windowsServiceFlagName = "windows-service" + healthCheckFlagName = "healthcheck" +) + +// Args wraps various ECS Agent arguments +type Args struct { + // Version indicates if the version information should be printed + Version *bool + // LogLevel represents the ECS Agent's logging level for both the on-instance file and the logging driver + LogLevel *string + // DriverLogLevel represents the ECS Agent's logging driver log level + DriverLogLevel *string + // InstanceLogLevel represents the ECS Agent's on-instance file log level + InstanceLogLevel *string + // AcceptInsecureCert indicates if SSL certificate verification + // should be disabled + AcceptInsecureCert *bool + // License indicates if the license information should be printed + License *bool + // BlackholeEC2Metadata indicates if EC2 Metadata requests should be + // blackholed + BlackholeEC2Metadata *bool + // ECSAttributes indicates that the agent should print its attributes + ECSAttributes *bool + // WindowsService indicates that the agent should run as a Windows service + WindowsService *bool + // Healthcheck indicates that agent should run healthcheck + Healthcheck *bool +} + +// New creates a new Args object from the argument list +func New(arguments []string) (*Args, error) { + flagset := flag.NewFlagSet("Amazon ECS Agent", flag.ContinueOnError) + + args := &Args{ + Version: flagset.Bool(versionFlagName, false, versionUsage), + LogLevel: 
flagset.String(logLevelFlagName, "", logLevelUsage), + DriverLogLevel: flagset.String(driverLogLevelFlagName, "", driverLogLevelUsage), + InstanceLogLevel: flagset.String(instanceLogLevelFlagName, "", instanceLogLevelUsage), + AcceptInsecureCert: flagset.Bool(acceptInsecureCertFlagName, false, acceptInsecureCertUsage), + License: flagset.Bool(licenseFlagName, false, licenseUsage), + BlackholeEC2Metadata: flagset.Bool(blackholeEC2MetadataFlagName, false, blacholeEC2MetadataUsage), + ECSAttributes: flagset.Bool(ecsAttributesFlagName, false, ecsAttributesUsage), + WindowsService: flagset.Bool(windowsServiceFlagName, false, windowsServiceUsage), + Healthcheck: flagset.Bool(healthCheckFlagName, false, healthcheckServiceUsage), + } + + err := flagset.Parse(arguments) + if err != nil { + return nil, err + } + + return args, nil +} diff --git a/agent/app/data.go b/agent/app/data.go new file mode 100644 index 00000000000..ea5930478c8 --- /dev/null +++ b/agent/app/data.go @@ -0,0 +1,196 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "strconv" + "strings" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + + "github.com/pkg/errors" +) + +const ( + objNotFoundErrMsg = "not found" +) + +// savedData holds all the data that we might save in db. +type savedData struct { + taskEngine engine.TaskEngine + agentVersion string + availabilityZone string + cluster string + containerInstanceARN string + ec2InstanceID string + latestTaskManifestSeqNum int64 +} + +// loadData loads data from previous checkpoint file, if any, with backward compatibility preserved. It first tries to +// load from boltdb, and if it doesn't get anything, it tries to load from state file and then save data it loaded to +// boltdb. Behavior of three cases are considered: +// +// 1. Agent starts from fresh instance (no previous state): +// (1) Try to load from boltdb, get nothing; +// (2) Try to load from state file, get nothing; +// (3) Return empty data. +// +// 2. Agent starts with previous state stored in boltdb: +// (1) Try to load from boltdb, get the data; +// (2) Return loaded data. +// +// 3. Agent starts with previous state stored in state file (i.e. it was just upgraded from an old agent that uses state file): +// (1) Try to load from boltdb, get nothing; +// (2) Try to load from state file, get something; +// (3) Save loaded data to boltdb; +// (4) Return loaded data. 
+func (agent *ecsAgent) loadData(containerChangeEventStream *eventstream.EventStream, + credentialsManager credentials.Manager, + state dockerstate.TaskEngineState, + imageManager engine.ImageManager, + execCmdMgr execcmd.Manager) (*savedData, error) { + s := &savedData{ + taskEngine: engine.NewTaskEngine(agent.cfg, agent.dockerClient, credentialsManager, + containerChangeEventStream, imageManager, state, + agent.metadataManager, agent.resourceFields, execCmdMgr), + } + s.taskEngine.SetDataClient(agent.dataClient) + + err := agent.loadDataFromBoltDB(s) + if err != nil { + return nil, errors.Wrapf(err, "failed to load previous data from BoltDB") + } + + // The fact that agent version is present in the boltdb database is served as an indicator that + // we has started using boltdb to store data. In the migration case (case 3 mentioned in method comment), + // we migrate the data to boltdb before saving the version, so if the version is saved, the data has been + // migrated to boltdb. + if s.agentVersion != "" { + return s, nil + } + + err = agent.loadDataFromStateFile(s) + if err != nil { + return nil, errors.Wrapf(err, "failed to load previous data from state file") + } + + err = agent.saveDataToBoltDB(s) + if err != nil { + return nil, errors.Wrapf(err, "failed to save loaded data to BoltDB") + } + + return s, nil +} + +func (agent *ecsAgent) loadDataFromBoltDB(s *savedData) error { + err := s.taskEngine.LoadState() + if err != nil { + return errors.Wrap(err, "failed to load task engine state") + } + + s.agentVersion, err = agent.loadMetadata(data.AgentVersionKey) + if err != nil { + return err + } + s.availabilityZone, err = agent.loadMetadata(data.AvailabilityZoneKey) + if err != nil { + return err + } + s.cluster, err = agent.loadMetadata(data.ClusterNameKey) + if err != nil { + return err + } + s.containerInstanceARN, err = agent.loadMetadata(data.ContainerInstanceARNKey) + if err != nil { + return err + } + s.ec2InstanceID, err = 
agent.loadMetadata(data.EC2InstanceIDKey) + if err != nil { + return err + } + seqNumStr, err := agent.loadMetadata(data.TaskManifestSeqNumKey) + if err != nil { + return err + } + if seqNumStr != "" { + s.latestTaskManifestSeqNum, err = strconv.ParseInt(seqNumStr, 10, 64) + if err != nil { + return errors.Wrapf(err, "failed to convert saved task manifest sequence number to int64: %s", seqNumStr) + } + } + return nil +} + +func (agent *ecsAgent) loadDataFromStateFile(s *savedData) error { + stateManager, err := agent.newStateManager(s.taskEngine, &s.cluster, &s.containerInstanceARN, + &s.ec2InstanceID, &s.availabilityZone, &s.latestTaskManifestSeqNum) + if err != nil { + return err + } + + err = stateManager.Load() + if err != nil { + return err + } + return nil +} + +func (agent *ecsAgent) saveDataToBoltDB(s *savedData) error { + if err := s.taskEngine.SaveState(); err != nil { + return err + } + if s.availabilityZone != "" { + if err := agent.dataClient.SaveMetadata(data.AvailabilityZoneKey, s.availabilityZone); err != nil { + return err + } + } + if s.cluster != "" { + if err := agent.dataClient.SaveMetadata(data.ClusterNameKey, s.cluster); err != nil { + return err + } + } + if s.containerInstanceARN != "" { + if err := agent.dataClient.SaveMetadata(data.ContainerInstanceARNKey, s.containerInstanceARN); err != nil { + return err + } + } + if s.ec2InstanceID != "" { + if err := agent.dataClient.SaveMetadata(data.EC2InstanceIDKey, s.ec2InstanceID); err != nil { + return err + } + } + if err := agent.dataClient.SaveMetadata(data.TaskManifestSeqNumKey, + strconv.FormatInt(s.latestTaskManifestSeqNum, 10)); err != nil { + return err + } + + return nil +} + +func (agent *ecsAgent) loadMetadata(key string) (string, error) { + val, err := agent.dataClient.GetMetadata(key) + if err != nil { + if strings.Contains(err.Error(), objNotFoundErrMsg) { + // Happen when starting with empty db, not actual error + return "", nil + } + return "", errors.Wrapf(err, "failed to get 
value for metadata '%s'", key) + } + return val, nil +} diff --git a/agent/app/data_test.go b/agent/app/data_test.go new file mode 100644 index 00000000000..786b7c9ef39 --- /dev/null +++ b/agent/app/data_test.go @@ -0,0 +1,269 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "context" + "io/ioutil" + "os" + "strconv" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/app/factory" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testContainerName = "test-name" + testImageId = "test-imageId" + testMac = "test-mac" + testAttachmentArn = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" + testDockerID = "test-docker-id" + testTaskARN = "arn:aws:ecs:region:account-id:task/task-id" + + testContainerInstanceARN = "test-ci-arn" + testCluster = "test-cluster" + testEC2InstanceID = "test-instance-id" + testAvailabilityZone = 
"test-az" + testAgentVersion = "1.40.0" + testLatestSeqNumberTaskManifest = int64(1) +) + +var ( + testContainer = &apicontainer.Container{ + Name: testContainerName, + TaskARNUnsafe: testTaskARN, + } + testDockerContainer = &apicontainer.DockerContainer{ + DockerID: testDockerID, + Container: testContainer, + } + testTask = &apitask.Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{testContainer}, + } + + testImageState = &image.ImageState{ + Image: testImage, + PullSucceeded: false, + } + testImage = &image.Image{ + ImageID: testImageId, + } + + testENIAttachment = &eni.ENIAttachment{ + AttachmentARN: testAttachmentArn, + AttachStatusSent: false, + MACAddress: testMac, + } +) + +func TestLoadDataNoPreviousState(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + _, stateManagerFactory, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + stateManager, dataClient, cleanup := newTestClient(t) + defer cleanup() + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(cfg *config.Config, options ...statemanager.Option) { + for _, option := range options { + option(stateManager) + } + }).Return(stateManager, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: factory.NewSaveableOption(), + } + + _, err := agent.loadData(eventstream.NewEventStream("events", ctx), + credentialsManager, dockerstate.NewTaskEngineState(), imageManager, execCmdMgr) + assert.NoError(t, err) +} + +func TestLoadDataLoadFromBoltDB(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + _, stateManagerFactory, _, execCmdMgr := setup(t) + defer 
ctrl.Finish() + + _, dataClient, cleanup := newTestClient(t) + defer cleanup() + // Populate boltdb with test data. + populateBoltDB(dataClient, t) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: factory.NewSaveableOption(), + } + + state := dockerstate.NewTaskEngineState() + s, err := agent.loadData(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, execCmdMgr) + assert.NoError(t, err) + checkLoadedData(state, s, t) +} + +func TestLoadDataLoadFromStateFile(t *testing.T) { + ctrl, credentialsManager, _, imageManager, _, + _, stateManagerFactory, _, execCmdMgr := setup(t) + defer ctrl.Finish() + + stateManager, dataClient, cleanup := newTestClient(t) + defer cleanup() + // Generate a state file with test data. 
+ generateStateFile(stateManager, t) + + cfg := getTestConfig() + cfg.Checkpoint = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(cfg *config.Config, options ...statemanager.Option) { + for _, option := range options { + option(stateManager) + } + }).Return(stateManager, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + // Cancel the context to cancel async routines + defer cancel() + agent := &ecsAgent{ + ctx: ctx, + cfg: &cfg, + dataClient: dataClient, + stateManagerFactory: stateManagerFactory, + saveableOptionFactory: factory.NewSaveableOption(), + } + + state := dockerstate.NewTaskEngineState() + s, err := agent.loadData(eventstream.NewEventStream("events", ctx), + credentialsManager, state, imageManager, execCmdMgr) + assert.NoError(t, err) + checkLoadedData(state, s, t) + + // Also verify that the data in the state file has been migrated to boltdb. 
+ tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + images, err := dataClient.GetImageStates() + require.NoError(t, err) + assert.Len(t, images, 1) + eniAttachments, err := dataClient.GetENIAttachments() + require.NoError(t, err) + assert.Len(t, eniAttachments, 1) +} + +func checkLoadedData(state dockerstate.TaskEngineState, s *savedData, t *testing.T) { + _, ok := state.TaskByArn(testTaskARN) + assert.True(t, ok) + _, ok = state.ContainerByID(testDockerID) + assert.True(t, ok) + assert.Len(t, state.AllImageStates(), 1) + assert.Len(t, state.AllENIAttachments(), 1) + assert.Equal(t, testAvailabilityZone, s.availabilityZone) + assert.Equal(t, testCluster, s.cluster) + assert.Equal(t, testContainerInstanceARN, s.containerInstanceARN) + assert.Equal(t, testEC2InstanceID, s.ec2InstanceID) + assert.Equal(t, testLatestSeqNumberTaskManifest, s.latestTaskManifestSeqNum) +} + +func newTestClient(t *testing.T) (statemanager.StateManager, data.Client, func()) { + testDir, err := ioutil.TempDir("", "agent_app_unit_test") + require.NoError(t, err) + + stateManager, err := statemanager.NewStateManager(&config.Config{DataDir: testDir}) + require.NoError(t, err) + + dataClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, dataClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return stateManager, dataClient, cleanup +} + +func generateStateFile(stateManager statemanager.StateManager, t *testing.T) { + engineState := getTestEngineState() + statemanager.AddSaveable("TaskEngine", engineState)(stateManager) + containerInstanceARN := testContainerInstanceARN + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceARN)(stateManager) + cluster := testCluster + statemanager.AddSaveable("Cluster", &cluster)(stateManager) + instanceID := testEC2InstanceID + 
statemanager.AddSaveable("EC2InstanceID", &instanceID)(stateManager) + availabilityZone := testAvailabilityZone + statemanager.AddSaveable("availabilityZone", &availabilityZone)(stateManager) + latestSeqNumberTaskManifest := testLatestSeqNumberTaskManifest + statemanager.AddSaveable("latestSeqNumberTaskManifest", &latestSeqNumberTaskManifest)(stateManager) + require.NoError(t, stateManager.ForceSave()) +} + +func populateBoltDB(dataClient data.Client, t *testing.T) { + require.NoError(t, dataClient.SaveTask(testTask)) + require.NoError(t, dataClient.SaveDockerContainer(testDockerContainer)) + require.NoError(t, dataClient.SaveImageState(testImageState)) + require.NoError(t, dataClient.SaveENIAttachment(testENIAttachment)) + require.NoError(t, dataClient.SaveMetadata(data.AgentVersionKey, testAgentVersion)) + require.NoError(t, dataClient.SaveMetadata(data.AvailabilityZoneKey, testAvailabilityZone)) + require.NoError(t, dataClient.SaveMetadata(data.ClusterNameKey, testCluster)) + require.NoError(t, dataClient.SaveMetadata(data.ContainerInstanceARNKey, testContainerInstanceARN)) + require.NoError(t, dataClient.SaveMetadata(data.EC2InstanceIDKey, testEC2InstanceID)) + require.NoError(t, dataClient.SaveMetadata(data.TaskManifestSeqNumKey, strconv.FormatInt(testLatestSeqNumberTaskManifest, 10))) +} + +func getTestEngineState() dockerstate.TaskEngineState { + state := dockerstate.NewTaskEngineState() + state.AddTask(testTask) + state.AddContainer(testDockerContainer, testTask) + state.AddImageState(testImageState) + state.AddENIAttachment(testENIAttachment) + return state +} diff --git a/agent/app/errors.go b/agent/app/errors.go new file mode 100644 index 00000000000..06b02349b84 --- /dev/null +++ b/agent/app/errors.go @@ -0,0 +1,31 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +// transientError represents a transient error when executing the ECS Agent +type transientError struct { + error +} + +// isTransient returns true if the error is transient +func isTransient(err error) bool { + _, ok := err.(transientError) + return ok +} + +// clusterMismatchError represents a mismatch in cluster name between the +// state file and the config object +type clusterMismatchError struct { + error +} diff --git a/agent/app/factory/generate_mocks.go b/agent/app/factory/generate_mocks.go new file mode 100644 index 00000000000..8d8a7a8919b --- /dev/null +++ b/agent/app/factory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package factory + +//go:generate mockgen -destination=mocks/factory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/app/factory StateManager,SaveableOption diff --git a/agent/app/factory/mocks/factory_mocks.go b/agent/app/factory/mocks/factory_mocks.go new file mode 100644 index 00000000000..b37eb634c87 --- /dev/null +++ b/agent/app/factory/mocks/factory_mocks.go @@ -0,0 +1,107 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/app/factory (interfaces: StateManager,SaveableOption) + +// Package mock_factory is a generated GoMock package. 
+package mock_factory + +import ( + reflect "reflect" + + config "github.com/aws/amazon-ecs-agent/agent/config" + statemanager "github.com/aws/amazon-ecs-agent/agent/statemanager" + gomock "github.com/golang/mock/gomock" +) + +// MockStateManager is a mock of StateManager interface +type MockStateManager struct { + ctrl *gomock.Controller + recorder *MockStateManagerMockRecorder +} + +// MockStateManagerMockRecorder is the mock recorder for MockStateManager +type MockStateManagerMockRecorder struct { + mock *MockStateManager +} + +// NewMockStateManager creates a new mock instance +func NewMockStateManager(ctrl *gomock.Controller) *MockStateManager { + mock := &MockStateManager{ctrl: ctrl} + mock.recorder = &MockStateManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockStateManager) EXPECT() *MockStateManagerMockRecorder { + return m.recorder +} + +// NewStateManager mocks base method +func (m *MockStateManager) NewStateManager(arg0 *config.Config, arg1 ...statemanager.Option) (statemanager.StateManager, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "NewStateManager", varargs...) + ret0, _ := ret[0].(statemanager.StateManager) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewStateManager indicates an expected call of NewStateManager +func (mr *MockStateManagerMockRecorder) NewStateManager(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewStateManager", reflect.TypeOf((*MockStateManager)(nil).NewStateManager), varargs...) 
+} + +// MockSaveableOption is a mock of SaveableOption interface +type MockSaveableOption struct { + ctrl *gomock.Controller + recorder *MockSaveableOptionMockRecorder +} + +// MockSaveableOptionMockRecorder is the mock recorder for MockSaveableOption +type MockSaveableOptionMockRecorder struct { + mock *MockSaveableOption +} + +// NewMockSaveableOption creates a new mock instance +func NewMockSaveableOption(ctrl *gomock.Controller) *MockSaveableOption { + mock := &MockSaveableOption{ctrl: ctrl} + mock.recorder = &MockSaveableOptionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSaveableOption) EXPECT() *MockSaveableOptionMockRecorder { + return m.recorder +} + +// AddSaveable mocks base method +func (m *MockSaveableOption) AddSaveable(arg0 string, arg1 statemanager.Saveable) statemanager.Option { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddSaveable", arg0, arg1) + ret0, _ := ret[0].(statemanager.Option) + return ret0 +} + +// AddSaveable indicates an expected call of AddSaveable +func (mr *MockSaveableOptionMockRecorder) AddSaveable(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSaveable", reflect.TypeOf((*MockSaveableOption)(nil).AddSaveable), arg0, arg1) +} diff --git a/agent/app/factory/statemanager.go b/agent/app/factory/statemanager.go new file mode 100644 index 00000000000..2aba678fce5 --- /dev/null +++ b/agent/app/factory/statemanager.go @@ -0,0 +1,56 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +import ( + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/statemanager" +) + +// StateManager factory wraps the global statemanager.NewStateManager method, +// which creates a new state manager object +type StateManager interface { + // NewStateManager creates a new state manager + NewStateManager(cfg *config.Config, options ...statemanager.Option) (statemanager.StateManager, error) +} + +// SaveableOption factory wraps the global statemanager.AddSaveable method, +// which creates a new saveable state manager option +type SaveableOption interface { + // AddSaveable is an option that adds a given saveable as one that should be saved + // under the given name + AddSaveable(name string, saveable statemanager.Saveable) statemanager.Option +} + +type stateManager struct{} + +// NewStateManager creates a new StateManager factory +func NewStateManager() StateManager { + return &stateManager{} +} + +func (*stateManager) NewStateManager(cfg *config.Config, options ...statemanager.Option) (statemanager.StateManager, error) { + return statemanager.NewStateManager(cfg, options...) +} + +type saveableOption struct{} + +// NewSaveableOption creates a new SaveableOption factory +func NewSaveableOption() SaveableOption { + return &saveableOption{} +} + +func (*saveableOption) AddSaveable(name string, saveable statemanager.Saveable) statemanager.Option { + return statemanager.AddSaveable(name, saveable) +} diff --git a/agent/app/generate_mocks.go b/agent/app/generate_mocks.go new file mode 100644 index 00000000000..f68f30471d8 --- /dev/null +++ b/agent/app/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +//go:generate mockgen -destination=mocks/credentials_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/aws-sdk-go/aws/credentials Provider diff --git a/agent/app/healthcheck.go b/agent/app/healthcheck.go new file mode 100644 index 00000000000..f3940737aa3 --- /dev/null +++ b/agent/app/healthcheck.go @@ -0,0 +1,41 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "net/http" + "time" + + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/cihub/seelog" +) + +// runHealthcheck runs the Agent's healthcheck +func runHealthcheck(url string, timeout time.Duration) int { + client := &http.Client{ + Timeout: timeout, + } + r, err := http.NewRequest("HEAD", url, nil) + if err != nil { + seelog.Errorf("error creating healthcheck request: %v", err) + return exitcodes.ExitError + } + resp, err := client.Do(r) + if err != nil { + seelog.Errorf("health check [HEAD %s] failed with error: %v", url, err) + return exitcodes.ExitError + } + resp.Body.Close() + return exitcodes.ExitSuccess +} diff --git a/agent/app/healthcheck_test.go b/agent/app/healthcheck_test.go new file mode 100644 index 00000000000..7000bcd96c1 --- /dev/null +++ b/agent/app/healthcheck_test.go @@ -0,0 +1,94 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestHealthcheck_Sunny(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, client") + })) + defer ts.Close() + + rc := runHealthcheck(ts.URL, time.Second*2) + require.Equal(t, 0, rc) +} + +func TestHealthcheck_InvalidURL2(t *testing.T) { + // leading space in url is invalid + rc := runHealthcheck(" http://foobar", time.Second*2) + require.Equal(t, 1, rc) +} + +func TestHealthcheck_EnvvarConfig(t *testing.T) { + testEnvVar := "TEST_AGENT_HEALTHCHECK_ENV_VAR" + defer os.Unsetenv(testEnvVar) + os.Setenv(testEnvVar, "127.0.0.1") + + // testing healthcheck url setup from app/run.go + localhost := "localhost" + if localhostOverride := os.Getenv(testEnvVar); localhostOverride != "" { + localhost = localhostOverride + } + healthcheckUrl := fmt.Sprintf("http://%s:51678/v1/metadata", localhost) + expectedUrl := "http://127.0.0.1:51678/v1/metadata" + + require.Equal(t, healthcheckUrl, expectedUrl) +} + +func TestHealthcheck_Timeout(t *testing.T) { + sema := make(chan int) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-sema + })) + defer ts.Close() + + rc := runHealthcheck(ts.URL, time.Second*2) + require.Equal(t, 1, rc) + close(sema) +} + +// we actually pass the healthcheck in the event of a non-200 http status code. 
+func TestHealthcheck_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte("https://http.cat/404")) + })) + defer ts.Close() + + rc := runHealthcheck(ts.URL+"/foobar", time.Second*2) + require.Equal(t, 0, rc) +} + +var brc int + +func BenchmarkHealthcheck(b *testing.B) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "hello") + })) + defer ts.Close() + + for n := 0; n < b.N; n++ { + brc = runHealthcheck(ts.URL, time.Second*1) + } +} diff --git a/agent/app/license.go b/agent/app/license.go new file mode 100644 index 00000000000..7d2e48c329d --- /dev/null +++ b/agent/app/license.go @@ -0,0 +1,34 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package app + +import ( + "fmt" + "os" + + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +// printLicense prints the Agent's license text +func printLicense() int { + license := utils.NewLicenseProvider() + text, err := license.GetText() + if err != nil { + fmt.Fprintln(os.Stderr, err) + return exitcodes.ExitError + } + fmt.Println(text) + return exitcodes.ExitSuccess +} diff --git a/agent/app/mocks/credentials_mocks.go b/agent/app/mocks/credentials_mocks.go new file mode 100644 index 00000000000..baf76016675 --- /dev/null +++ b/agent/app/mocks/credentials_mocks.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/aws-sdk-go/aws/credentials (interfaces: Provider) + +// Package mock_credentials is a generated GoMock package. 
+package mock_credentials + +import ( + reflect "reflect" + + credentials "github.com/aws/aws-sdk-go/aws/credentials" + gomock "github.com/golang/mock/gomock" +) + +// MockProvider is a mock of Provider interface +type MockProvider struct { + ctrl *gomock.Controller + recorder *MockProviderMockRecorder +} + +// MockProviderMockRecorder is the mock recorder for MockProvider +type MockProviderMockRecorder struct { + mock *MockProvider +} + +// NewMockProvider creates a new mock instance +func NewMockProvider(ctrl *gomock.Controller) *MockProvider { + mock := &MockProvider{ctrl: ctrl} + mock.recorder = &MockProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockProvider) EXPECT() *MockProviderMockRecorder { + return m.recorder +} + +// IsExpired mocks base method +func (m *MockProvider) IsExpired() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsExpired") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsExpired indicates an expected call of IsExpired +func (mr *MockProviderMockRecorder) IsExpired() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsExpired", reflect.TypeOf((*MockProvider)(nil).IsExpired)) +} + +// Retrieve mocks base method +func (m *MockProvider) Retrieve() (credentials.Value, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Retrieve") + ret0, _ := ret[0].(credentials.Value) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Retrieve indicates an expected call of Retrieve +func (mr *MockProviderMockRecorder) Retrieve() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Retrieve", reflect.TypeOf((*MockProvider)(nil).Retrieve)) +} diff --git a/agent/app/run.go b/agent/app/run.go new file mode 100644 index 00000000000..8af36bfe3c9 --- /dev/null +++ b/agent/app/run.go @@ -0,0 +1,87 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package app + +import ( + "fmt" + "os" + "time" + + "github.com/aws/amazon-ecs-agent/agent/app/args" + "github.com/aws/amazon-ecs-agent/agent/logger" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/aws-sdk-go/aws" + log "github.com/cihub/seelog" +) + +const ( + ECS_AGENT_HEALTHCHECK_HOST_ENV_VAR = "ECS_AGENT_HEALTHCHECK_HOST" +) + +// Run runs the ECS Agent App. It returns an exit code, which is used by +// main() to set the status code for the program +func Run(arguments []string) int { + defer log.Flush() + + parsedArgs, err := args.New(arguments) + if err != nil { + return exitcodes.ExitTerminal + } + + if *parsedArgs.License { + return printLicense() + } else if *parsedArgs.Version { + return version.PrintVersion() + } else if *parsedArgs.Healthcheck { + // Timeout is purposely set to shorter than the default docker healthcheck + // timeout of 30s. This is so that we can catch any http timeout and log the + // issue within agent logs. 
+ // see https://docs.docker.com/engine/reference/builder/#healthcheck + // if ECS_AGENT_HEALTHCHECK_HOST env var is set, it will override + // the healthcheck's localhost ip address inside the ecs-agent container + localhost := "localhost" + if localhostOverride := os.Getenv(ECS_AGENT_HEALTHCHECK_HOST_ENV_VAR); localhostOverride != "" { + localhost = localhostOverride + } + healthcheckUrl := fmt.Sprintf("http://%s:51678/v1/metadata", localhost) + return runHealthcheck(healthcheckUrl, time.Second*25) + } + + if *parsedArgs.LogLevel != "" { + logger.SetLevel(*parsedArgs.LogLevel, *parsedArgs.LogLevel) + } else { + logger.SetLevel(*parsedArgs.DriverLogLevel, *parsedArgs.InstanceLogLevel) + } + + // Create an Agent object + agent, err := newAgent(aws.BoolValue(parsedArgs.BlackholeEC2Metadata), parsedArgs.AcceptInsecureCert) + if err != nil { + // Failure to initialize either the docker client or the EC2 metadata + // service client are non terminal errors as they could be transient + return exitcodes.ExitError + } + + switch { + case *parsedArgs.ECSAttributes: + // Print agent's ecs attributes based on its environment and exit + return agent.printECSAttributes() + case *parsedArgs.WindowsService: + // Enable Windows Service + return agent.startWindowsService() + default: + // Start the agent + return agent.start() + } +} diff --git a/agent/app/testdata/test_cpu_info b/agent/app/testdata/test_cpu_info new file mode 100644 index 00000000000..102cf327e28 --- /dev/null +++ b/agent/app/testdata/test_cpu_info @@ -0,0 +1,21 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +microcode : 0x200005e +cpu MHz : 2500.000 +cache size : 33792 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx 
fxsr sse4_1 sse4_2 avx ht syscall avx2 pdpe1gb rdtscp lm constant_tsc a nopl xtopology + diff --git a/agent/app/testdata/test_cpu_info_fail b/agent/app/testdata/test_cpu_info_fail new file mode 100644 index 00000000000..814af1b4472 --- /dev/null +++ b/agent/app/testdata/test_cpu_info_fail @@ -0,0 +1,42 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +microcode : 0x200005e +cpu MHz : 2500.000 +cache size : 33792 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse4_1 sse4_2 avx ht syscall pdpe1gb rdtscp lm constant_tsc a nopl xtopology + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +microcode : 0x200005e +cpu MHz : 2500.000 +cache size : 33792 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse4_1 sse4_2 avx ht syscall pdpe1gb rdtscp lm constant_tsc a nopl xtopology + diff --git a/agent/asm/asm.go b/agent/asm/asm.go new file mode 100644 index 00000000000..3d79123984a --- /dev/null +++ b/agent/asm/asm.go @@ -0,0 +1,132 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asm + +import ( + "encoding/json" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// AuthDataValue is the schema for +// the SecretStringValue returned by ASM +type AuthDataValue struct { + Username *string + Password *string +} + +// GetDockerAuthFromASM makes the api call to the AWS Secrets Manager service to +// retrieve the docker auth data +func GetDockerAuthFromASM(secretID string, client secretsmanageriface.SecretsManagerAPI) (types.AuthConfig, error) { + in := &secretsmanager.GetSecretValueInput{ + SecretId: aws.String(secretID), + } + + out, err := client.GetSecretValue(in) + if err != nil { + return types.AuthConfig{}, errors.Wrapf(err, + "asm fetching secret from the service for %s", secretID) + } + + return extractASMValue(out) +} + +func extractASMValue(out *secretsmanager.GetSecretValueOutput) (types.AuthConfig, error) { + if out == nil { + return types.AuthConfig{}, errors.New( + "asm fetching authorization data: empty response") + } + + secretValue := aws.StringValue(out.SecretString) + if secretValue == "" { + return types.AuthConfig{}, errors.New( + "asm fetching authorization data: empty secrets value") + } + + authDataValue := AuthDataValue{} + err := json.Unmarshal([]byte(secretValue), &authDataValue) + if err != nil { + // could not unmarshal, incorrect secret value schema + return types.AuthConfig{}, errors.New( + "asm fetching authorization data: unable to unmarshal secret value, invalid schema") + } + + username := aws.StringValue(authDataValue.Username) + password := 
aws.StringValue(authDataValue.Password) + + if username == "" { + return types.AuthConfig{}, errors.New( + "asm fetching username: AuthorizationData is malformed, empty field") + } + + if password == "" { + return types.AuthConfig{}, errors.New( + "asm fetching password: AuthorizationData is malformed, empty field") + } + + dac := types.AuthConfig{ + Username: username, + Password: password, + } + + return dac, nil +} + +func GetSecretFromASMWithInput(input *secretsmanager.GetSecretValueInput, + client secretsmanageriface.SecretsManagerAPI, jsonKey string) (string, error) { + out, err := client.GetSecretValue(input) + if err != nil { + return "", errors.Wrapf(err, "secret %s", *input.SecretId) + } + + if jsonKey == "" { + return aws.StringValue(out.SecretString), nil + } + + secretMap := make(map[string]interface{}) + jsonErr := json.Unmarshal([]byte(*out.SecretString), &secretMap) + if jsonErr != nil { + seelog.Warnf("Error when treating retrieved secret value with secret id %s as JSON and calling unmarshal.", *input.SecretId) + return "", jsonErr + } + + secretValue, ok := secretMap[jsonKey] + if !ok { + err = errors.New(fmt.Sprintf("retrieved secret from Secrets Manager did not contain json key %s", jsonKey)) + return "", err + } + + return fmt.Sprintf("%v", secretValue), nil +} + +// GetSecretFromASM makes the api call to the AWS Secrets Manager service to +// retrieve the secret value +func GetSecretFromASM(secretID string, client secretsmanageriface.SecretsManagerAPI) (string, error) { + in := &secretsmanager.GetSecretValueInput{ + SecretId: aws.String(secretID), + } + + out, err := client.GetSecretValue(in) + if err != nil { + return "", errors.Wrapf(err, "secret %s", secretID) + } + + return aws.StringValue(out.SecretString), nil +} diff --git a/agent/asm/asm_test.go b/agent/asm/asm_test.go new file mode 100644 index 00000000000..000f7c6ebd0 --- /dev/null +++ b/agent/asm/asm_test.go @@ -0,0 +1,190 @@ +// +build unit + +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asm + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + versionID = "versionId" + versionStage = "versionStage" + jsonKey = "jsonKey" + valueFrom = "arn:aws:secretsmanager:region:account-id:secret:secretId" + secretValue = "secretValue" + jsonSecretValue = "{\"" + jsonKey + "\": \"" + secretValue + "\",\"some-other-key\": \"secret2\"}" + malformedJsonSecretValue = "{\"" + jsonKey + "\": \"" + secretValue +) + +type mockGetSecretValue struct { + secretsmanageriface.SecretsManagerAPI + Resp secretsmanager.GetSecretValueOutput +} + +func (m mockGetSecretValue) GetSecretValue(input *secretsmanager.GetSecretValueInput) (*secretsmanager.GetSecretValueOutput, error) { + return &m.Resp, nil +} + +func TestASMGetAuthConfig(t *testing.T) { + + cases := []struct { + Name string + Resp secretsmanager.GetSecretValueOutput + ShouldError bool + }{ + { + Name: "SuccessWithValidResponse", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"username":"usr","password":"pwd"}`), + }, + ShouldError: false, + }, + { + Name: "MissingUsername", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"password":"pwd"}`), + }, + ShouldError: true, + }, + { + Name: 
"MissingPassword", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"username":"usr"}`), + }, + ShouldError: true, + }, + { + Name: "EmptyUsername", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"username":"","password":"pwd"}`), + }, + ShouldError: true, + }, + { + Name: "EmptyPassword", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"username":"usr","password":""}`), + }, + ShouldError: true, + }, + { + Name: "MalformedJson", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{"username":"usr"`), + }, + ShouldError: true, + }, + { + Name: "MissingSecretString", + Resp: secretsmanager.GetSecretValueOutput{}, + ShouldError: true, + }, + { + Name: "EmptyJsonStruct", + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(`{}`), + }, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + asmClient := mockGetSecretValue{Resp: c.Resp} + _, err := GetDockerAuthFromASM("secret-value-id", asmClient) + + if c.ShouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestGetSecretFromASM(t *testing.T) { + asmClient := createASMInterface(secretValue) + _, err := GetSecretFromASM("secretName", asmClient) + assert.NoError(t, err) +} + +func TestGetSecretFromASMWithJsonKey(t *testing.T) { + asmClient := createASMInterface(jsonSecretValue) + secretValueInput := createSecretValueInput(toPtr(valueFrom), nil, nil) + outSecretValue, _ := GetSecretFromASMWithInput(secretValueInput, asmClient, jsonKey) + assert.Equal(t, secretValue, outSecretValue) +} + +func TestGetSecretFromASMWithMalformedJSON(t *testing.T) { + asmClient := createASMInterface(malformedJsonSecretValue) + secretValueInput := createSecretValueInput(toPtr(valueFrom), nil, nil) + outSecretValue, err := GetSecretFromASMWithInput(secretValueInput, asmClient, jsonKey) + require.Error(t, err) + assert.Equal(t, "", 
outSecretValue) +} + +func TestGetSecretFromASMWithJSONKeyNotFound(t *testing.T) { + asmClient := createASMInterface(jsonSecretValue) + secretValueInput := createSecretValueInput(toPtr(valueFrom), nil, nil) + nonExistentKey := "nonExistentKey" + _, err := GetSecretFromASMWithInput(secretValueInput, asmClient, nonExistentKey) + assert.Error(t, err) +} + +func TestGetSecretFromASMWithVersionID(t *testing.T) { + asmClient := createASMInterface(secretValue) + secretValueInput := createSecretValueInput(toPtr(valueFrom), toPtr(versionID), nil) + outSecretValue, err := GetSecretFromASMWithInput(secretValueInput, asmClient, "") + require.NoError(t, err) + assert.Equal(t, secretValue, outSecretValue) +} + +func TestGetSecretFromASMWithVersionIDAndStage(t *testing.T) { + asmClient := createASMInterface(secretValue) + secretValueInput := createSecretValueInput(toPtr(valueFrom), toPtr(versionID), toPtr(versionStage)) + outSecretValue, err := GetSecretFromASMWithInput(secretValueInput, asmClient, "") + require.NoError(t, err) + assert.Equal(t, secretValue, outSecretValue) +} + +func toPtr(input string) *string { + if input == "" { + return nil + } + return &input +} + +func createSecretValueInput(secretID *string, versionID *string, versionStage *string) *secretsmanager.GetSecretValueInput { + return &secretsmanager.GetSecretValueInput{ + SecretId: secretID, + VersionId: versionID, + VersionStage: versionStage, + } +} + +func createASMInterface(secretValue string) mockGetSecretValue { + return mockGetSecretValue{ + Resp: secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(secretValue), + }, + } +} diff --git a/agent/asm/factory/factory.go b/agent/asm/factory/factory.go new file mode 100644 index 00000000000..d7c15072533 --- /dev/null +++ b/agent/asm/factory/factory.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + "github.com/aws/aws-sdk-go/aws" + awscreds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" +) + +const ( + roundtripTimeout = 5 * time.Second +) + +type ClientCreator interface { + NewASMClient(region string, creds credentials.IAMRoleCredentials) secretsmanageriface.SecretsManagerAPI +} + +func NewClientCreator() ClientCreator { + return &asmClientCreator{} +} + +type asmClientCreator struct{} + +func (*asmClientCreator) NewASMClient(region string, + creds credentials.IAMRoleCredentials) secretsmanageriface.SecretsManagerAPI { + cfg := aws.NewConfig(). + WithHTTPClient(httpclient.New(roundtripTimeout, false)). + WithRegion(region). + WithCredentials( + awscreds.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, + creds.SessionToken)) + sess := session.Must(session.NewSession(cfg)) + return secretsmanager.New(sess) +} diff --git a/agent/asm/factory/generate_mocks.go b/agent/asm/factory/generate_mocks.go new file mode 100644 index 00000000000..aa26ee44719 --- /dev/null +++ b/agent/asm/factory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +//go:generate mockgen -destination=mocks/factory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/asm/factory ClientCreator diff --git a/agent/asm/factory/mocks/factory_mocks.go b/agent/asm/factory/mocks/factory_mocks.go new file mode 100644 index 00000000000..27126de22fe --- /dev/null +++ b/agent/asm/factory/mocks/factory_mocks.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/asm/factory (interfaces: ClientCreator) + +// Package mock_factory is a generated GoMock package. 
+package mock_factory + +import ( + reflect "reflect" + + credentials "github.com/aws/amazon-ecs-agent/agent/credentials" + secretsmanageriface "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" + gomock "github.com/golang/mock/gomock" +) + +// MockClientCreator is a mock of ClientCreator interface +type MockClientCreator struct { + ctrl *gomock.Controller + recorder *MockClientCreatorMockRecorder +} + +// MockClientCreatorMockRecorder is the mock recorder for MockClientCreator +type MockClientCreatorMockRecorder struct { + mock *MockClientCreator +} + +// NewMockClientCreator creates a new mock instance +func NewMockClientCreator(ctrl *gomock.Controller) *MockClientCreator { + mock := &MockClientCreator{ctrl: ctrl} + mock.recorder = &MockClientCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClientCreator) EXPECT() *MockClientCreatorMockRecorder { + return m.recorder +} + +// NewASMClient mocks base method +func (m *MockClientCreator) NewASMClient(arg0 string, arg1 credentials.IAMRoleCredentials) secretsmanageriface.SecretsManagerAPI { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewASMClient", arg0, arg1) + ret0, _ := ret[0].(secretsmanageriface.SecretsManagerAPI) + return ret0 +} + +// NewASMClient indicates an expected call of NewASMClient +func (mr *MockClientCreatorMockRecorder) NewASMClient(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewASMClient", reflect.TypeOf((*MockClientCreator)(nil).NewASMClient), arg0, arg1) +} diff --git a/agent/asm/generate_mocks.go b/agent/asm/generate_mocks.go new file mode 100644 index 00000000000..0778594b667 --- /dev/null +++ b/agent/asm/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asm + +//go:generate mockgen -destination=mocks/secretsmanagerapi_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface SecretsManagerAPI diff --git a/agent/asm/mocks/secretsmanagerapi_mocks.go b/agent/asm/mocks/secretsmanagerapi_mocks.go new file mode 100644 index 00000000000..f421c7578b8 --- /dev/null +++ b/agent/asm/mocks/secretsmanagerapi_mocks.go @@ -0,0 +1,1067 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface (interfaces: SecretsManagerAPI) + +// Package mock_secretsmanageriface is a generated GoMock package. 
+package mock_secretsmanageriface + +import ( + context "context" + reflect "reflect" + + request "github.com/aws/aws-sdk-go/aws/request" + secretsmanager "github.com/aws/aws-sdk-go/service/secretsmanager" + gomock "github.com/golang/mock/gomock" +) + +// MockSecretsManagerAPI is a mock of SecretsManagerAPI interface +type MockSecretsManagerAPI struct { + ctrl *gomock.Controller + recorder *MockSecretsManagerAPIMockRecorder +} + +// MockSecretsManagerAPIMockRecorder is the mock recorder for MockSecretsManagerAPI +type MockSecretsManagerAPIMockRecorder struct { + mock *MockSecretsManagerAPI +} + +// NewMockSecretsManagerAPI creates a new mock instance +func NewMockSecretsManagerAPI(ctrl *gomock.Controller) *MockSecretsManagerAPI { + mock := &MockSecretsManagerAPI{ctrl: ctrl} + mock.recorder = &MockSecretsManagerAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSecretsManagerAPI) EXPECT() *MockSecretsManagerAPIMockRecorder { + return m.recorder +} + +// CancelRotateSecret mocks base method +func (m *MockSecretsManagerAPI) CancelRotateSecret(arg0 *secretsmanager.CancelRotateSecretInput) (*secretsmanager.CancelRotateSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelRotateSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.CancelRotateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelRotateSecret indicates an expected call of CancelRotateSecret +func (mr *MockSecretsManagerAPIMockRecorder) CancelRotateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelRotateSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CancelRotateSecret), arg0) +} + +// CancelRotateSecretRequest mocks base method +func (m *MockSecretsManagerAPI) CancelRotateSecretRequest(arg0 *secretsmanager.CancelRotateSecretInput) (*request.Request, *secretsmanager.CancelRotateSecretOutput) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelRotateSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.CancelRotateSecretOutput) + return ret0, ret1 +} + +// CancelRotateSecretRequest indicates an expected call of CancelRotateSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) CancelRotateSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelRotateSecretRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CancelRotateSecretRequest), arg0) +} + +// CancelRotateSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) CancelRotateSecretWithContext(arg0 context.Context, arg1 *secretsmanager.CancelRotateSecretInput, arg2 ...request.Option) (*secretsmanager.CancelRotateSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CancelRotateSecretWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.CancelRotateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelRotateSecretWithContext indicates an expected call of CancelRotateSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) CancelRotateSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelRotateSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CancelRotateSecretWithContext), varargs...) 
+} + +// CreateSecret mocks base method +func (m *MockSecretsManagerAPI) CreateSecret(arg0 *secretsmanager.CreateSecretInput) (*secretsmanager.CreateSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.CreateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSecret indicates an expected call of CreateSecret +func (mr *MockSecretsManagerAPIMockRecorder) CreateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CreateSecret), arg0) +} + +// CreateSecretRequest mocks base method +func (m *MockSecretsManagerAPI) CreateSecretRequest(arg0 *secretsmanager.CreateSecretInput) (*request.Request, *secretsmanager.CreateSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.CreateSecretOutput) + return ret0, ret1 +} + +// CreateSecretRequest indicates an expected call of CreateSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) CreateSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecretRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CreateSecretRequest), arg0) +} + +// CreateSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) CreateSecretWithContext(arg0 context.Context, arg1 *secretsmanager.CreateSecretInput, arg2 ...request.Option) (*secretsmanager.CreateSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateSecretWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.CreateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSecretWithContext indicates an expected call of CreateSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) CreateSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).CreateSecretWithContext), varargs...) +} + +// DeleteResourcePolicy mocks base method +func (m *MockSecretsManagerAPI) DeleteResourcePolicy(arg0 *secretsmanager.DeleteResourcePolicyInput) (*secretsmanager.DeleteResourcePolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteResourcePolicy", arg0) + ret0, _ := ret[0].(*secretsmanager.DeleteResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteResourcePolicy indicates an expected call of DeleteResourcePolicy +func (mr *MockSecretsManagerAPIMockRecorder) DeleteResourcePolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicy", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteResourcePolicy), arg0) +} + +// DeleteResourcePolicyRequest mocks base method +func (m *MockSecretsManagerAPI) DeleteResourcePolicyRequest(arg0 *secretsmanager.DeleteResourcePolicyInput) (*request.Request, *secretsmanager.DeleteResourcePolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteResourcePolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.DeleteResourcePolicyOutput) + return ret0, ret1 +} + +// DeleteResourcePolicyRequest indicates an expected call of DeleteResourcePolicyRequest +func (mr *MockSecretsManagerAPIMockRecorder) DeleteResourcePolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteResourcePolicyRequest), arg0) +} + +// DeleteResourcePolicyWithContext mocks base method +func (m *MockSecretsManagerAPI) DeleteResourcePolicyWithContext(arg0 context.Context, arg1 *secretsmanager.DeleteResourcePolicyInput, arg2 ...request.Option) (*secretsmanager.DeleteResourcePolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteResourcePolicyWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.DeleteResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteResourcePolicyWithContext indicates an expected call of DeleteResourcePolicyWithContext +func (mr *MockSecretsManagerAPIMockRecorder) DeleteResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourcePolicyWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteResourcePolicyWithContext), varargs...) 
+} + +// DeleteSecret mocks base method +func (m *MockSecretsManagerAPI) DeleteSecret(arg0 *secretsmanager.DeleteSecretInput) (*secretsmanager.DeleteSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.DeleteSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSecret indicates an expected call of DeleteSecret +func (mr *MockSecretsManagerAPIMockRecorder) DeleteSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteSecret), arg0) +} + +// DeleteSecretRequest mocks base method +func (m *MockSecretsManagerAPI) DeleteSecretRequest(arg0 *secretsmanager.DeleteSecretInput) (*request.Request, *secretsmanager.DeleteSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.DeleteSecretOutput) + return ret0, ret1 +} + +// DeleteSecretRequest indicates an expected call of DeleteSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) DeleteSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecretRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteSecretRequest), arg0) +} + +// DeleteSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) DeleteSecretWithContext(arg0 context.Context, arg1 *secretsmanager.DeleteSecretInput, arg2 ...request.Option) (*secretsmanager.DeleteSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteSecretWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.DeleteSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSecretWithContext indicates an expected call of DeleteSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) DeleteSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DeleteSecretWithContext), varargs...) +} + +// DescribeSecret mocks base method +func (m *MockSecretsManagerAPI) DescribeSecret(arg0 *secretsmanager.DescribeSecretInput) (*secretsmanager.DescribeSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.DescribeSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeSecret indicates an expected call of DescribeSecret +func (mr *MockSecretsManagerAPIMockRecorder) DescribeSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DescribeSecret), arg0) +} + +// DescribeSecretRequest mocks base method +func (m *MockSecretsManagerAPI) DescribeSecretRequest(arg0 *secretsmanager.DescribeSecretInput) (*request.Request, *secretsmanager.DescribeSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.DescribeSecretOutput) + return ret0, ret1 +} + +// DescribeSecretRequest indicates an expected call of DescribeSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) DescribeSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeSecretRequest", 
reflect.TypeOf((*MockSecretsManagerAPI)(nil).DescribeSecretRequest), arg0) +} + +// DescribeSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) DescribeSecretWithContext(arg0 context.Context, arg1 *secretsmanager.DescribeSecretInput, arg2 ...request.Option) (*secretsmanager.DescribeSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeSecretWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.DescribeSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeSecretWithContext indicates an expected call of DescribeSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) DescribeSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).DescribeSecretWithContext), varargs...) 
+} + +// GetRandomPassword mocks base method +func (m *MockSecretsManagerAPI) GetRandomPassword(arg0 *secretsmanager.GetRandomPasswordInput) (*secretsmanager.GetRandomPasswordOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRandomPassword", arg0) + ret0, _ := ret[0].(*secretsmanager.GetRandomPasswordOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRandomPassword indicates an expected call of GetRandomPassword +func (mr *MockSecretsManagerAPIMockRecorder) GetRandomPassword(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRandomPassword", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetRandomPassword), arg0) +} + +// GetRandomPasswordRequest mocks base method +func (m *MockSecretsManagerAPI) GetRandomPasswordRequest(arg0 *secretsmanager.GetRandomPasswordInput) (*request.Request, *secretsmanager.GetRandomPasswordOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRandomPasswordRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.GetRandomPasswordOutput) + return ret0, ret1 +} + +// GetRandomPasswordRequest indicates an expected call of GetRandomPasswordRequest +func (mr *MockSecretsManagerAPIMockRecorder) GetRandomPasswordRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRandomPasswordRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetRandomPasswordRequest), arg0) +} + +// GetRandomPasswordWithContext mocks base method +func (m *MockSecretsManagerAPI) GetRandomPasswordWithContext(arg0 context.Context, arg1 *secretsmanager.GetRandomPasswordInput, arg2 ...request.Option) (*secretsmanager.GetRandomPasswordOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetRandomPasswordWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.GetRandomPasswordOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRandomPasswordWithContext indicates an expected call of GetRandomPasswordWithContext +func (mr *MockSecretsManagerAPIMockRecorder) GetRandomPasswordWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRandomPasswordWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetRandomPasswordWithContext), varargs...) +} + +// GetResourcePolicy mocks base method +func (m *MockSecretsManagerAPI) GetResourcePolicy(arg0 *secretsmanager.GetResourcePolicyInput) (*secretsmanager.GetResourcePolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourcePolicy", arg0) + ret0, _ := ret[0].(*secretsmanager.GetResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResourcePolicy indicates an expected call of GetResourcePolicy +func (mr *MockSecretsManagerAPIMockRecorder) GetResourcePolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicy", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetResourcePolicy), arg0) +} + +// GetResourcePolicyRequest mocks base method +func (m *MockSecretsManagerAPI) GetResourcePolicyRequest(arg0 *secretsmanager.GetResourcePolicyInput) (*request.Request, *secretsmanager.GetResourcePolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResourcePolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.GetResourcePolicyOutput) + return ret0, ret1 +} + +// GetResourcePolicyRequest indicates an expected call of GetResourcePolicyRequest +func (mr *MockSecretsManagerAPIMockRecorder) GetResourcePolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicyRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetResourcePolicyRequest), arg0) +} + +// GetResourcePolicyWithContext mocks base method +func (m *MockSecretsManagerAPI) GetResourcePolicyWithContext(arg0 context.Context, arg1 *secretsmanager.GetResourcePolicyInput, arg2 ...request.Option) (*secretsmanager.GetResourcePolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetResourcePolicyWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.GetResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetResourcePolicyWithContext indicates an expected call of GetResourcePolicyWithContext +func (mr *MockSecretsManagerAPIMockRecorder) GetResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourcePolicyWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetResourcePolicyWithContext), varargs...) 
+} + +// GetSecretValue mocks base method +func (m *MockSecretsManagerAPI) GetSecretValue(arg0 *secretsmanager.GetSecretValueInput) (*secretsmanager.GetSecretValueOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecretValue", arg0) + ret0, _ := ret[0].(*secretsmanager.GetSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSecretValue indicates an expected call of GetSecretValue +func (mr *MockSecretsManagerAPIMockRecorder) GetSecretValue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretValue", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetSecretValue), arg0) +} + +// GetSecretValueRequest mocks base method +func (m *MockSecretsManagerAPI) GetSecretValueRequest(arg0 *secretsmanager.GetSecretValueInput) (*request.Request, *secretsmanager.GetSecretValueOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecretValueRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.GetSecretValueOutput) + return ret0, ret1 +} + +// GetSecretValueRequest indicates an expected call of GetSecretValueRequest +func (mr *MockSecretsManagerAPIMockRecorder) GetSecretValueRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretValueRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetSecretValueRequest), arg0) +} + +// GetSecretValueWithContext mocks base method +func (m *MockSecretsManagerAPI) GetSecretValueWithContext(arg0 context.Context, arg1 *secretsmanager.GetSecretValueInput, arg2 ...request.Option) (*secretsmanager.GetSecretValueOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetSecretValueWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.GetSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSecretValueWithContext indicates an expected call of GetSecretValueWithContext +func (mr *MockSecretsManagerAPIMockRecorder) GetSecretValueWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretValueWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).GetSecretValueWithContext), varargs...) +} + +// ListSecretVersionIds mocks base method +func (m *MockSecretsManagerAPI) ListSecretVersionIds(arg0 *secretsmanager.ListSecretVersionIdsInput) (*secretsmanager.ListSecretVersionIdsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretVersionIds", arg0) + ret0, _ := ret[0].(*secretsmanager.ListSecretVersionIdsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretVersionIds indicates an expected call of ListSecretVersionIds +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretVersionIds(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretVersionIds", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretVersionIds), arg0) +} + +// ListSecretVersionIdsPages mocks base method +func (m *MockSecretsManagerAPI) ListSecretVersionIdsPages(arg0 *secretsmanager.ListSecretVersionIdsInput, arg1 func(*secretsmanager.ListSecretVersionIdsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretVersionIdsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListSecretVersionIdsPages indicates an expected call of ListSecretVersionIdsPages +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretVersionIdsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"ListSecretVersionIdsPages", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretVersionIdsPages), arg0, arg1) +} + +// ListSecretVersionIdsPagesWithContext mocks base method +func (m *MockSecretsManagerAPI) ListSecretVersionIdsPagesWithContext(arg0 context.Context, arg1 *secretsmanager.ListSecretVersionIdsInput, arg2 func(*secretsmanager.ListSecretVersionIdsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSecretVersionIdsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListSecretVersionIdsPagesWithContext indicates an expected call of ListSecretVersionIdsPagesWithContext +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretVersionIdsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretVersionIdsPagesWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretVersionIdsPagesWithContext), varargs...) 
+} + +// ListSecretVersionIdsRequest mocks base method +func (m *MockSecretsManagerAPI) ListSecretVersionIdsRequest(arg0 *secretsmanager.ListSecretVersionIdsInput) (*request.Request, *secretsmanager.ListSecretVersionIdsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretVersionIdsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.ListSecretVersionIdsOutput) + return ret0, ret1 +} + +// ListSecretVersionIdsRequest indicates an expected call of ListSecretVersionIdsRequest +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretVersionIdsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretVersionIdsRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretVersionIdsRequest), arg0) +} + +// ListSecretVersionIdsWithContext mocks base method +func (m *MockSecretsManagerAPI) ListSecretVersionIdsWithContext(arg0 context.Context, arg1 *secretsmanager.ListSecretVersionIdsInput, arg2 ...request.Option) (*secretsmanager.ListSecretVersionIdsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSecretVersionIdsWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.ListSecretVersionIdsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretVersionIdsWithContext indicates an expected call of ListSecretVersionIdsWithContext +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretVersionIdsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretVersionIdsWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretVersionIdsWithContext), varargs...) 
+} + +// ListSecrets mocks base method +func (m *MockSecretsManagerAPI) ListSecrets(arg0 *secretsmanager.ListSecretsInput) (*secretsmanager.ListSecretsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecrets", arg0) + ret0, _ := ret[0].(*secretsmanager.ListSecretsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecrets indicates an expected call of ListSecrets +func (mr *MockSecretsManagerAPIMockRecorder) ListSecrets(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecrets", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecrets), arg0) +} + +// ListSecretsPages mocks base method +func (m *MockSecretsManagerAPI) ListSecretsPages(arg0 *secretsmanager.ListSecretsInput, arg1 func(*secretsmanager.ListSecretsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListSecretsPages indicates an expected call of ListSecretsPages +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretsPages", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretsPages), arg0, arg1) +} + +// ListSecretsPagesWithContext mocks base method +func (m *MockSecretsManagerAPI) ListSecretsPagesWithContext(arg0 context.Context, arg1 *secretsmanager.ListSecretsInput, arg2 func(*secretsmanager.ListSecretsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSecretsPagesWithContext", varargs...) 
+ ret0, _ := ret[0].(error) + return ret0 +} + +// ListSecretsPagesWithContext indicates an expected call of ListSecretsPagesWithContext +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretsPagesWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretsPagesWithContext), varargs...) +} + +// ListSecretsRequest mocks base method +func (m *MockSecretsManagerAPI) ListSecretsRequest(arg0 *secretsmanager.ListSecretsInput) (*request.Request, *secretsmanager.ListSecretsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.ListSecretsOutput) + return ret0, ret1 +} + +// ListSecretsRequest indicates an expected call of ListSecretsRequest +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretsRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretsRequest), arg0) +} + +// ListSecretsWithContext mocks base method +func (m *MockSecretsManagerAPI) ListSecretsWithContext(arg0 context.Context, arg1 *secretsmanager.ListSecretsInput, arg2 ...request.Option) (*secretsmanager.ListSecretsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSecretsWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.ListSecretsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretsWithContext indicates an expected call of ListSecretsWithContext +func (mr *MockSecretsManagerAPIMockRecorder) ListSecretsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretsWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ListSecretsWithContext), varargs...) +} + +// PutResourcePolicy mocks base method +func (m *MockSecretsManagerAPI) PutResourcePolicy(arg0 *secretsmanager.PutResourcePolicyInput) (*secretsmanager.PutResourcePolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutResourcePolicy", arg0) + ret0, _ := ret[0].(*secretsmanager.PutResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutResourcePolicy indicates an expected call of PutResourcePolicy +func (mr *MockSecretsManagerAPIMockRecorder) PutResourcePolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicy", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutResourcePolicy), arg0) +} + +// PutResourcePolicyRequest mocks base method +func (m *MockSecretsManagerAPI) PutResourcePolicyRequest(arg0 *secretsmanager.PutResourcePolicyInput) (*request.Request, *secretsmanager.PutResourcePolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutResourcePolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.PutResourcePolicyOutput) + return ret0, ret1 +} + +// PutResourcePolicyRequest indicates an expected call of PutResourcePolicyRequest +func (mr *MockSecretsManagerAPIMockRecorder) PutResourcePolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"PutResourcePolicyRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutResourcePolicyRequest), arg0) +} + +// PutResourcePolicyWithContext mocks base method +func (m *MockSecretsManagerAPI) PutResourcePolicyWithContext(arg0 context.Context, arg1 *secretsmanager.PutResourcePolicyInput, arg2 ...request.Option) (*secretsmanager.PutResourcePolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutResourcePolicyWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.PutResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutResourcePolicyWithContext indicates an expected call of PutResourcePolicyWithContext +func (mr *MockSecretsManagerAPIMockRecorder) PutResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourcePolicyWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutResourcePolicyWithContext), varargs...) 
+} + +// PutSecretValue mocks base method +func (m *MockSecretsManagerAPI) PutSecretValue(arg0 *secretsmanager.PutSecretValueInput) (*secretsmanager.PutSecretValueOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutSecretValue", arg0) + ret0, _ := ret[0].(*secretsmanager.PutSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutSecretValue indicates an expected call of PutSecretValue +func (mr *MockSecretsManagerAPIMockRecorder) PutSecretValue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSecretValue", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutSecretValue), arg0) +} + +// PutSecretValueRequest mocks base method +func (m *MockSecretsManagerAPI) PutSecretValueRequest(arg0 *secretsmanager.PutSecretValueInput) (*request.Request, *secretsmanager.PutSecretValueOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutSecretValueRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.PutSecretValueOutput) + return ret0, ret1 +} + +// PutSecretValueRequest indicates an expected call of PutSecretValueRequest +func (mr *MockSecretsManagerAPIMockRecorder) PutSecretValueRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSecretValueRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutSecretValueRequest), arg0) +} + +// PutSecretValueWithContext mocks base method +func (m *MockSecretsManagerAPI) PutSecretValueWithContext(arg0 context.Context, arg1 *secretsmanager.PutSecretValueInput, arg2 ...request.Option) (*secretsmanager.PutSecretValueOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutSecretValueWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.PutSecretValueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutSecretValueWithContext indicates an expected call of PutSecretValueWithContext +func (mr *MockSecretsManagerAPIMockRecorder) PutSecretValueWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutSecretValueWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).PutSecretValueWithContext), varargs...) +} + +// RestoreSecret mocks base method +func (m *MockSecretsManagerAPI) RestoreSecret(arg0 *secretsmanager.RestoreSecretInput) (*secretsmanager.RestoreSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.RestoreSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreSecret indicates an expected call of RestoreSecret +func (mr *MockSecretsManagerAPIMockRecorder) RestoreSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).RestoreSecret), arg0) +} + +// RestoreSecretRequest mocks base method +func (m *MockSecretsManagerAPI) RestoreSecretRequest(arg0 *secretsmanager.RestoreSecretInput) (*request.Request, *secretsmanager.RestoreSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.RestoreSecretOutput) + return ret0, ret1 +} + +// RestoreSecretRequest indicates an expected call of RestoreSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) RestoreSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreSecretRequest", 
reflect.TypeOf((*MockSecretsManagerAPI)(nil).RestoreSecretRequest), arg0) +} + +// RestoreSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) RestoreSecretWithContext(arg0 context.Context, arg1 *secretsmanager.RestoreSecretInput, arg2 ...request.Option) (*secretsmanager.RestoreSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RestoreSecretWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.RestoreSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreSecretWithContext indicates an expected call of RestoreSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) RestoreSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).RestoreSecretWithContext), varargs...) 
+} + +// RotateSecret mocks base method +func (m *MockSecretsManagerAPI) RotateSecret(arg0 *secretsmanager.RotateSecretInput) (*secretsmanager.RotateSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RotateSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.RotateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RotateSecret indicates an expected call of RotateSecret +func (mr *MockSecretsManagerAPIMockRecorder) RotateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RotateSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).RotateSecret), arg0) +} + +// RotateSecretRequest mocks base method +func (m *MockSecretsManagerAPI) RotateSecretRequest(arg0 *secretsmanager.RotateSecretInput) (*request.Request, *secretsmanager.RotateSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RotateSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.RotateSecretOutput) + return ret0, ret1 +} + +// RotateSecretRequest indicates an expected call of RotateSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) RotateSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RotateSecretRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).RotateSecretRequest), arg0) +} + +// RotateSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) RotateSecretWithContext(arg0 context.Context, arg1 *secretsmanager.RotateSecretInput, arg2 ...request.Option) (*secretsmanager.RotateSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RotateSecretWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.RotateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RotateSecretWithContext indicates an expected call of RotateSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) RotateSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RotateSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).RotateSecretWithContext), varargs...) +} + +// TagResource mocks base method +func (m *MockSecretsManagerAPI) TagResource(arg0 *secretsmanager.TagResourceInput) (*secretsmanager.TagResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TagResource", arg0) + ret0, _ := ret[0].(*secretsmanager.TagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TagResource indicates an expected call of TagResource +func (mr *MockSecretsManagerAPIMockRecorder) TagResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResource", reflect.TypeOf((*MockSecretsManagerAPI)(nil).TagResource), arg0) +} + +// TagResourceRequest mocks base method +func (m *MockSecretsManagerAPI) TagResourceRequest(arg0 *secretsmanager.TagResourceInput) (*request.Request, *secretsmanager.TagResourceOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TagResourceRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.TagResourceOutput) + return ret0, ret1 +} + +// TagResourceRequest indicates an expected call of TagResourceRequest +func (mr *MockSecretsManagerAPIMockRecorder) TagResourceRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).TagResourceRequest), arg0) +} + +// TagResourceWithContext mocks base 
method +func (m *MockSecretsManagerAPI) TagResourceWithContext(arg0 context.Context, arg1 *secretsmanager.TagResourceInput, arg2 ...request.Option) (*secretsmanager.TagResourceOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "TagResourceWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.TagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TagResourceWithContext indicates an expected call of TagResourceWithContext +func (mr *MockSecretsManagerAPIMockRecorder) TagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).TagResourceWithContext), varargs...) +} + +// UntagResource mocks base method +func (m *MockSecretsManagerAPI) UntagResource(arg0 *secretsmanager.UntagResourceInput) (*secretsmanager.UntagResourceOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UntagResource", arg0) + ret0, _ := ret[0].(*secretsmanager.UntagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UntagResource indicates an expected call of UntagResource +func (mr *MockSecretsManagerAPIMockRecorder) UntagResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResource", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UntagResource), arg0) +} + +// UntagResourceRequest mocks base method +func (m *MockSecretsManagerAPI) UntagResourceRequest(arg0 *secretsmanager.UntagResourceInput) (*request.Request, *secretsmanager.UntagResourceOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UntagResourceRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.UntagResourceOutput) + return 
ret0, ret1 +} + +// UntagResourceRequest indicates an expected call of UntagResourceRequest +func (mr *MockSecretsManagerAPIMockRecorder) UntagResourceRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UntagResourceRequest), arg0) +} + +// UntagResourceWithContext mocks base method +func (m *MockSecretsManagerAPI) UntagResourceWithContext(arg0 context.Context, arg1 *secretsmanager.UntagResourceInput, arg2 ...request.Option) (*secretsmanager.UntagResourceOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UntagResourceWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.UntagResourceOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UntagResourceWithContext indicates an expected call of UntagResourceWithContext +func (mr *MockSecretsManagerAPIMockRecorder) UntagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UntagResourceWithContext), varargs...) 
+} + +// UpdateSecret mocks base method +func (m *MockSecretsManagerAPI) UpdateSecret(arg0 *secretsmanager.UpdateSecretInput) (*secretsmanager.UpdateSecretOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecret", arg0) + ret0, _ := ret[0].(*secretsmanager.UpdateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecret indicates an expected call of UpdateSecret +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecret", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecret), arg0) +} + +// UpdateSecretRequest mocks base method +func (m *MockSecretsManagerAPI) UpdateSecretRequest(arg0 *secretsmanager.UpdateSecretInput) (*request.Request, *secretsmanager.UpdateSecretOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecretRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.UpdateSecretOutput) + return ret0, ret1 +} + +// UpdateSecretRequest indicates an expected call of UpdateSecretRequest +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecretRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecretRequest), arg0) +} + +// UpdateSecretVersionStage mocks base method +func (m *MockSecretsManagerAPI) UpdateSecretVersionStage(arg0 *secretsmanager.UpdateSecretVersionStageInput) (*secretsmanager.UpdateSecretVersionStageOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecretVersionStage", arg0) + ret0, _ := ret[0].(*secretsmanager.UpdateSecretVersionStageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecretVersionStage indicates an expected call of UpdateSecretVersionStage +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecretVersionStage(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretVersionStage", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecretVersionStage), arg0) +} + +// UpdateSecretVersionStageRequest mocks base method +func (m *MockSecretsManagerAPI) UpdateSecretVersionStageRequest(arg0 *secretsmanager.UpdateSecretVersionStageInput) (*request.Request, *secretsmanager.UpdateSecretVersionStageOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecretVersionStageRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.UpdateSecretVersionStageOutput) + return ret0, ret1 +} + +// UpdateSecretVersionStageRequest indicates an expected call of UpdateSecretVersionStageRequest +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecretVersionStageRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretVersionStageRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecretVersionStageRequest), arg0) +} + +// UpdateSecretVersionStageWithContext mocks base method +func (m *MockSecretsManagerAPI) UpdateSecretVersionStageWithContext(arg0 context.Context, arg1 *secretsmanager.UpdateSecretVersionStageInput, arg2 ...request.Option) (*secretsmanager.UpdateSecretVersionStageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateSecretVersionStageWithContext", varargs...) 
+ ret0, _ := ret[0].(*secretsmanager.UpdateSecretVersionStageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecretVersionStageWithContext indicates an expected call of UpdateSecretVersionStageWithContext +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecretVersionStageWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretVersionStageWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecretVersionStageWithContext), varargs...) +} + +// UpdateSecretWithContext mocks base method +func (m *MockSecretsManagerAPI) UpdateSecretWithContext(arg0 context.Context, arg1 *secretsmanager.UpdateSecretInput, arg2 ...request.Option) (*secretsmanager.UpdateSecretOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateSecretWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.UpdateSecretOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecretWithContext indicates an expected call of UpdateSecretWithContext +func (mr *MockSecretsManagerAPIMockRecorder) UpdateSecretWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).UpdateSecretWithContext), varargs...) 
+} + +// ValidateResourcePolicy mocks base method +func (m *MockSecretsManagerAPI) ValidateResourcePolicy(arg0 *secretsmanager.ValidateResourcePolicyInput) (*secretsmanager.ValidateResourcePolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateResourcePolicy", arg0) + ret0, _ := ret[0].(*secretsmanager.ValidateResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateResourcePolicy indicates an expected call of ValidateResourcePolicy +func (mr *MockSecretsManagerAPIMockRecorder) ValidateResourcePolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourcePolicy", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ValidateResourcePolicy), arg0) +} + +// ValidateResourcePolicyRequest mocks base method +func (m *MockSecretsManagerAPI) ValidateResourcePolicyRequest(arg0 *secretsmanager.ValidateResourcePolicyInput) (*request.Request, *secretsmanager.ValidateResourcePolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateResourcePolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*secretsmanager.ValidateResourcePolicyOutput) + return ret0, ret1 +} + +// ValidateResourcePolicyRequest indicates an expected call of ValidateResourcePolicyRequest +func (mr *MockSecretsManagerAPIMockRecorder) ValidateResourcePolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourcePolicyRequest", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ValidateResourcePolicyRequest), arg0) +} + +// ValidateResourcePolicyWithContext mocks base method +func (m *MockSecretsManagerAPI) ValidateResourcePolicyWithContext(arg0 context.Context, arg1 *secretsmanager.ValidateResourcePolicyInput, arg2 ...request.Option) (*secretsmanager.ValidateResourcePolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + 
varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ValidateResourcePolicyWithContext", varargs...) + ret0, _ := ret[0].(*secretsmanager.ValidateResourcePolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateResourcePolicyWithContext indicates an expected call of ValidateResourcePolicyWithContext +func (mr *MockSecretsManagerAPIMockRecorder) ValidateResourcePolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourcePolicyWithContext", reflect.TypeOf((*MockSecretsManagerAPI)(nil).ValidateResourcePolicyWithContext), varargs...) +} diff --git a/agent/async/generate_mocks.go b/agent/async/generate_mocks.go new file mode 100644 index 00000000000..04eeabc3f52 --- /dev/null +++ b/agent/async/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package async + +//go:generate mockgen -destination=mocks/async_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/async Cache diff --git a/agent/async/lru_cache.go b/agent/async/lru_cache.go new file mode 100644 index 00000000000..c1f96268d07 --- /dev/null +++ b/agent/async/lru_cache.go @@ -0,0 +1,153 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package async + +import ( + "container/list" + "sync" + "time" +) + +type Cache interface { + // Get fetches a value from cache, returns nil, false on miss + Get(key string) (Value, bool) + // Set sets a value in cache. overrites any existing value + Set(key string, value Value) + // Delete deletes the value from the cache + Delete(key string) +} + +// Creates an LRUCache with maximum size, ttl for items. +func NewLRUCache(size int, ttl time.Duration) Cache { + lru := &lruCache{size: size, + ttl: ttl, + cache: make(map[string]*entry), + evictList: list.New(), + } + return lru +} + +type Value interface{} + +type entry struct { + value Value + added time.Time +} + +type lruCache struct { + sync.Mutex + cache map[string]*entry + evictList *list.List + size int + ttl time.Duration +} + +// Get returns the value associated with the key +func (lru *lruCache) Get(key string) (Value, bool) { + lru.Lock() + defer lru.Unlock() + + entry, ok := lru.cache[key] + + if !ok { + return nil, false + } + + ok = lru.evictStale(entry, key) + if !ok { + return nil, false + } + + lru.updateAccessed(key) + + return entry.value, true +} + +// Set sets the key-value pair in the cache +func (lru *lruCache) Set(key string, value Value) { + lru.Lock() + defer lru.Unlock() + + lru.cache[key] = &entry{value: value, added: time.Now()} + + // Remove from the evict list if an entry already existed + lru.removeFromEvictList(key) + + lru.evictList.PushBack(key) + lru.purgeSize() +} + +// Delete 
removes the entry associated with the key from cache +func (lru *lruCache) Delete(key string) { + lru.Lock() + defer lru.Unlock() + + lru.removeFromEvictList(key) + delete(lru.cache, key) +} + +func (lru *lruCache) updateAccessed(key string) { + // update evict list + for elem := lru.evictList.Front(); elem != nil; elem = elem.Next() { + if elem.Value == key { + lru.evictList.MoveToBack(elem) + break + } + } +} + +func (lru *lruCache) removeFromEvictList(key string) { + var next *list.Element + for element := lru.evictList.Front(); element != nil; element = next { + next = element.Next() + if element.Value == key { + lru.evictList.Remove(element) + break + } + } + +} + +func (lru *lruCache) evictStale(entry *entry, key string) bool { + if time.Since(entry.added) >= lru.ttl { + // remove from evict list + for elem := lru.evictList.Front(); elem != nil; elem = elem.Next() { + if elem.Value == key { + lru.evictList.Remove(elem) + break + } + } + + // delete from cache + delete(lru.cache, key) + + return false + } + + return true +} + +func (lru *lruCache) purgeSize() { + for elem := lru.evictList.Front(); elem != nil; elem = elem.Next() { + if len(lru.cache) <= lru.size { + break + } + key := elem.Value.(string) + // remove from list + lru.evictList.Remove(elem) + + // delete from cache + delete(lru.cache, key) + } +} diff --git a/agent/async/lru_cache_test.go b/agent/async/lru_cache_test.go new file mode 100644 index 00000000000..ebb2be85b6e --- /dev/null +++ b/agent/async/lru_cache_test.go @@ -0,0 +1,195 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package async + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestLRUSimple(t *testing.T) { + lru := NewLRUCache(10, time.Minute) + lru.Set("foo", "bar") + + bar, ok := lru.Get("foo") + assert.True(t, ok) + assert.Equal(t, bar, "bar") + + baz, ok := lru.Get("fooz") + assert.False(t, ok) + assert.Nil(t, baz) + + lru.Delete("foo") + bar, ok = lru.Get("foo") + assert.False(t, ok) + assert.Nil(t, bar) +} + +func TestLRUSetDelete(t *testing.T) { + lru := NewLRUCache(10, time.Minute) + + lru.Set("foo", "bar") + bar, ok := lru.Get("foo") + assert.True(t, ok) + assert.Equal(t, bar, "bar") + + lru.Set("foo", "bar2") + bar, ok = lru.Get("foo") + assert.True(t, ok) + assert.Equal(t, bar, "bar2") + + lru.Delete("foo") + bar, ok = lru.Get("foo") + assert.False(t, ok) + assert.Nil(t, bar) +} + +func TestLRUTTlPurge(t *testing.T) { + lru := NewLRUCache(10, 20*time.Millisecond) + lru.Set("foo", "bar") + + bar, ok := lru.Get("foo") + assert.True(t, ok) + assert.Equal(t, bar, "bar") + + time.Sleep(100 * time.Millisecond) + + bar, ok = lru.Get("foo") + assert.False(t, ok) + assert.Nil(t, bar) +} + +func TestLRUSizePurge(t *testing.T) { + lru := NewLRUCache(1, time.Minute) + lru.Set("foo", "bar") + + bar, ok := lru.Get("foo") + assert.True(t, ok) + assert.Equal(t, bar, "bar") + + lru.Set("bar", "baz") + + baz, ok := lru.Get("bar") + assert.True(t, ok) + assert.Equal(t, baz, "baz") + + bar, ok = lru.Get("foo") + assert.False(t, ok) + assert.Nil(t, bar) +} + +func BenchmarkLRUCacheConcurrency(b *testing.B) { + // prime a cache with %80 size + size := 10000 + lru := NewLRUCache(size/80, 30*time.Minute) + for i := 0; i < size; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } + + // fetch or set up to 5x 
concurrently + for n := 0; n < 5; n++ { + go func() { + for i := 0; i < size; i++ { + // 1 in 5 times, set the value + if rand.Intn(5) == 0 { + lru.Set(fmt.Sprintf("%d", i), true) + } else { + lru.Get(fmt.Sprintf("%d", i)) + } + } + }() + } +} + +func BenchmarkLRUCacheSet(b *testing.B) { + // prime a cache as large as the data set + lru := NewLRUCache(b.N, 30*time.Minute) + for i := 0; i < b.N; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } +} + +func BenchmarkLRUCacheFull(b *testing.B) { + // prime a cache as large as the data set + lru := NewLRUCache(b.N, 30*time.Minute) + for i := 0; i < b.N; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } + + // fetch up to 5x concurrently + for n := 0; n < 5; n++ { + go func() { + for i := 0; i < b.N; i++ { + lru.Get(fmt.Sprintf("%d", i)) + } + }() + } +} + +func BenchmarkLRUCacheHalf(b *testing.B) { + // prime a cache as large as the data set + lru := NewLRUCache(b.N/2, 30*time.Minute) + for i := 0; i < b.N; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } + + // fetch up to 5x concurrently + for n := 0; n < 5; n++ { + go func() { + for i := 0; i < b.N; i++ { + lru.Get(fmt.Sprintf("%d", i)) + } + }() + } +} + +func BenchmarkLRUCacheWorst(b *testing.B) { + // prime a cache with only one element + lru := NewLRUCache(1, 30*time.Minute) + for i := 0; i < b.N; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } + + // fetch up to 5x concurrently + for n := 0; n < 5; n++ { + go func() { + for i := 0; i < b.N; i++ { + lru.Get(fmt.Sprintf("%d", i)) + } + }() + } +} + +func BenchmarkLRUCacheTTLExpiry(b *testing.B) { + // prime a cache with everything expired immediately + lru := NewLRUCache(b.N, 0) + for i := 0; i < b.N; i++ { + lru.Set(fmt.Sprintf("%d", i), true) + } + + // fetch up to 5x concurrently + for n := 0; n < 5; n++ { + go func() { + for i := 0; i < b.N; i++ { + lru.Get(fmt.Sprintf("%d", i)) + } + }() + } +} diff --git a/agent/async/mocks/async_mocks.go b/agent/async/mocks/async_mocks.go new file mode 100644 index 
00000000000..45e5c29900d --- /dev/null +++ b/agent/async/mocks/async_mocks.go @@ -0,0 +1,88 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/async (interfaces: Cache) + +// Package mock_async is a generated GoMock package. +package mock_async + +import ( + reflect "reflect" + + async "github.com/aws/amazon-ecs-agent/agent/async" + gomock "github.com/golang/mock/gomock" +) + +// MockCache is a mock of Cache interface +type MockCache struct { + ctrl *gomock.Controller + recorder *MockCacheMockRecorder +} + +// MockCacheMockRecorder is the mock recorder for MockCache +type MockCacheMockRecorder struct { + mock *MockCache +} + +// NewMockCache creates a new mock instance +func NewMockCache(ctrl *gomock.Controller) *MockCache { + mock := &MockCache{ctrl: ctrl} + mock.recorder = &MockCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCache) EXPECT() *MockCacheMockRecorder { + return m.recorder +} + +// Delete mocks base method +func (m *MockCache) Delete(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Delete", arg0) +} + +// Delete indicates an expected call of Delete +func (mr *MockCacheMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", 
reflect.TypeOf((*MockCache)(nil).Delete), arg0) +} + +// Get mocks base method +func (m *MockCache) Get(arg0 string) (async.Value, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(async.Value) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockCacheMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockCache)(nil).Get), arg0) +} + +// Set mocks base method +func (m *MockCache) Set(arg0 string, arg1 async.Value) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Set", arg0, arg1) +} + +// Set indicates an expected call of Set +func (mr *MockCacheMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockCache)(nil).Set), arg0, arg1) +} diff --git a/agent/config/conditional.go b/agent/config/conditional.go new file mode 100644 index 00000000000..b6f8be049b8 --- /dev/null +++ b/agent/config/conditional.go @@ -0,0 +1,108 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package config + +import ( + "encoding/json" + "strconv" +) + +// Conditional makes it possible to understand if a variable was set explicitly or relies on a default setting +type Conditional int + +const ( + _ Conditional = iota + ExplicitlyEnabled + ExplicitlyDisabled + NotSet +) + +type BooleanDefaultTrue struct { + Value Conditional +} + +// Enabled is a convenience function for when consumers don't care if the value is implicit or explicit +func (b BooleanDefaultTrue) Enabled() bool { + return b.Value == ExplicitlyEnabled || b.Value == NotSet +} + +// MarshalJSON is used to serialize the type to json, per the Marshaller interface +func (b BooleanDefaultTrue) MarshalJSON() ([]byte, error) { + switch b.Value { + case ExplicitlyEnabled: + return json.Marshal(true) + case ExplicitlyDisabled: + return json.Marshal(false) + default: + return json.Marshal(nil) + } +} + +// UnmarshalJSON is used to deserialize json types into Conditional, per the Unmarshaller interface +func (b *BooleanDefaultTrue) UnmarshalJSON(jsonData []byte) error { + jsonString := string(jsonData) + jsonBool, err := strconv.ParseBool(jsonString) + if err != nil && jsonString != "null" { + return err + } + + if jsonString == "" || jsonString == "null" { + b.Value = NotSet + } else if jsonBool { + b.Value = ExplicitlyEnabled + } else { + b.Value = ExplicitlyDisabled + } + + return nil +} + +type BooleanDefaultFalse struct { + Value Conditional +} + +// Enabled is a convenience function for when consumers don't care if the value is implicit or explicit +func (b BooleanDefaultFalse) Enabled() bool { + return b.Value == ExplicitlyEnabled +} + +// MarshalJSON is used to serialize the type to json, per the Marshaller interface +func (b BooleanDefaultFalse) MarshalJSON() ([]byte, error) { + switch b.Value { + case ExplicitlyEnabled: + return json.Marshal(true) + case ExplicitlyDisabled: + return json.Marshal(false) + default: + return json.Marshal(nil) + } +} + +// UnmarshalJSON is used to deserialize 
json types into Conditional, per the Unmarshaller interface +func (b *BooleanDefaultFalse) UnmarshalJSON(jsonData []byte) error { + jsonString := string(jsonData) + jsonBool, err := strconv.ParseBool(jsonString) + if err != nil && jsonString != "null" { + return err + } + + if jsonString == "" || jsonString == "null" { + b.Value = NotSet + } else if jsonBool { + b.Value = ExplicitlyEnabled + } else { + b.Value = ExplicitlyDisabled + } + + return nil +} diff --git a/agent/config/conditional_test.go b/agent/config/conditional_test.go new file mode 100644 index 00000000000..8f8dbcf9bf2 --- /dev/null +++ b/agent/config/conditional_test.go @@ -0,0 +1,69 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package config + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBooleanDefaultTrue(t *testing.T) { + x := BooleanDefaultTrue{Value: NotSet} + y := BooleanDefaultTrue{Value: ExplicitlyEnabled} + z := BooleanDefaultTrue{Value: ExplicitlyDisabled} + + assert.True(t, x.Enabled(), "NotSet is enabled") + assert.True(t, y.Enabled(), "ExplicitlyEnabled is enabled") + assert.False(t, z.Enabled(), "ExplicitlyDisabled is not enabled") +} + +func TestBooleanDefaultTrueImplements(t *testing.T) { + assert.Implements(t, (*json.Marshaler)(nil), BooleanDefaultTrue{Value: NotSet}) + assert.Implements(t, (*json.Unmarshaler)(nil), (*BooleanDefaultTrue)(nil)) +} + +// main conversion cases for Conditional marshalling +var cases = []struct { + defaultBool BooleanDefaultTrue + bytes []byte +}{ + {BooleanDefaultTrue{Value: ExplicitlyEnabled}, []byte("true")}, + {BooleanDefaultTrue{Value: ExplicitlyDisabled}, []byte("false")}, + {BooleanDefaultTrue{Value: NotSet}, []byte("null")}, +} + +func TestBooleanDefaultTrueMarshal(t *testing.T) { + for _, tc := range cases { + t.Run(string(tc.bytes), func(t *testing.T) { + m, err := json.Marshal(tc.defaultBool) + assert.NoError(t, err) + assert.Equal(t, tc.bytes, m) + }) + } +} + +func TestBooleanDefaultTrueUnmarshal(t *testing.T) { + for _, tc := range cases { + t.Run(string(tc.bytes), func(t *testing.T) { + var target BooleanDefaultTrue + err := json.Unmarshal(tc.bytes, &target) + assert.NoError(t, err) + assert.Equal(t, tc.defaultBool, target) + }) + } +} diff --git a/agent/config/config.go b/agent/config/config.go new file mode 100644 index 00000000000..b2fb2f76b78 --- /dev/null +++ b/agent/config/config.go @@ -0,0 +1,631 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "time" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/cihub/seelog" +) + +const ( + // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DockerReservedPort = 2375 + DockerReservedSSLPort = 2376 + // DockerTagSeparator is the charactor used to separate names and tag in docker + DockerTagSeparator = ":" + // DefaultDockerTag is the default tag used by docker + DefaultDockerTag = "latest" + + SSHPort = 22 + + // AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent. + AgentIntrospectionPort = 51678 + + // AgentCredentialsPort is used to serve the credentials for tasks. + AgentCredentialsPort = 51679 + + // AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server + AgentPrometheusExpositionPort = 51680 + + // defaultConfigFileName is the default (json-formatted) config file + defaultConfigFileName = "/etc/ecs_container_agent/config.json" + + // DefaultClusterName is the name of the default cluster. + DefaultClusterName = "default" + + // DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to + // clean up task's containers. 
+ DefaultTaskCleanupWaitDuration = 3 * time.Hour + + // DefaultPollingMetricsWaitDuration specifies the default value for polling metrics wait duration + // This is only used when PollMetrics is set to true + DefaultPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval / 2 + + // defaultDockerStopTimeout specifies the value for container stop timeout duration + defaultDockerStopTimeout = 30 * time.Second + + // DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to + // remove the images pulled by agent. + DefaultImageCleanupTimeInterval = 30 * time.Minute + + // DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs + // image cleanup. + DefaultNumImagesToDeletePerCycle = 5 + + // DefaultNumNonECSContainersToDeletePerCycle specifies the default number of nonecs containers to delete when agent performs + // nonecs containers cleanup. + DefaultNumNonECSContainersToDeletePerCycle = 5 + + // DefaultImageDeletionAge specifies the default value for minimum amount of elapsed time after an image + // has been pulled before it can be deleted. + DefaultImageDeletionAge = 1 * time.Hour + + // DefaultNonECSImageDeletionAge specifies the default value for minimum amount of elapsed time after an image + // has been created before it can be deleted + DefaultNonECSImageDeletionAge = 1 * time.Hour + + // DefaultImagePullTimeout specifies the timeout for PullImage API. + DefaultImagePullTimeout = 2 * time.Hour + + // minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up + // a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field. + minimumTaskCleanupWaitDuration = 1 * time.Minute + + // minimumImagePullInactivityTimeout specifies the minimum amount of time that an image can be + // 'stuck' in the pull / unpack step. Very small values are unsafe and lead to high failure rate. 
+ minimumImagePullInactivityTimeout = 1 * time.Minute + + // minimumPollingMetricsWaitDuration specifies the minimum duration to wait before polling for new stats + // from docker. This is only used when PollMetrics is set to true + minimumPollingMetricsWaitDuration = 5 * time.Second + + // maximumPollingMetricsWaitDuration specifies the maximum duration to wait before polling for new stats + // from docker. This is only used when PollMetrics is set to true + maximumPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval + + // minimumDockerStopTimeout specifies the minimum value for docker StopContainer API + minimumDockerStopTimeout = 1 * time.Second + + // minimumImageCleanupInterval specifies the minimum time for agent to wait before performing + // image cleanup. + minimumImageCleanupInterval = 10 * time.Minute + + // minimumNumImagesToDeletePerCycle specifies the minimum number of images that to be deleted when + // performing image cleanup. + minimumNumImagesToDeletePerCycle = 1 + + // defaultCNIPluginsPath is the default path where cni binaries are located + defaultCNIPluginsPath = "/amazon-ecs-cni-plugins" + + // DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required + DefaultMinSupportedCNIVersion = "0.3.0" + + // pauseContainerTarball is the path to the pause container tarball + pauseContainerTarballPath = "/images/amazon-ecs-pause.tar" + + // DefaultTaskMetadataSteadyStateRate is set as 40. This is arrived from our benchmarking + // results where task endpoint can handle 4000 rps effectively. Here, 100 containers + // will be able to send out 40 rps. 
+ DefaultTaskMetadataSteadyStateRate = 40 + + // DefaultTaskMetadataBurstRate is set to handle 60 burst requests at once + DefaultTaskMetadataBurstRate = 60 + + //Known cached image names + CachedImageNameAgentContainer = "amazon/amazon-ecs-agent:latest" + + // DefaultNvidiaRuntime is the name of the runtime to pass Nvidia GPUs to containers + DefaultNvidiaRuntime = "nvidia" + + // defaultCgroupCPUPeriod is set to 100 ms to set isCFS period and quota for task limits + defaultCgroupCPUPeriod = 100 * time.Millisecond + maximumCgroupCPUPeriod = 100 * time.Millisecond + minimumCgroupCPUPeriod = 8 * time.Millisecond + + // DefaultContainerMetricsPublishInterval is the default interval that we publish + // metrics to the ECS telemetry backend (TACS) + DefaultContainerMetricsPublishInterval = 20 * time.Second +) + +const ( + // ImagePullDefaultBehavior specifies the behavior that if an image pull API call fails, + // agent tries to start from the Docker image cache anyway, assuming that the image has not changed. + ImagePullDefaultBehavior ImagePullBehaviorType = iota + + // ImagePullAlwaysBehavior specifies the behavior that if an image pull API call fails, + // the task fails instead of using cached image. + ImagePullAlwaysBehavior + + // ImagePullOnceBehavior specifies the behavior that agent will only attempt to pull + // the same image once, once an image is pulled, local image cache will be used + // for all the containers. + ImagePullOnceBehavior + + // ImagePullPreferCachedBehavior specifies the behavior that agent will only attempt to pull + // the image if there is no cached image. + ImagePullPreferCachedBehavior +) + +const ( + // When ContainerInstancePropagateTagsFromNoneType is specified, no DescribeTags + // API call will be made. 
+ ContainerInstancePropagateTagsFromNoneType ContainerInstancePropagateTagsFromType = iota + + // When ContainerInstancePropagateTagsFromEC2InstanceType is specified, agent will + // make DescribeTags API call to get tags remotely. + ContainerInstancePropagateTagsFromEC2InstanceType +) + +var ( + // DefaultPauseContainerImageName is the name of the pause container image. The linker's + // load flags are used to populate this value from the Makefile + DefaultPauseContainerImageName = "" + + // DefaultPauseContainerTag is the tag for the pause container image. The linker's load + // flags are used to populate this value from the Makefile + DefaultPauseContainerTag = "" +) + +// Merge merges two config files, preferring the ones on the left. Any nil or +// zero values present in the left that are present in the right will be overridden +func (cfg *Config) Merge(rhs Config) *Config { + left := reflect.ValueOf(cfg).Elem() + right := reflect.ValueOf(&rhs).Elem() + + for i := 0; i < left.NumField(); i++ { + leftField := left.Field(i) + switch leftField.Interface().(type) { + case BooleanDefaultFalse, BooleanDefaultTrue: + str, _ := json.Marshal(reflect.ValueOf(leftField.Interface()).Interface()) + if string(str) == "null" { + leftField.Set(reflect.ValueOf(right.Field(i).Interface())) + } + default: + if utils.ZeroOrNil(leftField.Interface()) { + leftField.Set(reflect.ValueOf(right.Field(i).Interface())) + } + } + } + + return cfg //make it chainable +} + +// NewConfig returns a config struct created by merging environment variables, +// a config file, and EC2 Metadata info. +// The 'config' struct it returns can be used, even if an error is returned. An +// error is returned, however, if the config is incomplete in some way that is +// considered fatal. 
+func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) { + var errs []error + envConfig, err := environmentConfig() //Environment overrides all else + if err != nil { + errs = append(errs, err) + } + config := &envConfig + + if config.complete() { + // No need to do file / network IO + return config, nil + } + + fcfg, err := fileConfig() + if err != nil { + errs = append(errs, err) + } + config.Merge(fcfg) + + config.Merge(userDataConfig(ec2client)) + + if config.AWSRegion == "" { + if config.NoIID { + // get it from AWS SDK if we don't have instance identity document + awsRegion, err := ec2client.Region() + if err != nil { + errs = append(errs, err) + } + config.AWSRegion = awsRegion + } else { + // Get it from metadata only if we need to (network io) + config.Merge(ec2MetadataConfig(ec2client)) + } + } + + return config, config.mergeDefaultConfig(errs) +} + +func (config *Config) mergeDefaultConfig(errs []error) error { + config.trimWhitespace() + config.Merge(DefaultConfig()) + err := config.validateAndOverrideBounds() + if err != nil { + errs = append(errs, err) + } + if len(errs) != 0 { + return apierrors.NewMultiError(errs...) 
+ } + return nil +} + +// trimWhitespace trims whitespace from all string cfg values with the +// `trim` tag +func (cfg *Config) trimWhitespace() { + cfgElem := reflect.ValueOf(cfg).Elem() + cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() + + for i := 0; i < cfgElem.NumField(); i++ { + cfgField := cfgElem.Field(i) + if !cfgField.CanInterface() { + continue + } + trimTag := cfgStructField.Field(i).Tag.Get("trim") + if len(trimTag) == 0 { + continue + } + + if cfgField.Kind() != reflect.String { + seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i) + continue + } + str := cfgField.Interface().(string) + cfgField.SetString(strings.TrimSpace(str)) + } +} + +// validateAndOverrideBounds performs validation over members of the Config struct +// and check the value against the minimum required value. +func (cfg *Config) validateAndOverrideBounds() error { + err := cfg.checkMissingAndDepreciated() + if err != nil { + return err + } + + if cfg.DockerStopTimeout < minimumDockerStopTimeout { + return fmt.Errorf("config: invalid value for docker container stop timeout: %v", cfg.DockerStopTimeout.String()) + } + + if cfg.ContainerStartTimeout < minimumContainerStartTimeout { + return fmt.Errorf("config: invalid value for docker container start timeout: %v", cfg.ContainerStartTimeout.String()) + } + var badDrivers []string + for _, driver := range cfg.AvailableLoggingDrivers { + // Don't classify awsfirelens as a bad driver + if driver == dockerclient.AWSFirelensDriver { + continue + } + _, ok := dockerclient.LoggingDriverMinimumVersion[driver] + if !ok { + badDrivers = append(badDrivers, string(driver)) + } + } + if len(badDrivers) > 0 { + return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", ")) + } + + // If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration, + // print a warning and override it + if cfg.TaskCleanupWaitDuration < 
minimumTaskCleanupWaitDuration { + seelog.Warnf("Invalid value for ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), cfg.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration) + cfg.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration + } + + if cfg.ImagePullInactivityTimeout < minimumImagePullInactivityTimeout { + seelog.Warnf("Invalid value for image pull inactivity timeout duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", defaultImagePullInactivityTimeout.String(), cfg.ImagePullInactivityTimeout, minimumImagePullInactivityTimeout) + cfg.ImagePullInactivityTimeout = defaultImagePullInactivityTimeout + } + + if cfg.ImageCleanupInterval < minimumImageCleanupInterval { + seelog.Warnf("Invalid value for ECS_IMAGE_CLEANUP_INTERVAL, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), cfg.ImageCleanupInterval, minimumImageCleanupInterval) + cfg.ImageCleanupInterval = DefaultImageCleanupTimeInterval + } + + if cfg.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle { + seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overridden with the default value: %d. 
Parsed value: %d, minimum value: %d.", DefaultNumImagesToDeletePerCycle, cfg.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle) + cfg.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle + } + + if cfg.TaskMetadataSteadyStateRate <= 0 || cfg.TaskMetadataBurstRate <= 0 { + seelog.Warnf("Invalid values for rate limits, will be overridden with default values: %d,%d.", DefaultTaskMetadataSteadyStateRate, DefaultTaskMetadataBurstRate) + cfg.TaskMetadataSteadyStateRate = DefaultTaskMetadataSteadyStateRate + cfg.TaskMetadataBurstRate = DefaultTaskMetadataBurstRate + } + + // check the PollMetrics specific configurations + cfg.pollMetricsOverrides() + + cfg.platformOverrides() + + return nil +} + +func (cfg *Config) pollMetricsOverrides() { + if cfg.PollMetrics.Enabled() { + if cfg.PollingMetricsWaitDuration < minimumPollingMetricsWaitDuration { + seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is less than the minimum of %s. Setting polling interval to minimum.", + cfg.PollingMetricsWaitDuration, minimumPollingMetricsWaitDuration) + cfg.PollingMetricsWaitDuration = minimumPollingMetricsWaitDuration + } + + if cfg.PollingMetricsWaitDuration > maximumPollingMetricsWaitDuration { + seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is greater than the maximum of %s. Setting polling interval to maximum.", + cfg.PollingMetricsWaitDuration, maximumPollingMetricsWaitDuration) + cfg.PollingMetricsWaitDuration = maximumPollingMetricsWaitDuration + } + } +} + +// checkMissingAndDeprecated checks all zero-valued fields for tags of the form +// missing:STRING and acts based on that string. Current options are: fatal, +// warn. Fatal will result in an error being returned, warn will result in a +// warning that the field is missing being logged. 
+func (cfg *Config) checkMissingAndDepreciated() error { + cfgElem := reflect.ValueOf(cfg).Elem() + cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() + + fatalFields := []string{} + for i := 0; i < cfgElem.NumField(); i++ { + cfgField := cfgElem.Field(i) + if utils.ZeroOrNil(cfgField.Interface()) { + missingTag := cfgStructField.Field(i).Tag.Get("missing") + if len(missingTag) == 0 { + continue + } + switch missingTag { + case "warn": + seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) + case "fatal": + seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) + fatalFields = append(fatalFields, cfgStructField.Field(i).Name) + default: + seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag) + } + } else { + // present + deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated") + if len(deprecatedTag) == 0 { + continue + } + seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag) + } + } + if len(fatalFields) > 0 { + return errors.New("Missing required fields: " + strings.Join(fatalFields, ", ")) + } + return nil +} + +// complete returns true if all fields of the config are populated / nonzero +func (cfg *Config) complete() bool { + cfgElem := reflect.ValueOf(cfg).Elem() + + for i := 0; i < cfgElem.NumField(); i++ { + if utils.ZeroOrNil(cfgElem.Field(i).Interface()) { + return false + } + } + return true +} + +func fileConfig() (Config, error) { + fileName := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), defaultConfigFileName) + cfg := Config{} + + file, err := os.Open(fileName) + if err != nil { + return cfg, nil + } + data, err := ioutil.ReadAll(file) + if err != nil { + seelog.Errorf("Unable to read cfg file, err %v", err) + return cfg, err + } + if strings.TrimSpace(string(data)) == "" { + // empty file, not an error + return cfg, nil + } + + err = json.Unmarshal(data, &cfg) + if err != nil { + 
seelog.Criticalf("Error reading cfg json data, err %v", err) + return cfg, err + } + + // Handle any deprecated keys correctly here + if utils.ZeroOrNil(cfg.Cluster) && !utils.ZeroOrNil(cfg.ClusterArn) { + cfg.Cluster = cfg.ClusterArn + } + return cfg, nil +} + +// userDataConfig reads configuration JSON from instance's userdata. It doesn't +// return any error as it's entirely optional to configure the ECS agent using +// this method. +// Example: +// {"ECSAgentConfiguration":{"Cluster":"default"}} +func userDataConfig(ec2Client ec2.EC2MetadataClient) Config { + type userDataParser struct { + Config Config `json:"ECSAgentConfiguration"` + } + + parsedUserData := userDataParser{ + Config: Config{}, + } + + userData, err := ec2Client.GetUserData() + if err != nil { + seelog.Warnf("Unable to fetch user data: %v", err) + // Unable to read userdata from instance metadata. Just + // return early + return parsedUserData.Config + } + // In the future, if we want to support base64 encoded config, + // we'd need to add logic to decode the string here. + err = json.Unmarshal([]byte(userData), &parsedUserData) + if err != nil { + seelog.Debugf("Non-json user data, skip merging into agent config: %v", err) + // Unable to parse userdata as a valid JSON. Return the + // empty config + return Config{} + } + + return parsedUserData.Config +} + +// environmentConfig reads the given configs from the environment and attempts +// to convert them to the given type +func environmentConfig() (Config, error) { + dataDir := os.Getenv("ECS_DATADIR") + + steadyStateRate, burstRate := parseTaskMetadataThrottles() + + var errs []error + instanceAttributes, errs := parseInstanceAttributes(errs) + + containerInstanceTags, errs := parseContainerInstanceTags(errs) + + additionalLocalRoutes, errs := parseAdditionalLocalRoutes(errs) + + var err error + if len(errs) > 0 { + err = apierrors.NewMultiError(errs...) 
+ } + return Config{ + Cluster: os.Getenv("ECS_CLUSTER"), + APIEndpoint: os.Getenv("ECS_BACKEND_HOST"), + AWSRegion: os.Getenv("AWS_DEFAULT_REGION"), + DockerEndpoint: os.Getenv("DOCKER_HOST"), + ReservedPorts: parseReservedPorts("ECS_RESERVED_PORTS"), + ReservedPortsUDP: parseReservedPorts("ECS_RESERVED_PORTS_UDP"), + DataDir: dataDir, + Checkpoint: parseCheckpoint(dataDir), + EngineAuthType: os.Getenv("ECS_ENGINE_AUTH_TYPE"), + EngineAuthData: NewSensitiveRawMessage([]byte(os.Getenv("ECS_ENGINE_AUTH_DATA"))), + UpdatesEnabled: parseBooleanDefaultFalseConfig("ECS_UPDATES_ENABLED"), + UpdateDownloadDir: os.Getenv("ECS_UPDATE_DOWNLOAD_DIR"), + DisableMetrics: parseBooleanDefaultFalseConfig("ECS_DISABLE_METRICS"), + ReservedMemory: parseEnvVariableUint16("ECS_RESERVED_MEMORY"), + AvailableLoggingDrivers: parseAvailableLoggingDrivers(), + PrivilegedDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_PRIVILEGED"), + SELinuxCapable: parseBooleanDefaultFalseConfig("ECS_SELINUX_CAPABLE"), + AppArmorCapable: parseBooleanDefaultFalseConfig("ECS_APPARMOR_CAPABLE"), + TaskCleanupWaitDuration: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"), + TaskENIEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_ENI"), + TaskIAMRoleEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_IAM_ROLE"), + DeleteNonECSImagesEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP"), + TaskCPUMemLimit: parseBooleanDefaultTrueConfig("ECS_ENABLE_TASK_CPU_MEM_LIMIT"), + DockerStopTimeout: parseDockerStopTimeout(), + ContainerStartTimeout: parseContainerStartTimeout(), + ContainerCreateTimeout: parseContainerCreateTimeout(), + DependentContainersPullUpfront: parseBooleanDefaultFalseConfig("ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT"), + ImagePullInactivityTimeout: parseImagePullInactivityTimeout(), + ImagePullTimeout: parseEnvVariableDuration("ECS_IMAGE_PULL_TIMEOUT"), + CredentialsAuditLogFile: os.Getenv("ECS_AUDIT_LOGFILE"), + CredentialsAuditLogDisabled: 
utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false), + TaskIAMRoleEnabledForNetworkHost: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false), + ImageCleanupDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_IMAGE_CLEANUP"), + MinimumImageDeletionAge: parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE"), + NonECSMinimumImageDeletionAge: parseEnvVariableDuration("NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE"), + ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"), + NumImagesToDeletePerCycle: parseNumImagesToDeletePerCycle(), + NumNonECSContainersToDeletePerCycle: parseNumNonECSContainersToDeletePerCycle(), + ImagePullBehavior: parseImagePullBehavior(), + ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE"), + InstanceAttributes: instanceAttributes, + CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"), + AWSVPCBlockInstanceMetdata: parseBooleanDefaultFalseConfig("ECS_AWSVPC_BLOCK_IMDS"), + AWSVPCAdditionalLocalRoutes: additionalLocalRoutes, + ContainerMetadataEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_CONTAINER_METADATA"), + DataDirOnHost: os.Getenv("ECS_HOST_DATA_DIR"), + OverrideAWSLogsExecutionRole: parseBooleanDefaultFalseConfig("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE"), + CgroupPath: os.Getenv("ECS_CGROUP_PATH"), + TaskMetadataSteadyStateRate: steadyStateRate, + TaskMetadataBurstRate: burstRate, + SharedVolumeMatchFullConfig: parseBooleanDefaultFalseConfig("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG"), + ContainerInstanceTags: containerInstanceTags, + ContainerInstancePropagateTagsFrom: parseContainerInstancePropagateTagsFrom(), + PollMetrics: parseBooleanDefaultFalseConfig("ECS_POLL_METRICS"), + PollingMetricsWaitDuration: parseEnvVariableDuration("ECS_POLLING_METRICS_WAIT_DURATION"), + DisableDockerHealthCheck: parseBooleanDefaultFalseConfig("ECS_DISABLE_DOCKER_HEALTH_CHECK"), + GPUSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_GPU_SUPPORT"), false), + 
InferentiaSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_INF_SUPPORT"), false), + NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"), + TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false), + CgroupCPUPeriod: parseCgroupCPUPeriod(), + SpotInstanceDrainingEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_SPOT_INSTANCE_DRAINING"), + GMSACapable: parseGMSACapability(), + VolumePluginCapabilities: parseVolumePluginCapabilities(), + FSxWindowsFileServerCapable: parseFSxWindowsFileServerCapability(), + }, err +} + +func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config { + iid, err := ec2client.InstanceIdentityDocument() + if err != nil { + seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error()) + return Config{} + } + return Config{AWSRegion: iid.Region} +} + +// String returns a lossy string representation of the config suitable for human readable display. +// Consequently, it *should not* return any sensitive information. 
+func (cfg *Config) String() string { + return fmt.Sprintf( + "Cluster: %v, "+ + " Region: %v, "+ + " DataDir: %v,"+ + " Checkpoint: %v, "+ + "AuthType: %v, "+ + "UpdatesEnabled: %v, "+ + "DisableMetrics: %v, "+ + "PollMetrics: %v, "+ + "PollingMetricsWaitDuration: %v, "+ + "ReservedMem: %v, "+ + "TaskCleanupWaitDuration: %v, "+ + "DockerStopTimeout: %v, "+ + "ContainerStartTimeout: %v, "+ + "ContainerCreateTimeout: %v, "+ + "DependentContainersPullUpfront: %v, "+ + "TaskCPUMemLimit: %v, "+ + "%s", + cfg.Cluster, + cfg.AWSRegion, + cfg.DataDir, + cfg.Checkpoint, + cfg.EngineAuthType, + cfg.UpdatesEnabled, + cfg.DisableMetrics, + cfg.PollMetrics, + cfg.PollingMetricsWaitDuration, + cfg.ReservedMemory, + cfg.TaskCleanupWaitDuration, + cfg.DockerStopTimeout, + cfg.ContainerStartTimeout, + cfg.ContainerCreateTimeout, + cfg.DependentContainersPullUpfront, + cfg.TaskCPUMemLimit, + cfg.platformString(), + ) +} diff --git a/agent/config/config_test.go b/agent/config/config_test.go new file mode 100644 index 00000000000..2e75dd39738 --- /dev/null +++ b/agent/config/config_test.go @@ -0,0 +1,855 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package config + +import ( + "encoding/json" + "errors" + "os" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestMerge(t *testing.T) { + conf1 := &Config{Cluster: "Foo"} + conf2 := Config{Cluster: "ignored", APIEndpoint: "Bar"} + conf3 := Config{AWSRegion: "us-west-2"} + + conf1.Merge(conf2).Merge(conf3) + + assert.Equal(t, "Foo", conf1.Cluster, "The cluster should not have been overridden") + assert.Equal(t, "Bar", conf1.APIEndpoint, "The APIEndpoint should have been merged in") + assert.Equal(t, "us-west-2", conf1.AWSRegion, "Incorrect region") +} + +// TODO: we need to move all boolean configs to use BooleanDefaultTrue/False +// Below TestBooleanMerge* tests pass for one that's been migrated. +func TestBooleanMergeFalseNotOverridden(t *testing.T) { + conf1 := &Config{Cluster: "Foo"} + conf2 := Config{Cluster: "ignored", DeleteNonECSImagesEnabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}} + conf3 := Config{AWSRegion: "us-west-2", DeleteNonECSImagesEnabled: BooleanDefaultFalse{Value: ExplicitlyEnabled}} + + conf1.Merge(conf2).Merge(conf3) + + assert.Equal(t, "Foo", conf1.Cluster, "The cluster should not have been overridden") + assert.Equal(t, ExplicitlyDisabled, conf1.DeleteNonECSImagesEnabled.Value, "The DeleteNonECSImagesEnabled should not have been overridden") + assert.Equal(t, "us-west-2", conf1.AWSRegion, "Incorrect region") +} + +func TestBooleanMergeNotSetOverridden(t *testing.T) { + conf1 := &Config{Cluster: "Foo"} + conf2 := Config{Cluster: "ignored", DeleteNonECSImagesEnabled: BooleanDefaultFalse{Value: NotSet}} + conf3 := Config{AWSRegion: "us-west-2", DeleteNonECSImagesEnabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}} + + conf1.Merge(conf2).Merge(conf3) + + 
assert.Equal(t, "Foo", conf1.Cluster, "The cluster should not have been overridden") + assert.Equal(t, ExplicitlyDisabled, conf1.DeleteNonECSImagesEnabled.Value, "The DeleteNonECSImagesEnabled should have been overridden") + assert.Equal(t, "us-west-2", conf1.AWSRegion, "Incorrect region") +} + +func TestBrokenEC2Metadata(t *testing.T) { + ctrl := gomock.NewController(t) + mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err")) + mockEc2Metadata.EXPECT().GetUserData() + + _, err := NewConfig(mockEc2Metadata) + assert.Error(t, err, "Expected error when region isn't set and metadata doesn't work") +} + +func TestBrokenEC2MetadataEndpoint(t *testing.T) { + defer setTestRegion()() + ctrl := gomock.NewController(t) + mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + + mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err")) + mockEc2Metadata.EXPECT().GetUserData() + + config, err := NewConfig(mockEc2Metadata) + assert.NoError(t, err) + assert.Equal(t, config.AWSRegion, "us-west-2", "Wrong region") + assert.Zero(t, config.APIEndpoint, "Endpoint env variable not set; endpoint should be blank") +} + +func TestGetRegionWithNoIID(t *testing.T) { + defer setTestEnv("AWS_DEFAULT_REGION", "")() + ctrl := gomock.NewController(t) + mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + + userDataResponse := `{ "ECSAgentConfiguration":{ + "Cluster":"arn:aws:ecs:us-east-1:123456789012:cluster/my-cluster", + "APIEndpoint":"https://some-endpoint.com", + "NoIID":true + }}` + mockEc2Metadata.EXPECT().GetUserData().Return(userDataResponse, nil) + mockEc2Metadata.EXPECT().Region().Return("us-east-1", nil) + + config, err := NewConfig(mockEc2Metadata) + assert.NoError(t, err) + assert.Equal(t, "us-east-1", config.AWSRegion, "Wrong region") + assert.Equal(t, "https://some-endpoint.com", 
config.APIEndpoint, "Endpoint env variable not set; endpoint should be blank") +} + +func TestEnvironmentConfig(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CLUSTER", "myCluster")() + defer setTestEnv("ECS_RESERVED_PORTS_UDP", "[42,99]")() + defer setTestEnv("ECS_RESERVED_MEMORY", "20")() + defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "60s")() + defer setTestEnv("ECS_CONTAINER_START_TIMEOUT", "5m")() + defer setTestEnv("ECS_CONTAINER_CREATE_TIMEOUT", "4m")() + defer setTestEnv("ECS_IMAGE_PULL_INACTIVITY_TIMEOUT", "10m")() + defer setTestEnv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\""+string(dockerclient.SyslogDriver)+"\"]")() + defer setTestEnv("ECS_SELINUX_CAPABLE", "true")() + defer setTestEnv("ECS_APPARMOR_CAPABLE", "true")() + defer setTestEnv("ECS_DISABLE_PRIVILEGED", "true")() + defer setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "90s")() + defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE", "true")() + defer setTestEnv("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP", "true")() + defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")() + defer setTestEnv("ECS_DISABLE_IMAGE_CLEANUP", "true")() + defer setTestEnv("ECS_IMAGE_CLEANUP_INTERVAL", "2h")() + defer setTestEnv("ECS_IMAGE_MINIMUM_CLEANUP_AGE", "30m")() + defer setTestEnv("NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE", "30m")() + defer setTestEnv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "2")() + defer setTestEnv("ECS_IMAGE_PULL_BEHAVIOR", "always")() + defer setTestEnv("ECS_INSTANCE_ATTRIBUTES", "{\"my_attribute\": \"testing\"}")() + defer setTestEnv("ECS_CONTAINER_INSTANCE_TAGS", `{"my_tag": "testing"}`)() + defer setTestEnv("ECS_ENABLE_TASK_ENI", "true")() + defer setTestEnv("ECS_TASK_METADATA_RPS_LIMIT", "1000,1100")() + defer setTestEnv("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG", "true")() + defer setTestEnv("ECS_ENABLE_GPU_SUPPORT", "true")() + defer setTestEnv("ECS_DISABLE_TASK_METADATA_AZ", "true")() + defer setTestEnv("ECS_NVIDIA_RUNTIME", "nvidia")() + defer setTestEnv("ECS_POLL_METRICS", "true")() + 
defer setTestEnv("ECS_POLLING_METRICS_WAIT_DURATION", "10s")() + defer setTestEnv("ECS_CGROUP_CPU_PERIOD", "") + defer setTestEnv("ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT", "true")() + additionalLocalRoutesJSON := `["1.2.3.4/22","5.6.7.8/32"]` + setTestEnv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES", additionalLocalRoutesJSON) + setTestEnv("ECS_ENABLE_CONTAINER_METADATA", "true") + setTestEnv("ECS_HOST_DATA_DIR", "/etc/ecs/") + setTestEnv("ECS_CGROUP_CPU_PERIOD", "10ms") + setTestEnv("ECS_VOLUME_PLUGIN_CAPABILITIES", "[\"efsAuth\"]") + + conf, err := environmentConfig() + assert.NoError(t, err) + assert.Equal(t, "myCluster", conf.Cluster) + assert.Equal(t, 2, len(conf.ReservedPortsUDP)) + assert.Contains(t, conf.ReservedPortsUDP, uint16(42)) + assert.Contains(t, conf.ReservedPortsUDP, uint16(99)) + assert.Equal(t, uint16(20), conf.ReservedMemory) + expectedDurationDockerStopTimeout, _ := time.ParseDuration("60s") + assert.Equal(t, expectedDurationDockerStopTimeout, conf.DockerStopTimeout) + expectedDurationContainerStartTimeout, _ := time.ParseDuration("5m") + assert.Equal(t, expectedDurationContainerStartTimeout, conf.ContainerStartTimeout) + expectedDurationContainerCreateTimeout, _ := time.ParseDuration("4m") + assert.Equal(t, expectedDurationContainerCreateTimeout, conf.ContainerCreateTimeout) + assert.Equal(t, []dockerclient.LoggingDriver{dockerclient.SyslogDriver}, conf.AvailableLoggingDrivers) + assert.True(t, conf.PrivilegedDisabled.Enabled()) + assert.True(t, conf.SELinuxCapable.Enabled(), "Wrong value for SELinuxCapable") + assert.True(t, conf.AppArmorCapable.Enabled(), "Wrong value for AppArmorCapable") + assert.True(t, conf.TaskIAMRoleEnabled.Enabled(), "Wrong value for TaskIAMRoleEnabled") + assert.Equal(t, ExplicitlyEnabled, conf.DeleteNonECSImagesEnabled.Value, "Wrong value for DeleteNonECSImagesEnabled") + assert.True(t, conf.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost") + assert.True(t, 
conf.ImageCleanupDisabled.Enabled(), "Wrong value for ImageCleanupDisabled") + assert.True(t, conf.PollMetrics.Enabled(), "Wrong value for PollMetrics") + expectedDurationPollingMetricsWaitDuration, _ := time.ParseDuration("10s") + assert.Equal(t, expectedDurationPollingMetricsWaitDuration, conf.PollingMetricsWaitDuration) + assert.True(t, conf.TaskENIEnabled.Enabled(), "Wrong value for TaskNetwork") + assert.Equal(t, (30 * time.Minute), conf.MinimumImageDeletionAge) + assert.Equal(t, (30 * time.Minute), conf.NonECSMinimumImageDeletionAge) + assert.Equal(t, (2 * time.Hour), conf.ImageCleanupInterval) + assert.Equal(t, 2, conf.NumImagesToDeletePerCycle) + assert.Equal(t, ImagePullAlwaysBehavior, conf.ImagePullBehavior) + assert.Equal(t, "testing", conf.InstanceAttributes["my_attribute"]) + assert.Equal(t, "testing", conf.ContainerInstanceTags["my_tag"]) + assert.Equal(t, (90 * time.Second), conf.TaskCleanupWaitDuration) + serializedAdditionalLocalRoutesJSON, err := json.Marshal(conf.AWSVPCAdditionalLocalRoutes) + assert.NoError(t, err, "should marshal additional local routes") + assert.Equal(t, additionalLocalRoutesJSON, string(serializedAdditionalLocalRoutesJSON)) + assert.Equal(t, "/etc/ecs/", conf.DataDirOnHost, "Wrong value for DataDirOnHost") + assert.True(t, conf.ContainerMetadataEnabled.Enabled(), "Wrong value for ContainerMetadataEnabled") + assert.Equal(t, 1000, conf.TaskMetadataSteadyStateRate) + assert.Equal(t, 1100, conf.TaskMetadataBurstRate) + assert.True(t, conf.SharedVolumeMatchFullConfig.Enabled(), "Wrong value for SharedVolumeMatchFullConfig") + assert.True(t, conf.GPUSupportEnabled, "Wrong value for GPUSupportEnabled") + assert.Equal(t, "nvidia", conf.NvidiaRuntime) + assert.True(t, conf.TaskMetadataAZDisabled, "Wrong value for TaskMetadataAZDisabled") + assert.Equal(t, 10*time.Millisecond, conf.CgroupCPUPeriod) + assert.False(t, conf.SpotInstanceDrainingEnabled.Enabled()) + assert.Equal(t, []string{"efsAuth"}, conf.VolumePluginCapabilities) + 
assert.True(t, conf.DependentContainersPullUpfront.Enabled(), "Wrong value for DependentContainersPullUpfront") +} + +func TestTrimWhitespaceWhenCreating(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CLUSTER", "default \r")() + defer setTestEnv("ECS_ENGINE_AUTH_TYPE", "dockercfg\r")() + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, "default", cfg.Cluster, "Wrong cluster") + assert.Equal(t, "dockercfg", cfg.EngineAuthType, "Wrong auth type") +} + +func TestTrimWhitespace(t *testing.T) { + cfg := &Config{ + Cluster: " asdf ", + AWSRegion: " us-east-1\r\t", + DataDir: "/trailing/space/directory ", + } + + cfg.trimWhitespace() + assert.Equal(t, &Config{Cluster: "asdf", AWSRegion: "us-east-1", DataDir: "/trailing/space/directory "}, cfg) +} + +func TestConfigBoolean(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_DISABLE_DOCKER_HEALTH_CHECK", "true")() + defer setTestEnv("ECS_DISABLE_METRICS", "true")() + defer setTestEnv("ECS_ENABLE_SPOT_INSTANCE_DRAINING", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.DisableMetrics.Enabled()) + assert.True(t, cfg.DisableDockerHealthCheck.Enabled()) + assert.True(t, cfg.SpotInstanceDrainingEnabled.Enabled()) +} + +func TestBadLoggingDriverSerialization(t *testing.T) { + defer setTestEnv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\"malformed]") + defer setTestRegion()() + conf, err := environmentConfig() + assert.NoError(t, err) + assert.Zero(t, len(conf.AvailableLoggingDrivers), "Wrong value for AvailableLoggingDrivers") +} + +func TestBadAttributesSerialization(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_INSTANCE_ATTRIBUTES", "This is not valid JSON")() + _, err := environmentConfig() + assert.Error(t, err) +} + +func TestBadTagsSerialization(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_INSTANCE_TAGS", "This is not valid 
JSON")() + _, err := environmentConfig() + assert.Error(t, err) +} + +func TestInvalidLoggingDriver(t *testing.T) { + conf := DefaultConfig() + conf.AWSRegion = "us-west-2" + conf.AvailableLoggingDrivers = []dockerclient.LoggingDriver{"invalid-logging-driver"} + assert.Error(t, conf.validateAndOverrideBounds(), "Should be error with invalid-logging-driver") +} + +func TestAwsFirelensLoggingDriver(t *testing.T) { + conf := DefaultConfig() + conf.AWSRegion = "us-west-2" + conf.AvailableLoggingDrivers = []dockerclient.LoggingDriver{"awsfirelens"} + assert.NoError(t, conf.validateAndOverrideBounds(), "awsfirelens is a valid logging driver, no error was expected") +} + +func TestDefaultPollMetricsWithoutECSDataDir(t *testing.T) { + conf, err := environmentConfig() + assert.NoError(t, err) + assert.False(t, conf.PollMetrics.Enabled()) +} + +func TestDefaultCheckpointWithoutECSDataDir(t *testing.T) { + conf, err := environmentConfig() + assert.NoError(t, err) + assert.False(t, conf.Checkpoint.Enabled()) +} + +func TestDefaultCheckpointWithECSDataDir(t *testing.T) { + defer setTestEnv("ECS_DATADIR", "/some/dir")() + conf, err := environmentConfig() + assert.NoError(t, err) + assert.True(t, conf.Checkpoint.Enabled()) +} + +func TestCheckpointWithoutECSDataDir(t *testing.T) { + defer setTestEnv("ECS_CHECKPOINT", "true")() + conf, err := environmentConfig() + assert.NoError(t, err) + assert.True(t, conf.Checkpoint.Enabled()) +} + +func TestInvalidFormatDockerStopTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "invalid")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, conf.DockerStopTimeout, defaultDockerStopTimeout, "Wrong value for DockerStopTimeout") +} + +func TestZeroValueDockerStopTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "0s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + 
assert.NoError(t, err) + assert.Equal(t, defaultDockerStopTimeout, conf.DockerStopTimeout, "Wrong value for DockerStopTimeout") +} + +func TestInvalidValueDockerStopTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_STOP_TIMEOUT", "-10s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumDockerStopTimeout, conf.DockerStopTimeout, "Wrong value for DockerStopTimeout") +} + +func TestInvalidFormatContainerStartTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_START_TIMEOUT", "invalid")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultContainerStartTimeout, conf.ContainerStartTimeout, "Wrong value for ContainerStartTimeout") +} + +func TestInvalidFormatContainerCreateTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_CREATE_TIMEOUT", "invalid")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultContainerCreateTimeout, conf.ContainerCreateTimeout, "Wrong value for ContainerCreateTimeout") +} + +func TestInvalidFormatDockerInactivityTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_PULL_INACTIVITY_TIMEOUT", "invalid")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultImagePullInactivityTimeout, conf.ImagePullInactivityTimeout, "Wrong value for ImagePullInactivityTimeout") +} + +func TestTooSmallDockerInactivityTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_PULL_INACTIVITY_TIMEOUT", "5s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumImagePullInactivityTimeout, conf.ImagePullInactivityTimeout, "Wrong value for ImagePullInactivityTimeout") +} + +func TestNegativeValueDockerInactivityTimeout(t 
*testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_PULL_INACTIVITY_TIMEOUT", "-10s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumImagePullInactivityTimeout, conf.ImagePullInactivityTimeout, "Wrong value for ImagePullInactivityTimeout") +} + +// Zero is also how the config api handles 'bad' values... so we get a 'default' and not a minimum +func TestZeroValueContainerStartTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_START_TIMEOUT", "0s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultContainerStartTimeout, conf.ContainerStartTimeout, "Wrong value for ContainerStartTimeout") +} + +func TestZeroValueContainerCreateTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_CREATE_TIMEOUT", "0s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultContainerCreateTimeout, conf.ContainerCreateTimeout, "Wrong value for ContainerCreateTimeout") +} + +func TestInvalidValueContainerStartTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_START_TIMEOUT", "-10s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumContainerStartTimeout, conf.ContainerStartTimeout, "Wrong value for ContainerStartTimeout") +} + +func TestInvalidValueContainerCreateTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_CREATE_TIMEOUT", "-10s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumContainerCreateTimeout, conf.ContainerCreateTimeout, "Wrong value for ContainerCreataeTimeout") +} + +func TestZeroValueDockerPullInactivityTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_DOCKER_PULL_INACTIVITY_TIMEOUT", 
"0s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultImagePullInactivityTimeout, conf.ImagePullInactivityTimeout, "Wrong value for ImagePullInactivityTimeout") +} + +func TestInvalidValueDockerPullInactivityTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_DOCKER_PULL_INACTIVITY_TIMEOUT", "-10s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, defaultImagePullInactivityTimeout, conf.ImagePullInactivityTimeout, "Wrong value for ImagePullInactivityTimeout") +} + +func TestInvalidValueMaxPollingMetricsWaitDuration(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_POLL_METRICS", "true")() + defer setTestEnv("ECS_POLLING_METRICS_WAIT_DURATION", "21s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, maximumPollingMetricsWaitDuration, conf.PollingMetricsWaitDuration, "Wrong value for PollingMetricsWaitDuration") +} + +func TestInvalidValueMinPollingMetricsWaitDuration(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_POLL_METRICS", "true")() + defer setTestEnv("ECS_POLLING_METRICS_WAIT_DURATION", "1s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, minimumPollingMetricsWaitDuration, conf.PollingMetricsWaitDuration, "Wrong value for PollingMetricsWaitDuration") +} + +func TestInvalidValuePollingMetricsWaitDuration(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_POLL_METRICS", "true")() + defer setTestEnv("ECS_POLLING_METRICS_WAIT_DURATION", "0s")() + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, DefaultPollingMetricsWaitDuration, conf.PollingMetricsWaitDuration, "Wrong value for PollingMetricsWaitDuration") +} + +func TestInvalidFormatParseEnvVariableUint16(t *testing.T) { + defer setTestRegion()() + 
setTestEnv("FOO", "foo") + var16 := parseEnvVariableUint16("FOO") + assert.Zero(t, var16, "Expected 0 from parseEnvVariableUint16 for invalid Uint16 format") +} + +func TestValidFormatParseEnvVariableUint16(t *testing.T) { + defer setTestRegion()() + setTestEnv("FOO", "1") + var16 := parseEnvVariableUint16("FOO") + assert.Equal(t, uint16(1), var16, "Unexpected value parsed in parseEnvVariableUint16.") +} + +func TestInvalidFormatParseEnvVariableDuration(t *testing.T) { + defer setTestRegion()() + setTestEnv("FOO", "foo") + duration := parseEnvVariableDuration("FOO") + assert.Zero(t, duration, "Expected 0 from parseEnvVariableDuration for invalid format") +} + +func TestValidForImagesCleanupExclusion(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_EXCLUDE_UNTRACKED_IMAGE", "amazonlinux:2,amazonlinux:3")() + imagesNotDelete := parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE") + expectedImages := []string{"amazonlinux:2", "amazonlinux:3"} + assert.Equal(t, expectedImages, imagesNotDelete, "unexpected imageCleanupExclusionList") +} + +func TestValidFormatParseEnvVariableDuration(t *testing.T) { + defer setTestRegion()() + setTestEnv("FOO", "1s") + duration := parseEnvVariableDuration("FOO") + assert.Equal(t, 1*time.Second, duration, "Unexpected value parsed in parseEnvVariableDuration.") +} + +func TestInvalidTaskCleanupTimeoutOverridesToThreeHours(t *testing.T) { + defer setTestRegion()() + setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "1s") + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + + // If an invalid value is set, the config should pick up the default value for + // cleaning up the task. 
+ assert.Equal(t, 3*time.Hour, cfg.TaskCleanupWaitDuration, "Default task cleanup wait duration set incorrectly") +} + +func TestTaskCleanupTimeout(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "10m")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, 10*time.Minute, cfg.TaskCleanupWaitDuration, "Task cleanup wait duration set incorrectly") +} + +func TestInvalidReservedMemoryOverridesToZero(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_RESERVED_MEMORY", "-1")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + // If an invalid value is set, the config should pick up the default value for + // reserved memory, which is 0. + assert.Zero(t, cfg.ReservedMemory, "Wrong value for ReservedMemory") +} + +func TestReservedMemory(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_RESERVED_MEMORY", "1")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, uint16(1), cfg.ReservedMemory, "Wrong value for ReservedMemory.") +} + +func TestTaskIAMRoleEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.TaskIAMRoleEnabled.Enabled(), "Wrong value for TaskIAMRoleEnabled") +} + +func TestDeleteNonECSImagesEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, ExplicitlyEnabled, cfg.DeleteNonECSImagesEnabled.Value, "Wrong value for DeleteNonECSImagesEnabled") +} + +func TestTaskIAMRoleForHostNetworkEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")() + cfg, err := 
NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost") +} + +func TestCredentialsAuditLogFile(t *testing.T) { + defer setTestRegion()() + dummyLocation := "/foo/bar.log" + defer setTestEnv("ECS_AUDIT_LOGFILE", dummyLocation)() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, dummyLocation, cfg.CredentialsAuditLogFile, "Wrong value for CredentialsAuditLogFile") +} + +func TestCredentialsAuditLogDisabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_AUDIT_LOGFILE_DISABLED", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.CredentialsAuditLogDisabled, "Wrong value for CredentialsAuditLogDisabled") +} + +func TestImageCleanupMinimumInterval(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_CLEANUP_INTERVAL", "1m")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, DefaultImageCleanupTimeInterval, cfg.ImageCleanupInterval, "Wrong value for ImageCleanupInterval") +} + +func TestImageCleanupMinimumNumImagesToDeletePerCycle(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "-1")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, DefaultNumImagesToDeletePerCycle, cfg.NumImagesToDeletePerCycle, "Wrong value for NumImagesToDeletePerCycle") +} + +func TestInvalidImagePullBehavior(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_PULL_BEHAVIOR", "invalid")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, ImagePullDefaultBehavior, cfg.ImagePullBehavior, "Wrong value for ImagePullBehavior") +} + +func TestSharedVolumeMatchFullConfigEnabled(t *testing.T) { + defer 
setTestRegion()() + defer setTestEnv("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.SharedVolumeMatchFullConfig.Enabled(), "Wrong value for SharedVolumeMatchFullConfig") +} + +func TestParseImagePullBehavior(t *testing.T) { + testcases := []struct { + name string + envVarVal string + expectedImagePullBehavior ImagePullBehaviorType + }{ + { + name: "default agent behavior", + envVarVal: "default", + expectedImagePullBehavior: ImagePullDefaultBehavior, + }, + { + name: "always agent behavior", + envVarVal: "always", + expectedImagePullBehavior: ImagePullAlwaysBehavior, + }, + { + name: "once agent behavior", + envVarVal: "once", + expectedImagePullBehavior: ImagePullOnceBehavior, + }, + { + name: "prefer-cached agent behavior", + envVarVal: "prefer-cached", + expectedImagePullBehavior: ImagePullPreferCachedBehavior, + }, + { + name: "invalid agent behavior", + envVarVal: "invalid", + expectedImagePullBehavior: ImagePullDefaultBehavior, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_IMAGE_PULL_BEHAVIOR", tc.envVarVal)() + assert.Equal(t, parseImagePullBehavior(), tc.expectedImagePullBehavior, "Wrong value for ImagePullBehavior") + }) + } +} + +func TestTaskResourceLimitsOverride(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_CPU_MEM_LIMIT", "false")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.False(t, cfg.TaskCPUMemLimit.Enabled(), "Task cpu and memory limits should be overridden to false") + assert.Equal(t, ExplicitlyDisabled, cfg.TaskCPUMemLimit.Value, "Task cpu and memory limits should be explicitly set") +} + +func TestAWSVPCBlockInstanceMetadata(t *testing.T) { + defer setTestEnv("ECS_AWSVPC_BLOCK_IMDS", "true")() + defer setTestRegion()() + cfg, err := 
NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.AWSVPCBlockInstanceMetdata.Enabled()) +} + +func TestInvalidAWSVPCAdditionalLocalRoutes(t *testing.T) { + os.Setenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES", `["300.300.300.300/64"]`) + defer os.Unsetenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES") + _, err := environmentConfig() + assert.Error(t, err) +} + +func TestAWSLogsExecutionRole(t *testing.T) { + setTestEnv("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE", "true") + conf, err := environmentConfig() + assert.NoError(t, err) + assert.True(t, conf.OverrideAWSLogsExecutionRole.Enabled()) +} + +func TestTaskMetadataRPSLimits(t *testing.T) { + testCases := []struct { + name string + envVarVal string + expectedSteadyStateRate int + expectedBurstRate int + }{ + { + name: "negative limit values", + envVarVal: "-10,-10", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "negative limit,valid burst", + envVarVal: "-10,10", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "missing limit,valid burst", + envVarVal: " ,10", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "valid limit,missing burst", + envVarVal: "10,", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "empty variable", + envVarVal: "", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "missing burst", + envVarVal: "10", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "more than expected values", + envVarVal: "10,10,10", + expectedSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + 
expectedBurstRate: DefaultTaskMetadataBurstRate, + }, + { + name: "values with spaces", + envVarVal: " 10 ,5 ", + expectedSteadyStateRate: 10, + expectedBurstRate: 5, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + defer setTestEnv("ECS_TASK_METADATA_RPS_LIMIT", tc.envVarVal)() + defer setTestRegion()() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, tc.expectedSteadyStateRate, cfg.TaskMetadataSteadyStateRate) + assert.Equal(t, tc.expectedBurstRate, cfg.TaskMetadataBurstRate) + }) + } +} + +func TestUserDataConfig(t *testing.T) { + testcases := []struct { + name string + userDataResponse string + userDataResponseError error + expectedConfigCluster string + expectedConfigAPIEndpoint string + shouldFail bool + }{ + { + name: "successful consume userdata config", + userDataResponse: `{ "ECSAgentConfiguration":{ + "Cluster":"arn:aws:ecs:us-east-1:123456789012:cluster/my-cluster", + "APIEndpoint":"https://some-endpoint.com" + } + }`, + userDataResponseError: nil, + expectedConfigCluster: "arn:aws:ecs:us-east-1:123456789012:cluster/my-cluster", + expectedConfigAPIEndpoint: "https://some-endpoint.com", + }, + { + name: "returns errors retrieving ec2 userdata", + userDataResponse: "", + userDataResponseError: errors.New("failed to get userdata"), + expectedConfigCluster: "", + expectedConfigAPIEndpoint: "", + }, + { + name: "returns error, failed to parse json", + userDataResponse: `{{{ "ECSAgentConfiguration":{ + "Cluster":"arn:aws:ecs:us-east-1:123456789012:cluster/my-cluster", + "APIEndpoint":"https://some-endpoint.com" + } + }`, + userDataResponseError: nil, + expectedConfigCluster: "", + expectedConfigAPIEndpoint: "", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl) + mockEc2Metadata.EXPECT().GetUserData().Return(tc.userDataResponse, 
tc.userDataResponseError) + cfg := userDataConfig(mockEc2Metadata) + assert.Equal(t, tc.expectedConfigAPIEndpoint, cfg.APIEndpoint) + assert.Equal(t, tc.expectedConfigCluster, cfg.Cluster) + }) + } +} + +func TestContainerInstancePropagateTagsFrom(t *testing.T) { + testcases := []struct { + name string + envVarVal string + expectedContainerInstancePropagateTagsFrom ContainerInstancePropagateTagsFromType + }{ + { + name: "none container instance propagate tags", + envVarVal: "none", + expectedContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromNoneType, + }, + { + name: "ec2_instance container instance propagate tags", + envVarVal: "ec2_instance", + expectedContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromEC2InstanceType, + }, + { + name: "invalid container instance propagate tags", + envVarVal: "none", + expectedContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromNoneType, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM", tc.envVarVal)() + + // Test the parse function only. 
+ assert.Equal(t, parseContainerInstancePropagateTagsFrom(), tc.expectedContainerInstancePropagateTagsFrom, + "Wrong value from parseContainerInstancePropagateTagsFrom") + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, cfg.ContainerInstancePropagateTagsFrom, tc.expectedContainerInstancePropagateTagsFrom, + "Wrong value for ContainerInstancePropagateTagsFrom") + }) + } +} + +func TestGPUSupportEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_GPU_SUPPORT", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.GPUSupportEnabled, "Wrong value for GPUSupportEnabled") +} + +func TestInferentiaSupportEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_INF_SUPPORT", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.InferentiaSupportEnabled, "Wrong value for InferentiaSupportEnabled") +} + +func TestTaskMetadataAZDisabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_DISABLE_TASK_METADATA_AZ", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.True(t, cfg.TaskMetadataAZDisabled, "Wrong value for TaskMetadataAZDisabled") +} + +func setTestRegion() func() { + return setTestEnv("AWS_DEFAULT_REGION", "us-west-2") +} + +func setTestEnv(k, v string) func() { + os.Setenv(k, v) + return func() { + os.Unsetenv(k) + } +} diff --git a/agent/config/config_unix.go b/agent/config/config_unix.go new file mode 100644 index 00000000000..5eaac5deeef --- /dev/null +++ b/agent/config/config_unix.go @@ -0,0 +1,119 @@ +// +build !windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "fmt" + "os" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +const ( + // AgentCredentialsAddress is used to serve the credentials for tasks. + AgentCredentialsAddress = "" // this is left blank right now for net=bridge + // defaultAuditLogFile specifies the default audit log filename + defaultCredentialsAuditLogFile = "/log/audit.log" + // DefaultTaskCgroupPrefix is default cgroup prefix for ECS tasks + DefaultTaskCgroupPrefix = "/ecs" + + // Default cgroup memory system root path, this is the default used if the + // path has not been configured through ECS_CGROUP_PATH + defaultCgroupPath = "/sys/fs/cgroup" + // defaultContainerStartTimeout specifies the value for container start timeout duration + defaultContainerStartTimeout = 3 * time.Minute + // minimumContainerStartTimeout specifies the minimum value for starting a container + minimumContainerStartTimeout = 45 * time.Second + // defaultContainerCreateTimeout specifies the value for container create timeout duration + defaultContainerCreateTimeout = 4 * time.Minute + // minimumContainerCreateTimeout specifies the minimum value for creating a container + minimumContainerCreateTimeout = 1 * time.Minute + // default docker inactivity time is extra time needed on container extraction + defaultImagePullInactivityTimeout = 1 * time.Minute +) + +// DefaultConfig returns the default configuration for Linux +func DefaultConfig() Config { + return Config{ + DockerEndpoint: "unix:///var/run/docker.sock", + ReservedPorts: []uint16{SSHPort, 
DockerReservedPort, DockerReservedSSLPort, AgentIntrospectionPort, AgentCredentialsPort}, + ReservedPortsUDP: []uint16{}, + DataDir: "/data/", + DataDirOnHost: "/var/lib/ecs", + DisableMetrics: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + ReservedMemory: 0, + AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JSONFileDriver, dockerclient.NoneDriver}, + TaskCleanupWaitDuration: DefaultTaskCleanupWaitDuration, + DockerStopTimeout: defaultDockerStopTimeout, + ContainerStartTimeout: defaultContainerStartTimeout, + ContainerCreateTimeout: defaultContainerCreateTimeout, + DependentContainersPullUpfront: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + CredentialsAuditLogFile: defaultCredentialsAuditLogFile, + CredentialsAuditLogDisabled: false, + ImageCleanupDisabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + MinimumImageDeletionAge: DefaultImageDeletionAge, + NonECSMinimumImageDeletionAge: DefaultNonECSImageDeletionAge, + ImageCleanupInterval: DefaultImageCleanupTimeInterval, + ImagePullInactivityTimeout: defaultImagePullInactivityTimeout, + ImagePullTimeout: DefaultImagePullTimeout, + NumImagesToDeletePerCycle: DefaultNumImagesToDeletePerCycle, + NumNonECSContainersToDeletePerCycle: DefaultNumNonECSContainersToDeletePerCycle, + CNIPluginsPath: defaultCNIPluginsPath, + PauseContainerTarballPath: pauseContainerTarballPath, + PauseContainerImageName: DefaultPauseContainerImageName, + PauseContainerTag: DefaultPauseContainerTag, + AWSVPCBlockInstanceMetdata: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + ContainerMetadataEnabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + TaskCPUMemLimit: BooleanDefaultTrue{Value: NotSet}, + CgroupPath: defaultCgroupPath, + TaskMetadataSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + TaskMetadataBurstRate: DefaultTaskMetadataBurstRate, + SharedVolumeMatchFullConfig: BooleanDefaultFalse{Value: ExplicitlyDisabled}, // only requiring shared volumes to match on name, which is default docker 
behavior + ContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromNoneType, + PrometheusMetricsEnabled: false, + PollMetrics: BooleanDefaultFalse{Value: NotSet}, + PollingMetricsWaitDuration: DefaultPollingMetricsWaitDuration, + NvidiaRuntime: DefaultNvidiaRuntime, + CgroupCPUPeriod: defaultCgroupCPUPeriod, + GMSACapable: false, + FSxWindowsFileServerCapable: false, + } +} + +func (cfg *Config) platformOverrides() { + cfg.PrometheusMetricsEnabled = utils.ParseBool(os.Getenv("ECS_ENABLE_PROMETHEUS_METRICS"), false) + if cfg.PrometheusMetricsEnabled { + cfg.ReservedPorts = append(cfg.ReservedPorts, AgentPrometheusExpositionPort) + } + + if cfg.TaskENIEnabled.Enabled() { // when task networking is enabled, eni trunking is enabled by default + cfg.ENITrunkingEnabled = parseBooleanDefaultTrueConfig("ECS_ENABLE_HIGH_DENSITY_ENI") + } +} + +// platformString returns platform-specific config data that can be serialized +// to string for debugging +func (cfg *Config) platformString() string { + // Returns a string if the default image name/tag of the Pause container has + // been overridden + if cfg.PauseContainerImageName == DefaultPauseContainerImageName && + cfg.PauseContainerTag == DefaultPauseContainerTag { + return fmt.Sprintf(", PauseContainerImageName: %s, PauseContainerTag: %s", + cfg.PauseContainerImageName, cfg.PauseContainerTag) + } + return "" +} diff --git a/agent/config/config_unix_test.go b/agent/config/config_unix_test.go new file mode 100644 index 00000000000..35252623465 --- /dev/null +++ b/agent/config/config_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/ec2" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConfigDefault(t *testing.T) { + defer setTestRegion()() + os.Unsetenv("ECS_HOST_DATA_DIR") + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + require.NoError(t, err) + + assert.Equal(t, "unix:///var/run/docker.sock", cfg.DockerEndpoint, "Default docker endpoint set incorrectly") + assert.Equal(t, "/data/", cfg.DataDir, "Default datadir set incorrectly") + assert.False(t, cfg.DisableMetrics.Enabled(), "Default disablemetrics set incorrectly") + assert.Equal(t, 5, len(cfg.ReservedPorts), "Default reserved ports set incorrectly") + assert.Equal(t, uint16(0), cfg.ReservedMemory, "Default reserved memory set incorrectly") + assert.Equal(t, 30*time.Second, cfg.DockerStopTimeout, "Default docker stop container timeout set incorrectly") + assert.Equal(t, 3*time.Minute, cfg.ContainerStartTimeout, "Default docker start container timeout set incorrectly") + assert.Equal(t, 4*time.Minute, cfg.ContainerCreateTimeout, "Default docker create container timeout set incorrectly") + assert.False(t, cfg.PrivilegedDisabled.Enabled(), "Default PrivilegedDisabled set incorrectly") + assert.Equal(t, []dockerclient.LoggingDriver{dockerclient.JSONFileDriver, dockerclient.NoneDriver}, + cfg.AvailableLoggingDrivers, "Default logging drivers set incorrectly") + assert.Equal(t, 3*time.Hour, cfg.TaskCleanupWaitDuration, "Default task cleanup wait duration set incorrectly") + assert.False(t, cfg.TaskENIEnabled.Enabled(), 
"TaskENIEnabled set incorrectly") + assert.False(t, cfg.TaskIAMRoleEnabled.Enabled(), "TaskIAMRoleEnabled set incorrectly") + assert.False(t, cfg.TaskIAMRoleEnabledForNetworkHost, "TaskIAMRoleEnabledForNetworkHost set incorrectly") + assert.Equal(t, NotSet, cfg.TaskCPUMemLimit.Value, "TaskCPUMemLimit should be NotSet") + assert.False(t, cfg.CredentialsAuditLogDisabled, "CredentialsAuditLogDisabled set incorrectly") + assert.Equal(t, defaultCredentialsAuditLogFile, cfg.CredentialsAuditLogFile, "CredentialsAuditLogFile is set incorrectly") + assert.False(t, cfg.ImageCleanupDisabled.Enabled(), "ImageCleanupDisabled default is set incorrectly") + assert.Equal(t, DefaultImageDeletionAge, cfg.MinimumImageDeletionAge, "MinimumImageDeletionAge default is set incorrectly") + assert.Equal(t, DefaultNonECSImageDeletionAge, cfg.NonECSMinimumImageDeletionAge, "NonECSMinimumImageDeletionAge default is set incorrectly") + assert.Equal(t, DefaultImageCleanupTimeInterval, cfg.ImageCleanupInterval, "ImageCleanupInterval default is set incorrectly") + assert.Equal(t, DefaultNumImagesToDeletePerCycle, cfg.NumImagesToDeletePerCycle, "NumImagesToDeletePerCycle default is set incorrectly") + assert.Equal(t, defaultCNIPluginsPath, cfg.CNIPluginsPath, "CNIPluginsPath default is set incorrectly") + assert.False(t, cfg.AWSVPCBlockInstanceMetdata.Enabled(), "AWSVPCBlockInstanceMetdata default is incorrectly set") + assert.Equal(t, "/var/lib/ecs", cfg.DataDirOnHost, "Default DataDirOnHost set incorrectly") + assert.Equal(t, DefaultTaskMetadataSteadyStateRate, cfg.TaskMetadataSteadyStateRate, + "Default TaskMetadataSteadyStateRate is set incorrectly") + assert.Equal(t, DefaultTaskMetadataBurstRate, cfg.TaskMetadataBurstRate, + "Default TaskMetadataBurstRate is set incorrectly") + assert.False(t, cfg.SharedVolumeMatchFullConfig.Enabled(), "Default SharedVolumeMatchFullConfig set incorrectly") + assert.Equal(t, defaultCgroupCPUPeriod, cfg.CgroupCPUPeriod, "CFS cpu period set incorrectly") + 
assert.Equal(t, DefaultImagePullTimeout, cfg.ImagePullTimeout, "Default ImagePullTimeout set incorrectly") + assert.False(t, cfg.DependentContainersPullUpfront.Enabled(), "Default DependentContainersPullUpfront set incorrectly") + assert.False(t, cfg.PollMetrics.Enabled(), "ECS_POLL_METRICS default should be false") +} + +// TestConfigFromFile tests the configuration can be read from file +func TestConfigFromFile(t *testing.T) { + cluster := "TestCluster" + dockerAuthType := "dockercfg" + dockerAuth := `{ + "https://index.docker.io/v1/":{ + "auth":"admin", + "email":"email" + } +}` + testPauseImageName := "pause-image-name" + testPauseTag := "pause-image-tag" + content := fmt.Sprintf(`{ + "AWSRegion": "not-real-1", + "Cluster": "%s", + "EngineAuthType": "%s", + "EngineAuthData": %s, + "DataDir": "/var/run/ecs_agent", + "TaskIAMRoleEnabled": true, + "TaskCPUMemLimit": true, + "InstanceAttributes": { + "attribute1": "value1" + }, + "ContainerInstanceTags": { + "tag1": "value1" + }, + "PauseContainerImageName":"%s", + "PauseContainerTag":"%s", + "AWSVPCAdditionalLocalRoutes":["169.254.172.1/32"] +}`, cluster, dockerAuthType, dockerAuth, testPauseImageName, testPauseTag) + + filePath := setupFileConfiguration(t, content) + defer os.Remove(filePath) + + defer setTestEnv("ECS_AGENT_CONFIG_FILE_PATH", filePath)() + defer setTestEnv("AWS_DEFAULT_REGION", "us-west-2")() + + cfg, err := fileConfig() + assert.NoError(t, err, "reading configuration from file failed") + + assert.Equal(t, cluster, cfg.Cluster, "cluster name not as expected from file") + assert.Equal(t, dockerAuthType, cfg.EngineAuthType, "docker auth type not as expected from file") + assert.Equal(t, dockerAuth, string(cfg.EngineAuthData.Contents()), "docker auth data not as expected from file") + assert.Equal(t, map[string]string{"attribute1": "value1"}, cfg.InstanceAttributes) + assert.Equal(t, map[string]string{"tag1": "value1"}, cfg.ContainerInstanceTags) + assert.Equal(t, testPauseImageName, 
cfg.PauseContainerImageName, "should read PauseContainerImageName") + assert.Equal(t, testPauseTag, cfg.PauseContainerTag, "should read PauseContainerTag") + assert.Equal(t, 1, len(cfg.AWSVPCAdditionalLocalRoutes), "should have one additional local route") + expectedLocalRoute, err := cnitypes.ParseCIDR("169.254.172.1/32") + assert.NoError(t, err) + assert.Equal(t, expectedLocalRoute.IP, cfg.AWSVPCAdditionalLocalRoutes[0].IP, "should match expected route IP") + assert.Equal(t, expectedLocalRoute.Mask, cfg.AWSVPCAdditionalLocalRoutes[0].Mask, "should match expected route Mask") + assert.Equal(t, ExplicitlyEnabled, cfg.TaskCPUMemLimit.Value, "TaskCPUMemLimit should be explicitly enabled") +} + +// TestDockerAuthMergeFromFile tests docker auth read from file correctly after merge +func TestDockerAuthMergeFromFile(t *testing.T) { + cluster := "myCluster" + dockerAuthType := "dockercfg" + dockerAuth := `{ + "https://index.docker.io/v1/":{ + "auth":"admin", + "email":"email" + } +}` + content := fmt.Sprintf(`{ + "AWSRegion": "not-real-1", + "Cluster": "TestCluster", + "EngineAuthType": "%s", + "EngineAuthData": %s, + "DataDir": "/var/run/ecs_agent", + "TaskIAMRoleEnabled": true, + "InstanceAttributes": { + "attribute1": "value1" + }, + "ContainerInstanceTags": { + "tag1": "value1" + } +}`, dockerAuthType, dockerAuth) + + filePath := setupFileConfiguration(t, content) + defer os.Remove(filePath) + + defer setTestEnv("ECS_CLUSTER", cluster)() + defer setTestEnv("ECS_AGENT_CONFIG_FILE_PATH", filePath)() + defer setTestEnv("AWS_DEFAULT_REGION", "us-west-2")() + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err, "create configuration failed") + + assert.Equal(t, cluster, cfg.Cluster, "cluster name not as expected from environment variable") + assert.Equal(t, dockerAuthType, cfg.EngineAuthType, "docker auth type not as expected from file") + assert.Equal(t, dockerAuth, string(cfg.EngineAuthData.Contents()), "docker auth data not as expected 
from file") + assert.Equal(t, map[string]string{"attribute1": "value1"}, cfg.InstanceAttributes) + assert.Equal(t, map[string]string{"tag1": "value1"}, cfg.ContainerInstanceTags) +} + +func TestBadFileContent(t *testing.T) { + content := `{ + "AWSRegion": "not-real-1", + "AWSVPCAdditionalLocalRoutes":["169.254.172.1/32", "300.300.300.300/32", "foo"] + }` + + filePath := setupFileConfiguration(t, content) + defer os.Remove(filePath) + + os.Setenv("ECS_AGENT_CONFIG_FILE_PATH", filePath) + defer os.Unsetenv("ECS_AGENT_CONFIG_FILE_PATH") + + _, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.Error(t, err, "create configuration should fail") +} + +func TestPrometheusMetricsPlatformOverrides(t *testing.T) { + defer setTestRegion()() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + require.NoError(t, err) + + defer setTestEnv("ECS_ENABLE_PROMETHEUS_METRICS", "true")() + cfg.platformOverrides() + assert.True(t, cfg.PrometheusMetricsEnabled, "Prometheus metrics should be enabled") + assert.Equal(t, 6, len(cfg.ReservedPorts), "Reserved ports should have added Prometheus endpoint") +} + +// TestENITrunkingEnabled tests that when task networking is enabled, eni trunking is enabled by default +func TestENITrunkingEnabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_ENI", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + require.NoError(t, err) + + cfg.platformOverrides() + assert.True(t, cfg.ENITrunkingEnabled.Enabled(), "ENI trunking should be enabled") +} + +// TestENITrunkingDisabled tests that when task networking is enabled, eni trunking can be disabled +func TestENITrunkingDisabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_ENI", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + require.NoError(t, err) + + defer setTestEnv("ECS_ENABLE_HIGH_DENSITY_ENI", "false")() + cfg.platformOverrides() + assert.False(t, 
cfg.ENITrunkingEnabled.Enabled(), "ENI trunking should be disabled") +} + +// setupFileConfiguration create a temp file store the configuration +func setupFileConfiguration(t *testing.T, configContent string) string { + file, err := ioutil.TempFile("", "ecs-test") + require.NoError(t, err, "creating temp file for configuration failed") + + _, err = file.Write([]byte(configContent)) + require.NoError(t, err, "writing configuration to file failed") + + return file.Name() +} + +func TestEmptyNvidiaRuntime(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_NVIDIA_RUNTIME", "")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, DefaultNvidiaRuntime, cfg.NvidiaRuntime, "Wrong value for NvidiaRuntime") +} + +func TestCPUPeriodSettings(t *testing.T) { + cases := []struct { + Name string + Env string + Response time.Duration + }{ + { + Name: "OverrideDefaultCPUPeriod", + Env: "10ms", + Response: 10 * time.Millisecond, + }, + { + Name: "DefaultCPUPeriod", + Env: "", + Response: defaultCgroupCPUPeriod, + }, + { + Name: "TestCPUPeriodUpperBoundLimit", + Env: "110ms", + Response: defaultCgroupCPUPeriod, + }, + { + Name: "TestCPUPeriodLowerBoundLimit", + Env: "7ms", + Response: defaultCgroupCPUPeriod, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + defer setTestRegion()() + defer os.Setenv("ECS_CGROUP_CPU_PERIOD", "100ms") + + os.Setenv("ECS_CGROUP_CPU_PERIOD", c.Env) + conf, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + + assert.NoError(t, err) + assert.Equal(t, c.Response, conf.CgroupCPUPeriod, "Wrong value for CgroupCPUPeriod") + }) + } +} diff --git a/agent/config/config_windows.go b/agent/config/config_windows.go new file mode 100644 index 00000000000..3c09c265fc6 --- /dev/null +++ b/agent/config/config_windows.go @@ -0,0 +1,146 @@ +// +build windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "os" + "path/filepath" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +const ( + // AgentCredentialsAddress is used to serve the credentials for tasks. + AgentCredentialsAddress = "127.0.0.1" + + // defaultAuditLogFile specifies the default audit log filename + defaultCredentialsAuditLogFile = `log\audit.log` + // When using IAM roles for tasks on Windows, the credential proxy consumes port 80 + httpPort = 80 + // Remote Desktop / Terminal Services + rdpPort = 3389 + // RPC client + rpcPort = 135 + // Server Message Block (SMB) over TCP + smbPort = 445 + // HTTP port for Windows Remote Management (WinRM) listener + winRMPortHTTP = 5985 + // HTTPS port for Windows Remote Management (WinRM) listener + winRMPortHTTPS = 5986 + // DNS client + dnsPort = 53 + // NetBIOS over TCP/IP + netBIOSPort = 139 + // defaultContainerStartTimeout specifies the value for container start timeout duration + defaultContainerStartTimeout = 8 * time.Minute + // minimumContainerStartTimeout specifies the minimum value for starting a container + minimumContainerStartTimeout = 2 * time.Minute + // defaultContainerCreateTimeout specifies the value for container create timeout duration + defaultContainerCreateTimeout = 4 * time.Minute + // minimumContainerCreateTimeout specifies the minimum value for creating a container + minimumContainerCreateTimeout = 1 * time.Minute + // default image 
pull inactivity time is extra time needed on container extraction + defaultImagePullInactivityTimeout = 3 * time.Minute +) + +// DefaultConfig returns the default configuration for Windows +func DefaultConfig() Config { + programData := utils.DefaultIfBlank(os.Getenv("ProgramData"), `C:\ProgramData`) + ecsRoot := filepath.Join(programData, "Amazon", "ECS") + dataDir := filepath.Join(ecsRoot, "data") + platformVariables := PlatformVariables{ + CPUUnbounded: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + MemoryUnbounded: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + } + return Config{ + DockerEndpoint: "npipe:////./pipe/docker_engine", + ReservedPorts: []uint16{ + DockerReservedPort, + DockerReservedSSLPort, + AgentIntrospectionPort, + AgentCredentialsPort, + rdpPort, + rpcPort, + smbPort, + winRMPortHTTP, + winRMPortHTTPS, + dnsPort, + netBIOSPort, + }, + ReservedPortsUDP: []uint16{}, + DataDir: dataDir, + // DataDirOnHost is identical to DataDir for Windows because we do not + // run as a container + DataDirOnHost: dataDir, + ReservedMemory: 0, + AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JSONFileDriver, dockerclient.NoneDriver, dockerclient.AWSLogsDriver}, + TaskCleanupWaitDuration: DefaultTaskCleanupWaitDuration, + DockerStopTimeout: defaultDockerStopTimeout, + ContainerStartTimeout: defaultContainerStartTimeout, + ContainerCreateTimeout: defaultContainerCreateTimeout, + DependentContainersPullUpfront: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + ImagePullInactivityTimeout: defaultImagePullInactivityTimeout, + ImagePullTimeout: DefaultImagePullTimeout, + CredentialsAuditLogFile: filepath.Join(ecsRoot, defaultCredentialsAuditLogFile), + CredentialsAuditLogDisabled: false, + ImageCleanupDisabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + MinimumImageDeletionAge: DefaultImageDeletionAge, + NonECSMinimumImageDeletionAge: DefaultNonECSImageDeletionAge, + ImageCleanupInterval: DefaultImageCleanupTimeInterval, + 
NumImagesToDeletePerCycle: DefaultNumImagesToDeletePerCycle, + NumNonECSContainersToDeletePerCycle: DefaultNumNonECSContainersToDeletePerCycle, + ContainerMetadataEnabled: BooleanDefaultFalse{Value: ExplicitlyDisabled}, + TaskCPUMemLimit: BooleanDefaultTrue{Value: ExplicitlyDisabled}, + PlatformVariables: platformVariables, + TaskMetadataSteadyStateRate: DefaultTaskMetadataSteadyStateRate, + TaskMetadataBurstRate: DefaultTaskMetadataBurstRate, + SharedVolumeMatchFullConfig: BooleanDefaultFalse{Value: ExplicitlyDisabled}, //only requiring shared volumes to match on name, which is default docker behavior + PollMetrics: BooleanDefaultFalse{Value: NotSet}, + PollingMetricsWaitDuration: DefaultPollingMetricsWaitDuration, + GMSACapable: true, + FSxWindowsFileServerCapable: true, + } +} + +func (cfg *Config) platformOverrides() { + // Enabling task IAM roles for Windows requires the credential proxy to run on port 80, + // so we reserve this port by default when that happens. + if cfg.TaskIAMRoleEnabled.Enabled() { + if cfg.ReservedPorts == nil { + cfg.ReservedPorts = []uint16{} + } + cfg.ReservedPorts = append(cfg.ReservedPorts, httpPort) + } + + // ensure TaskResourceLimit is disabled + cfg.TaskCPUMemLimit.Value = ExplicitlyDisabled + + cpuUnbounded := parseBooleanDefaultFalseConfig("ECS_ENABLE_CPU_UNBOUNDED_WINDOWS_WORKAROUND") + memoryUnbounded := parseBooleanDefaultFalseConfig("ECS_ENABLE_MEMORY_UNBOUNDED_WINDOWS_WORKAROUND") + + platformVariables := PlatformVariables{ + CPUUnbounded: cpuUnbounded, + MemoryUnbounded: memoryUnbounded, + } + cfg.PlatformVariables = platformVariables +} + +// platformString returns platform-specific config data that can be serialized +// to string for debugging +func (cfg *Config) platformString() string { + return "" +} diff --git a/agent/config/config_windows_test.go b/agent/config/config_windows_test.go new file mode 100644 index 00000000000..7ef1002a1aa --- /dev/null +++ b/agent/config/config_windows_test.go @@ -0,0 +1,136 @@ +// 
+build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "os" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConfigDefault(t *testing.T) { + defer setTestRegion()() + os.Unsetenv("ECS_HOST_DATA_DIR") + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + require.NoError(t, err) + + assert.Equal(t, "npipe:////./pipe/docker_engine", cfg.DockerEndpoint, "Default docker endpoint set incorrectly") + assert.Equal(t, `C:\ProgramData\Amazon\ECS\data`, cfg.DataDir, "Default datadir set incorrectly") + assert.False(t, cfg.DisableMetrics.Enabled(), "Default disablemetrics set incorrectly") + assert.Equal(t, 11, len(cfg.ReservedPorts), "Default reserved ports set incorrectly") + assert.Equal(t, uint16(0), cfg.ReservedMemory, "Default reserved memory set incorrectly") + assert.Equal(t, 30*time.Second, cfg.DockerStopTimeout, "Default docker stop container timeout set incorrectly") + assert.Equal(t, 8*time.Minute, cfg.ContainerStartTimeout, "Default docker start container timeout set incorrectly") + assert.Equal(t, 4*time.Minute, cfg.ContainerCreateTimeout, "Default docker create container timeout set incorrectly") + assert.False(t, cfg.PrivilegedDisabled.Enabled(), "Default PrivilegedDisabled set incorrectly") + assert.Equal(t, 
[]dockerclient.LoggingDriver{dockerclient.JSONFileDriver, dockerclient.NoneDriver, dockerclient.AWSLogsDriver}, + cfg.AvailableLoggingDrivers, "Default logging drivers set incorrectly") + assert.Equal(t, 3*time.Hour, cfg.TaskCleanupWaitDuration, "Default task cleanup wait duration set incorrectly") + assert.False(t, cfg.TaskIAMRoleEnabled.Enabled(), "TaskIAMRoleEnabled set incorrectly") + assert.False(t, cfg.TaskIAMRoleEnabledForNetworkHost, "TaskIAMRoleEnabledForNetworkHost set incorrectly") + assert.False(t, cfg.CredentialsAuditLogDisabled, "CredentialsAuditLogDisabled set incorrectly") + assert.Equal(t, `C:\ProgramData\Amazon\ECS\log\audit.log`, cfg.CredentialsAuditLogFile, "CredentialsAuditLogFile is set incorrectly") + assert.False(t, cfg.ImageCleanupDisabled.Enabled(), "ImageCleanupDisabled default is set incorrectly") + assert.Equal(t, DefaultImageDeletionAge, cfg.MinimumImageDeletionAge, "MinimumImageDeletionAge default is set incorrectly") + assert.Equal(t, DefaultNonECSImageDeletionAge, cfg.NonECSMinimumImageDeletionAge, "NonECSMinimumImageDeletionAge default is set incorrectly") + assert.Equal(t, DefaultImageCleanupTimeInterval, cfg.ImageCleanupInterval, "ImageCleanupInterval default is set incorrectly") + assert.Equal(t, DefaultNumImagesToDeletePerCycle, cfg.NumImagesToDeletePerCycle, "NumImagesToDeletePerCycle default is set incorrectly") + assert.Equal(t, `C:\ProgramData\Amazon\ECS\data`, cfg.DataDirOnHost, "Default DataDirOnHost set incorrectly") + assert.False(t, cfg.PlatformVariables.CPUUnbounded.Enabled(), "CPUUnbounded should be false by default") + assert.Equal(t, DefaultTaskMetadataSteadyStateRate, cfg.TaskMetadataSteadyStateRate, + "Default TaskMetadataSteadyStateRate is set incorrectly") + assert.Equal(t, DefaultTaskMetadataBurstRate, cfg.TaskMetadataBurstRate, + "Default TaskMetadataBurstRate is set incorrectly") + assert.False(t, cfg.SharedVolumeMatchFullConfig.Enabled(), "Default SharedVolumeMatchFullConfig set incorrectly") + 
assert.Equal(t, DefaultImagePullTimeout, cfg.ImagePullTimeout, "Default ImagePullTimeout set incorrectly") + assert.False(t, cfg.DependentContainersPullUpfront.Enabled(), "Default DependentContainersPullUpfront set incorrectly") +} + +func TestConfigIAMTaskRolesReserves80(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_IAM_ROLE", "true")() + + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, []uint16{ + DockerReservedPort, + DockerReservedSSLPort, + AgentIntrospectionPort, + AgentCredentialsPort, + rdpPort, + rpcPort, + smbPort, + winRMPortHTTP, + winRMPortHTTPS, + dnsPort, + netBIOSPort, + httpPort, + }, cfg.ReservedPorts) + + defer setTestEnv("ECS_RESERVED_PORTS", "[1]")() + cfg, err = NewConfig(ec2.NewBlackholeEC2MetadataClient()) + assert.NoError(t, err) + assert.Equal(t, []uint16{1, httpPort}, cfg.ReservedPorts) +} + +func TestTaskResourceLimitPlatformOverrideDisabled(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_TASK_CPU_MEM_LIMIT", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.platformOverrides() + assert.NoError(t, err) + assert.False(t, cfg.TaskCPUMemLimit.Enabled()) +} + +func TestCPUUnboundedSet(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_CPU_UNBOUNDED_WINDOWS_WORKAROUND", "true")() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.platformOverrides() + assert.NoError(t, err) + assert.True(t, cfg.PlatformVariables.CPUUnbounded.Enabled()) +} + +func TestCPUUnboundedWindowsDisabled(t *testing.T) { + defer setTestRegion()() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.platformOverrides() + assert.NoError(t, err) + assert.False(t, cfg.PlatformVariables.CPUUnbounded.Enabled()) +} + +func TestMemoryUnboundedSet(t *testing.T) { + defer setTestRegion()() + defer setTestEnv("ECS_ENABLE_MEMORY_UNBOUNDED_WINDOWS_WORKAROUND", "true")() + cfg, err := 
NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.platformOverrides() + assert.NoError(t, err) + assert.True(t, cfg.PlatformVariables.MemoryUnbounded.Enabled()) +} + +func TestMemoryUnboundedWindowsDisabled(t *testing.T) { + defer setTestRegion()() + cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.platformOverrides() + assert.NoError(t, err) + assert.False(t, cfg.PlatformVariables.MemoryUnbounded.Enabled()) +} diff --git a/agent/config/const_linux.go b/agent/config/const_linux.go new file mode 100644 index 00000000000..10ef8b6ef04 --- /dev/null +++ b/agent/config/const_linux.go @@ -0,0 +1,19 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +// OSType is the type of operating system where agent is running +const OSType = "linux" diff --git a/agent/config/const_unknown.go b/agent/config/const_unknown.go new file mode 100644 index 00000000000..ffc6051e2cb --- /dev/null +++ b/agent/config/const_unknown.go @@ -0,0 +1,18 @@ +// +build !windows,!linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +const OSType = "unknown" diff --git a/agent/config/const_windows.go b/agent/config/const_windows.go new file mode 100644 index 00000000000..912d648100d --- /dev/null +++ b/agent/config/const_windows.go @@ -0,0 +1,19 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +// OSType is the type of operating system where agent is running +const OSType = "windows" diff --git a/agent/config/doc.go b/agent/config/doc.go new file mode 100644 index 00000000000..d67369a5d5c --- /dev/null +++ b/agent/config/doc.go @@ -0,0 +1,35 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +/* +Package config handles loading configuration data, warning on missing data, +and setting sane defaults. + +Configuration Sources + +Configuration data is loaded from two sources currently: the environment and +a json config file. + +Environment Variables: + +The environment variables from which configuration values are loaded are +documented in the README file which can be found at +https://github.com/aws/amazon-ecs-agent#environment-variables. + +Config file: + +The config file will be loaded from the path stored in the environment key +ECS_AGENT_CONFIG_FILE_PATH. It must be a JSON file of the format described +by the "Config" struct below. +*/ +package config diff --git a/agent/config/interface.go b/agent/config/interface.go new file mode 100644 index 00000000000..cd58b20aa11 --- /dev/null +++ b/agent/config/interface.go @@ -0,0 +1,18 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +type ConfigReader interface { + ReadConfig() *Config +} diff --git a/agent/config/parse.go b/agent/config/parse.go new file mode 100644 index 00000000000..6ee6dde0ea8 --- /dev/null +++ b/agent/config/parse.go @@ -0,0 +1,373 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/cihub/seelog" + cnitypes "github.com/containernetworking/cni/pkg/types" +) + +func parseCheckpoint(dataDir string) BooleanDefaultFalse { + checkPoint := parseBooleanDefaultFalseConfig("ECS_CHECKPOINT") + if dataDir != "" { + // if we have a directory to checkpoint to, default it to be on + if checkPoint.Value == NotSet { + checkPoint.Value = ExplicitlyEnabled + } + } else { + // if the directory is not set, default to checkpointing off for + // backwards compatibility + if checkPoint.Value == NotSet { + checkPoint.Value = ExplicitlyDisabled + } + } + return checkPoint +} + +func parseReservedPorts(env string) []uint16 { + // Format: json array, e.g. [1,2,3] + reservedPortEnv := os.Getenv(env) + portDecoder := json.NewDecoder(strings.NewReader(reservedPortEnv)) + var reservedPorts []uint16 + err := portDecoder.Decode(&reservedPorts) + // EOF means the string was blank as opposed to UnexpectedEOF which means an + // invalid parse + // Blank is not a warning; we have sane defaults + if err != io.EOF && err != nil { + err := fmt.Errorf("Invalid format for \"%s\" environment variable; expected a JSON array like [1,2,3]. 
err %v", env, err) + seelog.Warn(err) + } + + return reservedPorts +} + +func parseDockerStopTimeout() time.Duration { + var dockerStopTimeout time.Duration + parsedStopTimeout := parseEnvVariableDuration("ECS_CONTAINER_STOP_TIMEOUT") + if parsedStopTimeout >= minimumDockerStopTimeout { + dockerStopTimeout = parsedStopTimeout + // if the ECS_CONTAINER_STOP_TIMEOUT is invalid or empty, then the parsedStopTimeout + // will be 0, in this case we should return a 0, + // because the DockerStopTimeout will merge with the DefaultDockerStopTimeout, + // only when the DockerStopTimeout is empty + } else if parsedStopTimeout != 0 { + // if the configured ECS_CONTAINER_STOP_TIMEOUT is smaller than minimumDockerStopTimeout, + // DockerStopTimeout will be set to minimumDockerStopTimeout + // if the ECS_CONTAINER_STOP_TIMEOUT is 0, empty or an invalid value, then DockerStopTimeout + // will be set to defaultDockerStopTimeout during the config merge operation + dockerStopTimeout = minimumDockerStopTimeout + seelog.Warnf("Discarded invalid value for docker stop timeout, parsed as: %v", parsedStopTimeout) + } + return dockerStopTimeout +} + +func parseContainerStartTimeout() time.Duration { + var containerStartTimeout time.Duration + parsedStartTimeout := parseEnvVariableDuration("ECS_CONTAINER_START_TIMEOUT") + if parsedStartTimeout >= minimumContainerStartTimeout { + containerStartTimeout = parsedStartTimeout + // do the parsedStartTimeout != 0 check for the same reason as in getDockerStopTimeout() + } else if parsedStartTimeout != 0 { + containerStartTimeout = minimumContainerStartTimeout + seelog.Warnf("Discarded invalid value for container start timeout, parsed as: %v", parsedStartTimeout) + } + return containerStartTimeout +} + +func parseContainerCreateTimeout() time.Duration { + var containerCreateTimeout time.Duration + parsedCreateTimeout := parseEnvVariableDuration("ECS_CONTAINER_CREATE_TIMEOUT") + if parsedCreateTimeout >= minimumContainerCreateTimeout { + 
containerCreateTimeout = parsedCreateTimeout + // do the parsedCreateTimeout != 0 check for the same reason as in getDockerStopTimeout() + } else if parsedCreateTimeout != 0 { + containerCreateTimeout = minimumContainerCreateTimeout + seelog.Warnf("Discarded invalid value for container create timeout, parsed as: %v", parsedCreateTimeout) + } + return containerCreateTimeout +} + +func parseImagePullInactivityTimeout() time.Duration { + var imagePullInactivityTimeout time.Duration + parsedImagePullInactivityTimeout := parseEnvVariableDuration("ECS_IMAGE_PULL_INACTIVITY_TIMEOUT") + if parsedImagePullInactivityTimeout >= minimumImagePullInactivityTimeout { + imagePullInactivityTimeout = parsedImagePullInactivityTimeout + // do the parsedStartTimeout != 0 check for the same reason as in getDockerStopTimeout() + } else if parsedImagePullInactivityTimeout != 0 { + imagePullInactivityTimeout = minimumImagePullInactivityTimeout + seelog.Warnf("Discarded invalid value for image pull inactivity timeout, parsed as: %v", parsedImagePullInactivityTimeout) + } + return imagePullInactivityTimeout +} + +func parseAvailableLoggingDrivers() []dockerclient.LoggingDriver { + availableLoggingDriversEnv := os.Getenv("ECS_AVAILABLE_LOGGING_DRIVERS") + loggingDriverDecoder := json.NewDecoder(strings.NewReader(availableLoggingDriversEnv)) + var availableLoggingDrivers []dockerclient.LoggingDriver + err := loggingDriverDecoder.Decode(&availableLoggingDrivers) + // EOF means the string was blank as opposed to UnexpectedEof which means an + // invalid parse + // Blank is not a warning; we have sane defaults + if err != io.EOF && err != nil { + err := fmt.Errorf("Invalid format for \"ECS_AVAILABLE_LOGGING_DRIVERS\" environment variable; expected a JSON array like [\"json-file\",\"syslog\"]. 
err %v", err) + seelog.Warn(err) + } + + return availableLoggingDrivers +} + +func parseVolumePluginCapabilities() []string { + capsFromEnv := os.Getenv("ECS_VOLUME_PLUGIN_CAPABILITIES") + if capsFromEnv == "" { + return []string{} + } + capsDecoder := json.NewDecoder(strings.NewReader(capsFromEnv)) + var caps []string + err := capsDecoder.Decode(&caps) + if err != nil { + seelog.Warnf("Invalid format for \"ECS_VOLUME_PLUGIN_CAPABILITIES\", expected a json list of string. error: %v", err) + } + return caps +} + +func parseNumImagesToDeletePerCycle() int { + numImagesToDeletePerCycleEnvVal := os.Getenv("ECS_NUM_IMAGES_DELETE_PER_CYCLE") + numImagesToDeletePerCycle, err := strconv.Atoi(numImagesToDeletePerCycleEnvVal) + if numImagesToDeletePerCycleEnvVal != "" && err != nil { + seelog.Warnf("Invalid format for \"ECS_NUM_IMAGES_DELETE_PER_CYCLE\", expected an integer. err %v", err) + } + + return numImagesToDeletePerCycle +} + +func parseNumNonECSContainersToDeletePerCycle() int { + numNonEcsContainersToDeletePerCycleEnvVal := os.Getenv("NONECS_NUM_CONTAINERS_DELETE_PER_CYCLE") + numNonEcsContainersToDeletePerCycle, err := strconv.Atoi(numNonEcsContainersToDeletePerCycleEnvVal) + if numNonEcsContainersToDeletePerCycleEnvVal != "" && err != nil { + seelog.Warnf("Invalid format for \"NONECS_NUM_CONTAINERS_DELETE_PER_CYCLE\", expected an integer. 
err %v", err) + } + return numNonEcsContainersToDeletePerCycle +} + +func parseImagePullBehavior() ImagePullBehaviorType { + ImagePullBehaviorString := os.Getenv("ECS_IMAGE_PULL_BEHAVIOR") + switch ImagePullBehaviorString { + case "always": + return ImagePullAlwaysBehavior + case "once": + return ImagePullOnceBehavior + case "prefer-cached": + return ImagePullPreferCachedBehavior + default: + // Use the default image pull behavior when ECS_IMAGE_PULL_BEHAVIOR is + // "default" or not valid + return ImagePullDefaultBehavior + } +} + +func parseInstanceAttributes(errs []error) (map[string]string, []error) { + var instanceAttributes map[string]string + instanceAttributesEnv := os.Getenv("ECS_INSTANCE_ATTRIBUTES") + err := json.Unmarshal([]byte(instanceAttributesEnv), &instanceAttributes) + if instanceAttributesEnv != "" { + if err != nil { + wrappedErr := fmt.Errorf("Invalid format for ECS_INSTANCE_ATTRIBUTES. Expected a json hash: %v", err) + seelog.Error(wrappedErr) + errs = append(errs, wrappedErr) + } + } + for attributeKey, attributeValue := range instanceAttributes { + seelog.Debugf("Setting instance attribute %v: %v", attributeKey, attributeValue) + } + + return instanceAttributes, errs +} + +func parseAdditionalLocalRoutes(errs []error) ([]cnitypes.IPNet, []error) { + var additionalLocalRoutes []cnitypes.IPNet + additionalLocalRoutesEnv := os.Getenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES") + if additionalLocalRoutesEnv != "" { + err := json.Unmarshal([]byte(additionalLocalRoutesEnv), &additionalLocalRoutes) + if err != nil { + seelog.Errorf("Invalid format for ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES, expected a json array of CIDRs: %v", err) + errs = append(errs, err) + } + } + + return additionalLocalRoutes, errs +} + +func parseBooleanDefaultFalseConfig(envVarName string) BooleanDefaultFalse { + boolDefaultFalseCofig := BooleanDefaultFalse{Value: NotSet} + configString := strings.TrimSpace(os.Getenv(envVarName)) + if configString == "" { + // if intentionally not 
set, do not add warning log + return boolDefaultFalseCofig + } + + res, err := strconv.ParseBool(configString) + if err == nil { + if res { + boolDefaultFalseCofig.Value = ExplicitlyEnabled + } else { + boolDefaultFalseCofig.Value = ExplicitlyDisabled + } + } else { + seelog.Warnf("Invalid format for \"%s\", expected a boolean. err %v", envVarName, err) + } + return boolDefaultFalseCofig +} + +func parseBooleanDefaultTrueConfig(envVarName string) BooleanDefaultTrue { + boolDefaultTrueCofig := BooleanDefaultTrue{Value: NotSet} + configString := strings.TrimSpace(os.Getenv(envVarName)) + if configString == "" { + // if intentionally not set, do not add warning log + return boolDefaultTrueCofig + } + + res, err := strconv.ParseBool(configString) + if err == nil { + if res { + boolDefaultTrueCofig.Value = ExplicitlyEnabled + } else { + boolDefaultTrueCofig.Value = ExplicitlyDisabled + } + } else { + seelog.Warnf("Invalid format for \"%s\", expected a boolean. err %v", envVarName, err) + } + return boolDefaultTrueCofig +} + +func parseTaskMetadataThrottles() (int, int) { + var steadyStateRate, burstRate int + rpsLimitEnvVal := os.Getenv("ECS_TASK_METADATA_RPS_LIMIT") + if rpsLimitEnvVal == "" { + seelog.Debug("Environment variable empty: ECS_TASK_METADATA_RPS_LIMIT") + return 0, 0 + } + rpsLimitSplits := strings.Split(rpsLimitEnvVal, ",") + if len(rpsLimitSplits) != 2 { + seelog.Warn(`Invalid format for "ECS_TASK_METADATA_RPS_LIMIT", expected: "rateLimit,burst"`) + return 0, 0 + } + steadyStateRate, err := strconv.Atoi(strings.TrimSpace(rpsLimitSplits[0])) + if err != nil { + seelog.Warnf(`Invalid format for "ECS_TASK_METADATA_RPS_LIMIT", expected integer for steady state rate: %v`, err) + return 0, 0 + } + burstRate, err = strconv.Atoi(strings.TrimSpace(rpsLimitSplits[1])) + if err != nil { + seelog.Warnf(`Invalid format for "ECS_TASK_METADATA_RPS_LIMIT", expected integer for burst rate: %v`, err) + return 0, 0 + } + return steadyStateRate, burstRate +} + +func 
parseContainerInstanceTags(errs []error) (map[string]string, []error) { + var containerInstanceTags map[string]string + containerInstanceTagsConfigString := os.Getenv("ECS_CONTAINER_INSTANCE_TAGS") + + // If duplicate keys exist, the value of the key will be the value of latter key. + err := json.Unmarshal([]byte(containerInstanceTagsConfigString), &containerInstanceTags) + if containerInstanceTagsConfigString != "" { + if err != nil { + wrappedErr := fmt.Errorf("Invalid format for ECS_CONTAINER_INSTANCE_TAGS. Expected a json hash: %v", err) + seelog.Error(wrappedErr) + errs = append(errs, wrappedErr) + } + } + + for tagKey, tagValue := range containerInstanceTags { + seelog.Debugf("Setting instance tag %v: %v", tagKey, tagValue) + } + + return containerInstanceTags, errs +} + +func parseContainerInstancePropagateTagsFrom() ContainerInstancePropagateTagsFromType { + containerInstancePropagateTagsFromString := os.Getenv("ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM") + switch containerInstancePropagateTagsFromString { + case "ec2_instance": + return ContainerInstancePropagateTagsFromEC2InstanceType + default: + // Use the default "none" type when ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM is + // "none" or not valid. + return ContainerInstancePropagateTagsFromNoneType + } +} + +func parseEnvVariableUint16(envVar string) uint16 { + envVal := os.Getenv(envVar) + var var16 uint16 + if envVal != "" { + var64, err := strconv.ParseUint(envVal, 10, 16) + if err != nil { + seelog.Warnf("Invalid format for \""+envVar+"\" environment variable; expected unsigned integer. 
err %v", err) + } else { + var16 = uint16(var64) + } + } + return var16 +} + +func parseEnvVariableDuration(envVar string) time.Duration { + var duration time.Duration + envVal := os.Getenv(envVar) + if envVal == "" { + seelog.Debugf("Environment variable empty: %v", envVar) + } else { + var err error + duration, err = time.ParseDuration(envVal) + if err != nil { + seelog.Warnf("Could not parse duration value: %v for Environment Variable %v : %v", envVal, envVar, err) + } + } + return duration +} + +func parseImageCleanupExclusionList(envVar string) []string { + imageEnv := os.Getenv(envVar) + var imageCleanupExclusionList []string + if imageEnv == "" { + seelog.Debugf("Environment variable empty: %s", imageEnv) + return nil + } else { + imageCleanupExclusionList = strings.Split(imageEnv, ",") + } + + return imageCleanupExclusionList +} + +func parseCgroupCPUPeriod() time.Duration { + duration := parseEnvVariableDuration("ECS_CGROUP_CPU_PERIOD") + + if duration >= minimumCgroupCPUPeriod && duration <= maximumCgroupCPUPeriod { + return duration + } else if duration != 0 { + seelog.Warnf("CPU Period duration value: %v for Environment Variable ECS_CGROUP_CPU_PERIOD is not within [%v, %v], using default value instead", + duration, minimumCgroupCPUPeriod, maximumCgroupCPUPeriod) + } + + return defaultCgroupCPUPeriod +} diff --git a/agent/config/parse_linux.go b/agent/config/parse_linux.go new file mode 100644 index 00000000000..54b4b9ea403 --- /dev/null +++ b/agent/config/parse_linux.go @@ -0,0 +1,24 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +func parseGMSACapability() bool { + return false +} + +func parseFSxWindowsFileServerCapability() bool { + return false +} diff --git a/agent/config/parse_unsupported.go b/agent/config/parse_unsupported.go new file mode 100644 index 00000000000..056aa91d30d --- /dev/null +++ b/agent/config/parse_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +func parseGMSACapability() bool { + return false +} + +func parseFSxWindowsFileServerCapability() bool { + return false +} diff --git a/agent/config/parse_windows.go b/agent/config/parse_windows.go new file mode 100644 index 00000000000..da38d40fe72 --- /dev/null +++ b/agent/config/parse_windows.go @@ -0,0 +1,115 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "os" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/windows/registry" + + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/cihub/seelog" +) + +const ( + // envSkipDomainJoinCheck is an environment setting that can be used to skip + // domain join check validation. This is useful for integration and + // functional-tests but should not be set for any non-test use-case. + envSkipDomainJoinCheck = "ZZZ_SKIP_DOMAIN_JOIN_CHECK_NOT_SUPPORTED_IN_PRODUCTION" +) + +// parseGMSACapability is used to determine if gMSA support can be enabled +func parseGMSACapability() bool { + envStatus := utils.ParseBool(os.Getenv("ECS_GMSA_SUPPORTED"), true) + return checkDomainJoinWithEnvOverride(envStatus) +} + +// parseFSxWindowsFileServerCapability is used to determine if fsxWindowsFileServer support can be enabled +func parseFSxWindowsFileServerCapability() bool { + // fsxwindowsfileserver is not supported on Windows 2016 and non-domain-joined container instances + status, err := isWindows2016() + if err != nil || status == true { + return false + } + + envStatus := utils.ParseBool(os.Getenv("ECS_FSX_WINDOWS_FILE_SERVER_SUPPORTED"), true) + return checkDomainJoinWithEnvOverride(envStatus) +} + +func checkDomainJoinWithEnvOverride(envStatus bool) bool { + if envStatus { + // Check if domain join check override is present + skipDomainJoinCheck := utils.ParseBool(os.Getenv(envSkipDomainJoinCheck), false) + if skipDomainJoinCheck { + seelog.Debug("Skipping domain join validation based on environment override") + return true + } + // check if container instance is domain joined. + // If container instance is not domain joined, explicitly disable feature configuration. 
+ status, err := isDomainJoined() + if err == nil && status == true { + return true + } + seelog.Errorf("Unable to determine valid domain join: %v", err) + } + + return false +} + +// isDomainJoined is used to validate if container instance is part of a valid active directory. +// Reference: https://golang.org/src/os/user/lookup_windows.go +func isDomainJoined() (bool, error) { + var domain *uint16 + var status uint32 + + err := syscall.NetGetJoinInformation(nil, &domain, &status) + if err != nil { + return false, err + } + + err = syscall.NetApiBufferFree((*byte)(unsafe.Pointer(domain))) + if err != nil { + return false, err + } + + return status == syscall.NetSetupDomainName, nil +} + +// isWindows2016 is used to check if container instance is versioned Windows 2016 +// Reference: https://godoc.org/golang.org/x/sys/windows/registry +var isWindows2016 = func() (bool, error) { + key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + seelog.Errorf("Unable to open Windows registry key to determine Windows version: %v", err) + return false, err + } + defer key.Close() + + version, _, err := key.GetStringValue("ProductName") + if err != nil { + seelog.Errorf("Unable to read current version from Windows registry: %v", err) + return false, err + } + + if strings.HasPrefix(version, "Windows Server 2016") { + return true, nil + } + + return false, nil +} diff --git a/agent/config/parse_windows_test.go b/agent/config/parse_windows_test.go new file mode 100644 index 00000000000..8f6579bffbe --- /dev/null +++ b/agent/config/parse_windows_test.go @@ -0,0 +1,52 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseGMSACapability(t *testing.T) { + os.Setenv("ECS_GMSA_SUPPORTED", "False") + defer os.Unsetenv("ECS_GMSA_SUPPORTED") + + assert.False(t, parseGMSACapability()) +} + +func TestParseBooleanEnvVar(t *testing.T) { + os.Setenv("EXAMPLE_SETTING", "True") + defer os.Unsetenv("EXAMPLE_SETTING") + + assert.True(t, parseBooleanDefaultFalseConfig("EXAMPLE_SETTING").Enabled()) + assert.True(t, parseBooleanDefaultTrueConfig("EXAMPLE_SETTING").Enabled()) + + os.Setenv("EXAMPLE_SETTING", "False") + assert.False(t, parseBooleanDefaultFalseConfig("EXAMPLE_SETTING").Enabled()) + assert.False(t, parseBooleanDefaultTrueConfig("EXAMPLE_SETTING").Enabled()) +} + +func TestParseFSxWindowsFileServerCapability(t *testing.T) { + isWindows2016 = func() (bool, error) { + return false, nil + } + os.Setenv("ECS_FSX_WINDOWS_FILE_SERVER_SUPPORTED", "False") + defer os.Unsetenv("ECS_FSX_WINDOWS_FILE_SERVER_SUPPORTED") + + assert.False(t, parseFSxWindowsFileServerCapability()) +} diff --git a/agent/config/sensitive.go b/agent/config/sensitive.go new file mode 100644 index 00000000000..f2841f0777f --- /dev/null +++ b/agent/config/sensitive.go @@ -0,0 +1,57 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "encoding/json" +) + +// SensitiveRawMessage is a struct to store some data that should not be logged +// or printed. +// This struct is a Stringer which will not print its contents with 'String'. +// It is a json.Marshaler and json.Unmarshaler and will present its actual +// contents in plaintext when read/written from/to json. +type SensitiveRawMessage struct { + contents json.RawMessage +} + +// NewSensitiveRawMessage returns a new encapsulated json.RawMessage or nil if +// the data is empty. It cannot be accidentally logged via .String/.GoString/%v/%#v +func NewSensitiveRawMessage(data json.RawMessage) *SensitiveRawMessage { + if len(data) == 0 { + return nil + } + return &SensitiveRawMessage{contents: data} +} + +func (data SensitiveRawMessage) String() string { + return "[redacted]" +} + +func (data SensitiveRawMessage) GoString() string { + return "[redacted]" +} + +func (data SensitiveRawMessage) Contents() json.RawMessage { + return data.contents +} + +func (data SensitiveRawMessage) MarshalJSON() ([]byte, error) { + return data.contents, nil +} + +func (data *SensitiveRawMessage) UnmarshalJSON(jsonData []byte) error { + data.contents = json.RawMessage(jsonData) + return nil +} diff --git a/agent/config/sensitive_test.go b/agent/config/sensitive_test.go new file mode 100644 index 00000000000..2a140471d13 --- /dev/null +++ b/agent/config/sensitive_test.go @@ -0,0 +1,52 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSensitiveRawMessageImplements(t *testing.T) { + assert.Implements(t, (*fmt.Stringer)(nil), SensitiveRawMessage{}) + assert.Implements(t, (*fmt.GoStringer)(nil), SensitiveRawMessage{}) + assert.Implements(t, (*json.Marshaler)(nil), SensitiveRawMessage{}) + assert.Implements(t, (*json.Unmarshaler)(nil), &SensitiveRawMessage{}) +} + +func TestSensitiveRawMessage(t *testing.T) { + sensitive := NewSensitiveRawMessage(json.RawMessage("secret")) + + for _, str := range []string{ + sensitive.String(), + sensitive.GoString(), + fmt.Sprintf("%v", sensitive), + fmt.Sprintf("%#v", sensitive), + fmt.Sprint(sensitive), + } { + assert.Equal(t, "[redacted]", str, "expected redacted") + } +} + +// TestEmptySensitiveRawMessage tests the message content is empty +func TestEmptySensitiveRawMessage(t *testing.T) { + sensitive := NewSensitiveRawMessage(json.RawMessage("")) + + assert.Nil(t, sensitive, "empty message should return nil") +} diff --git a/agent/config/types.go b/agent/config/types.go new file mode 100644 index 00000000000..d95a6f41f26 --- /dev/null +++ b/agent/config/types.go @@ -0,0 +1,332 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + cnitypes "github.com/containernetworking/cni/pkg/types" +) + +// ImagePullBehaviorType is an enum variable type corresponding to different agent pull +// behaviors including default, always, never and once. +type ImagePullBehaviorType int8 + +// ContainerInstancePropagateTagsFromType is an enum variable type corresponding to different +// ways to propagate tags, it includes none (default) and ec2_instance. +type ContainerInstancePropagateTagsFromType int8 + +type Config struct { + // DEPRECATED + // ClusterArn is the Name or full ARN of a Cluster to register into. It has + // been deprecated (and will eventually be removed) in favor of Cluster + ClusterArn string `deprecated:"Please use Cluster instead"` + // Cluster can either be the Name or full ARN of a Cluster. This is the + // cluster the agent should register this ContainerInstance into. If this + // value is not set, it will default to "default" + Cluster string `trim:"true"` + // APIEndpoint is the endpoint, such as "ecs.us-east-1.amazonaws.com", to + // make calls against. If this value is not set, it will default to the + // endpoint for your current AWSRegion + APIEndpoint string `trim:"true"` + // DockerEndpoint is the address the agent will attempt to connect to the + // Docker daemon at. This should have the same value as "DOCKER_HOST" + // normally would to interact with the daemon. It defaults to + // unix:///var/run/docker.sock + DockerEndpoint string + // AWSRegion is the region to run in (such as "us-east-1"). This value will + // be inferred from the EC2 metadata service, but if it cannot be found this + // will be fatal. 
+ AWSRegion string `missing:"fatal" trim:"true"` + + // ReservedPorts is an array of ports which should be registered as + // unavailable. If not set, they default to [22,2375,2376,51678]. + ReservedPorts []uint16 + // ReservedPortsUDP is an array of UDP ports which should be registered as + // unavailable. If not set, it defaults to []. + ReservedPortsUDP []uint16 + + // DataDir is the directory data is saved to in order to preserve state + // across agent restarts. + // It is also used to keep the metadata of containers managed by the agent + DataDir string + // DataDirOnHost is the directory in the instance from which we mount + // DataDir to the ecs-agent container and to agent managed containers + DataDirOnHost string + // Checkpoint configures whether data should be periodically to a checkpoint + // file, in DataDir, such that on instance or agent restarts it will resume + // as the same ContainerInstance. It defaults to false. + Checkpoint BooleanDefaultFalse + + // EngineAuthType configures what type of data is in EngineAuthData. + // Supported types, right now, can be found in the dockerauth package: https://godoc.org/github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth + EngineAuthType string `trim:"true"` + // EngineAuthData contains authentication data. Please see the documentation + // for EngineAuthType for more information. + EngineAuthData *SensitiveRawMessage + + // UpdatesEnabled specifies whether updates should be applied to this agent. + // Default true + UpdatesEnabled BooleanDefaultFalse + // UpdateDownloadDir specifies where new agent versions should be placed + // within the container in order for the external updating process to + // correctly handle them. 
+ UpdateDownloadDir string + + // DisableMetrics configures whether task utilization metrics should be + // sent to the ECS telemetry endpoint + DisableMetrics BooleanDefaultFalse + + // PollMetrics configures whether metrics are constantly streamed for each container or + // polled on interval instead. + PollMetrics BooleanDefaultFalse + + // PollingMetricsWaitDuration configures how long a container should wait before polling metrics + // again when PollMetrics is set to true + PollingMetricsWaitDuration time.Duration + + // DisableDockerHealthCheck configures whether container health feature was enabled + // on the instance + DisableDockerHealthCheck BooleanDefaultFalse + + // ReservedMemory specifies the amount of memory (in MB) to reserve for things + // other than containers managed by ECS + ReservedMemory uint16 + + // DockerStopTimeout specifies the amount of time before a SIGKILL is issued to + // containers managed by ECS + DockerStopTimeout time.Duration + + // ContainerStartTimeout specifies the amount of time to wait to start a container + ContainerStartTimeout time.Duration + + // ContainerCreateTimeout specifies the amount of time to wait to create a container + ContainerCreateTimeout time.Duration + + // DependentContainersPullUpfront specifies whether pulling images upfront should be applied to this agent. + // Default false + DependentContainersPullUpfront BooleanDefaultFalse + + // ImagePullInactivityTimeout is here to override the amount of time to wait when pulling and extracting a container + ImagePullInactivityTimeout time.Duration + + //ImagePullTimeout is here to override the timeout for PullImage API + ImagePullTimeout time.Duration + + // AvailableLoggingDrivers specifies the logging drivers available for use + // with Docker. If not set, it defaults to ["json-file","none"]. 
+ AvailableLoggingDrivers []dockerclient.LoggingDriver + + // PrivilegedDisabled specifies whether the Agent is capable of launching + // tasks with privileged containers + PrivilegedDisabled BooleanDefaultFalse + + // SELinuxCapable specifies whether the Agent is capable of using SELinux + // security options + SELinuxCapable BooleanDefaultFalse + + // AppArmorCapable specifies whether the Agent is capable of using AppArmor + // security options + AppArmorCapable BooleanDefaultFalse + + // TaskCleanupWaitDuration specifies the time to wait after a task is stopped + // until cleanup of task resources is started. + TaskCleanupWaitDuration time.Duration + + // TaskIAMRoleEnabled specifies if the Agent is capable of launching + // tasks with IAM Roles. + TaskIAMRoleEnabled BooleanDefaultFalse + + // DeleteNonECSImagesEnabled specifies if the Agent can delete the cached, unused non-ecs images. + DeleteNonECSImagesEnabled BooleanDefaultFalse + + // TaskCPUMemLimit specifies if Agent can launch a task with a hierarchical cgroup + TaskCPUMemLimit BooleanDefaultTrue + + // CredentialsAuditLogFile specifies the path/filename of the audit log. + CredentialsAuditLogFile string + + // CredentialsAuditLogDisabled specifies whether audit logging is disabled. 
+ CredentialsAuditLogDisabled bool + + // TaskIAMRoleEnabledForNetworkHost specifies if the Agent is capable of launching + // tasks with IAM Roles when networkMode is set to 'host' + TaskIAMRoleEnabledForNetworkHost bool + + // TaskENIEnabled specifies if the Agent is capable of launching task within + // defined EC2 networks + TaskENIEnabled BooleanDefaultFalse + + // ENITrunkingEnabled specifies if the Agent is enabled to launch awsvpc + // task with ENI Trunking + ENITrunkingEnabled BooleanDefaultTrue + + // ImageCleanupDisabled specifies whether the Agent will periodically perform + // automated image cleanup + ImageCleanupDisabled BooleanDefaultFalse + + // MinimumImageDeletionAge specifies the minimum time since it was pulled + // before it can be deleted + MinimumImageDeletionAge time.Duration + + // NonECSMinimumImageDeletionAge specifies the minimum time since non ecs images created before it can be deleted + NonECSMinimumImageDeletionAge time.Duration + + // ImageCleanupInterval specifies the time to wait before performing the image + // cleanup since last time it was executed + ImageCleanupInterval time.Duration + + // NumImagesToDeletePerCycle specifies the num of image to delete every time + // when Agent performs cleanup + NumImagesToDeletePerCycle int + + // NumNonECSContainersToDeletePerCycle specifies the num of NonECS containers to delete every time + // when Agent performs cleanup + NumNonECSContainersToDeletePerCycle int + + // ImagePullBehavior specifies the agent's behavior for pulling image and loading + // local Docker image cache + ImagePullBehavior ImagePullBehaviorType + + // InstanceAttributes contains key/value pairs representing + // attributes to be associated with this instance within the + // ECS service and used to influence behavior such as launch + // placement. + InstanceAttributes map[string]string + + // Set if clients validate ssl certificates. 
Used mainly for testing + AcceptInsecureCert bool `json:"-"` + + // CNIPluginsPath is the path for the cni plugins + CNIPluginsPath string + + // PauseContainerTarballPath is the path to the pause container tarball + PauseContainerTarballPath string + + // PauseContainerImageName is the name for the pause container image. + // Setting this value to be different from the default will disable loading + // the image from the tarball; the referenced image must already be loaded. + PauseContainerImageName string + + // PauseContainerTag is the tag for the pause container image. + // Setting this value to be different from the default will disable loading + // the image from the tarball; the referenced image must already be loaded. + PauseContainerTag string + + // PrometheusMetricsEnabled configures whether Agent metrics should be + // collected and published to the specified endpoint. This is disabled by + // default. + PrometheusMetricsEnabled bool + + // AWSVPCBlockInstanceMetdata specifies if InstanceMetadata endpoint should be blocked + // for tasks that are launched with network mode "awsvpc" when ECS_AWSVPC_BLOCK_IMDS=true + AWSVPCBlockInstanceMetdata BooleanDefaultFalse + + // OverrideAWSVPCLocalIPv4Address overrides the local IPv4 address chosen + // for a task using the `awsvpc` networking mode. Using this configuration + // will limit you to running one `awsvpc` task at a time. IPv4 addresses + // must be specified in decimal-octet form and also specify the subnet + // size (e.g., "169.254.172.42/22"). + OverrideAWSVPCLocalIPv4Address *cnitypes.IPNet + + // AWSVPCAdditionalLocalRoutes allows the specification of routing table + // entries that will be added in the task's network namespace via the + // instance bridge interface rather than via the ENI. + AWSVPCAdditionalLocalRoutes []cnitypes.IPNet + + // ContainerMetadataEnabled specifies if the agent should provide a metadata + // file for containers. 
+ ContainerMetadataEnabled BooleanDefaultFalse + + // OverrideAWSLogsExecutionRole is config option used to enable awslogs + // driver authentication over the task's execution role + OverrideAWSLogsExecutionRole BooleanDefaultFalse + + // CgroupPath is the path expected by the agent, defaults to + // '/sys/fs/cgroup' + CgroupPath string + + // PlatformVariables consists of configuration variables specific to linux/windows + PlatformVariables PlatformVariables + + // TaskMetadataSteadyStateRate specifies the steady state throttle for the task metadata endpoint + TaskMetadataSteadyStateRate int + + // TaskMetadataBurstRate specifies the burst rate throttle for the task metadata endpoint + TaskMetadataBurstRate int + + // SharedVolumeMatchFullConfig is config option used to short-circuit volume validation against a + // provisioned volume, if false (default). If true, we perform deep comparison including driver options + // and labels. For comparing shared volume across 2 instances, this should be set to false as docker's + // default behavior is to match name only, and does not propagate driver options and labels through volume drivers. + SharedVolumeMatchFullConfig BooleanDefaultFalse + + // NoIID when set to true, specifies that the agent should not register the instance + // with instance identity document. This is required in order to accommodate scenarios in + // which ECS agent tries to register the instance where the instance id document is + // not available or needed + NoIID bool + + // ContainerInstancePropagateTagsFrom when set to "ec2_instance", agent will call EC2 API to + // get the tags and register them through RegisterContainerInstance call. + // When set to "none" (or any other string), no API call will be made. 
+ ContainerInstancePropagateTagsFrom ContainerInstancePropagateTagsFromType + + // ContainerInstanceTags contains key/value pairs representing + // tags extracted from config file and will be associated with this instance + // through RegisterContainerInstance call. Tags with the same keys from DescribeTags + // API call will be overridden. + ContainerInstanceTags map[string]string + + // GPUSupportEnabled specifies if the Agent is capable of launching GPU tasks + GPUSupportEnabled bool + // InferentiaSupportEnabled specifies whether the built-in support for inferentia task is enabled. + InferentiaSupportEnabled bool + + // ImageCleanupExclusionList is the list of image names customers want to keep for their own use and delete automatically + ImageCleanupExclusionList []string + + // NvidiaRuntime is the runtime to be used for passing Nvidia GPU devices to containers + NvidiaRuntime string `trim:"true"` + + // TaskMetadataAZDisabled specifies if availability zone should be disabled in Task Metadata endpoint + TaskMetadataAZDisabled bool + + // ENIPauseContainerCleanupDelaySeconds specifies how long to wait before cleaning up the pause container after all + // other containers have stopped. + ENIPauseContainerCleanupDelaySeconds int + + // CgroupCPUPeriod is config option to set different CFS quota and period values in microsecond, defaults to 100 ms + CgroupCPUPeriod time.Duration + + // SpotInstanceDrainingEnabled, if true, agent will poll the container instance's metadata endpoint for an ec2 spot + // instance termination notice. If EC2 sends a spot termination notice, then agent will set the instance's state + // to DRAINING, which gracefully shuts down all running tasks on the instance. + // If the instance is not spot then the poller will still run but it will never receive a termination notice. + // Defaults to false. 
+ // see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-draining.html + SpotInstanceDrainingEnabled BooleanDefaultFalse + + // GMSACapable is the config option to indicate if gMSA is supported. + // It should be enabled by default only if the container instance is part of a valid active directory domain. + GMSACapable bool + + // VolumePluginCapabilities specifies the capabilities of the ecs volume plugin. + VolumePluginCapabilities []string + + // FSxWindowsFileServerCapable is the config option to indicate if fsxWindowsFileServer is supported. + // It should be enabled by default only if the container instance is part of a valid active directory domain. + FSxWindowsFileServerCapable bool +} diff --git a/agent/config/types_unix.go b/agent/config/types_unix.go new file mode 100644 index 00000000000..0d2ff84af79 --- /dev/null +++ b/agent/config/types_unix.go @@ -0,0 +1,18 @@ +// +build !windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +// PlatformVariables consists of configuration variables specific to Linux +type PlatformVariables struct{} diff --git a/agent/config/types_windows.go b/agent/config/types_windows.go new file mode 100644 index 00000000000..5d893daf1e6 --- /dev/null +++ b/agent/config/types_windows.go @@ -0,0 +1,25 @@ +// +build windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +// PlatformVariables consists of configuration variables specific to Windows +type PlatformVariables struct { + // CPUUnbounded specifies if agent can run a mix of CPU bounded and + // unbounded tasks for windows + CPUUnbounded BooleanDefaultFalse + // MemoryUnbounded specifies if agent can run a mix of Memory bounded and + // unbounded tasks for windows + MemoryUnbounded BooleanDefaultFalse +} diff --git a/agent/containermetadata/generate_mocks.go b/agent/containermetadata/generate_mocks.go new file mode 100644 index 00000000000..71d6646fc82 --- /dev/null +++ b/agent/containermetadata/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +//go:generate mockgen -destination=mocks/containermetadata_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/containermetadata Manager,DockerMetadataClient diff --git a/agent/containermetadata/manager.go b/agent/containermetadata/manager.go new file mode 100644 index 00000000000..511b4df9e54 --- /dev/null +++ b/agent/containermetadata/manager.go @@ -0,0 +1,176 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "context" + "encoding/json" + "fmt" + "os" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + // metadataEnvironmentVariable is the environment variable passed to the + // container for the metadata file path. 
+ metadataEnvironmentVariable = "ECS_CONTAINER_METADATA_FILE" + metadataFile = "ecs-container-metadata.json" + metadataPerm = 0644 +) + +// Manager is an interface that allows us to abstract away the metadata +// operations +type Manager interface { + SetContainerInstanceARN(string) + SetAvailabilityZone(string) + SetHostPrivateIPv4Address(string) + SetHostPublicIPv4Address(string) + Create(*dockercontainer.Config, *dockercontainer.HostConfig, *apitask.Task, string, []string) error + Update(context.Context, string, *apitask.Task, string) error + Clean(string) error +} + +// metadataManager implements the Manager interface +type metadataManager struct { + // client is the Docker API Client that the metadata manager uses. It defaults + // to 1.21 on Linux and 1.24 on Windows + client DockerMetadataClient + // cluster is the cluster where this agent is run + cluster string + // dataDir is the directory where the metadata is being written. For Linux + // this is a container directory + dataDir string + // dataDirOnHost is the directory from which dataDir is mounted for Linux + // version of the agent + dataDirOnHost string + // containerInstanceARN is the Container Instance ARN registered for this agent + containerInstanceARN string + // availabilityZone is the availability zone where task is in + availabilityZone string + // hostPrivateIPv4Address is the private IPv4 address associated with the EC2 instance + hostPrivateIPv4Address string + // hostPublicIPv4Address is the public IPv4 address associated with the EC2 instance + hostPublicIPv4Address string +} + +// NewManager creates a metadataManager for a given DockerTaskEngine settings. 
+func NewManager(client DockerMetadataClient, cfg *config.Config) Manager { + return &metadataManager{ + client: client, + cluster: cfg.Cluster, + dataDir: cfg.DataDir, + dataDirOnHost: cfg.DataDirOnHost, + } +} + +// SetContainerInstanceARN sets the metadataManager's ContainerInstanceArn which is not available +// at its creation as this information is not present immediately at the agent's startup +func (manager *metadataManager) SetContainerInstanceARN(containerInstanceARN string) { + manager.containerInstanceARN = containerInstanceARN +} + +// SetAvailabilityZone sets the metadataManager's AvailabilityZone which is not available +// at its creation as this information is not present immediately at the agent's startup +func (manager *metadataManager) SetAvailabilityZone(availabilityZone string) { + manager.availabilityZone = availabilityZone +} + +// SetHostPrivateIPv4Address sets the metadataManager's hostPrivateIPv4Address which is not available +// at its creation as this information is not present immediately at the agent's startup +func (manager *metadataManager) SetHostPrivateIPv4Address(ipv4address string) { + manager.hostPrivateIPv4Address = ipv4address +} + +// SetHostPublicIPv4Address sets the metadataManager's hostPublicIPv4Address which is not available +// at its creation as this information is not present immediately at the agent's startup +func (manager *metadataManager) SetHostPublicIPv4Address(ipv4address string) { + manager.hostPublicIPv4Address = ipv4address +} + +var mkdirAll = os.MkdirAll + +// Create creates the metadata file and adds the metadata directory to +// the container's mounted host volumes +// Pointer hostConfig is modified directly so there is risk of concurrency errors. 
+func (manager *metadataManager) Create(config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, + task *apitask.Task, containerName string, dockerSecurityOptions []string) error { + // Create task and container directories if they do not yet exist + metadataDirectoryPath, err := getMetadataFilePath(task.Arn, containerName, manager.dataDir) + // Stop metadata creation if path is malformed for any reason + if err != nil { + return fmt.Errorf("container metadata create for task %s container %s: %v", task.Arn, containerName, err) + } + + err = mkdirAll(metadataDirectoryPath, os.ModePerm) + if err != nil { + return fmt.Errorf("creating metadata directory for task %s: %v", task.Arn, err) + } + + // Acquire the metadata then write it in JSON format to the file + metadata := manager.parseMetadataAtContainerCreate(task, containerName) + err = manager.marshalAndWrite(metadata, task.Arn, containerName) + if err != nil { + return err + } + + // Add the directory of this container's metadata to the container's mount binds + // Then add the destination directory as an environment variable in the container $METADATA + binds, env := createBindsEnv(hostConfig.Binds, config.Env, manager.dataDirOnHost, metadataDirectoryPath, dockerSecurityOptions) + config.Env = env + hostConfig.Binds = binds + return nil +} + +// Update updates the metadata file after container starts and dynamic metadata is available +func (manager *metadataManager) Update(ctx context.Context, dockerID string, task *apitask.Task, containerName string) error { + // Get docker container information through api call + dockerContainer, err := manager.client.InspectContainer(ctx, dockerID, dockerclient.InspectContainerTimeout) + if err != nil { + return err + } + + // Ensure we do not update a container that is invalid or is not running + if dockerContainer == nil || !dockerContainer.State.Running { + return fmt.Errorf("container metadata update for container %s in task %s: container not running or 
invalid", containerName, task.Arn) + } + + // Acquire the metadata then write it in JSON format to the file + metadata := manager.parseMetadata(dockerContainer, task, containerName) + return manager.marshalAndWrite(metadata, task.Arn, containerName) +} + +var removeAll = os.RemoveAll + +// Clean removes the metadata files of all containers associated with a task +func (manager *metadataManager) Clean(taskARN string) error { + metadataPath, err := getTaskMetadataDir(taskARN, manager.dataDir) + if err != nil { + return fmt.Errorf("clean task metadata: unable to get metadata directory for task %s: %v", taskARN, err) + } + return removeAll(metadataPath) +} + +func (manager *metadataManager) marshalAndWrite(metadata Metadata, taskARN string, containerName string) error { + data, err := json.MarshalIndent(metadata, "", "\t") + if err != nil { + return fmt.Errorf("create metadata for container %s in task %s: failed to marshal metadata: %v", containerName, taskARN, err) + } + + // Write the metadata to file + return writeToMetadataFile(data, taskARN, containerName, manager.dataDir) +} diff --git a/agent/containermetadata/manager_test.go b/agent/containermetadata/manager_test.go new file mode 100644 index 00000000000..11c6cc5e807 --- /dev/null +++ b/agent/containermetadata/manager_test.go @@ -0,0 +1,213 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + mock_containermetadata "github.com/aws/amazon-ecs-agent/agent/containermetadata/mocks" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + containerInstanceARN = "a6348116-0ba6-43b5-87c9-8a7e10294b75" + dockerID = "888888888887" + invalidTaskARN = "invalidARN" + validTaskARN = "arn:aws:ecs:region:account-id:task/task-id" + taskDefinitionFamily = "taskdefinitionfamily" + taskDefinitionRevision = "8" + containerName = "container" + dataDir = "ecs_mockdata" + availabilityZone = "us-west-2b" + hostPrivateIPv4Address = "127.0.0.1" + hostPublicIPv4Address = "127.0.0.1" +) + +func managerSetup(t *testing.T) (*mock_containermetadata.MockDockerMetadataClient, oswrapper.File, func()) { + ctrl := gomock.NewController(t) + mockDockerMetadataClient := mock_containermetadata.NewMockDockerMetadataClient(ctrl) + mockFile := mock_oswrapper.NewMockFile() + return mockDockerMetadataClient, mockFile, ctrl.Finish +} + +// TestSetContainerInstanceARN checks whether the container instance ARN is set correctly. +func TestSetContainerInstanceARN(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + + mockARN := containerInstanceARN + + newManager := &metadataManager{} + newManager.SetContainerInstanceARN(mockARN) + assert.Equal(t, mockARN, newManager.containerInstanceARN) +} + +// TestAvailabilityZone checks whether the container availabilityZone is set correctly. 
+func TestSetAvailabilityZone(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + mockAvailabilityZone := availabilityZone + newManager := &metadataManager{} + newManager.SetAvailabilityZone(mockAvailabilityZone) + assert.Equal(t, mockAvailabilityZone, newManager.availabilityZone) +} + +// TestSetHostPrivateIPv4Address checks whether the container hostPublicIPv4Address is set correctly. +func TestSetHostPrivateIPv4Address(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + newManager := &metadataManager{} + newManager.SetHostPrivateIPv4Address(hostPrivateIPv4Address) + assert.Equal(t, hostPrivateIPv4Address, newManager.hostPrivateIPv4Address) +} + +// TestSetHostPublicIPv4Address checks whether the container hostPublicIPv4Address is set correctly. +func TestSetHostPublicIPv4Address(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + newManager := &metadataManager{} + newManager.SetHostPublicIPv4Address(hostPublicIPv4Address) + assert.Equal(t, hostPublicIPv4Address, newManager.hostPublicIPv4Address) +} + +// TestCreateMalformedFilepath checks case when taskARN is invalid resulting in an invalid file path +func TestCreateMalformedFilepath(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + + mockTaskARN := invalidTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockDockerSecurityOptions := types.Info{SecurityOptions: make([]string, 0)}.SecurityOptions + + newManager := &metadataManager{} + err := newManager.Create(nil, nil, mockTask, mockContainerName, mockDockerSecurityOptions) + assert.Error(t, err) +} + +// TestCreateMkdirAllFail checks case when MkdirAll call fails +func TestCreateMkdirAllFail(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockDockerSecurityOptions := types.Info{SecurityOptions: make([]string, 0)}.SecurityOptions + + 
mkdirAll = func(path string, perm os.FileMode) error { + return errors.New("err") + } + defer func() { + mkdirAll = os.MkdirAll + }() + + newManager := &metadataManager{} + err := newManager.Create(nil, nil, mockTask, mockContainerName, mockDockerSecurityOptions) + assert.Error(t, err) +} + +// TestUpdateInspectFail checks case when Inspect call fails +func TestUpdateInspectFail(t *testing.T) { + mockClient, _, done := managerSetup(t) + defer done() + + mockDockerID := dockerID + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + + newManager := &metadataManager{ + client: mockClient, + } + + mockClient.EXPECT().InspectContainer(gomock.Any(), mockDockerID, dockerclient.InspectContainerTimeout).Return(nil, errors.New("Inspect fail")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := newManager.Update(ctx, mockDockerID, mockTask, mockContainerName) + + assert.Error(t, err, "Expected inspect error to result in update fail") +} + +// TestUpdateNotRunningFail checks case where container is not running +func TestUpdateNotRunningFail(t *testing.T) { + mockClient, _, done := managerSetup(t) + defer done() + + mockDockerID := dockerID + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockState := types.ContainerState{ + Running: false, + } + mockContainer := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &mockState, + }, + } + + newManager := &metadataManager{ + client: mockClient, + } + + mockClient.EXPECT().InspectContainer(gomock.Any(), mockDockerID, dockerclient.InspectContainerTimeout).Return(&mockContainer, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := newManager.Update(ctx, mockDockerID, mockTask, mockContainerName) + assert.Error(t, err) +} + +// TestMalformedFilepath checks case where ARN is invalid +func TestMalformedFilepath(t *testing.T) { + _, 
_, done := managerSetup(t) + defer done() + + mockTaskARN := invalidTaskARN + + newManager := &metadataManager{} + err := newManager.Clean(mockTaskARN) + assert.Error(t, err) +} + +// TestHappyPath is the mainline case for metadata create +func TestHappyPath(t *testing.T) { + _, _, done := managerSetup(t) + defer done() + + mockTaskARN := validTaskARN + + removeAll = func(path string) error { + return nil + } + defer func() { + removeAll = os.RemoveAll + }() + + newManager := &metadataManager{} + err := newManager.Clean(mockTaskARN) + assert.NoError(t, err) +} diff --git a/agent/containermetadata/manager_unix_test.go b/agent/containermetadata/manager_unix_test.go new file mode 100644 index 00000000000..275109e8b33 --- /dev/null +++ b/agent/containermetadata/manager_unix_test.go @@ -0,0 +1,127 @@ +// +build unit,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "context" + "os" + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +var ( + mockRename = func(oldpath, newpath string) error { + return nil + } + mockMkdirAll = func(path string, perm os.FileMode) error { + return nil + } +) + +// TestCreate is the happypath case for metadata create +func TestCreate(t *testing.T) { + _, mockFile, done := managerSetup(t) + defer done() + + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockConfig := &dockercontainer.Config{Env: make([]string, 0)} + mockHostConfig := &dockercontainer.HostConfig{Binds: make([]string, 0)} + mockDockerSecurityOptions := types.Info{SecurityOptions: make([]string, 0)}.SecurityOptions + + oTempFile := TempFile + TempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + rename = mockRename + mkdirAll = mockMkdirAll + + defer func() { + rename = os.Rename + mkdirAll = os.MkdirAll + TempFile = oTempFile + }() + + newManager := &metadataManager{} + err := newManager.Create(mockConfig, mockHostConfig, mockTask, mockContainerName, mockDockerSecurityOptions) + + assert.NoError(t, err) + assert.Equal(t, 1, len(mockConfig.Env), "Unexpected number of environment variables in config") + assert.Equal(t, 1, len(mockHostConfig.Binds), "Unexpected number of binds in host config") +} + +// TestUpdate is happypath case for metadata update +func TestUpdate(t *testing.T) { + mockClient, mockFile, done := managerSetup(t) + defer done() + + mockDockerID := dockerID + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: 
mockTaskARN} + mockContainerName := containerName + mockState := types.ContainerState{ + Running: true, + } + + mockConfig := &dockercontainer.Config{Image: "image"} + + mockNetworks := map[string]*network.EndpointSettings{} + mockNetworkSettings := &types.NetworkSettings{ + Networks: mockNetworks, + } + + mockContainer := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &mockState, + }, + Config: mockConfig, + NetworkSettings: mockNetworkSettings, + } + + newManager := &metadataManager{ + client: mockClient, + } + + gomock.InOrder( + mockClient.EXPECT().InspectContainer(gomock.Any(), mockDockerID, dockerclient.InspectContainerTimeout).Return(&mockContainer, nil), + ) + + oTempFile := TempFile + TempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + rename = mockRename + + defer func() { + rename = os.Rename + TempFile = oTempFile + }() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := newManager.Update(ctx, mockDockerID, mockTask, mockContainerName) + + assert.NoError(t, err) +} diff --git a/agent/containermetadata/manager_windows_test.go b/agent/containermetadata/manager_windows_test.go new file mode 100644 index 00000000000..f34b2f3d2db --- /dev/null +++ b/agent/containermetadata/manager_windows_test.go @@ -0,0 +1,116 @@ +// +build unit,windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "context" + "os" + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +var mockMkdirAll = func(path string, perm os.FileMode) error { + return nil +} + +// TestCreate is the mainline case for metadata create +func TestCreate(t *testing.T) { + _, mockFile, done := managerSetup(t) + defer done() + + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockConfig := &dockercontainer.Config{Env: make([]string, 0)} + mockHostConfig := &dockercontainer.HostConfig{Binds: make([]string, 0)} + mockDockerSecurityOptions := types.Info{SecurityOptions: make([]string, 0)}.SecurityOptions + + tempOpenFile := openFile + openFile = func(name string, flag int, perm os.FileMode) (oswrapper.File, error) { + return mockFile, nil + } + mkdirAll = mockMkdirAll + + defer func() { + mkdirAll = os.MkdirAll + openFile = tempOpenFile + }() + + newManager := &metadataManager{} + err := newManager.Create(mockConfig, mockHostConfig, mockTask, mockContainerName, mockDockerSecurityOptions) + + assert.NoError(t, err) + assert.Equal(t, 1, len(mockConfig.Env), "Unexpected number of environment variables in config") + assert.Equal(t, 1, len(mockHostConfig.Binds), "Unexpected number of binds in host config") +} + +// TestUpdate is happy path case for metadata update +func TestUpdate(t *testing.T) { + mockClient, mockFile, done := managerSetup(t) + defer done() + + mockDockerID := dockerID + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockState := types.ContainerState{ + 
Running: true, + } + + mockConfig := &dockercontainer.Config{Image: "image"} + + mockNetworks := map[string]*network.EndpointSettings{} + mockNetworkSettings := types.NetworkSettings{Networks: mockNetworks} + + mockContainer := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &mockState, + }, + Config: mockConfig, + NetworkSettings: &mockNetworkSettings, + } + + newManager := &metadataManager{ + client: mockClient, + } + + tempOpenFile := openFile + openFile = func(name string, flag int, perm os.FileMode) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + openFile = tempOpenFile + }() + + gomock.InOrder( + mockClient.EXPECT().InspectContainer(gomock.Any(), mockDockerID, dockerclient.InspectContainerTimeout).Return(&mockContainer, nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := newManager.Update(ctx, mockDockerID, mockTask, mockContainerName) + + assert.NoError(t, err) +} diff --git a/agent/containermetadata/mocks/containermetadata_mocks.go b/agent/containermetadata/mocks/containermetadata_mocks.go new file mode 100644 index 00000000000..555ea9abe2f --- /dev/null +++ b/agent/containermetadata/mocks/containermetadata_mocks.go @@ -0,0 +1,181 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/aws/amazon-ecs-agent/agent/containermetadata (interfaces: Manager,DockerMetadataClient) + +// Package mock_containermetadata is a generated GoMock package. +package mock_containermetadata + +import ( + context "context" + reflect "reflect" + time "time" + + task "github.com/aws/amazon-ecs-agent/agent/api/task" + types "github.com/docker/docker/api/types" + container "github.com/docker/docker/api/types/container" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// Clean mocks base method +func (m *MockManager) Clean(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clean", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Clean indicates an expected call of Clean +func (mr *MockManagerMockRecorder) Clean(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clean", reflect.TypeOf((*MockManager)(nil).Clean), arg0) +} + +// Create mocks base method +func (m *MockManager) Create(arg0 *container.Config, arg1 *container.HostConfig, arg2 *task.Task, arg3 string, arg4 []string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockManagerMockRecorder) Create(arg0, arg1, arg2, arg3, arg4 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockManager)(nil).Create), arg0, arg1, arg2, arg3, arg4) +} + +// SetAvailabilityZone mocks base method +func (m *MockManager) SetAvailabilityZone(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAvailabilityZone", arg0) +} + +// SetAvailabilityZone indicates an expected call of SetAvailabilityZone +func (mr *MockManagerMockRecorder) SetAvailabilityZone(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAvailabilityZone", reflect.TypeOf((*MockManager)(nil).SetAvailabilityZone), arg0) +} + +// SetContainerInstanceARN mocks base method +func (m *MockManager) SetContainerInstanceARN(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetContainerInstanceARN", arg0) +} + +// SetContainerInstanceARN indicates an expected call of SetContainerInstanceARN +func (mr *MockManagerMockRecorder) SetContainerInstanceARN(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContainerInstanceARN", reflect.TypeOf((*MockManager)(nil).SetContainerInstanceARN), arg0) +} + +// SetHostPrivateIPv4Address mocks base method +func (m *MockManager) SetHostPrivateIPv4Address(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetHostPrivateIPv4Address", arg0) +} + +// SetHostPrivateIPv4Address indicates an expected call of SetHostPrivateIPv4Address +func (mr *MockManagerMockRecorder) SetHostPrivateIPv4Address(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHostPrivateIPv4Address", reflect.TypeOf((*MockManager)(nil).SetHostPrivateIPv4Address), arg0) +} + +// SetHostPublicIPv4Address mocks base method +func (m *MockManager) SetHostPublicIPv4Address(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetHostPublicIPv4Address", arg0) +} + +// 
SetHostPublicIPv4Address indicates an expected call of SetHostPublicIPv4Address +func (mr *MockManagerMockRecorder) SetHostPublicIPv4Address(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHostPublicIPv4Address", reflect.TypeOf((*MockManager)(nil).SetHostPublicIPv4Address), arg0) +} + +// Update mocks base method +func (m *MockManager) Update(arg0 context.Context, arg1 string, arg2 *task.Task, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update +func (mr *MockManagerMockRecorder) Update(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockManager)(nil).Update), arg0, arg1, arg2, arg3) +} + +// MockDockerMetadataClient is a mock of DockerMetadataClient interface +type MockDockerMetadataClient struct { + ctrl *gomock.Controller + recorder *MockDockerMetadataClientMockRecorder +} + +// MockDockerMetadataClientMockRecorder is the mock recorder for MockDockerMetadataClient +type MockDockerMetadataClientMockRecorder struct { + mock *MockDockerMetadataClient +} + +// NewMockDockerMetadataClient creates a new mock instance +func NewMockDockerMetadataClient(ctrl *gomock.Controller) *MockDockerMetadataClient { + mock := &MockDockerMetadataClient{ctrl: ctrl} + mock.recorder = &MockDockerMetadataClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDockerMetadataClient) EXPECT() *MockDockerMetadataClientMockRecorder { + return m.recorder +} + +// InspectContainer mocks base method +func (m *MockDockerMetadataClient) InspectContainer(arg0 context.Context, arg1 string, arg2 time.Duration) (*types.ContainerJSON, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"InspectContainer", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ContainerJSON) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InspectContainer indicates an expected call of InspectContainer +func (mr *MockDockerMetadataClientMockRecorder) InspectContainer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InspectContainer", reflect.TypeOf((*MockDockerMetadataClient)(nil).InspectContainer), arg0, arg1, arg2) +} diff --git a/agent/containermetadata/parse_metadata.go b/agent/containermetadata/parse_metadata.go new file mode 100644 index 00000000000..96d45148727 --- /dev/null +++ b/agent/containermetadata/parse_metadata.go @@ -0,0 +1,162 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "fmt" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" +) + +// parseMetadataAtContainerCreate gathers metadata from task and cluster configurations +// then packages it for JSON Marshaling. We use this version to get data +// available prior to container creation +// Since we accept incomplete metadata fields, we should not return +// errors here and handle them at this or the above stage. 
+func (manager *metadataManager) parseMetadataAtContainerCreate(task *apitask.Task, containerName string) Metadata { + return Metadata{ + cluster: manager.cluster, + taskMetadata: TaskMetadata{ + containerName: containerName, + taskARN: task.Arn, + taskDefinitionFamily: task.Family, + taskDefinitionRevision: task.Version, + }, + containerInstanceARN: manager.containerInstanceARN, + metadataStatus: MetadataInitial, + availabilityZone: manager.availabilityZone, + hostPrivateIPv4Address: manager.hostPrivateIPv4Address, + hostPublicIPv4Address: manager.hostPublicIPv4Address, + } +} + +// parseMetadata gathers metadata from a docker container, and task +// configuration and data then packages it for JSON Marshaling +// Since we accept incomplete metadata fields, we should not return +// errors here and handle them at this or the above stage. +func (manager *metadataManager) parseMetadata(dockerContainer *types.ContainerJSON, task *apitask.Task, containerName string) Metadata { + dockerMD := parseDockerContainerMetadata(task.Arn, containerName, dockerContainer) + return Metadata{ + cluster: manager.cluster, + taskMetadata: TaskMetadata{ + containerName: containerName, + taskARN: task.Arn, + taskDefinitionFamily: task.Family, + taskDefinitionRevision: task.Version, + }, + dockerContainerMetadata: dockerMD, + containerInstanceARN: manager.containerInstanceARN, + metadataStatus: MetadataReady, + availabilityZone: manager.availabilityZone, + hostPrivateIPv4Address: manager.hostPrivateIPv4Address, + hostPublicIPv4Address: manager.hostPublicIPv4Address, + } +} + +// parseDockerContainerMetadata parses the metadata in a docker container +// and packages this data for JSON marshaling +// Since we accept incomplete metadata fields, we should not return +// errors here and handle them at this stage. 
+func parseDockerContainerMetadata(taskARN string, containerName string, dockerContainer *types.ContainerJSON) DockerContainerMetadata { + if dockerContainer == nil { + seelog.Warnf("Failed to parse container metadata for task %s container %s: container metadata not available or does not exist", taskARN, containerName) + return DockerContainerMetadata{} + } + + // In most cases a container should never lack a config but we check regardless to avoid + // nil pointer exceptions (Could occur if there is some error in the docker api call, if the + // container we receive has incomplete information) + imageNameFromConfig := "" + if dockerContainer.Config != nil { + imageNameFromConfig = dockerContainer.Config.Image + } else { + seelog.Warnf("Failed to parse container metadata for task %s container %s: container has no configuration", taskARN, containerName) + } + + if dockerContainer.ContainerJSONBase == nil { + seelog.Warnf("Failed to parse container metadata for task %s container %s: container has no host configuration", taskARN, containerName) + return DockerContainerMetadata{ + imageName: imageNameFromConfig, + } + } + networkMetadata, err := parseNetworkMetadata(dockerContainer.NetworkSettings, dockerContainer.HostConfig) + + if err != nil { + seelog.Warnf("Failed to parse container metadata for task %s container %s: %v", taskARN, containerName, err) + } + + // Get Port bindings from NetworkSettings + var ports []apicontainer.PortBinding + ports, err = apicontainer.PortBindingFromDockerPortBinding(dockerContainer.NetworkSettings.Ports) + if err != nil { + seelog.Warnf("Failed to parse container metadata for task %s container %s: %v", taskARN, containerName, err) + } + + return DockerContainerMetadata{ + containerID: dockerContainer.ID, + dockerContainerName: dockerContainer.Name, + imageID: dockerContainer.Image, + imageName: imageNameFromConfig, + ports: ports, + networkInfo: networkMetadata, + } +} + +// parseNetworkMetadata parses the docker.NetworkSettings 
struct and
// packages the desired metadata for JSON marshaling
// Since we accept incomplete metadata fields, we should not return
// errors here and handle them at this stage.
func parseNetworkMetadata(settings *types.NetworkSettings, hostConfig *dockercontainer.HostConfig) (NetworkMetadata, error) {
	// Both arguments should be present on a successfully inspected container;
	// a missing one is reported as an error for the caller to log.
	if settings == nil {
		return NetworkMetadata{}, fmt.Errorf("parse network metadata: could not find network settings")
	}
	if hostConfig == nil {
		return NetworkMetadata{}, fmt.Errorf("parse network metadata: could not find host configuration")
	}

	// Docker API versions 1.17-1.20 do not expose per-network details. When
	// settings.Networks is empty, fall back to the legacy single IP address
	// from the settings plus the NetworkMode (network interface name) from
	// the HostConfig, since that is the network the container was created with.
	networkList := make([]Network, 0, len(settings.Networks))
	if len(settings.Networks) > 0 {
		for mode, endpoint := range settings.Networks {
			networkList = append(networkList, Network{
				NetworkMode:   mode,
				IPv4Addresses: []string{endpoint.IPAddress},
			})
		}
	} else {
		networkList = append(networkList, Network{
			NetworkMode:   string(hostConfig.NetworkMode),
			IPv4Addresses: []string{settings.IPAddress},
		})
	}

	return NetworkMetadata{
		networks: networkList,
	}, nil
}
diff --git a/agent/containermetadata/parse_metadata_test.go b/agent/containermetadata/parse_metadata_test.go
new file mode 100644
index 00000000000..d0b9e278ec6
--- /dev/null
+++
b/agent/containermetadata/parse_metadata_test.go @@ -0,0 +1,385 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/assert" +) + +const ( + cluster = "us-west2" +) + +// TestParseContainerCreate checks case when parsing is done at metadata creation +func TestParseContainerCreate(t *testing.T) { + mockTaskARN := validTaskARN + mockTaskDefinitionFamily := taskDefinitionFamily + mockTaskDefinitionRevision := taskDefinitionRevision + mockTask := &apitask.Task{Arn: mockTaskARN, Family: mockTaskDefinitionFamily, Version: mockTaskDefinitionRevision} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + expectedStatus := string(MetadataInitial) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: 
mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadataAtContainerCreate(mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, metadata.taskMetadata.taskDefinitionFamily, mockTaskDefinitionFamily, "Expected task definition family "+mockTaskDefinitionFamily) + assert.Equal(t, metadata.taskMetadata.taskDefinitionRevision, mockTaskDefinitionRevision, "Expected task definition revision "+mockTaskDefinitionRevision) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) +} + +func TestParseHasNoContainer(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + 
hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadata(nil, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, metadata.dockerContainerMetadata.containerID, "", "Expected empty container metadata") + assert.Equal(t, metadata.dockerContainerMetadata.dockerContainerName, "", "Expected empty container metadata") + assert.Equal(t, metadata.dockerContainerMetadata.imageID, "", "Expected empty container metadata") + assert.Equal(t, metadata.dockerContainerMetadata.imageName, "", "Expected empty container metadata") +} + +func TestParseHasConfig(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + mockConfig := &dockercontainer.Config{Image: "image"} + + mockNetworks := map[string]*network.EndpointSettings{} + 
mockNetworkSettings := &types.NetworkSettings{Networks: mockNetworks} + + mockContainer := &types.ContainerJSON{ + Config: mockConfig, + NetworkSettings: mockNetworkSettings, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, metadata.dockerContainerMetadata.imageName, "image", "Expected nonempty imageID") +} + +func TestParseHasNetworkSettingsPortBindings(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := 
hostPublicIPv4Address + + mockPorts := nat.PortMap{} + mockPortBinding := make([]nat.PortBinding, 0) + mockPortBinding = append(mockPortBinding, nat.PortBinding{HostIP: "0.0.0.0", HostPort: "8080"}) + mockPorts["80/tcp"] = mockPortBinding + + mockHostConfig := &dockercontainer.HostConfig{NetworkMode: "bridge"} + mockNetworks := map[string]*network.EndpointSettings{} + mockNetworks["bridge"] = &network.EndpointSettings{} + mockNetworks["network0"] = &network.EndpointSettings{} + mockNetworkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Ports: mockPorts, + }, + Networks: mockNetworks, + } + mockContainer := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + HostConfig: mockHostConfig, + }, + NetworkSettings: mockNetworkSettings, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address 
"+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 2, "Expected two networks") + + assert.Equal(t, len(metadata.dockerContainerMetadata.ports), 1, "Expected nonempty list of ports") + assert.Equal(t, uint16(80), metadata.dockerContainerMetadata.ports[0].ContainerPort, "Expected nonempty ContainerPort field") + assert.Equal(t, uint16(8080), metadata.dockerContainerMetadata.ports[0].HostPort, "Expected nonempty HostPort field") + assert.Equal(t, "0.0.0.0", metadata.dockerContainerMetadata.ports[0].BindIP, "Expected nonempty HostIP field") +} + +func TestParseHasNetworkSettingsNetworksEmpty(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + mockHostConfig := &dockercontainer.HostConfig{NetworkMode: "bridge"} + mockNetworkSettings := &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: "0.0.0.0", + }} + mockContainer := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + HostConfig: mockHostConfig, + }, + NetworkSettings: mockNetworkSettings, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, 
metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 1, "Expected one network") +} + +func TestParseHasNetworkSettingsNetworksNonEmpty(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + mockHostConfig := &dockercontainer.HostConfig{NetworkMode: dockercontainer.NetworkMode("bridge")} + mockNetworks := map[string]*network.EndpointSettings{} + mockNetworks["bridge"] = &network.EndpointSettings{} + mockNetworks["network0"] = &network.EndpointSettings{} + mockNetworkSettings := &types.NetworkSettings{ + Networks: mockNetworks, + } + mockContainer := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + HostConfig: mockHostConfig, + }, + NetworkSettings: mockNetworkSettings, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: 
mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 2, "Expected two networks") +} + +func TestParseHasNoContainerJSONBase(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + + mockConfig := &dockercontainer.Config{Image: "image"} + mockNetworkSettings := &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: "0.0.0.0", + }} + mockContainer := &types.ContainerJSON{ + NetworkSettings: mockNetworkSettings, + Config: mockConfig, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + } 
+ + metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 0, "Expected no networks") + assert.Equal(t, metadata.dockerContainerMetadata.imageName, "image") +} + +func TestParseTaskDefinitionSettings(t *testing.T) { + mockTaskARN := validTaskARN + mockTask := &apitask.Task{Arn: mockTaskARN} + mockContainerName := containerName + mockCluster := cluster + mockContainerInstanceARN := containerInstanceARN + mockAvailabilityZone := availabilityZone + mockHostPrivateIPv4Address := hostPrivateIPv4Address + mockHostPublicIPv4Address := hostPublicIPv4Address + + mockHostConfig := &dockercontainer.HostConfig{NetworkMode: dockercontainer.NetworkMode("bridge")} + mockConfig := &dockercontainer.Config{Image: "image"} + mockNetworkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + LinkLocalIPv6Address: "0.0.0.0", + }, + } + mockContainer := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + HostConfig: mockHostConfig, + }, + Config: mockConfig, + NetworkSettings: mockNetworkSettings, + } + + expectedStatus := string(MetadataReady) + + newManager := &metadataManager{ + cluster: mockCluster, + containerInstanceARN: mockContainerInstanceARN, + availabilityZone: mockAvailabilityZone, + hostPrivateIPv4Address: mockHostPrivateIPv4Address, + hostPublicIPv4Address: mockHostPublicIPv4Address, + } + + 
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName) + assert.Equal(t, metadata.cluster, mockCluster, "Expected cluster "+mockCluster) + assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN) + assert.Equal(t, metadata.taskMetadata.taskDefinitionFamily, "", "Expected no task definition family") + assert.Equal(t, metadata.taskMetadata.taskDefinitionRevision, "", "Expected no task definition revision") + assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN) + assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone) + assert.Equal(t, metadata.hostPrivateIPv4Address, mockHostPrivateIPv4Address, "Expected hostPrivateIPv4Address "+hostPrivateIPv4Address) + assert.Equal(t, metadata.hostPublicIPv4Address, mockHostPublicIPv4Address, "Expected hostPublicIPv4Address "+hostPublicIPv4Address) + assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus) + + // now add the task definition details + mockTaskDefinitionFamily := taskDefinitionFamily + mockTaskDefinitionRevision := taskDefinitionRevision + mockTask = &apitask.Task{Arn: mockTaskARN, Family: mockTaskDefinitionFamily, Version: mockTaskDefinitionRevision} + metadata = newManager.parseMetadata(nil, mockTask, mockContainerName) + assert.Equal(t, metadata.taskMetadata.taskDefinitionFamily, mockTaskDefinitionFamily, "Expected task definition family "+mockTaskDefinitionFamily) + assert.Equal(t, metadata.taskMetadata.taskDefinitionRevision, mockTaskDefinitionRevision, "Expected task definition revision "+mockTaskDefinitionRevision) +} diff --git a/agent/containermetadata/types.go b/agent/containermetadata/types.go new file mode 100644 index 00000000000..e3276a52c10 
--- /dev/null +++ b/agent/containermetadata/types.go @@ -0,0 +1,177 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "context" + "encoding/json" + "fmt" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/docker/docker/api/types" +) + +const ( + // MetadataInitialText is the initial state of the metadata file which + // contains metadata provided by the ECS Agent + MetadataInitialText = "INITIAL" + // MetadataReadyText is the final state of the metadata file which indicates + // it has acquired all the data it needs (Currently from the Agent and Docker) + MetadataReadyText = "READY" +) + +const ( + MetadataInitial MetadataStatus = iota + MetadataReady +) + +// MetadataStatus specifies the current update status of the metadata file. +// The purpose of this status is for users to check if the metadata file has +// reached the stage they need before they read the rest of the file to avoid +// race conditions (Since the final stage will need to be after the container +// starts up). +// In the future the metadata may require multiple stages of update and these +// statuses should be amended/appended accordingly. 
+type MetadataStatus int32 + +func (status MetadataStatus) MarshalText() (text []byte, err error) { + switch status { + case MetadataInitial: + text = []byte(MetadataInitialText) + case MetadataReady: + text = []byte(MetadataReadyText) + default: + return nil, fmt.Errorf("failed marshalling MetadataStatus %v", status) + } + return +} + +func (status *MetadataStatus) UnmarshalText(text []byte) error { + t := string(text) + switch t { + case MetadataInitialText: + *status = MetadataInitial + case MetadataReadyText: + *status = MetadataReady + default: + return fmt.Errorf("failed unmarshalling MetadataStatus %s", text) + } + return nil +} + +// DockerMetadataClient is a wrapper for the docker interface functions we need +// We use this as a dummy type to be able to pass in dockerapi.DockerClient to +// our functions without creating import cycles +// We make it exported because we need to use it for testing (Using the MockDockerClient +// in engine package leads to import cycles) +// The problems described above are indications dockerapi.DockerClient needs to be moved +// outside the engine package +type DockerMetadataClient interface { + InspectContainer(context.Context, string, time.Duration) (*types.ContainerJSON, error) +} + +// Network is a struct that keeps track of metadata of a network interface +type Network struct { + NetworkMode string `json:"NetworkMode,omitempty"` + IPv4Addresses []string `json:"IPv4Addresses,omitempty"` + IPv6Addresses []string `json:"IPv6Addresses,omitempty"` +} + +// NetworkMetadata keeps track of the data we parse from the Network Settings +// in docker containers. 
While most information is redundant with the internal +// Network struct, we keeps this wrapper in case we wish to add data specifically +// from the NetworkSettings +type NetworkMetadata struct { + networks []Network +} + +// DockerContainerMetadata keeps track of all metadata acquired from Docker inspection +// Has redundancies with dockerapi.DockerContainerMetadata but this packages all +// docker metadata we want in the service so we can change features easily +type DockerContainerMetadata struct { + containerID string + dockerContainerName string + imageID string + imageName string + ports []apicontainer.PortBinding + networkInfo NetworkMetadata +} + +// TaskMetadata keeps track of all metadata associated with a task +// provided by AWS, does not depend on the creation of the container +type TaskMetadata struct { + containerName string + taskARN string + taskDefinitionFamily string + taskDefinitionRevision string +} + +// Metadata packages all acquired metadata and is used to format it +// into JSON to write to the metadata file. We have it flattened, rather +// than simply containing the previous three structures to simplify JSON +// parsing and avoid exposing those structs in the final metadata file. 
+type Metadata struct { + cluster string + taskMetadata TaskMetadata + dockerContainerMetadata DockerContainerMetadata + containerInstanceARN string + metadataStatus MetadataStatus + availabilityZone string + hostPrivateIPv4Address string + hostPublicIPv4Address string +} + +// metadataSerializer is an intermediate struct that converts the information +// in Metadata into information to encode into JSON +type metadataSerializer struct { + Cluster string `json:"Cluster,omitempty"` + ContainerInstanceARN string `json:"ContainerInstanceARN,omitempty"` + TaskARN string `json:"TaskARN,omitempty"` + TaskDefinitionFamily string `json:"TaskDefinitionFamily,omitempty"` + TaskDefinitionRevision string `json:"TaskDefinitionRevision,omitempty"` + ContainerID string `json:"ContainerID,omitempty"` + ContainerName string `json:"ContainerName,omitempty"` + DockerContainerName string `json:"DockerContainerName,omitempty"` + ImageID string `json:"ImageID,omitempty"` + ImageName string `json:"ImageName,omitempty"` + Ports []apicontainer.PortBinding `json:"PortMappings,omitempty"` + Networks []Network `json:"Networks,omitempty"` + MetadataFileStatus MetadataStatus `json:"MetadataFileStatus,omitempty"` + AvailabilityZone string `json:"AvailabilityZone,omitempty"` + HostPrivateIPv4Address string `json:"HostPrivateIPv4Address,omitempty"` + HostPublicIPv4Address string `json:"HostPublicIPv4Address,omitempty"` +} + +func (m Metadata) MarshalJSON() ([]byte, error) { + return json.Marshal( + metadataSerializer{ + Cluster: m.cluster, + ContainerInstanceARN: m.containerInstanceARN, + TaskARN: m.taskMetadata.taskARN, + TaskDefinitionFamily: m.taskMetadata.taskDefinitionFamily, + TaskDefinitionRevision: m.taskMetadata.taskDefinitionRevision, + ContainerID: m.dockerContainerMetadata.containerID, + ContainerName: m.taskMetadata.containerName, + DockerContainerName: m.dockerContainerMetadata.dockerContainerName, + ImageID: m.dockerContainerMetadata.imageID, + ImageName: 
m.dockerContainerMetadata.imageName, + Ports: m.dockerContainerMetadata.ports, + Networks: m.dockerContainerMetadata.networkInfo.networks, + MetadataFileStatus: m.metadataStatus, + AvailabilityZone: m.availabilityZone, + HostPrivateIPv4Address: m.hostPrivateIPv4Address, + HostPublicIPv4Address: m.hostPublicIPv4Address, + }) +} diff --git a/agent/containermetadata/types_test.go b/agent/containermetadata/types_test.go new file mode 100644 index 00000000000..7e94fde5e7c --- /dev/null +++ b/agent/containermetadata/types_test.go @@ -0,0 +1,65 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnMarshalText(t *testing.T) { + + var nilMs MetadataStatus + + testCases := []struct { + param []byte + expectedReturn MetadataStatus + name string + }{ + {[]byte(""), nilMs, "unmarshal MetadataStatus from empty string"}, + {[]byte("Malformed"), nilMs, "unmarshal MetadataStatus from malformed string"}, + {[]byte(MetadataInitialText), MetadataInitial, "unmarshal MetadataInitial from string"}, + {[]byte(MetadataReadyText), MetadataReady, "unmarshal MetadataReady from string"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var ms MetadataStatus + ms.UnmarshalText(tc.param) + assert.Equal(t, tc.expectedReturn, ms, tc.name) + }) + } +} + +func TestMarshalText(t *testing.T) { + + testCases := []struct { + param MetadataStatus + expectedReturn []byte + name string + }{ + {MetadataInitial, []byte(MetadataInitialText), "marshal MetadataInitial to text"}, + {MetadataReady, []byte(MetadataReadyText), "marshal MetadataReady to text"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ret, _ := tc.param.MarshalText() + assert.Equal(t, tc.expectedReturn, ret, tc.name) + }) + } +} diff --git a/agent/containermetadata/utils.go b/agent/containermetadata/utils.go new file mode 100644 index 00000000000..4fe6170ff88 --- /dev/null +++ b/agent/containermetadata/utils.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "fmt" + "path/filepath" + "strings" +) + +const ( + metadataJoinSuffix = "metadata" +) + +// getTaskIDfromARN parses a task ARN and produces the task ID +// A task ARN has format arn:aws:ecs:[region]:[account-id]:task/[task-id] +// For a correctly formatted ARN we split it over ":" into 6 parts, the last part +// containing the task-id which we extract by splitting it by "/". +// This function should eventually be replaced by a standardized ARN parsing +// module +func getTaskIDfromARN(taskARN string) (string, error) { + colonSplitARN := strings.SplitN(taskARN, ":", 6) + // Incorrectly formatted ARN (Should not happen) + if len(colonSplitARN) < 6 { + return "", fmt.Errorf("get task ARN: invalid TaskARN %s", taskARN) + } + arnTaskPartSplit := strings.SplitN(colonSplitARN[5], "/", 2) + // Incorrectly formatted ARN (Should not happen) + if len(arnTaskPartSplit) < 2 { + return "", fmt.Errorf("get task ARN: cannot find TaskID for TaskARN %s", taskARN) + } + return arnTaskPartSplit[1], nil +} + +// getMetadataFilePath gives the metadata file path for any agent-managed container +func getMetadataFilePath(taskARN string, containerName string, dataDir string) (string, error) { + taskID, err := getTaskIDfromARN(taskARN) + // Empty task ID indicates malformed ARN (Should not happen) + if err != nil { + return "", fmt.Errorf("get metdata file path of task %s container %s: %v", taskARN, containerName, err) + } + return filepath.Join(dataDir, metadataJoinSuffix, taskID, containerName), nil +} + +// getTaskMetadataDir acquires the directory with all of the metadata +// files of a given task +func getTaskMetadataDir(taskARN string, dataDir string) (string, error) { + taskID, err := getTaskIDfromARN(taskARN) + if err != nil { + return "", fmt.Errorf("get task metadata directory: %v", err) + } + return filepath.Join(dataDir, 
metadataJoinSuffix, taskID), err +} diff --git a/agent/containermetadata/utils_test.go b/agent/containermetadata/utils_test.go new file mode 100644 index 00000000000..dc8e6ae9635 --- /dev/null +++ b/agent/containermetadata/utils_test.go @@ -0,0 +1,85 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + invalidSplitTaskARN = "arn:aws:ecs:region:account-id:task" +) + +// TestGetTaskIDFailDueToInvalidID checks the case where task ARN has 6 parts but last section is invalid +func TestGetTaskIDFailDueToInvalidID(t *testing.T) { + mockTaskARN := invalidSplitTaskARN + + _, err := getTaskIDfromARN(mockTaskARN) + expectErrorMessage := fmt.Sprintf("get task ARN: cannot find TaskID for TaskARN %s", mockTaskARN) + + assert.Equal(t, expectErrorMessage, err.Error()) +} + +func TestGetMetadataFilePathFailDueToInvalidID(t *testing.T) { + mockTaskARN := invalidSplitTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + _, err := getMetadataFilePath(mockTaskARN, mockContainerName, mockDataDir) + expectErrorMessage := fmt.Sprintf( + "get metdata file path of task %s container %s: get task ARN: cannot find TaskID for TaskARN %s", + mockTaskARN, mockContainerName, mockTaskARN) + + assert.Equal(t, expectErrorMessage, err.Error()) +} + +func TestGetMetadataFilePathSuccess(t *testing.T) { + 
mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + path, err := getMetadataFilePath(mockTaskARN, mockContainerName, mockDataDir) + expectedPath := filepath.Join(mockDataDir, metadataJoinSuffix, "task-id", mockContainerName) + + assert.Equal(t, expectedPath, path) + assert.NoError(t, err) +} + +func TestGetTaskMetadataDirFailDueToInvalidID(t *testing.T) { + mockTaskARN := invalidSplitTaskARN + mockDataDir := dataDir + + _, err := getTaskMetadataDir(mockTaskARN, mockDataDir) + expectErrorMessage := fmt.Sprintf( + "get task metadata directory: get task ARN: cannot find TaskID for TaskARN %s", mockTaskARN) + + assert.Equal(t, expectErrorMessage, err.Error()) +} + +func TestGetTaskMetadataDirSuccess(t *testing.T) { + mockTaskARN := validTaskARN + mockDataDir := dataDir + + path, err := getTaskMetadataDir(mockTaskARN, mockDataDir) + expectedPath := filepath.Join(mockDataDir, metadataJoinSuffix, "task-id") + + assert.Equal(t, expectedPath, path) + assert.NoError(t, err) +} diff --git a/agent/containermetadata/write_metadata_test.go b/agent/containermetadata/write_metadata_test.go new file mode 100644 index 00000000000..add59b5d900 --- /dev/null +++ b/agent/containermetadata/write_metadata_test.go @@ -0,0 +1,48 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "fmt" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func writeSetup(t *testing.T) (oswrapper.File, func()) { + ctrl := gomock.NewController(t) + mockFile := mock_oswrapper.NewMockFile() + return mockFile, ctrl.Finish +} + +// TestWriteInvalidARN checks case where task ARN passed in is invalid +func TestWriteInvalidARN(t *testing.T) { + _, done := writeSetup(t) + defer done() + + mockData := []byte("") + mockTaskARN := invalidTaskARN + mockContainerName := containerName + mockDataDir := dataDir + expectErrorMessage := fmt.Sprintf("write to metadata file for task %s container %s: get metdata file path of task %s container %s: get task ARN: invalid TaskARN %s", mockTaskARN, mockContainerName, mockTaskARN, mockContainerName, mockTaskARN) + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + assert.Equal(t, expectErrorMessage, err.Error()) +} diff --git a/agent/containermetadata/write_metadata_unix.go b/agent/containermetadata/write_metadata_unix.go new file mode 100644 index 00000000000..f3305ebfba1 --- /dev/null +++ b/agent/containermetadata/write_metadata_unix.go @@ -0,0 +1,90 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + + "github.com/cihub/seelog" + "github.com/pborman/uuid" +) + +const ( + mountPoint = "/opt/ecs/metadata" + tempFile = "temp_metadata_file" + bindMode = "Z" + selinuxSecurityOption = "selinux" +) + +// createBindsEnv will do the appropriate formatting to add a new mount in a container's HostConfig +// and add the metadata file path as an environment variable ECS_CONTAINER_METADATA_FILE +// We add an additional uuid to the path to ensure it does not conflict with user mounts +func createBindsEnv(binds []string, env []string, dataDirOnHost string, metadataDirectoryPath string, dockerSecurityOptions []string) ([]string, []string) { + selinuxEnabled := false + for _, option := range dockerSecurityOptions { + if option == selinuxSecurityOption { + selinuxEnabled = true + } + } + + randID := uuid.New() + instanceBind := fmt.Sprintf(`%s/%s:%s/%s`, dataDirOnHost, metadataDirectoryPath, mountPoint, randID) + if selinuxEnabled { + seelog.Info("Selinux is enabled on docker, mounting data directory in Z mode") + instanceBind = fmt.Sprintf(`%s:%s`, instanceBind, bindMode) + } + metadataEnvVariable := fmt.Sprintf("%s=%s/%s/%s", metadataEnvironmentVariable, mountPoint, randID, metadataFile) + binds = append(binds, instanceBind) + env = append(env, metadataEnvVariable) + return binds, env +} + +var rename = os.Rename + +var TempFile = func(dir, pattern string) (oswrapper.File, error) { + return ioutil.TempFile(dir, pattern) +} + +// writeToMetadata puts the metadata into JSON format and writes into +// the metadata file +func writeToMetadataFile(data []byte, taskARN string, containerName string, dataDir string) error { + metadataFileDir, err := getMetadataFilePath(taskARN, containerName, dataDir) + // Boundary case if file path is bad (Such as if task arn is incorrectly formatted) + if err != nil { + return fmt.Errorf("write to 
metadata file for task %s container %s: %v", taskARN, containerName, err) + } + metadataFileName := filepath.Join(metadataFileDir, metadataFile) + + temp, err := TempFile(metadataFileDir, tempFile) + if err != nil { + return err + } + defer temp.Close() + _, err = temp.Write(data) + if err != nil { + return err + } + err = temp.Chmod(metadataPerm) + if err != nil { + return err + } + return rename(temp.Name(), metadataFileName) +} diff --git a/agent/containermetadata/write_metadata_unix_test.go b/agent/containermetadata/write_metadata_unix_test.go new file mode 100644 index 00000000000..2a06e1f2e04 --- /dev/null +++ b/agent/containermetadata/write_metadata_unix_test.go @@ -0,0 +1,148 @@ +// +build unit,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package containermetadata + +import ( + "errors" + "fmt" + "os" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + + "github.com/stretchr/testify/assert" +) + +// TestWriteTempFileFail checks case where temp file cannot be made +func TestWriteTempFileFail(t *testing.T) { + _, done := writeSetup(t) + defer done() + + mockData := []byte("") + mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + oTempFile := TempFile + TempFile = func(dir, pattern string) (oswrapper.File, error) { + return nil, errors.New("temp file fail") + } + defer func() { + TempFile = oTempFile + }() + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + expectErrorMessage := "temp file fail" + + assert.Error(t, err) + assert.Equal(t, expectErrorMessage, err.Error()) +} + +// TestWriteFileWriteFail checks case where write to file fails +func TestWriteFileWriteFail(t *testing.T) { + mockFile, done := writeSetup(t) + defer done() + + mockData := []byte("") + mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + mockFile.(*mock_oswrapper.MockFile).WriteImpl = func(bytes []byte) (i int, e error) { + return 0, errors.New("write fail") + } + + oTempFile := TempFile + TempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + TempFile = oTempFile + }() + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + expectErrorMessage := "write fail" + + assert.Error(t, err) + assert.Equal(t, expectErrorMessage, err.Error()) +} + +// TestWriteChmodFail checks case where chmod fails +func TestWriteChmodFail(t *testing.T) { + mockFile, done := writeSetup(t) + defer done() + + mockFile.(*mock_oswrapper.MockFile).ChmodImpl = func(os.FileMode) error { + return errors.New("chmod fail") + } + + oTempFile := TempFile + 
TempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + TempFile = oTempFile + }() + + mockData := []byte("") + mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + expectErrorMessage := "chmod fail" + + assert.Error(t, err) + assert.Equal(t, expectErrorMessage, err.Error()) +} + +func TestCreateBindEnv(t *testing.T) { + mockBinds := []string{} + mockEnv := []string{} + mockDataDirOnHost := "" + mockMetadataDirectoryPath := "" + expectedBindMode := fmt.Sprintf(`:%s`, bindMode) + + testcases := []struct { + name string + securityOptions []string + selinuxEnabled bool + }{ + { + name: "Selinux Enabled Bind Mode", + securityOptions: []string{"selinux"}, + selinuxEnabled: true, + }, + { + name: "Selinux Disabled Bind Mode", + securityOptions: []string{""}, + selinuxEnabled: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + binds, _ := createBindsEnv(mockBinds, mockEnv, mockDataDirOnHost, mockMetadataDirectoryPath, tc.securityOptions) + actualBindMode := binds[0][len(binds[0])-2:] + if tc.selinuxEnabled { + assert.Equal(t, expectedBindMode, actualBindMode) + } else { + assert.NotEqual(t, expectedBindMode, actualBindMode) + } + }) + } +} diff --git a/agent/containermetadata/write_metadata_windows.go b/agent/containermetadata/write_metadata_windows.go new file mode 100644 index 00000000000..5765cf42d74 --- /dev/null +++ b/agent/containermetadata/write_metadata_windows.go @@ -0,0 +1,68 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + + "github.com/pborman/uuid" +) + +const ( + mountPoint = `C:\ProgramData\Amazon\ECS\metadata` +) + +// createBindsEnv will do the appropriate formatting to add a new mount in a container's HostConfig +// and add the metadata file path as an environment variable ECS_CONTAINER_METADATA_FILE +func createBindsEnv(binds []string, env []string, dataDirOnHost string, metadataDirectoryPath string, dockerSecurityOptions []string) ([]string, []string) { + randID := uuid.New() + instanceBind := fmt.Sprintf(`%s:%s\%s`, metadataDirectoryPath, mountPoint, randID) + metadataEnvVariable := fmt.Sprintf(`%s=%s\%s\%s`, metadataEnvironmentVariable, mountPoint, randID, metadataFile) + binds = append(binds, instanceBind) + env = append(env, metadataEnvVariable) + return binds, env +} + +var openFile = func(name string, flag int, perm os.FileMode) (oswrapper.File, error) { + return os.OpenFile(name, flag, perm) +} + +// writeToMetadata puts the metadata into JSON format and writes into +// the metadata file +func writeToMetadataFile(data []byte, taskARN string, containerName string, dataDir string) error { + metadataFileDir, err := getMetadataFilePath(taskARN, containerName, dataDir) + // Boundary case if file path is bad (Such as if task arn is incorrectly formatted) + if err != nil { + return fmt.Errorf("write to metadata file for task %s container %s: %v", taskARN, containerName, err) + } + metadataFileName := filepath.Join(metadataFileDir, metadataFile) + + file, err := openFile(metadataFileName, os.O_WRONLY|os.O_CREATE, metadataPerm) + if err != nil { + return err + } + defer file.Close() + + _, err = file.Write(data) + if 
err != nil { + return err + } + return file.Sync() +} diff --git a/agent/containermetadata/write_metadata_windows_test.go b/agent/containermetadata/write_metadata_windows_test.go new file mode 100644 index 00000000000..973644ac461 --- /dev/null +++ b/agent/containermetadata/write_metadata_windows_test.go @@ -0,0 +1,83 @@ +// +build unit,windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package containermetadata + +import ( + "errors" + "os" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + + "github.com/stretchr/testify/assert" +) + +// TestWriteOpenFileFail checks case where open file fails and does not return a NotExist error +func TestWriteOpenFileFail(t *testing.T) { + _, done := writeSetup(t) + defer done() + + mockData := []byte("") + mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + mockOpenErr := errors.New("does exist") + + tempOpenFile := openFile + openFile = func(name string, flag int, perm os.FileMode) (oswrapper.File, error) { + return nil, mockOpenErr + } + defer func() { + openFile = tempOpenFile + }() + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + + expectErrorMessage := "does exist" + + assert.Error(t, err) + assert.Equal(t, expectErrorMessage, err.Error()) +} + +// TestWriteFileWrtieFail checks case where we fail to write to file 
+func TestWriteFileWriteFail(t *testing.T) { + mockFile, done := writeSetup(t) + defer done() + + mockData := []byte("") + mockTaskARN := validTaskARN + mockContainerName := containerName + mockDataDir := dataDir + + mockFile.(*mock_oswrapper.MockFile).WriteImpl = func(bytes []byte) (i int, e error) { + return 0, errors.New("write fail") + } + tempOpenFile := openFile + openFile = func(name string, flag int, perm os.FileMode) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + openFile = tempOpenFile + }() + + err := writeToMetadataFile(mockData, mockTaskARN, mockContainerName, mockDataDir) + + expectErrorMessage := "write fail" + + assert.Error(t, err) + assert.Equal(t, expectErrorMessage, err.Error()) +} diff --git a/agent/credentials/generate_mocks.go b/agent/credentials/generate_mocks.go new file mode 100644 index 00000000000..d7f43f9b37a --- /dev/null +++ b/agent/credentials/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentials + +//go:generate mockgen -destination=mocks/credentials_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/credentials Manager diff --git a/agent/credentials/interface.go b/agent/credentials/interface.go new file mode 100644 index 00000000000..a86a1be5f60 --- /dev/null +++ b/agent/credentials/interface.go @@ -0,0 +1,23 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentials + +// Manager is responsible for saving and retrieving credentials. A single +// instance of the credentials manager is created in the agent, and shared +// between the task engine, acs and credentials handlers +type Manager interface { + SetTaskCredentials(*TaskIAMRoleCredentials) error + GetTaskCredentials(string) (TaskIAMRoleCredentials, bool) + RemoveCredentials(string) +} diff --git a/agent/credentials/manager.go b/agent/credentials/manager.go new file mode 100644 index 00000000000..18ff5046cb4 --- /dev/null +++ b/agent/credentials/manager.go @@ -0,0 +1,164 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentials + +import ( + "fmt" + "sync" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" +) + +const ( + // CredentialsIDQueryParameterName is the name of GET query parameter for the task ID. 
+ CredentialsIDQueryParameterName = "id" + + // CredentialsPath is the path to the credentials handler. + CredentialsPath = V2CredentialsPath + + V1CredentialsPath = "/v1/credentials" + V2CredentialsPath = "/v2/credentials" + + // credentialsEndpointRelativeURIFormat defines the relative URI format + // for the credentials endpoint. The place holders are the API Path and + // credentials ID + credentialsEndpointRelativeURIFormat = v2CredentialsEndpointRelativeURIFormat + + v1CredentialsEndpointRelativeURIFormat = "%s?" + CredentialsIDQueryParameterName + "=%s" + v2CredentialsEndpointRelativeURIFormat = "%s/%s" + + // ApplicationRoleType specifies the credentials that are to be used by the + // task itself + ApplicationRoleType = "TaskApplication" + + // ExecutionRoleType specifies the credentials used for non task application + // uses + ExecutionRoleType = "TaskExecution" +) + +// IAMRoleCredentials is used to save credentials sent by ACS +type IAMRoleCredentials struct { + CredentialsID string `json:"-"` + RoleArn string `json:"RoleArn"` + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string `json:"SecretAccessKey"` + SessionToken string `json:"Token"` + // Expiration is a string instead of a timestamp. This is to avoid any loss of context + // while marshalling/unmarshalling this field in the agent. The agent just echo's + // whatever is sent by the backend. 
+ Expiration string `json:"Expiration"` + // RoleType distinguishes between TaskRole and ExecutionRole for the + // credentials that are sent from the backend + RoleType string `json:"-"` +} + +// TaskIAMRoleCredentials wraps the task arn and the credentials object for the same +type TaskIAMRoleCredentials struct { + ARN string + IAMRoleCredentials IAMRoleCredentials + lock sync.RWMutex +} + +// GetIAMRoleCredentials returns the IAM role credentials in the task IAM role struct +func (role *TaskIAMRoleCredentials) GetIAMRoleCredentials() IAMRoleCredentials { + role.lock.RLock() + defer role.lock.RUnlock() + + return role.IAMRoleCredentials +} + +// GenerateCredentialsEndpointRelativeURI generates the relative URI for the +// credentials endpoint, for a given task id. +func (roleCredentials *IAMRoleCredentials) GenerateCredentialsEndpointRelativeURI() string { + return fmt.Sprintf(credentialsEndpointRelativeURIFormat, CredentialsPath, roleCredentials.CredentialsID) +} + +// credentialsManager implements the Manager interface. 
It is used to +// save credentials sent from ACS and to retrieve credentials from +// the credentials endpoint +type credentialsManager struct { + // idToTaskCredentials maps credentials id to its corresponding TaskIAMRoleCredentials object + idToTaskCredentials map[string]TaskIAMRoleCredentials + taskCredentialsLock sync.RWMutex +} + +// IAMRoleCredentialsFromACS translates ecsacs.IAMRoleCredentials object to +// api.IAMRoleCredentials +func IAMRoleCredentialsFromACS(roleCredentials *ecsacs.IAMRoleCredentials, roleType string) IAMRoleCredentials { + return IAMRoleCredentials{ + CredentialsID: aws.StringValue(roleCredentials.CredentialsId), + SessionToken: aws.StringValue(roleCredentials.SessionToken), + RoleArn: aws.StringValue(roleCredentials.RoleArn), + AccessKeyID: aws.StringValue(roleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(roleCredentials.SecretAccessKey), + Expiration: aws.StringValue(roleCredentials.Expiration), + RoleType: roleType, + } +} + +// NewManager creates a new credentials manager object +func NewManager() Manager { + return &credentialsManager{ + idToTaskCredentials: make(map[string]TaskIAMRoleCredentials), + } +} + +// SetTaskCredentials adds or updates credentials in the credentials manager +func (manager *credentialsManager) SetTaskCredentials(taskCredentials *TaskIAMRoleCredentials) error { + manager.taskCredentialsLock.Lock() + defer manager.taskCredentialsLock.Unlock() + + credentials := taskCredentials.IAMRoleCredentials + // Validate that credentials id is not empty + if credentials.CredentialsID == "" { + return fmt.Errorf("CredentialsId is empty") + } + + // Validate that task arn is not empty + if taskCredentials.ARN == "" { + return fmt.Errorf("task ARN is empty") + } + + manager.idToTaskCredentials[credentials.CredentialsID] = TaskIAMRoleCredentials{ + ARN: taskCredentials.ARN, + IAMRoleCredentials: taskCredentials.GetIAMRoleCredentials(), + } + + return nil +} + +// GetTaskCredentials retrieves credentials for a 
given credentials id +func (manager *credentialsManager) GetTaskCredentials(id string) (TaskIAMRoleCredentials, bool) { + manager.taskCredentialsLock.RLock() + defer manager.taskCredentialsLock.RUnlock() + + taskCredentials, ok := manager.idToTaskCredentials[id] + + if !ok { + return TaskIAMRoleCredentials{}, ok + } + return TaskIAMRoleCredentials{ + ARN: taskCredentials.ARN, + IAMRoleCredentials: taskCredentials.GetIAMRoleCredentials(), + }, ok +} + +// RemoveCredentials removes credentials from the credentials manager +func (manager *credentialsManager) RemoveCredentials(id string) { + manager.taskCredentialsLock.Lock() + defer manager.taskCredentialsLock.Unlock() + + delete(manager.idToTaskCredentials, id) +} diff --git a/agent/credentials/manager_test.go b/agent/credentials/manager_test.go new file mode 100644 index 00000000000..f184bf528f3 --- /dev/null +++ b/agent/credentials/manager_test.go @@ -0,0 +1,173 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package credentials + +import ( + "fmt" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +// TestIAMRoleCredentialsFromACS tests if credentials sent from ACS can be +// represented correctly as IAMRoleCredentials +func TestIAMRoleCredentialsFromACS(t *testing.T) { + acsCredentials := &ecsacs.IAMRoleCredentials{ + CredentialsId: aws.String("credsId"), + AccessKeyId: aws.String("keyId"), + Expiration: aws.String("soon"), + RoleArn: aws.String("roleArn"), + SecretAccessKey: aws.String("OhhSecret"), + SessionToken: aws.String("sessionToken"), + } + + roleType := "roleType" + + credentials := IAMRoleCredentialsFromACS(acsCredentials, roleType) + expectedCredentials := IAMRoleCredentials{ + CredentialsID: "credsId", + AccessKeyID: "keyId", + Expiration: "soon", + RoleArn: "roleArn", + SecretAccessKey: "OhhSecret", + SessionToken: "sessionToken", + RoleType: "roleType", + } + assert.Equal(t, credentials, expectedCredentials, "Mismatch between expected and constructed credentials") +} + +// TestGetTaskCredentialsUnknownId tests if GetTaskCredentials returns a false value +// when credentials for a given id are not be found in the engine +func TestGetTaskCredentialsUnknownId(t *testing.T) { + manager := NewManager() + _, ok := manager.GetTaskCredentials("id") + if ok { + t.Error("GetTaskCredentials should return false for non existing id") + } +} + +// TestSetTaskCredentialsEmptyTaskCredentials tests if credentials manager returns an +// error when invalid credentials are used to set credentials +func TestSetTaskCredentialsEmptyTaskCredentials(t *testing.T) { + manager := NewManager() + err := manager.SetTaskCredentials(&TaskIAMRoleCredentials{}) + assert.Error(t, err, "Expected error adding empty task credentials") +} + +// TestSetTaskCredentialsNoCredentialsId tests if credentials manager returns an +// error when credentials object with no credentials id is used to 
set credentials +func TestSetTaskCredentialsNoCredentialsId(t *testing.T) { + manager := NewManager() + err := manager.SetTaskCredentials(&TaskIAMRoleCredentials{ARN: "t1", IAMRoleCredentials: IAMRoleCredentials{}}) + assert.Error(t, err, "Expected error adding credentials payload without credential ID") +} + +// TestSetTaskCredentialsNoTaskArn tests if credentials manager returns an +// error when credentials object with no task arn used to set credentials +func TestSetTaskCredentialsNoTaskArn(t *testing.T) { + manager := NewManager() + err := manager.SetTaskCredentials(&TaskIAMRoleCredentials{IAMRoleCredentials: IAMRoleCredentials{CredentialsID: "id"}}) + assert.Error(t, err, "Expected error adding credentials payload without task ARN") +} + +// TestSetAndGetTaskCredentialsHappyPath tests the happy path workflow for setting +// and getting credentials +func TestSetAndGetTaskCredentialsHappyPath(t *testing.T) { + manager := NewManager() + credentials := TaskIAMRoleCredentials{ + ARN: "t1", + IAMRoleCredentials: IAMRoleCredentials{ + RoleArn: "r1", + AccessKeyID: "akid1", + SecretAccessKey: "skid1", + SessionToken: "stkn", + Expiration: "ts", + CredentialsID: "cid1", + }, + } + + err := manager.SetTaskCredentials(&credentials) + assert.NoError(t, err, "Error adding credentials") + + credentialsFromManager, ok := manager.GetTaskCredentials("cid1") + assert.True(t, ok, "GetTaskCredentials returned false for existing credentials") + assert.Equal(t, credentials, credentialsFromManager, "Mismatch between added and retrieved credentials") + + updatedCredentials := TaskIAMRoleCredentials{ + ARN: "t1", + IAMRoleCredentials: IAMRoleCredentials{ + RoleArn: "r1", + AccessKeyID: "akid2", + SecretAccessKey: "skid2", + SessionToken: "stkn2", + Expiration: "ts2", + CredentialsID: "cid1", + }, + } + err = manager.SetTaskCredentials(&updatedCredentials) + assert.NoError(t, err, "Error updating credentials") + credentialsFromManager, ok = manager.GetTaskCredentials("cid1") + + 
assert.True(t, ok, "GetTaskCredentials returned false for existing credentials") + assert.Equal(t, updatedCredentials, credentialsFromManager, "Mismatch between added and retrieved credentials") +} + +// TestGenerateCredentialsEndpointRelativeURI tests if the relative credentials endpoint +// URI is generated correctly +func TestGenerateCredentialsEndpointRelativeURI(t *testing.T) { + credentials := IAMRoleCredentials{ + RoleArn: "r1", + AccessKeyID: "akid1", + SecretAccessKey: "skid1", + SessionToken: "stkn", + Expiration: "ts", + CredentialsID: "cid1", + } + generatedURI := credentials.GenerateCredentialsEndpointRelativeURI() + expectedURI := fmt.Sprintf(credentialsEndpointRelativeURIFormat, CredentialsPath, "cid1") + assert.Equal(t, expectedURI, generatedURI, "Credentials endpoint mismatch") +} + +// TestRemoveExistingCredentials tests that GetTaskCredentials returns false when +// credentials are removed from the credentials manager +func TestRemoveExistingCredentials(t *testing.T) { + manager := NewManager() + credentials := TaskIAMRoleCredentials{ + ARN: "t1", + IAMRoleCredentials: IAMRoleCredentials{ + RoleArn: "r1", + AccessKeyID: "akid1", + SecretAccessKey: "skid1", + SessionToken: "stkn", + Expiration: "ts", + CredentialsID: "cid1", + }, + } + err := manager.SetTaskCredentials(&credentials) + assert.NoError(t, err, "Error adding credentials") + + credentialsFromManager, ok := manager.GetTaskCredentials("cid1") + assert.True(t, ok, "GetTaskCredentials returned false for existing credentials") + assert.Equal(t, credentials, credentialsFromManager, "Mismatch between added and retrieved credentials") + + manager.RemoveCredentials("cid1") + _, ok = manager.GetTaskCredentials("cid1") + if ok { + t.Error("Expected GetTaskCredentials to return false for removed credentials") + } +} diff --git a/agent/credentials/mocks/credentials_mocks.go b/agent/credentials/mocks/credentials_mocks.go new file mode 100644 index 00000000000..946c14780e4 --- /dev/null +++ 
b/agent/credentials/mocks/credentials_mocks.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/credentials (interfaces: Manager) + +// Package mock_credentials is a generated GoMock package. +package mock_credentials + +import ( + reflect "reflect" + + credentials "github.com/aws/amazon-ecs-agent/agent/credentials" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// GetTaskCredentials mocks base method +func (m *MockManager) GetTaskCredentials(arg0 string) (credentials.TaskIAMRoleCredentials, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskCredentials", arg0) + ret0, _ := ret[0].(credentials.TaskIAMRoleCredentials) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetTaskCredentials 
indicates an expected call of GetTaskCredentials +func (mr *MockManagerMockRecorder) GetTaskCredentials(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskCredentials", reflect.TypeOf((*MockManager)(nil).GetTaskCredentials), arg0) +} + +// RemoveCredentials mocks base method +func (m *MockManager) RemoveCredentials(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveCredentials", arg0) +} + +// RemoveCredentials indicates an expected call of RemoveCredentials +func (mr *MockManagerMockRecorder) RemoveCredentials(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCredentials", reflect.TypeOf((*MockManager)(nil).RemoveCredentials), arg0) +} + +// SetTaskCredentials mocks base method +func (m *MockManager) SetTaskCredentials(arg0 *credentials.TaskIAMRoleCredentials) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetTaskCredentials", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetTaskCredentials indicates an expected call of SetTaskCredentials +func (mr *MockManagerMockRecorder) SetTaskCredentials(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTaskCredentials", reflect.TypeOf((*MockManager)(nil).SetTaskCredentials), arg0) +} diff --git a/agent/data/client.go b/agent/data/client.go new file mode 100644 index 00000000000..78ad03a23b1 --- /dev/null +++ b/agent/data/client.go @@ -0,0 +1,143 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "path/filepath" + "sync" + + "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + bolt "github.com/etcd-io/bbolt" +) + +const ( + dbName = "agent.db" + dbMode = 0600 + + containersBucketName = "containers" + tasksBucketName = "tasks" + imagesBucketName = "images" + eniAttachmentsBucketName = "eniattachments" + metadataBucketName = "metadata" +) + +var ( + dbClient Client + once sync.Once + + buckets = []string{ + imagesBucketName, + containersBucketName, + tasksBucketName, + eniAttachmentsBucketName, + metadataBucketName, + } +) + +// Client specifies the data management interface to persist and manage various kinds of data in the agent. +type Client interface { + // SaveContainer saves the data of a container. + SaveContainer(*container.Container) error + // SaveDockerContainer saves the data of a docker container. + // We have both SaveContainer and SaveDockerContainer so that a caller who doesn't have docker information + // of the container can save container with SaveContainer, while a caller who wants to save the docker + // information of the container can save it with SaveDockerContainer. + SaveDockerContainer(*container.DockerContainer) error + // DeleteContainer deletes the data of a container. + DeleteContainer(string) error + // GetContainers gets the data of all the containers. + GetContainers() ([]*container.DockerContainer, error) + + // SaveTask saves the data of a task. + SaveTask(*task.Task) error + // DeleteTask deletes the data of a task. + DeleteTask(string) error + // GetTasks gets the data of all the tasks. 
+ GetTasks() ([]*task.Task, error) + + // SaveImageState saves the data of an image state. + SaveImageState(*image.ImageState) error + // DeleteImageState deletes the data of an image state. + DeleteImageState(string) error + // GetImageStates gets the data of all the image states. + GetImageStates() ([]*image.ImageState, error) + + // SaveENIAttachment saves the data of an ENI attachment. + SaveENIAttachment(*eni.ENIAttachment) error + // DeleteENIAttachment deletes the data of an ENI atttachment. + DeleteENIAttachment(string) error + // GetENIAttachments gets the data of all the ENI attachment. + GetENIAttachments() ([]*eni.ENIAttachment, error) + + // SaveMetadata saves a key value pair of metadata. + SaveMetadata(string, string) error + // GetMetadata gets the value of a certain kind of metadata. + GetMetadata(string) (string, error) + + // Close closes the connection to database. + Close() error +} + +// client implements the Client interface using boltdb as the backing data store. +type client struct { + db *bolt.DB +} + +// New returns a data client that implements the Client interface with boltdb. +func New(dataDir string) (Client, error) { + var err error + once.Do(func() { + dbClient, err = setup(dataDir) + }) + if err != nil { + return nil, err + } + return dbClient, nil +} + +// NewWithSetup returns a data client that implements the Client interface with boltdb. +// It always runs the db setup. Used for testing. +func NewWithSetup(dataDir string) (Client, error) { + return setup(dataDir) +} + +// setup initiates the boltdb client and makes sure the buckets we use are created. 
+func setup(dataDir string) (*client, error) { + db, err := bolt.Open(filepath.Join(dataDir, dbName), dbMode, nil) + err = db.Update(func(tx *bolt.Tx) error { + for _, b := range buckets { + _, err = tx.CreateBucketIfNotExists([]byte(b)) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return nil, err + } + return &client{ + db: db, + }, nil +} + +// Close closes the boltdb connection. +func (c *client) Close() error { + return c.db.Close() +} diff --git a/agent/data/client_test.go b/agent/data/client_test.go new file mode 100644 index 00000000000..c7277bd2ead --- /dev/null +++ b/agent/data/client_test.go @@ -0,0 +1,53 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + bolt "github.com/etcd-io/bbolt" + "github.com/stretchr/testify/require" +) + +func newTestClient(t *testing.T) (Client, func()) { + testDir, err := ioutil.TempDir("", "agent_data_unit_test") + require.NoError(t, err) + + testDB, err := bolt.Open(filepath.Join(testDir, dbName), dbMode, nil) + require.NoError(t, err) + require.NoError(t, testDB.Update(func(tx *bolt.Tx) error { + for _, b := range buckets { + _, err = tx.CreateBucketIfNotExists([]byte(b)) + if err != nil { + return err + } + } + + return nil + })) + testClient := &client{ + db: testDB, + } + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} diff --git a/agent/data/container_client.go b/agent/data/container_client.go new file mode 100644 index 00000000000..2d12bbcad32 --- /dev/null +++ b/agent/data/container_client.go @@ -0,0 +1,98 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "encoding/json" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/utils" + + bolt "github.com/etcd-io/bbolt" + "github.com/pkg/errors" +) + +// SaveDockerContainer saves a docker container to the container bucket. 
+func (c *client) SaveDockerContainer(container *apicontainer.DockerContainer) error { + id, err := GetContainerID(container.Container) + if err != nil { + return errors.Wrap(err, "failed to generate database id") + } + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(containersBucketName)) + return putObject(b, id, container) + }) +} + +// SaveContainer saves an apicontainer.Container to the container bucket. If a corresponding +// apicontainer.DockerContainer exists, this updates the Container part of it; otherwise, a new +// apicontainer.DockerContainer is created and saved. +func (c *client) SaveContainer(container *apicontainer.Container) error { + id, err := GetContainerID(container) + if err != nil { + return errors.Wrap(err, "failed to generate database id") + } + + dockerContainer, err := c.getDockerContainer(id) + if err != nil { + dockerContainer = &apicontainer.DockerContainer{} + } + dockerContainer.Container = container + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(containersBucketName)) + return putObject(b, id, dockerContainer) + }) +} + +func (c *client) getDockerContainer(id string) (*apicontainer.DockerContainer, error) { + container := &apicontainer.DockerContainer{} + err := c.db.View(func(tx *bolt.Tx) error { + return getObject(tx, containersBucketName, id, container) + }) + return container, err +} + +// DeleteContainer deletes a container from the container bucket. +func (c *client) DeleteContainer(id string) error { + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(containersBucketName)) + return b.Delete([]byte(id)) + }) +} + +// GetContainers returns all the containers in the container bucket. 
+func (c *client) GetContainers() ([]*apicontainer.DockerContainer, error) { + var containers []*apicontainer.DockerContainer + err := c.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(containersBucketName)) + return walk(bucket, func(id string, data []byte) error { + container := apicontainer.DockerContainer{} + if err := json.Unmarshal(data, &container); err != nil { + return err + } + containers = append(containers, &container) + return nil + }) + }) + return containers, err +} + +// GetContainerID returns a unique ID for a container to use as key when saving to DB. +func GetContainerID(c *apicontainer.Container) (string, error) { + taskID, err := utils.GetTaskID(c.GetTaskARN()) + if err != nil { + return "", err + } + return taskID + "-" + c.Name, nil +} diff --git a/agent/data/container_client_test.go b/agent/data/container_client_test.go new file mode 100644 index 00000000000..57fbc32c96c --- /dev/null +++ b/agent/data/container_client_test.go @@ -0,0 +1,104 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testDockerID = "test-docker-id" + testDockerName = "test-docker-name" + testContainerName = "test-name" + testContainerName2 = "test-name-2" +) + +func TestManageContainers(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + // Test saving a container with SaveDockerContainer and updating it with SaveContainer. + testDockerContainer := &apicontainer.DockerContainer{ + DockerID: testDockerID, + DockerName: testDockerName, + Container: &apicontainer.Container{ + Name: testContainerName, + TaskARNUnsafe: testTaskArn, + }, + } + require.NoError(t, testClient.SaveDockerContainer(testDockerContainer)) + testDockerContainer.Container.SetKnownStatus(apicontainerstatus.ContainerRunning) + require.NoError(t, testClient.SaveContainer(testDockerContainer.Container)) + res, err := testClient.GetContainers() + require.NoError(t, err) + assert.Len(t, res, 1) + assert.Equal(t, apicontainerstatus.ContainerRunning, res[0].Container.GetKnownStatus()) + assert.Equal(t, testDockerID, res[0].DockerID) + assert.Equal(t, testDockerName, res[0].DockerName) + + // Test saving a container with SaveContainer. + testContainer := &apicontainer.Container{ + Name: testContainerName2, + TaskARNUnsafe: testTaskArn, + } + require.NoError(t, testClient.SaveContainer(testContainer)) + res, err = testClient.GetContainers() + require.NoError(t, err) + assert.Len(t, res, 2) + + // Test deleting containers. 
+ require.NoError(t, testClient.DeleteContainer("abc-test-name")) + require.NoError(t, testClient.DeleteContainer("abc-test-name-2")) + res, err = testClient.GetContainers() + require.NoError(t, err) + assert.Len(t, res, 0) +} + +func TestSaveContainerInvalidID(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + testDockerContainer := &apicontainer.DockerContainer{ + DockerID: testDockerID, + DockerName: testDockerName, + Container: &apicontainer.Container{ + Name: testContainerName, + TaskARNUnsafe: "invalid-arn", + }, + } + assert.Error(t, testClient.SaveDockerContainer(testDockerContainer)) + assert.Error(t, testClient.SaveContainer(testDockerContainer.Container)) +} + +func TestGetContainerID(t *testing.T) { + c := &apicontainer.Container{ + TaskARNUnsafe: testTaskArn, + Name: testContainerName, + } + id, err := GetContainerID(c) + require.NoError(t, err) + assert.Equal(t, "abc-test-name", id) + + c.TaskARNUnsafe = "invalid" + _, err = GetContainerID(c) + assert.Error(t, err) +} diff --git a/agent/data/eniattachment_client.go b/agent/data/eniattachment_client.go new file mode 100644 index 00000000000..e51960eac30 --- /dev/null +++ b/agent/data/eniattachment_client.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + "encoding/json" + + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/utils" + + bolt "github.com/etcd-io/bbolt" + "github.com/pkg/errors" +) + +func (c *client) SaveENIAttachment(eni *apieni.ENIAttachment) error { + id, err := utils.GetENIAttachmentId(eni.AttachmentARN) + if err != nil { + return errors.Wrap(err, "failed to generate database id") + } + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(eniAttachmentsBucketName)) + return putObject(b, id, eni) + }) +} + +func (c *client) DeleteENIAttachment(id string) error { + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(eniAttachmentsBucketName)) + return b.Delete([]byte(id)) + }) +} + +func (c *client) GetENIAttachments() ([]*apieni.ENIAttachment, error) { + var eniAttachments []*apieni.ENIAttachment + err := c.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(eniAttachmentsBucketName)) + return walk(bucket, func(id string, data []byte) error { + eniAttachment := apieni.ENIAttachment{} + if err := json.Unmarshal(data, &eniAttachment); err != nil { + return err + } + eniAttachments = append(eniAttachments, &eniAttachment) + return nil + }) + }) + return eniAttachments, err +} diff --git a/agent/data/eniattachment_client_test.go b/agent/data/eniattachment_client_test.go new file mode 100644 index 00000000000..287ca030134 --- /dev/null +++ b/agent/data/eniattachment_client_test.go @@ -0,0 +1,76 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/api/eni" + + "github.com/stretchr/testify/assert" +) + +const ( + testAttachmentArn = "arn:aws:ecs:us-west-2:167933679560:attachment/test-arn" + testAttachmentArn2 = "arn:aws:ecs:us-west-2:167933679560:attachment/test-arn2" +) + +func TestManageENIAttachments(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + testEniAttachment := &eni.ENIAttachment{ + AttachmentARN: testAttachmentArn, + AttachStatusSent: false, + } + + assert.NoError(t, testClient.SaveENIAttachment(testEniAttachment)) + testEniAttachment.SetSentStatus() + assert.NoError(t, testClient.SaveENIAttachment(testEniAttachment)) + res, err := testClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 1) + assert.Equal(t, true, res[0].AttachStatusSent) + assert.Equal(t, testAttachmentArn, res[0].AttachmentARN) + + testEniAttachment2 := &eni.ENIAttachment{ + AttachmentARN: testAttachmentArn2, + AttachStatusSent: true, + } + + assert.NoError(t, testClient.SaveENIAttachment(testEniAttachment2)) + res, err = testClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 2) + + assert.NoError(t, testClient.DeleteENIAttachment("test-arn")) + assert.NoError(t, testClient.DeleteENIAttachment("test-arn2")) + res, err = testClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 0) +} + +func TestSaveENIAttachmentInvalidID(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + testEniAttachment := &eni.ENIAttachment{ + AttachmentARN: "invalid-arn", + AttachStatusSent: false, + } + + assert.Error(t, testClient.SaveENIAttachment(testEniAttachment)) +} diff --git a/agent/data/helpers.go b/agent/data/helpers.go new file mode 100644 index 00000000000..0ba52279d6d --- /dev/null +++ b/agent/data/helpers.go @@ -0,0 +1,63 @@ +// Copyright 
Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "encoding/json" + + bolt "github.com/etcd-io/bbolt" + "github.com/pkg/errors" +) + +func putObject(bucket *bolt.Bucket, key string, obj interface{}) error { + keyBytes := []byte(key) + data, err := json.Marshal(obj) + if err != nil { + return errors.Wrapf(err, "failed to marshal object with key %q", key) + } + + if err := bucket.Put(keyBytes, data); err != nil { + return errors.Wrapf(err, "failed to insert object with key %q", key) + } + + return nil +} + +func getObject(tx *bolt.Tx, bucketName, id string, out interface{}) error { + bucket := tx.Bucket([]byte(bucketName)) + data := bucket.Get([]byte(id)) + if data == nil { + return errors.Errorf("object %s not found in bucket %s", id, bucketName) + } + + if out != nil { + if err := json.Unmarshal(data, out); err != nil { + return errors.Wrapf(err, "failed to unmarshal object with key %q", id) + } + } + + return nil +} + +func walk(bucket *bolt.Bucket, callback func(id string, data []byte) error) error { + cursor := bucket.Cursor() + + for id, data := cursor.First(); id != nil; id, data = cursor.Next() { + if err := callback(string(id), data); err != nil { + return err + } + } + + return nil +} diff --git a/agent/data/helpers_test.go b/agent/data/helpers_test.go new file mode 100644 index 00000000000..3ce1b8e537e --- /dev/null +++ b/agent/data/helpers_test.go @@ -0,0 +1,93 @@ +// +build unit + +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "testing" + + bolt "github.com/etcd-io/bbolt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testBucketName = "test" +) + +type testObjType struct { + Key string + Val string +} + +func setupHelpersTest(t *testing.T) (*bolt.DB, func()) { + testDir, err := ioutil.TempDir("", "agent_data_unit_test") + require.NoError(t, err) + db, err := bolt.Open(filepath.Join(testDir, dbName), dbMode, nil) + require.NoError(t, err) + require.NoError(t, db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte(testBucketName)) + return err + })) + + return db, func() { + require.NoError(t, db.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } +} + +func TestHelpers(t *testing.T) { + db, cleanup := setupHelpersTest(t) + defer cleanup() + + testObj := &testObjType{ + Key: "key", + Val: "test", + } + + require.NoError(t, db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(testBucketName)) + return putObject(b, testObj.Key, testObj) + })) + + assert.Error(t, db.Update(func(tx *bolt.Tx) error { + return getObject(tx, testBucketName, "xx", &testObjType{}) + })) + + res := &testObjType{} + assert.NoError(t, db.Update(func(tx *bolt.Tx) error { + return getObject(tx, testBucketName, testObj.Key, res) + })) + assert.Equal(t, testObj.Val, res.Val) + + var resArr []*testObjType + 
require.NoError(t, db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(testBucketName)) + return walk(b, func(id string, data []byte) error { + obj := &testObjType{} + if err := json.Unmarshal(data, &obj); err != nil { + return err + } + resArr = append(resArr, obj) + return nil + }) + })) + assert.Len(t, resArr, 1) + assert.Equal(t, testObj.Val, resArr[0].Val) +} diff --git a/agent/data/imagestate_client.go b/agent/data/imagestate_client.go new file mode 100644 index 00000000000..d615bc5ed9d --- /dev/null +++ b/agent/data/imagestate_client.go @@ -0,0 +1,57 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + "encoding/json" + + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + bolt "github.com/etcd-io/bbolt" + "github.com/pkg/errors" +) + +func (c *client) SaveImageState(img *image.ImageState) error { + id := img.GetImageID() + if id == "" { + return errors.New("failed to generate database image id") + } + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(imagesBucketName)) + return putObject(b, id, img) + }) +} + +func (c *client) DeleteImageState(id string) error { + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(imagesBucketName)) + return b.Delete([]byte(id)) + }) +} + +func (c *client) GetImageStates() ([]*image.ImageState, error) { + var imageStates []*image.ImageState + err := c.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(imagesBucketName)) + return walk(bucket, func(id string, data []byte) error { + imageState := image.ImageState{} + if err := json.Unmarshal(data, &imageState); err != nil { + return err + } + imageStates = append(imageStates, &imageState) + return nil + }) + }) + return imageStates, err +} diff --git a/agent/data/imagestate_client_test.go b/agent/data/imagestate_client_test.go new file mode 100644 index 00000000000..300588fa888 --- /dev/null +++ b/agent/data/imagestate_client_test.go @@ -0,0 +1,72 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/stretchr/testify/assert" +) + +const ( + testImageId = "test-id" + testImageId2 = "test-id2" + testImageName = "testName" + testImageName2 = "testName2" +) + +func TestManageImageStates(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + testImageState := &image.ImageState{ + Image: &image.Image{ + ImageID: testImageId, + Names: []string{testImageName}, + }, + PullSucceeded: false, + } + + assert.NoError(t, testClient.SaveImageState(testImageState)) + testImageState.SetPullSucceeded(true) + assert.NoError(t, testClient.SaveImageState(testImageState)) + res, err := testClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, res, 1) + assert.Equal(t, true, res[0].PullSucceeded) + assert.Equal(t, testImageId, res[0].Image.ImageID) + assert.Equal(t, testImageName, res[0].Image.Names[0]) + + testImageState2 := &image.ImageState{ + Image: &image.Image{ + ImageID: testImageId2, + Names: []string{testImageName2}, + }, + PullSucceeded: false, + } + assert.NoError(t, testClient.SaveImageState(testImageState2)) + res, err = testClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, res, 2) + + assert.NoError(t, testClient.DeleteImageState(testImageId)) + assert.NoError(t, testClient.DeleteImageState(testImageId2)) + res, err = testClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, res, 0) +} diff --git a/agent/data/metadata_client.go b/agent/data/metadata_client.go new file mode 100644 index 00000000000..ee81afdb52d --- /dev/null +++ b/agent/data/metadata_client.go @@ -0,0 +1,42 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + bolt "github.com/etcd-io/bbolt" +) + +const ( + AgentVersionKey = "agent-version" + AvailabilityZoneKey = "availability-zone" + ClusterNameKey = "cluster-name" + ContainerInstanceARNKey = "container-instance-arn" + EC2InstanceIDKey = "ec2-instance-id" + TaskManifestSeqNumKey = "task-manifest-seq-num" +) + +func (c *client) SaveMetadata(key, val string) error { + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(metadataBucketName)) + return putObject(b, key, val) + }) +} + +func (c *client) GetMetadata(key string) (string, error) { + var val string + err := c.db.View(func(tx *bolt.Tx) error { + return getObject(tx, metadataBucketName, key, &val) + }) + return val, err +} diff --git a/agent/data/metadata_client_test.go b/agent/data/metadata_client_test.go new file mode 100644 index 00000000000..eaed8a6af5e --- /dev/null +++ b/agent/data/metadata_client_test.go @@ -0,0 +1,39 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package data + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testKey = "test-key" + testVal = "test-val" +) + +func TestManageMetadata(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + require.NoError(t, testClient.SaveMetadata(testKey, testVal)) + + val, err := testClient.GetMetadata(testKey) + require.NoError(t, err) + assert.Equal(t, testVal, val) +} diff --git a/agent/data/noop_client.go b/agent/data/noop_client.go new file mode 100644 index 00000000000..d49f1ff190b --- /dev/null +++ b/agent/data/noop_client.go @@ -0,0 +1,93 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/engine/image" +) + +type noopClient struct{} + +// NewNoopClient returns a client that implements the data interface with no-op. It is used +// by the agent when ECS_CHECKPOINT is set to false, and is also used in testing. 
+func NewNoopClient() Client { + return &noopClient{} +} + +func (c *noopClient) SaveContainer(*container.Container) error { + return nil +} + +func (c *noopClient) SaveDockerContainer(*container.DockerContainer) error { + return nil +} + +func (c *noopClient) DeleteContainer(string) error { + return nil +} + +func (c *noopClient) GetContainers() ([]*container.DockerContainer, error) { + return nil, nil +} + +func (c *noopClient) SaveTask(*task.Task) error { + return nil +} + +func (c *noopClient) DeleteTask(string) error { + return nil +} + +func (c *noopClient) GetTasks() ([]*task.Task, error) { + return nil, nil +} + +func (c *noopClient) SaveImageState(*image.ImageState) error { + return nil +} + +func (c *noopClient) DeleteImageState(string) error { + return nil +} + +func (c *noopClient) GetImageStates() ([]*image.ImageState, error) { + return nil, nil +} + +func (c *noopClient) SaveENIAttachment(*eni.ENIAttachment) error { + return nil +} + +func (c *noopClient) DeleteENIAttachment(string) error { + return nil +} + +func (c *noopClient) GetENIAttachments() ([]*eni.ENIAttachment, error) { + return nil, nil +} + +func (c *noopClient) SaveMetadata(string, string) error { + return nil +} + +func (c *noopClient) GetMetadata(string) (string, error) { + return "", nil +} + +func (c *noopClient) Close() error { + return nil +} diff --git a/agent/data/task_client.go b/agent/data/task_client.go new file mode 100644 index 00000000000..a15e3a5a9ec --- /dev/null +++ b/agent/data/task_client.go @@ -0,0 +1,61 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "encoding/json" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/utils" + + bolt "github.com/etcd-io/bbolt" + "github.com/pkg/errors" +) + +// SaveTask saves a task to the task bucket. +func (c *client) SaveTask(task *apitask.Task) error { + id, err := utils.GetTaskID(task.Arn) + if err != nil { + return errors.Wrap(err, "failed to generate database id") + } + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(tasksBucketName)) + return putObject(b, id, task) + }) +} + +// DeleteTask deletes a task from the task bucket. +func (c *client) DeleteTask(id string) error { + return c.db.Batch(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(tasksBucketName)) + return b.Delete([]byte(id)) + }) +} + +// GetTasks returns all the tasks in the task bucket. +func (c *client) GetTasks() ([]*apitask.Task, error) { + var tasks []*apitask.Task + err := c.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(tasksBucketName)) + return walk(bucket, func(id string, data []byte) error { + task := apitask.Task{} + if err := json.Unmarshal(data, &task); err != nil { + return err + } + tasks = append(tasks, &task) + return nil + }) + }) + return tasks, err +} diff --git a/agent/data/task_client_test.go b/agent/data/task_client_test.go new file mode 100644 index 00000000000..1093df316c5 --- /dev/null +++ b/agent/data/task_client_test.go @@ -0,0 +1,57 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package data + +import ( + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testTaskArn = "arn:aws:ecs:us-west-2:1234567890:task/test-cluster/abc" +) + +func TestManageTask(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + testTask := &apitask.Task{ + Arn: testTaskArn, + } + + require.NoError(t, testClient.SaveTask(testTask)) + res, err := testClient.GetTasks() + require.NoError(t, err) + assert.Len(t, res, 1) + + require.NoError(t, testClient.DeleteTask("abc")) + res, err = testClient.GetTasks() + require.NoError(t, err) + assert.Len(t, res, 0) +} + +func TestSaveTaskInvalidID(t *testing.T) { + testClient, cleanup := newTestClient(t) + defer cleanup() + + testTask := &apitask.Task{ + Arn: "invalid-arn", + } + assert.Error(t, testClient.SaveTask(testTask)) +} diff --git a/agent/dockerclient/dockerapi/docker_client.go b/agent/dockerclient/dockerapi/docker_client.go new file mode 100644 index 00000000000..4935d7d8b7c --- /dev/null +++ b/agent/dockerclient/dockerapi/docker_client.go @@ -0,0 +1,1671 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/async" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/ecr" + "github.com/aws/amazon-ecs-agent/agent/metrics" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" +) + +const ( + dockerDefaultTag = "latest" + // healthCheckStarting is the initial status returned from docker container health check + healthCheckStarting = "starting" + // healthCheckHealthy is the healthy status returned from docker container health check + healthCheckHealthy = "healthy" + // healthCheckUnhealthy is unhealthy status returned from docker container health check + healthCheckUnhealthy = "unhealthy" + // 
maxHealthCheckOutputLength is the maximum length of healthcheck command output that agent will save + maxHealthCheckOutputLength = 1024 + // VolumeDriverType is one of the plugin capabilities see https://docs.docker.com/engine/reference/commandline/plugin_ls/#filtering + VolumeDriverType = "volumedriver" +) + +// Timelimits for docker operations enforced above docker +const ( + // Parameters for caching the docker auth for ECR + tokenCacheSize = 100 + // tokenCacheTTL is the default ttl of the docker auth for ECR + tokenCacheTTL = 12 * time.Hour + + // pullStatusSuppressDelay controls the time where pull status progress bar + // output will be suppressed in debug mode + pullStatusSuppressDelay = 2 * time.Second + + // retry settings for pulling images + maximumPullRetries = 5 + minimumPullRetryDelay = 1100 * time.Millisecond + maximumPullRetryDelay = 5 * time.Second + pullRetryDelayMultiplier = 2 + pullRetryJitterMultiplier = 0.2 + + // pollStatsTimeout is the timeout for polling Docker Stats API; + // keeping it same as streaming stats inactivity timeout + pollStatsTimeout = 18 * time.Second +) + +// stopContainerTimeoutBuffer is a buffer added to the timeout passed into the docker +// StopContainer api call. The reason for this buffer is that when the regular "stop" +// command fails, the docker api falls back to other kill methods, such as a containerd +// kill and SIGKILL. This buffer adds time onto the context timeout to allow time +// for these backup kill methods to finish. +var stopContainerTimeoutBuffer = 2 * time.Minute + +type inactivityTimeoutHandlerFunc func(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) + +// DockerClient interface to make testing it easier +type DockerClient interface { + // SupportedVersions returns a slice of the supported docker versions (or at least supposedly supported). 
+ SupportedVersions() []dockerclient.DockerVersion + + // KnownVersions returns a slice of the Docker API versions known to the Docker daemon. + KnownVersions() []dockerclient.DockerVersion + + // WithVersion returns a new DockerClient for which all operations will use the given remote api version. + // A default version will be used for a client not produced via this method. + WithVersion(dockerclient.DockerVersion) DockerClient + + // ContainerEvents returns a channel of DockerContainerChangeEvents. Events are placed into the channel and should + // be processed by the listener. + ContainerEvents(context.Context) (<-chan DockerContainerChangeEvent, error) + + // PullImage pulls an image. authData should contain authentication data provided by the ECS backend. + PullImage(context.Context, string, *apicontainer.RegistryAuthenticationData, time.Duration) DockerContainerMetadata + + // CreateContainer creates a container with the provided Config, HostConfig, and name. A timeout value + // and a context should be provided for the request. + CreateContainer(context.Context, *dockercontainer.Config, *dockercontainer.HostConfig, string, time.Duration) DockerContainerMetadata + + // StartContainer starts the container identified by the name provided. A timeout value and a context should be + // provided for the request. + StartContainer(context.Context, string, time.Duration) DockerContainerMetadata + + // StopContainer stops the container identified by the name provided. A timeout value and a context should be provided + // for the request. + StopContainer(context.Context, string, time.Duration) DockerContainerMetadata + + // DescribeContainer returns status information about the specified container. 
A context should be provided + // for the request + DescribeContainer(context.Context, string) (apicontainerstatus.ContainerStatus, DockerContainerMetadata) + + // RemoveContainer removes a container (typically the rootfs, logs, and associated metadata) identified by the name. + // A timeout value and a context should be provided for the request. + RemoveContainer(context.Context, string, time.Duration) error + + // InspectContainer returns information about the specified container. A timeout value and a context should be + // provided for the request. + InspectContainer(context.Context, string, time.Duration) (*types.ContainerJSON, error) + + // TopContainer returns information about the top processes running in the specified container. A timeout value and a context + // should be provided for the request. The last argument is an optional parameter for passing in 'ps' arguments + // as part of the top command. + TopContainer(context.Context, string, time.Duration, ...string) (*dockercontainer.ContainerTopOKBody, error) + + // CreateContainerExec creates a new exec configuration to run an exec process with the provided Config. A timeout value + // and a context should be provided for the request. + CreateContainerExec(ctx context.Context, containerID string, execConfig types.ExecConfig, timeout time.Duration) (*types.IDResponse, error) + + // StartContainerExec starts an exec process already created in the docker host. A timeout value + // and a context should be provided for the request. + StartContainerExec(ctx context.Context, execID string, timeout time.Duration) error + + // InspectContainerExec returns information about a specific exec process on the docker host. A timeout value + // and a context should be provided for the request. + InspectContainerExec(ctx context.Context, execID string, timeout time.Duration) (*types.ContainerExecInspect, error) + + // ListContainers returns the set of containers known to the Docker daemon. 
A timeout value and a context + // should be provided for the request. + ListContainers(context.Context, bool, time.Duration) ListContainersResponse + + // ListImages returns the set of the images known to the Docker daemon + ListImages(context.Context, time.Duration) ListImagesResponse + + // CreateVolume creates a docker volume. A timeout value should be provided for the request + CreateVolume(context.Context, string, string, map[string]string, map[string]string, time.Duration) SDKVolumeResponse + + // InspectVolume returns a volume by its name. A timeout value should be provided for the request + InspectVolume(context.Context, string, time.Duration) SDKVolumeResponse + + // RemoveVolume removes a volume by its name. A timeout value should be provided for the request + RemoveVolume(context.Context, string, time.Duration) error + + // ListPluginsWithFilters returns the set of docker plugins installed on the host, filtered by options provided. + // A timeout value should be provided for the request. + // TODO ListPluginsWithFilters can be removed since ListPlugins takes in filters + ListPluginsWithFilters(context.Context, bool, []string, time.Duration) ([]string, error) + + // ListPlugins returns the set of docker plugins installed on the host. A timeout value should be provided for + // the request. + ListPlugins(context.Context, time.Duration, filters.Args) ListPluginsResponse + + // Stats returns a channel of stat data for the specified container. A context should be provided so the request can + // be canceled. + Stats(context.Context, string, time.Duration) (<-chan *types.StatsJSON, <-chan error) + + // Version returns the version of the Docker daemon. + Version(context.Context, time.Duration) (string, error) + + // APIVersion returns the api version of the client + APIVersion() (dockerclient.DockerVersion, error) + + // InspectImage returns information about the specified image. 
+ InspectImage(string) (*types.ImageInspect, error) + + // RemoveImage removes the metadata associated with an image and may remove the underlying layer data. A timeout + // value and a context should be provided for the request. + RemoveImage(context.Context, string, time.Duration) error + + // LoadImage loads an image from an input stream. A timeout value and a context should be provided for the request. + LoadImage(context.Context, io.Reader, time.Duration) error + + // Info returns the information of the Docker server. + Info(context.Context, time.Duration) (types.Info, error) +} + +// DockerGoClient wraps the underlying go-dockerclient and docker/docker library. +// It exists primarily for the following four purposes: +// 1) Provide an abstraction over inputs and outputs, +// a) Inputs: Trims them down to what we actually need (largely unchanged tbh) +// b) Outputs: Unifies error handling and the common 'start->inspect' +// pattern by having a consistent error output. This error output +// contains error data with a given Name that aims to be presentable as a +// 'reason' in state changes. It also filters out the information about a +// container that is of interest, such as network bindings, while +// ignoring the rest. +// 2) Timeouts: It adds timeouts everywhere, mostly as a reaction to +// pull-related issues in the Docker daemon. +// 3) Versioning: It abstracts over multiple client versions to allow juggling +// appropriately there. +// 4) Allows for both the go-dockerclient client and Docker SDK client to live +// side-by-side until migration to the Docker SDK is complete. 
+// Implements DockerClient +// TODO Remove clientfactory field once all API calls are migrated to sdkclientFactory +type dockerGoClient struct { + sdkClientFactory sdkclientfactory.Factory + version dockerclient.DockerVersion + ecrClientFactory ecr.ECRFactory + auth dockerauth.DockerAuthProvider + ecrTokenCache async.Cache + config *config.Config + context context.Context + imagePullBackoff retry.Backoff + inactivityTimeoutHandler inactivityTimeoutHandlerFunc + + _time ttime.Time + _timeOnce sync.Once + + daemonVersionUnsafe string + lock sync.Mutex +} + +type ImagePullResponse struct { + Id string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + ProgressDetail struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + } `json:"progressDetail,omitempty"` + Progress string `json:"progress,omitempty"` + Error string `json:"error,omitempty"` +} + +func (dg *dockerGoClient) WithVersion(version dockerclient.DockerVersion) DockerClient { + return &dockerGoClient{ + sdkClientFactory: dg.sdkClientFactory, + version: version, + auth: dg.auth, + config: dg.config, + context: dg.context, + } +} + +// NewDockerGoClient creates a new DockerGoClient +// TODO Remove clientfactory parameter once migration to Docker SDK is complete. +func NewDockerGoClient(sdkclientFactory sdkclientfactory.Factory, + cfg *config.Config, ctx context.Context) (DockerClient, error) { + // Ensure SDK client can connect to the Docker daemon. + sdkclient, err := sdkclientFactory.GetDefaultClient() + + if err != nil { + seelog.Errorf("DockerGoClient: Docker SDK client unable to connect to Docker daemon. "+ + "Ensure Docker is running: %v", err) + return nil, err + } + + // Even if we have a DockerClient, the daemon might not be running. Ping from both clients + // to ensure it's up. + _, err = sdkclient.Ping(ctx) + if err != nil { + seelog.Errorf("DockerGoClient: Docker SDK client unable to ping Docker daemon. 
"+ + "Ensure Docker is running: %v", err) + return nil, err + } + + var dockerAuthData json.RawMessage + if cfg.EngineAuthData != nil { + dockerAuthData = cfg.EngineAuthData.Contents() + } + return &dockerGoClient{ + sdkClientFactory: sdkclientFactory, + auth: dockerauth.NewDockerAuthProvider(cfg.EngineAuthType, dockerAuthData), + ecrClientFactory: ecr.NewECRFactory(cfg.AcceptInsecureCert), + ecrTokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + config: cfg, + context: ctx, + imagePullBackoff: retry.NewExponentialBackoff(minimumPullRetryDelay, maximumPullRetryDelay, + pullRetryJitterMultiplier, pullRetryDelayMultiplier), + inactivityTimeoutHandler: handleInactivityTimeout, + }, nil +} + +// Returns the Docker SDK Client +func (dg *dockerGoClient) sdkDockerClient() (sdkclient.Client, error) { + if dg.version == "" { + return dg.sdkClientFactory.GetDefaultClient() + } + return dg.sdkClientFactory.GetClient(dg.version) +} + +func (dg *dockerGoClient) time() ttime.Time { + dg._timeOnce.Do(func() { + if dg._time == nil { + dg._time = &ttime.DefaultTime{} + } + }) + return dg._time +} + +func (dg *dockerGoClient) PullImage(ctx context.Context, image string, + authData *apicontainer.RegistryAuthenticationData, timeout time.Duration) DockerContainerMetadata { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("PULL_IMAGE")() + response := make(chan DockerContainerMetadata, 1) + go func() { + err := retry.RetryNWithBackoffCtx(ctx, dg.imagePullBackoff, maximumPullRetries, + func() error { + err := dg.pullImage(ctx, image, authData) + if err != nil { + seelog.Errorf("DockerGoClient: failed to pull image %s: [%s] %s", image, err.ErrorName(), err.Error()) + } + return err + }) + response <- DockerContainerMetadata{Error: wrapPullErrorAsNamedError(err)} + }() + + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. 
If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "pulled"}} + } + // Context was canceled even though there was no timeout. Send + // back an error. + return DockerContainerMetadata{Error: &CannotPullContainerError{err}} + } +} + +func wrapPullErrorAsNamedError(err error) apierrors.NamedError { + var retErr apierrors.NamedError + if err != nil { + engErr, ok := err.(apierrors.NamedError) + if !ok { + engErr = CannotPullContainerError{err} + } + retErr = engErr + } + return retErr +} + +func (dg *dockerGoClient) pullImage(ctx context.Context, image string, + authData *apicontainer.RegistryAuthenticationData) apierrors.NamedError { + seelog.Debugf("DockerGoClient: pulling image: %s", image) + client, err := dg.sdkDockerClient() + if err != nil { + return CannotGetDockerClientError{version: dg.version, err: err} + } + + sdkAuthConfig, err := dg.getAuthdata(image, authData) + if err != nil { + return wrapPullErrorAsNamedError(err) + } + // encode auth data + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(sdkAuthConfig); err != nil { + return CannotPullECRContainerError{err} + } + + imagePullOpts := types.ImagePullOptions{ + All: false, + RegistryAuth: base64.URLEncoding.EncodeToString(buf.Bytes()), + } + + repository := getRepository(image) + + timeout := dg.time().After(dockerclient.DockerPullBeginTimeout) + // pullBegan is a channel indicating that we have seen at least one line of data on the 'OutputStream' above. + // It is here to guard against a bug wherein Docker never writes anything to that channel and hangs in pulling forever. 
+ pullBegan := make(chan bool, 1) + // pullBeganOnce ensures we only indicate it began once (since our channel will only be read 0 or 1 times) + pullBeganOnce := sync.Once{} + + pullFinished := make(chan error, 1) + subCtx, cancelRequest := context.WithCancel(ctx) + + go func() { + defer cancelRequest() + reader, err := client.ImagePull(subCtx, repository, imagePullOpts) + if err != nil { + pullFinished <- err + return + } + + // handle inactivity timeout + var canceled uint32 + var ch chan<- struct{} + reader, ch = dg.inactivityTimeoutHandler(reader, dg.config.ImagePullInactivityTimeout, cancelRequest, &canceled) + defer reader.Close() + defer close(ch) + decoder := json.NewDecoder(reader) + data := new(ImagePullResponse) + var statusDisplayed time.Time + for err := decoder.Decode(data); err != io.EOF; err = decoder.Decode(data) { + if err != nil { + seelog.Warnf("DockerGoClient: Unable to decode pull event message for image %s: %v", image, err) + pullFinished <- err + return + } + if data.Error != "" { + seelog.Warnf("DockerGoClient: Error while pulling image %s: %v", image, data.Error) + pullFinished <- errors.New(data.Error) + } + if atomic.LoadUint32(&canceled) != 0 { + seelog.Warnf("DockerGoClient: inactivity time exceeded timeout while pulling image %s", image) + pullErr := errors.New("inactivity time exceeded timeout while pulling image") + pullFinished <- pullErr + return + } + + pullBeganOnce.Do(func() { + pullBegan <- true + }) + + statusDisplayed = dg.filterPullDebugOutput(data, image, statusDisplayed) + + data = new(ImagePullResponse) + } + pullFinished <- nil + }() + + select { + case <-pullBegan: + break + case pullErr := <-pullFinished: + if pullErr != nil { + return CannotPullContainerError{pullErr} + } + seelog.Debugf("DockerGoClient: pulling image complete: %s", image) + return nil + case <-timeout: + return &DockerTimeoutError{dockerclient.DockerPullBeginTimeout, "pullBegin"} + } + seelog.Debugf("DockerGoClient: pull began for image: %s", image) 

	err = <-pullFinished
	if err != nil {
		return CannotPullContainerError{err}
	}

	seelog.Debugf("DockerGoClient: pulling image complete: %s", image)
	return nil
}

// filterPullDebugOutput rate-limits debug logging of pull progress: progress-bar
// lines are logged at most once per pullStatusSuppressDelay, while non-progress
// status lines are always logged. Returns the time used as the new
// "last displayed" marker for the next call.
func (dg *dockerGoClient) filterPullDebugOutput(data *ImagePullResponse, image string, statusDisplayed time.Time) time.Time {

	now := time.Now()
	if !strings.Contains(data.Progress, "[=") || now.After(statusDisplayed.Add(pullStatusSuppressDelay)) {
		// data.Progress shows the progress bar lines for Status=downloading or Extracting; logging data.Status retains enough for debugging
		seelog.Debugf("DockerGoClient: pulling image %s, status %s", image, data.Status)
	}

	if strings.Contains(data.Status, "already being pulled by another client. Waiting.") {
		// This can mean the daemon is 'hung' in pulling status for this image, but we can't be sure.
		seelog.Errorf("DockerGoClient: image 'pull' status marked as already being pulled for image %s, status %s",
			image, data.Status)
	}
	return now
}

// getRepository normalizes an image reference for ImagePull: if the reference
// carries no tag, the default tag ("latest") is appended; otherwise the
// reference is used as given.
func getRepository(image string) string {
	repository, tag := utils.ParseRepositoryTag(image)
	if tag == "" {
		repository = repository + ":" + dockerDefaultTag
	} else {
		repository = image
	}
	return repository
}

// InspectImage returns information about the specified image via
// ImageInspectWithRaw, using the client's stored context.
func (dg *dockerGoClient) InspectImage(image string) (*types.ImageInspect, error) {
	defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_IMAGE")()
	client, err := dg.sdkDockerClient()
	if err != nil {
		return nil, err
	}
	imageData, _, err := client.ImageInspectWithRaw(dg.context, image)
	return &imageData, err
}

// getAuthdata resolves the registry credentials for an image pull. With no
// authData it falls back to the engine-configured auth provider; ECR auth goes
// through the cached ECR token provider, ASM auth comes from the secret's
// stored docker auth config.
func (dg *dockerGoClient) getAuthdata(image string, authData *apicontainer.RegistryAuthenticationData) (types.AuthConfig, error) {

	if authData == nil {
		return dg.auth.GetAuthconfig(image, nil)
	}

	switch authData.Type {
	case apicontainer.AuthTypeECR:
		provider := dockerauth.NewECRAuthProvider(dg.ecrClientFactory, dg.ecrTokenCache)
		authConfig, err := provider.GetAuthconfig(image, authData)
		if err != nil {
			return 
authConfig, CannotPullECRContainerError{err} + } + return authConfig, nil + + case apicontainer.AuthTypeASM: + return authData.ASMAuthData.GetDockerAuthConfig(), nil + + default: + return dg.auth.GetAuthconfig(image, nil) + } +} + +func (dg *dockerGoClient) CreateContainer(ctx context.Context, + config *dockercontainer.Config, + hostConfig *dockercontainer.HostConfig, + name string, + timeout time.Duration) DockerContainerMetadata { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("CREATE_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan DockerContainerMetadata, 1) + go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "created"}} + } + // Context was canceled even though there was no timeout. Send + // back an error. 
+ return DockerContainerMetadata{Error: &CannotCreateContainerError{err}} + } +} + +func (dg *dockerGoClient) createContainer(ctx context.Context, + config *dockercontainer.Config, + hostConfig *dockercontainer.HostConfig, + name string) DockerContainerMetadata { + client, err := dg.sdkDockerClient() + if err != nil { + return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}} + } + + dockerContainer, err := client.ContainerCreate(ctx, config, hostConfig, &network.NetworkingConfig{}, name) + if err != nil { + return DockerContainerMetadata{Error: CannotCreateContainerError{err}} + } + + // TODO Remove ContainerInspect call + return dg.containerMetadata(ctx, dockerContainer.ID) +} + +func (dg *dockerGoClient) StartContainer(ctx context.Context, id string, timeout time.Duration) DockerContainerMetadata { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("START_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan DockerContainerMetadata, 1) + go func() { response <- dg.startContainer(ctx, id) }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. 
// If it has timed out,
		// send back the DockerTimeoutError
		err := ctx.Err()
		if err == context.DeadlineExceeded {
			return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "started"}}
		}
		return DockerContainerMetadata{Error: CannotStartContainerError{err}}
	}
}

// startContainer starts the container and then inspects it so callers always
// receive current metadata; a start failure is surfaced via metadata.Error.
func (dg *dockerGoClient) startContainer(ctx context.Context, id string) DockerContainerMetadata {
	client, err := dg.sdkDockerClient()
	if err != nil {
		return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
	}

	err = client.ContainerStart(ctx, id, types.ContainerStartOptions{})
	metadata := dg.containerMetadata(ctx, id)
	if err != nil {
		metadata.Error = CannotStartContainerError{err}
	}

	return metadata
}

// DockerStateToState converts the container status from docker to status recognized by the agent
func DockerStateToState(state *types.ContainerState) apicontainerstatus.ContainerStatus {
	if state.Running {
		return apicontainerstatus.ContainerRunning
	}

	if state.Dead {
		return apicontainerstatus.ContainerStopped
	}

	// The StartedAt field in ContainerState is a string; it needs to be parsed
	// so it can be compared against the zero time instant. A never-started,
	// error-free container is considered Created; anything else is Stopped.
	startTime, _ := time.Parse(time.RFC3339, state.StartedAt)
	if startTime.IsZero() && state.Error == "" {
		return apicontainerstatus.ContainerCreated
	}

	return apicontainerstatus.ContainerStopped
}

// DescribeContainer inspects the container and maps its Docker state to the
// agent's ContainerStatus together with the extracted metadata.
func (dg *dockerGoClient) DescribeContainer(ctx context.Context, dockerID string) (apicontainerstatus.ContainerStatus, DockerContainerMetadata) {
	dockerContainer, err := dg.InspectContainer(ctx, dockerID, dockerclient.InspectContainerTimeout)
	if err != nil {
		return apicontainerstatus.ContainerStatusNone, DockerContainerMetadata{Error: CannotDescribeContainerError{err}}
	}
	return DockerStateToState(dockerContainer.ContainerJSONBase.State), MetadataFromContainer(dockerContainer)
}

func (dg *dockerGoClient) InspectContainer(ctx context.Context, dockerID string, timeout time.Duration) 
(*types.ContainerJSON, error) { + type inspectResponse struct { + container *types.ContainerJSON + err error + } + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan inspectResponse, 1) + go func() { + container, err := dg.inspectContainer(ctx, dockerID) + response <- inspectResponse{container, err} + }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp.container, resp.err + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return nil, &DockerTimeoutError{timeout, "inspecting"} + } + + return nil, &CannotInspectContainerError{err} + } +} + +func (dg *dockerGoClient) inspectContainer(ctx context.Context, dockerID string) (*types.ContainerJSON, error) { + client, err := dg.sdkDockerClient() + if err != nil { + return nil, err + } + containerData, err := client.ContainerInspect(ctx, dockerID) + return &containerData, err +} + +func (dg *dockerGoClient) TopContainer(ctx context.Context, dockerID string, timeout time.Duration, psArgs ...string) (*dockercontainer.ContainerTopOKBody, error) { + type topResponse struct { + top *dockercontainer.ContainerTopOKBody + err error + } + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("TOP_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan topResponse, 1) + go func() { + top, err := dg.topContainer(ctx, dockerID, psArgs...) 
+ response <- topResponse{top, err} + }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp.top, resp.err + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return nil, &DockerTimeoutError{timeout, "listing top"} + } + + return nil, &CannotGetContainerTopError{err} + } +} + +func (dg *dockerGoClient) topContainer(ctx context.Context, dockerID string, psArgs ...string) (*dockercontainer.ContainerTopOKBody, error) { + client, err := dg.sdkDockerClient() + if err != nil { + return nil, err + } + topResponse, err := client.ContainerTop(ctx, dockerID, psArgs) + return &topResponse, err +} + +func (dg *dockerGoClient) StopContainer(ctx context.Context, dockerID string, timeout time.Duration) DockerContainerMetadata { + ctxTimeout := timeout + stopContainerTimeoutBuffer + ctx, cancel := context.WithTimeout(ctx, ctxTimeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("STOP_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan DockerContainerMetadata, 1) + go func() { response <- dg.stopContainer(ctx, dockerID, timeout) }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. 
If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return DockerContainerMetadata{Error: &DockerTimeoutError{ctxTimeout, "stopped"}} + } + return DockerContainerMetadata{Error: CannotStopContainerError{err}} + } +} + +func (dg *dockerGoClient) stopContainer(ctx context.Context, dockerID string, timeout time.Duration) DockerContainerMetadata { + client, err := dg.sdkDockerClient() + if err != nil { + return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}} + } + err = client.ContainerStop(ctx, dockerID, &timeout) + metadata := dg.containerMetadata(ctx, dockerID) + if err != nil { + seelog.Errorf("DockerGoClient: error stopping container ID=%s: %v", dockerID, err) + if metadata.Error == nil { + if strings.Contains(err.Error(), "No such container") { + err = NoSuchContainerError{dockerID} + } + metadata.Error = CannotStopContainerError{err} + } + } + return metadata +} + +func (dg *dockerGoClient) RemoveContainer(ctx context.Context, dockerID string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("REMOVE_CONTAINER")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan error, 1) + go func() { response <- dg.removeContainer(ctx, dockerID) }() + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp + case <-ctx.Done(): + err := ctx.Err() + // Context has either expired or canceled. 
If it has timed out, + // send back the DockerTimeoutError + if err == context.DeadlineExceeded { + return &DockerTimeoutError{dockerclient.RemoveContainerTimeout, "removing"} + } + return &CannotRemoveContainerError{err} + } +} + +func (dg *dockerGoClient) removeContainer(ctx context.Context, dockerID string) error { + client, err := dg.sdkDockerClient() + if err != nil { + return err + } + return client.ContainerRemove(ctx, dockerID, + types.ContainerRemoveOptions{ + RemoveVolumes: true, + RemoveLinks: false, + Force: false, + }) +} + +func (dg *dockerGoClient) containerMetadata(ctx context.Context, id string) DockerContainerMetadata { + ctx, cancel := context.WithTimeout(ctx, dockerclient.InspectContainerTimeout) + defer cancel() + dockerContainer, err := dg.InspectContainer(ctx, id, dockerclient.InspectContainerTimeout) + if err != nil { + return DockerContainerMetadata{DockerID: id, Error: CannotInspectContainerError{err}} + } + return MetadataFromContainer(dockerContainer) +} + +// MetadataFromContainer translates dockerContainer into DockerContainerMetadata +func MetadataFromContainer(dockerContainer *types.ContainerJSON) DockerContainerMetadata { + var bindings []apicontainer.PortBinding + var err apierrors.NamedError + if dockerContainer.NetworkSettings != nil { + // Convert port bindings into the format our container expects + bindings, err = apicontainer.PortBindingFromDockerPortBinding(dockerContainer.NetworkSettings.Ports) + if err != nil { + seelog.Criticalf("DockerGoClient: Docker had network bindings we couldn't understand: %v", err) + return DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + } + + createdTime, _ := time.Parse(time.RFC3339, dockerContainer.Created) + startedTime := time.Time{} + finishedTime := time.Time{} + // Need to check for nil to make sure we do not try to access fields of nil pointer + if dockerContainer.State != nil { + startedTime, _ = time.Parse(time.RFC3339, dockerContainer.State.StartedAt) + finishedTime, _ 
= time.Parse(time.RFC3339, dockerContainer.State.FinishedAt)
	}

	metadata := DockerContainerMetadata{
		DockerID:     dockerContainer.ID,
		PortBindings: bindings,
		Volumes:      dockerContainer.Mounts,
		CreatedAt:    createdTime,
		StartedAt:    startedTime,
		FinishedAt:   finishedTime,
	}

	// NetworkSettings, HostConfig and Config are pointers on ContainerJSON;
	// copy from each only when present.
	if dockerContainer.NetworkSettings != nil {
		metadata.NetworkSettings = dockerContainer.NetworkSettings
	}

	if dockerContainer.HostConfig != nil {
		metadata.NetworkMode = string(dockerContainer.HostConfig.NetworkMode)
	}

	if dockerContainer.Config != nil {
		metadata.Labels = dockerContainer.Config.Labels
	}

	if dockerContainer.State == nil {
		return metadata
	}
	if !dockerContainer.State.Running && !finishedTime.IsZero() {
		// Only record an exitcode if it has exited
		metadata.ExitCode = &dockerContainer.State.ExitCode
	}
	if dockerContainer.State.Error != "" {
		metadata.Error = NewDockerStateError(dockerContainer.State.Error)
	}
	if dockerContainer.State.OOMKilled {
		// OOM kill takes precedence over any state error recorded above.
		metadata.Error = OutOfMemoryError{}
	}
	// The Health field in the Docker SDK is a pointer; check that it is not nil before dereferencing it. 
	if dockerContainer.State.Health == nil || dockerContainer.State.Health.Status == "" || dockerContainer.State.Health.Status == healthCheckStarting {
		return metadata
	}
	// Record the health check information if it exists
	metadata.Health = getMetadataHealthCheck(dockerContainer)
	return metadata
}

// getMetadataHealthCheck extracts the container health status from an inspected
// container: the mapped healthy/unhealthy status, the exit code of the most
// recent health-check run, and its output truncated to
// maxHealthCheckOutputLength. Returns a zero HealthStatus when no health data
// is present.
func getMetadataHealthCheck(dockerContainer *types.ContainerJSON) apicontainer.HealthStatus {
	health := apicontainer.HealthStatus{}
	if dockerContainer.State == nil || dockerContainer.State.Health == nil {
		return health
	}
	logLength := len(dockerContainer.State.Health.Log)

	if logLength != 0 {
		// Only save the last log from the health check
		output := dockerContainer.State.Health.Log[logLength-1].Output
		size := len(output)
		if size > maxHealthCheckOutputLength {
			size = maxHealthCheckOutputLength
		}
		health.Output = output[:size]
	}
	switch dockerContainer.State.Health.Status {
	case healthCheckHealthy:
		health.Status = apicontainerstatus.ContainerHealthy
	case healthCheckUnhealthy:
		health.Status = apicontainerstatus.ContainerUnhealthy
		if logLength == 0 {
			seelog.Warn("DockerGoClient: no container healthcheck data returned by Docker")
			break
		}
		health.ExitCode = dockerContainer.State.Health.Log[logLength-1].ExitCode
	default:
		seelog.Debugf("DockerGoClient: unknown healthcheck status event from docker: %s", dockerContainer.State.Health.Status)
	}
	return health
}

// Listen to the docker event stream for container changes and pass them up
func (dg *dockerGoClient) ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error) {
	client, err := dg.sdkDockerClient()
	if err != nil {
		return nil, err
	}

	events := make(chan *events.Message)
	buffer := NewInfiniteBuffer()

	derivedCtx, cancel := context.WithCancel(ctx)
	dockerEvents, eventErr := client.Events(derivedCtx, types.EventsOptions{})

	// Cache the event from docker client. Channel closes when an error is passed to eventErr. 
+ go buffer.StartListening(derivedCtx, dockerEvents) + // Receive errors from channels. If error thrown is not EOF, log and reopen channel. + // TODO: move the error check into StartListening() to keep event streaming and error handling in one place. + go func() { + for { + select { + case err := <-eventErr: + // If parent ctx has been canceled, stop listening and return. Otherwise reopen the stream. + if ctx.Err() != nil { + return + } + + if err == io.EOF { + seelog.Infof("DockerGoClient: Docker events stream closed with: %v", err) + } else { + seelog.Errorf("DockerGoClient: Docker events stream closed with error: %v", err) + } + + // Reopen a new event stream to continue listening. + nextCtx, nextCancel := context.WithCancel(ctx) + dockerEvents, eventErr = client.Events(nextCtx, types.EventsOptions{}) + // Cache the event from docker client. + go buffer.StartListening(nextCtx, dockerEvents) + // Close previous stream after starting to listen on new one + cancel() + // Reassign cancel variable next Cancel function to setup next iteration of loop. 
+ cancel = nextCancel + case <-ctx.Done(): + return + } + } + }() + + // Read the buffered events and send to task engine + go buffer.Consume(events) + changedContainers := make(chan DockerContainerChangeEvent) + go dg.handleContainerEvents(ctx, events, changedContainers) + + return changedContainers, nil +} + +func (dg *dockerGoClient) handleContainerEvents(ctx context.Context, + events <-chan *events.Message, + changedContainers chan<- DockerContainerChangeEvent) { + for event := range events { + containerID := event.ID + seelog.Debugf("DockerGoClient: got event from docker daemon: %v", event) + + var status apicontainerstatus.ContainerStatus + eventType := apicontainer.ContainerStatusEvent + switch event.Status { + case "create": + status = apicontainerstatus.ContainerCreated + changedContainers <- DockerContainerChangeEvent{ + Status: status, + Type: eventType, + DockerContainerMetadata: DockerContainerMetadata{ + DockerID: containerID, + }, + } + continue + case "start": + status = apicontainerstatus.ContainerRunning + case "stop": + fallthrough + case "die": + status = apicontainerstatus.ContainerStopped + case "oom": + containerInfo := event.ID + // events only contain the container's name in newer Docker API + // versions (starting with 1.22) + if containerName, ok := event.Actor.Attributes["name"]; ok { + containerInfo += fmt.Sprintf(" (name: %q)", containerName) + } + + seelog.Infof("DockerGoClient: process within container %s died due to OOM", containerInfo) + // "oom" can either mean any process got OOM'd, but doesn't always + mean the container dies (non-init processes).
If the container also + // dies, you see a "die" status as well; we'll update suitably there + continue + case "health_status: healthy": + fallthrough + case "health_status: unhealthy": + eventType = apicontainer.ContainerHealthEvent + default: + // Because docker emits new events even when you use an old event api + // version, it's not that big a deal + seelog.Debugf("DockerGoClient: unknown status event from docker: %v", event) + } + + metadata := dg.containerMetadata(ctx, containerID) + + changedContainers <- DockerContainerChangeEvent{ + Status: status, + Type: eventType, + DockerContainerMetadata: metadata, + } + } +} + +// ListContainers returns a slice of container IDs. +func (dg *dockerGoClient) ListContainers(ctx context.Context, all bool, timeout time.Duration) ListContainersResponse { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan ListContainersResponse, 1) + go func() { response <- dg.listContainers(ctx, all) }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return ListContainersResponse{Error: &DockerTimeoutError{timeout, "listing"}} + } + return ListContainersResponse{Error: &CannotListContainersError{err}} + } +} + +func (dg *dockerGoClient) listContainers(ctx context.Context, all bool) ListContainersResponse { + client, err := dg.sdkDockerClient() + if err != nil { + return ListContainersResponse{Error: err} + } + + containers, err := client.ContainerList(ctx, types.ContainerListOptions{ + All: all, + }) + if err != nil { + return ListContainersResponse{Error: err} + } + + // We get an empty slice if there are no containers to be listed. + // Extract container IDs from this list. 
+ containerIDs := make([]string, len(containers)) + for i, container := range containers { + containerIDs[i] = container.ID + } + + return ListContainersResponse{DockerIDs: containerIDs, Error: nil} +} + +func (dg *dockerGoClient) ListImages(ctx context.Context, timeout time.Duration) ListImagesResponse { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + response := make(chan ListImagesResponse, 1) + go func() { response <- dg.listImages(ctx) }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return ListImagesResponse{Error: &DockerTimeoutError{timeout, "listing"}} + } + return ListImagesResponse{Error: &CannotListImagesError{err}} + } +} + +func (dg *dockerGoClient) listImages(ctx context.Context) ListImagesResponse { + client, err := dg.sdkDockerClient() + if err != nil { + return ListImagesResponse{Error: err} + } + images, err := client.ImageList(ctx, types.ImageListOptions{}) + if err != nil { + return ListImagesResponse{Error: err} + } + var imageRepoTags []string + imageIDs := make([]string, len(images)) + for i, image := range images { + imageIDs[i] = image.ID + imageRepoTags = append(imageRepoTags, image.RepoTags...) 
+ } + return ListImagesResponse{ImageIDs: imageIDs, RepoTags: imageRepoTags, Error: nil} +} + +func (dg *dockerGoClient) SupportedVersions() []dockerclient.DockerVersion { + return dg.sdkClientFactory.FindSupportedAPIVersions() +} + +func (dg *dockerGoClient) KnownVersions() []dockerclient.DockerVersion { + return dg.sdkClientFactory.FindKnownAPIVersions() +} + +func (dg *dockerGoClient) Version(ctx context.Context, timeout time.Duration) (string, error) { + version := dg.getDaemonVersion() + if version != "" { + return version, nil + } + + derivedCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + client, err := dg.sdkDockerClient() + if err != nil { + return "", err + } + info, err := client.ServerVersion(derivedCtx) + if err != nil { + return "", err + } + + version = info.Version + dg.setDaemonVersion(version) + return version, nil +} + +func (dg *dockerGoClient) Info(ctx context.Context, timeout time.Duration) (types.Info, error) { + derivedCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + client, err := dg.sdkDockerClient() + if err != nil { + return types.Info{}, err + } + info, infoErr := client.Info(derivedCtx) + if infoErr != nil { + return types.Info{}, infoErr + } + + return info, nil +} + +func (dg *dockerGoClient) getDaemonVersion() string { + dg.lock.Lock() + defer dg.lock.Unlock() + + return dg.daemonVersionUnsafe +} + +func (dg *dockerGoClient) setDaemonVersion(version string) { + dg.lock.Lock() + defer dg.lock.Unlock() + + dg.daemonVersionUnsafe = version +} + +func (dg *dockerGoClient) CreateVolume(ctx context.Context, name string, + driver string, + driverOptions map[string]string, + labels map[string]string, + timeout time.Duration) SDKVolumeResponse { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("CREATE_VOLUME")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + 
response := make(chan SDKVolumeResponse, 1) + go func() { response <- dg.createVolume(ctx, name, driver, driverOptions, labels) }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return SDKVolumeResponse{DockerVolume: nil, Error: &DockerTimeoutError{timeout, "creating volume"}} + } + // Context was canceled even though there was no timeout. Send + // back an error. + return SDKVolumeResponse{DockerVolume: nil, Error: &CannotCreateVolumeError{err}} + } +} + +func (dg *dockerGoClient) createVolume(ctx context.Context, + name string, + driver string, + driverOptions map[string]string, + labels map[string]string) SDKVolumeResponse { + client, err := dg.sdkDockerClient() + if err != nil { + return SDKVolumeResponse{DockerVolume: nil, Error: &CannotGetDockerClientError{version: dg.version, err: err}} + } + + volumeOptions := volume.VolumeCreateBody{ + Driver: driver, + DriverOpts: driverOptions, + Labels: labels, + Name: name, + } + dockerVolume, err := client.VolumeCreate(ctx, volumeOptions) + if err != nil { + return SDKVolumeResponse{DockerVolume: nil, Error: &CannotCreateVolumeError{err}} + } + + return SDKVolumeResponse{DockerVolume: &dockerVolume, Error: nil} +} + +func (dg *dockerGoClient) InspectVolume(ctx context.Context, name string, timeout time.Duration) SDKVolumeResponse { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_VOLUME")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan SDKVolumeResponse, 1) + go func() { response <- dg.inspectVolume(ctx, name) }() + + // Wait until we get a response or for the 'done' context channel + select 
{ + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return SDKVolumeResponse{DockerVolume: nil, Error: &DockerTimeoutError{timeout, "inspecting volume"}} + } + // Context was canceled even though there was no timeout. Send + // back an error. + return SDKVolumeResponse{DockerVolume: nil, Error: &CannotInspectVolumeError{err}} + } +} + +func (dg *dockerGoClient) inspectVolume(ctx context.Context, name string) SDKVolumeResponse { + client, err := dg.sdkDockerClient() + if err != nil { + return SDKVolumeResponse{ + DockerVolume: nil, + Error: &CannotGetDockerClientError{version: dg.version, err: err}} + } + + dockerVolume, err := client.VolumeInspect(ctx, name) + if err != nil { + return SDKVolumeResponse{DockerVolume: nil, Error: &CannotInspectVolumeError{err}} + } + + return SDKVolumeResponse{DockerVolume: &dockerVolume, Error: nil} +} + +func (dg *dockerGoClient) RemoveVolume(ctx context.Context, name string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("REMOVE_VOLUME")() + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan error, 1) + go func() { response <- dg.removeVolume(ctx, name) }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return &DockerTimeoutError{timeout, "removing volume"} + } + // Context was canceled even though there was no timeout. Send + // back an error. 
+ return &CannotRemoveVolumeError{err} + } +} + +func (dg *dockerGoClient) removeVolume(ctx context.Context, name string) error { + client, err := dg.sdkDockerClient() + if err != nil { + return &CannotGetDockerClientError{version: dg.version, err: err} + } + + err = client.VolumeRemove(ctx, name, false) + if err != nil { + return &CannotRemoveVolumeError{err} + } + + return nil +} + +// ListPluginsWithFilters takes in filter arguments and returns the string of filtered Plugin names +func (dg *dockerGoClient) ListPluginsWithFilters(ctx context.Context, enabled bool, capabilities []string, timeout time.Duration) ([]string, error) { + // Create filter list + filterList := filters.NewArgs(filters.Arg("enabled", strconv.FormatBool(enabled))) + for _, capability := range capabilities { + filterList.Add("capability", capability) + } + + var filteredPluginNames []string + response := dg.ListPlugins(ctx, timeout, filterList) + + if response.Error != nil { + return nil, response.Error + } + // Create a list of the filtered plugin names + for _, plugin := range response.Plugins { + filteredPluginNames = append(filteredPluginNames, plugin.Name) + } + return filteredPluginNames, nil +} + +func (dg *dockerGoClient) ListPlugins(ctx context.Context, timeout time.Duration, filters filters.Args) ListPluginsResponse { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // Buffered channel so in the case of timeout it takes one write, never gets + // read, and can still be GC'd + response := make(chan ListPluginsResponse, 1) + go func() { response <- dg.listPlugins(ctx, filters) }() + + // Wait until we get a response or for the 'done' context channel + select { + case resp := <-response: + return resp + case <-ctx.Done(): + // Context has either expired or canceled. 
If it has timed out, + // send back the DockerTimeoutError + err := ctx.Err() + if err == context.DeadlineExceeded { + return ListPluginsResponse{Plugins: nil, Error: &DockerTimeoutError{timeout, "listing plugins"}} + } + // Context was canceled even though there was no timeout. Send + // back an error. + return ListPluginsResponse{Plugins: nil, Error: &CannotListPluginsError{err}} + } +} + +func (dg *dockerGoClient) listPlugins(ctx context.Context, filters filters.Args) ListPluginsResponse { + client, err := dg.sdkDockerClient() + if err != nil { + return ListPluginsResponse{Plugins: nil, Error: &CannotGetDockerClientError{version: dg.version, err: err}} + } + + plugins, err := client.PluginList(ctx, filters) + if err != nil { + return ListPluginsResponse{Plugins: nil, Error: &CannotListPluginsError{err}} + } + + return ListPluginsResponse{Plugins: plugins, Error: nil} +} + +// APIVersion returns the client api version +func (dg *dockerGoClient) APIVersion() (dockerclient.DockerVersion, error) { + client, err := dg.sdkDockerClient() + if err != nil { + return "", err + } + return dg.sdkClientFactory.FindClientAPIVersion(client), nil +} + +// Stats returns a channel of *types.StatsJSON entries for the container. 
+func (dg *dockerGoClient) Stats(ctx context.Context, id string, inactivityTimeout time.Duration) (<-chan *types.StatsJSON, <-chan error) { + subCtx, cancelRequest := context.WithCancel(ctx) + + errC := make(chan error) + statsC := make(chan *types.StatsJSON) + client, err := dg.sdkDockerClient() + if err != nil { + cancelRequest() + go func() { + // upstream function should consume error + errC <- err + close(statsC) + }() + return statsC, errC + } + + var resp types.ContainerStats + if !dg.config.PollMetrics.Enabled() { + // Streaming metrics is the default behavior + seelog.Infof("DockerGoClient: Starting streaming metrics for container %s", id) + go func() { + defer cancelRequest() + defer close(statsC) + stream := true + resp, err = client.ContainerStats(subCtx, id, stream) + if err != nil { + errC <- fmt.Errorf("DockerGoClient: Unable to retrieve stats for container %s: %v", id, err) + return + } + + // handle inactivity timeout + var canceled uint32 + var ch chan<- struct{} + resp.Body, ch = dg.inactivityTimeoutHandler(resp.Body, inactivityTimeout, cancelRequest, &canceled) + defer resp.Body.Close() + defer close(ch) + + decoder := json.NewDecoder(resp.Body) + data := new(types.StatsJSON) + for err := decoder.Decode(data); err != io.EOF; err = decoder.Decode(data) { + if err != nil { + errC <- fmt.Errorf("DockerGoClient: Unable to decode stats for container %s: %v", id, err) + return + } + if atomic.LoadUint32(&canceled) != 0 { + errC <- fmt.Errorf("DockerGoClient: inactivity time exceeded timeout while retrieving stats for container %s", id) + return + } + + statsC <- data + data = new(types.StatsJSON) + } + }() + } else { + seelog.Infof("DockerGoClient: Starting to Poll for metrics for container %s", id) + + go func() { + defer cancelRequest() + defer close(statsC) + // we need to start by getting container stats so that the task stats + // endpoint will be populated immediately. 
+ stats, err := getContainerStatsNotStreamed(client, subCtx, id, pollStatsTimeout) + if err != nil { + errC <- err + return + } + statsC <- stats + + // sleeping here jitters the time at which the ticker is created, so that + // containers do not synchronize on calling the docker stats api. + // the max sleep is 80% of the polling interval so that we have a chance to + // get two stats in the first publishing interval. + time.Sleep(retry.AddJitter(time.Nanosecond, dg.config.PollingMetricsWaitDuration*8/10)) + statPollTicker := time.NewTicker(dg.config.PollingMetricsWaitDuration) + defer statPollTicker.Stop() + for range statPollTicker.C { + stats, err := getContainerStatsNotStreamed(client, subCtx, id, pollStatsTimeout) + if err != nil { + errC <- err + return + } + statsC <- stats + } + }() + } + + return statsC, errC +} + +func getContainerStatsNotStreamed(client sdkclient.Client, ctx context.Context, id string, timeout time.Duration) (*types.StatsJSON, error) { + ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + type statsResponse struct { + stats types.ContainerStats + err error + } + response := make(chan statsResponse, 1) + go func() { + stats, err := client.ContainerStats(ctxWithTimeout, id, false) + response <- statsResponse{stats, err} + }() + select { + case resp := <-response: + if resp.err != nil { + return nil, fmt.Errorf("DockerGoClient: Unable to retrieve stats for container %s: %v", id, resp.err) + } + decoder := json.NewDecoder(resp.stats.Body) + stats := &types.StatsJSON{} + err := decoder.Decode(stats) + if err != nil { + return nil, fmt.Errorf("DockerGoClient: Unable to decode stats for container %s: %v", id, err) + } + defer resp.stats.Body.Close() + return stats, nil + case <-ctxWithTimeout.Done(): + err := ctxWithTimeout.Err() + if err == context.DeadlineExceeded { + return nil, fmt.Errorf("DockerGoClient: timed out retrieving stats for container %s", id) + } + return nil, fmt.Errorf("DockerGoClient: Unable to 
retrieve stats for container %s: %v", id, err) + } +} + +func (dg *dockerGoClient) RemoveImage(ctx context.Context, imageName string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + response := make(chan error, 1) + go func() { response <- dg.removeImage(ctx, imageName) }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + return &DockerTimeoutError{timeout, "removing image"} + } +} + +func (dg *dockerGoClient) removeImage(ctx context.Context, imageName string) error { + client, err := dg.sdkDockerClient() + if err != nil { + return err + } + _, err = client.ImageRemove(ctx, imageName, types.ImageRemoveOptions{}) + return err +} + +// LoadImage invokes loads an image from an input stream, with a specified timeout +func (dg *dockerGoClient) LoadImage(ctx context.Context, inputStream io.Reader, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("LOAD_IMAGE")() + response := make(chan error, 1) + go func() { + response <- dg.loadImage(ctx, inputStream) + }() + select { + case resp := <-response: + return resp + case <-ctx.Done(): + return &DockerTimeoutError{timeout, "loading image"} + } +} + +func (dg *dockerGoClient) loadImage(ctx context.Context, reader io.Reader) error { + client, err := dg.sdkDockerClient() + if err != nil { + return err + } + resp, err := client.ImageLoad(ctx, reader, false) + if err != nil { + return err + } + + // flush and close response reader + if resp.Body != nil { + defer resp.Body.Close() + _, err = io.Copy(ioutil.Discard, resp.Body) + } + return err +} + +func (dg *dockerGoClient) CreateContainerExec(ctx context.Context, containerID string, execConfig types.ExecConfig, timeout time.Duration) (*types.IDResponse, error) { + type createContainerExecResponse struct { + execID *types.IDResponse + err error + } + + ctx, cancel := context.WithTimeout(ctx, timeout) 
+ defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("CREATE_CONTAINER_EXEC")() + response := make(chan createContainerExecResponse, 1) + go func() { + execIDresponse, err := dg.createContainerExec(ctx, containerID, execConfig) + response <- createContainerExecResponse{execIDresponse, err} + }() + + select { + case resp := <-response: + return resp.execID, resp.err + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return nil, &DockerTimeoutError{timeout, "exec command"} + } + return nil, &CannotCreateContainerExecError{err} + } +} + +func (dg *dockerGoClient) createContainerExec(ctx context.Context, containerID string, config types.ExecConfig) (*types.IDResponse, error) { + client, err := dg.sdkDockerClient() + if err != nil { + return nil, err + } + + execIDResponse, err := client.ContainerExecCreate(ctx, containerID, config) + if err != nil { + return nil, &CannotCreateContainerExecError{err} + } + return &execIDResponse, nil +} + +func (dg *dockerGoClient) StartContainerExec(ctx context.Context, execID string, timeout time.Duration) error { + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("START_CONTAINER_EXEC")() + response := make(chan error, 1) + go func() { + err := dg.startContainerExec(ctx, execID) + response <- err + }() + + select { + case resp := <-response: + return resp + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return &DockerTimeoutError{timeout, "start exec command"} + } + return &CannotStartContainerExecError{err} + } +} + +func (dg *dockerGoClient) startContainerExec(ctx context.Context, execID string) error { + client, err := dg.sdkDockerClient() + if err != nil { + return err + } + + execStartCheck := types.ExecStartCheck{ + Detach: true, + Tty: false, + } + + err = client.ContainerExecStart(ctx, execID, execStartCheck) + if err != nil { + return &CannotStartContainerExecError{err} + 
} + return nil +} + +func (dg *dockerGoClient) InspectContainerExec(ctx context.Context, execID string, timeout time.Duration) (*types.ContainerExecInspect, error) { + type inspectContainerExecResponse struct { + execInspect *types.ContainerExecInspect + err error + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_CONTAINER_EXEC")() + response := make(chan inspectContainerExecResponse, 1) + go func() { + execInspectResponse, err := dg.inspectContainerExec(ctx, execID) + response <- inspectContainerExecResponse{execInspectResponse, err} + }() + + select { + case resp := <-response: + return resp.execInspect, resp.err + + case <-ctx.Done(): + err := ctx.Err() + if err == context.DeadlineExceeded { + return nil, &DockerTimeoutError{timeout, "inspect exec command"} + } + return nil, &CannotInspectContainerExecError{err} + } +} + +func (dg *dockerGoClient) inspectContainerExec(ctx context.Context, containerID string) (*types.ContainerExecInspect, error) { + client, err := dg.sdkDockerClient() + if err != nil { + return nil, err + } + + execInspectResponse, err := client.ContainerExecInspect(ctx, containerID) + if err != nil { + return nil, &CannotInspectContainerExecError{err} + } + return &execInspectResponse, nil +} diff --git a/agent/dockerclient/dockerapi/docker_client_test.go b/agent/dockerclient/dockerapi/docker_client_test.go new file mode 100644 index 00000000000..7436eac7f36 --- /dev/null +++ b/agent/dockerclient/dockerapi/docker_client_test.go @@ -0,0 +1,2034 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "context" + "encoding/base64" + "errors" + "io" + "io/ioutil" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + mock_sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient/mocks" + mock_sdkclientfactory "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory/mocks" + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_ecr "github.com/aws/amazon-ecs-agent/agent/ecr/mocks" + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" + "github.com/docker/go-connections/nat" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// xContainerShortTimeout is a short duration intended to be used by the +// docker client APIs that test if the underlying context gets canceled +// upon the expiration of the timeout duration. 
+// TODO Remove mock go-dockerclient calls and fields after migration is complete. +const xContainerShortTimeout = 1 * time.Millisecond + +// xImageShortTimeout is a short duration intended to be used by the +// docker client APIs that test if the underlying context gets canceled +// upon the expiration of the timeout duration. +const xImageShortTimeout = 1 * time.Millisecond + +const ( + // retry settings for pulling images mock backoff + xMaximumPullRetries = 5 + xMinimumPullRetryDelay = 25 * time.Millisecond + xMaximumPullRetryDelay = 100 * time.Microsecond + xPullRetryDelayMultiplier = 2 + xPullRetryJitterMultiplier = 0.2 + dockerEventBufferSize = 100 +) + +func defaultTestConfig() *config.Config { + cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient()) + return cfg +} + +func dockerClientSetup(t *testing.T) ( + *mock_sdkclient.MockClient, + *dockerGoClient, + *mock_ttime.MockTime, + *gomock.Controller, + *mock_ecr.MockECRFactory, + func()) { + return dockerClientSetupWithConfig(t, config.DefaultConfig()) +} + +func dockerClientSetupWithConfig(t *testing.T, conf config.Config) ( + *mock_sdkclient.MockClient, + *dockerGoClient, + *mock_ttime.MockTime, + *gomock.Controller, + *mock_ecr.MockECRFactory, + func()) { + ctrl := gomock.NewController(t) + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + mockTime := mock_ttime.NewMockTime(ctrl) + conf.EngineAuthData = config.NewSensitiveRawMessage([]byte{}) + client, _ := NewDockerGoClient(sdkFactory, &conf, ctx) + goClient, _ := client.(*dockerGoClient) + ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl) + goClient.ecrClientFactory = ecrClientFactory + goClient._time = mockTime + goClient.imagePullBackoff
= retry.NewExponentialBackoff(xMinimumPullRetryDelay, xMaximumPullRetryDelay, + xPullRetryJitterMultiplier, xPullRetryDelayMultiplier) + return mockDockerSDK, goClient, mockTime, ctrl, ecrClientFactory, ctrl.Finish +} + +func TestPullImageOutputTimeout(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + pullBeginTimeout := make(chan time.Time) + testTime.EXPECT().After(dockerclient.DockerPullBeginTimeout).Return(pullBeginTimeout).MinTimes(1) + + // multiple invocations will happen due to retries, but all should timeout + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:latest", gomock.Any()).DoAndReturn( + func(x, y, z interface{}) (io.ReadCloser, error) { + pullBeginTimeout <- time.Now() + + reader := &mockReadCloser{ + reader: strings.NewReader(`{"status":"pull in progress"}`), + } + return reader, nil + }).Times(maximumPullRetries) // expected number of retries + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image", nil, defaultTestConfig().ImagePullTimeout) + assert.Error(t, metadata.Error, "Expected error for pull timeout") + assert.Equal(t, "DockerTimeoutError", metadata.Error.(apierrors.NamedError).ErrorName()) +} + +func TestImagePullGlobalTimeout(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + pullBeginTimeout := make(chan time.Time, 1) + testTime.EXPECT().After(dockerclient.DockerPullBeginTimeout).Return(pullBeginTimeout) + + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:latest", gomock.Any()).Do(func(x, y, z interface{}) { + wait.Wait() + }).Return(mockReadCloser{reader: strings.NewReader(`{"status":"pull in progress"}`)}, nil).MaxTimes(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image", nil, xContainerShortTimeout) + assert.Error(t, metadata.Error, "Expected error 
for pull timeout") + assert.Equal(t, "DockerTimeoutError", metadata.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestPullImageInactivityTimeout(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + client.config.ImagePullInactivityTimeout = 1 * time.Millisecond + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:latest", gomock.Any()).DoAndReturn( + func(x, y, z interface{}) (io.ReadCloser, error) { + + reader := mockReadCloser{ + reader: strings.NewReader(`{"status":"pull in progress"}`), + delay: 300 * time.Millisecond, + } + return reader, nil + }).Times(maximumPullRetries) // expected number of retries + + client.inactivityTimeoutHandler = func(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { + assert.Equal(t, client.config.ImagePullInactivityTimeout, timeout) + atomic.AddUint32(canceled, 1) + return reader, make(chan struct{}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image", nil, defaultTestConfig().ImagePullTimeout) + assert.Error(t, metadata.Error, "Expected error for pull inactivity timeout") + assert.Equal(t, "CannotPullContainerError", metadata.Error.(apierrors.NamedError).ErrorName(), "Wrong error type") +} + +func TestImagePull(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:latest", gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image", nil, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +func 
TestImagePullTag(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + client.config.ImagePullInactivityTimeout = 10 * time.Second + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:mytag", gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image:mytag", nil, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +func TestImagePullDigest(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb", gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb", nil, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +func TestPullImageECRSuccess(t *testing.T) { + mockDockerSDK, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t) + defer done() + + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + ecrClient := mock_ecr.NewMockECRClient(ctrl) + + registryID := "123456789012" + region := "eu-west-1" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + imageEndpoint := "registry.endpoint" + image := imageEndpoint + "/myimage:tag" + username 
:= "username" + password := "password" + + imagePullOpts := types.ImagePullOptions{ + All: false, + RegistryAuth: "eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwicGFzc3dvcmQiOiJwYXNzd29yZCIsInNlcnZlcmFkZHJlc3MiOiJodHRwczovL3JlZ2lzdHJ5LmVuZHBvaW50In0K", + } + + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + }, nil) + + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), image, imagePullOpts).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +func TestPullImageECRAuthFail(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, _ := NewDockerGoClient(sdkFactory, defaultTestConfig(), ctx) + goClient, _ := client.(*dockerGoClient) + ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl) + ecrClient := mock_ecr.NewMockECRClient(ctrl) + mockTime := mock_ttime.NewMockTime(ctrl) + goClient.ecrClientFactory = ecrClientFactory + goClient._time = mockTime + + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + + registryID := "123456789012" + region := "eu-west-1" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: 
"ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + imageEndpoint := "registry.endpoint" + image := imageEndpoint + "/myimage:tag" + + // no retries for this error + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil) + ecrClient.EXPECT().GetAuthorizationToken(gomock.Any()).Return(nil, errors.New("test error")) + + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.Error(t, metadata.Error, "expected pull to fail") +} + +func TestPullImageError(t *testing.T) { + mockDockerSDK, client, testTime, _, _, _ := dockerClientSetup(t) + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), "image:latest", gomock.Any()).DoAndReturn( + func(x, y, z interface{}) (io.ReadCloser, error) { + + reader := mockReadCloser{ + reader: strings.NewReader(`{"error":"toomanyrequests: Rate exceeded"}`), + delay: 300 * time.Millisecond, + } + return reader, nil + }).Times(maximumPullRetries) // expected number of retries + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, "image", nil, defaultTestConfig().ImagePullTimeout) + assert.Error(t, metadata.Error, "toomanyrequests: Rate exceeded") + assert.Equal(t, "CannotPullContainerError", metadata.Error.(apierrors.NamedError).ErrorName(), "Wrong error type") +} + +type mockReadCloser struct { + reader io.Reader + delay time.Duration +} + +func (mr mockReadCloser) Read(data []byte) (n int, err error) { + time.Sleep(mr.delay) + return mr.reader.Read(data) +} +func (mr mockReadCloser) Close() error { + return nil +} +func TestGetRepositoryWithTaggedImage(t *testing.T) { + image := "registry.endpoint/myimage:tag" + repository := getRepository(image) + + assert.Equal(t, image, repository) +} + +func TestGetRepositoryWithUntaggedImage(t *testing.T) { + image := "registry.endpoint/myimage" + 
repository := getRepository(image) + + assert.Equal(t, image+":"+dockerDefaultTag, repository) +} + +func TestCreateContainerTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + hostConfig := &dockercontainer.HostConfig{Resources: dockercontainer.Resources{Memory: 100}} + mockDockerSDK.EXPECT().ContainerCreate(gomock.Any(), &dockercontainer.Config{}, hostConfig, + &network.NetworkingConfig{}, "containerName").Do(func(v, w, x, y, z interface{}) { + wait.Wait() + }).MaxTimes(1).Return(dockercontainer.ContainerCreateCreatedBody{}, errors.New("test error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.CreateContainer(ctx, &dockercontainer.Config{}, hostConfig, "containerName", xContainerShortTimeout) + assert.Error(t, metadata.Error, "expected error for pull timeout") + assert.Equal(t, "DockerTimeoutError", metadata.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestCreateContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + name := "containerName" + hostConfig := &dockercontainer.HostConfig{Resources: dockercontainer.Resources{Memory: 100}} + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerCreate(gomock.Any(), gomock.Any(), hostConfig, gomock.Any(), name). + Do(func(v, w, x, y, z interface{}) { + assert.True(t, reflect.DeepEqual(x, hostConfig), + "Mismatch in create container HostConfig, %v != %v", y, hostConfig) + assert.Equal(t, z, name, + "Mismatch in create container options, %s != %s", z, name) + }).Return(dockercontainer.ContainerCreateCreatedBody{ID: "id"}, nil), + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id"). 
+ Return(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "id", + }, + }, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.CreateContainer(ctx, nil, hostConfig, name, defaultTestConfig().ContainerCreateTimeout) + assert.NoError(t, metadata.Error) + assert.Equal(t, "id", metadata.DockerID) + assert.Nil(t, metadata.ExitCode, "Expected a created container to not have an exit code") +} + +func TestCreateContainerExecTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + execConfig := types.ExecConfig{ + Privileged: false, + AttachStdin: false, + AttachStderr: false, + AttachStdout: false, + Detach: true, + DetachKeys: "", + Env: []string{}, + Cmd: []string{"ls"}, + } + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerExecCreate(gomock.Any(), gomock.Any(), execConfig).Do(func(v, w, x interface{}) { + wait.Wait() // wait until timeout happens + }).MaxTimes(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := client.CreateContainerExec(ctx, "id", execConfig, xContainerShortTimeout) + assert.NotNil(t, err, "Expected error for create container exec") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName(), "Wrong error type") + wait.Done() +} + +func TestCreateContainerExec(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + name := "containerName" + execEnv := make([]string, 0) + execCmd := make([]string, 0) + execCmd = append(execCmd, "ls") + execConfig := types.ExecConfig{ + Privileged: false, + AttachStdin: false, + AttachStderr: false, + AttachStdout: false, + Detach: true, + DetachKeys: "", + Env: execEnv, + Cmd: execCmd, + } + + execCreateResponse := types.IDResponse{ID: "id"} + + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerExecCreate(gomock.Any(), gomock.Any(), execConfig). 
+ Do(func(v, w, x interface{}) { + assert.True(t, reflect.DeepEqual(x, execConfig), + "Mismatch in create container ExecConfig, %v != %v", x, execConfig) + }).Return(execCreateResponse, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + response, err := client.CreateContainerExec(ctx, name, execConfig, dockerclient.ContainerExecCreateTimeout) + assert.NoError(t, err) + assert.NotNil(t, response) + assert.Equal(t, execCreateResponse, *response) +} + +func TestStartContainerExecTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + execStartCheck := types.ExecStartCheck{ + Detach: true, + Tty: false, + } + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerExecStart(gomock.Any(), "id", execStartCheck).Do(func(x, y, z interface{}) { + wait.Wait() // wait until timeout happens + }).MaxTimes(1).Return(nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.StartContainerExec(ctx, "id", xContainerShortTimeout) + assert.NotNil(t, err, "Expected error for start container exec") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName(), "Wrong error type") + wait.Done() +} + +func TestStartContainerExec(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + execStartCheck := types.ExecStartCheck{ + Detach: true, + Tty: false, + } + + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerExecStart(gomock.Any(), "id", execStartCheck).Return(nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.StartContainerExec(ctx, "id", dockerclient.ContainerExecStartTimeout) + assert.NoError(t, err) +} + +func TestInspectContainerExecTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerExecInspect(gomock.Any(), 
"id").Do(func(x, y interface{}) { + wait.Wait() // wait until timeout happens + }).MaxTimes(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := client.InspectContainerExec(ctx, "id", xContainerShortTimeout) + assert.NotNil(t, err, "Expected error for inspect container exec") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName(), "Wrong error type") + wait.Done() +} + +func TestInspectContainerExec(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + inspectContainerResponse := types.ContainerExecInspect{ + ExecID: "id", + ContainerID: "cont", + Running: true, + ExitCode: 0, + Pid: 25537, + } + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerExecInspect(gomock.Any(), "id").Return(inspectContainerResponse, nil), + ) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + resp, err := client.InspectContainerExec(ctx, "id", dockerclient.ContainerExecInspectTimeout) + assert.NoError(t, err) + assert.Equal(t, "id", resp.ExecID) + assert.Equal(t, "cont", resp.ContainerID) + assert.Equal(t, true, resp.Running) + assert.Equal(t, 0, resp.ExitCode) + assert.Equal(t, 25537, resp.Pid) +} + +func TestStartContainerTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerStart(gomock.Any(), "id", types.ContainerStartOptions{}).Do(func(x, y, z interface{}) { + wait.Wait() // wait until timeout happens + }).MaxTimes(1) + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id").Return(types.ContainerJSON{}, errors.New("test error")).AnyTimes() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.StartContainer(ctx, "id", xContainerShortTimeout) + assert.NotNil(t, metadata.Error, "Expected error for pull timeout") + assert.Equal(t, "DockerTimeoutError", metadata.Error.(apierrors.NamedError).ErrorName(), 
"Wrong error type") + wait.Done() +} + +func TestStartContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerStart(gomock.Any(), "id", types.ContainerStartOptions{}).Return(nil), + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id"). + Return(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "id", + }}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.StartContainer(ctx, "id", defaultTestConfig().ContainerStartTimeout) + assert.NoError(t, metadata.Error) + assert.Equal(t, "id", metadata.DockerID) +} + +func TestStopContainerTimeout(t *testing.T) { + cfg := config.DefaultConfig() + cfg.DockerStopTimeout = xContainerShortTimeout + mockDockerSDK, client, _, _, _, done := dockerClientSetupWithConfig(t, cfg) + defer done() + reset := stopContainerTimeoutBuffer + stopContainerTimeoutBuffer = xContainerShortTimeout + defer func() { + stopContainerTimeoutBuffer = reset + }() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerStop(gomock.Any(), "id", &client.config.DockerStopTimeout).Do(func(x, y, z interface{}) { + wait.Wait() + // Don't return, verify timeout happens + }).MaxTimes(1).Return(errors.New("test error")) + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), gomock.Any()).AnyTimes() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.StopContainer(ctx, "id", xContainerShortTimeout) + assert.Error(t, metadata.Error, "Expected error for stop timeout") + assert.Equal(t, "DockerTimeoutError", metadata.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestStopContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerStop(gomock.Any(), "id", &client.config.DockerStopTimeout).Return(nil), + 
mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id"). + Return( + types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "id", + State: &types.ContainerState{ + ExitCode: 10, + }, + }, + Config: &dockercontainer.Config{}, + }, + nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.StopContainer(ctx, "id", client.config.DockerStopTimeout) + assert.NoError(t, metadata.Error) + assert.Equal(t, "id", metadata.DockerID) +} + +func TestRemoveContainerTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerRemove(gomock.Any(), "id", + types.ContainerRemoveOptions{ + RemoveVolumes: true, + RemoveLinks: false, + Force: false, + }).Do(func(x, y, z interface{}) { + wait.Wait() // wait until timeout happens + }).MaxTimes(1) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + err := client.RemoveContainer(ctx, "id", xContainerShortTimeout) + assert.Error(t, err, "Expected error for remove timeout") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName(), "Wrong error type") + wait.Done() +} + +func TestRemoveContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().ContainerRemove(gomock.Any(), "id", + types.ContainerRemoveOptions{ + RemoveVolumes: true, + RemoveLinks: false, + Force: false, + }).Return(nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveContainer(ctx, "id", dockerclient.RemoveContainerTimeout) + assert.NoError(t, err) +} + +func TestInspectContainerTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id").Do(func(ctx, x interface{}) { + wait.Wait() + // Don't 
return, verify timeout happens + }).MaxTimes(1).Return(types.ContainerJSON{}, errors.New("test error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := client.InspectContainer(ctx, "id", xContainerShortTimeout) + assert.Error(t, err, "Expected error for inspect timeout") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestInspectContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + containerOutput := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "id", + State: &types.ContainerState{ + ExitCode: 10, + Health: &types.Health{ + Status: "healthy", + Log: []*types.HealthcheckResult{ + { + ExitCode: 1, + Output: "health output", + }, + }, + }, + }}} + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "id").Return(containerOutput, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + container, err := client.InspectContainer(ctx, "id", dockerclient.InspectContainerTimeout) + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(&containerOutput, container)) +} + +func TestTopContainerTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerTop(gomock.Any(), "id", gomock.Any()).Do(func(ctx context.Context, x interface{}, y interface{}) { + wait.Wait() + }).MaxTimes(1).Return(dockercontainer.ContainerTopOKBody{}, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := client.TopContainer(ctx, "id", xContainerShortTimeout) + assert.Error(t, err, "Expected error for top timeout") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestTopContainer(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + 
topOutput := dockercontainer.ContainerTopOKBody{} + gomock.InOrder( + mockDockerSDK.EXPECT().ContainerTop(gomock.Any(), "id", gomock.Any()).Return(topOutput, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + topResponse, err := client.TopContainer(ctx, "id", dockerclient.TopContainerTimeout, "pid") + assert.NoError(t, err) + assert.Equal(t, &topOutput, topResponse) +} + +func TestContainerEvents(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + eventsChan := make(chan events.Message, dockerEventBufferSize) + errChan := make(chan error) + mockDockerSDK.EXPECT().Events(gomock.Any(), gomock.Any()).Return(eventsChan, errChan) + + dockerEvents, err := client.ContainerEvents(context.TODO()) + require.NoError(t, err, "Could not get container events") + go func() { + eventsChan <- events.Message{Type: "container", ID: "containerId", Status: "create"} + }() + + event := <-dockerEvents + assert.Equal(t, event.DockerID, "containerId", "Wrong docker id") + assert.Equal(t, event.Status, apicontainerstatus.ContainerCreated, "Wrong status") + + container := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "cid2", + }, + NetworkSettings: &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Ports: nat.PortMap{ + nat.Port("80/tcp"): []nat.PortBinding{{HostPort: "9001"}}, + }, + }, + }, + Config: &dockercontainer.Config{}, + Mounts: []types.MountPoint{ + {Source: "/host/path", + Destination: "/container/path"}, + }, + } + + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "cid2").Return(container, nil) + go func() { + eventsChan <- events.Message{Type: "container", ID: "cid2", Status: "start"} + }() + event = <-dockerEvents + assert.Equal(t, event.DockerID, "cid2", "Wrong docker id") + assert.Equal(t, event.Status, apicontainerstatus.ContainerRunning, "Wrong status") + assert.Equal(t, event.PortBindings[0].ContainerPort, uint16(80), "Incorrect port 
bindings") + assert.Equal(t, event.PortBindings[0].HostPort, uint16(9001), "Incorrect port bindings") + assert.Equal(t, event.Volumes[0].Source, "/host/path", "Incorrect volume mapping") + assert.Equal(t, event.Volumes[0].Destination, "/container/path", "Incorrect volume mapping") + + for i := 0; i < 2; i++ { + stoppedContainer := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "cid3" + strconv.Itoa(i), + State: &types.ContainerState{ + FinishedAt: (time.Now()).Format(time.RFC3339), + ExitCode: 20, + }, + }, + } + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "cid3"+strconv.Itoa(i)).Return(stoppedContainer, nil) + } + go func() { + eventsChan <- events.Message{Type: "container", ID: "cid30", Status: "stop"} + eventsChan <- events.Message{Type: "container", ID: "cid31", Status: "die"} + }() + + for i := 0; i < 2; i++ { + anEvent := <-dockerEvents + assert.True(t, anEvent.DockerID == "cid30" || anEvent.DockerID == "cid31", "Wrong container id: "+anEvent.DockerID) + assert.Equal(t, anEvent.Status, apicontainerstatus.ContainerStopped, "Should be stopped") + assert.Equal(t, aws.IntValue(anEvent.ExitCode), 20, "Incorrect exit code") + } + + containerWithHealthInfo := types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_health", + State: &types.ContainerState{ + Health: &types.Health{ + Status: "healthy", + Log: []*types.HealthcheckResult{ + { + ExitCode: 1, + Output: "health output", + }, + }, + }, + }}, + } + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), "container_health").Return(containerWithHealthInfo, nil) + go func() { + eventsChan <- events.Message{ + Type: "container", + ID: "container_health", + Action: "health_status: unhealthy", + Status: "health_status: unhealthy", + Actor: events.Actor{ + ID: "container_health", + }, + } + }() + + anEvent := <-dockerEvents + assert.Equal(t, anEvent.Type, apicontainer.ContainerHealthEvent, "unexpected docker events type received") + assert.Equal(t, 
anEvent.Health.Status, apicontainerstatus.ContainerHealthy) + assert.Equal(t, anEvent.Health.Output, "health output") + + // Verify the following events do not translate into our event stream + + // + // Docker 1.8.3 sends the full command appended to exec_create and exec_start + // events. Test that we ignore there as well.. + // + ignore := []string{ + "pause", + "exec_create", + "exec_create: /bin/bash", + "exec_start", + "exec_start: /bin/bash", + "top", + "attach", + "export", + "pull", + "push", + "tag", + "untag", + "import", + "delete", + "oom", + "kill", + } + for _, eventStatus := range ignore { + eventsChan <- events.Message{Type: "container", ID: "123", Status: eventStatus} + select { + case <-dockerEvents: + t.Error("No event should be available for " + eventStatus) + default: + } + } + + // Verify only the container type event will translate to our event stream + // Events type: network, image, volume, daemon, plugins won't be handled + ignoreEventType := map[string]string{ + "network": "connect", + "image": "pull", + "volume": "create", + "plugin": "install", + "daemon": "reload", + } + + for eventType, eventStatus := range ignoreEventType { + eventsChan <- events.Message{Type: eventType, ID: "123", Status: eventStatus} + select { + case <-dockerEvents: + t.Errorf("No event should be available for %v", eventType) + default: + } + } +} + +func TestContainerEventsError(t *testing.T) { + testCases := []struct { + name string + err error + }{ + { + name: "EOF error", + err: io.EOF, + }, + { + name: "Unexpected EOF error", + err: io.ErrUnexpectedEOF, + }, + { + name: "other error", + err: errors.New("test error"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + eventsChan := make(chan events.Message, dockerEventBufferSize) + errChan := make(chan error) + mockDockerSDK.EXPECT().Events(gomock.Any(), gomock.Any()).Return(eventsChan, 
errChan).MinTimes(1) + + dockerEvents, err := client.ContainerEvents(context.TODO()) + require.NoError(t, err, "Could not get container events") + go func() { + errChan <- tc.err + eventsChan <- events.Message{Type: "container", ID: "containerId", Status: "create"} + }() + + event := <-dockerEvents + assert.Equal(t, event.DockerID, "containerId", "Wrong docker id") + assert.Equal(t, event.Status, apicontainerstatus.ContainerCreated, "Wrong status") + }) + } +} + +func TestDockerVersion(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().ServerVersion(gomock.Any()).Return(types.Version{Version: "1.6.0"}, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + str, err := client.Version(ctx, dockerclient.VersionTimeout) + assert.NoError(t, err) + assert.Equal(t, "1.6.0", str, "Got unexpected version string: "+str) +} + +func TestDockerInfo(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().Info(gomock.Any()).Return(types.Info{SecurityOptions: []string{"selinux"}}, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + info, err := client.Info(ctx, dockerclient.InfoTimeout) + + assert.NoError(t, err) + assert.Equal(t, []string{"selinux"}, info.SecurityOptions) +} + +func TestDockerInfoError(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + errorMsg := "Error getting docker info" + + mockDockerSDK.EXPECT().Info(gomock.Any()).Return(types.Info{}, errors.New(errorMsg)) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + info, err := client.Info(ctx, dockerclient.InfoTimeout) + + assert.Error(t, err, errorMsg) + assert.Equal(t, types.Info{}, info) +} + +func TestDockerInfoClientError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + errorMsg := "Error getting client" + + // Mock SDKFactory + 
mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Return the Docker Go client for the first call + sdkFactory.EXPECT().GetDefaultClient().Times(1).Return(mockDockerSDK, nil) + client, err := NewDockerGoClient(sdkFactory, defaultTestConfig(), ctx) + assert.NoError(t, err) + + // Throw error when `Info` tries to get the client + sdkFactory.EXPECT().GetDefaultClient().Return(nil, errors.New(errorMsg)) + info, err := client.Info(ctx, dockerclient.InfoTimeout) + + assert.Error(t, err, errorMsg) + assert.Equal(t, types.Info{}, info) +} + +func TestDockerVersionCached(t *testing.T) { + _, client, _, _, _, done := dockerClientSetup(t) + defer done() + + // Explicitly set daemon version so that mockDocker (the docker client) + // is not invoked again + client.setDaemonVersion("1.6.0") + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + str, err := client.Version(ctx, dockerclient.VersionTimeout) + assert.NoError(t, err) + assert.Equal(t, "1.6.0", str, "Got unexpected version string: "+str) +} + +func TestListContainers(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + containers := []types.Container{{ID: "id"}} + mockDockerSDK.EXPECT().ContainerList(gomock.Any(), types.ContainerListOptions{All: true}).Return(containers, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := client.ListContainers(ctx, true, dockerclient.ListContainersTimeout) + assert.NoError(t, response.Error) + + containerIds := response.DockerIDs + assert.Equal(t, 1, len(containerIds), "Unexpected number of containers in list: ", len(containerIds)) + assert.Equal(t, "id", containerIds[0], "Unexpected container id in the list: ", containerIds[0]) +} + +func TestListContainersTimeout(t *testing.T) { + 
mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerList(gomock.Any(), types.ContainerListOptions{All: true}). + Do(func(x, y interface{}) { + wait.Wait() + // Don't return, verify timeout happens + }).MaxTimes(1).Return(nil, errors.New("test error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := client.ListContainers(ctx, true, xContainerShortTimeout) + assert.Error(t, response.Error, "Expected error for pull timeout") + assert.Equal(t, "DockerTimeoutError", response.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestListImages(t *testing.T) { + mockDocker, client, _, _, _, done := dockerClientSetup(t) + defer done() + + images := []types.ImageSummary{{ID: "id"}} + mockDocker.EXPECT().ImageList(gomock.Any(), gomock.Any()).Return(images, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := client.ListImages(ctx, dockerclient.ListImagesTimeout) + assert.NoError(t, response.Error, "Did not expect error") + + imageIDs := response.ImageIDs + assert.EqualValues(t, len(imageIDs), 1, "Unexpected number of images in list") + assert.EqualValues(t, imageIDs[0], "id", "Unexpected id in list of images") +} + +func TestListImagesTimeout(t *testing.T) { + mockDocker, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDocker.EXPECT().ImageList(gomock.Any(), gomock.Any()).Do(func(x, y interface{}) { + wait.Wait() + // Don't return, verify timeout happens + }).MaxTimes(1).Return(nil, errors.New("test error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + response := client.ListImages(ctx, xImageShortTimeout) + assert.Error(t, response.Error, "Expected error for pull timeout") + assert.Equal(t, response.Error.(apierrors.NamedError).ErrorName(), "DockerTimeoutError", "Wrong error type") + + wait.Done() +} + 
+// Test for constructor fail when Docker SDK Client Ping() fails +func TestPingSdkFailError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, errors.New("test error")) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + _, err := NewDockerGoClient(sdkFactory, defaultTestConfig(), ctx) + assert.Error(t, err, "Expected ping error to result in constructor fail") +} + +func TestUsesVersionedClient(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := NewDockerGoClient(sdkFactory, defaultTestConfig(), ctx) + assert.NoError(t, err) + + vclient := client.WithVersion(dockerclient.DockerVersion("1.20")) + + sdkFactory.EXPECT().GetClient(dockerclient.DockerVersion("1.20")).Times(2).Return(mockDockerSDK, nil) + mockDockerSDK.EXPECT().ContainerStart(gomock.Any(), gomock.Any(), types.ContainerStartOptions{}).Return(nil) + mockDockerSDK.EXPECT().ContainerInspect(gomock.Any(), gomock.Any()).Return(types.ContainerJSON{}, errors.New("test error")) + vclient.StartContainer(ctx, "foo", defaultTestConfig().ContainerStartTimeout) +} + +func TestUnavailableVersionError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, 
nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := NewDockerGoClient(sdkFactory, defaultTestConfig(), ctx) + assert.NoError(t, err) + + vclient := client.WithVersion(dockerclient.DockerVersion("1.21")) + + sdkFactory.EXPECT().GetClient(dockerclient.DockerVersion("1.21")).Times(1).Return(nil, errors.New("Cannot get client")) + metadata := vclient.StartContainer(ctx, "foo", defaultTestConfig().ContainerStartTimeout) + + assert.NotNil(t, metadata.Error, "Expected error, didn't get one") + if namederr, ok := metadata.Error.(apierrors.NamedError); ok { + if namederr.ErrorName() != "CannotGetDockerclientError" { + t.Fatal("Wrong error name, expected CannotGetDockerclientError but got " + namederr.ErrorName()) + } + } else { + t.Fatal("Error was not a named error") + } +} + +func TestStatsNormalExit(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), true).Return(types.ContainerStats{ + Body: mockStream{ + data: []byte(`{"memory_stats":{"Usage":50},"cpu_stats":{"system_cpu_usage":100}}`), + index: 0, + }, + }, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + stats, _ := client.Stats(ctx, "foo", dockerclient.StatsInactivityTimeout) + newStat := <-stats + waitForStats(t, newStat) + + assert.Equal(t, uint64(50), newStat.MemoryStats.Usage) + assert.Equal(t, uint64(100), newStat.CPUStats.SystemUsage) +} + +func TestStatsErrorReading(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), gomock.Any()).Return(types.ContainerStats{ + Body: mockStream{ + data: []byte(`{"memory_stats":{"Usage":50},"cpu_stats":{"system_cpu_usage":100}}`), + index: 0, + }, + }, 
errors.New("test error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, errC := client.Stats(ctx, "foo", dockerclient.StatsInactivityTimeout) + + assert.Error(t, <-errC) +} + +func TestStatsErrorDecoding(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), true).Return(types.ContainerStats{ + Body: mockStream{ + data: []byte(`stuff`), + index: 0, + }, + }, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, errC := client.Stats(ctx, "foo", dockerclient.StatsInactivityTimeout) + assert.Error(t, <-errC) +} + +func TestStatsClientError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(nil, errors.New("No client")) + client := &dockerGoClient{ + sdkClientFactory: sdkFactory, + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + statsC, errC := client.Stats(ctx, "foo", dockerclient.StatsInactivityTimeout) + // should get an error from the channel + err := <-errC + // stats channel should be closed (ok=false) + _, ok := <-statsC + assert.False(t, ok) + assert.Error(t, err) +} + +type mockStream struct { + data []byte + index int64 + delay time.Duration +} + +func (ms mockStream) Read(data []byte) (n int, err error) { + time.Sleep(ms.delay) + if ms.index >= int64(len(ms.data)) { + err = io.EOF + return + } + n = copy(data, ms.data[ms.index:]) + ms.index += int64(n) + return +} +func (ms mockStream) Close() error { + return nil +} +func waitForStats(t *testing.T, stat *types.StatsJSON) { + ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) + defer cancel() + for { + select { + case <-ctx.Done(): + t.Error("Timed out waiting for container stats") + default: + if stat == nil { + time.Sleep(time.Second) + continue + } + return + } + } +} + 
+func TestStatsInactivityTimeout(t *testing.T) { + shortInactivityTimeout := 1 * time.Millisecond + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), true).Return(types.ContainerStats{ + Body: mockStream{ + data: []byte(`{"memory_stats":{"Usage":50},"cpu_stats":{"system_cpu_usage":100}}`), + index: 0, + delay: 300 * time.Millisecond, + }, + }, nil) + + client.inactivityTimeoutHandler = func(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { + assert.Equal(t, shortInactivityTimeout, timeout) + atomic.AddUint32(canceled, 1) + return reader, make(chan struct{}) + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, errC := client.Stats(ctx, "foo", shortInactivityTimeout) + assert.Error(t, <-errC) +} + +func TestPollStatsTimeout(t *testing.T) { + shortTimeout := 1 * time.Millisecond + mockDockerSDK, _, _, _, _, done := dockerClientSetup(t) + defer done() + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), false).Do(func(x, y, z interface{}) { + wait.Wait() + }).MaxTimes(1).Return(types.ContainerStats{Body: mockStream{}}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := getContainerStatsNotStreamed(mockDockerSDK, ctx, "", shortTimeout) + assert.Error(t, err) + wait.Done() +} + +func TestPollStatsError(t *testing.T) { + shortTimeout := 1 * time.Millisecond + mockDockerSDK, _, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), false).MaxTimes(1).Return(types.ContainerStats{ + Body: nil}, + errors.New("Container stats error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + _, err := getContainerStatsNotStreamed(mockDockerSDK, ctx, "foo", shortTimeout) + assert.Error(t, err) +} + +func 
TestStatsInactivityTimeoutNoHit(t *testing.T) { + longInactivityTimeout := 500 * time.Millisecond + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + mockDockerSDK.EXPECT().ContainerStats(gomock.Any(), gomock.Any(), true).Return(types.ContainerStats{ + Body: mockStream{ + data: []byte(`{"memory_stats":{"Usage":50},"cpu_stats":{"system_cpu_usage":100}}`), + index: 0, + delay: 300 * time.Millisecond, + }, + }, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + stats, _ := client.Stats(ctx, "foo", longInactivityTimeout) + newStat := <-stats + + waitForStats(t, newStat) + assert.Equal(t, uint64(50), newStat.MemoryStats.Usage) + assert.Equal(t, uint64(100), newStat.CPUStats.SystemUsage) +} + +func TestRemoveImageTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ImageRemove(gomock.Any(), "image", types.ImageRemoveOptions{}).Do(func(x, y, z interface{}) { + wait.Wait() + }) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveImage(ctx, "image", 2*time.Millisecond) + assert.Error(t, err, "Expected error for remove image timeout") + wait.Done() +} + +func TestRemoveImage(t *testing.T) { + mockDockerSDK, client, testTime, _, _, done := dockerClientSetup(t) + defer done() + + testTime.EXPECT().After(gomock.Any()).AnyTimes() + mockDockerSDK.EXPECT().ImageRemove(gomock.Any(), "image", types.ImageRemoveOptions{}).Return([]types.ImageDeleteResponseItem{}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveImage(ctx, "image", dockerclient.RemoveImageTimeout) + assert.NoError(t, err, "Did not expect error, err: %v", err) +} + +func TestLoadImageHappyPath(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().ImageLoad(gomock.Any(), gomock.Any(), 
false).Return(types.ImageLoadResponse{ + Body: ioutil.NopCloser(strings.NewReader("dummy load message")), + }, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.LoadImage(ctx, nil, dockerclient.LoadImageTimeout) + assert.NoError(t, err) +} + +func TestLoadImageTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().ImageLoad(gomock.Any(), gomock.Any(), false).Do(func(x, y, z interface{}) { + wait.Wait() + }).MaxTimes(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.LoadImage(ctx, nil, time.Millisecond) + assert.Error(t, err) + _, ok := err.(*DockerTimeoutError) + assert.True(t, ok) + wait.Done() +} + +// TestECRAuthCache tests the client will use cached docker auth if pulling +// from same registry on ecr with default instance profile +func TestECRAuthCacheWithoutExecutionRole(t *testing.T) { + mockDockerSDK, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t) + + defer done() + + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + ecrClient := mock_ecr.NewMockECRClient(ctrl) + + region := "eu-west-1" + registryID := "1234567890" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + + imageEndpoint := "registry.endpoint" + image := imageEndpoint + "myimage:tag" + username := "username" + password := "password" + + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: 
aws.Time(time.Now().Add(10 * time.Hour)), + }, nil).Times(1) + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), gomock.Any(), gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil).Times(4) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry shouldn't expect ecr client call + metadata = client.PullImage(ctx, image+"2", authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry shouldn't expect ecr client call + metadata = client.PullImage(ctx, image+"3", authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry shouldn't expect ecr client call + metadata = client.PullImage(ctx, image+"4", authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +// TestECRAuthCacheForDifferentRegistry tests the client will call ecr client to get docker +// auth for different registry +func TestECRAuthCacheForDifferentRegistry(t *testing.T) { + mockDockerSDK, client, mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t) + defer done() + + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + ecrClient := mock_ecr.NewMockECRClient(ctrl) + + region := "eu-west-1" + registryID := "1234567890" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + + imageEndpoint := "registry.endpoint" + image := imageEndpoint + "/myimage:tag" + username := "username" + password := "password" + + 
ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)), + }, nil).Times(1) + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), gomock.Any(), gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil).Times(2) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the different registry should expect ECR client call + authData.ECRAuthData.RegistryID = "another" + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken("another").Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: 
imageEndpoint := "registry.endpoint" + image := imageEndpoint + "/myimage:tag" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "executionRole", + }) + + username := "username" + password := "password" + + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)), + }, nil).Times(1) + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), gomock.Any(), gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil).Times(3) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry shouldn't expect ecr client call + metadata = client.PullImage(ctx, image+"2", authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry shouldn't expect ecr client call + metadata = client.PullImage(ctx, image+"3", authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +// TestECRAuthCacheWithDifferentExecutionRole tests client will call ecr client to get +// docker auth credentials for different execution role +func TestECRAuthCacheWithDifferentExecutionRole(t *testing.T) { + mockDockerSDK, client, 
mockTime, ctrl, ecrClientFactory, done := dockerClientSetup(t) + defer done() + + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + ecrClient := mock_ecr.NewMockECRClient(ctrl) + + region := "eu-west-1" + registryID := "1234567890" + endpointOverride := "my.endpoint" + authData := &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: registryID, + Region: region, + EndpointOverride: endpointOverride, + }, + } + authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "executionRole", + }) + + imageEndpoint := "registry.endpoint" + image := imageEndpoint + "/myimage:tag" + username := "username" + password := "password" + + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)), + }, nil).Times(1) + mockDockerSDK.EXPECT().ImagePull(gomock.Any(), gomock.Any(), gomock.Any()).Return( + mockReadCloser{ + reader: strings.NewReader(`{"status":"pull complete"}`), + }, nil).Times(2) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + metadata := client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") + + // Pull from the same registry but with different role + authData.ECRAuthData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "executionRole2", + }) + ecrClientFactory.EXPECT().GetClient(authData.ECRAuthData).Return(ecrClient, nil).Times(1) + ecrClient.EXPECT().GetAuthorizationToken(registryID).Return( + &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String("https://" + imageEndpoint), + AuthorizationToken: 
aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(10 * time.Hour)), + }, nil).Times(1) + metadata = client.PullImage(ctx, image, authData, defaultTestConfig().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Expected pull to succeed") +} + +func TestMetadataFromContainer(t *testing.T) { + ports := nat.PortMap{ + "80/tcp": []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: "80", + }, + }, + } + // Representation of Volumes in ContainerJSON + volumes := []types.MountPoint{ + {Destination: "/foo", + Source: "/bar", + }, + } + labels := map[string]string{ + "name": "metadata", + } + + created := time.Now().Format(time.RFC3339) + started := time.Now().Format(time.RFC3339) + finished := time.Now().Format(time.RFC3339) + + dockerContainer := types.ContainerJSON{ + NetworkSettings: &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Ports: ports, + }, + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: "17.0.0.3", + }, + }, + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1234", + Created: created, + State: &types.ContainerState{ + Running: true, + StartedAt: started, + FinishedAt: finished, + }, + HostConfig: &dockercontainer.HostConfig{ + NetworkMode: dockercontainer.NetworkMode("bridge"), + }, + }, + Config: &dockercontainer.Config{ + Labels: labels, + }, + Mounts: volumes, + } + + metadata := MetadataFromContainer(&dockerContainer) + assert.Equal(t, "1234", metadata.DockerID) + assert.Equal(t, volumes, metadata.Volumes) + assert.Equal(t, labels, metadata.Labels) + assert.Len(t, metadata.PortBindings, 1) + assert.Equal(t, "bridge", metadata.NetworkMode) + assert.NotNil(t, metadata.NetworkSettings) + assert.Equal(t, "17.0.0.3", metadata.NetworkSettings.IPAddress) + + // Need to convert both strings to same format to be able to compare. Parse and Format are not inverses. 
+ createdTimeSDK, _ := time.Parse(time.RFC3339, dockerContainer.Created) + startedTimeSDK, _ := time.Parse(time.RFC3339, dockerContainer.State.StartedAt) + finishedTimeSDK, _ := time.Parse(time.RFC3339, dockerContainer.State.FinishedAt) + + createdTime, _ := time.Parse(time.RFC3339, created) + startedTime, _ := time.Parse(time.RFC3339, started) + finishedTime, _ := time.Parse(time.RFC3339, finished) + + assert.True(t, createdTime.Equal(createdTimeSDK)) + assert.True(t, startedTime.Equal(startedTimeSDK)) + assert.True(t, finishedTime.Equal(finishedTimeSDK)) +} + +func TestMetadataFromContainerHealthCheckWithNoLogs(t *testing.T) { + + dockerContainer := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Health: &types.Health{Status: "unhealthy"}, + }, + }, + } + + metadata := MetadataFromContainer(dockerContainer) + assert.Equal(t, apicontainerstatus.ContainerUnhealthy, metadata.Health.Status) +} + +func TestCreateVolumeTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().VolumeCreate(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, x interface{}) { + wait.Wait() + }).MaxTimes(1).Return(types.Volume{}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volumeResponse := client.CreateVolume(ctx, "name", "driver", nil, nil, xContainerShortTimeout) + assert.Error(t, volumeResponse.Error, "expected error for timeout") + assert.Equal(t, "DockerTimeoutError", volumeResponse.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestCreateVolumeError(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().VolumeCreate(gomock.Any(), gomock.Any()).Return(types.Volume{}, errors.New("some docker error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volumeResponse := 
client.CreateVolume(ctx, "name", "driver", nil, nil, dockerclient.CreateVolumeTimeout) + assert.Equal(t, "CannotCreateVolumeError", volumeResponse.Error.(apierrors.NamedError).ErrorName()) +} + +func TestCreateVolume(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + volumeName := "volumeName" + mountPoint := "some/mount/point" + driver := "driver" + driverOptions := map[string]string{ + "opt1": "val1", + "opt2": "val2", + } + gomock.InOrder( + mockDockerSDK.EXPECT().VolumeCreate(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, opts volume.VolumeCreateBody) { + assert.Equal(t, opts.Name, volumeName) + assert.Equal(t, opts.Driver, driver) + assert.EqualValues(t, opts.DriverOpts, driverOptions) + }).Return(types.Volume{Name: volumeName, Driver: driver, Mountpoint: mountPoint, Labels: nil}, nil), + ) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + // The Docker API call made by CreateVolume is satisfied by the mocked VolumeCreate expectation above, so no real API call happens here + volumeResponse := client.CreateVolume(ctx, volumeName, driver, driverOptions, nil, dockerclient.CreateVolumeTimeout) + assert.NoError(t, volumeResponse.Error) + assert.Equal(t, volumeResponse.DockerVolume.Name, volumeName) + assert.Equal(t, volumeResponse.DockerVolume.Driver, driver) + assert.Equal(t, volumeResponse.DockerVolume.Mountpoint, mountPoint) + assert.Nil(t, volumeResponse.DockerVolume.Labels) +} + +func TestInspectVolumeTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().VolumeInspect(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, x interface{}) { + wait.Wait() + }).MaxTimes(1).Return(types.Volume{}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volumeResponse := client.InspectVolume(ctx, "name", xContainerShortTimeout) + assert.Error(t, volumeResponse.Error, "expected error for timeout") + 
assert.Equal(t, "DockerTimeoutError", volumeResponse.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestInspectVolumeError(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().VolumeInspect(gomock.Any(), gomock.Any()).Return(types.Volume{}, errors.New("some docker error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volumeResponse := client.InspectVolume(ctx, "name", dockerclient.InspectVolumeTimeout) + assert.Equal(t, "CannotInspectVolumeError", volumeResponse.Error.(apierrors.NamedError).ErrorName()) +} + +func TestInspectVolume(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + volumeName := "volumeName" + + volumeOutput := types.Volume{ + Name: volumeName, + Driver: "driver", + Mountpoint: "local/mount/point", + Labels: map[string]string{ + "label1": "val1", + "label2": "val2", + }, + } + + mockDockerSDK.EXPECT().VolumeInspect(gomock.Any(), volumeName).Return(volumeOutput, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volumeResponse := client.InspectVolume(ctx, volumeName, dockerclient.InspectVolumeTimeout) + assert.NoError(t, volumeResponse.Error) + assert.Equal(t, volumeOutput.Driver, volumeResponse.DockerVolume.Driver) + assert.Equal(t, volumeOutput.Mountpoint, volumeResponse.DockerVolume.Mountpoint) + assert.Equal(t, volumeOutput.Labels, volumeResponse.DockerVolume.Labels) +} + +func TestRemoveVolumeTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().VolumeRemove(gomock.Any(), "name", false).Do(func(ctx context.Context, + x interface{}, y bool) { + wait.Wait() + }).MaxTimes(1) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveVolume(ctx, "name", xContainerShortTimeout) + assert.Error(t, err, "expected error 
for timeout") + assert.Equal(t, "DockerTimeoutError", err.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestRemoveVolumeError(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().VolumeRemove(gomock.Any(), "name", false).Return(errors.New("some docker error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveVolume(ctx, "name", dockerclient.RemoveVolumeTimeout) + assert.Equal(t, "CannotRemoveVolumeError", err.(apierrors.NamedError).ErrorName()) + assert.NotNil(t, err.Error(), "Nested error cannot be nil") +} + +func TestRemoveVolume(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + volumeName := "volumeName" + + mockDockerSDK.EXPECT().VolumeRemove(gomock.Any(), volumeName, false).Return(nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := client.RemoveVolume(ctx, volumeName, dockerclient.RemoveVolumeTimeout) + assert.NoError(t, err) +} + +func TestListPluginsTimeout(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + wait := &sync.WaitGroup{} + wait.Add(1) + mockDockerSDK.EXPECT().PluginList(gomock.Any(), filters.Args{}).Do(func(x, y interface{}) { + wait.Wait() + }).MaxTimes(1) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := client.ListPlugins(ctx, xContainerShortTimeout, filters.Args{}) + assert.Error(t, response.Error, "expected error for timeout") + assert.Equal(t, "DockerTimeoutError", response.Error.(apierrors.NamedError).ErrorName()) + wait.Done() +} + +func TestListPluginsError(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + mockDockerSDK.EXPECT().PluginList(gomock.Any(), filters.Args{}).Return(nil, errors.New("some docker error")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := 
client.ListPlugins(ctx, dockerclient.ListPluginsTimeout, filters.Args{}) + assert.Equal(t, "CannotListPluginsError", response.Error.(apierrors.NamedError).ErrorName()) +} + +func TestListPlugins(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + pluginID := "id" + pluginName := "name" + plugin := &types.Plugin{ + ID: pluginID, + Name: pluginName, + Enabled: true, + } + + mockDockerSDK.EXPECT().PluginList(gomock.Any(), filters.Args{}).Return([]*types.Plugin{plugin}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + response := client.ListPlugins(ctx, dockerclient.ListPluginsTimeout, filters.Args{}) + assert.NoError(t, response.Error) + assert.Equal(t, plugin, response.Plugins[0]) +} + +func TestListPluginsWithFilter(t *testing.T) { + mockDockerSDK, client, _, _, _, done := dockerClientSetup(t) + defer done() + + plugins := []*types.Plugin{ + &types.Plugin{ + ID: "id1", + Name: "name1", + Enabled: false, + }, + &types.Plugin{ + ID: "id2", + Name: "name2", + Enabled: true, + Config: types.PluginConfig{ + Description: "A sample volume plugin for Docker", + Interface: types.PluginConfigInterface{ + Types: []types.PluginInterfaceType{ + {Capability: "docker.volumedriver/1.0"}, + }, + Socket: "plugins.sock", + }, + }, + }, + &types.Plugin{ + ID: "id3", + Name: "name3", + Enabled: true, + Config: types.PluginConfig{ + Description: "A sample network plugin for Docker", + Interface: types.PluginConfigInterface{ + Types: []types.PluginInterfaceType{ + {Capability: "docker.networkdriver/1.0"}, + }, + Socket: "plugins.sock", + }, + }, + }, + } + + filterList := filters.NewArgs(filters.Arg("enabled", "true")) + filterList.Add("capability", VolumeDriverType) + mockDockerSDK.EXPECT().PluginList(gomock.Any(), filterList).Return([]*types.Plugin{plugins[1]}, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + pluginNames, error := client.ListPluginsWithFilters(ctx, true, 
[]string{VolumeDriverType}, dockerclient.ListPluginsTimeout) + assert.NoError(t, error) + assert.Equal(t, 1, len(pluginNames)) + assert.Equal(t, "name2", pluginNames[0]) +} diff --git a/agent/dockerclient/dockerapi/docker_events_buffer.go b/agent/dockerclient/dockerapi/docker_events_buffer.go new file mode 100644 index 00000000000..8e9667427e0 --- /dev/null +++ b/agent/dockerclient/dockerapi/docker_events_buffer.go @@ -0,0 +1,116 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "context" + "sync" + + "github.com/docker/docker/api/types/events" +) + +const ( + // TODO add support for filter in go-dockerclient + containerTypeEvent = "container" +) + +var containerEvents = []string{ + "create", + "start", + "stop", + "die", + "restart", + "oom", + "health_status: unhealthy", + "health_status: healthy", +} + +// InfiniteBuffer defines an unlimited buffer, where it reads from +// input channel and write to output channel. 
+type InfiniteBuffer struct { + events []*events.Message + empty bool + waitForEvent sync.WaitGroup + lock sync.RWMutex +} + +// NewInfiniteBuffer returns an empty InfiniteBuffer object +func NewInfiniteBuffer() *InfiniteBuffer { + return &InfiniteBuffer{} +} + +// StartListening starts reading from the input channel and writes to the buffer +// When the context is cancelled, it stops listening +func (buffer *InfiniteBuffer) StartListening(ctx context.Context, eventChan <-chan events.Message) { + for { + select { + // If context is cancelled, drain remaining events and return + case <-ctx.Done(): + for len(eventChan) > 0 { + event := <-eventChan + go buffer.CopyEvents(&event) + } + return + case event := <-eventChan: + go buffer.CopyEvents(&event) + } + } +} + +// CopyEvents copies the event into the buffer. NOTE(review): StartListening invokes this in a new goroutine per event, so events may be buffered out of arrival order — confirm ordering is not required +func (buffer *InfiniteBuffer) CopyEvents(event *events.Message) { + if event.ID == "" || event.Type != containerTypeEvent { + return + } + + // Only add the events the agent is interested in (see containerEvents) + for _, containerEvent := range containerEvents { + if event.Status == containerEvent { + buffer.lock.Lock() + defer buffer.lock.Unlock() + + buffer.events = append(buffer.events, event) + // Check if there is a consumer waiting for events + if buffer.empty { + buffer.empty = false + + // Unblock the consumer + buffer.waitForEvent.Done() + } + return + } + } +} + +// Consume reads the buffer and writes to a listener channel +func (buffer *InfiniteBuffer) Consume(in chan<- *events.Message) { + for { + buffer.lock.Lock() + + if len(buffer.events) == 0 { + // Mark the buffer as empty and start waiting for events + buffer.empty = true + buffer.waitForEvent.Add(1) + buffer.lock.Unlock() + buffer.waitForEvent.Wait() + } else { + event := buffer.events[0] + buffer.events = buffer.events[1:] + buffer.lock.Unlock() + + // Send event to the buffer listener + in <- event + } + } +} diff --git a/agent/dockerclient/dockerapi/docker_events_buffer_test.go 
b/agent/dockerclient/dockerapi/docker_events_buffer_test.go new file mode 100644 index 00000000000..468ef3e4b8f --- /dev/null +++ b/agent/dockerclient/dockerapi/docker_events_buffer_test.go @@ -0,0 +1,72 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "context" + "strconv" + "testing" + + "github.com/docker/docker/api/types/events" + "github.com/stretchr/testify/assert" +) + +func TestProduceConsume(t *testing.T) { + buffer := NewInfiniteBuffer() + producer := make(chan events.Message) + consumer := make(chan *events.Message) + + // Start the process of producer and consumer + go buffer.StartListening(context.TODO(), producer) + go buffer.Consume(consumer) + + // writing multiple events to the buffer + go func() { + for i := 0; i < 10000; i++ { + producer <- events.Message{ + ID: strconv.Itoa(i), + Type: containerTypeEvent, + Status: "die", + } + } + }() + + for i := 0; i < 10000; i++ { + <-consumer + } +} + +func TestIgnoreEvents(t *testing.T) { + buffer := NewInfiniteBuffer() + producer := make(chan events.Message) + consumer := make(chan *events.Message) + + // Start the process of producer and consumer + go buffer.StartListening(context.TODO(), producer) + go buffer.Consume(consumer) + + // event with empty ID + producer <- events.Message{Type: containerTypeEvent, Status: "stop"} + // event with wrong type + producer <- events.Message{ID: "id", Status: "stop", Type: "image"} + for _, 
event := range containerEvents { + producer <- events.Message{ID: "id", Type: containerTypeEvent, Status: event + "invalid"} + } + + buffer.lock.Lock() + defer buffer.lock.Unlock() + assert.Len(t, buffer.events, 0) +} diff --git a/agent/dockerclient/dockerapi/errors.go b/agent/dockerclient/dockerapi/errors.go new file mode 100644 index 00000000000..8d8f179feb8 --- /dev/null +++ b/agent/dockerclient/dockerapi/errors.go @@ -0,0 +1,402 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" +) + +const ( + // DockerTimeoutErrorName is the name of docker timeout error. + DockerTimeoutErrorName = "DockerTimeoutError" + // CannotInspectContainerErrorName is the name of container inspect error. + CannotInspectContainerErrorName = "CannotInspectContainerError" + // CannotStartContainerErrorName is the name of container start error. + CannotStartContainerErrorName = "CannotStartContainerError" + // CannotDescribeContainerErrorName is the name of describe container error. + CannotDescribeContainerErrorName = "CannotDescribeContainerError" + // CannotGetContainerTopErrorName is the name of the top container error. 
+	CannotGetContainerTopErrorName = "CannotGetContainerTopError"
+	// TopProcessNotFoundErrorName is the error thrown when the specified pid does
+	// not exist in the container
+	TopProcessNotFoundErrorName = "ps: exit status 1"
+)
+
+// DockerTimeoutError is an error type for describing timeouts
+type DockerTimeoutError struct {
+	// Duration is the timeout period.
+	Duration time.Duration
+	// Transition is the description of operation that timed out.
+	Transition string
+}
+
+func (err *DockerTimeoutError) Error() string {
+	return "Could not transition to " + err.Transition + "; timed out after waiting " + err.Duration.String()
+}
+
+// ErrorName returns the name of the error
+func (err *DockerTimeoutError) ErrorName() string { return DockerTimeoutErrorName }
+
+// OutOfMemoryError is a type for errors caused by running out of memory
+type OutOfMemoryError struct{}
+
+func (err OutOfMemoryError) Error() string { return "Container killed due to memory usage" }
+
+// ErrorName returns the name of the error
+func (err OutOfMemoryError) ErrorName() string { return "OutOfMemoryError" }
+
+// DockerStateError is a wrapper around the error docker puts in the '.State.Error' field of its inspect output.
+type DockerStateError struct {
+	dockerError string
+	name        string
+}
+
+// NewDockerStateError creates a DockerStateError
+func NewDockerStateError(err string) DockerStateError {
+	// Add string-matching logic as needed to provide better output than docker
+	return DockerStateError{
+		dockerError: err,
+		name:        "DockerStateError",
+	}
+}
+
+func (err DockerStateError) Error() string {
+	return err.dockerError
+}
+
+// ErrorName returns the name of the DockerStateError.
+func (err DockerStateError) ErrorName() string { + return err.name +} + +// CannotGetDockerClientError is a type for failing to get a specific Docker client +type CannotGetDockerClientError struct { + version dockerclient.DockerVersion + err error +} + +func (c CannotGetDockerClientError) Error() string { + if c.version != "" { + return "(v" + string(c.version) + ") - " + c.err.Error() + } + return c.err.Error() +} + +// ErrorName returns the name of the CannotGetDockerClientError. +func (CannotGetDockerClientError) ErrorName() string { + return "CannotGetDockerclientError" +} + +// CannotStopContainerError indicates any error when trying to stop a container +type CannotStopContainerError struct { + FromError error +} + +func (err CannotStopContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotStopContainerError. +func (err CannotStopContainerError) ErrorName() string { + return "CannotStopContainerError" +} + +// IsRetriableError returns a boolean indicating whether the call that +// generated the error can be retried. +// When stopping a container, most errors that we can get should be +// considered retriable. However, in the case where the container is +// already stopped or doesn't exist at all, there's no sense in +// retrying. +func (err CannotStopContainerError) IsRetriableError() bool { + if _, ok := err.FromError.(NoSuchContainerError); ok { + return false + } + + return true +} + +// CannotPullContainerError indicates any error when trying to pull +// a container image +type CannotPullContainerError struct { + FromError error +} + +func (err CannotPullContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotPullContainerError. 
+func (err CannotPullContainerError) ErrorName() string { + return "CannotPullContainerError" +} + +// CannotPullECRContainerError indicates any error when trying to pull +// a container image from ECR +type CannotPullECRContainerError struct { + FromError error +} + +func (err CannotPullECRContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotPullECRContainerError. +func (err CannotPullECRContainerError) ErrorName() string { + return "CannotPullECRContainerError" +} + +// Retry fulfills the utils.Retrier interface and allows retries to be skipped by utils.Retry* functions +func (err CannotPullECRContainerError) Retry() bool { + return false +} + +// CannotPullContainerAuthError indicates any error when trying to pull +// a container image +type CannotPullContainerAuthError struct { + FromError error +} + +func (err CannotPullContainerAuthError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotPullContainerAuthError. +func (err CannotPullContainerAuthError) ErrorName() string { + return "CannotPullContainerAuthError" +} + +// Retry fulfills the utils.Retrier interface and allows retries to be skipped by utils.Retry* functions +func (err CannotPullContainerAuthError) Retry() bool { + return false +} + +// CannotCreateContainerError indicates any error when trying to create a container +type CannotCreateContainerError struct { + FromError error +} + +func (err CannotCreateContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotCreateContainerError. 
+func (err CannotCreateContainerError) ErrorName() string { + return "CannotCreateContainerError" +} + +// CannotStartContainerError indicates any error when trying to start a container +type CannotStartContainerError struct { + FromError error +} + +func (err CannotStartContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotStartContainerError +func (err CannotStartContainerError) ErrorName() string { + return CannotStartContainerErrorName +} + +// CannotInspectContainerError indicates any error when trying to inspect a container +type CannotInspectContainerError struct { + FromError error +} + +func (err CannotInspectContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotInspectContainerError +func (err CannotInspectContainerError) ErrorName() string { + return CannotInspectContainerErrorName +} + +// CannotGetContainerTopError indicates any error when trying to get container top processes +type CannotGetContainerTopError struct { + FromError error +} + +func (err CannotGetContainerTopError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotGetContainerTopError +func (err CannotGetContainerTopError) ErrorName() string { + return CannotGetContainerTopErrorName +} + +// CannotRemoveContainerError indicates any error when trying to remove a container +type CannotRemoveContainerError struct { + FromError error +} + +func (err CannotRemoveContainerError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotRemoveContainerError +func (err CannotRemoveContainerError) ErrorName() string { + return "CannotRemoveContainerError" +} + +// CannotDescribeContainerError indicates any error when trying to describe a container +type CannotDescribeContainerError struct { + FromError error +} + +func (err CannotDescribeContainerError) Error() string { + return err.FromError.Error() +} + +// 
ErrorName returns name of the CannotDescribeContainerError +func (err CannotDescribeContainerError) ErrorName() string { + return CannotDescribeContainerErrorName +} + +// CannotListContainersError indicates any error when trying to list containers +type CannotListContainersError struct { + FromError error +} + +func (err CannotListContainersError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotListContainersError +func (err CannotListContainersError) ErrorName() string { + return "CannotListContainersError" +} + +type CannotListImagesError struct { + FromError error +} + +func (err CannotListImagesError) Error() string { + return err.FromError.Error() +} + +// ErrorName returns name of the CannotListImagesError +func (err CannotListImagesError) ErrorName() string { + return "CannotListImagesError" +} + +// CannotCreateVolumeError indicates any error when trying to create a volume +type CannotCreateVolumeError struct { + fromError error +} + +func (err CannotCreateVolumeError) Error() string { + return err.fromError.Error() +} + +func (err CannotCreateVolumeError) ErrorName() string { + return "CannotCreateVolumeError" +} + +// CannotInspectVolumeError indicates any error when trying to inspect a volume +type CannotInspectVolumeError struct { + fromError error +} + +func (err CannotInspectVolumeError) Error() string { + return err.fromError.Error() +} + +func (err CannotInspectVolumeError) ErrorName() string { + return "CannotInspectVolumeError" +} + +// CannotRemoveVolumeError indicates any error when trying to inspect a volume +type CannotRemoveVolumeError struct { + fromError error +} + +func (err CannotRemoveVolumeError) Error() string { + return err.fromError.Error() +} + +func (err CannotRemoveVolumeError) ErrorName() string { + return "CannotRemoveVolumeError" +} + +// CannotListPluginsError indicates any error when trying to list docker plugins +type CannotListPluginsError struct { + fromError error +} + +func 
(err CannotListPluginsError) Error() string {
+	return err.fromError.Error()
+}
+
+func (err CannotListPluginsError) ErrorName() string {
+	return "CannotListPluginsError"
+}
+
+// NoSuchContainerError indicates error when a given container is not found.
+type NoSuchContainerError struct {
+	ID string
+}
+
+func (err NoSuchContainerError) Error() string {
+	return "Container not found: " + err.ID
+}
+
+func (err NoSuchContainerError) ErrorName() string {
+	return "NoSuchContainerError"
+}
+
+// CannotCreateContainerExecError indicates any error when trying to create an exec object
+type CannotCreateContainerExecError struct {
+	FromError error
+}
+
+func (err CannotCreateContainerExecError) Error() string {
+	return err.FromError.Error()
+}
+
+// ErrorName returns name of the CannotCreateContainerExecError.
+func (err CannotCreateContainerExecError) ErrorName() string {
+	return "CannotCreateContainerExecError"
+}
+
+// CannotStartContainerExecError indicates any error when trying to start an exec process
+type CannotStartContainerExecError struct {
+	FromError error
+}
+
+func (err CannotStartContainerExecError) Error() string {
+	return err.FromError.Error()
+}
+
+// ErrorName returns name of the CannotStartContainerExecError.
+func (err CannotStartContainerExecError) ErrorName() string {
+	return "CannotStartContainerExecError"
+}
+
+// CannotInspectContainerExecError indicates any error when trying to inspect an exec process
+type CannotInspectContainerExecError struct {
+	FromError error
+}
+
+func (err CannotInspectContainerExecError) Error() string {
+	return err.FromError.Error()
+}
+
+// ErrorName returns name of the CannotInspectContainerExecError.
+func (err CannotInspectContainerExecError) ErrorName() string { + return "CannotInspectContainerExecError" +} diff --git a/agent/dockerclient/dockerapi/errors_test.go b/agent/dockerclient/dockerapi/errors_test.go new file mode 100644 index 00000000000..c617f594ed1 --- /dev/null +++ b/agent/dockerclient/dockerapi/errors_test.go @@ -0,0 +1,33 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRetriableErrorReturnsFalseForNoSuchContainer(t *testing.T) { + err := CannotStopContainerError{NoSuchContainerError{}} + assert.False(t, err.IsRetriableError(), "No such container error should be treated as unretriable docker error") +} + +func TestRetriableErrorReturnsTrue(t *testing.T) { + err := CannotStopContainerError{errors.New("error")} + assert.True(t, err.IsRetriableError(), "Non unretriable error treated as unretriable docker error") +} diff --git a/agent/dockerclient/dockerapi/generate_mocks.go b/agent/dockerclient/dockerapi/generate_mocks.go new file mode 100644 index 00000000000..01e0ab8db89 --- /dev/null +++ b/agent/dockerclient/dockerapi/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +//go:generate mockgen -destination=mocks/dockerapi_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi DockerClient diff --git a/agent/dockerclient/dockerapi/inactivity_timeout_handler.go b/agent/dockerclient/dockerapi/inactivity_timeout_handler.go new file mode 100644 index 00000000000..66d18ba0fa4 --- /dev/null +++ b/agent/dockerclient/dockerapi/inactivity_timeout_handler.go @@ -0,0 +1,70 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "io" + "sync/atomic" + "time" +) + +type proxyReader struct { + io.ReadCloser + calls uint64 +} + +func (p *proxyReader) callCount() uint64 { + return atomic.LoadUint64(&p.calls) +} + +func (p *proxyReader) Read(data []byte) (int, error) { + atomic.AddUint64(&p.calls, 1) + return p.ReadCloser.Read(data) +} + +// When pulling an image, the docker api will pull and then subsequently unzip the downloaded artifacts. Docker does +// not separate the "pull" from the "unpack" step. 
What this means is that this timeout doesn't 'tick' while unpacking +// the downloaded files. This only causes noticeable impact with large files, but we should investigate improving this. +func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) { + done := make(chan struct{}) + pReader := &proxyReader{ReadCloser: reader} + go checkInactivityTimeout(pReader, timeout, cancelRequest, canceled, done) + return pReader, done +} + +// checkInactivityTimeout checks whether there's new read activity in the proxyReader as recorded by its call count, in every interval +// as specified by the timeout argument. It returns either when there's no new read activity in an interval (in that case the +// 'canceled' flag is marked to 1 to indicate that), or when it's canceled (via the 'done' channel). +func checkInactivityTimeout(pr *proxyReader, timeout time.Duration, cancelRequest func(), canceled *uint32, done chan struct{}) { + var lastCallCount uint64 + finished := false + for !finished { + lastCallCount, finished = checkReadActivityOnce(pr, timeout, cancelRequest, canceled, done, lastCallCount) + } +} + +func checkReadActivityOnce(pr *proxyReader, timeout time.Duration, cancelRequest func(), canceled *uint32, done chan struct{}, lastCallCount uint64) (uint64, bool) { + select { + case <-time.After(timeout): + case <-done: + return lastCallCount, true + } + curCallCount := pr.callCount() + if curCallCount == lastCallCount { + atomic.AddUint32(canceled, 1) + cancelRequest() + return lastCallCount, true + } + return curCallCount, false +} diff --git a/agent/dockerclient/dockerapi/inactivity_timeout_handler_test.go b/agent/dockerclient/dockerapi/inactivity_timeout_handler_test.go new file mode 100644 index 00000000000..04cbdf63b38 --- /dev/null +++ b/agent/dockerclient/dockerapi/inactivity_timeout_handler_test.go @@ -0,0 +1,76 @@ +// +build unit + +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerapi + +import ( + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + testTimeout = 100 * time.Millisecond +) + +func TestCheckInactivityTimeout(t *testing.T) { + pReader := &proxyReader{ReadCloser: ioutil.NopCloser(strings.NewReader("test"))} + pReader.calls = 0 + var canceled uint32 + var cancelRequestInvoked bool + done := make(chan struct{}) + checkInactivityTimeout(pReader, testTimeout, func() { + cancelRequestInvoked = true + }, &canceled, done) + assert.True(t, cancelRequestInvoked) + assert.Equal(t, uint32(1), canceled) +} + +func TestCheckReadActivityOnce(t *testing.T) { + testCases := []struct { + name string + readerCallCount uint64 + expectCallCount uint64 + expectFinish bool + }{ + { + name: "Test check read activity with new read activity", + readerCallCount: 1, + expectCallCount: 1, + expectFinish: false, + }, + { + name: "Test check read activity without new read activity", + readerCallCount: 0, + expectCallCount: 0, + expectFinish: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pReader := &proxyReader{ReadCloser: ioutil.NopCloser(strings.NewReader("test"))} + pReader.calls = tc.readerCallCount + var canceled uint32 + done := make(chan struct{}) + callCount, finished := checkReadActivityOnce(pReader, testTimeout, func() {}, &canceled, done, 0) + assert.Equal(t, tc.expectCallCount, callCount) + 
assert.Equal(t, tc.expectFinish, finished) + }) + } +} diff --git a/agent/dockerclient/dockerapi/mocks/dockerapi_mocks.go b/agent/dockerclient/dockerapi/mocks/dockerapi_mocks.go new file mode 100644 index 00000000000..63ff0bb7ddd --- /dev/null +++ b/agent/dockerclient/dockerapi/mocks/dockerapi_mocks.go @@ -0,0 +1,481 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi (interfaces: DockerClient) + +// Package mock_dockerapi is a generated GoMock package. 
+package mock_dockerapi + +import ( + context "context" + io "io" + reflect "reflect" + time "time" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + status "github.com/aws/amazon-ecs-agent/agent/api/container/status" + dockerclient "github.com/aws/amazon-ecs-agent/agent/dockerclient" + dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + types "github.com/docker/docker/api/types" + container0 "github.com/docker/docker/api/types/container" + filters "github.com/docker/docker/api/types/filters" + gomock "github.com/golang/mock/gomock" +) + +// MockDockerClient is a mock of DockerClient interface +type MockDockerClient struct { + ctrl *gomock.Controller + recorder *MockDockerClientMockRecorder +} + +// MockDockerClientMockRecorder is the mock recorder for MockDockerClient +type MockDockerClientMockRecorder struct { + mock *MockDockerClient +} + +// NewMockDockerClient creates a new mock instance +func NewMockDockerClient(ctrl *gomock.Controller) *MockDockerClient { + mock := &MockDockerClient{ctrl: ctrl} + mock.recorder = &MockDockerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDockerClient) EXPECT() *MockDockerClientMockRecorder { + return m.recorder +} + +// APIVersion mocks base method +func (m *MockDockerClient) APIVersion() (dockerclient.DockerVersion, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "APIVersion") + ret0, _ := ret[0].(dockerclient.DockerVersion) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// APIVersion indicates an expected call of APIVersion +func (mr *MockDockerClientMockRecorder) APIVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "APIVersion", reflect.TypeOf((*MockDockerClient)(nil).APIVersion)) +} + +// ContainerEvents mocks base method +func (m *MockDockerClient) ContainerEvents(arg0 context.Context) (<-chan 
dockerapi.DockerContainerChangeEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerEvents", arg0) + ret0, _ := ret[0].(<-chan dockerapi.DockerContainerChangeEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerEvents indicates an expected call of ContainerEvents +func (mr *MockDockerClientMockRecorder) ContainerEvents(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerEvents", reflect.TypeOf((*MockDockerClient)(nil).ContainerEvents), arg0) +} + +// CreateContainer mocks base method +func (m *MockDockerClient) CreateContainer(arg0 context.Context, arg1 *container0.Config, arg2 *container0.HostConfig, arg3 string, arg4 time.Duration) dockerapi.DockerContainerMetadata { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateContainer", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(dockerapi.DockerContainerMetadata) + return ret0 +} + +// CreateContainer indicates an expected call of CreateContainer +func (mr *MockDockerClientMockRecorder) CreateContainer(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateContainer", reflect.TypeOf((*MockDockerClient)(nil).CreateContainer), arg0, arg1, arg2, arg3, arg4) +} + +// CreateContainerExec mocks base method +func (m *MockDockerClient) CreateContainerExec(arg0 context.Context, arg1 string, arg2 types.ExecConfig, arg3 time.Duration) (*types.IDResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateContainerExec", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*types.IDResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateContainerExec indicates an expected call of CreateContainerExec +func (mr *MockDockerClientMockRecorder) CreateContainerExec(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"CreateContainerExec", reflect.TypeOf((*MockDockerClient)(nil).CreateContainerExec), arg0, arg1, arg2, arg3) +} + +// CreateVolume mocks base method +func (m *MockDockerClient) CreateVolume(arg0 context.Context, arg1, arg2 string, arg3, arg4 map[string]string, arg5 time.Duration) dockerapi.SDKVolumeResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(dockerapi.SDKVolumeResponse) + return ret0 +} + +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockDockerClientMockRecorder) CreateVolume(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockDockerClient)(nil).CreateVolume), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// DescribeContainer mocks base method +func (m *MockDockerClient) DescribeContainer(arg0 context.Context, arg1 string) (status.ContainerStatus, dockerapi.DockerContainerMetadata) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeContainer", arg0, arg1) + ret0, _ := ret[0].(status.ContainerStatus) + ret1, _ := ret[1].(dockerapi.DockerContainerMetadata) + return ret0, ret1 +} + +// DescribeContainer indicates an expected call of DescribeContainer +func (mr *MockDockerClientMockRecorder) DescribeContainer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeContainer", reflect.TypeOf((*MockDockerClient)(nil).DescribeContainer), arg0, arg1) +} + +// Info mocks base method +func (m *MockDockerClient) Info(arg0 context.Context, arg1 time.Duration) (types.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Info", arg0, arg1) + ret0, _ := ret[0].(types.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Info indicates an expected call of Info +func (mr *MockDockerClientMockRecorder) Info(arg0, arg1 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockDockerClient)(nil).Info), arg0, arg1) +} + +// InspectContainer mocks base method +func (m *MockDockerClient) InspectContainer(arg0 context.Context, arg1 string, arg2 time.Duration) (*types.ContainerJSON, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InspectContainer", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ContainerJSON) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InspectContainer indicates an expected call of InspectContainer +func (mr *MockDockerClientMockRecorder) InspectContainer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InspectContainer", reflect.TypeOf((*MockDockerClient)(nil).InspectContainer), arg0, arg1, arg2) +} + +// InspectContainerExec mocks base method +func (m *MockDockerClient) InspectContainerExec(arg0 context.Context, arg1 string, arg2 time.Duration) (*types.ContainerExecInspect, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InspectContainerExec", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ContainerExecInspect) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InspectContainerExec indicates an expected call of InspectContainerExec +func (mr *MockDockerClientMockRecorder) InspectContainerExec(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InspectContainerExec", reflect.TypeOf((*MockDockerClient)(nil).InspectContainerExec), arg0, arg1, arg2) +} + +// InspectImage mocks base method +func (m *MockDockerClient) InspectImage(arg0 string) (*types.ImageInspect, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InspectImage", arg0) + ret0, _ := ret[0].(*types.ImageInspect) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InspectImage indicates an expected call of InspectImage +func (mr *MockDockerClientMockRecorder) 
InspectImage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InspectImage", reflect.TypeOf((*MockDockerClient)(nil).InspectImage), arg0) +} + +// InspectVolume mocks base method +func (m *MockDockerClient) InspectVolume(arg0 context.Context, arg1 string, arg2 time.Duration) dockerapi.SDKVolumeResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InspectVolume", arg0, arg1, arg2) + ret0, _ := ret[0].(dockerapi.SDKVolumeResponse) + return ret0 +} + +// InspectVolume indicates an expected call of InspectVolume +func (mr *MockDockerClientMockRecorder) InspectVolume(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InspectVolume", reflect.TypeOf((*MockDockerClient)(nil).InspectVolume), arg0, arg1, arg2) +} + +// KnownVersions mocks base method +func (m *MockDockerClient) KnownVersions() []dockerclient.DockerVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KnownVersions") + ret0, _ := ret[0].([]dockerclient.DockerVersion) + return ret0 +} + +// KnownVersions indicates an expected call of KnownVersions +func (mr *MockDockerClientMockRecorder) KnownVersions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KnownVersions", reflect.TypeOf((*MockDockerClient)(nil).KnownVersions)) +} + +// ListContainers mocks base method +func (m *MockDockerClient) ListContainers(arg0 context.Context, arg1 bool, arg2 time.Duration) dockerapi.ListContainersResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListContainers", arg0, arg1, arg2) + ret0, _ := ret[0].(dockerapi.ListContainersResponse) + return ret0 +} + +// ListContainers indicates an expected call of ListContainers +func (mr *MockDockerClientMockRecorder) ListContainers(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainers", 
reflect.TypeOf((*MockDockerClient)(nil).ListContainers), arg0, arg1, arg2) +} + +// ListImages mocks base method +func (m *MockDockerClient) ListImages(arg0 context.Context, arg1 time.Duration) dockerapi.ListImagesResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListImages", arg0, arg1) + ret0, _ := ret[0].(dockerapi.ListImagesResponse) + return ret0 +} + +// ListImages indicates an expected call of ListImages +func (mr *MockDockerClientMockRecorder) ListImages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockDockerClient)(nil).ListImages), arg0, arg1) +} + +// ListPlugins mocks base method +func (m *MockDockerClient) ListPlugins(arg0 context.Context, arg1 time.Duration, arg2 filters.Args) dockerapi.ListPluginsResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPlugins", arg0, arg1, arg2) + ret0, _ := ret[0].(dockerapi.ListPluginsResponse) + return ret0 +} + +// ListPlugins indicates an expected call of ListPlugins +func (mr *MockDockerClientMockRecorder) ListPlugins(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPlugins", reflect.TypeOf((*MockDockerClient)(nil).ListPlugins), arg0, arg1, arg2) +} + +// ListPluginsWithFilters mocks base method +func (m *MockDockerClient) ListPluginsWithFilters(arg0 context.Context, arg1 bool, arg2 []string, arg3 time.Duration) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPluginsWithFilters", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPluginsWithFilters indicates an expected call of ListPluginsWithFilters +func (mr *MockDockerClientMockRecorder) ListPluginsWithFilters(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPluginsWithFilters", 
reflect.TypeOf((*MockDockerClient)(nil).ListPluginsWithFilters), arg0, arg1, arg2, arg3) +} + +// LoadImage mocks base method +func (m *MockDockerClient) LoadImage(arg0 context.Context, arg1 io.Reader, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadImage", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadImage indicates an expected call of LoadImage +func (mr *MockDockerClientMockRecorder) LoadImage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadImage", reflect.TypeOf((*MockDockerClient)(nil).LoadImage), arg0, arg1, arg2) +} + +// PullImage mocks base method +func (m *MockDockerClient) PullImage(arg0 context.Context, arg1 string, arg2 *container.RegistryAuthenticationData, arg3 time.Duration) dockerapi.DockerContainerMetadata { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PullImage", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(dockerapi.DockerContainerMetadata) + return ret0 +} + +// PullImage indicates an expected call of PullImage +func (mr *MockDockerClientMockRecorder) PullImage(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockDockerClient)(nil).PullImage), arg0, arg1, arg2, arg3) +} + +// RemoveContainer mocks base method +func (m *MockDockerClient) RemoveContainer(arg0 context.Context, arg1 string, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveContainer", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveContainer indicates an expected call of RemoveContainer +func (mr *MockDockerClientMockRecorder) RemoveContainer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveContainer", reflect.TypeOf((*MockDockerClient)(nil).RemoveContainer), arg0, arg1, arg2) +} + +// 
RemoveImage mocks base method +func (m *MockDockerClient) RemoveImage(arg0 context.Context, arg1 string, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveImage", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveImage indicates an expected call of RemoveImage +func (mr *MockDockerClientMockRecorder) RemoveImage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockDockerClient)(nil).RemoveImage), arg0, arg1, arg2) +} + +// RemoveVolume mocks base method +func (m *MockDockerClient) RemoveVolume(arg0 context.Context, arg1 string, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveVolume", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveVolume indicates an expected call of RemoveVolume +func (mr *MockDockerClientMockRecorder) RemoveVolume(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveVolume", reflect.TypeOf((*MockDockerClient)(nil).RemoveVolume), arg0, arg1, arg2) +} + +// StartContainer mocks base method +func (m *MockDockerClient) StartContainer(arg0 context.Context, arg1 string, arg2 time.Duration) dockerapi.DockerContainerMetadata { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartContainer", arg0, arg1, arg2) + ret0, _ := ret[0].(dockerapi.DockerContainerMetadata) + return ret0 +} + +// StartContainer indicates an expected call of StartContainer +func (mr *MockDockerClientMockRecorder) StartContainer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartContainer", reflect.TypeOf((*MockDockerClient)(nil).StartContainer), arg0, arg1, arg2) +} + +// StartContainerExec mocks base method +func (m *MockDockerClient) StartContainerExec(arg0 context.Context, arg1 string, arg2 time.Duration) 
error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartContainerExec", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StartContainerExec indicates an expected call of StartContainerExec +func (mr *MockDockerClientMockRecorder) StartContainerExec(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartContainerExec", reflect.TypeOf((*MockDockerClient)(nil).StartContainerExec), arg0, arg1, arg2) +} + +// Stats mocks base method +func (m *MockDockerClient) Stats(arg0 context.Context, arg1 string, arg2 time.Duration) (<-chan *types.StatsJSON, <-chan error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stats", arg0, arg1, arg2) + ret0, _ := ret[0].(<-chan *types.StatsJSON) + ret1, _ := ret[1].(<-chan error) + return ret0, ret1 +} + +// Stats indicates an expected call of Stats +func (mr *MockDockerClientMockRecorder) Stats(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDockerClient)(nil).Stats), arg0, arg1, arg2) +} + +// StopContainer mocks base method +func (m *MockDockerClient) StopContainer(arg0 context.Context, arg1 string, arg2 time.Duration) dockerapi.DockerContainerMetadata { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopContainer", arg0, arg1, arg2) + ret0, _ := ret[0].(dockerapi.DockerContainerMetadata) + return ret0 +} + +// StopContainer indicates an expected call of StopContainer +func (mr *MockDockerClientMockRecorder) StopContainer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopContainer", reflect.TypeOf((*MockDockerClient)(nil).StopContainer), arg0, arg1, arg2) +} + +// SupportedVersions mocks base method +func (m *MockDockerClient) SupportedVersions() []dockerclient.DockerVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SupportedVersions") + ret0, _ := 
ret[0].([]dockerclient.DockerVersion) + return ret0 +} + +// SupportedVersions indicates an expected call of SupportedVersions +func (mr *MockDockerClientMockRecorder) SupportedVersions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportedVersions", reflect.TypeOf((*MockDockerClient)(nil).SupportedVersions)) +} + +// TopContainer mocks base method +func (m *MockDockerClient) TopContainer(arg0 context.Context, arg1 string, arg2 time.Duration, arg3 ...string) (*container0.ContainerTopOKBody, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "TopContainer", varargs...) + ret0, _ := ret[0].(*container0.ContainerTopOKBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TopContainer indicates an expected call of TopContainer +func (mr *MockDockerClientMockRecorder) TopContainer(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TopContainer", reflect.TypeOf((*MockDockerClient)(nil).TopContainer), varargs...) 
+} + +// Version mocks base method +func (m *MockDockerClient) Version(arg0 context.Context, arg1 time.Duration) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version +func (mr *MockDockerClientMockRecorder) Version(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockDockerClient)(nil).Version), arg0, arg1) +} + +// WithVersion mocks base method +func (m *MockDockerClient) WithVersion(arg0 dockerclient.DockerVersion) dockerapi.DockerClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithVersion", arg0) + ret0, _ := ret[0].(dockerapi.DockerClient) + return ret0 +} + +// WithVersion indicates an expected call of WithVersion +func (mr *MockDockerClientMockRecorder) WithVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithVersion", reflect.TypeOf((*MockDockerClient)(nil).WithVersion), arg0) +} diff --git a/agent/dockerclient/dockerapi/types.go b/agent/dockerclient/dockerapi/types.go new file mode 100644 index 00000000000..b490dab9a52 --- /dev/null +++ b/agent/dockerclient/dockerapi/types.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dockerapi + +import ( + "fmt" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" +) + +// ContainerNotFound is a type for a missing container +type ContainerNotFound struct { + // TaskArn is the ARN of the task the container belongs to + TaskArn string + // ContainerName is the name of the container that's missing + ContainerName string +} + +// Error returns an error string for the ContainerNotFound error +func (cnferror ContainerNotFound) Error() string { + return fmt.Sprintf("Could not find container '%s' in task '%s'", + cnferror.ContainerName, cnferror.TaskArn) +} + +// DockerContainerChangeEvent is a type for container change events +type DockerContainerChangeEvent struct { + // Status represents the container's status in the event + Status apicontainerstatus.ContainerStatus + // DockerContainerMetadata is the metadata of the container in the event + DockerContainerMetadata + // Type is the event type received from docker events + Type apicontainer.DockerEventType +} + +// DockerContainerMetadata is a type for metadata about Docker containers +type DockerContainerMetadata struct { + // DockerID is the container's id generated by Docker + DockerID string + // ExitCode contains container's exit code if it has stopped + ExitCode *int + // PortBindings is the list of port binding information of the container + PortBindings []apicontainer.PortBinding + // Error wraps various container transition errors and is set if engine + // is unable to perform any of the required container transitions + Error apierrors.NamedError + // Volumes contains volume information for the container + Volumes []types.MountPoint + // Labels contains labels set for the container + Labels map[string]string + // CreatedAt is the timestamp
of container creation + CreatedAt time.Time + // StartedAt is the timestamp of container start + StartedAt time.Time + // FinishedAt is the timestamp of container stop + FinishedAt time.Time + // Health contains the result of a container health check + Health apicontainer.HealthStatus + // NetworkMode denotes the network mode in which the container is started + NetworkMode string + // NetworksUnsafe denotes the Docker Network Settings in the container + NetworkSettings *types.NetworkSettings +} + +// ListContainersResponse encapsulates the response from the docker client for the +// ListContainers call. +type ListContainersResponse struct { + // DockerIDs is the list of container IDs from the ListContainers call + DockerIDs []string + // Error contains any error returned when listing containers + Error error +} + +// ListImagesResponse encapsulates the response from the docker client for the +// ListImages call. +type ListImagesResponse struct { + // ImageIDs is the list of image IDs from the ListImages call + ImageIDs []string + // RepoTags is the list of image names from the ListImages call + RepoTags []string + // Error contains any error returned when listing images + Error error +} + +// VolumeResponse wrapper for CreateVolume and InspectVolume +// TODO Remove type when migration is complete +type VolumeResponse struct { + DockerVolume *types.Volume + Error error +} + +// SDKVolumeResponse is a wrapper for CreateVolume for SDK clients +type SDKVolumeResponse struct { + DockerVolume *types.Volume + Error error +} + +// ListPluginsResponse is a wrapper for ListPlugins api +type ListPluginsResponse struct { + Plugins []*types.Plugin + Error error +} + +// String returns a human readable string of the container change event +func (event *DockerContainerChangeEvent) String() string { + res := fmt.Sprintf("Status: %s, DockerID: %s", event.Status.String(), event.DockerID) + res += ", health: " + event.Health.Status.String() + + if event.ExitCode != nil { + res +=
fmt.Sprintf(", ExitCode: %d", aws.IntValue(event.ExitCode)) + } + + if len(event.PortBindings) != 0 { + res += fmt.Sprintf(", PortBindings: %v", event.PortBindings) + } + + if event.Error != nil { + res += ", Error: " + event.Error.Error() + } + + if len(event.Volumes) != 0 { + res += fmt.Sprintf(", Volumes: %v", event.Volumes) + } + + if len(event.Labels) != 0 { + res += fmt.Sprintf(", Labels: %v", event.Labels) + } + + if !event.CreatedAt.IsZero() { + res += ", CreatedAt: " + event.CreatedAt.String() + } + if !event.StartedAt.IsZero() { + res += ", StartedAt: " + event.StartedAt.String() + } + if !event.FinishedAt.IsZero() { + res += ", FinishedAt: " + event.FinishedAt.String() + } + + return res +} diff --git a/agent/dockerclient/dockerapi_compare_versions.go b/agent/dockerclient/dockerapi_compare_versions.go new file mode 100644 index 00000000000..ac54aaee662 --- /dev/null +++ b/agent/dockerclient/dockerapi_compare_versions.go @@ -0,0 +1,125 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerclient + +import ( + "fmt" + "strconv" + "strings" +) + +type DockerAPIVersion string + +type dockerVersion struct { + major int + minor int +} + +// Matches returns whether or not a version matches a given selector. 
+// The selector can be any of the following: +// +// * x.y -- Matches a version exactly the same as the selector version +// * >=x.y -- Matches a version greater than or equal to the selector version +// * >x.y -- Matches a version greater than the selector version +// * <=x.y -- Matches a version less than or equal to the selector version +// * <x.y -- Matches a version less than the selector version +// * x.y,a.b -- Matches if any one of the comma-separated selectors matches +func (lhs DockerAPIVersion) Matches(selector string) (bool, error) { + lhsVersion, err := parseDockerVersions(string(lhs)) + if err != nil { + return false, err + } + + if strings.Contains(selector, ",") { + // Comma-separated selectors: match if any one of them matches (OR semantics) + orElements := strings.Split(selector, ",") + for _, el := range orElements { + matches, err := lhs.Matches(el) + if err != nil { + return false, err + } + if matches { + return true, nil + } + } + return false, nil + } + + if strings.HasPrefix(selector, ">=") { + rhsVersion, err := parseDockerVersions(selector[2:]) + if err != nil { + return false, err + } + return compareDockerVersions(lhsVersion, rhsVersion) >= 0, nil + } else if strings.HasPrefix(selector, ">") { + rhsVersion, err := parseDockerVersions(selector[1:]) + if err != nil { + return false, err + } + return compareDockerVersions(lhsVersion, rhsVersion) > 0, nil + } else if strings.HasPrefix(selector, "<=") { + rhsVersion, err := parseDockerVersions(selector[2:]) + if err != nil { + return false, err + } + return compareDockerVersions(lhsVersion, rhsVersion) <= 0, nil + } else if strings.HasPrefix(selector, "<") { + rhsVersion, err := parseDockerVersions(selector[1:]) + if err != nil { + return false, err + } + return compareDockerVersions(lhsVersion, rhsVersion) < 0, nil + } + + rhsVersion, err := parseDockerVersions(selector) + if err != nil { + return false, err + } + return compareDockerVersions(lhsVersion, rhsVersion) == 0, nil +} + +func parseDockerVersions(version string) (dockerVersion, error) { + var result dockerVersion + // x.x + versionParts := strings.Split(version, ".") + // [x, x] + if len(versionParts) != 2 { + return result, fmt.Errorf("Not enough '.'
characters in the version part") + } + major, err := strconv.Atoi(versionParts[0]) + if err != nil { + return result, fmt.Errorf("Cannot parse major version as int: %v", err) + } + minor, err := strconv.Atoi(versionParts[1]) + if err != nil { + return result, fmt.Errorf("Cannot parse minor version as int: %v", err) + } + result.major = major + result.minor = minor + return result, nil +} + +// compareDockerVersions compares two docker api versions, 'lhs' and 'rhs', and returns -1 if lhs is less +// than rhs, 0 if they are equal, and 1 if lhs is greater than rhs +func compareDockerVersions(lhs, rhs dockerVersion) int { + switch { + case lhs.major < rhs.major: + return -1 + case lhs.major > rhs.major: + return 1 + case lhs.minor < rhs.minor: + return -1 + case lhs.minor > rhs.minor: + return 1 + } + return 0 +} diff --git a/agent/dockerclient/dockerapi_compare_versions_test.go b/agent/dockerclient/dockerapi_compare_versions_test.go new file mode 100644 index 00000000000..baa13cf69de --- /dev/null +++ b/agent/dockerclient/dockerapi_compare_versions_test.go @@ -0,0 +1,125 @@ +// +build unit +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dockerclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseDockerVersions(t *testing.T) { + testCases := []struct { + input string + output dockerVersion + }{ + { + input: "1.0", + output: dockerVersion{ + major: 1, + minor: 0, + }, + }, + { + input: "5.2", + output: dockerVersion{ + major: 5, + minor: 2, + }, + }, + } + + for i, testCase := range testCases { + result, err := parseDockerVersions(testCase.input) + assert.NoError(t, err, "#%v: Error parsing %v: %v", i, testCase.input, err) + assert.Equal(t, testCase.output, result, "#%v: Expected %v, got %v", i, testCase.output, result) + } + + invalidCases := []string{"foo", "", "bar", "x.y.z", "1.x.y", "1.1.z"} + for i, invalidCase := range invalidCases { + _, err := parseDockerVersions(invalidCase) + assert.Error(t, err, "#%v: Expected error, didn't get one. Input: %v", i, invalidCase) + } +} + +func TestDockerVersionMatches(t *testing.T) { + testCases := []struct { + version string + selector string + expectedOutput bool + }{ + { + version: "1.0", + selector: "1.0", + expectedOutput: true, + }, + { + version: "1.0", + selector: ">=1.0", + expectedOutput: true, + }, + { + version: "2.1", + selector: ">=1.0", + expectedOutput: true, + }, + { + version: "2.1", + selector: "<=1.0", + expectedOutput: false, + }, + { + version: "0.1", + selector: "<1.0", + expectedOutput: true, + }, + { + version: "0.1", + selector: "<=1.0", + expectedOutput: true, + }, + { + version: "1.0", + selector: "<1.1", + expectedOutput: true, + }, + { + version: "1.1", + selector: ">1.0", + expectedOutput: true, + }, + { + version: "1.1", + selector: "2.1,1.1", + expectedOutput: true, + }, + { + version: "2.0", + selector: "2.1,1.1", + expectedOutput: false, + }, + { + version: "1.9", + selector: ">=1.9,<=1.9", + expectedOutput: true, + }, + } + + for i, testCase := range testCases { + result, err := DockerAPIVersion(testCase.version).Matches(testCase.selector) + assert.NoError(t, err, "#%v: 
Unexpected error %v", i, err) + assert.Equal(t, testCase.expectedOutput, result, "#%v: %v(%v) expected %v but got %v", i, testCase.version, testCase.selector, testCase.expectedOutput, result) + } +} diff --git a/agent/dockerclient/dockerauth/doc.go b/agent/dockerclient/dockerauth/doc.go new file mode 100644 index 00000000000..2ee606d8a07 --- /dev/null +++ b/agent/dockerclient/dockerauth/doc.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +/* +Package dockerauth handles storing auth configuration information for Docker +registries. + +Usage + +This package pulls authentication information from the passed configuration. +A user should set the "EngineAuthType" and "EngineAuthData" configuration +keys to values indicated below. + +These keys may be set by either setting the environment variables +"ECS_ENGINE_AUTH_TYPE" and "ECS_ENGINE_AUTH_DATA" or by setting the keys "EngineAuthData" and "EngineAuthType" in the JSON configuration file located at the configured "ECS_AGENT_CONFIG_FILE_PATH" (see http://godoc.org/github.com/aws/amazon-ecs-agent/agent/config) + +Auth Types + +The two currently supported auth types are "docker" and "dockercfg". + +Docker: + +The auth type "docker" is intended to work most naturally with a JSON +configuration file. The "AuthData" is a structured JSON object which specifies +values for the docker "AuthConfig" structure. 
The "AuthData" should be an object +similar to the following: + { + "my.registry.example.com": { + "username": "myUsername", + "password": "myPassword" + }, + "https://index.docker.io/v1/user": { + "username": "my-dockerhub-username", + "password": "my-dockerhub-password", + "email": "my-optionally-included-email" + } + } + + +Dockercfg: + +The auth type "dockercfg" is intended to allow easy use of an existing +".dockercfg" file generated by running "docker login". This auth type expects +the "AuthData" to be a string containing the contents of that file. The contents +of your ".dockercfg" will generally be a string of the following form: + '{"http://myregistry.com/v1/":{"auth":"dXNlcjpzd29yZGZpc2g=","email":"email"}}' +*/ +package dockerauth diff --git a/agent/dockerclient/dockerauth/dockerauth.go b/agent/dockerclient/dockerauth/dockerauth.go new file mode 100644 index 00000000000..b1b602a6fb4 --- /dev/null +++ b/agent/dockerclient/dockerauth/dockerauth.go @@ -0,0 +1,189 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Portions copyright 2012-2015 Docker, Inc. Please see LICENSE for applicable +// license terms and NOTICE for applicable notices.
+package dockerauth + +import ( + "encoding/base64" + "encoding/json" + "strings" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/utils" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" +) + +func NewDockerAuthProvider(authType string, authData json.RawMessage) DockerAuthProvider { + return &dockerAuthProvider{ + authMap: parseAuthData(authType, authData), + } +} + +type dockerAuthProvider struct { + authMap dockerAuths +} + +// map from registry url (minus schema) to auth information +type dockerAuths map[string]types.AuthConfig + +type dockercfgConfigEntry struct { + Auth string `json:"auth"` +} + +type dockercfgData map[string]dockercfgConfigEntry + +// GetAuthconfig retrieves the correct auth configuration for the given repository +func (authProvider *dockerAuthProvider) GetAuthconfig(image string, registryAuthData *apicontainer.RegistryAuthenticationData) (types.AuthConfig, error) { + // Ignore 'tag', not used in auth determination + repository, _ := utils.ParseRepositoryTag(image) + authDataMap := authProvider.authMap + + // Ignore repo/image name for some auth checks (see use of 'image' below for where it's not ignored. + indexName, _ := splitReposName(repository) + + if isDockerhubHostname(indexName) { + return authDataMap[dockerRegistryKey], nil + } + + // Try to find the longest match that at least matches the hostname + // This is to support e.g. 'registry.tld: auth1, registry.tld/username: + // auth2' for multiple different auths on the same registry. 
+ longestKey := "" + authConfigKey := indexName + + // Take a direct match of the index hostname as a sane default + if _, found := authDataMap[authConfigKey]; found && len(authConfigKey) > len(longestKey) { + longestKey = authConfigKey + } + + for registry := range authDataMap { + nameParts := strings.SplitN(registry, "/", 2) + hostname := nameParts[0] + + // Only ever take a new key if the hostname matches in normal cases + if authConfigKey == hostname { + if longestKey == "" { + longestKey = registry + } else if len(registry) > len(longestKey) && strings.HasPrefix(image, registry) { + // If we have a longer match, that indicates a username / namespace appended + longestKey = registry + } + } + } + if longestKey != "" { + return authDataMap[longestKey], nil + } + return types.AuthConfig{}, nil +} + +// Normalize all auth types into a uniform 'dockerAuths' type. +// On error, any appropriate information will be logged and an empty dockerAuths will be returned +func parseAuthData(authType string, authData json.RawMessage) dockerAuths { + intermediateAuthData := make(dockerAuths) + switch authType { + case "docker": + err := json.Unmarshal(authData, &intermediateAuthData) + if err != nil { + seelog.Warn("Could not parse 'docker' type auth config") + return dockerAuths{} + } + case "dockercfg": + var base64dAuthInfo dockercfgData + err := json.Unmarshal(authData, &base64dAuthInfo) + if err != nil { + seelog.Warn("Could not parse 'dockercfg' type auth config") + return dockerAuths{} + } + + for registry, auth := range base64dAuthInfo { + data, err := base64.StdEncoding.DecodeString(auth.Auth) + if err != nil { + seelog.Warnf("Malformed auth data for registry %v", registry) + continue + } + + usernamePass := strings.SplitN(string(data), ":", 2) + if len(usernamePass) != 2 { + seelog.Warnf("Malformed auth data for registry %v; must contain ':'", registry) + continue + } + intermediateAuthData[registry] = types.AuthConfig{ + Username: usernamePass[0], + Password: 
usernamePass[1], + } + } + case "": + // not set; no warn + return dockerAuths{} + default: + seelog.Warnf("Unknown auth configuration: %v", authType) + return dockerAuths{} + } + + // Normalize intermediate registry keys into not having a schema + output := make(dockerAuths) + for key, val := range intermediateAuthData { + output[stripRegistrySchema(key)] = val + } + return output +} + +// stripRegistrySchema normalizes the registry url by removing http/https from it. +// because registries only support those two schemas, this function can be quite naive. +func stripRegistrySchema(registry string) string { + if strings.HasPrefix(registry, "http://") { + return strings.TrimPrefix(registry, "http://") + } else if strings.HasPrefix(registry, "https://") { + return strings.TrimPrefix(registry, "https://") + } + return registry +} + +// https://github.com/docker/docker/blob/729c9a97822ebee2c978a322d37060454af6bc66/cliconfig/config.go#L25 +// `docker login` still uses this, including the /v1/ for me, as of the 1.9.0 RCs +const dockerRegistryKey = "index.docker.io/v1/" + +var dockerRegistryHosts = []string{"docker.io", "index.docker.io", "registry-1.docker.io"} + +func isDockerhubHostname(hostname string) bool { + for _, dockerHostname := range dockerRegistryHosts { + if hostname == dockerHostname { + return true + } + } + return false +} + +// This is taken from Docker's codebase in whole or in part, Copyright Docker Inc. 
+// https://github.com/docker/docker/blob/v1.8.3/registry/config.go#L290 +const IndexName = "docker.io" + +func splitReposName(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} diff --git a/agent/dockerclient/dockerauth/dockerauth_test.go b/agent/dockerclient/dockerauth/dockerauth_test.go new file mode 100644 index 00000000000..da568d0ca5b --- /dev/null +++ b/agent/dockerclient/dockerauth/dockerauth_test.go @@ -0,0 +1,137 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+
+package dockerauth
+
+import (
+	"encoding/base64"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+)
+
+// authTestPair pairs an image reference with the credentials the provider is
+// expected to resolve for it; empty strings mean "no credentials found".
+type authTestPair struct {
+	Image        string
+	ExpectedUser string
+	ExpectedPass string
+}
+
+// Pre-encoded "user:password" blobs in the base64 form used by the
+// dockercfg-style "auth" field.
+var secretAuth string = base64.StdEncoding.EncodeToString([]byte("user:swordfish"))
+var dragonAuth string = base64.StdEncoding.EncodeToString([]byte("test:dragon"))
+var dockerhubAuth string = base64.StdEncoding.EncodeToString([]byte("dockerhub:password"))
+
+// TestDockerCfgAuth verifies that the "dockercfg" (base64 "auth" blob) and
+// "docker" (explicit username/password) config formats resolve the same
+// credentials for the same images: a more specific "host/path" entry wins
+// over its bare-host entry, scheme prefixes on entry keys are ignored, and
+// bare image names fall back to the Docker Hub index entry.
+func TestDockerCfgAuth(t *testing.T) {
+	var expectedPairs = []authTestPair{
+		{"example.tld/my/image", "user", "swordfish"},
+		{"example.tld/user2/image", "test", "dragon"},
+		{"registry.tld/image", "", ""},
+		{"nginx", "dockerhub", "password"},
+		{"busybox", "dockerhub", "password"},
+		{"library/busybox", "dockerhub", "password"},
+		{"amazon/amazon-ecs-agent", "dockerhub", "password"},
+		{"aregistry.tld/foo/bar", "user", "swordfish"},
+		{"aregistry.tld/foo", "user", "swordfish"},
+		{"aregistry.tld2/foo", "", ""},
+		{"foo.aregistry.tld/foo", "", ""},
+		{"anotherregistry.tld/foo/bar", "user", "swordfish"},
+		{"anotherregistry.tld/foo", "user", "swordfish"},
+		{"anotherregistry.tld2/foo", "", ""},
+		{"foo.anotherregistry.tld/foo", "", ""},
+	}
+	authData := []byte(strings.Join(append([]string{`{`},
+		`"example.tld/user2":{"auth":"`+dragonAuth+`","email":"test@test.test"},`,
+		`"example.tld":{"auth":"`+secretAuth+`","email":"user2@example.com"},`,
+		`"https://aregistry.tld":{"auth":"`+secretAuth+`"},`,
+		`"http://anotherregistry.tld":{"auth":"`+secretAuth+`"},`,
+		`"https://index.docker.io/v1/":{"auth":"`+dockerhubAuth+`"}`,
+		`}`), ""))
+	dockerAuthData := []byte(strings.Join(append([]string{`{`},
+		`"example.tld/user2":{"username":"test","password":"dragon","email":"foo@bar"},`,
+		`"example.tld":{"username":"user","password":"swordfish"},`,
+		`"https://aregistry.tld":{"username":"user","password":"swordfish"},`,
+		`"http://anotherregistry.tld":{"username":"user","password":"swordfish"},`,
+		`"https://index.docker.io/v1/":{"username":"dockerhub","password":"password"}`,
+		`}`), ""))
+
+	providerCfg := NewDockerAuthProvider("dockercfg", authData)
+	providerDocker := NewDockerAuthProvider("docker", dockerAuthData)
+
+	for ndx, pair := range expectedPairs {
+		authConfig, _ := providerCfg.GetAuthconfig(pair.Image, nil)
+		if authConfig.Username != pair.ExpectedUser || authConfig.Password != pair.ExpectedPass {
+			t.Errorf("Expectation failure: #%v. Got %v, wanted %v", ndx, authConfig, pair)
+		}
+
+		authConfig, _ = providerDocker.GetAuthconfig(pair.Image, nil)
+		if authConfig.Username != pair.ExpectedUser || authConfig.Password != pair.ExpectedPass {
+			t.Errorf("Expectation failure: #%v. Got %v, wanted %v", ndx, authConfig, pair)
+		}
+	}
+}
+
+// TestAuthAppliesToOnlyRegistry verifies that an entry keyed by "host/path"
+// still supplies credentials for other repositories on the same host, while
+// unrelated registries and Docker Hub get no credentials.
+func TestAuthAppliesToOnlyRegistry(t *testing.T) {
+	authData := []byte(`{
+		"example.tld/user2":{"auth":"` + secretAuth + `","email":"test@test.test"}
+	}`)
+
+	var expectedPairs = []authTestPair{
+		// '/user2' should apply here because of matching hostname
+		{"example.tld/foo", "user", "swordfish"},
+		{"registry.tld", "", ""},
+		{"nginx", "", ""},
+	}
+
+	provider := NewDockerAuthProvider("dockercfg", authData)
+
+	for ndx, pair := range expectedPairs {
+		authConfig, _ := provider.GetAuthconfig(pair.Image, nil)
+		if authConfig.Username != pair.ExpectedUser || authConfig.Password != pair.ExpectedPass {
+			t.Errorf("Expectation failure: #%v. Got %v, wanted %v", ndx, authConfig, pair)
+		}
+	}
+}
+
+// TestAuthErrors verifies that malformed auth data (wrong JSON types, invalid
+// base64, a token with no colon separator) and unknown provider types all
+// degrade to an empty AuthConfig rather than returning credentials.
+func TestAuthErrors(t *testing.T) {
+	badPairs := []struct {
+		t string
+		a string
+	}{
+		{"docker", `{"registry.tld":{"auth":"` + secretAuth + `"}}`},
+		{"docker", `{"registry.tld":{"username":true}}`},
+		{"dockercfg", `{"registry.tld":{"auth":"malformedbase64"}}`},
+		{"dockercfg", `{"registry.tld":{"auth":true}}`},
+		{"dockercfg", `{"registry.tld":{"auth":"` + base64.StdEncoding.EncodeToString([]byte("noColon")) + `"}}`},
+		{"invalid", ""},
+	}
+
+	for _, pair := range badPairs {
+		provider := NewDockerAuthProvider(pair.t, []byte(pair.a))
+		result, _ := provider.GetAuthconfig("nginx", nil)
+		if !reflect.DeepEqual(result, types.AuthConfig{}) {
+			t.Errorf("Expected empty auth config for %v; got %v", pair, result)
+		}
+	}
+
+}
+
+// TestEmptyConfig verifies that a provider constructed with an empty type and
+// empty data yields an empty AuthConfig for any image.
+func TestEmptyConfig(t *testing.T) {
+	provider := NewDockerAuthProvider("", []byte(""))
+	authConfig, _ := provider.GetAuthconfig("nginx", nil)
+	if !reflect.DeepEqual(authConfig, types.AuthConfig{}) {
+		t.Errorf("Expected empty authconfig to not return any auth data at all")
+	}
+}
diff --git a/agent/dockerclient/dockerauth/ecr.go b/agent/dockerclient/dockerauth/ecr.go
new file mode 100644
index 00000000000..e3cb34a2bc6
--- /dev/null
+++ b/agent/dockerclient/dockerauth/ecr.go
@@ -0,0 +1,188 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+ +package dockerauth + +import ( + "encoding/base64" + "fmt" + "strings" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/async" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/ecr" + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/aws/aws-sdk-go/aws" + log "github.com/cihub/seelog" + "github.com/docker/docker/api/types" +) + +type cacheKey struct { + region string + roleARN string + registryID string + endpointOverride string +} + +type ecrAuthProvider struct { + tokenCache async.Cache + factory ecr.ECRFactory +} + +const ( + // MinimumJitterDuration is the minimum duration to mark the credentials + // as expired before it's actually expired + MinimumJitterDuration = 30 * time.Minute + roundtripTimeout = 5 * time.Second + proxyEndpointScheme = "https://" +) + +// String formats the cachKey as a string +func (key *cacheKey) String() string { + return fmt.Sprintf("%s-%s-%s-%s", key.roleARN, key.region, key.registryID, key.endpointOverride) +} + +// NewECRAuthProvider returns a DockerAuthProvider that can handle retrieve +// credentials for pulling from Amazon EC2 Container Registry +func NewECRAuthProvider(ecrFactory ecr.ECRFactory, cache async.Cache) DockerAuthProvider { + return &ecrAuthProvider{ + tokenCache: cache, + factory: ecrFactory, + } +} + +// GetAuthconfig retrieves the correct auth configuration for the given repository +func (authProvider *ecrAuthProvider) GetAuthconfig(image string, + registryAuthData *apicontainer.RegistryAuthenticationData) (types.AuthConfig, error) { + + if registryAuthData == nil { + return types.AuthConfig{}, fmt.Errorf("dockerauth: missing container's registry auth data") + } + + authData := registryAuthData.ECRAuthData + + if authData == nil { + return types.AuthConfig{}, fmt.Errorf("dockerauth: missing container's ecr auth data") 
+ } + + // First try to get the token from cache, if the token does not exist, + // then call ECR api to get the new token + key := cacheKey{ + region: authData.Region, + endpointOverride: authData.EndpointOverride, + registryID: authData.RegistryID, + } + + // If the container is using execution role credentials to pull, + // add the roleARN as part of the cache key so that docker auth for + // containers pull with the same role can be cached + if authData.GetPullCredentials() != (credentials.IAMRoleCredentials{}) { + key.roleARN = authData.GetPullCredentials().RoleArn + } + + // Try to get the auth config from cache + auth := authProvider.getAuthConfigFromCache(key) + if auth != nil { + return *auth, nil + } + + // Get the auth config from ECR + return authProvider.getAuthConfigFromECR(image, key, authData) +} + +// getAuthconfigFromCache retrieves the token from cache +func (authProvider *ecrAuthProvider) getAuthConfigFromCache(key cacheKey) *types.AuthConfig { + token, ok := authProvider.tokenCache.Get(key.String()) + if !ok { + return nil + } + + cachedToken, ok := token.(*ecrapi.AuthorizationData) + if !ok { + log.Warnf("Reading ECR credentials from cache failed") + return nil + } + + if authProvider.IsTokenValid(cachedToken) { + auth, err := extractToken(cachedToken) + if err != nil { + log.Errorf("Extract docker auth from cache failed, err: %v", err) + // Remove invalid token from cache + authProvider.tokenCache.Delete(key.String()) + return nil + } + return &auth + } else { + // Remove invalid token from cache + authProvider.tokenCache.Delete(key.String()) + } + return nil +} + +// getAuthConfigFromECR calls the ECR API to get docker auth config +func (authProvider *ecrAuthProvider) getAuthConfigFromECR(image string, key cacheKey, authData *apicontainer.ECRAuthData) (types.AuthConfig, error) { + // Create ECR client to get the token + client, err := authProvider.factory.GetClient(authData) + if err != nil { + return types.AuthConfig{}, err + } + + 
log.Debugf("Calling ECR.GetAuthorizationToken for %s", image) + ecrAuthData, err := client.GetAuthorizationToken(authData.RegistryID) + if err != nil { + return types.AuthConfig{}, err + } + if ecrAuthData == nil { + return types.AuthConfig{}, fmt.Errorf("ecr auth: missing AuthorizationData in ECR response for %s", image) + } + + // Verify the auth data has the correct format for ECR + if ecrAuthData.ProxyEndpoint != nil && + strings.HasPrefix(proxyEndpointScheme+image, aws.StringValue(ecrAuthData.ProxyEndpoint)) && + ecrAuthData.AuthorizationToken != nil { + + // Cache the new token + authProvider.tokenCache.Set(key.String(), ecrAuthData) + return extractToken(ecrAuthData) + } + return types.AuthConfig{}, fmt.Errorf("ecr auth: AuthorizationData is malformed for %s", image) +} + +func extractToken(authData *ecrapi.AuthorizationData) (types.AuthConfig, error) { + decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(authData.AuthorizationToken)) + if err != nil { + return types.AuthConfig{}, err + } + parts := strings.SplitN(string(decodedToken), ":", 2) + return types.AuthConfig{ + Username: parts[0], + Password: parts[1], + ServerAddress: aws.StringValue(authData.ProxyEndpoint), + }, nil +} + +// IsTokenValid checks the token is still within it's expiration window. We early expire to allow +// for timing in calls and add jitter to avoid refreshing all of the tokens at once. +func (authProvider *ecrAuthProvider) IsTokenValid(authData *ecrapi.AuthorizationData) bool { + if authData == nil || authData.ExpiresAt == nil { + return false + } + + refreshTime := aws.TimeValue(authData.ExpiresAt). 
+ Add(-1 * retry.AddJitter(MinimumJitterDuration, MinimumJitterDuration)) + + return time.Now().Before(refreshTime) +} diff --git a/agent/dockerclient/dockerauth/ecr_test.go b/agent/dockerclient/dockerauth/ecr_test.go new file mode 100644 index 00000000000..dd5b07cd053 --- /dev/null +++ b/agent/dockerclient/dockerauth/ecr_test.go @@ -0,0 +1,519 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerauth + +import ( + "encoding/base64" + "errors" + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/async" + mock_async "github.com/aws/amazon-ecs-agent/agent/async/mocks" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_ecr "github.com/aws/amazon-ecs-agent/agent/ecr/mocks" + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testToken = "testToken" + testProxyEndpoint = "testProxyEndpoint" + tokenCacheSize = 100 + tokenCacheTTL = 12 * time.Hour +) + +func TestNewAuthProviderECRAuth(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + + provider := NewECRAuthProvider(factory, async.NewLRUCache(tokenCacheSize, tokenCacheTTL)) + _, ok := 
provider.(*ecrAuthProvider) + assert.True(t, ok, "Should have returned ecrAuthProvider") +} + +func TestGetAuthConfigSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_ecr.NewMockECRClient(ctrl) + factory := mock_ecr.NewMockECRFactory(ctrl) + + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + proxyEndpoint := "proxy" + username := "username" + password := "password" + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + } + + factory.EXPECT().GetClient(authData).Return(client, nil) + client.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(&ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + }, nil) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", registryAuthData) + require.NoError(t, err, "Unexpected error in getting auth config from ecr") + + assert.Equal(t, username, authconfig.Username, "Expected username to be %s, but was %s", username, authconfig.Username) + assert.Equal(t, password, authconfig.Password, "Expected password to be %s, but was %s", password, authconfig.Password) +} + +func TestGetAuthConfigNoMatchAuthorizationToken(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + client := mock_ecr.NewMockECRClient(ctrl) + + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + proxyEndpoint := "proxy" + username := "username" + password := "password" + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, 
tokenCacheTTL), + } + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + factory.EXPECT().GetClient(authData).Return(client, nil) + client.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(&ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + "notproxy"), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + }, nil) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", registryAuthData) + require.Error(t, err, "Expected error if the proxy does not match") + assert.Equal(t, types.AuthConfig{}, authconfig, "Expected Authconfig to be empty, but was %v", authconfig) +} + +func TestGetAuthConfigBadBase64(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + client := mock_ecr.NewMockECRClient(ctrl) + + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + proxyEndpoint := "proxy" + username := "username" + password := "password" + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + } + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + factory.EXPECT().GetClient(authData).Return(client, nil) + client.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(&ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + "notproxy"), + AuthorizationToken: aws.String((username + ":" + password)), + }, nil) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", registryAuthData) + require.Error(t, err, "Expected error to be present, but was nil", err) + assert.Equal(t, types.AuthConfig{}, authconfig, "Expected Authconfig to be empty, but was %v", authconfig) +} + +func TestGetAuthConfigMissingResponse(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + client := mock_ecr.NewMockECRClient(ctrl) + factory := mock_ecr.NewMockECRFactory(ctrl) + + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + proxyEndpoint := "proxy" + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + } + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + factory.EXPECT().GetClient(authData).Return(client, nil) + client.EXPECT().GetAuthorizationToken(authData.RegistryID) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", registryAuthData) + if err == nil { + t.Fatal("Expected error to be present, but was nil", err) + } + require.Error(t, err, "Expected error to be present, but was nil", err) + assert.Equal(t, types.AuthConfig{}, authconfig, "Expected Authconfig to be empty, but was %v", authconfig) +} + +func TestGetAuthConfigECRError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_ecr.NewMockECRClient(ctrl) + factory := mock_ecr.NewMockECRFactory(ctrl) + + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + proxyEndpoint := "proxy" + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + } + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + factory.EXPECT().GetClient(authData).Return(client, nil) + client.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(nil, errors.New("test error")) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", registryAuthData) + require.Error(t, err, "Expected error to be present, but was nil", err) + assert.Equal(t, types.AuthConfig{}, authconfig, "Expected Authconfig to be empty, but was %v", 
authconfig) +} + +func TestGetAuthConfigNoAuthData(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + proxyEndpoint := "proxy" + provider := ecrAuthProvider{ + factory: factory, + tokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL), + } + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"/myimage", nil) + require.Error(t, err, "Expected error to be present, but was nil", err) + assert.Equal(t, types.AuthConfig{}, authconfig, "Expected Authconfig to be empty, but was %v", authconfig) +} + +func TestIsTokenValid(t *testing.T) { + provider := ecrAuthProvider{} + + var testAuthTimes = []struct { + expireIn time.Duration + expected bool + }{ + {-1 * time.Minute, false}, + {time.Duration(0), false}, + {1 * time.Minute, false}, + {MinimumJitterDuration, false}, + {MinimumJitterDuration*2 + (1 * time.Second), true}, + } + + for _, testCase := range testAuthTimes { + testAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(testProxyEndpoint), + AuthorizationToken: aws.String(testToken), + ExpiresAt: aws.Time(time.Now().Add(testCase.expireIn)), + } + + actual := provider.IsTokenValid(testAuthData) + assert.Equal(t, testCase.expected, actual, + fmt.Sprintf("Expected IsTokenValid to be %t, got %t: for expiraing at %s", testCase.expected, actual, testCase.expireIn)) + } +} + +func TestAuthorizationTokenCacheMiss(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + ecrClient := mock_ecr.NewMockECRClient(ctrl) + mockCache := mock_async.NewMockCache(ctrl) + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: mockCache, + } + username := "test_user" + password := "test_passwd" + + proxyEndpoint := "proxy" + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + authData.SetPullCredentials(credentials.IAMRoleCredentials{ + 
RoleArn: "arn:aws:iam::123456789012:role/test", + }) + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + key := cacheKey{ + roleARN: authData.GetPullCredentials().RoleArn, + region: authData.Region, + registryID: authData.RegistryID, + endpointOverride: authData.EndpointOverride, + } + + dockerAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + } + + mockCache.EXPECT().Get(key.String()).Return(nil, false) + factory.EXPECT().GetClient(authData).Return(ecrClient, nil) + ecrClient.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(dockerAuthData, nil) + mockCache.EXPECT().Set(key.String(), dockerAuthData) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"myimage", registryAuthData) + assert.NoError(t, err) + assert.Equal(t, username, authconfig.Username) + assert.Equal(t, password, authconfig.Password) +} + +func TestAuthorizationTokenCacheHit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + mockCache := mock_async.NewMockCache(ctrl) + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: mockCache, + } + username := "test_user" + password := "test_passwd" + + proxyEndpoint := "proxy" + testAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(12 * time.Hour)), + } + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + key := cacheKey{ + region: authData.Region, + registryID: authData.RegistryID, + 
endpointOverride: authData.EndpointOverride, + } + + mockCache.EXPECT().Get(key.String()).Return(testAuthData, true) + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"myimage", registryAuthData) + assert.NoError(t, err) + assert.Equal(t, username, authconfig.Username) + assert.Equal(t, password, authconfig.Password) +} + +func TestAuthorizationTokenCacheWithCredentialsHit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + mockCache := mock_async.NewMockCache(ctrl) + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: mockCache, + } + username := "test_user" + password := "test_passwd" + + proxyEndpoint := "proxy" + testAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now().Add(12 * time.Hour)), + } + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + authData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "arn:aws:iam::123456789012:role/test", + }) + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + key := cacheKey{ + roleARN: authData.GetPullCredentials().RoleArn, + region: authData.Region, + registryID: authData.RegistryID, + endpointOverride: authData.EndpointOverride, + } + + mockCache.EXPECT().Get(key.String()).Return(testAuthData, true) + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"myimage", registryAuthData) + assert.NoError(t, err) + assert.Equal(t, username, authconfig.Username) + assert.Equal(t, password, authconfig.Password) +} + +func TestAuthorizationTokenCacheHitExpired(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + ecrClient := 
mock_ecr.NewMockECRClient(ctrl) + mockCache := mock_async.NewMockCache(ctrl) + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: mockCache, + } + username := "test_user" + password := "test_passwd" + + proxyEndpoint := "proxy" + testAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + ExpiresAt: aws.Time(time.Now()), + } + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + authData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "arn:aws:iam::123456789012:role/test", + }) + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + key := cacheKey{ + roleARN: authData.GetPullCredentials().RoleArn, + region: authData.Region, + registryID: authData.RegistryID, + endpointOverride: authData.EndpointOverride, + } + + dockerAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + } + + mockCache.EXPECT().Get(key.String()).Return(testAuthData, true) + mockCache.EXPECT().Delete(key.String()) + factory.EXPECT().GetClient(authData).Return(ecrClient, nil) + ecrClient.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(dockerAuthData, nil) + mockCache.EXPECT().Set(key.String(), dockerAuthData) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"myimage", registryAuthData) + assert.NoError(t, err) + assert.Equal(t, username, authconfig.Username) + assert.Equal(t, password, authconfig.Password) +} + +func TestExtractECRTokenError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + factory := mock_ecr.NewMockECRFactory(ctrl) + ecrClient := mock_ecr.NewMockECRClient(ctrl) + mockCache := 
mock_async.NewMockCache(ctrl) + + provider := ecrAuthProvider{ + factory: factory, + tokenCache: mockCache, + } + username := "test_user" + password := "test_passwd" + + proxyEndpoint := "proxy" + testAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + // This will makes the extract fail + AuthorizationToken: aws.String("-"), + ExpiresAt: aws.Time(time.Now().Add(1 * time.Hour)), + } + authData := &apicontainer.ECRAuthData{ + Region: "us-west-2", + RegistryID: "0123456789012", + EndpointOverride: "my.endpoint", + } + authData.SetPullCredentials(credentials.IAMRoleCredentials{ + RoleArn: "arn:aws:iam::123456789012:role/test", + }) + + registryAuthData := &apicontainer.RegistryAuthenticationData{ + ECRAuthData: authData, + } + + key := cacheKey{ + roleARN: authData.GetPullCredentials().RoleArn, + region: authData.Region, + registryID: authData.RegistryID, + endpointOverride: authData.EndpointOverride, + } + + dockerAuthData := &ecrapi.AuthorizationData{ + ProxyEndpoint: aws.String(proxyEndpointScheme + proxyEndpoint), + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))), + } + + mockCache.EXPECT().Get(key.String()).Return(testAuthData, true) + mockCache.EXPECT().Delete(key.String()) + factory.EXPECT().GetClient(authData).Return(ecrClient, nil) + ecrClient.EXPECT().GetAuthorizationToken(authData.RegistryID).Return(dockerAuthData, nil) + mockCache.EXPECT().Set(key.String(), dockerAuthData) + + authconfig, err := provider.GetAuthconfig(proxyEndpoint+"myimage", registryAuthData) + assert.NoError(t, err) + assert.Equal(t, username, authconfig.Username) + assert.Equal(t, password, authconfig.Password) +} diff --git a/agent/dockerclient/dockerauth/interface.go b/agent/dockerclient/dockerauth/interface.go new file mode 100644 index 00000000000..3d59e71b5cb --- /dev/null +++ b/agent/dockerclient/dockerauth/interface.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+// Package dockerauth handles storing auth configuration information for Docker
+// registries
+package dockerauth
+
+import (
+	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
+	"github.com/docker/docker/api/types"
+)
+
+// DockerAuthProvider is something that can give the auth information for a
+// given docker image.
+type DockerAuthProvider interface {
+	// GetAuthconfig returns the docker auth configuration to use when pulling
+	// the given image. registryAuthData carries per-container registry auth
+	// (e.g. ECR auth data); implementations that resolve credentials from
+	// static configuration accept nil, while the ECR provider requires it.
+	GetAuthconfig(image string, registryAuthData *apicontainer.RegistryAuthenticationData) (types.AuthConfig, error)
+}
diff --git a/agent/dockerclient/logging_drivers.go b/agent/dockerclient/logging_drivers.go
new file mode 100644
index 00000000000..a938055d2d5
--- /dev/null
+++ b/agent/dockerclient/logging_drivers.go
@@ -0,0 +1,43 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package dockerclient
+
+// LoggingDriver is the name of a Docker logging driver as it appears in a
+// container's log configuration.
+type LoggingDriver string
+
+// Logging driver names the agent recognizes.
+const (
+	JSONFileDriver    LoggingDriver = "json-file"
+	SyslogDriver      LoggingDriver = "syslog"
+	JournaldDriver    LoggingDriver = "journald"
+	GelfDriver        LoggingDriver = "gelf"
+	FluentdDriver     LoggingDriver = "fluentd"
+	AWSLogsDriver     LoggingDriver = "awslogs"
+	SplunklogsDriver  LoggingDriver = "splunk"
+	LogentriesDriver  LoggingDriver = "logentries"
+	SumoLogicDriver   LoggingDriver = "sumologic"
+	NoneDriver        LoggingDriver = "none"
+	AWSFirelensDriver LoggingDriver = "awsfirelens"
+)
+
+// LoggingDriverMinimumVersion maps each logging driver to the minimum Docker
+// API version that supports it.
+// NOTE(review): AWSFirelensDriver deliberately(?) has no entry here —
+// confirm it is version-gated elsewhere rather than through this map.
+var LoggingDriverMinimumVersion = map[LoggingDriver]DockerVersion{
+	JSONFileDriver:   Version_1_18,
+	SyslogDriver:     Version_1_18,
+	JournaldDriver:   Version_1_19,
+	GelfDriver:       Version_1_20,
+	FluentdDriver:    Version_1_20,
+	AWSLogsDriver:    Version_1_21,
+	SplunklogsDriver: Version_1_22,
+	LogentriesDriver: Version_1_25,
+	SumoLogicDriver:  Version_1_29,
+	NoneDriver:       Version_1_19,
+}
diff --git a/agent/dockerclient/sdkclient/generate_mocks.go b/agent/dockerclient/sdkclient/generate_mocks.go
new file mode 100644
index 00000000000..af086e6669b
--- /dev/null
+++ b/agent/dockerclient/sdkclient/generate_mocks.go
@@ -0,0 +1,16 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+ +package sdkclient + +//go:generate mockgen -destination=mocks/sdkclient_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient Client diff --git a/agent/dockerclient/sdkclient/interface.go b/agent/dockerclient/sdkclient/interface.go new file mode 100644 index 00000000000..f9d48832ac4 --- /dev/null +++ b/agent/dockerclient/sdkclient/interface.go @@ -0,0 +1,63 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package sdkclient contains an interface for moby matching the +// subset used by the agent +package sdkclient + +import ( + "context" + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" +) + +// Client is an interface specifying the subset of +// github.com/docker/docker/client that the agent uses. 
+type Client interface { + ClientVersion() string + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, + networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) + ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error + ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error + ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) + ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, + options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, 
+ error) + Ping(ctx context.Context) (types.Ping, error) + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + ServerVersion(ctx context.Context) (types.Version, error) + Info(ctx context.Context) (types.Info, error) +} diff --git a/agent/dockerclient/sdkclient/mocks/sdkclient_mocks.go b/agent/dockerclient/sdkclient/mocks/sdkclient_mocks.go new file mode 100644 index 00000000000..da11a5c5a58 --- /dev/null +++ b/agent/dockerclient/sdkclient/mocks/sdkclient_mocks.go @@ -0,0 +1,442 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient (interfaces: Client) + +// Package mock_sdkclient is a generated GoMock package. 
+package mock_sdkclient + +import ( + context "context" + io "io" + reflect "reflect" + time "time" + + types "github.com/docker/docker/api/types" + container "github.com/docker/docker/api/types/container" + events "github.com/docker/docker/api/types/events" + filters "github.com/docker/docker/api/types/filters" + network "github.com/docker/docker/api/types/network" + volume "github.com/docker/docker/api/types/volume" + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// ClientVersion mocks base method +func (m *MockClient) ClientVersion() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientVersion") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClientVersion indicates an expected call of ClientVersion +func (mr *MockClientMockRecorder) ClientVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientVersion", reflect.TypeOf((*MockClient)(nil).ClientVersion)) +} + +// ContainerCreate mocks base method +func (m *MockClient) ContainerCreate(arg0 context.Context, arg1 *container.Config, arg2 *container.HostConfig, arg3 *network.NetworkingConfig, arg4 string) (container.ContainerCreateCreatedBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerCreate", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(container.ContainerCreateCreatedBody) + ret1, _ := ret[1].(error) + 
return ret0, ret1 +} + +// ContainerCreate indicates an expected call of ContainerCreate +func (mr *MockClientMockRecorder) ContainerCreate(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerCreate", reflect.TypeOf((*MockClient)(nil).ContainerCreate), arg0, arg1, arg2, arg3, arg4) +} + +// ContainerExecCreate mocks base method +func (m *MockClient) ContainerExecCreate(arg0 context.Context, arg1 string, arg2 types.ExecConfig) (types.IDResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerExecCreate", arg0, arg1, arg2) + ret0, _ := ret[0].(types.IDResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerExecCreate indicates an expected call of ContainerExecCreate +func (mr *MockClientMockRecorder) ContainerExecCreate(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecCreate", reflect.TypeOf((*MockClient)(nil).ContainerExecCreate), arg0, arg1, arg2) +} + +// ContainerExecInspect mocks base method +func (m *MockClient) ContainerExecInspect(arg0 context.Context, arg1 string) (types.ContainerExecInspect, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerExecInspect", arg0, arg1) + ret0, _ := ret[0].(types.ContainerExecInspect) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerExecInspect indicates an expected call of ContainerExecInspect +func (mr *MockClientMockRecorder) ContainerExecInspect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecInspect", reflect.TypeOf((*MockClient)(nil).ContainerExecInspect), arg0, arg1) +} + +// ContainerExecStart mocks base method +func (m *MockClient) ContainerExecStart(arg0 context.Context, arg1 string, arg2 types.ExecStartCheck) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerExecStart", 
arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ContainerExecStart indicates an expected call of ContainerExecStart +func (mr *MockClientMockRecorder) ContainerExecStart(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExecStart", reflect.TypeOf((*MockClient)(nil).ContainerExecStart), arg0, arg1, arg2) +} + +// ContainerInspect mocks base method +func (m *MockClient) ContainerInspect(arg0 context.Context, arg1 string) (types.ContainerJSON, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerInspect", arg0, arg1) + ret0, _ := ret[0].(types.ContainerJSON) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerInspect indicates an expected call of ContainerInspect +func (mr *MockClientMockRecorder) ContainerInspect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerInspect", reflect.TypeOf((*MockClient)(nil).ContainerInspect), arg0, arg1) +} + +// ContainerList mocks base method +func (m *MockClient) ContainerList(arg0 context.Context, arg1 types.ContainerListOptions) ([]types.Container, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerList", arg0, arg1) + ret0, _ := ret[0].([]types.Container) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerList indicates an expected call of ContainerList +func (mr *MockClientMockRecorder) ContainerList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerList", reflect.TypeOf((*MockClient)(nil).ContainerList), arg0, arg1) +} + +// ContainerRemove mocks base method +func (m *MockClient) ContainerRemove(arg0 context.Context, arg1 string, arg2 types.ContainerRemoveOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerRemove", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// 
ContainerRemove indicates an expected call of ContainerRemove +func (mr *MockClientMockRecorder) ContainerRemove(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRemove", reflect.TypeOf((*MockClient)(nil).ContainerRemove), arg0, arg1, arg2) +} + +// ContainerStart mocks base method +func (m *MockClient) ContainerStart(arg0 context.Context, arg1 string, arg2 types.ContainerStartOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerStart", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ContainerStart indicates an expected call of ContainerStart +func (mr *MockClientMockRecorder) ContainerStart(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStart", reflect.TypeOf((*MockClient)(nil).ContainerStart), arg0, arg1, arg2) +} + +// ContainerStats mocks base method +func (m *MockClient) ContainerStats(arg0 context.Context, arg1 string, arg2 bool) (types.ContainerStats, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerStats", arg0, arg1, arg2) + ret0, _ := ret[0].(types.ContainerStats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerStats indicates an expected call of ContainerStats +func (mr *MockClientMockRecorder) ContainerStats(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStats", reflect.TypeOf((*MockClient)(nil).ContainerStats), arg0, arg1, arg2) +} + +// ContainerStop mocks base method +func (m *MockClient) ContainerStop(arg0 context.Context, arg1 string, arg2 *time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerStop", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ContainerStop indicates an expected call of ContainerStop +func (mr *MockClientMockRecorder) ContainerStop(arg0, arg1, arg2 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStop", reflect.TypeOf((*MockClient)(nil).ContainerStop), arg0, arg1, arg2) +} + +// ContainerTop mocks base method +func (m *MockClient) ContainerTop(arg0 context.Context, arg1 string, arg2 []string) (container.ContainerTopOKBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerTop", arg0, arg1, arg2) + ret0, _ := ret[0].(container.ContainerTopOKBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ContainerTop indicates an expected call of ContainerTop +func (mr *MockClientMockRecorder) ContainerTop(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerTop", reflect.TypeOf((*MockClient)(nil).ContainerTop), arg0, arg1, arg2) +} + +// Events mocks base method +func (m *MockClient) Events(arg0 context.Context, arg1 types.EventsOptions) (<-chan events.Message, <-chan error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Events", arg0, arg1) + ret0, _ := ret[0].(<-chan events.Message) + ret1, _ := ret[1].(<-chan error) + return ret0, ret1 +} + +// Events indicates an expected call of Events +func (mr *MockClientMockRecorder) Events(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Events", reflect.TypeOf((*MockClient)(nil).Events), arg0, arg1) +} + +// ImageImport mocks base method +func (m *MockClient) ImageImport(arg0 context.Context, arg1 types.ImageImportSource, arg2 string, arg3 types.ImageImportOptions) (io.ReadCloser, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImageImport", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(io.ReadCloser) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImageImport indicates an expected call of ImageImport +func (mr *MockClientMockRecorder) ImageImport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageImport", reflect.TypeOf((*MockClient)(nil).ImageImport), arg0, arg1, arg2, arg3) +} + +// ImageInspectWithRaw mocks base method +func (m *MockClient) ImageInspectWithRaw(arg0 context.Context, arg1 string) (types.ImageInspect, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImageInspectWithRaw", arg0, arg1) + ret0, _ := ret[0].(types.ImageInspect) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ImageInspectWithRaw indicates an expected call of ImageInspectWithRaw +func (mr *MockClientMockRecorder) ImageInspectWithRaw(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageInspectWithRaw", reflect.TypeOf((*MockClient)(nil).ImageInspectWithRaw), arg0, arg1) +} + +// ImageList mocks base method +func (m *MockClient) ImageList(arg0 context.Context, arg1 types.ImageListOptions) ([]types.ImageSummary, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImageList", arg0, arg1) + ret0, _ := ret[0].([]types.ImageSummary) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImageList indicates an expected call of ImageList +func (mr *MockClientMockRecorder) ImageList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageList", reflect.TypeOf((*MockClient)(nil).ImageList), arg0, arg1) +} + +// ImageLoad mocks base method +func (m *MockClient) ImageLoad(arg0 context.Context, arg1 io.Reader, arg2 bool) (types.ImageLoadResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImageLoad", arg0, arg1, arg2) + ret0, _ := ret[0].(types.ImageLoadResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImageLoad indicates an expected call of ImageLoad +func (mr *MockClientMockRecorder) ImageLoad(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageLoad", reflect.TypeOf((*MockClient)(nil).ImageLoad), arg0, arg1, arg2) +} + +// ImagePull mocks base method +func (m *MockClient) ImagePull(arg0 context.Context, arg1 string, arg2 types.ImagePullOptions) (io.ReadCloser, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImagePull", arg0, arg1, arg2) + ret0, _ := ret[0].(io.ReadCloser) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImagePull indicates an expected call of ImagePull +func (mr *MockClientMockRecorder) ImagePull(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagePull", reflect.TypeOf((*MockClient)(nil).ImagePull), arg0, arg1, arg2) +} + +// ImageRemove mocks base method +func (m *MockClient) ImageRemove(arg0 context.Context, arg1 string, arg2 types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImageRemove", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.ImageDeleteResponseItem) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ImageRemove indicates an expected call of ImageRemove +func (mr *MockClientMockRecorder) ImageRemove(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRemove", reflect.TypeOf((*MockClient)(nil).ImageRemove), arg0, arg1, arg2) +} + +// Info mocks base method +func (m *MockClient) Info(arg0 context.Context) (types.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Info", arg0) + ret0, _ := ret[0].(types.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Info indicates an expected call of Info +func (mr *MockClientMockRecorder) Info(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockClient)(nil).Info), arg0) +} + +// Ping mocks base method +func (m *MockClient) Ping(arg0 
context.Context) (types.Ping, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ping", arg0) + ret0, _ := ret[0].(types.Ping) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Ping indicates an expected call of Ping +func (mr *MockClientMockRecorder) Ping(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockClient)(nil).Ping), arg0) +} + +// PluginList mocks base method +func (m *MockClient) PluginList(arg0 context.Context, arg1 filters.Args) (types.PluginsListResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PluginList", arg0, arg1) + ret0, _ := ret[0].(types.PluginsListResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PluginList indicates an expected call of PluginList +func (mr *MockClientMockRecorder) PluginList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginList", reflect.TypeOf((*MockClient)(nil).PluginList), arg0, arg1) +} + +// ServerVersion mocks base method +func (m *MockClient) ServerVersion(arg0 context.Context) (types.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServerVersion", arg0) + ret0, _ := ret[0].(types.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ServerVersion indicates an expected call of ServerVersion +func (mr *MockClientMockRecorder) ServerVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerVersion", reflect.TypeOf((*MockClient)(nil).ServerVersion), arg0) +} + +// VolumeCreate mocks base method +func (m *MockClient) VolumeCreate(arg0 context.Context, arg1 volume.VolumeCreateBody) (types.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VolumeCreate", arg0, arg1) + ret0, _ := ret[0].(types.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// VolumeCreate indicates an expected call of 
VolumeCreate +func (mr *MockClientMockRecorder) VolumeCreate(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeCreate", reflect.TypeOf((*MockClient)(nil).VolumeCreate), arg0, arg1) +} + +// VolumeInspect mocks base method +func (m *MockClient) VolumeInspect(arg0 context.Context, arg1 string) (types.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VolumeInspect", arg0, arg1) + ret0, _ := ret[0].(types.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// VolumeInspect indicates an expected call of VolumeInspect +func (mr *MockClientMockRecorder) VolumeInspect(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeInspect", reflect.TypeOf((*MockClient)(nil).VolumeInspect), arg0, arg1) +} + +// VolumeRemove mocks base method +func (m *MockClient) VolumeRemove(arg0 context.Context, arg1 string, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VolumeRemove", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// VolumeRemove indicates an expected call of VolumeRemove +func (mr *MockClientMockRecorder) VolumeRemove(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VolumeRemove", reflect.TypeOf((*MockClient)(nil).VolumeRemove), arg0, arg1, arg2) +} diff --git a/agent/dockerclient/sdkclientfactory/generate_mocks.go b/agent/dockerclient/sdkclientfactory/generate_mocks.go new file mode 100644 index 00000000000..34d2a9f8491 --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sdkclientfactory + +//go:generate mockgen -destination=mocks/sdkclientfactory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory Factory diff --git a/agent/dockerclient/sdkclientfactory/mocks/sdkclientfactory_mocks.go b/agent/dockerclient/sdkclientfactory/mocks/sdkclientfactory_mocks.go new file mode 100644 index 00000000000..3c85da6acb1 --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/mocks/sdkclientfactory_mocks.go @@ -0,0 +1,122 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory (interfaces: Factory) + +// Package mock_sdkclientfactory is a generated GoMock package. 
+package mock_sdkclientfactory + +import ( + reflect "reflect" + + dockerclient "github.com/aws/amazon-ecs-agent/agent/dockerclient" + sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" + gomock "github.com/golang/mock/gomock" +) + +// MockFactory is a mock of Factory interface +type MockFactory struct { + ctrl *gomock.Controller + recorder *MockFactoryMockRecorder +} + +// MockFactoryMockRecorder is the mock recorder for MockFactory +type MockFactoryMockRecorder struct { + mock *MockFactory +} + +// NewMockFactory creates a new mock instance +func NewMockFactory(ctrl *gomock.Controller) *MockFactory { + mock := &MockFactory{ctrl: ctrl} + mock.recorder = &MockFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { + return m.recorder +} + +// FindClientAPIVersion mocks base method +func (m *MockFactory) FindClientAPIVersion(arg0 sdkclient.Client) dockerclient.DockerVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindClientAPIVersion", arg0) + ret0, _ := ret[0].(dockerclient.DockerVersion) + return ret0 +} + +// FindClientAPIVersion indicates an expected call of FindClientAPIVersion +func (mr *MockFactoryMockRecorder) FindClientAPIVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindClientAPIVersion", reflect.TypeOf((*MockFactory)(nil).FindClientAPIVersion), arg0) +} + +// FindKnownAPIVersions mocks base method +func (m *MockFactory) FindKnownAPIVersions() []dockerclient.DockerVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindKnownAPIVersions") + ret0, _ := ret[0].([]dockerclient.DockerVersion) + return ret0 +} + +// FindKnownAPIVersions indicates an expected call of FindKnownAPIVersions +func (mr *MockFactoryMockRecorder) FindKnownAPIVersions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindKnownAPIVersions", reflect.TypeOf((*MockFactory)(nil).FindKnownAPIVersions)) +} + +// FindSupportedAPIVersions mocks base method +func (m *MockFactory) FindSupportedAPIVersions() []dockerclient.DockerVersion { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindSupportedAPIVersions") + ret0, _ := ret[0].([]dockerclient.DockerVersion) + return ret0 +} + +// FindSupportedAPIVersions indicates an expected call of FindSupportedAPIVersions +func (mr *MockFactoryMockRecorder) FindSupportedAPIVersions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindSupportedAPIVersions", reflect.TypeOf((*MockFactory)(nil).FindSupportedAPIVersions)) +} + +// GetClient mocks base method +func (m *MockFactory) GetClient(arg0 dockerclient.DockerVersion) (sdkclient.Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClient", arg0) + ret0, _ := ret[0].(sdkclient.Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClient indicates an expected call of GetClient +func (mr *MockFactoryMockRecorder) GetClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockFactory)(nil).GetClient), arg0) +} + +// GetDefaultClient mocks base method +func (m *MockFactory) GetDefaultClient() (sdkclient.Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDefaultClient") + ret0, _ := ret[0].(sdkclient.Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDefaultClient indicates an expected call of GetDefaultClient +func (mr *MockFactoryMockRecorder) GetDefaultClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultClient", reflect.TypeOf((*MockFactory)(nil).GetDefaultClient)) +} diff --git a/agent/dockerclient/sdkclientfactory/sdkclientfactory.go b/agent/dockerclient/sdkclientfactory/sdkclientfactory.go 
new file mode 100644 index 00000000000..dc9f099404a --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/sdkclientfactory.go @@ -0,0 +1,183 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sdkclientfactory + +import ( + "context" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" + log "github.com/cihub/seelog" + docker "github.com/docker/docker/client" + "github.com/pkg/errors" +) + +// Factory provides a collection of docker remote clients that include a +// recommended client version as well as a set of alternative supported +// docker clients. +type Factory interface { + // GetDefaultClient returns a versioned client for the default version + GetDefaultClient() (sdkclient.Client, error) + + // GetClient returns a client with the specified version or an error + // if the client doesn't exist. + GetClient(version dockerclient.DockerVersion) (sdkclient.Client, error) + + // FindSupportedAPIVersions returns a slice of agent-supported Docker API + // versions. Versions are tested by making calls against the Docker daemon + // and may occur either at Factory creation time or lazily upon invocation + // of this function. The slice represents the intersection of + // agent-supported versions and daemon-supported versions. 
+ FindSupportedAPIVersions() []dockerclient.DockerVersion + + // FindKnownAPIVersions returns a slice of Docker API versions that are + // known to the Docker daemon. Versions are tested by making calls against + // the Docker daemon and may occur either at Factory creation time or + // lazily upon invocation of this function. The slice represents the + // intersection of the API versions that the agent knows exist (but does + // not necessarily fully support) and the versions that result in + // successful responses by the Docker daemon. + FindKnownAPIVersions() []dockerclient.DockerVersion + + // FindClientAPIVersion returns the client api version + FindClientAPIVersion(sdkclient.Client) dockerclient.DockerVersion +} + +type factory struct { + endpoint string + clients map[dockerclient.DockerVersion]sdkclient.Client +} + +// newVersionedClient is a variable such that the implementation can be +// swapped out for unit tests +var newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + return docker.NewClientWithOpts(docker.WithVersion(version), docker.WithHost(endpoint)) +} + +// NewFactory initializes a client factory using a specified endpoint. 
+func NewFactory(ctx context.Context, endpoint string) Factory { + return &factory{ + endpoint: endpoint, + clients: findDockerVersions(ctx, endpoint), + } +} + +func (f *factory) GetDefaultClient() (sdkclient.Client, error) { + return f.GetClient(GetDefaultVersion()) +} + +func (f *factory) FindSupportedAPIVersions() []dockerclient.DockerVersion { + var supportedVersions []dockerclient.DockerVersion + for _, testVersion := range getAgentSupportedDockerVersions() { + _, err := f.GetClient(testVersion) + if err != nil { + continue + } + supportedVersions = append(supportedVersions, testVersion) + } + return supportedVersions +} + +func (f *factory) FindKnownAPIVersions() []dockerclient.DockerVersion { + var knownVersions []dockerclient.DockerVersion + for _, testVersion := range dockerclient.GetKnownAPIVersions() { + _, err := f.GetClient(testVersion) + if err != nil { + continue + } + knownVersions = append(knownVersions, testVersion) + } + return knownVersions +} + +// FindClientAPIVersion returns the version of the client from the map +func (f *factory) FindClientAPIVersion(client sdkclient.Client) dockerclient.DockerVersion { + return dockerclient.DockerVersion(client.ClientVersion()) +} + +// getClient returns a client specified by the docker version. It's wrapped +// by GetClient so that it can do platform-specific magic. 
+func (f *factory) getClient(version dockerclient.DockerVersion) (sdkclient.Client, error) { + client, ok := f.clients[version] + if !ok { + return nil, errors.New("docker client factory: client not found for docker version: " + string(version)) + } + return client, nil +} + +// findDockerVersions loops over all known API versions and finds which ones +// are supported by the docker daemon on the host +func findDockerVersions(ctx context.Context, endpoint string) map[dockerclient.DockerVersion]sdkclient.Client { + // if the client version returns a MinAPIVersion, use it to return all the + // Docker clients between MinAPIVersion and APIVersion, else try pinging + // the clients in getKnownAPIVersions + var minAPIVersion, apiVersion string + // get a Docker client with the default supported version + client, err := newVersionedClient(endpoint, string(minDockerAPIVersion)) + if err == nil { + derivedCtx, cancel := context.WithTimeout(ctx, dockerclient.VersionTimeout) + defer cancel() + serverVersion, err := client.ServerVersion(derivedCtx) + if err == nil { + apiVersion = serverVersion.APIVersion + // check if the docker.Version has MinAPIVersion value + if serverVersion.MinAPIVersion != "" { + minAPIVersion = serverVersion.MinAPIVersion + } + } + } + clients := make(map[dockerclient.DockerVersion]sdkclient.Client) + for _, version := range dockerclient.GetKnownAPIVersions() { + dockerClient, err := getDockerClientForVersion(endpoint, string(version), minAPIVersion, apiVersion, ctx) + if err != nil { + log.Infof("Unable to get Docker client for version %s: %v", version, err) + continue + } + clients[version] = dockerClient + } + return clients +} + +func getDockerClientForVersion( + endpoint string, + version string, + minAPIVersion string, + apiVersion string, + derivedCtx context.Context) (sdkclient.Client, error) { + if minAPIVersion != "" && apiVersion != "" { + lessThanMinCheck := "<" + minAPIVersion + moreThanMaxCheck := ">" + apiVersion + minVersionCheck, err 
:= dockerclient.DockerAPIVersion(version).Matches(lessThanMinCheck) + if err != nil { + return nil, errors.Wrapf(err, "version detection using MinAPIVersion: unable to get min version: %s", minAPIVersion) + } + maxVersionCheck, err := dockerclient.DockerAPIVersion(version).Matches(moreThanMaxCheck) + if err != nil { + return nil, errors.Wrapf(err, "version detection using MinAPIVersion: unable to get max version: %s", apiVersion) + } + // Do not add the version when it is outside minVersion to maxVersion + if minVersionCheck || maxVersionCheck { + return nil, errors.Errorf("version detection using MinAPIVersion: unsupported version: %s", version) + } + } + client, err := newVersionedClient(endpoint, string(version)) + if err != nil { + return nil, errors.Wrapf(err, "version detection check: unable to create Docker client for version: %s", version) + } + _, err = client.Ping(derivedCtx) + if err != nil { + return nil, errors.Wrapf(err, "version detection check: failed to ping with Docker version: %s", string(version)) + } + return client, nil +} diff --git a/agent/dockerclient/sdkclientfactory/sdkclientfactory_test.go b/agent/dockerclient/sdkclientfactory/sdkclientfactory_test.go new file mode 100644 index 00000000000..39480f44955 --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/sdkclientfactory_test.go @@ -0,0 +1,186 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package sdkclientfactory + +import ( + "context" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" + mock_sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient/mocks" + docker "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const expectedEndpoint = "expectedEndpoint" + +func TestGetDefaultClientSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + expectedClient := mock_sdkclient.NewMockClient(ctrl) + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + mockClient := mock_sdkclient.NewMockClient(ctrl) + if version == string(GetDefaultVersion()) { + mockClient = expectedClient + } + mockClient.EXPECT().ServerVersion(gomock.Any()).Return(docker.Version{}, nil).AnyTimes() + mockClient.EXPECT().Ping(gomock.Any()).AnyTimes() + + return mockClient, nil + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + factory := NewFactory(ctx, expectedEndpoint) + actualClient, err := factory.GetDefaultClient() + assert.Nil(t, err) + assert.Equal(t, expectedClient, actualClient) +} + +func TestFindSupportedAPIVersions(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dockerVersions := getAgentSupportedDockerVersions() + allVersions := dockerclient.GetKnownAPIVersions() + + // Set up the mocks and expectations + mockClients := make(map[string]*mock_sdkclient.MockClient) + + // Ensure that agent pings all known versions of Docker API + for i := 0; i < len(allVersions); i++ { + mockClients[string(allVersions[i])] = mock_sdkclient.NewMockClient(ctrl) + mockClients[string(allVersions[i])].EXPECT().ServerVersion(gomock.Any()).Return(docker.Version{}, nil).AnyTimes() + mockClients[string(allVersions[i])].EXPECT().Ping(gomock.Any()).AnyTimes() + } + + // Define the function for the mock client + // For simplicity, 
we will pretend all versions of docker are available + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + return mockClients[version], nil + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + factory := NewFactory(ctx, expectedEndpoint) + actualVersions := factory.FindSupportedAPIVersions() + + assert.Equal(t, len(dockerVersions), len(actualVersions)) + for i := 0; i < len(actualVersions); i++ { + assert.Equal(t, dockerVersions[i], actualVersions[i]) + } +} + +func TestVerifyAgentVersions(t *testing.T) { + var isKnown = func(v1 dockerclient.DockerVersion) bool { + for _, v2 := range getAgentSupportedDockerVersions() { + if v1 == v2 { + return true + } + } + return false + } + + // Validate that agentVersions is a subset of allVersions + for _, agentVersion := range getAgentSupportedDockerVersions() { + assert.True(t, isKnown(agentVersion)) + } +} + +func TestFindSupportedAPIVersionsFromMinAPIVersions(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dockerVersions := getAgentSupportedDockerVersions() + allVersions := dockerclient.GetKnownAPIVersions() + + // Set up the mocks and expectations + mockClients := make(map[string]*mock_sdkclient.MockClient) + + // Ensure that agent pings all known versions of Docker API + for i := 0; i < len(allVersions); i++ { + mockClients[string(allVersions[i])] = mock_sdkclient.NewMockClient(ctrl) + mockClients[string(allVersions[i])].EXPECT().ServerVersion(gomock.Any()).Return(docker.Version{}, nil).AnyTimes() + mockClients[string(allVersions[i])].EXPECT().Ping(gomock.Any()).AnyTimes() + } + + // Define the function for the mock client + // For simplicity, we will pretend all versions of docker are available + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + return mockClients[version], nil + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + factory := NewFactory(ctx, expectedEndpoint) + 
actualVersions := factory.FindSupportedAPIVersions() + + assert.Equal(t, len(dockerVersions), len(actualVersions)) + for i := 0; i < len(actualVersions); i++ { + assert.Equal(t, dockerVersions[i], actualVersions[i]) + } +} + +func TestCompareDockerVersionsWithMinAPIVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + minAPIVersion := "1.12" + apiVersion := "1.32" + versions := []string{"1.11", "1.33"} + rightVersion := "1.25" + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + for _, version := range versions { + _, err := getDockerClientForVersion("endpoint", version, minAPIVersion, apiVersion, ctx) + assert.EqualError(t, err, "version detection using MinAPIVersion: unsupported version: "+version) + } + + mockClients := make(map[string]*mock_sdkclient.MockClient) + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + mockClients[version] = mock_sdkclient.NewMockClient(ctrl) + mockClients[version].EXPECT().Ping(gomock.Any()) + return mockClients[version], nil + } + client, _ := getDockerClientForVersion("endpoint", rightVersion, minAPIVersion, apiVersion, ctx) + assert.Equal(t, mockClients[rightVersion], client) +} + +func TestGetClientCached(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + mockClient := mock_sdkclient.NewMockClient(ctrl) + mockClient.EXPECT().ServerVersion(gomock.Any()).Return(docker.Version{}, nil).AnyTimes() + mockClient.EXPECT().Ping(gomock.Any()).AnyTimes() + return mockClient, nil + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + factory := NewFactory(ctx, expectedEndpoint) + client, err := factory.GetClient(dockerclient.Version_1_17) + assert.Nil(t, err) + + clientAgain, errAgain := factory.GetClient(dockerclient.Version_1_17) + assert.Nil(t, errAgain) + + assert.Equal(t, client, clientAgain) +} diff --git 
a/agent/dockerclient/sdkclientfactory/sdkclientfactory_unix_test.go b/agent/dockerclient/sdkclientfactory/sdkclientfactory_unix_test.go new file mode 100644 index 00000000000..ffeadf29ba3 --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/sdkclientfactory_unix_test.go @@ -0,0 +1,38 @@ +// +build unit,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sdkclientfactory + +import ( + "context" + "testing" + + mock_sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestFindClientAPIVersion(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl := gomock.NewController(t) + mockClient := mock_sdkclient.NewMockClient(ctrl) + factory := NewFactory(ctx, expectedEndpoint) + + for _, version := range getAgentSupportedDockerVersions() { + mockClient.EXPECT().ClientVersion().Return(string(version)) + assert.Equal(t, version, factory.FindClientAPIVersion(mockClient)) + } +} diff --git a/agent/dockerclient/sdkclientfactory/sdkclientfactory_windows_test.go b/agent/dockerclient/sdkclientfactory/sdkclientfactory_windows_test.go new file mode 100644 index 00000000000..d88efd86edb --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/sdkclientfactory_windows_test.go @@ -0,0 +1,80 @@ +// +build unit,windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sdkclientfactory + +import ( + "context" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" + mock_sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient/mocks" + docker "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestGetClientMinimumVersion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + expectedClient := mock_sdkclient.NewMockClient(ctrl) + + newVersionedClient = func(endpoint, version string) (sdkclient.Client, error) { + mockClient := mock_sdkclient.NewMockClient(ctrl) + if version == string(minDockerAPIVersion) { + mockClient = expectedClient + } + mockClient.EXPECT().ServerVersion(gomock.Any()).Return(docker.Version{}, nil).AnyTimes() + mockClient.EXPECT().Ping(gomock.Any()).AnyTimes() + return mockClient, nil + } + + // Ensure a call to GetClient with a version below the minDockerAPIVersion + // is replaced by the minDockerAPIVersion + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + factory := NewFactory(ctx, expectedEndpoint) + actualClient, err := factory.GetClient(dockerclient.Version_1_19) + + assert.NoError(t, err) + assert.Equal(t, expectedClient, actualClient) +} + +func TestFindClientAPIVersion(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl := gomock.NewController(t) + 
mockClient := mock_sdkclient.NewMockClient(ctrl) + factory := NewFactory(ctx, expectedEndpoint) + + for _, version := range getAgentSupportedDockerVersions() { + if isWindowsReplaceableVersion(version) { + version = minDockerAPIVersion + } + mockClient.EXPECT().ClientVersion().Return(string(version)) + assert.Equal(t, version, factory.FindClientAPIVersion(mockClient)) + } +} + +func isWindowsReplaceableVersion(version dockerclient.DockerVersion) bool { + for _, v := range getWindowsReplaceableVersions() { + if v == version { + return true + } + } + return false +} diff --git a/agent/dockerclient/sdkclientfactory/versionsupport_unix.go b/agent/dockerclient/sdkclientfactory/versionsupport_unix.go new file mode 100644 index 00000000000..8a9a8e1261e --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/versionsupport_unix.go @@ -0,0 +1,57 @@ +// +build !windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package sdkclientfactory + +import ( + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" +) + +const ( + // minDockerAPIVersion is the min Docker API version supported by agent + minDockerAPIVersion = dockerclient.Version_1_17 +) + +// GetClient on linux will simply return the cached client from the map +func (f *factory) GetClient(version dockerclient.DockerVersion) (sdkclient.Client, error) { + return f.getClient(version) +} + +// getAgentSupportedDockerVersions returns a list of agent-supported Docker versions for linux +func getAgentSupportedDockerVersions() []dockerclient.DockerVersion { + return []dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + dockerclient.Version_1_19, + dockerclient.Version_1_20, + dockerclient.Version_1_21, + dockerclient.Version_1_22, + dockerclient.Version_1_23, + dockerclient.Version_1_24, + dockerclient.Version_1_25, + dockerclient.Version_1_26, + dockerclient.Version_1_27, + dockerclient.Version_1_28, + dockerclient.Version_1_29, + dockerclient.Version_1_30, + dockerclient.Version_1_31, + dockerclient.Version_1_32, + } +} + +// getDefaultVersion will return the default Docker API version for linux +func GetDefaultVersion() dockerclient.DockerVersion { + return dockerclient.Version_1_21 +} diff --git a/agent/dockerclient/sdkclientfactory/versionsupport_windows.go b/agent/dockerclient/sdkclientfactory/versionsupport_windows.go new file mode 100644 index 00000000000..bec18833a71 --- /dev/null +++ b/agent/dockerclient/sdkclientfactory/versionsupport_windows.go @@ -0,0 +1,73 @@ +// +build windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sdkclientfactory + +import ( + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient" +) + +// minDockerAPIVersion is the min Docker API version supported by agent +const minDockerAPIVersion = dockerclient.Version_1_24 + +// GetClient will replace some versions of Docker on Windows. We need this because +// agent assumes that it can always call older versions of the docker API. +func (f *factory) GetClient(version dockerclient.DockerVersion) (sdkclient.Client, error) { + for _, v := range getWindowsReplaceableVersions() { + if v == version { + version = minDockerAPIVersion + break + } + } + return f.getClient(version) +} + +// getWindowsReplaceableVersions returns the set of versions that agent will report +// as Docker 1.24 +func getWindowsReplaceableVersions() []dockerclient.DockerVersion { + return []dockerclient.DockerVersion{ + dockerclient.Version_1_17, + dockerclient.Version_1_18, + dockerclient.Version_1_19, + dockerclient.Version_1_20, + dockerclient.Version_1_21, + dockerclient.Version_1_22, + dockerclient.Version_1_23, + } +} + +// getWindowsSupportedVersions returns the set of remote api versions that are +// supported by agent in windows +func getWindowsSupportedVersions() []dockerclient.DockerVersion { + return []dockerclient.DockerVersion{ + dockerclient.Version_1_24, + dockerclient.Version_1_25, + dockerclient.Version_1_26, + dockerclient.Version_1_27, + dockerclient.Version_1_28, + dockerclient.Version_1_29, + dockerclient.Version_1_30, + } +} + +// getAgentSupportedDockerVersions for Windows should return all of 
the replaceable versions plus supported versions +func getAgentSupportedDockerVersions() []dockerclient.DockerVersion { + return append(getWindowsReplaceableVersions(), getWindowsSupportedVersions()...) +} + +// getDefaultVersion returns agent's default version of the Docker API +func GetDefaultVersion() dockerclient.DockerVersion { + return minDockerAPIVersion +} diff --git a/agent/dockerclient/timeout.go b/agent/dockerclient/timeout.go new file mode 100644 index 00000000000..de39d863ca0 --- /dev/null +++ b/agent/dockerclient/timeout.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerclient + +import "time" + +// Timelimits for docker operations enforced above docker +const ( + // ListImagesTimeout is the timeout for the ListImages API + ListImagesTimeout = 10 * time.Minute + // LoadImageTimeout is the timeout for the LoadImage API. It's set + // to much lower value than pullImageTimeout as it involves loading + // image from either a file or STDIN calls involved. + // This value is set based on benchmarking the time of loading the pause container image, + // since it's currently only used to load that image. If this is to be used to load any + // other image in the future, additional benchmarking will be required. 
+ // Per benchmark, 2 min is roughly 4x of the worst case (29s) across 400 image loads on + // al1/al2/al2gpu/al2arm with smallest instance type available (t2.nano/a1.medium) and + // burst balance = 0. + LoadImageTimeout = 2 * time.Minute + // RemoveImageTimeout is the timeout for the RemoveImage API. + RemoveImageTimeout = 3 * time.Minute + // ListContainersTimeout is the timeout for the ListContainers API. + ListContainersTimeout = 10 * time.Minute + // InspectContainerTimeout is the timeout for the InspectContainer API. + InspectContainerTimeout = 30 * time.Second + // TopContainerTimeout is the timeout for the TopContainer API. + TopContainerTimeout = 30 * time.Second + // ContainerExecCreateTimeout is the timeout for the ContainerExecCreate API. + ContainerExecCreateTimeout = 1 * time.Minute + // ContainerExecStartTimeout is the timeout for the ContainerExecStart API. + ContainerExecStartTimeout = 1 * time.Minute + // ContainerExecInspectTimeout is the timeout for the ContainerExecInspect API. + ContainerExecInspectTimeout = 1 * time.Minute + // StopContainerTimeout is the timeout for the StopContainer API. + StopContainerTimeout = 30 * time.Second + // RemoveContainerTimeout is the timeout for the RemoveContainer API. + RemoveContainerTimeout = 5 * time.Minute + + // CreateVolumeTimeout is the timeout for CreateVolume API. + CreateVolumeTimeout = 5 * time.Minute + // InspectVolumeTimeout is the timeout for InspectVolume API. + InspectVolumeTimeout = 5 * time.Minute + // RemoveVolumeTimeout is the timeout for RemoveVolume API. + RemoveVolumeTimeout = 5 * time.Minute + + // ListPluginsTimeout is the timeout for ListPlugins API. + ListPluginsTimeout = 1 * time.Minute + + // StatsInactivityTimeout controls the amount of time we hold open a + // connection to the Docker daemon waiting for stats data. 
+ // We set this a few seconds below our stats publishing interval (20s) + // so that we can disconnect and warn the user when metrics reporting + // may be impacted. This is usually caused by an over-stressed docker daemon. + StatsInactivityTimeout = 18 * time.Second + + // DockerPullBeginTimeout is the timeout from when a 'pull' is called to when + // we expect to see output on the pull progress stream. This is to work + // around a docker bug which sometimes results in pulls not progressing. + DockerPullBeginTimeout = 5 * time.Minute + + // VersionTimeout is the timeout for the Version API + VersionTimeout = 10 * time.Second + + // InfoTimeout is the timeout for the Info API + InfoTimeout = 10 * time.Second +) diff --git a/agent/dockerclient/versions.go b/agent/dockerclient/versions.go new file mode 100644 index 00000000000..c9e9913e556 --- /dev/null +++ b/agent/dockerclient/versions.go @@ -0,0 +1,62 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dockerclient + +type DockerVersion string + +const ( + Version_1_17 DockerVersion = "1.17" + Version_1_18 DockerVersion = "1.18" + Version_1_19 DockerVersion = "1.19" + Version_1_20 DockerVersion = "1.20" + Version_1_21 DockerVersion = "1.21" + Version_1_22 DockerVersion = "1.22" + Version_1_23 DockerVersion = "1.23" + Version_1_24 DockerVersion = "1.24" + Version_1_25 DockerVersion = "1.25" + Version_1_26 DockerVersion = "1.26" + Version_1_27 DockerVersion = "1.27" + Version_1_28 DockerVersion = "1.28" + Version_1_29 DockerVersion = "1.29" + Version_1_30 DockerVersion = "1.30" + Version_1_31 DockerVersion = "1.31" + Version_1_32 DockerVersion = "1.32" +) + +func (d DockerVersion) String() string { + return string(d) +} + +// GetKnownAPIVersions returns all of the API versions that we know about. +// It doesn't care if the version is supported by Docker or ECS agent +func GetKnownAPIVersions() []DockerVersion { + return []DockerVersion{ + Version_1_17, + Version_1_18, + Version_1_19, + Version_1_20, + Version_1_21, + Version_1_22, + Version_1_23, + Version_1_24, + Version_1_25, + Version_1_26, + Version_1_27, + Version_1_28, + Version_1_29, + Version_1_30, + Version_1_31, + Version_1_32, + } +} diff --git a/agent/ec2/blackhole_ec2_metadata_client.go b/agent/ec2/blackhole_ec2_metadata_client.go new file mode 100644 index 00000000000..eb7d33d8758 --- /dev/null +++ b/agent/ec2/blackhole_ec2_metadata_client.go @@ -0,0 +1,86 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package ec2 + +import ( + "errors" + + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +type blackholeMetadataClient struct{} + +func NewBlackholeEC2MetadataClient() EC2MetadataClient { + return blackholeMetadataClient{} +} + +func (blackholeMetadataClient) DefaultCredentials() (*RoleCredentials, error) { + return nil, errors.New("blackholed") +} + +func (blackholeMetadataClient) InstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) { + return ec2metadata.EC2InstanceIdentityDocument{}, errors.New("blackholed") +} + +func (blackholeMetadataClient) PrimaryENIMAC() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) AllENIMacs() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) VPCID(mac string) (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) SubnetID(mac string) (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) InstanceID() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) GetMetadata(path string) (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) GetDynamicData(path string) (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) GetUserData() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) Region() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) PrivateIPv4Address() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) PublicIPv4Address() (string, error) { + return "", errors.New("blackholed") +} + +func (blackholeMetadataClient) SpotInstanceAction() (string, error) { + return "", errors.New("blackholed") +} + +func 
(blackholeMetadataClient) OutpostARN() (string, error) { + return "", errors.New("blackholed") +} diff --git a/agent/ec2/ec2_client.go b/agent/ec2/ec2_client.go new file mode 100644 index 00000000000..44243d62b33 --- /dev/null +++ b/agent/ec2/ec2_client.go @@ -0,0 +1,107 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ec2 + +import ( + "strings" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + ec2sdk "github.com/aws/aws-sdk-go/service/ec2" + "github.com/cihub/seelog" +) + +const ( + clientRetriesNum = 5 + + // InstanceMaxTagsNum is the maximum number of tags that an instance can have. 
+ InstanceMaxTagsNum = 50 + + ResourceIDFilterName = "resource-id" + ResourceTypeFilterName = "resource-type" + ResourceTypeFilterValueInstance = "instance" + awsTagPrefix = "aws:" +) + +type Client interface { + CreateTags(input *ec2sdk.CreateTagsInput) (*ec2sdk.CreateTagsOutput, error) + DescribeECSTagsForInstance(instanceID string) ([]*ecs.Tag, error) +} + +type ClientSDK interface { + CreateTags(input *ec2sdk.CreateTagsInput) (*ec2sdk.CreateTagsOutput, error) + DescribeTags(input *ec2sdk.DescribeTagsInput) (*ec2sdk.DescribeTagsOutput, error) +} + +type ClientImpl struct { + client ClientSDK +} + +func NewClientImpl(awsRegion string) Client { + var ec2Config aws.Config + ec2Config.Region = aws.String(awsRegion) + client := ec2sdk.New(session.New(&ec2Config), aws.NewConfig().WithMaxRetries(clientRetriesNum)) + return &ClientImpl{ + client: client, + } +} + +// SetClientSDK overrides the SDK to the given one. This is useful for injecting a +// test implementation +func (c *ClientImpl) SetClientSDK(sdk ClientSDK) { + c.client = sdk +} + +// DescribeECSTagsForInstance calls DescribeTags API to get the EC2 tags of the +// instance id, and return it back as ECS tags +func (c *ClientImpl) DescribeECSTagsForInstance(instanceID string) ([]*ecs.Tag, error) { + describeTagsInput := ec2sdk.DescribeTagsInput{ + Filters: []*ec2sdk.Filter{ + { + Name: aws.String(ResourceIDFilterName), + Values: []*string{aws.String(instanceID)}, + }, + { + Name: aws.String(ResourceTypeFilterName), + Values: []*string{aws.String(ResourceTypeFilterValueInstance)}, + }, + }, + MaxResults: aws.Int64(InstanceMaxTagsNum), + } + res, err := c.client.DescribeTags(&describeTagsInput) + + if err != nil { + seelog.Criticalf("Error calling DescribeTags API: %v", err) + return nil, err + } + + var tags []*ecs.Tag + // Convert ec2 tags to ecs tags + for _, ec2Tag := range res.Tags { + // Filter out all tags with the "aws:" prefix + if !strings.HasPrefix(strings.ToLower(aws.StringValue(ec2Tag.Key)), awsTagPrefix) && + 
!strings.HasPrefix(strings.ToLower(aws.StringValue(ec2Tag.Value)), awsTagPrefix) { + tags = append(tags, &ecs.Tag{ + Key: ec2Tag.Key, + Value: ec2Tag.Value, + }) + } + } + return tags, nil +} + +func (c *ClientImpl) CreateTags(input *ec2sdk.CreateTagsInput) (*ec2sdk.CreateTagsOutput, error) { + return c.client.CreateTags(input) +} diff --git a/agent/ec2/ec2_client_test.go b/agent/ec2/ec2_client_test.go new file mode 100644 index 00000000000..87d5bb3931b --- /dev/null +++ b/agent/ec2/ec2_client_test.go @@ -0,0 +1,87 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ec2_test + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + "github.com/aws/aws-sdk-go/aws" + ec2sdk "github.com/aws/aws-sdk-go/service/ec2" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestCreateTags(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClientSDK := mock_ec2.NewMockClientSDK(ctrl) + testClient := ec2.NewClientImpl("us-west-2") + testClient.(*ec2.ClientImpl).SetClientSDK(mockClientSDK) + + createTagsInput := &ec2sdk.CreateTagsInput{} + createTagsOutput := &ec2sdk.CreateTagsOutput{} + + mockClientSDK.EXPECT().CreateTags(createTagsInput).Return(createTagsOutput, nil) + + res, err := testClient.CreateTags(createTagsInput) + assert.NoError(t, err) + assert.Equal(t, createTagsOutput, res) +} + +func TestDescribeECSTagsForInstance(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + instanceID := "iid" + mockClientSDK := mock_ec2.NewMockClientSDK(ctrl) + testClient := ec2.NewClientImpl("us-west-2") + testClient.(*ec2.ClientImpl).SetClientSDK(mockClientSDK) + + describeTagsOutput := &ec2sdk.DescribeTagsOutput{ + Tags: []*ec2sdk.TagDescription{ + { + Key: aws.String("key"), + Value: aws.String("value"), + }, + { + Key: aws.String("aws:key"), + Value: aws.String("aws:value"), + }, + { + Key: aws.String("aWS:key"), + Value: aws.String("value"), + }, + { + Key: aws.String("key"), + Value: aws.String("Aws:value"), + }, + }, + } + + mockClientSDK.EXPECT().DescribeTags(gomock.Any()).Do(func(input *ec2sdk.DescribeTagsInput) { + assert.Equal(t, len(input.Filters), 2) + assert.Equal(t, aws.StringValue(input.Filters[0].Values[0]), instanceID) + }).Return(describeTagsOutput, nil) + + tags, err := testClient.DescribeECSTagsForInstance(instanceID) + assert.NoError(t, err) + assert.Equal(t, len(tags), 1) + assert.Equal(t, aws.StringValue(tags[0].Key), "key") + assert.Equal(t, 
aws.StringValue(tags[0].Value), "value") +} diff --git a/agent/ec2/ec2_metadata_client.go b/agent/ec2/ec2_metadata_client.go new file mode 100644 index 00000000000..e45de83af96 --- /dev/null +++ b/agent/ec2/ec2_metadata_client.go @@ -0,0 +1,202 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ec2 + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" +) + +const ( + SecurityCrednetialsResource = "iam/security-credentials/" + InstanceIdentityDocumentResource = "instance-identity/document" + InstanceIdentityDocumentSignatureResource = "instance-identity/signature" + MacResource = "mac" + AllMacResource = "network/interfaces/macs" + VPCIDResourceFormat = "network/interfaces/macs/%s/vpc-id" + SubnetIDResourceFormat = "network/interfaces/macs/%s/subnet-id" + SpotInstanceActionResource = "spot/instance-action" + InstanceIDResource = "instance-id" + PrivateIPv4Resource = "local-ipv4" + PublicIPv4Resource = "public-ipv4" + OutpostARN = "outpost-arn" +) + +const ( + metadataRetries = 5 +) + +// RoleCredentials contains the information associated with an IAM role +type RoleCredentials struct { + Code string `json:"Code"` + LastUpdated time.Time `json:"LastUpdated"` + Type string `json:"Type"` + AccessKeyId string `json:"AccessKeyId"` + SecretAccessKey string `json:"SecretAccessKey"` + Token string 
`json:"Token"` + Expiration time.Time `json:"Expiration"` +} + +type HttpClient interface { + GetMetadata(string) (string, error) + GetDynamicData(string) (string, error) + GetInstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) + GetUserData() (string, error) + Region() (string, error) +} + +// EC2MetadataClient is the client used to get metadata from instance metadata service +type EC2MetadataClient interface { + DefaultCredentials() (*RoleCredentials, error) + GetMetadata(string) (string, error) + GetDynamicData(string) (string, error) + InstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) + VPCID(mac string) (string, error) + SubnetID(mac string) (string, error) + PrimaryENIMAC() (string, error) + AllENIMacs() (string, error) + InstanceID() (string, error) + GetUserData() (string, error) + Region() (string, error) + PrivateIPv4Address() (string, error) + PublicIPv4Address() (string, error) + SpotInstanceAction() (string, error) + OutpostARN() (string, error) +} + +type ec2MetadataClientImpl struct { + client HttpClient +} + +// NewEC2MetadataClient creates an ec2metadata client to retrieve metadata +func NewEC2MetadataClient(client HttpClient) EC2MetadataClient { + if client == nil { + return &ec2MetadataClientImpl{ + client: ec2metadata.New( + session.New(), aws.NewConfig().WithMaxRetries(metadataRetries)), + } + } else { + return &ec2MetadataClientImpl{client: client} + } +} + +// DefaultCredentials returns the credentials associated with the instance iam role +func (c *ec2MetadataClientImpl) DefaultCredentials() (*RoleCredentials, error) { + securityCredential, err := c.client.GetMetadata(SecurityCrednetialsResource) + if err != nil { + return nil, err + } + + securityCredentialList := strings.Split(strings.TrimSpace(securityCredential), "\n") + if len(securityCredentialList) == 0 { + return nil, errors.New("No security credentials in response") + } + + defaultCredentialName := securityCredentialList[0] + + 
defaultCredentialStr, err := c.client.GetMetadata(SecurityCrednetialsResource + defaultCredentialName) + if err != nil { + return nil, err + } + var credential RoleCredentials + err = json.Unmarshal([]byte(defaultCredentialStr), &credential) + if err != nil { + return nil, err + } + return &credential, nil +} + +// GetDynamicData returns the dynamic data with provided path from instance metadata +func (c *ec2MetadataClientImpl) GetDynamicData(path string) (string, error) { + return c.client.GetDynamicData(path) +} + +// InstanceIdentityDocument returns instance identity documents +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +func (c *ec2MetadataClientImpl) InstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) { + return c.client.GetInstanceIdentityDocument() +} + +// GetMetadata returns the metadata from instance metadata service specified by the path +func (c *ec2MetadataClientImpl) GetMetadata(path string) (string, error) { + return c.client.GetMetadata(path) +} + +// PrimaryENIMAC returns the MAC address for the primary +// network interface of the instance +func (c *ec2MetadataClientImpl) PrimaryENIMAC() (string, error) { + return c.client.GetMetadata(MacResource) +} + +// AllENIMacs returns the mac addresses for all the network interfaces attached to the instance +func (c *ec2MetadataClientImpl) AllENIMacs() (string, error) { + return c.client.GetMetadata(AllMacResource) +} + +// VPCID returns the VPC id for the network interface, given +// its mac address +func (c *ec2MetadataClientImpl) VPCID(mac string) (string, error) { + return c.client.GetMetadata(fmt.Sprintf(VPCIDResourceFormat, mac)) +} + +// SubnetID returns the subnet id for the network interface, +// given its mac address +func (c *ec2MetadataClientImpl) SubnetID(mac string) (string, error) { + return c.client.GetMetadata(fmt.Sprintf(SubnetIDResourceFormat, mac)) +} + +// InstanceID returns the id of this instance. 
+func (c *ec2MetadataClientImpl) InstanceID() (string, error) { + return c.client.GetMetadata(InstanceIDResource) +} + +// GetUserData returns the userdata that was configured for the instance +func (c *ec2MetadataClientImpl) GetUserData() (string, error) { + return c.client.GetUserData() +} + +// Region returns the region the instance is running in. +func (c *ec2MetadataClientImpl) Region() (string, error) { + return c.client.Region() +} + +// PublicIPv4Address returns the public IPv4 of this instance +// if this instance has a public address +func (c *ec2MetadataClientImpl) PublicIPv4Address() (string, error) { + return c.client.GetMetadata(PublicIPv4Resource) +} + +// PrivateIPv4Address returns the private IPv4 of this instance +func (c *ec2MetadataClientImpl) PrivateIPv4Address() (string, error) { + return c.client.GetMetadata(PrivateIPv4Resource) +} + +// SpotInstanceAction returns the spot instance-action, if it has been set. +// If the time has not been set (i.e., the instance is not scheduled for interruption) +// then this function returns an error. +// see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#using-spot-instances-managing-interruptions +func (c *ec2MetadataClientImpl) SpotInstanceAction() (string, error) { + return c.client.GetMetadata(SpotInstanceActionResource) +} + +func (c *ec2MetadataClientImpl) OutpostARN() (string, error) { + return c.client.GetMetadata(OutpostARN) +} diff --git a/agent/ec2/ec2_metadata_client_test.go b/agent/ec2/ec2_metadata_client_test.go new file mode 100644 index 00000000000..0fbcbf0097f --- /dev/null +++ b/agent/ec2/ec2_metadata_client_test.go @@ -0,0 +1,254 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ec2_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/ec2" + mock_ec2 "github.com/aws/amazon-ecs-agent/agent/ec2/mocks" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + testRoleName = "test-role" + mac = "01:23:45:67:89:ab" + macs = "01:23:45:67:89:ab/\n01:23:45:67:89:ac" + vpcID = "vpc-1234" + subnetID = "subnet-1234" + iidRegion = "us-east-1" + privateIP = "127.0.0.1" + publicIP = "127.0.0.1" +) + +func makeTestRoleCredentials() ec2.RoleCredentials { + return ec2.RoleCredentials{ + Code: "Success", + LastUpdated: time.Now(), + Type: "AWS-HMAC", + AccessKeyId: "ACCESSKEY", + SecretAccessKey: "SECREKEY", + Token: "TOKEN", + Expiration: time.Now().Add(time.Duration(2 * time.Hour)), + } +} + +func ignoreError(v interface{}, _ error) interface{} { + return v +} + +var testInstanceIdentityDoc = ec2metadata.EC2InstanceIdentityDocument{ + PrivateIP: "172.1.1.1", + AvailabilityZone: "us-east-1a", + Version: "2010-08-31", + Region: "us-east-1", + AccountID: "012345678901", + InstanceID: "i-01234567", + BillingProducts: []string{"bp-01234567"}, + ImageID: "ami-12345678", + InstanceType: "t2.micro", + PendingTime: time.Now(), + Architecture: "x86_64", +} + +func testSuccessResponse(s string) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + Body: ioutil.NopCloser(bytes.NewReader([]byte(s))), + }, nil +} + +func testErrorResponse() (*http.Response, error) 
{ + return &http.Response{ + Status: "500 Broken", + StatusCode: 500, + Proto: "HTTP/1.0", + }, nil +} + +func TestDefaultCredentials(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata(ec2.SecurityCrednetialsResource).Return(testRoleName, nil) + mockGetter.EXPECT().GetMetadata(ec2.SecurityCrednetialsResource+testRoleName).Return( + string(ignoreError(json.Marshal(makeTestRoleCredentials())).([]byte)), nil) + + credentials, err := testClient.DefaultCredentials() + if err != nil { + t.Fail() + } + testCredentials := makeTestRoleCredentials() + if credentials.AccessKeyId != testCredentials.AccessKeyId { + t.Fail() + } +} + +func TestGetInstanceIdentityDoc(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetInstanceIdentityDocument().Return(testInstanceIdentityDoc, nil) + + doc, err := testClient.InstanceIdentityDocument() + assert.NoError(t, err) + assert.Equal(t, iidRegion, doc.Region) +} + +func TestErrorPropogatesUp(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetInstanceIdentityDocument().Return( + ec2metadata.EC2InstanceIdentityDocument{}, + errors.New("Something broke")) + + _, err := testClient.InstanceIdentityDocument() + assert.Error(t, err) +} + +func TestPrimaryMAC(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata(ec2.MacResource).Return(mac, nil) + + macResponse, err := testClient.PrimaryENIMAC() + assert.NoError(t, err) + assert.Equal(t, 
mac, macResponse) +} + +func TestAllENIMacs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata(ec2.AllMacResource).Return(macs, nil) + + macsResponse, err := testClient.AllENIMacs() + assert.NoError(t, err) + assert.Equal(t, macs, macsResponse) +} + +func TestVPCID(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + fmt.Sprintf(ec2.VPCIDResourceFormat, mac)).Return(vpcID, nil) + + vpcIDResponse, err := testClient.VPCID(mac) + assert.NoError(t, err) + assert.Equal(t, vpcID, vpcIDResponse) +} + +func TestSubnetID(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + fmt.Sprintf(ec2.SubnetIDResourceFormat, mac)).Return(subnetID, nil) + subnetIDResponse, err := testClient.SubnetID(mac) + assert.NoError(t, err) + assert.Equal(t, subnetID, subnetIDResponse) +} + +func TestPrivateIPv4Address(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + ec2.PrivateIPv4Resource).Return(privateIP, nil) + privateIPResponse, err := testClient.PrivateIPv4Address() + assert.NoError(t, err) + assert.Equal(t, privateIP, privateIPResponse) +} + +func TestPublicIPv4Address(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + ec2.PublicIPv4Resource).Return(publicIP, nil) + publicIPResponse, err := 
testClient.PublicIPv4Address() + assert.NoError(t, err) + assert.Equal(t, publicIP, publicIPResponse) +} + +func TestSpotInstanceAction(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + ec2.SpotInstanceActionResource).Return("{\"action\": \"terminate\", \"time\": \"2017-09-18T08:22:00Z\"}", nil) + resp, err := testClient.SpotInstanceAction() + assert.NoError(t, err) + assert.Equal(t, "{\"action\": \"terminate\", \"time\": \"2017-09-18T08:22:00Z\"}", resp) +} + +func TestSpotInstanceActionError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockGetter := mock_ec2.NewMockHttpClient(ctrl) + testClient := ec2.NewEC2MetadataClient(mockGetter) + + mockGetter.EXPECT().GetMetadata( + ec2.SpotInstanceActionResource).Return("", fmt.Errorf("ERROR")) + resp, err := testClient.SpotInstanceAction() + assert.Error(t, err) + assert.Equal(t, "", resp) +} diff --git a/agent/ec2/generate_mocks.go b/agent/ec2/generate_mocks.go new file mode 100644 index 00000000000..12f79878d7f --- /dev/null +++ b/agent/ec2/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ec2 + +//go:generate mockgen -destination=mocks/ec2_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ec2 EC2MetadataClient,HttpClient,Client,ClientSDK diff --git a/agent/ec2/http/client.go b/agent/ec2/http/client.go new file mode 100644 index 00000000000..5bd8985a742 --- /dev/null +++ b/agent/ec2/http/client.go @@ -0,0 +1,22 @@ +package http + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +import "net/http" + +// Client wraps the HTTP Get method used by the metadata client to +// read various resource values from the instance metadata service +type Client interface { + Get(string) (*http.Response, error) +} diff --git a/agent/ec2/http/generate_mocks.go b/agent/ec2/http/generate_mocks.go new file mode 100644 index 00000000000..76e3b5ec75e --- /dev/null +++ b/agent/ec2/http/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package http + +//go:generate mockgen -destination=mocks/http_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ec2/http Client diff --git a/agent/ec2/http/mocks/http_mocks.go b/agent/ec2/http/mocks/http_mocks.go new file mode 100644 index 00000000000..978ccf6210e --- /dev/null +++ b/agent/ec2/http/mocks/http_mocks.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ec2/http (interfaces: Client) + +// Package mock_http is a generated GoMock package. 
+package mock_http + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Get mocks base method +func (m *MockClient) Get(arg0 string) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockClientMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0) +} diff --git a/agent/ec2/mocks/ec2_mocks.go b/agent/ec2/mocks/ec2_mocks.go new file mode 100644 index 00000000000..8257b54cabf --- /dev/null +++ b/agent/ec2/mocks/ec2_mocks.go @@ -0,0 +1,481 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ec2 (interfaces: EC2MetadataClient,HttpClient,Client,ClientSDK) + +// Package mock_ec2 is a generated GoMock package. +package mock_ec2 + +import ( + reflect "reflect" + + ec2 "github.com/aws/amazon-ecs-agent/agent/ec2" + ecs "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + ec2metadata "github.com/aws/aws-sdk-go/aws/ec2metadata" + ec20 "github.com/aws/aws-sdk-go/service/ec2" + gomock "github.com/golang/mock/gomock" +) + +// MockEC2MetadataClient is a mock of EC2MetadataClient interface +type MockEC2MetadataClient struct { + ctrl *gomock.Controller + recorder *MockEC2MetadataClientMockRecorder +} + +// MockEC2MetadataClientMockRecorder is the mock recorder for MockEC2MetadataClient +type MockEC2MetadataClientMockRecorder struct { + mock *MockEC2MetadataClient +} + +// NewMockEC2MetadataClient creates a new mock instance +func NewMockEC2MetadataClient(ctrl *gomock.Controller) *MockEC2MetadataClient { + mock := &MockEC2MetadataClient{ctrl: ctrl} + mock.recorder = &MockEC2MetadataClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockEC2MetadataClient) EXPECT() *MockEC2MetadataClientMockRecorder { + return m.recorder +} + +// AllENIMacs mocks base method +func (m *MockEC2MetadataClient) AllENIMacs() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllENIMacs") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllENIMacs indicates an expected call of AllENIMacs +func (mr *MockEC2MetadataClientMockRecorder) AllENIMacs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllENIMacs", reflect.TypeOf((*MockEC2MetadataClient)(nil).AllENIMacs)) +} + +// DefaultCredentials mocks base method 
+func (m *MockEC2MetadataClient) DefaultCredentials() (*ec2.RoleCredentials, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DefaultCredentials") + ret0, _ := ret[0].(*ec2.RoleCredentials) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DefaultCredentials indicates an expected call of DefaultCredentials +func (mr *MockEC2MetadataClientMockRecorder) DefaultCredentials() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultCredentials", reflect.TypeOf((*MockEC2MetadataClient)(nil).DefaultCredentials)) +} + +// GetDynamicData mocks base method +func (m *MockEC2MetadataClient) GetDynamicData(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDynamicData", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDynamicData indicates an expected call of GetDynamicData +func (mr *MockEC2MetadataClientMockRecorder) GetDynamicData(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDynamicData", reflect.TypeOf((*MockEC2MetadataClient)(nil).GetDynamicData), arg0) +} + +// GetMetadata mocks base method +func (m *MockEC2MetadataClient) GetMetadata(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetadata", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata +func (mr *MockEC2MetadataClientMockRecorder) GetMetadata(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockEC2MetadataClient)(nil).GetMetadata), arg0) +} + +// GetUserData mocks base method +func (m *MockEC2MetadataClient) GetUserData() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserData") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// 
GetUserData indicates an expected call of GetUserData +func (mr *MockEC2MetadataClientMockRecorder) GetUserData() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserData", reflect.TypeOf((*MockEC2MetadataClient)(nil).GetUserData)) +} + +// InstanceID mocks base method +func (m *MockEC2MetadataClient) InstanceID() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InstanceID") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InstanceID indicates an expected call of InstanceID +func (mr *MockEC2MetadataClientMockRecorder) InstanceID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceID", reflect.TypeOf((*MockEC2MetadataClient)(nil).InstanceID)) +} + +// InstanceIdentityDocument mocks base method +func (m *MockEC2MetadataClient) InstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InstanceIdentityDocument") + ret0, _ := ret[0].(ec2metadata.EC2InstanceIdentityDocument) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InstanceIdentityDocument indicates an expected call of InstanceIdentityDocument +func (mr *MockEC2MetadataClientMockRecorder) InstanceIdentityDocument() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceIdentityDocument", reflect.TypeOf((*MockEC2MetadataClient)(nil).InstanceIdentityDocument)) +} + +// OutpostARN mocks base method +func (m *MockEC2MetadataClient) OutpostARN() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OutpostARN") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OutpostARN indicates an expected call of OutpostARN +func (mr *MockEC2MetadataClientMockRecorder) OutpostARN() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OutpostARN", 
reflect.TypeOf((*MockEC2MetadataClient)(nil).OutpostARN)) +} + +// PrimaryENIMAC mocks base method +func (m *MockEC2MetadataClient) PrimaryENIMAC() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrimaryENIMAC") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrimaryENIMAC indicates an expected call of PrimaryENIMAC +func (mr *MockEC2MetadataClientMockRecorder) PrimaryENIMAC() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryENIMAC", reflect.TypeOf((*MockEC2MetadataClient)(nil).PrimaryENIMAC)) +} + +// PrivateIPv4Address mocks base method +func (m *MockEC2MetadataClient) PrivateIPv4Address() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrivateIPv4Address") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrivateIPv4Address indicates an expected call of PrivateIPv4Address +func (mr *MockEC2MetadataClientMockRecorder) PrivateIPv4Address() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrivateIPv4Address", reflect.TypeOf((*MockEC2MetadataClient)(nil).PrivateIPv4Address)) +} + +// PublicIPv4Address mocks base method +func (m *MockEC2MetadataClient) PublicIPv4Address() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicIPv4Address") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PublicIPv4Address indicates an expected call of PublicIPv4Address +func (mr *MockEC2MetadataClientMockRecorder) PublicIPv4Address() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicIPv4Address", reflect.TypeOf((*MockEC2MetadataClient)(nil).PublicIPv4Address)) +} + +// Region mocks base method +func (m *MockEC2MetadataClient) Region() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Region") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, 
ret1 +} + +// Region indicates an expected call of Region +func (mr *MockEC2MetadataClientMockRecorder) Region() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Region", reflect.TypeOf((*MockEC2MetadataClient)(nil).Region)) +} + +// SpotInstanceAction mocks base method +func (m *MockEC2MetadataClient) SpotInstanceAction() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SpotInstanceAction") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SpotInstanceAction indicates an expected call of SpotInstanceAction +func (mr *MockEC2MetadataClientMockRecorder) SpotInstanceAction() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpotInstanceAction", reflect.TypeOf((*MockEC2MetadataClient)(nil).SpotInstanceAction)) +} + +// SubnetID mocks base method +func (m *MockEC2MetadataClient) SubnetID(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubnetID indicates an expected call of SubnetID +func (mr *MockEC2MetadataClientMockRecorder) SubnetID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockEC2MetadataClient)(nil).SubnetID), arg0) +} + +// VPCID mocks base method +func (m *MockEC2MetadataClient) VPCID(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VPCID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// VPCID indicates an expected call of VPCID +func (mr *MockEC2MetadataClientMockRecorder) VPCID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VPCID", reflect.TypeOf((*MockEC2MetadataClient)(nil).VPCID), arg0) +} + +// MockHttpClient is a mock of HttpClient interface 
+type MockHttpClient struct { + ctrl *gomock.Controller + recorder *MockHttpClientMockRecorder +} + +// MockHttpClientMockRecorder is the mock recorder for MockHttpClient +type MockHttpClientMockRecorder struct { + mock *MockHttpClient +} + +// NewMockHttpClient creates a new mock instance +func NewMockHttpClient(ctrl *gomock.Controller) *MockHttpClient { + mock := &MockHttpClient{ctrl: ctrl} + mock.recorder = &MockHttpClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockHttpClient) EXPECT() *MockHttpClientMockRecorder { + return m.recorder +} + +// GetDynamicData mocks base method +func (m *MockHttpClient) GetDynamicData(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDynamicData", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDynamicData indicates an expected call of GetDynamicData +func (mr *MockHttpClientMockRecorder) GetDynamicData(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDynamicData", reflect.TypeOf((*MockHttpClient)(nil).GetDynamicData), arg0) +} + +// GetInstanceIdentityDocument mocks base method +func (m *MockHttpClient) GetInstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInstanceIdentityDocument") + ret0, _ := ret[0].(ec2metadata.EC2InstanceIdentityDocument) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetInstanceIdentityDocument indicates an expected call of GetInstanceIdentityDocument +func (mr *MockHttpClientMockRecorder) GetInstanceIdentityDocument() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceIdentityDocument", reflect.TypeOf((*MockHttpClient)(nil).GetInstanceIdentityDocument)) +} + +// GetMetadata mocks base method +func (m *MockHttpClient) GetMetadata(arg0 
string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetadata", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata +func (mr *MockHttpClientMockRecorder) GetMetadata(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockHttpClient)(nil).GetMetadata), arg0) +} + +// GetUserData mocks base method +func (m *MockHttpClient) GetUserData() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUserData") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUserData indicates an expected call of GetUserData +func (mr *MockHttpClientMockRecorder) GetUserData() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserData", reflect.TypeOf((*MockHttpClient)(nil).GetUserData)) +} + +// Region mocks base method +func (m *MockHttpClient) Region() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Region") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Region indicates an expected call of Region +func (mr *MockHttpClientMockRecorder) Region() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Region", reflect.TypeOf((*MockHttpClient)(nil).Region)) +} + +// MockClient is a mock of Client interface +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate 
expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// CreateTags mocks base method +func (m *MockClient) CreateTags(arg0 *ec20.CreateTagsInput) (*ec20.CreateTagsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTags", arg0) + ret0, _ := ret[0].(*ec20.CreateTagsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTags indicates an expected call of CreateTags +func (mr *MockClientMockRecorder) CreateTags(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTags", reflect.TypeOf((*MockClient)(nil).CreateTags), arg0) +} + +// DescribeECSTagsForInstance mocks base method +func (m *MockClient) DescribeECSTagsForInstance(arg0 string) ([]*ecs.Tag, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeECSTagsForInstance", arg0) + ret0, _ := ret[0].([]*ecs.Tag) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeECSTagsForInstance indicates an expected call of DescribeECSTagsForInstance +func (mr *MockClientMockRecorder) DescribeECSTagsForInstance(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeECSTagsForInstance", reflect.TypeOf((*MockClient)(nil).DescribeECSTagsForInstance), arg0) +} + +// MockClientSDK is a mock of ClientSDK interface +type MockClientSDK struct { + ctrl *gomock.Controller + recorder *MockClientSDKMockRecorder +} + +// MockClientSDKMockRecorder is the mock recorder for MockClientSDK +type MockClientSDKMockRecorder struct { + mock *MockClientSDK +} + +// NewMockClientSDK creates a new mock instance +func NewMockClientSDK(ctrl *gomock.Controller) *MockClientSDK { + mock := &MockClientSDK{ctrl: ctrl} + mock.recorder = &MockClientSDKMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClientSDK) EXPECT() *MockClientSDKMockRecorder { + return 
m.recorder +} + +// CreateTags mocks base method +func (m *MockClientSDK) CreateTags(arg0 *ec20.CreateTagsInput) (*ec20.CreateTagsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTags", arg0) + ret0, _ := ret[0].(*ec20.CreateTagsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTags indicates an expected call of CreateTags +func (mr *MockClientSDKMockRecorder) CreateTags(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTags", reflect.TypeOf((*MockClientSDK)(nil).CreateTags), arg0) +} + +// DescribeTags mocks base method +func (m *MockClientSDK) DescribeTags(arg0 *ec20.DescribeTagsInput) (*ec20.DescribeTagsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTags", arg0) + ret0, _ := ret[0].(*ec20.DescribeTagsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTags indicates an expected call of DescribeTags +func (mr *MockClientSDKMockRecorder) DescribeTags(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTags", reflect.TypeOf((*MockClientSDK)(nil).DescribeTags), arg0) +} diff --git a/agent/ecr/client.go b/agent/ecr/client.go new file mode 100644 index 00000000000..897c0570560 --- /dev/null +++ b/agent/ecr/client.go @@ -0,0 +1,69 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecr + +import ( + "fmt" + "time" + + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/aws-sdk-go/aws" + log "github.com/cihub/seelog" +) + +const ( + MinimumJitterDuration = 30 * time.Minute + MaximumJitterDuration = 1 * time.Hour +) + +// ECRClient wrapper interface for mocking +type ECRClient interface { + GetAuthorizationToken(registryId string) (*ecrapi.AuthorizationData, error) +} + +// ECRSDK is an interface that specifies the subset of the AWS Go SDK's ECR +// client that the Agent uses. This interface is meant to allow injecting a +// mock for testing. +type ECRSDK interface { + GetAuthorizationToken(*ecrapi.GetAuthorizationTokenInput) (*ecrapi.GetAuthorizationTokenOutput, error) +} + +type ecrClient struct { + sdkClient ECRSDK +} + +// NewECRClient creates an ECR client used to get docker auth from ECR +func NewECRClient(sdkClient ECRSDK) ECRClient { + return &ecrClient{ + sdkClient: sdkClient, + } +} + +// GetAuthorizationToken calls the ecr api to get the docker auth for the specified registry +func (client *ecrClient) GetAuthorizationToken(registryId string) (*ecrapi.AuthorizationData, error) { + log.Debugf("Calling GetAuthorizationToken for %q", registryId) + + output, err := client.sdkClient.GetAuthorizationToken(&ecrapi.GetAuthorizationTokenInput{ + RegistryIds: []*string{aws.String(registryId)}, + }) + + if err != nil { + return nil, err + } + + if len(output.AuthorizationData) != 1 { + return nil, fmt.Errorf("unexpected number of results in AuthorizationData (%d)", len(output.AuthorizationData)) + } + return output.AuthorizationData[0], nil +} diff --git a/agent/ecr/client_test.go b/agent/ecr/client_test.go new file mode 100644 index 00000000000..cc1a5eff436 --- /dev/null +++ b/agent/ecr/client_test.go @@ -0,0 +1,79 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// ecr_test package to avoid test dependency cycle on ecr/mocks +package ecr_test + +import ( + "errors" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/ecr" + mock_ecr "github.com/aws/amazon-ecs-agent/agent/ecr/mocks" + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +const testRegistryId = "testRegistryId" + +// test suite struct for handling mocks and test client +type GetAuthorizationTokenTestSuite struct { + suite.Suite + ctrl *gomock.Controller + mockClient *mock_ecr.MockECRSDK + ecrClient ecr.ECRClient +} + +// test suite setup & teardown +func TestGetAuthorizationTokenSuite(t *testing.T) { + suite.Run(t, new(GetAuthorizationTokenTestSuite)) +} + +func (suite *GetAuthorizationTokenTestSuite) SetupTest() { + suite.ctrl = gomock.NewController(suite.T()) + suite.mockClient = mock_ecr.NewMockECRSDK(suite.ctrl) + suite.ecrClient = ecr.NewECRClient(suite.mockClient) +} + +func (suite *GetAuthorizationTokenTestSuite) TeardownTest() { + suite.ctrl.Finish() +} + +func (suite *GetAuthorizationTokenTestSuite) TestGetAuthorizationTokenMissingAuthData() { + suite.mockClient.EXPECT().GetAuthorizationToken( + &ecrapi.GetAuthorizationTokenInput{ + RegistryIds: []*string{aws.String(testRegistryId)}, + }).Return(&ecrapi.GetAuthorizationTokenOutput{ + AuthorizationData: []*ecrapi.AuthorizationData{}, + }, nil) + + authorizationData, err := 
suite.ecrClient.GetAuthorizationToken(testRegistryId) + assert.Error(suite.T(), err) + assert.Nil(suite.T(), authorizationData) +} + +func (suite *GetAuthorizationTokenTestSuite) TestGetAuthorizationTokenError() { + suite.mockClient.EXPECT().GetAuthorizationToken( + &ecrapi.GetAuthorizationTokenInput{ + RegistryIds: []*string{aws.String(testRegistryId)}, + }).Return(nil, errors.New("Nope Nope Nope")) + + authorizationData, err := suite.ecrClient.GetAuthorizationToken(testRegistryId) + assert.Error(suite.T(), err) + assert.Nil(suite.T(), authorizationData) +} diff --git a/agent/ecr/factory.go b/agent/ecr/factory.go new file mode 100644 index 00000000000..6fc7cd3401c --- /dev/null +++ b/agent/ecr/factory.go @@ -0,0 +1,84 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// Package ecr helps generate clients to talk to the ECR API +package ecr + +import ( + "fmt" + "net/http" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/credentials" + ecrapi "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + "github.com/aws/aws-sdk-go/aws" + awscreds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" +) + +// ECRFactory defines the interface to produce an ECR SDK client +type ECRFactory interface { + GetClient(*apicontainer.ECRAuthData) (ECRClient, error) +} + +type ecrFactory struct { + httpClient *http.Client +} + +const ( + roundtripTimeout = 5 * time.Second +) + +// NewECRFactory returns an ECRFactory capable of producing ECRSDK clients +func NewECRFactory(acceptInsecureCert bool) ECRFactory { + return &ecrFactory{ + httpClient: httpclient.New(roundtripTimeout, acceptInsecureCert), + } +} + +// GetClient creates the ECR SDK client based on the authdata +func (factory *ecrFactory) GetClient(authData *apicontainer.ECRAuthData) (ECRClient, error) { + clientConfig, err := getClientConfig(factory.httpClient, authData) + if err != nil { + return &ecrClient{}, err + } + + return factory.newClient(clientConfig), nil +} + +// getClientConfig returns the config for the ecr client based on authData +func getClientConfig(httpClient *http.Client, authData *apicontainer.ECRAuthData) (*aws.Config, error) { + cfg := aws.NewConfig().WithRegion(authData.Region).WithHTTPClient(httpClient) + if authData.EndpointOverride != "" { + cfg.Endpoint = aws.String(authData.EndpointOverride) + } + + if authData.UseExecutionRole { + if authData.GetPullCredentials() == (credentials.IAMRoleCredentials{}) { + return nil, fmt.Errorf("container uses execution credentials, but the credentials are empty") + } + creds := awscreds.NewStaticCredentials(authData.GetPullCredentials().AccessKeyID, + 
authData.GetPullCredentials().SecretAccessKey, + authData.GetPullCredentials().SessionToken) + cfg = cfg.WithCredentials(creds) + } + + return cfg, nil +} + +func (factory *ecrFactory) newClient(cfg *aws.Config) ECRClient { + sdkClient := ecrapi.New(session.New(cfg)) + return NewECRClient(sdkClient) +} diff --git a/agent/ecr/factory_test.go b/agent/ecr/factory_test.go new file mode 100644 index 00000000000..9a5259f0ec0 --- /dev/null +++ b/agent/ecr/factory_test.go @@ -0,0 +1,36 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecr + +import ( + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/stretchr/testify/assert" +) + +func TestGetClientConfigEndpointOverride(t *testing.T) { + testAuthData := &apicontainer.ECRAuthData{ + EndpointOverride: "api.ecr.us-west-2.amazonaws.com", + Region: "us-west-2", + UseExecutionRole: false, + } + + cfg, err := getClientConfig(nil, testAuthData) + + assert.Nil(t, err) + assert.Equal(t, testAuthData.EndpointOverride, *cfg.Endpoint) +} diff --git a/agent/ecr/generate_mocks.go b/agent/ecr/generate_mocks.go new file mode 100644 index 00000000000..bff164b3aa3 --- /dev/null +++ b/agent/ecr/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecr + +//go:generate mockgen -destination=mocks/ecr_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ecr ECRSDK,ECRFactory,ECRClient diff --git a/agent/ecr/mocks/ecr_mocks.go b/agent/ecr/mocks/ecr_mocks.go new file mode 100644 index 00000000000..9a36e3f831b --- /dev/null +++ b/agent/ecr/mocks/ecr_mocks.go @@ -0,0 +1,142 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ecr (interfaces: ECRSDK,ECRFactory,ECRClient) + +// Package mock_ecr is a generated GoMock package. 
+package mock_ecr + +import ( + reflect "reflect" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + ecr "github.com/aws/amazon-ecs-agent/agent/ecr" + ecr0 "github.com/aws/amazon-ecs-agent/agent/ecr/model/ecr" + gomock "github.com/golang/mock/gomock" +) + +// MockECRSDK is a mock of ECRSDK interface +type MockECRSDK struct { + ctrl *gomock.Controller + recorder *MockECRSDKMockRecorder +} + +// MockECRSDKMockRecorder is the mock recorder for MockECRSDK +type MockECRSDKMockRecorder struct { + mock *MockECRSDK +} + +// NewMockECRSDK creates a new mock instance +func NewMockECRSDK(ctrl *gomock.Controller) *MockECRSDK { + mock := &MockECRSDK{ctrl: ctrl} + mock.recorder = &MockECRSDKMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECRSDK) EXPECT() *MockECRSDKMockRecorder { + return m.recorder +} + +// GetAuthorizationToken mocks base method +func (m *MockECRSDK) GetAuthorizationToken(arg0 *ecr0.GetAuthorizationTokenInput) (*ecr0.GetAuthorizationTokenOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAuthorizationToken", arg0) + ret0, _ := ret[0].(*ecr0.GetAuthorizationTokenOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAuthorizationToken indicates an expected call of GetAuthorizationToken +func (mr *MockECRSDKMockRecorder) GetAuthorizationToken(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizationToken", reflect.TypeOf((*MockECRSDK)(nil).GetAuthorizationToken), arg0) +} + +// MockECRFactory is a mock of ECRFactory interface +type MockECRFactory struct { + ctrl *gomock.Controller + recorder *MockECRFactoryMockRecorder +} + +// MockECRFactoryMockRecorder is the mock recorder for MockECRFactory +type MockECRFactoryMockRecorder struct { + mock *MockECRFactory +} + +// NewMockECRFactory creates a new mock instance +func NewMockECRFactory(ctrl *gomock.Controller) 
*MockECRFactory { + mock := &MockECRFactory{ctrl: ctrl} + mock.recorder = &MockECRFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECRFactory) EXPECT() *MockECRFactoryMockRecorder { + return m.recorder +} + +// GetClient mocks base method +func (m *MockECRFactory) GetClient(arg0 *container.ECRAuthData) (ecr.ECRClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClient", arg0) + ret0, _ := ret[0].(ecr.ECRClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClient indicates an expected call of GetClient +func (mr *MockECRFactoryMockRecorder) GetClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockECRFactory)(nil).GetClient), arg0) +} + +// MockECRClient is a mock of ECRClient interface +type MockECRClient struct { + ctrl *gomock.Controller + recorder *MockECRClientMockRecorder +} + +// MockECRClientMockRecorder is the mock recorder for MockECRClient +type MockECRClientMockRecorder struct { + mock *MockECRClient +} + +// NewMockECRClient creates a new mock instance +func NewMockECRClient(ctrl *gomock.Controller) *MockECRClient { + mock := &MockECRClient{ctrl: ctrl} + mock.recorder = &MockECRClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockECRClient) EXPECT() *MockECRClientMockRecorder { + return m.recorder +} + +// GetAuthorizationToken mocks base method +func (m *MockECRClient) GetAuthorizationToken(arg0 string) (*ecr0.AuthorizationData, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAuthorizationToken", arg0) + ret0, _ := ret[0].(*ecr0.AuthorizationData) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAuthorizationToken indicates an expected call of GetAuthorizationToken +func (mr *MockECRClientMockRecorder) GetAuthorizationToken(arg0 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizationToken", reflect.TypeOf((*MockECRClient)(nil).GetAuthorizationToken), arg0) +} diff --git a/agent/ecr/model/api/api-2.json b/agent/ecr/model/api/api-2.json new file mode 100644 index 00000000000..75ecd89cb36 --- /dev/null +++ b/agent/ecr/model/api/api-2.json @@ -0,0 +1,95 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-09-21", + "endpointPrefix":"api.ecr", + "jsonVersion":"1.1", + "serviceAbbreviation":"Amazon ECR", + "serviceFullName":"Amazon EC2 Container Registry", + "signatureVersion":"v4", + "signingName":"ecr", + "targetPrefix":"AmazonEC2ContainerRegistry_V20150921", + "protocol":"json" + }, + "operations":{ + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResponse"}, + "errors":[ + { + "shape":"ServerException", + "exception":true, + "fault":true + }, + { + "shape":"InvalidParameterException", + "error":{"httpStatusCode":422}, + "exception":true + } + ] + } + }, + "shapes":{ + "Arn":{"type":"string"}, + "AuthorizationData":{ + "type":"structure", + "members":{ + "authorizationToken":{"shape":"Base64"}, + "expiresAt":{"shape":"ExpirationTimestamp"}, + "proxyEndpoint":{"shape":"ProxyEndpoint"} + } + }, + "AuthorizationDataList":{ + "type":"list", + "member":{"shape":"AuthorizationData"} + }, + "Base64":{ + "type":"string", + "pattern":"^\\S+$" + }, + "ExceptionMessage":{"type":"string"}, + "ExpirationTimestamp":{"type":"timestamp"}, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "members":{ + "registryIds":{"shape":"RegistryIdList"} + } + }, + "GetAuthorizationTokenResponse":{ + "type":"structure", + "members":{ + "authorizationData":{"shape":"AuthorizationDataList"} + } + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + 
"message":{"shape":"ExceptionMessage"} + }, + "error":{"httpStatusCode":422}, + "exception":true + }, + "ProxyEndpoint":{"type":"string"}, + "RegistryId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "RegistryIdList":{ + "type":"list", + "member":{"shape":"RegistryId"} + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true, + "fault":true + } + } +} diff --git a/agent/ecr/model/ecr/api.go b/agent/ecr/model/ecr/api.go new file mode 100644 index 00000000000..9a03b27f87e --- /dev/null +++ b/agent/ecr/model/ecr/api.go @@ -0,0 +1,293 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecr + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opGetAuthorizationToken = "GetAuthorizationToken" + +// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetAuthorizationToken for more information on using the GetAuthorizationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAuthorizationTokenRequest method. +// req, resp := client.GetAuthorizationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { + op := &request.Operation{ + Name: opGetAuthorizationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAuthorizationTokenInput{} + } + + output = &GetAuthorizationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAuthorizationToken API operation for Amazon EC2 Container Registry. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetAuthorizationToken for usage and error information. +// +// Returned Error Types: +// * ServerException +// +// * InvalidParameterException +// +func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + return out, req.Send() +} + +// GetAuthorizationTokenWithContext is the same as GetAuthorizationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetAuthorizationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetAuthorizationTokenWithContext(ctx aws.Context, input *GetAuthorizationTokenInput, opts ...request.Option) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AuthorizationData struct { + _ struct{} `type:"structure"` + + AuthorizationToken *string `locationName:"authorizationToken" type:"string"` + + ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"` + + ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"` +} + +// String returns the string representation +func (s AuthorizationData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizationData) GoString() string { + return s.String() +} + +// SetAuthorizationToken sets the AuthorizationToken field's value. +func (s *AuthorizationData) SetAuthorizationToken(v string) *AuthorizationData { + s.AuthorizationToken = &v + return s +} + +// SetExpiresAt sets the ExpiresAt field's value. +func (s *AuthorizationData) SetExpiresAt(v time.Time) *AuthorizationData { + s.ExpiresAt = &v + return s +} + +// SetProxyEndpoint sets the ProxyEndpoint field's value. 
+func (s *AuthorizationData) SetProxyEndpoint(v string) *AuthorizationData { + s.ProxyEndpoint = &v + return s +} + +type GetAuthorizationTokenInput struct { + _ struct{} `type:"structure"` + + RegistryIds []*string `locationName:"registryIds" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenInput) GoString() string { + return s.String() +} + +// SetRegistryIds sets the RegistryIds field's value. +func (s *GetAuthorizationTokenInput) SetRegistryIds(v []*string) *GetAuthorizationTokenInput { + s.RegistryIds = v + return s +} + +type GetAuthorizationTokenOutput struct { + _ struct{} `type:"structure"` + + AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"` +} + +// String returns the string representation +func (s GetAuthorizationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAuthorizationTokenOutput) GoString() string { + return s.String() +} + +// SetAuthorizationData sets the AuthorizationData field's value. 
// SetAuthorizationData sets the AuthorizationData field's value.
func (s *GetAuthorizationTokenOutput) SetAuthorizationData(v []*AuthorizationData) *GetAuthorizationTokenOutput {
	s.AuthorizationData = v
	return s
}

// InvalidParameterException models the service's "InvalidParameterException"
// error shape (HTTP 422 per the api-2.json model); it satisfies awserr.Error
// and awserr.RequestFailure via the methods below.
type InvalidParameterException struct {
	_ struct{} `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidParameterException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidParameterException) GoString() string {
	return s.String()
}

// newErrorInvalidParameterException constructs the typed error from the
// response metadata of a failed request.
func newErrorInvalidParameterException(v protocol.ResponseMetadata) error {
	return &InvalidParameterException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidParameterException) Code() string {
	return "InvalidParameterException"
}

// Message returns the exception's message.
func (s *InvalidParameterException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidParameterException) OrigErr() error {
	return nil
}

// Error formats the error as "<code>: <message>".
func (s *InvalidParameterException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
// (comment typo fix: previously read "Status code returns ...", breaking
// godoc's identifier-first convention)
func (s *InvalidParameterException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidParameterException) RequestID() string {
	return s.RespMetadata.RequestID
}

// ServerException models the service's "ServerException" fault shape; it
// satisfies awserr.Error and awserr.RequestFailure via the methods below.
type ServerException struct {
	_ struct{} `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServerException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServerException) GoString() string {
	return s.String()
}

// newErrorServerException constructs the typed error from the response
// metadata of a failed request.
func newErrorServerException(v protocol.ResponseMetadata) error {
	return &ServerException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *ServerException) Code() string {
	return "ServerException"
}

// Message returns the exception's message.
func (s *ServerException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ServerException) OrigErr() error {
	return nil
}

// Error formats the error as "<code>: <message>".
func (s *ServerException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
// (comment typo fix: previously read "Status code returns ...")
func (s *ServerException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ServerException) RequestID() string {
	return s.RespMetadata.RequestID
}
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecr + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeServerException for service response error code + // "ServerException". + ErrCodeServerException = "ServerException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "InvalidParameterException": newErrorInvalidParameterException, + "ServerException": newErrorServerException, +} diff --git a/agent/ecr/model/ecr/service.go b/agent/ecr/model/ecr/service.go new file mode 100644 index 00000000000..c58f433ab7f --- /dev/null +++ b/agent/ecr/model/ecr/service.go @@ -0,0 +1,119 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. 
+ +package ecr + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// ECR provides the API operation methods for making requests to +// Amazon EC2 Container Registry. See this package's package overview docs +// for details on the service. +// +// ECR methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type ECR struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ecr" // Name of service. + EndpointsID = "api.ecr" // ID to lookup a service endpoint with. + ServiceID = "ECR" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the ECR client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a ECR client from just a session. +// svc := ecr.New(mySession) +// +// // Create a ECR client with additional configuration +// svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "ecr" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECR { + svc := &ECR{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2015-09-21", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerRegistry_V20150921", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECR operation and runs any +// custom request initialization. +func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/ecr/model/generate.go b/agent/ecr/model/generate.go new file mode 100644 index 00000000000..80d6bb73084 --- /dev/null +++ b/agent/ecr/model/generate.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package model + +// codegen tag required by AWS SDK generators +//go:generate go run -tags codegen ../../gogenerate/awssdk.go -typesOnly=false -copyright_file ../../../scripts/copyright_file diff --git a/agent/ecs_client/model/api/api-2.json b/agent/ecs_client/model/api/api-2.json new file mode 100644 index 00000000000..6bd3502e205 --- /dev/null +++ b/agent/ecs_client/model/api/api-2.json @@ -0,0 +1,2439 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2014-11-13", + "endpointPrefix":"ecs", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECS", + "serviceFullName":"Amazon Elastic Container Service", + "serviceId":"ECS", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerServiceV20141113", + "uid":"ecs-2014-11-13" + }, + "operations":{ + "CreateCluster":{ + "name":"CreateCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterRequest"}, + "output":{"shape":"CreateClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "CreateService":{ + "name":"CreateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateServiceRequest"}, + "output":{"shape":"CreateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"UnsupportedFeatureException"}, + {"shape":"PlatformUnknownException"}, + {"shape":"PlatformTaskDefinitionIncompatibilityException"}, + {"shape":"AccessDeniedException"} + ] + }, + "DeleteAccountSetting":{ + "name":"DeleteAccountSetting", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"DeleteAccountSettingRequest"}, + "output":{"shape":"DeleteAccountSettingResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DeleteAttributes":{ + "name":"DeleteAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAttributesRequest"}, + "output":{"shape":"DeleteAttributesResponse"}, + "errors":[ + {"shape":"ClusterNotFoundException"}, + {"shape":"TargetNotFoundException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DeleteCluster":{ + "name":"DeleteCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterRequest"}, + "output":{"shape":"DeleteClusterResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ClusterContainsContainerInstancesException"}, + {"shape":"ClusterContainsServicesException"}, + {"shape":"ClusterContainsTasksException"} + ] + }, + "DeleteService":{ + "name":"DeleteService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServiceRequest"}, + "output":{"shape":"DeleteServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "DeregisterContainerInstance":{ + "name":"DeregisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterContainerInstanceRequest"}, + "output":{"shape":"DeregisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DeregisterTaskDefinition":{ + "name":"DeregisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, 
+ "input":{"shape":"DeregisterTaskDefinitionRequest"}, + "output":{"shape":"DeregisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeClusters":{ + "name":"DescribeClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClustersRequest"}, + "output":{"shape":"DescribeClustersResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeContainerInstances":{ + "name":"DescribeContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeContainerInstancesRequest"}, + "output":{"shape":"DescribeContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeServices":{ + "name":"DescribeServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeServicesRequest"}, + "output":{"shape":"DescribeServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "DescribeTaskDefinition":{ + "name":"DescribeTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTaskDefinitionRequest"}, + "output":{"shape":"DescribeTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "DescribeTasks":{ + "name":"DescribeTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTasksRequest"}, + "output":{"shape":"DescribeTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + 
{"shape":"ClusterNotFoundException"} + ] + }, + "DiscoverPollEndpoint":{ + "name":"DiscoverPollEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DiscoverPollEndpointRequest"}, + "output":{"shape":"DiscoverPollEndpointResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"} + ] + }, + "ListAttributes":{ + "name":"ListAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAttributesRequest"}, + "output":{"shape":"ListAttributesResponse"}, + "errors":[ + {"shape":"ClusterNotFoundException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListClusters":{ + "name":"ListClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClustersRequest"}, + "output":{"shape":"ListClustersResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListContainerInstances":{ + "name":"ListContainerInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListContainerInstancesRequest"}, + "output":{"shape":"ListContainerInstancesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListServices":{ + "name":"ListServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicesRequest"}, + "output":{"shape":"ListServicesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + 
{"shape":"ClusterNotFoundException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTaskDefinitionFamilies":{ + "name":"ListTaskDefinitionFamilies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionFamiliesRequest"}, + "output":{"shape":"ListTaskDefinitionFamiliesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTaskDefinitions":{ + "name":"ListTaskDefinitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTaskDefinitionsRequest"}, + "output":{"shape":"ListTaskDefinitionsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "ListTasks":{ + "name":"ListTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTasksRequest"}, + "output":{"shape":"ListTasksResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"} + ] + }, + "PutAccountSetting":{ + "name":"PutAccountSetting", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAccountSettingRequest"}, + "output":{"shape":"PutAccountSettingResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "PutAttributes":{ + "name":"PutAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutAttributesRequest"}, + "output":{"shape":"PutAttributesResponse"}, + "errors":[ + {"shape":"ClusterNotFoundException"}, + {"shape":"TargetNotFoundException"}, + {"shape":"AttributeLimitExceededException"}, + {"shape":"InvalidParameterException"} + ] + }, + "RegisterContainerInstance":{ + "name":"RegisterContainerInstance", + "http":{ + "method":"POST", + "requestUri":"/" + 
}, + "input":{"shape":"RegisterContainerInstanceRequest"}, + "output":{"shape":"RegisterContainerInstanceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "RegisterTaskDefinition":{ + "name":"RegisterTaskDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterTaskDefinitionRequest"}, + "output":{"shape":"RegisterTaskDefinitionResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ] + }, + "RunTask":{ + "name":"RunTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RunTaskRequest"}, + "output":{"shape":"RunTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"UnsupportedFeatureException"}, + {"shape":"PlatformUnknownException"}, + {"shape":"PlatformTaskDefinitionIncompatibilityException"}, + {"shape":"AccessDeniedException"}, + {"shape":"BlockedException"} + ] + }, + "StartTask":{ + "name":"StartTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTaskRequest"}, + "output":{"shape":"StartTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "StopTask":{ + "name":"StopTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopTaskRequest"}, + "output":{"shape":"StopTaskResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "SubmitAttachmentStateChanges":{ + "name":"SubmitAttachmentStateChanges", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitAttachmentStateChangesRequest"}, + 
"output":{"shape":"SubmitAttachmentStateChangesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterException"} + ] + }, + "SubmitContainerStateChange":{ + "name":"SubmitContainerStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitContainerStateChangeRequest"}, + "output":{"shape":"SubmitContainerStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"AccessDeniedException"} + ] + }, + "SubmitTaskStateChange":{ + "name":"SubmitTaskStateChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SubmitTaskStateChangeRequest"}, + "output":{"shape":"SubmitTaskStateChangeResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"AccessDeniedException"} + ] + }, + "UpdateContainerAgent":{ + "name":"UpdateContainerAgent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContainerAgentRequest"}, + "output":{"shape":"UpdateContainerAgentResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"UpdateInProgressException"}, + {"shape":"NoUpdateAvailableException"}, + {"shape":"MissingVersionException"} + ] + }, + "UpdateContainerInstancesState":{ + "name":"UpdateContainerInstancesState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContainerInstancesStateRequest"}, + "output":{"shape":"UpdateContainerInstancesStateResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"} + ] + }, + "UpdateService":{ + "name":"UpdateService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceRequest"}, + 
"output":{"shape":"UpdateServiceResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClusterNotFoundException"}, + {"shape":"ServiceNotFoundException"}, + {"shape":"ServiceNotActiveException"}, + {"shape":"PlatformUnknownException"}, + {"shape":"PlatformTaskDefinitionIncompatibilityException"}, + {"shape":"AccessDeniedException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "AgentUpdateStatus":{ + "type":"string", + "enum":[ + "PENDING", + "STAGING", + "STAGED", + "UPDATING", + "UPDATED", + "FAILED" + ] + }, + "AssignPublicIp":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "Attachment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "type":{"shape":"String"}, + "status":{"shape":"String"}, + "details":{"shape":"AttachmentDetails"} + } + }, + "AttachmentDetails":{ + "type":"list", + "member":{"shape":"KeyValuePair"} + }, + "AttachmentStateChange":{ + "type":"structure", + "required":[ + "attachmentArn", + "status" + ], + "members":{ + "attachmentArn":{"shape":"String"}, + "status":{"shape":"String"} + } + }, + "AttachmentStateChanges":{ + "type":"list", + "member":{"shape":"AttachmentStateChange"} + }, + "Attachments":{ + "type":"list", + "member":{"shape":"Attachment"} + }, + "Attribute":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"}, + "targetType":{"shape":"TargetType"}, + "targetId":{"shape":"String"} + } + }, + "AttributeLimitExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Attributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "AwsVpcConfiguration":{ + "type":"structure", + "required":["subnets"], + "members":{ + "subnets":{"shape":"StringList"}, + "securityGroups":{"shape":"StringList"}, + "assignPublicIp":{"shape":"AssignPublicIp"} 
+ } + }, + "BlockedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "ClientException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Cluster":{ + "type":"structure", + "members":{ + "clusterArn":{"shape":"String"}, + "clusterName":{"shape":"String"}, + "status":{"shape":"String"}, + "registeredContainerInstancesCount":{"shape":"Integer"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "activeServicesCount":{"shape":"Integer"} + } + }, + "ClusterContainsContainerInstancesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterContainsServicesException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterContainsTasksException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ClusterField":{ + "type":"string", + "enum":["STATISTICS"] + }, + "ClusterFieldList":{ + "type":"list", + "member":{"shape":"ClusterField"} + }, + "ClusterNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Clusters":{ + "type":"list", + "member":{"shape":"Cluster"} + }, + "Compatibility":{ + "type":"string", + "enum":[ + "EC2", + "FARGATE" + ] + }, + "CompatibilityList":{ + "type":"list", + "member":{"shape":"Compatibility"} + }, + "Connectivity":{ + "type":"string", + "enum":[ + "CONNECTED", + "DISCONNECTED" + ] + }, + "Container":{ + "type":"structure", + "members":{ + "containerArn":{"shape":"String"}, + "taskArn":{"shape":"String"}, + "name":{"shape":"String"}, + "runtimeId":{"shape": "String"}, + "imageDigest": {"shape": "String"}, + "lastStatus":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"}, + 
"networkInterfaces":{"shape":"NetworkInterfaces"}, + "healthStatus":{"shape":"HealthStatus"}, + "managedAgents":{"shape":"ManagedAgents"}, + "firelensConfiguration":{"shape":"FirelensConfiguration"} + } + }, + "ContainerCondition":{ + "type":"string", + "enum":[ + "START", + "COMPLETE", + "SUCCESS", + "HEALTHY" + ] + }, + "ContainerDefinition":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "image":{"shape":"String"}, + "repositoryCredentials":{"shape":"RepositoryCredentials"}, + "cpu":{"shape":"Integer"}, + "memory":{"shape":"BoxedInteger"}, + "memoryReservation":{"shape":"BoxedInteger"}, + "links":{"shape":"StringList"}, + "portMappings":{"shape":"PortMappingList"}, + "essential":{"shape":"BoxedBoolean"}, + "entryPoint":{"shape":"StringList"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"}, + "mountPoints":{"shape":"MountPointList"}, + "volumesFrom":{"shape":"VolumeFromList"}, + "linuxParameters":{"shape":"LinuxParameters"}, + "secrets":{"shape":"SecretList"}, + "inferenceDevices":{"shape":"StringList"}, + "dependsOn":{"shape":"ContainerDependencies"}, + "startTimeout":{"shape":"BoxedInteger"}, + "stopTimeout":{"shape":"BoxedInteger"}, + "hostname":{"shape":"String"}, + "user":{"shape":"String"}, + "workingDirectory":{"shape":"String"}, + "disableNetworking":{"shape":"BoxedBoolean"}, + "privileged":{"shape":"BoxedBoolean"}, + "readonlyRootFilesystem":{"shape":"BoxedBoolean"}, + "dnsServers":{"shape":"StringList"}, + "dnsSearchDomains":{"shape":"StringList"}, + "extraHosts":{"shape":"HostEntryList"}, + "dockerSecurityOptions":{"shape":"StringList"}, + "interactive":{"shape":"BoxedBoolean"}, + "pseudoTerminal":{"shape":"BoxedBoolean"}, + "dockerLabels":{"shape":"DockerLabelsMap"}, + "ulimits":{"shape":"UlimitList"}, + "logConfiguration":{"shape":"LogConfiguration"}, + "healthCheck":{"shape":"HealthCheck"}, + "systemControls":{"shape":"SystemControls"}, + 
"resourceRequirements":{"shape":"ResourceRequirements"}, + "firelensConfiguration":{"shape": "FirelensConfiguration"} + } + }, + "ContainerDefinitions":{ + "type":"list", + "member":{"shape":"ContainerDefinition"} + }, + "ContainerDependencies":{ + "type":"list", + "member":{"shape":"ContainerDependency"} + }, + "ContainerDependency":{ + "type":"structure", + "required":[ + "containerName", + "condition" + ], + "members":{ + "containerName":{"shape":"String"}, + "condition":{"shape":"ContainerCondition"} + } + }, + "ContainerInstance":{ + "type":"structure", + "members":{ + "containerInstanceArn":{"shape":"String"}, + "ec2InstanceId":{"shape":"String"}, + "version":{"shape":"Long"}, + "versionInfo":{"shape":"VersionInfo"}, + "remainingResources":{"shape":"Resources"}, + "registeredResources":{"shape":"Resources"}, + "status":{"shape":"String"}, + "agentConnected":{"shape":"Boolean"}, + "runningTasksCount":{"shape":"Integer"}, + "pendingTasksCount":{"shape":"Integer"}, + "agentUpdateStatus":{"shape":"AgentUpdateStatus"}, + "attributes":{"shape":"Attributes"}, + "registeredAt":{"shape":"Timestamp"}, + "attachments":{"shape":"Attachments"}, + "clientToken":{ + "shape":"String" + }, + "tags":{"shape":"Tags"} + } + }, + "ContainerInstanceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DRAINING" + ] + }, + "ContainerInstances":{ + "type":"list", + "member":{"shape":"ContainerInstance"} + }, + "ContainerOverride":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "command":{"shape":"StringList"}, + "environment":{"shape":"EnvironmentVariables"}, + "cpu":{"shape":"BoxedInteger"}, + "memory":{"shape":"BoxedInteger"}, + "memoryReservation":{"shape":"BoxedInteger"}, + "resourceRequirements":{"shape":"ResourceRequirements"} + } + }, + "ContainerOverrides":{ + "type":"list", + "member":{"shape":"ContainerOverride"} + }, + "ContainerStateChange":{ + "type":"structure", + "members":{ + "containerName":{"shape":"String"}, + 
"imageDigest":{"shape":"String"}, + "runtimeId":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "networkBindings":{"shape":"NetworkBindings"}, + "managedAgents":{"shape":"ManagedAgentStateChanges"}, + "reason":{"shape":"String"}, + "status":{"shape":"String"} + } + }, + "ContainerStateChanges":{ + "type":"list", + "member":{"shape":"ContainerStateChange"} + }, + "Containers":{ + "type":"list", + "member":{"shape":"Container"} + }, + "CreateClusterRequest":{ + "type":"structure", + "members":{ + "clusterName":{"shape":"String"} + } + }, + "CreateClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "CreateServiceRequest":{ + "type":"structure", + "required":[ + "serviceName", + "taskDefinition" + ], + "members":{ + "cluster":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "serviceRegistries":{"shape":"ServiceRegistries"}, + "desiredCount":{"shape":"BoxedInteger"}, + "clientToken":{"shape":"String"}, + "launchType":{"shape":"LaunchType"}, + "platformVersion":{"shape":"String"}, + "role":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, + "placementConstraints":{"shape":"PlacementConstraints"}, + "placementStrategy":{"shape":"PlacementStrategies"}, + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"}, + "schedulingStrategy":{"shape":"SchedulingStrategy"} + } + }, + "CreateServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "DeleteAccountSettingRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"SettingName"}, + "principalArn":{"shape":"String"} + } + }, + "DeleteAccountSettingResponse":{ + "type":"structure", + "members":{ + "setting":{"shape":"Setting"} + } + }, + "DeleteAttributesRequest":{ + "type":"structure", + 
"required":["attributes"], + "members":{ + "cluster":{"shape":"String"}, + "attributes":{"shape":"Attributes"} + } + }, + "DeleteAttributesResponse":{ + "type":"structure", + "members":{ + "attributes":{"shape":"Attributes"} + } + }, + "DeleteClusterRequest":{ + "type":"structure", + "required":["cluster"], + "members":{ + "cluster":{"shape":"String"} + } + }, + "DeleteClusterResponse":{ + "type":"structure", + "members":{ + "cluster":{"shape":"Cluster"} + } + }, + "DeleteServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"}, + "force":{"shape":"BoxedBoolean"} + } + }, + "DeleteServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "Deployment":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "status":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "desiredCount":{"shape":"BoxedInteger"}, + "pendingCount":{"shape":"Integer"}, + "runningCount":{"shape":"Integer"}, + "createdAt":{"shape":"Timestamp"}, + "updatedAt":{"shape":"Timestamp"}, + "launchType":{"shape":"LaunchType"}, + "platformVersion":{"shape":"String"}, + "networkConfiguration":{"shape":"NetworkConfiguration"} + } + }, + "DeploymentConfiguration":{ + "type":"structure", + "members":{ + "maximumPercent":{"shape":"BoxedInteger"}, + "minimumHealthyPercent":{"shape":"BoxedInteger"} + } + }, + "Deployments":{ + "type":"list", + "member":{"shape":"Deployment"} + }, + "DeregisterContainerInstanceRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "force":{"shape":"BoxedBoolean"} + } + }, + "DeregisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "DeregisterTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + 
"taskDefinition":{"shape":"String"} + } + }, + "DeregisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeClustersRequest":{ + "type":"structure", + "members":{ + "clusters":{"shape":"StringList"}, + "include":{"shape":"ClusterFieldList"} + } + }, + "DescribeClustersResponse":{ + "type":"structure", + "members":{ + "clusters":{"shape":"Clusters"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeContainerInstancesRequest":{ + "type":"structure", + "required":["containerInstances"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstances":{"shape":"StringList"} + } + }, + "DescribeContainerInstancesResponse":{ + "type":"structure", + "members":{ + "containerInstances":{"shape":"ContainerInstances"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeServicesRequest":{ + "type":"structure", + "required":["services"], + "members":{ + "cluster":{"shape":"String"}, + "services":{"shape":"StringList"} + } + }, + "DescribeServicesResponse":{ + "type":"structure", + "members":{ + "services":{"shape":"Services"}, + "failures":{"shape":"Failures"} + } + }, + "DescribeTaskDefinitionRequest":{ + "type":"structure", + "required":["taskDefinition"], + "members":{ + "taskDefinition":{"shape":"String"} + } + }, + "DescribeTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "DescribeTasksRequest":{ + "type":"structure", + "required":["tasks"], + "members":{ + "cluster":{"shape":"String"}, + "tasks":{"shape":"StringList"} + } + }, + "DescribeTasksResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "DesiredStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "PENDING", + "STOPPED" + ] + }, + "Device":{ + "type":"structure", + "required":["hostPath"], + "members":{ + "hostPath":{"shape":"String"}, + "containerPath":{"shape":"String"}, + 
"permissions":{"shape":"DeviceCgroupPermissions"} + } + }, + "DeviceCgroupPermission":{ + "type":"string", + "enum":[ + "read", + "write", + "mknod" + ] + }, + "DeviceCgroupPermissions":{ + "type":"list", + "member":{"shape":"DeviceCgroupPermission"} + }, + "DevicesList":{ + "type":"list", + "member":{"shape":"Device"} + }, + "DiscoverPollEndpointRequest":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"String"}, + "cluster":{"shape":"String"} + } + }, + "DiscoverPollEndpointResponse":{ + "type":"structure", + "members":{ + "endpoint":{"shape":"String"}, + "telemetryEndpoint":{"shape":"String"} + } + }, + "DockerLabelsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "DockerVolumeConfiguration":{ + "type":"structure", + "members":{ + "scope":{"shape":"Scope"}, + "autoprovision":{"shape":"BoxedBoolean"}, + "driver":{"shape":"String"}, + "driverOpts":{"shape":"StringMap"}, + "labels":{"shape":"StringMap"} + } + }, + "Double":{"type":"double"}, + "EnvironmentVariables":{ + "type":"list", + "member":{"shape":"KeyValuePair"} + }, + "EFSAuthorizationConfig":{ + "type":"structure", + "members":{ + "accessPointId":{"shape":"String"}, + "iam":{"shape":"EFSAuthorizationConfigIAM"} + } + }, + "EFSAuthorizationConfigIAM":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "EFSTransitEncryption":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "EFSVolumeConfiguration":{ + "type":"structure", + "required":["fileSystemId"], + "members":{ + "fileSystemId":{"shape":"String"}, + "rootDirectory":{"shape":"String"}, + "transitEncryption":{"shape":"EFSTransitEncryption"}, + "authorizationConfig":{"shape":"EFSAuthorizationConfig"} + } + }, + "FSxWindowsFileServerAuthorizationConfig":{ + "type":"structure", + "required":[ + "credentialsParameter", + "domain" + ], + "members":{ + "credentialsParameter":{"shape":"String"}, + "domain":{"shape":"String"} + } + }, + 
"FSxWindowsFileServerVolumeConfiguration":{ + "type":"structure", + "required":[ + "fileSystemId", + "rootDirectory", + "authorizationConfig" + ], + "members":{ + "fileSystemId":{"shape":"String"}, + "rootDirectory":{"shape":"String"}, + "authorizationConfig":{"shape":"FSxWindowsFileServerAuthorizationConfig"} + } + }, + "Failure":{ + "type":"structure", + "members":{ + "arn":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "Failures":{ + "type":"list", + "member":{"shape":"Failure"} + }, + "HealthCheck":{ + "type":"structure", + "required":["command"], + "members":{ + "command":{"shape":"StringList"}, + "interval":{"shape":"BoxedInteger"}, + "timeout":{"shape":"BoxedInteger"}, + "retries":{"shape":"BoxedInteger"}, + "startPeriod":{"shape":"BoxedInteger"} + } + }, + "HealthStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ] + }, + "HostEntry":{ + "type":"structure", + "required":[ + "hostname", + "ipAddress" + ], + "members":{ + "hostname":{"shape":"String"}, + "ipAddress":{"shape":"String"} + } + }, + "HostEntryList":{ + "type":"list", + "member":{"shape":"HostEntry"} + }, + "HostVolumeProperties":{ + "type":"structure", + "members":{ + "sourcePath":{"shape":"String"} + } + }, + "InferenceAccelerator":{ + "type":"structure", + "required":[ + "deviceName", + "deviceType" + ], + "members":{ + "deviceName":{"shape":"String"}, + "deviceType":{"shape":"String"} + } + }, + "InferenceAccelerators":{ + "type":"list", + "member":{"shape":"InferenceAccelerator"} + }, + "Integer":{"type":"integer"}, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "IpcMode":{ + "type":"string", + "enum":[ + "host", + "task", + "none" + ] + }, + "KernelCapabilities":{ + "type":"structure", + "members":{ + "add":{"shape":"StringList"}, + "drop":{"shape":"StringList"} + } + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + 
"LaunchType":{ + "type":"string", + "enum":[ + "EC2", + "FARGATE" + ] + }, + "LinuxParameters":{ + "type":"structure", + "members":{ + "capabilities":{"shape":"KernelCapabilities"}, + "devices":{"shape":"DevicesList"}, + "initProcessEnabled":{"shape":"BoxedBoolean"}, + "sharedMemorySize":{"shape":"BoxedInteger"}, + "tmpfs":{"shape":"TmpfsList"} + } + }, + "ListAttributesRequest":{ + "type":"structure", + "required":["targetType"], + "members":{ + "cluster":{"shape":"String"}, + "targetType":{"shape":"TargetType"}, + "attributeName":{"shape":"String"}, + "attributeValue":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListAttributesResponse":{ + "type":"structure", + "members":{ + "attributes":{"shape":"Attributes"}, + "nextToken":{"shape":"String"} + } + }, + "ListClustersRequest":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListClustersResponse":{ + "type":"structure", + "members":{ + "clusterArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListContainerInstancesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "filter":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"}, + "status":{"shape":"ContainerInstanceStatus"} + } + }, + "ListContainerInstancesResponse":{ + "type":"structure", + "members":{ + "containerInstanceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListServicesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"}, + "launchType":{"shape":"LaunchType"}, + "schedulingStrategy":{"shape":"SchedulingStrategy"} + } + }, + "ListServicesResponse":{ + "type":"structure", + "members":{ + "serviceArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTagsForResourceRequest":{ + 
"type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{"shape":"String"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"Tags"} + } + }, + "ListTaskDefinitionFamiliesRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "status":{"shape":"TaskDefinitionFamilyStatus"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionFamiliesResponse":{ + "type":"structure", + "members":{ + "families":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTaskDefinitionsRequest":{ + "type":"structure", + "members":{ + "familyPrefix":{"shape":"String"}, + "status":{"shape":"TaskDefinitionStatus"}, + "sort":{"shape":"SortOrder"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"} + } + }, + "ListTaskDefinitionsResponse":{ + "type":"structure", + "members":{ + "taskDefinitionArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "ListTasksRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "family":{"shape":"String"}, + "nextToken":{"shape":"String"}, + "maxResults":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "desiredStatus":{"shape":"DesiredStatus"}, + "launchType":{"shape":"LaunchType"} + } + }, + "ListTasksResponse":{ + "type":"structure", + "members":{ + "taskArns":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, + "LoadBalancer":{ + "type":"structure", + "members":{ + "targetGroupArn":{"shape":"String"}, + "loadBalancerName":{"shape":"String"}, + "containerName":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"} + } + }, + "LoadBalancers":{ + "type":"list", + "member":{"shape":"LoadBalancer"} + }, + "LogConfiguration":{ + "type":"structure", + "required":["logDriver"], + "members":{ + 
"logDriver":{"shape":"LogDriver"}, + "options":{"shape":"LogConfigurationOptionsMap"}, + "secretOptions":{ + "shape":"SecretList" + } + } + }, + "LogConfigurationOptionsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "LogDriver":{ + "type":"string", + "enum":[ + "json-file", + "syslog", + "journald", + "gelf", + "fluentd", + "awslogs", + "splunk", + "awsfirelens" + ] + }, + "FirelensConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{"shape":"FirelensConfigurationType"}, + "options":{"shape":"FirelensConfigurationOptionsMap"} + } + }, + "FirelensConfigurationType":{ + "type":"string", + "enum":[ + "fluentd", + "fluentbit" + ] + }, + "FirelensConfigurationOptionsMap": { + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "ManagedAgent":{ + "type":"structure", + "members":{ + "lastStartedAt":{"shape":"Timestamp"}, + "name":{"shape":"ManagedAgentName"}, + "reason":{"shape":"String"}, + "lastStatus":{"shape":"String"} + } + }, + "ManagedAgentName":{ + "type":"string", + "enum":["ExecuteCommandAgent"] + }, + "ManagedAgentStateChange":{ + "type":"structure", + "required":[ + "name", + "status" + ], + "members":{ + "managedAgentName":{"shape":"ManagedAgentName"}, + "containerName": {"shape":"String"}, + "status":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "ManagedAgentStateChanges":{ + "type":"list", + "member":{"shape":"ManagedAgentStateChange"} + }, + "ManagedAgents":{ + "type":"list", + "member":{"shape":"ManagedAgent"} + }, + "MissingVersionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "MountPoint":{ + "type":"structure", + "members":{ + "sourceVolume":{"shape":"String"}, + "containerPath":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "MountPointList":{ + "type":"list", + "member":{"shape":"MountPoint"} + }, + "NetworkBinding":{ + "type":"structure", + "members":{ + 
"bindIP":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"}, + "hostPort":{"shape":"BoxedInteger"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "NetworkBindings":{ + "type":"list", + "member":{"shape":"NetworkBinding"} + }, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "awsvpcConfiguration":{"shape":"AwsVpcConfiguration"} + } + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "attachmentId":{"shape":"String"}, + "privateIpv4Address":{"shape":"String"}, + "ipv6Address":{"shape":"String"} + } + }, + "NetworkInterfaces":{ + "type":"list", + "member":{"shape":"NetworkInterface"} + }, + "NetworkMode":{ + "type":"string", + "enum":[ + "bridge", + "host", + "awsvpc", + "none" + ] + }, + "NoUpdateAvailableException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PidMode":{ + "type":"string", + "enum":[ + "host", + "task" + ] + }, + "PlacementConstraint":{ + "type":"structure", + "members":{ + "type":{"shape":"PlacementConstraintType"}, + "expression":{"shape":"String"} + } + }, + "PlacementConstraintType":{ + "type":"string", + "enum":[ + "distinctInstance", + "memberOf" + ] + }, + "PlacementConstraints":{ + "type":"list", + "member":{"shape":"PlacementConstraint"} + }, + "PlacementStrategies":{ + "type":"list", + "member":{"shape":"PlacementStrategy"} + }, + "PlacementStrategy":{ + "type":"structure", + "members":{ + "type":{"shape":"PlacementStrategyType"}, + "field":{"shape":"String"} + } + }, + "PlacementStrategyType":{ + "type":"string", + "enum":[ + "random", + "spread", + "binpack" + ] + }, + "PlatformDevice":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "type":{"shape":"PlatformDeviceType"} + } + }, + "PlatformDeviceType":{ + "type":"string", + "enum":["GPU"] + }, + "PlatformDevices":{ + "type":"list", + "member":{"shape":"PlatformDevice"} + }, + "PlatformTaskDefinitionIncompatibilityException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + 
"PlatformUnknownException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PortMapping":{ + "type":"structure", + "members":{ + "containerPort":{"shape":"BoxedInteger"}, + "hostPort":{"shape":"BoxedInteger"}, + "protocol":{"shape":"TransportProtocol"} + } + }, + "PortMappingList":{ + "type":"list", + "member":{"shape":"PortMapping"} + }, + "ProxyConfiguration":{ + "type":"structure", + "required":["containerName"], + "members":{ + "type":{"shape":"ProxyConfigurationType"}, + "containerName":{"shape":"String"}, + "properties":{"shape":"ProxyConfigurationProperties"} + } + }, + "ProxyConfigurationProperties":{ + "type":"list", + "member":{"shape":"KeyValuePair"} + }, + "ProxyConfigurationType":{ + "type":"string", + "enum":["APPMESH"] + }, + "PutAccountSettingRequest":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{"shape":"SettingName"}, + "value":{"shape":"String"}, + "principalArn":{"shape":"String"} + } + }, + "PutAccountSettingResponse":{ + "type":"structure", + "members":{ + "setting":{"shape":"Setting"} + } + }, + "PutAttributesRequest":{ + "type":"structure", + "required":["attributes"], + "members":{ + "cluster":{"shape":"String"}, + "attributes":{"shape":"Attributes"} + } + }, + "PutAttributesResponse":{ + "type":"structure", + "members":{ + "attributes":{"shape":"Attributes"} + } + }, + "RegisterContainerInstanceRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "instanceIdentityDocument":{"shape":"String"}, + "instanceIdentityDocumentSignature":{"shape":"String"}, + "totalResources":{"shape":"Resources"}, + "versionInfo":{"shape":"VersionInfo"}, + "containerInstanceArn":{"shape":"String"}, + "attributes":{"shape":"Attributes"}, + "tags":{"shape":"Tags"}, + "clientToken":{ + "shape":"String" + }, + "platformDevices":{"shape":"PlatformDevices"} + } + }, + "RegisterContainerInstanceResponse":{ + "type":"structure", + "members":{ + 
"containerInstance":{"shape":"ContainerInstance"} + } + }, + "RegisterTaskDefinitionRequest":{ + "type":"structure", + "required":[ + "family", + "containerDefinitions" + ], + "members":{ + "family":{"shape":"String"}, + "taskRoleArn":{"shape":"String"}, + "executionRoleArn":{"shape":"String"}, + "networkMode":{"shape":"NetworkMode"}, + "containerDefinitions":{"shape":"ContainerDefinitions"}, + "volumes":{"shape":"VolumeList"}, + "placementConstraints":{"shape":"TaskDefinitionPlacementConstraints"}, + "requiresCompatibilities":{"shape":"CompatibilityList"}, + "cpu":{"shape":"String"}, + "memory":{"shape":"String"}, + "pidMode":{"shape":"PidMode"}, + "ipcMode":{"shape":"IpcMode"}, + "tags":{"shape":"Tags"}, + "inferenceAccelerators":{"shape":"InferenceAccelerators"}, + "proxyConfiguration":{ + "shape":"ProxyConfiguration" + } + } + }, + "RegisterTaskDefinitionResponse":{ + "type":"structure", + "members":{ + "taskDefinition":{"shape":"TaskDefinition"} + } + }, + "RepositoryCredentials":{ + "type":"structure", + "required":["credentialsParameter"], + "members":{ + "credentialsParameter":{"shape":"String"} + } + }, + "RequiresAttributes":{ + "type":"list", + "member":{"shape":"Attribute"} + }, + "Resource":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "type":{"shape":"String"}, + "doubleValue":{"shape":"Double"}, + "longValue":{"shape":"Long"}, + "integerValue":{"shape":"Integer"}, + "stringSetValue":{"shape":"StringList"} + } + }, + "ResourceRequirement":{ + "type":"structure", + "members":{ + "value":{"shape":"String"}, + "type":{"shape":"ResourceType"} + } + }, + "ResourceRequirements":{ + "type":"list", + "member":{"shape":"ResourceRequirement"} + }, + "ResourceType":{ + "type":"string", + "enum":["GPU"] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Resources":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "RunTaskRequest":{ + "type":"structure", + 
"required":["taskDefinition"], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "count":{"shape":"BoxedInteger"}, + "startedBy":{"shape":"String"}, + "group":{"shape":"String"}, + "placementConstraints":{"shape":"PlacementConstraints"}, + "placementStrategy":{"shape":"PlacementStrategies"}, + "launchType":{"shape":"LaunchType"}, + "platformVersion":{"shape":"String"}, + "networkConfiguration":{"shape":"NetworkConfiguration"} + } + }, + "RunTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + "failures":{"shape":"Failures"} + } + }, + "Scope":{ + "type":"string", + "enum":[ + "task", + "shared" + ] + }, + "SchedulingStrategy":{ + "type":"string", + "enum":[ + "REPLICA", + "DAEMON" + ] + }, + "Scope":{ + "type":"string", + "enum":[ + "task", + "shared" + ] + }, + "Secret":{ + "type":"structure", + "required":[ + "name", + "valueFrom" + ], + "members":{ + "name":{"shape":"String"}, + "valueFrom":{"shape":"String"} + } + }, + "SecretList":{ + "type":"list", + "member":{"shape":"Secret"} + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true, + "fault":true + }, + "Service":{ + "type":"structure", + "members":{ + "serviceArn":{"shape":"String"}, + "serviceName":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "loadBalancers":{"shape":"LoadBalancers"}, + "serviceRegistries":{"shape":"ServiceRegistries"}, + "status":{"shape":"String"}, + "desiredCount":{"shape":"BoxedInteger"}, + "runningCount":{"shape":"Integer"}, + "pendingCount":{"shape":"Integer"}, + "launchType":{"shape":"LaunchType"}, + "platformVersion":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, + "deployments":{"shape":"Deployments"}, + "roleArn":{"shape":"String"}, + "events":{"shape":"ServiceEvents"}, + "createdAt":{"shape":"Timestamp"}, + 
"placementConstraints":{"shape":"PlacementConstraints"}, + "placementStrategy":{"shape":"PlacementStrategies"}, + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"}, + "schedulingStrategy":{"shape":"SchedulingStrategy"} + } + }, + "ServiceEvent":{ + "type":"structure", + "members":{ + "id":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "message":{"shape":"String"} + } + }, + "ServiceEvents":{ + "type":"list", + "member":{"shape":"ServiceEvent"} + }, + "ServiceNotActiveException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ServiceNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ServiceRegistries":{ + "type":"list", + "member":{"shape":"ServiceRegistry"} + }, + "ServiceRegistry":{ + "type":"structure", + "members":{ + "registryArn":{"shape":"String"}, + "port":{"shape":"BoxedInteger"}, + "containerName":{"shape":"String"}, + "containerPort":{"shape":"BoxedInteger"} + } + }, + "Services":{ + "type":"list", + "member":{"shape":"Service"} + }, + "Setting":{ + "type":"structure", + "members":{ + "name":{"shape":"SettingName"}, + "value":{"shape":"String"}, + "principalArn":{"shape":"String"} + } + }, + "SettingName":{ + "type":"string", + "enum":[ + "serviceLongArnFormat", + "taskLongArnFormat", + "containerInstanceLongArnFormat" + ] + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "StartTaskRequest":{ + "type":"structure", + "required":[ + "taskDefinition", + "containerInstances" + ], + "members":{ + "cluster":{"shape":"String"}, + "taskDefinition":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "containerInstances":{"shape":"StringList"}, + "startedBy":{"shape":"String"}, + "group":{"shape":"String"}, + "networkConfiguration":{"shape":"NetworkConfiguration"} + } + }, + "StartTaskResponse":{ + "type":"structure", + "members":{ + "tasks":{"shape":"Tasks"}, + 
"failures":{"shape":"Failures"} + } + }, + "StopTaskRequest":{ + "type":"structure", + "required":["task"], + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "reason":{"shape":"String"} + } + }, + "StopTaskResponse":{ + "type":"structure", + "members":{ + "task":{"shape":"Task"} + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "SubmitAttachmentStateChangesRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "attachments":{"shape":"AttachmentStateChanges"} + } + }, + "SubmitAttachmentStateChangesResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "SubmitContainerStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "containerName":{"shape":"String"}, + "runtimeId":{"shape":"String"}, + "status":{"shape":"String"}, + "exitCode":{"shape":"BoxedInteger"}, + "reason":{"shape":"String"}, + "networkBindings":{"shape":"NetworkBindings"}, + "managedAgents":{"shape":"ManagedAgentStateChanges"} + } + }, + "SubmitContainerStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "SubmitTaskStateChangeRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "task":{"shape":"String"}, + "status":{"shape":"String"}, + "reason":{"shape":"String"}, + "containers":{"shape":"ContainerStateChanges"}, + "managedAgents":{"shape":"ManagedAgentStateChanges"}, + "attachments":{"shape":"AttachmentStateChanges"}, + "pullStartedAt":{"shape":"Timestamp"}, + "pullStoppedAt":{"shape":"Timestamp"}, + "executionStoppedAt":{"shape":"Timestamp"} + } + }, + "SubmitTaskStateChangeResponse":{ + "type":"structure", + "members":{ + "acknowledgment":{"shape":"String"} + } + }, + "SystemControl":{ + 
"type":"structure", + "members":{ + "namespace":{"shape":"String"}, + "value":{"shape":"String"} + } + }, + "SystemControls":{ + "type":"list", + "member":{"shape":"SystemControl"} + }, + "Tag":{ + "type":"structure", + "members":{ + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":0 + }, + "TargetNotFoundException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "TargetType":{ + "type":"string", + "enum":["container-instance"] + }, + "Task":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "clusterArn":{"shape":"String"}, + "taskDefinitionArn":{"shape":"String"}, + "containerInstanceArn":{"shape":"String"}, + "overrides":{"shape":"TaskOverride"}, + "lastStatus":{"shape":"String"}, + "desiredStatus":{"shape":"String"}, + "cpu":{"shape":"String"}, + "memory":{"shape":"String"}, + "containers":{"shape":"Containers"}, + "startedBy":{"shape":"String"}, + "version":{"shape":"Long"}, + "stoppedReason":{"shape":"String"}, + "connectivity":{"shape":"Connectivity"}, + "connectivityAt":{"shape":"Timestamp"}, + "pullStartedAt":{"shape":"Timestamp"}, + "pullStoppedAt":{"shape":"Timestamp"}, + "executionStoppedAt":{"shape":"Timestamp"}, + "createdAt":{"shape":"Timestamp"}, + "startedAt":{"shape":"Timestamp"}, + "stoppingAt":{"shape":"Timestamp"}, + "stoppedAt":{"shape":"Timestamp"}, + "group":{"shape":"String"}, + "launchType":{"shape":"LaunchType"}, + "platformVersion":{"shape":"String"}, + "attachments":{"shape":"Attachments"}, + "healthStatus":{"shape":"HealthStatus"}, + "tags":{"shape":"Tags"} + } + }, + "TaskDefinition":{ + "type":"structure", + "members":{ + "taskDefinitionArn":{"shape":"String"}, + 
"containerDefinitions":{"shape":"ContainerDefinitions"}, + "family":{"shape":"String"}, + "taskRoleArn":{"shape":"String"}, + "executionRoleArn":{"shape":"String"}, + "networkMode":{"shape":"NetworkMode"}, + "revision":{"shape":"Integer"}, + "volumes":{"shape":"VolumeList"}, + "status":{"shape":"TaskDefinitionStatus"}, + "requiresAttributes":{"shape":"RequiresAttributes"}, + "placementConstraints":{"shape":"TaskDefinitionPlacementConstraints"}, + "compatibilities":{"shape":"CompatibilityList"}, + "requiresCompatibilities":{"shape":"CompatibilityList"}, + "cpu":{"shape":"String"}, + "memory":{"shape":"String"}, + "pidMode":{"shape":"PidMode"}, + "ipcMode":{"shape":"IpcMode"}, + "inferenceAccelerators":{"shape":"InferenceAccelerators"}, + "proxyConfiguration":{ + "shape":"ProxyConfiguration" + } + } + }, + "TaskDefinitionFamilyStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "ALL" + ] + }, + "TaskDefinitionPlacementConstraint":{ + "type":"structure", + "members":{ + "type":{"shape":"TaskDefinitionPlacementConstraintType"}, + "expression":{"shape":"String"} + } + }, + "TaskDefinitionPlacementConstraintType":{ + "type":"string", + "enum":["memberOf"] + }, + "TaskDefinitionPlacementConstraints":{ + "type":"list", + "member":{"shape":"TaskDefinitionPlacementConstraint"} + }, + "TaskDefinitionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE" + ] + }, + "TaskOverride":{ + "type":"structure", + "members":{ + "containerOverrides":{"shape":"ContainerOverrides"}, + "taskRoleArn":{"shape":"String"}, + "executionRoleArn":{"shape":"String"} + } + }, + "Tasks":{ + "type":"list", + "member":{"shape":"Task"} + }, + "Timestamp":{"type":"timestamp"}, + "Tmpfs":{ + "type":"structure", + "required":[ + "containerPath", + "size" + ], + "members":{ + "containerPath":{"shape":"String"}, + "size":{"shape":"Integer"}, + "mountOptions":{"shape":"StringList"} + } + }, + "TmpfsList":{ + "type":"list", + "member":{"shape":"Tmpfs"} + }, + "TransportProtocol":{ + 
"type":"string", + "enum":[ + "tcp", + "udp" + ] + }, + "Ulimit":{ + "type":"structure", + "required":[ + "name", + "softLimit", + "hardLimit" + ], + "members":{ + "name":{"shape":"UlimitName"}, + "softLimit":{"shape":"Integer"}, + "hardLimit":{"shape":"Integer"} + } + }, + "UlimitList":{ + "type":"list", + "member":{"shape":"Ulimit"} + }, + "UlimitName":{ + "type":"string", + "enum":[ + "core", + "cpu", + "data", + "fsize", + "locks", + "memlock", + "msgqueue", + "nice", + "nofile", + "nproc", + "rss", + "rtprio", + "rttime", + "sigpending", + "stack" + ] + }, + "UnsupportedFeatureException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateContainerAgentRequest":{ + "type":"structure", + "required":["containerInstance"], + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"} + } + }, + "UpdateContainerAgentResponse":{ + "type":"structure", + "members":{ + "containerInstance":{"shape":"ContainerInstance"} + } + }, + "UpdateContainerInstancesStateRequest":{ + "type":"structure", + "required":[ + "containerInstances", + "status" + ], + "members":{ + "cluster":{"shape":"String"}, + "containerInstances":{"shape":"StringList"}, + "status":{"shape":"ContainerInstanceStatus"} + } + }, + "UpdateContainerInstancesStateResponse":{ + "type":"structure", + "members":{ + "containerInstances":{"shape":"ContainerInstances"}, + "failures":{"shape":"Failures"} + } + }, + "UpdateInProgressException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "UpdateServiceRequest":{ + "type":"structure", + "required":["service"], + "members":{ + "cluster":{"shape":"String"}, + "service":{"shape":"String"}, + "desiredCount":{"shape":"BoxedInteger"}, + "taskDefinition":{"shape":"String"}, + "deploymentConfiguration":{"shape":"DeploymentConfiguration"}, + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "platformVersion":{"shape":"String"}, + "forceNewDeployment":{"shape":"Boolean"}, + 
"healthCheckGracePeriodSeconds":{"shape":"BoxedInteger"} + } + }, + "UpdateServiceResponse":{ + "type":"structure", + "members":{ + "service":{"shape":"Service"} + } + }, + "VersionInfo":{ + "type":"structure", + "members":{ + "agentVersion":{"shape":"String"}, + "agentHash":{"shape":"String"}, + "dockerVersion":{"shape":"String"} + } + }, + "Volume":{ + "type":"structure", + "members":{ + "name":{"shape":"String"}, + "host":{"shape":"HostVolumeProperties"}, + "dockerVolumeConfiguration":{"shape":"DockerVolumeConfiguration"}, + "efsVolumeConfiguration":{"shape":"EFSVolumeConfiguration"}, + "fsxWindowsFileServerVolumeConfiguration":{"shape":"FSxWindowsFileServerVolumeConfiguration"} + } + }, + "VolumeFrom":{ + "type":"structure", + "members":{ + "sourceContainer":{"shape":"String"}, + "readOnly":{"shape":"BoxedBoolean"} + } + }, + "VolumeFromList":{ + "type":"list", + "member":{"shape":"VolumeFrom"} + }, + "VolumeList":{ + "type":"list", + "member":{"shape":"Volume"} + } + } +} diff --git a/agent/ecs_client/model/api/docs-2.json b/agent/ecs_client/model/api/docs-2.json new file mode 100644 index 00000000000..45574139415 --- /dev/null +++ b/agent/ecs_client/model/api/docs-2.json @@ -0,0 +1,1547 @@ +{ + "version": "2.0", + "service": "

Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes it easy to run, stop, and manage Docker containers on a cluster. You can host your cluster on a serverless infrastructure that is managed by Amazon ECS by launching your services or tasks using the Fargate launch type. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage by using the EC2 launch type. For more information about launch types, see Amazon ECS Launch Types.

Amazon ECS lets you launch and stop container-based applications with simple API calls, allows you to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features.

You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates the need for you to operate your own cluster management and configuration management systems or worry about scaling your management infrastructure.

", + "operations": { + "CreateCluster": "

Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster action.

When you call the CreateCluster API operation, Amazon ECS attempts to create the service-linked role for your account so that required resources in other AWS services can be managed on your behalf. However, if the IAM user that makes the call does not have permissions to create the service-linked role, it is not created. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

", + "CreateService": "

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount, Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. During a deployment, the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy. The deployment is triggered by changing the task definition or the desired count of a service with an UpdateService operation.

The minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the scheduler can stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. The default value for a replica service for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs. The default value for a daemon service for minimumHealthyPercent is 0% for the AWS CLI, the AWS SDKs, and the APIs and 50% for the console.

The maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your replica service has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler can start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for a replica service for maximumPercent is 200%. If you are using a daemon service type, the maximumPercent should remain at 100%, which is the default value.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

", + "DeleteAttributes": "

Deletes one or more custom attributes from an Amazon ECS resource.

", + "DeleteCluster": "

Deletes the specified cluster. You must deregister all container instances from this cluster before you may delete it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.

", + "DeleteService": "

Deletes a specified service within a cluster. You can delete a service if you have no running tasks in it and the desired task count is zero. If the service is actively maintaining tasks, you cannot delete it, and you must update the service to a desired task count of zero. For more information, see UpdateService.

When you delete a service, if there are still running tasks that require cleanup, the service status moves from ACTIVE to DRAINING, and the service is no longer visible in the console or in ListServices API operations. After the tasks have stopped, then the service status moves from DRAINING to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices API operations. However, in the future, INACTIVE services may be cleaned up and purged from Amazon ECS record keeping, and DescribeServices API operations on those services return a ServiceNotFoundException error.

", + "DeregisterContainerInstance": "

Deregisters an Amazon ECS container instance from the specified cluster. This instance is no longer available to run tasks.

If you intend to use the container instance for some other purpose after deregistration, you should stop all of the tasks running on the container instance before deregistration. That prevents any orphaned tasks from consuming resources.

Deregistering a container instance removes the instance from a cluster, but it does not terminate the EC2 instance; if you are finished using the instance, be sure to terminate it in the Amazon EC2 console to stop billing.

If you terminate a running container instance, Amazon ECS automatically deregisters the instance from your cluster (stopped container instances or instances with disconnected agents are not automatically deregistered when terminated).

", + "DeregisterTaskDefinition": "

Deregisters the specified task definition by family and revision. Upon deregistration, the task definition is marked as INACTIVE. Existing tasks and services that reference an INACTIVE task definition continue to run without disruption. Existing services that reference an INACTIVE task definition can still scale up or down by modifying the service's desired count.

You cannot use an INACTIVE task definition to run new tasks or create new services, and you cannot update an existing service to reference an INACTIVE task definition (although there may be up to a 10-minute window following deregistration where these restrictions have not yet taken effect).

At this time, INACTIVE task definitions remain discoverable in your account indefinitely; however, this behavior is subject to change in the future, so you should not rely on INACTIVE task definitions persisting beyond the lifecycle of any associated tasks and services.

", + "DescribeClusters": "

Describes one or more of your clusters.

", + "DescribeContainerInstances": "

Describes Amazon Elastic Container Service container instances. Returns metadata about registered and remaining resources on each container instance requested.

", + "DescribeServices": "

Describes the specified services running in your cluster.

", + "DescribeTaskDefinition": "

Describes a task definition. You can specify a family and revision to find information about a specific task definition, or you can simply specify the family to find the latest ACTIVE revision in that family.

You can only describe INACTIVE task definitions while an active task or service references them.

", + "DescribeTasks": "

Describes a specified task or tasks.

", + "DiscoverPollEndpoint": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Returns an endpoint for the Amazon ECS agent to poll for updates.

", + "ListAttributes": "

Lists the attributes for Amazon ECS resources within a specified target type and cluster. When you specify a target type and cluster, ListAttributes returns a list of attribute objects, one for each attribute on each resource. You can filter the list of results to a single attribute name to only return results that have that name. You can also filter the results by attribute name and value, for example, to see which container instances in a cluster are running a Linux AMI (ecs.os-type=linux).

", + "ListClusters": "

Returns a list of existing clusters.

", + "ListContainerInstances": "

Returns a list of container instances in a specified cluster. You can filter the results of a ListContainerInstances operation with cluster query language statements inside the filter parameter. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", + "ListServices": "

Lists the services that are running in a specified cluster.

", + "ListTaskDefinitionFamilies": "

Returns a list of task definition families that are registered to your account (which may include task definition families that no longer have any ACTIVE task definition revisions).

You can filter out task definition families that do not contain any ACTIVE task definition revisions by setting the status parameter to ACTIVE. You can also filter the results with the familyPrefix parameter.

", + "ListTaskDefinitions": "

Returns a list of task definitions that are registered to your account. You can filter the results by family name with the familyPrefix parameter or by status with the status parameter.

", + "ListTasks": "

Returns a list of tasks for a specified cluster. You can filter the results by family name, by a particular container instance, or by the desired status of the task with the family, containerInstance, and desiredStatus parameters.

Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.

", + "PutAttributes": "

Create or update an attribute on an Amazon ECS resource. If the attribute does not exist, it is created. If the attribute exists, its value is replaced with the specified value. To delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

", + "RegisterContainerInstance": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.

", + "RegisterTaskDefinition": "

Registers a new task definition from the supplied family and containerDefinitions. Optionally, you can add data volumes to your containers with the volumes parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

You can specify an IAM role for your task with the taskRoleArn parameter. When you specify an IAM role for a task, its containers can then use the latest versions of the AWS CLI or SDKs to make API requests to the AWS services that are specified in the IAM policy associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

You can specify a Docker networking mode for the containers in your task definition with the networkMode parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

", + "RunTask": "

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

The Amazon ECS API follows an eventual consistency model, due to the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. You should keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

  • Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.

  • Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.

", + "StartTask": "

Starts a new task from the specified task definition on the specified container instance or instances.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

", + "StopTask": "

Stops a running task.

When StopTask is called on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a default 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

", + "SubmitContainerStateChange": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Sent to acknowledge that a container changed states.

", + "SubmitTaskStateChange": "

This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.

Sent to acknowledge that a task changed states.

", + "UpdateContainerAgent": "

Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent does not interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.

UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux with the ecs-init service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide.

", + "UpdateContainerInstancesState": "

Modifies the status of an Amazon ECS container instance.

You can change the status of a container instance to DRAINING to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.

When you set a container instance to DRAINING, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING state are stopped immediately.

Service tasks on the container instance that are in the RUNNING state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent and maximumPercent. You can change the deployment configuration of your service using UpdateService.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during task replacement. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during task replacement, which enables you to define the replacement batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained (provided that the cluster resources required to do this are available). If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.

Any PENDING or RUNNING tasks that do not belong to a service are not affected; you must wait for them to finish or stop them manually.

A container instance has completed draining when it has no more RUNNING tasks. You can verify this using ListTasks.

When you set a container instance to ACTIVE, the Amazon ECS scheduler can begin scheduling tasks on the instance again.

", + "UpdateService": "

Modifies the desired count, deployment configuration, network configuration, or task definition used in a service.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you do not need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy):

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

You do not have authorization to perform the requested action.

", + "refs": { + } + }, + "AgentUpdateStatus": { + "base": null, + "refs": { + "ContainerInstance$agentUpdateStatus": "

The status of the most recent agent update. If an update has never been requested, this value is NULL.

" + } + }, + "AssignPublicIp": { + "base": null, + "refs": { + "AwsVpcConfiguration$assignPublicIp": "

Whether the task's elastic network interface receives a public IP address.

" + } + }, + "Attachment": { + "base": "

An object representing a container instance or task attachment.

", + "refs": { + "Attachments$member": null + } + }, + "AttachmentDetails": { + "base": null, + "refs": { + "Attachment$details": "

Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

" + } + }, + "AttachmentStateChange": { + "base": "

An object representing a change in state for a task attachment.

", + "refs": { + "AttachmentStateChanges$member": null + } + }, + "AttachmentStateChanges": { + "base": null, + "refs": { + "SubmitTaskStateChangeRequest$attachments": "

Any attachments associated with the state change request.

" + } + }, + "Attachments": { + "base": null, + "refs": { + "ContainerInstance$attachments": "

The elastic network interfaces associated with the container instance.

", + "Task$attachments": "

The elastic network adapter associated with the task if the task uses the awsvpc network mode.

" + } + }, + "Attribute": { + "base": "

An attribute is a name-value pair associated with an Amazon ECS object. Attributes enable you to extend the Amazon ECS data model by adding custom metadata to your resources. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "Attributes$member": null, + "RequiresAttributes$member": null + } + }, + "AttributeLimitExceededException": { + "base": "

You can apply up to 10 custom attributes per resource. You can view the attributes of a resource with ListAttributes. You can remove existing attributes on a resource with DeleteAttributes.

", + "refs": { + } + }, + "Attributes": { + "base": null, + "refs": { + "ContainerInstance$attributes": "

The attributes set for the container instance, either by the Amazon ECS container agent at instance registration or manually with the PutAttributes operation.

", + "DeleteAttributesRequest$attributes": "

The attributes to delete from your resource. You can specify up to 10 attributes per request. For custom attributes, specify the attribute name and target ID, but do not specify the value. If you specify the target ID using the short form, you must also specify the target type.

", + "DeleteAttributesResponse$attributes": "

A list of attribute objects that were successfully deleted from your resource.

", + "ListAttributesResponse$attributes": "

A list of attribute objects that meet the criteria of the request.

", + "PutAttributesRequest$attributes": "

The attributes to apply to your resource. You can specify up to 10 custom attributes per resource. You can specify up to 10 attributes in a single call.

", + "PutAttributesResponse$attributes": "

The attributes applied to your resource.

", + "RegisterContainerInstanceRequest$attributes": "

The container instance attributes that this container instance supports.

" + } + }, + "AwsVpcConfiguration": { + "base": "

An object representing the networking details for a task or service.

", + "refs": { + "NetworkConfiguration$awsvpcConfiguration": "

The VPC subnets and security groups associated with a task.

All specified subnets and security groups must be from the same VPC.

" + } + }, + "BlockedException": { + "base": "

Your AWS account has been blocked. Contact AWS Support for more information.

", + "refs": { + } + }, + "Boolean": { + "base": null, + "refs": { + "ContainerInstance$agentConnected": "

This parameter returns true if the agent is connected to Amazon ECS. Registered instances with an agent that may be unhealthy or stopped return false. Only instances connected to an agent can accept placement requests.

", + "UpdateServiceRequest$forceNewDeployment": "

Whether to force a new deployment of the service. Deployments are not forced by default. You can use this option to trigger a new deployment with no service definition changes. For example, you can update a service's tasks to use a newer Docker image with the same image/tag combination (my_image:latest) or to roll Fargate tasks onto a newer platform version.

" + } + }, + "BoxedBoolean": { + "base": null, + "refs": { + "ContainerDefinition$essential": "

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that is composed of multiple containers, you should group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

", + "ContainerDefinition$disableNetworking": "

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

", + "ContainerDefinition$privileged": "

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

", + "ContainerDefinition$readonlyRootFilesystem": "

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

", + "DeleteServiceRequest$force": "

If true, allows you to delete a service even if it has not been scaled down to zero tasks. It is only necessary to use this if the service is using the REPLICA scheduling strategy.

", + "DeregisterContainerInstanceRequest$force": "

Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the force option, these tasks remain running until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible.

Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer target group are deregistered. They begin connection draining according to the settings on the load balancer or target group.

", + "LinuxParameters$initProcessEnabled": "

Run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

", + "MountPoint$readOnly": "

If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

", + "VolumeFrom$readOnly": "

If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

" + } + }, + "BoxedInteger": { + "base": null, + "refs": { + "Container$exitCode": "

The exit code returned from the container.

", + "ContainerDefinition$memory": "

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If your containers are part of a task using the Fargate launch type, this field is optional and the only requirement is that the total amount of memory reserved for all containers within a task be lower than the task memory value.

For containers that are part of a task using the EC2 launch type, you must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", + "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit; however, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

You must specify a non-zero integer for one or both of memory or memoryReservation in container definitions. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed; otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", + "ContainerOverride$cpu": "

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

", + "ContainerOverride$memory": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.

", + "ContainerOverride$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.

", + "ContainerStateChange$exitCode": "

The exit code for the container, if the state change is a result of the container exiting.

", + "CreateServiceRequest$desiredCount": "

The number of instantiations of the specified task definition to place and keep running on your cluster.

", + "CreateServiceRequest$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 7,200 seconds during which the ECS service scheduler ignores health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

", + "Deployment$desiredCount": "

The most recent desired count of tasks that was specified for the service to deploy or maintain.

", + "DeploymentConfiguration$maximumPercent": "

The upper limit (as a percentage of the service's desiredCount) of the number of tasks that are allowed in the RUNNING or PENDING state in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by maximumPercent/100, rounded down to the nearest integer value.

", + "DeploymentConfiguration$minimumHealthyPercent": "

The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain in the RUNNING state in a service during a deployment. The minimum number of healthy tasks during a deployment is the desiredCount multiplied by minimumHealthyPercent/100, rounded up to the nearest integer value.

", + "HealthCheck$interval": "

The time period in seconds between each health check execution. You may specify between 5 and 300 seconds. The default value is 30 seconds.

", + "HealthCheck$timeout": "

The time period in seconds to wait for a health check to succeed before it is considered a failure. You may specify between 2 and 60 seconds. The default value is 5.

", + "HealthCheck$retries": "

The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is 3.

", + "HealthCheck$startPeriod": "

The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

", + "LinuxParameters$sharedMemorySize": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

", + "ListAttributesRequest$maxResults": "

The maximum number of cluster results returned by ListAttributes in paginated output. When this parameter is used, ListAttributes only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListAttributes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListAttributes returns up to 100 results and a nextToken value if applicable.

", + "ListClustersRequest$maxResults": "

The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.

", + "ListContainerInstancesRequest$maxResults": "

The maximum number of container instance results returned by ListContainerInstances in paginated output. When this parameter is used, ListContainerInstances only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListContainerInstances request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListContainerInstances returns up to 100 results and a nextToken value if applicable.

", + "ListServicesRequest$maxResults": "

The maximum number of service results returned by ListServices in paginated output. When this parameter is used, ListServices only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListServices request with the returned nextToken value. This value can be between 1 and 10. If this parameter is not used, then ListServices returns up to 10 results and a nextToken value if applicable.

", + "ListTaskDefinitionFamiliesRequest$maxResults": "

The maximum number of task definition family results returned by ListTaskDefinitionFamilies in paginated output. When this parameter is used, ListTaskDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitionFamilies request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitionFamilies returns up to 100 results and a nextToken value if applicable.

", + "ListTaskDefinitionsRequest$maxResults": "

The maximum number of task definition results returned by ListTaskDefinitions in paginated output. When this parameter is used, ListTaskDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTaskDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions returns up to 100 results and a nextToken value if applicable.

", + "ListTasksRequest$maxResults": "

The maximum number of task results returned by ListTasks in paginated output. When this parameter is used, ListTasks only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListTasks request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListTasks returns up to 100 results and a nextToken value if applicable.

", + "LoadBalancer$containerPort": "

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.

", + "NetworkBinding$containerPort": "

The port number on the container that is used with the network binding.

", + "NetworkBinding$hostPort": "

The port number on the host that is used with the network binding.

", + "PortMapping$containerPort": "

The port number on the container that is bound to the user-specified or automatically assigned host port.

If using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range (for more information, see hostPort). Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

", + "PortMapping$hostPort": "

The port number on the container instance to reserve for your container.

If using containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If using containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range; if this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. You should not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678 and 51679. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output, and a container instance may have up to 100 reserved ports at a time, including the default reserved ports (automatically assigned ports do not count toward the 100 reserved ports limit).

", + "RunTaskRequest$count": "

The number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks per call.

", + "Service$desiredCount": "

The desired number of instantiations of the task definition to keep running on the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

", + "Service$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started.

", + "ServiceRegistry$port": "

The port value used if your service discovery service specified an SRV record. This field is required if both the awsvpc network mode and SRV records are used.

", + "ServiceRegistry$containerPort": "

The port value, already specified in the task definition, to be used for your service discovery service. If the task definition your service task specifies uses the bridge or host network mode, you must specify a containerName and containerPort combination from the task definition. If the task definition your service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you must specify either a containerName and containerPort combination or a port value, but not both.

", + "SubmitContainerStateChangeRequest$exitCode": "

The exit code returned for the state change request.

", + "UpdateServiceRequest$desiredCount": "

The number of instantiations of the task to place and keep running in your service.

", + "UpdateServiceRequest$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler should ignore unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 1,800 seconds during which the ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.

" + } + }, + "ClientException": { + "base": "

These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.

", + "refs": { + } + }, + "Cluster": { + "base": "

A regional grouping of one or more container instances on which you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.

", + "refs": { + "Clusters$member": null, + "CreateClusterResponse$cluster": "

The full description of your new cluster.

", + "DeleteClusterResponse$cluster": "

The full description of the deleted cluster.

" + } + }, + "ClusterContainsContainerInstancesException": { + "base": "

You cannot delete a cluster that has registered container instances. You must first deregister the container instances before you can delete the cluster. For more information, see DeregisterContainerInstance.

", + "refs": { + } + }, + "ClusterContainsServicesException": { + "base": "

You cannot delete a cluster that contains services. You must first update the service to reduce its desired task count to 0 and then delete the service. For more information, see UpdateService and DeleteService.

", + "refs": { + } + }, + "ClusterContainsTasksException": { + "base": "

You cannot delete a cluster that has active tasks.

", + "refs": { + } + }, + "ClusterField": { + "base": null, + "refs": { + "ClusterFieldList$member": null + } + }, + "ClusterFieldList": { + "base": null, + "refs": { + "DescribeClustersRequest$include": "

Additional information about your clusters to be separated by launch type, including:

  • runningEC2TasksCount

  • runningFargateTasksCount

  • pendingEC2TasksCount

  • pendingFargateTasksCount

  • activeEC2ServiceCount

  • activeFargateServiceCount

  • drainingEC2ServiceCount

  • drainingFargateServiceCount

" + } + }, + "ClusterNotFoundException": { + "base": "

The specified cluster could not be found. You can view your available clusters with ListClusters. Amazon ECS clusters are region-specific.

", + "refs": { + } + }, + "Clusters": { + "base": null, + "refs": { + "DescribeClustersResponse$clusters": "

The list of clusters.

" + } + }, + "Compatibility": { + "base": null, + "refs": { + "CompatibilityList$member": null + } + }, + "CompatibilityList": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$requiresCompatibilities": "

The launch type required by the task. If no value is specified, it defaults to EC2.

", + "TaskDefinition$compatibilities": "

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

", + "TaskDefinition$requiresCompatibilities": "

The launch type the task is using.

" + } + }, + "Connectivity": { + "base": null, + "refs": { + "Task$connectivity": "

The connectivity status of a task.

" + } + }, + "Container": { + "base": "

A Docker container that is part of a task.

", + "refs": { + "Containers$member": null + } + }, + "ContainerCondition": { + "base": null, + "refs": { + "ContainerDependency$condition": null + } + }, + "ContainerDefinition": { + "base": "

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

", + "refs": { + "ContainerDefinitions$member": null + } + }, + "ContainerDefinitions": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$containerDefinitions": "

A list of container definitions in JSON format that describe the different containers that make up your task.

", + "TaskDefinition$containerDefinitions": "

A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" + } + }, + "ContainerDependencies": { + "base": null, + "refs": { + "ContainerDefinition$dependsOn": null + } + }, + "ContainerDependency": { + "base": null, + "refs": { + "ContainerDependencies$member": null + } + }, + "ContainerInstance": { + "base": "

An EC2 instance that is running the Amazon ECS agent and has been registered with a cluster.

", + "refs": { + "ContainerInstances$member": null, + "DeregisterContainerInstanceResponse$containerInstance": "

The container instance that was deregistered.

", + "RegisterContainerInstanceResponse$containerInstance": "

The container instance that was registered.

", + "UpdateContainerAgentResponse$containerInstance": "

The container instance for which the container agent was updated.

" + } + }, + "ContainerInstanceStatus": { + "base": null, + "refs": { + "ListContainerInstancesRequest$status": "

Filters the container instances by status. For example, if you specify the DRAINING status, the results include only container instances that have been set to DRAINING using UpdateContainerInstancesState. If you do not specify this parameter, the default is to include container instances set to ACTIVE and DRAINING.

", + "UpdateContainerInstancesStateRequest$status": "

The container instance state with which to update the container instance.

" + } + }, + "ContainerInstances": { + "base": null, + "refs": { + "DescribeContainerInstancesResponse$containerInstances": "

The list of container instances.

", + "UpdateContainerInstancesStateResponse$containerInstances": "

The list of container instances.

" + } + }, + "ContainerOverride": { + "base": "

The overrides that should be sent to a container.

", + "refs": { + "ContainerOverrides$member": null + } + }, + "ContainerOverrides": { + "base": null, + "refs": { + "TaskOverride$containerOverrides": "

One or more container overrides sent to a task.

" + } + }, + "ContainerStateChange": { + "base": "

An object representing a change in state for a container.

", + "refs": { + "ContainerStateChanges$member": null + } + }, + "ContainerStateChanges": { + "base": null, + "refs": { + "SubmitTaskStateChangeRequest$containers": "

Any containers associated with the state change request.

" + } + }, + "Containers": { + "base": null, + "refs": { + "Task$containers": "

The containers associated with the task.

" + } + }, + "CreateClusterRequest": { + "base": null, + "refs": { + } + }, + "CreateClusterResponse": { + "base": null, + "refs": { + } + }, + "CreateServiceRequest": { + "base": null, + "refs": { + } + }, + "CreateServiceResponse": { + "base": null, + "refs": { + } + }, + "DeleteAttributesRequest": { + "base": null, + "refs": { + } + }, + "DeleteAttributesResponse": { + "base": null, + "refs": { + } + }, + "DeleteClusterRequest": { + "base": null, + "refs": { + } + }, + "DeleteClusterResponse": { + "base": null, + "refs": { + } + }, + "DeleteServiceRequest": { + "base": null, + "refs": { + } + }, + "DeleteServiceResponse": { + "base": null, + "refs": { + } + }, + "Deployment": { + "base": "

The details of an Amazon ECS service deployment.

", + "refs": { + "Deployments$member": null + } + }, + "DeploymentConfiguration": { + "base": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

", + "refs": { + "CreateServiceRequest$deploymentConfiguration": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

", + "Service$deploymentConfiguration": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

", + "UpdateServiceRequest$deploymentConfiguration": "

Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.

" + } + }, + "Deployments": { + "base": null, + "refs": { + "Service$deployments": "

The current state of deployments for the service.

" + } + }, + "DeregisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "DeregisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DeregisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeClustersRequest": { + "base": null, + "refs": { + } + }, + "DescribeClustersResponse": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "DescribeContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "DescribeServicesRequest": { + "base": null, + "refs": { + } + }, + "DescribeServicesResponse": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "DescribeTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "DescribeTasksRequest": { + "base": null, + "refs": { + } + }, + "DescribeTasksResponse": { + "base": null, + "refs": { + } + }, + "DesiredStatus": { + "base": null, + "refs": { + "ListTasksRequest$desiredStatus": "

The task desired status with which to filter the ListTasks results. Specifying a desiredStatus of STOPPED limits the results to tasks that Amazon ECS has set the desired status to STOPPED, which can be useful for debugging tasks that are not starting properly or have died or finished. The default status filter is RUNNING, which shows tasks that Amazon ECS has set the desired status to RUNNING.

Although you can filter results based on a desired status of PENDING, this does not return any results because Amazon ECS never sets the desired status of a task to that value (only a task's lastStatus may have a value of PENDING).

" + } + }, + "Device": { + "base": "

An object representing a container instance host device.

", + "refs": { + "DevicesList$member": null + } + }, + "DeviceCgroupPermission": { + "base": null, + "refs": { + "DeviceCgroupPermissions$member": null + } + }, + "DeviceCgroupPermissions": { + "base": null, + "refs": { + "Device$permissions": "

The explicit permissions to provide to the container for the device. By default, the container has permissions for read, write, and mknod for the device.

" + } + }, + "DevicesList": { + "base": null, + "refs": { + "LinuxParameters$devices": "

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" + } + }, + "DiscoverPollEndpointRequest": { + "base": null, + "refs": { + } + }, + "DiscoverPollEndpointResponse": { + "base": null, + "refs": { + } + }, + "DockerLabelsMap": { + "base": null, + "refs": { + "ContainerDefinition$dockerLabels": "

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + } + }, + "Double": { + "base": null, + "refs": { + "Resource$doubleValue": "

When the doubleValue type is set, the value of the resource must be a double precision floating-point type.

" + } + }, + "EnvironmentVariables": { + "base": null, + "refs": { + "ContainerDefinition$environment": "

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

", + "ContainerOverride$environment": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name.

" + } + }, + "Failure": { + "base": "

A failed resource.

", + "refs": { + "Failures$member": null + } + }, + "Failures": { + "base": null, + "refs": { + "DescribeClustersResponse$failures": "

Any failures associated with the call.

", + "DescribeContainerInstancesResponse$failures": "

Any failures associated with the call.

", + "DescribeServicesResponse$failures": "

Any failures associated with the call.

", + "DescribeTasksResponse$failures": "

Any failures associated with the call.

", + "RunTaskResponse$failures": "

Any failures associated with the call.

", + "StartTaskResponse$failures": "

Any failures associated with the call.

", + "UpdateContainerInstancesStateResponse$failures": "

Any failures associated with the call.

" + } + }, + "HealthCheck": { + "base": "

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

", + "refs": { + "ContainerDefinition$healthCheck": "

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + } + }, + "HealthStatus": { + "base": null, + "refs": { + "Container$healthStatus": "

The health status of the container. If health checks are not configured for this container in its task definition, then it reports health status as UNKNOWN.

", + "Task$healthStatus": "

The health status for the task, which is determined by the health of the essential containers in the task. If all essential containers in the task are reporting as HEALTHY, then the task status also reports as HEALTHY. If any essential containers in the task are reporting as UNHEALTHY or UNKNOWN, then the task status also reports as UNHEALTHY or UNKNOWN, accordingly.

The Amazon ECS container agent does not monitor or report on Docker health checks that are embedded in a container image (such as those specified in a parent image or from the image's Dockerfile) and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

" + } + }, + "HostEntry": { + "base": "

Hostnames and IP address entries that are added to the /etc/hosts file of a container via the extraHosts parameter of its ContainerDefinition.

", + "refs": { + "HostEntryList$member": null + } + }, + "HostEntryList": { + "base": null, + "refs": { + "ContainerDefinition$extraHosts": "

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. If using the Fargate launch type, this may be used to list non-Fargate hosts to which the container can talk. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers.

" + } + }, + "HostVolumeProperties": { + "base": "

Details on a container instance host volume.

", + "refs": { + "Volume$host": "

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" + } + }, + "Integer": { + "base": null, + "refs": { + "Cluster$registeredContainerInstancesCount": "

The number of container instances registered into the cluster.

", + "Cluster$runningTasksCount": "

The number of tasks in the cluster that are in the RUNNING state.

", + "Cluster$pendingTasksCount": "

The number of tasks in the cluster that are in the PENDING state.

", + "Cluster$activeServicesCount": "

The number of services that are running on the cluster in an ACTIVE state. You can view these services with ListServices.

", + "ContainerDefinition$cpu": "

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2; however, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to 2 CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

", + "ContainerInstance$runningTasksCount": "

The number of tasks on the container instance that are in the RUNNING status.

", + "ContainerInstance$pendingTasksCount": "

The number of tasks on the container instance that are in the PENDING status.

", + "Deployment$pendingCount": "

The number of tasks in the deployment that are in the PENDING status.

", + "Deployment$runningCount": "

The number of tasks in the deployment that are in the RUNNING status.

", + "Resource$integerValue": "

When the integerValue type is set, the value of the resource must be an integer.

", + "Service$runningCount": "

The number of tasks in the cluster that are in the RUNNING state.

", + "Service$pendingCount": "

The number of tasks in the cluster that are in the PENDING state.

", + "TaskDefinition$revision": "

The revision of the task in a particular family. The revision is a version number of a task definition in a family. When you register a task definition for the first time, the revision is 1; each time you register a new revision of a task definition in the same family, the revision value always increases by one (even if you have deregistered previous revisions in this family).

", + "Tmpfs$size": "

The size (in MiB) of the tmpfs volume.

", + "Ulimit$softLimit": "

The soft limit for the ulimit type.

", + "Ulimit$hardLimit": "

The hard limit for the ulimit type.

" + } + }, + "InvalidParameterException": { + "base": "

The specified parameter is invalid. Review the available parameters for the API request.

", + "refs": { + } + }, + "IpcMode": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$ipcMode": null, + "TaskDefinition$ipcMode": null + } + }, + "KernelCapabilities": { + "base": "

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information on these Linux capabilities, see the capabilities(7) Linux manual page.

", + "refs": { + "LinuxParameters$capabilities": "

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.

If you are using tasks that use the Fargate launch type, capabilities is supported but the add parameter is not supported.

" + } + }, + "KeyValuePair": { + "base": "

A key and value pair object.

", + "refs": { + "AttachmentDetails$member": null, + "EnvironmentVariables$member": null + } + }, + "LaunchType": { + "base": null, + "refs": { + "CreateServiceRequest$launchType": "

The launch type on which to run your service.

", + "Deployment$launchType": "

The launch type on which your service is running.

", + "ListServicesRequest$launchType": "

The launch type for the services to list.

", + "ListTasksRequest$launchType": "

The launch type for services to list.

", + "RunTaskRequest$launchType": "

The launch type on which to run your task.

", + "Service$launchType": "

The launch type on which your service is running.

", + "Task$launchType": "

The launch type on which your task is running.

" + } + }, + "LinuxParameters": { + "base": "

Linux-specific options that are applied to the container, such as Linux KernelCapabilities.

", + "refs": { + "ContainerDefinition$linuxParameters": "

Linux-specific modifications that are applied to the container, such as Linux KernelCapabilities.

This parameter is not supported for Windows containers.

" + } + }, + "ListAttributesRequest": { + "base": null, + "refs": { + } + }, + "ListAttributesResponse": { + "base": null, + "refs": { + } + }, + "ListClustersRequest": { + "base": null, + "refs": { + } + }, + "ListClustersResponse": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesRequest": { + "base": null, + "refs": { + } + }, + "ListContainerInstancesResponse": { + "base": null, + "refs": { + } + }, + "ListServicesRequest": { + "base": null, + "refs": { + } + }, + "ListServicesResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionFamiliesResponse": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsRequest": { + "base": null, + "refs": { + } + }, + "ListTaskDefinitionsResponse": { + "base": null, + "refs": { + } + }, + "ListTasksRequest": { + "base": null, + "refs": { + } + }, + "ListTasksResponse": { + "base": null, + "refs": { + } + }, + "LoadBalancer": { + "base": "

Details on a load balancer that is used with a service.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

", + "refs": { + "LoadBalancers$member": null + } + }, + "LoadBalancers": { + "base": null, + "refs": { + "CreateServiceRequest$loadBalancers": "

A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

", + "Service$loadBalancers": "

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers; Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + } + }, + "LogConfiguration": { + "base": "

Log configuration options to send to a custom log driver for the container.

", + "refs": { + "ContainerDefinition$logConfiguration": "

The log configuration specification for the container.

If using the Fargate launch type, the only supported value is awslogs.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + } + }, + "LogConfigurationOptionsMap": { + "base": null, + "refs": { + "LogConfiguration$options": "

The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + } + }, + "LogDriver": { + "base": null, + "refs": { + "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default. If using the Fargate launch type, the only supported value is awslogs. For more information about using the awslogs driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + } + }, + "Long": { + "base": null, + "refs": { + "ContainerInstance$version": "

The version counter for the container instance. Every time a container instance experiences a change that triggers a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS container instance state with CloudWatch Events, you can compare the version of a container instance reported by the Amazon ECS APIs with the version reported in CloudWatch Events for the container instance (inside the detail object) to verify that the version in your event stream is current.

", + "Resource$longValue": "

When the longValue type is set, the value of the resource must be a long integer.

", + "Task$version": "

The version counter for the task. Every time a task experiences a change that triggers a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS APIs with the version reported in CloudWatch Events for the task (inside the detail object) to verify that the version in your event stream is current.

" + } + }, + "MissingVersionException": { + "base": "

Amazon ECS is unable to determine the current version of the Amazon ECS container agent on the container instance and does not have enough information to proceed with an update. This could be because the agent running on the container instance is an older or custom version that does not use our version information.

", + "refs": { + } + }, + "MountPoint": { + "base": "

Details on a volume mount point that is used in a container definition.

", + "refs": { + "MountPointList$member": null + } + }, + "MountPointList": { + "base": null, + "refs": { + "ContainerDefinition$mountPoints": "

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount points cannot be across drives.

" + } + }, + "NetworkBinding": { + "base": "

Details on the network bindings between a container and its host container instance. After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

", + "refs": { + "NetworkBindings$member": null + } + }, + "NetworkBindings": { + "base": null, + "refs": { + "Container$networkBindings": "

The network bindings associated with the container.

", + "ContainerStateChange$networkBindings": "

Any network bindings associated with the container.

", + "SubmitContainerStateChangeRequest$networkBindings": "

The network bindings of the container.

" + } + }, + "NetworkConfiguration": { + "base": "

An object representing the network configuration for a task or service.

", + "refs": { + "CreateServiceRequest$networkConfiguration": "

The network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

", + "Deployment$networkConfiguration": "

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

", + "RunTaskRequest$networkConfiguration": "

The network configuration for the task. This parameter is required for task definitions that use the awsvpc network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

", + "Service$networkConfiguration": "

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

", + "StartTaskRequest$networkConfiguration": "

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

", + "UpdateServiceRequest$networkConfiguration": "

The network configuration for the service. This parameter is required for task definitions that use the awsvpc network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Updating a service to add a subnet to a list of existing subnets does not trigger a service deployment. For example, if your network configuration change is to keep the existing subnets and simply add another subnet to the network configuration, this does not trigger a new service deployment.

" + } + }, + "NetworkInterface": { + "base": "

An object representing the elastic network interface for tasks that use the awsvpc network mode.

", + "refs": { + "NetworkInterfaces$member": null + } + }, + "NetworkInterfaces": { + "base": null, + "refs": { + "Container$networkInterfaces": "

The network interfaces associated with the container.

" + } + }, + "NetworkMode": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$networkMode": "

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. The default Docker network mode is bridge. If using the Fargate launch type, the awsvpc network mode is required. If using the EC2 launch type, any network mode can be used. If the network mode is set to none, you can't specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

If the network mode is awsvpc, the task is allocated an Elastic Network Interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

If the network mode is host, you can't run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode.

For more information, see Network settings in the Docker run reference.

", + "TaskDefinition$networkMode": "

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. The default Docker network mode is bridge. If using the Fargate launch type, the awsvpc network mode is required. If using the EC2 launch type, any network mode can be used. If the network mode is set to none, you can't specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

If the network mode is awsvpc, the task is allocated an Elastic Network Interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only the Amazon ECS-optimized AMI, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you can't run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" + } + }, + "NoUpdateAvailableException": { + "base": "

There is no update available for this Amazon ECS container agent. This could be because the agent is already running the latest version, or it is so old that there is no update path to the current version.

", + "refs": { + } + }, + "PidMode": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$pidMode": null, + "TaskDefinition$pidMode": null + } + }, + "PlacementConstraint": { + "base": "

An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "PlacementConstraints$member": null + } + }, + "PlacementConstraintType": { + "base": null, + "refs": { + "PlacementConstraint$type": "

The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. The value distinctInstance is not supported in task definitions.

" + } + }, + "PlacementConstraints": { + "base": null, + "refs": { + "CreateServiceRequest$placementConstraints": "

An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at run time).

", + "RunTaskRequest$placementConstraints": "

An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at run time).

", + "Service$placementConstraints": "

The placement constraints for the tasks in the service.

" + } + }, + "PlacementStrategies": { + "base": null, + "refs": { + "CreateServiceRequest$placementStrategy": "

The placement strategy objects to use for tasks in your service. You can specify a maximum of five strategy rules per service.

", + "RunTaskRequest$placementStrategy": "

The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task.

", + "Service$placementStrategy": "

The placement strategy that determines how tasks for the service are placed.

" + } + }, + "PlacementStrategy": { + "base": "

The task placement strategy for a task or service. For more information, see Task Placement Strategies in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "PlacementStrategies$member": null + } + }, + "PlacementStrategyType": { + "base": null, + "refs": { + "PlacementStrategy$type": "

The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task).

" + } + }, + "PlatformTaskDefinitionIncompatibilityException": { + "base": "

The specified platform version does not satisfy the task definition's required capabilities.

", + "refs": { + } + }, + "PlatformUnknownException": { + "base": "

The specified platform version does not exist.

", + "refs": { + } + }, + "PortMapping": { + "base": "

Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.

If using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the networkBindings section of DescribeTasks API responses.

", + "refs": { + "PortMappingList$member": null + } + }, + "PortMappingList": { + "base": null, + "refs": { + "ContainerDefinition$portMappings": "

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section of DescribeTasks responses.

" + } + }, + "PutAttributesRequest": { + "base": null, + "refs": { + } + }, + "PutAttributesResponse": { + "base": null, + "refs": { + } + }, + "RegisterContainerInstanceRequest": { + "base": null, + "refs": { + } + }, + "RegisterContainerInstanceResponse": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionRequest": { + "base": null, + "refs": { + } + }, + "RegisterTaskDefinitionResponse": { + "base": null, + "refs": { + } + }, + "RepositoryCredentials": { + "base": "

The repository credentials for private registry authentication.

", + "refs": { + "ContainerDefinition$repositoryCredentials": "

The private repository authentication credentials to use.

" + } + }, + "RequiresAttributes": { + "base": null, + "refs": { + "TaskDefinition$requiresAttributes": "

The container instance attributes required by your task. This field is not valid if using the Fargate launch type for your task.

" + } + }, + "Resource": { + "base": "

Describes the resources available for a container instance.

", + "refs": { + "Resources$member": null + } + }, + "Resources": { + "base": null, + "refs": { + "ContainerInstance$remainingResources": "

For CPU and memory resource types, this parameter describes the remaining CPU and memory that has not already been allocated to tasks and is therefore available for new tasks. For port resource types, this parameter describes the ports that were reserved by the Amazon ECS container agent (at instance registration time) and any task containers that have reserved port mappings on the host (with the host or bridge network mode). Any port that is not specified here is available for new tasks.

", + "ContainerInstance$registeredResources": "

For CPU and memory resource types, this parameter describes the amount of each resource that was available on the container instance when the container agent registered it with Amazon ECS; this value represents the total amount of CPU and memory that can be allocated on this container instance to tasks. For port resource types, this parameter describes the ports that were reserved by the Amazon ECS container agent when it registered the container instance with Amazon ECS.

", + "RegisterContainerInstanceRequest$totalResources": "

The resources available on the instance.

" + } + }, + "RunTaskRequest": { + "base": null, + "refs": { + } + }, + "RunTaskResponse": { + "base": null, + "refs": { + } + }, + "SchedulingStrategy": { + "base": null, + "refs": { + "CreateServiceRequest$schedulingStrategy": "

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, there is no need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.

    Fargate tasks do not support the DAEMON scheduling strategy.

", + "ListServicesRequest$schedulingStrategy": "

The scheduling strategy for services to list.

", + "Service$schedulingStrategy": "

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each container instance in your cluster. When you are using this strategy, do not specify a desired number of tasks or any task placement strategies.

    Fargate tasks do not support the DAEMON scheduling strategy.

" + } + }, + "Scope": { + "base": null, + "refs": { + "DockerVolumeConfiguration$scope": "

The scope for the Docker volume that determines its lifecycle. Docker volumes that are scoped to a task are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as shared persist after the task stops.

" + } + }, + "Secret": { + "base": null, + "refs": { + "SecretList$member": null + } + }, + "SecretList": { + "base": null, + "refs": { + "ContainerDefinition$secrets": null, + "LogConfiguration$secretOptions": null + } + }, + "ServerException": { + "base": "

These errors are usually caused by a server issue.

", + "refs": { + } + }, + "Service": { + "base": "

Details on a service within a cluster.

", + "refs": { + "CreateServiceResponse$service": "

The full description of your service following the create call.

", + "DeleteServiceResponse$service": "

The full description of the deleted service.

", + "Services$member": null, + "UpdateServiceResponse$service": "

The full description of your service following the update call.

" + } + }, + "ServiceEvent": { + "base": "

Details on an event associated with a service.

", + "refs": { + "ServiceEvents$member": null + } + }, + "ServiceEvents": { + "base": null, + "refs": { + "Service$events": "

The event stream for your service. A maximum of 100 of the latest events are displayed.

" + } + }, + "ServiceNotActiveException": { + "base": "

The specified service is not active. You can't update a service that is inactive. If you have previously deleted a service, you can re-create it with CreateService.

", + "refs": { + } + }, + "ServiceNotFoundException": { + "base": "

The specified service could not be found. You can view your available services with ListServices. Amazon ECS services are cluster-specific and region-specific.

", + "refs": { + } + }, + "ServiceRegistries": { + "base": null, + "refs": { + "CreateServiceRequest$serviceRegistries": "

The details of the service discovery registries to assign to this service. For more information, see Service Discovery.

Service discovery is supported for Fargate tasks if using platform version v1.1.0 or later. For more information, see AWS Fargate Platform Versions.

", + "Service$serviceRegistries": "

" + } + }, + "ServiceRegistry": { + "base": "

Details of the service registry.

", + "refs": { + "ServiceRegistries$member": null + } + }, + "Services": { + "base": null, + "refs": { + "DescribeServicesResponse$services": "

The list of services described.

" + } + }, + "SortOrder": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$sort": "

The order in which to sort the results. Valid values are ASC and DESC. By default (ASC), task definitions are listed lexicographically by family name and in ascending numerical order by revision so that the newest task definitions in a family are listed last. Setting this parameter to DESC reverses the sort order on family name and revision so that the newest task definitions in a family are listed first.

" + } + }, + "StartTaskRequest": { + "base": null, + "refs": { + } + }, + "StartTaskResponse": { + "base": null, + "refs": { + } + }, + "StopTaskRequest": { + "base": null, + "refs": { + } + }, + "StopTaskResponse": { + "base": null, + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Attachment$id": "

The unique identifier for the attachment.

", + "Attachment$type": "

The type of the attachment, such as ElasticNetworkInterface.

", + "Attachment$status": "

The status of the attachment. Valid values are PRECREATED, CREATED, ATTACHING, ATTACHED, DETACHING, DETACHED, and DELETED.

", + "AttachmentStateChange$attachmentArn": "

The Amazon Resource Name (ARN) of the attachment.

", + "AttachmentStateChange$status": "

The status of the attachment.

", + "Attribute$name": "

The name of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, and periods are allowed.

", + "Attribute$value": "

The value of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, and spaces are allowed.

", + "Attribute$targetId": "

The ID of the target. You can specify the short form ID for a resource or the full Amazon Resource Name (ARN).

", + "ClientException$message": null, + "Cluster$clusterArn": "

The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs namespace, followed by the Region of the cluster, the AWS account ID of the cluster owner, the cluster namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test .

", + "Cluster$clusterName": "

A user-generated string that you use to identify your cluster.

", + "Cluster$status": "

The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE indicates that you can register container instances with the cluster and the associated instances can accept tasks.

", + "Container$containerArn": "

The Amazon Resource Name (ARN) of the container.

", + "Container$taskArn": "

The ARN of the task.

", + "Container$name": "

The name of the container.

", + "Container$lastStatus": "

The last known status of the container.

", + "Container$reason": "

A short (255 max characters) human-readable string to provide additional details about a running or stopped container.

", + "ContainerDefinition$name": "

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

", + "ContainerDefinition$image": "

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image are not propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

", + "ContainerDefinition$hostname": "

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if using the awsvpc networkMode.

", + "ContainerDefinition$user": "

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

This parameter is not supported for Windows containers.

", + "ContainerDefinition$workingDirectory": "

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

", + "ContainerInstance$containerInstanceArn": "

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

", + "ContainerInstance$ec2InstanceId": "

The EC2 instance ID of the container instance.

", + "ContainerInstance$status": "

The status of the container instance. The valid values are ACTIVE, INACTIVE, or DRAINING. ACTIVE indicates that the container instance can accept tasks. DRAINING indicates that new tasks are not placed on the container instance and any service tasks running on the container instance are removed if possible. For more information, see Container Instance Draining in the Amazon Elastic Container Service Developer Guide.

", + "ContainerOverride$name": "

The name of the container that receives the override. This parameter is required if any override is specified.

", + "ContainerStateChange$containerName": "

The name of the container.

", + "ContainerStateChange$reason": "

The reason for the state change.

", + "ContainerStateChange$status": "

The status of the container.

", + "CreateClusterRequest$clusterName": "

The name of your cluster. If you do not specify a name for your cluster, you create a cluster named default. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", + "CreateServiceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.

", + "CreateServiceRequest$serviceName": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.

", + "CreateServiceRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

", + "CreateServiceRequest$clientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

", + "CreateServiceRequest$platformVersion": "

The platform version on which to run your service. If one is not specified, the latest version is used by default.

", + "CreateServiceRequest$role": "

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode, in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

", + "DeleteAttributesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete attributes. If you do not specify a cluster, the default cluster is assumed.

", + "DeleteClusterRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster to delete.

", + "DeleteServiceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to delete. If you do not specify a cluster, the default cluster is assumed.

", + "DeleteServiceRequest$service": "

The name of the service to delete.

", + "Deployment$id": "

The ID of the deployment.

", + "Deployment$status": "

The status of the deployment. Valid values are PRIMARY (for the most recent deployment), ACTIVE (for previous deployments that still have tasks running, but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments that have been completely replaced).

", + "Deployment$taskDefinition": "

The most recent task definition that was specified for the service to use.

", + "Deployment$platformVersion": "

The platform version on which your service is running.

", + "DeregisterContainerInstanceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to deregister. If you do not specify a cluster, the default cluster is assumed.

", + "DeregisterContainerInstanceRequest$containerInstance": "

The container instance ID or full ARN of the container instance to deregister. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

", + "DeregisterTaskDefinitionRequest$taskDefinition": "

The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition to deregister. You must specify a revision.

", + "DescribeContainerInstancesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to describe. If you do not specify a cluster, the default cluster is assumed.

", + "DescribeServicesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to describe. If you do not specify a cluster, the default cluster is assumed.

", + "DescribeTaskDefinitionRequest$taskDefinition": "

The family for the latest ACTIVE revision, family and revision (family:revision) for a specific revision in the family, or full Amazon Resource Name (ARN) of the task definition to describe.

", + "DescribeTasksRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to describe. If you do not specify a cluster, the default cluster is assumed.

", + "Device$hostPath": "

The path for the device on the host container instance.

", + "Device$containerPath": "

The path inside the container at which to expose the host device.

", + "DiscoverPollEndpointRequest$containerInstance": "

The container instance ID or full ARN of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID .

", + "DiscoverPollEndpointRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs to.

", + "DiscoverPollEndpointResponse$endpoint": "

The endpoint for the Amazon ECS agent to poll.

", + "DiscoverPollEndpointResponse$telemetryEndpoint": "

The telemetry endpoint for the Amazon ECS agent.

", + "DockerLabelsMap$key": null, + "DockerLabelsMap$value": null, + "Failure$arn": "

The Amazon Resource Name (ARN) of the failed resource.

", + "Failure$reason": "

The reason for the failure.

", + "HostEntry$hostname": "

The hostname to use in the /etc/hosts entry.

", + "HostEntry$ipAddress": "

The IP address to use in the /etc/hosts entry.

", + "HostVolumeProperties$sourcePath": "

The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

If you are using the Fargate launch type, the sourcePath parameter is not supported.

", + "KeyValuePair$name": "

The name of the key value pair. For environment variables, this is the name of the environment variable.

", + "KeyValuePair$value": "

The value of the key value pair. For environment variables, this is the value of the environment variable.

", + "ListAttributesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed.

", + "ListAttributesRequest$attributeName": "

The name of the attribute with which to filter the results.

", + "ListAttributesRequest$attributeValue": "

The value of the attribute with which to filter results. You must also specify an attribute name to use this parameter.

", + "ListAttributesRequest$nextToken": "

The nextToken value returned from a previous paginated ListAttributes request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListAttributesResponse$nextToken": "

The nextToken value to include in a future ListAttributes request. When the results of a ListAttributes request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListClustersRequest$nextToken": "

The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListClustersResponse$nextToken": "

The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListContainerInstancesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. If you do not specify a cluster, the default cluster is assumed.

", + "ListContainerInstancesRequest$filter": "

You can filter the results of a ListContainerInstances operation with cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", + "ListContainerInstancesRequest$nextToken": "

The nextToken value returned from a previous paginated ListContainerInstances request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListContainerInstancesResponse$nextToken": "

The nextToken value to include in a future ListContainerInstances request. When the results of a ListContainerInstances request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListServicesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. If you do not specify a cluster, the default cluster is assumed.

", + "ListServicesRequest$nextToken": "

The nextToken value returned from a previous paginated ListServices request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListServicesResponse$nextToken": "

The nextToken value to include in a future ListServices request. When the results of a ListServices request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.

", + "ListTaskDefinitionFamiliesRequest$familyPrefix": "

The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. If you specify a familyPrefix, only task definition family names that begin with the familyPrefix string are returned.

", + "ListTaskDefinitionFamiliesRequest$nextToken": "

The nextToken value returned from a previous paginated ListTaskDefinitionFamilies request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTaskDefinitionFamiliesResponse$nextToken": "

The nextToken value to include in a future ListTaskDefinitionFamilies request. When the results of a ListTaskDefinitionFamilies request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListTaskDefinitionsRequest$familyPrefix": "

The full family name with which to filter the ListTaskDefinitions results. Specifying a familyPrefix limits the listed task definitions to task definition revisions that belong to that family.

", + "ListTaskDefinitionsRequest$nextToken": "

The nextToken value returned from a previous paginated ListTaskDefinitions request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTaskDefinitionsResponse$nextToken": "

The nextToken value to include in a future ListTaskDefinitions request. When the results of a ListTaskDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListTasksRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks to list. If you do not specify a cluster, the default cluster is assumed.

", + "ListTasksRequest$containerInstance": "

The container instance ID or full ARN of the container instance with which to filter the ListTasks results. Specifying a containerInstance limits the results to tasks that belong to that container instance.

", + "ListTasksRequest$family": "

The name of the family with which to filter the ListTasks results. Specifying a family limits the results to tasks that belong to that family.

", + "ListTasksRequest$nextToken": "

The nextToken value returned from a previous paginated ListTasks request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTasksRequest$startedBy": "

The startedBy value with which to filter the task results. Specifying a startedBy value limits the results to tasks that were started with that value.

", + "ListTasksRequest$serviceName": "

The name of the service with which to filter the ListTasks results. Specifying a serviceName limits the results to tasks that belong to that service.

", + "ListTasksResponse$nextToken": "

The nextToken value to include in a future ListTasks request. When the results of a ListTasks request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "LoadBalancer$targetGroupArn": "

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.

If your service's task definition uses the awsvpc network mode (which is required for the Fargate launch type), you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

", + "LoadBalancer$loadBalancerName": "

The name of a load balancer.

", + "LoadBalancer$containerName": "

The name of the container (as it appears in a container definition) to associate with the load balancer.

", + "LogConfigurationOptionsMap$key": null, + "LogConfigurationOptionsMap$value": null, + "MountPoint$sourceVolume": "

The name of the volume to mount.

", + "MountPoint$containerPath": "

The path on the container to mount the host volume at.

", + "NetworkBinding$bindIP": "

The IP address that the container is bound to on the container instance.

", + "NetworkInterface$attachmentId": "

The attachment ID for the network interface.

", + "NetworkInterface$privateIpv4Address": "

The private IPv4 address for the network interface.

", + "NetworkInterface$ipv6Address": "

The private IPv6 address for the network interface.

", + "PlacementConstraint$expression": "

A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", + "PlacementStrategy$field": "

The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used.

", + "PutAttributesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply attributes. If you do not specify a cluster, the default cluster is assumed.

", + "RegisterContainerInstanceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container instance. If you do not specify a cluster, the default cluster is assumed.

", + "RegisterContainerInstanceRequest$instanceIdentityDocument": "

The instance identity document for the EC2 instance to register. This document can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/

", + "RegisterContainerInstanceRequest$instanceIdentityDocumentSignature": "

The instance identity document signature for the EC2 instance to register. This signature can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/signature/

", + "RegisterContainerInstanceRequest$containerInstanceArn": "

The ARN of the container instance (if it was previously registered).

", + "RegisterTaskDefinitionRequest$family": "

You must specify a family for a task definition, which allows you to track multiple versions of the same task definition. The family is used as a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", + "RegisterTaskDefinitionRequest$taskRoleArn": "

The short name or full Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", + "RegisterTaskDefinitionRequest$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", + "RegisterTaskDefinitionRequest$cpu": "

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

  • 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

  • 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

  • 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

  • 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

  • 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

", + "RegisterTaskDefinitionRequest$memory": "

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered.

Task-level CPU and memory parameters are ignored for Windows containers. We recommend specifying container-level resources for Windows containers.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

", + "RepositoryCredentials$credentialsParameter": "

The Amazon Resource Name (ARN) or name of the secret containing the private repository credentials.

", + "Resource$name": "

The name of the resource, such as CPU, MEMORY, PORTS, PORTS_UDP, or a user-defined resource.

", + "Resource$type": "

The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.

", + "RunTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.

", + "RunTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run. If a revision is not specified, the latest ACTIVE revision is used.

", + "RunTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", + "RunTaskRequest$group": "

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

", + "RunTaskRequest$platformVersion": "

The platform version on which to run your task. If one is not specified, the latest version is used by default.

", + "Secret$name": null, + "Secret$valueFrom": null, + "ServerException$message": null, + "Service$serviceArn": "

The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the Region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service .

", + "Service$serviceName": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.

", + "Service$clusterArn": "

The Amazon Resource Name (ARN) of the cluster that hosts the service.

", + "Service$status": "

The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE.

", + "Service$platformVersion": "

The platform version on which your task is running. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", + "Service$taskDefinition": "

The task definition to use for tasks in the service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.

", + "Service$roleArn": "

The ARN of the IAM role associated with the service that allows the Amazon ECS container agent to register container instances with an Elastic Load Balancing load balancer.

", + "ServiceEvent$id": "

The ID string of the event.

", + "ServiceEvent$message": "

The event message.

", + "ServiceRegistry$registryArn": "

The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is Amazon Route 53 Auto Naming. For more information, see Service.

", + "ServiceRegistry$containerName": "

The container name value, already specified in the task definition, to be used for your service discovery service. If the task definition that your service task specifies uses the bridge or host network mode, you must specify a containerName and containerPort combination from the task definition. If the task definition that your service task specifies uses the awsvpc network mode and a type SRV DNS record is used, you must specify either a containerName and containerPort combination or a port value, but not both.

", + "StartTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. If you do not specify a cluster, the default cluster is assumed.

", + "StartTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to start. If a revision is not specified, the latest ACTIVE revision is used.

", + "StartTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", + "StartTaskRequest$group": "

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

", + "StopTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. If you do not specify a cluster, the default cluster is assumed.

", + "StopTaskRequest$task": "

The task ID or full ARN entry of the task to stop.

", + "StopTaskRequest$reason": "

An optional message specified when a task is stopped. For example, if you are using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.

", + "StringList$member": null, + "SubmitContainerStateChangeRequest$cluster": "

The short name or full ARN of the cluster that hosts the container.

", + "SubmitContainerStateChangeRequest$task": "

The task ID or full Amazon Resource Name (ARN) of the task that hosts the container.

", + "SubmitContainerStateChangeRequest$containerName": "

The name of the container.

", + "SubmitContainerStateChangeRequest$status": "

The status of the state change request.

", + "SubmitContainerStateChangeRequest$reason": "

The reason for the state change request.

", + "SubmitContainerStateChangeResponse$acknowledgment": "

Acknowledgement of the state change.

", + "SubmitTaskStateChangeRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task.

", + "SubmitTaskStateChangeRequest$task": "

The task ID or full ARN of the task in the state change request.

", + "SubmitTaskStateChangeRequest$status": "

The status of the state change request.

", + "SubmitTaskStateChangeRequest$reason": "

The reason for the state change request.

", + "SubmitTaskStateChangeResponse$acknowledgment": "

Acknowledgement of the state change.

", + "Task$taskArn": "

The Amazon Resource Name (ARN) of the task.

", + "Task$clusterArn": "

The ARN of the cluster that hosts the task.

", + "Task$taskDefinitionArn": "

The ARN of the task definition that creates the task.

", + "Task$containerInstanceArn": "

The ARN of the container instances that host the task.

", + "Task$lastStatus": "

The last known status of the task.

", + "Task$desiredStatus": "

The desired status of the task.

", + "Task$cpu": "

The number of CPU units used by the task. It can be expressed as an integer using CPU units, for example 1024, or as a string using vCPUs, for example 1 vCPU or 1 vcpu, in a task definition. String values are converted to an integer indicating the CPU units when the task definition is registered.

If using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

  • 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

  • 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

  • 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

  • 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

  • 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

", + "Task$memory": "

The amount of memory (in MiB) used by the task. It can be expressed as an integer using MiB, for example 1024, or as a string using GB, for example 1GB or 1 GB, in a task definition. String values are converted to an integer indicating the MiB when the task definition is registered.

If using the EC2 launch type, this field is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

", + "Task$startedBy": "

The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", + "Task$stoppedReason": "

The reason the task was stopped.

", + "Task$group": "

The name of the task group associated with the task.

", + "Task$platformVersion": "

The platform version on which your task is running. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", + "TaskDefinition$taskDefinitionArn": "

The full Amazon Resource Name (ARN) of the task definition.

", + "TaskDefinition$family": "

The family of your task definition, used as the definition name.

", + "TaskDefinition$taskRoleArn": "

The ARN of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", + "TaskDefinition$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", + "TaskDefinition$cpu": "

The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

  • 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)

  • 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)

  • 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)

  • 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)

  • 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

", + "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used. If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

", + "TaskDefinitionPlacementConstraint$expression": "

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", + "TaskOverride$taskRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

", + "TaskOverride$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", + "Tmpfs$containerPath": "

The absolute file path where the tmpfs volume is to be mounted.

", + "UpdateContainerAgentRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is running on. If you do not specify a cluster, the default cluster is assumed.

", + "UpdateContainerAgentRequest$containerInstance": "

The container instance ID or full ARN entries for the container instance on which you would like to update the Amazon ECS container agent.

", + "UpdateContainerInstancesStateRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to update. If you do not specify a cluster, the default cluster is assumed.

", + "UpdateServiceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on. If you do not specify a cluster, the default cluster is assumed.

", + "UpdateServiceRequest$service": "

The name of the service to update.

", + "UpdateServiceRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used. If you modify the task definition with UpdateService, Amazon ECS spawns a task with the new version of the task definition and then stops an old task after the new version is running.

", + "UpdateServiceRequest$platformVersion": "

The platform version that your service should run.

", + "VersionInfo$agentVersion": "

The version number of the Amazon ECS container agent.

", + "VersionInfo$agentHash": "

The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

", + "VersionInfo$dockerVersion": "

The Docker version running on the container instance.

", + "Volume$name": "

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

", + "VolumeFrom$sourceContainer": "

The name of another container within the same task definition to mount volumes from.

" + } + }, + "StringList": { + "base": null, + "refs": { + "AwsVpcConfiguration$subnets": "

The subnets associated with the task or service. There is a limit of 10 subnets able to be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

", + "AwsVpcConfiguration$securityGroups": "

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups able to be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

", + "ContainerDefinition$links": "

The link parameter allows containers to communicate with each other without the need for port mappings. Only supported if the network mode of a task definition is set to bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. For more information about linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run .

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

", + "ContainerDefinition$entryPoint": "

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

", + "ContainerDefinition$command": "

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

", + "ContainerDefinition$dnsServers": "

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

", + "ContainerDefinition$dnsSearchDomains": "

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

", + "ContainerDefinition$dockerSecurityOptions": "

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

This parameter is not supported for Windows containers.

", + "ContainerOverride$command": "

The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.

", + "DescribeClustersRequest$clusters": "

A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) entries. If you do not specify a cluster, the default cluster is assumed.

", + "DescribeContainerInstancesRequest$containerInstances": "

A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) entries.

", + "DescribeServicesRequest$services": "

A list of services to describe. You may specify up to 10 services to describe in a single operation.

", + "DescribeTasksRequest$tasks": "

A list of up to 100 task IDs or full ARN entries.

", + "HealthCheck$command": "

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

", + "KernelCapabilities$add": "

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", + "KernelCapabilities$drop": "

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", + "ListClustersResponse$clusterArns": "

The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.

", + "ListContainerInstancesResponse$containerInstanceArns": "

The list of container instances with full ARN entries for each container instance associated with the specified cluster.

", + "ListServicesResponse$serviceArns": "

The list of full ARN entries for each service associated with the specified cluster.

", + "ListTaskDefinitionFamiliesResponse$families": "

The list of task definition family names that match the ListTaskDefinitionFamilies request.

", + "ListTaskDefinitionsResponse$taskDefinitionArns": "

The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions request.

", + "ListTasksResponse$taskArns": "

The list of task ARN entries for the ListTasks request.

", + "Resource$stringSetValue": "

When the stringSetValue type is set, the value of the resource must be a string type.

", + "StartTaskRequest$containerInstances": "

The container instance IDs or full ARN entries for the container instances on which you would like to place your task. You can specify up to 10 container instances.

", + "Tmpfs$mountOptions": "

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\"

", + "UpdateContainerInstancesStateRequest$containerInstances": "

A list of container instance IDs or full ARN entries.

" + } + }, + "SubmitContainerStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitContainerStateChangeResponse": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeRequest": { + "base": null, + "refs": { + } + }, + "SubmitTaskStateChangeResponse": { + "base": null, + "refs": { + } + }, + "Tags": { + "base": null, + "refs": { + "ContainerInstance$tags": "

The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

", + "ListTagsForResourceResponse$tags": "

The tags for the resource.

", + "Task$tags": "

The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + } + }, + "TargetNotFoundException": { + "base": "

The specified target could not be found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and region-specific.

", + "refs": { + } + }, + "TargetType": { + "base": null, + "refs": { + "Attribute$targetType": "

The type of the target with which to attach the attribute. This parameter is required if you use the short form ID for a resource instead of the full ARN.

", + "ListAttributesRequest$targetType": "

The type of the target with which to list attributes.

" + } + }, + "Task": { + "base": "

Details on a task in a cluster.

", + "refs": { + "StopTaskResponse$task": "

The task that was stopped.

", + "Tasks$member": null + } + }, + "TaskDefinition": { + "base": "

Details of a task definition.

", + "refs": { + "DeregisterTaskDefinitionResponse$taskDefinition": "

The full description of the deregistered task.

", + "DescribeTaskDefinitionResponse$taskDefinition": "

The full task definition description.

", + "RegisterTaskDefinitionResponse$taskDefinition": "

The full description of the registered task definition.

" + } + }, + "TaskDefinitionFamilyStatus": { + "base": null, + "refs": { + "ListTaskDefinitionFamiliesRequest$status": "

The task definition family status with which to filter the ListTaskDefinitionFamilies results. By default, both ACTIVE and INACTIVE task definition families are listed. If this parameter is set to ACTIVE, only task definition families that have an ACTIVE task definition revision are returned. If this parameter is set to INACTIVE, only task definition families that do not have any ACTIVE task definition revisions are returned. If you paginate the resulting output, be sure to keep the status value constant in each subsequent request.

" + } + }, + "TaskDefinitionPlacementConstraint": { + "base": "

An object representing a constraint on task placement in the task definition.

If you are using the Fargate launch type, task placement constraints are not supported.

For more information, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "TaskDefinitionPlacementConstraints$member": null + } + }, + "TaskDefinitionPlacementConstraintType": { + "base": null, + "refs": { + "TaskDefinitionPlacementConstraint$type": "

The type of constraint. The DistinctInstance constraint ensures that each task in a particular group is running on a different container instance. The MemberOf constraint restricts selection to be from a group of valid candidates.

" + } + }, + "TaskDefinitionPlacementConstraints": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$placementConstraints": "

An array of placement constraint objects to use for the task. You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at run time).

", + "TaskDefinition$placementConstraints": "

An array of placement constraint objects to use for tasks. This field is not valid if using the Fargate launch type for your task.

" + } + }, + "TaskDefinitionStatus": { + "base": null, + "refs": { + "ListTaskDefinitionsRequest$status": "

The task definition status with which to filter the ListTaskDefinitions results. By default, only ACTIVE task definitions are listed. By setting this parameter to INACTIVE, you can view task definitions that are INACTIVE as long as an active task or service still references them. If you paginate the resulting output, be sure to keep the status value constant in each subsequent request.

", + "TaskDefinition$status": "

The status of the task definition.

" + } + }, + "TaskOverride": { + "base": "

The overrides associated with a task.

", + "refs": { + "RunTaskRequest$overrides": "

A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.

", + "StartTaskRequest$overrides": "

A list of container overrides in JSON format that specify the name of a container in the specified task definition and the overrides it should receive. You can override the default command for a container (that is specified in the task definition or Docker image) with a command override. You can also override existing environment variables (that are specified in the task definition or Docker image) on a container or add new environment variables to it with an environment override.

A total of 8192 characters are allowed for overrides. This limit includes the JSON formatting characters of the override structure.

", + "Task$overrides": "

One or more container overrides.

" + } + }, + "Tasks": { + "base": null, + "refs": { + "DescribeTasksResponse$tasks": "

The list of tasks.

", + "RunTaskResponse$tasks": "

A full description of the tasks that were run. The tasks that were successfully placed on your cluster are described here.

", + "StartTaskResponse$tasks": "

A full description of the tasks that were started. Each task that was successfully placed on your container instances is described.

" + } + }, + "Timestamp": { + "base": null, + "refs": { + "ContainerInstance$registeredAt": "

The Unix time stamp for when the container instance was registered.

", + "Deployment$createdAt": "

The Unix time stamp for when the service was created.

", + "Deployment$updatedAt": "

The Unix time stamp for when the service was last updated.

", + "Service$createdAt": "

The Unix time stamp for when the service was created.

", + "ServiceEvent$createdAt": "

The Unix time stamp for when the event was triggered.

", + "SubmitTaskStateChangeRequest$pullStartedAt": "

The Unix time stamp for when the container image pull began.

", + "SubmitTaskStateChangeRequest$pullStoppedAt": "

The Unix time stamp for when the container image pull completed.

", + "SubmitTaskStateChangeRequest$executionStoppedAt": "

The Unix time stamp for when the task execution stopped.

", + "Task$connectivityAt": "

The Unix time stamp for when the task last went into CONNECTED status.

", + "Task$pullStartedAt": "

The Unix time stamp for when the container image pull began.

", + "Task$pullStoppedAt": "

The Unix time stamp for when the container image pull completed.

", + "Task$executionStoppedAt": "

The Unix time stamp for when the task execution stopped.

", + "Task$createdAt": "

The Unix time stamp for when the task was created (the task entered the PENDING state).

", + "Task$startedAt": "

The Unix time stamp for when the task started (the task transitioned from the PENDING state to the RUNNING state).

", + "Task$stoppingAt": "

The Unix time stamp for when the task stops (transitions from the RUNNING state to STOPPED).

", + "Task$stoppedAt": "

The Unix time stamp for when the task was stopped (the task transitioned from the RUNNING state to the STOPPED state).

" + } + }, + "Tmpfs": { + "base": "

The container path, mount options, and size of the tmpfs mount.

", + "refs": { + "TmpfsList$member": null + } + }, + "TmpfsList": { + "base": null, + "refs": { + "LinuxParameters$tmpfs": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" + } + }, + "TransportProtocol": { + "base": null, + "refs": { + "NetworkBinding$protocol": "

The protocol used for the network binding.

", + "PortMapping$protocol": "

The protocol used for the port mapping. Valid values are tcp and udp. The default is tcp.

" + } + }, + "Ulimit": { + "base": "

The ulimit settings to pass to the container.

", + "refs": { + "UlimitList$member": null + } + }, + "UlimitList": { + "base": null, + "refs": { + "ContainerDefinition$ulimits": "

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

This parameter is not supported for Windows containers.

" + } + }, + "UlimitName": { + "base": null, + "refs": { + "Ulimit$name": "

The type of the ulimit.

" + } + }, + "UnsupportedFeatureException": { + "base": "

The specified task is not supported in this region.

", + "refs": { + } + }, + "UpdateContainerAgentRequest": { + "base": null, + "refs": { + } + }, + "UpdateContainerAgentResponse": { + "base": null, + "refs": { + } + }, + "UpdateContainerInstancesStateRequest": { + "base": null, + "refs": { + } + }, + "UpdateContainerInstancesStateResponse": { + "base": null, + "refs": { + } + }, + "UpdateInProgressException": { + "base": "

There is already a current Amazon ECS container agent update in progress on the specified container instance. If the container agent becomes disconnected while it is in a transitional stage, such as PENDING or STAGING, the update process can get stuck in that state. However, when the agent reconnects, it resumes where it stopped previously.

", + "refs": { + } + }, + "UpdateServiceRequest": { + "base": null, + "refs": { + } + }, + "UpdateServiceResponse": { + "base": null, + "refs": { + } + }, + "VersionInfo": { + "base": "

The Docker and Amazon ECS container agent version information about a container instance.

", + "refs": { + "ContainerInstance$versionInfo": "

The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

", + "RegisterContainerInstanceRequest$versionInfo": "

The version information for the Amazon ECS container agent and Docker daemon running on the container instance.

" + } + }, + "Volume": { + "base": "

A data volume used in a task definition.

", + "refs": { + "VolumeList$member": null + } + }, + "VolumeFrom": { + "base": "

Details on a data volume from another container in the same task definition.

", + "refs": { + "VolumeFromList$member": null + } + }, + "VolumeFromList": { + "base": null, + "refs": { + "ContainerDefinition$volumesFrom": "

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + } + }, + "VolumeList": { + "base": null, + "refs": { + "RegisterTaskDefinitionRequest$volumes": "

A list of volume definitions in JSON format that containers in your task may use.

", + "TaskDefinition$volumes": "

The list of volumes in a task.

If you are using the Fargate launch type, the host and sourcePath parameters are not supported.

For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.

" + } + } + } +} diff --git a/agent/ecs_client/model/api/paginators-1.json b/agent/ecs_client/model/api/paginators-1.json new file mode 100644 index 00000000000..081a2df0025 --- /dev/null +++ b/agent/ecs_client/model/api/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "clusterArns" + }, + "ListContainerInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "containerInstanceArns" + }, + "ListTaskDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "taskDefinitionArns" + }, + "ListTaskDefinitionFamilies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "families" + }, + "ListTasks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "taskArns" + }, + "ListServices": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "serviceArns" + } + } +} diff --git a/agent/ecs_client/model/api/waiters-2.json b/agent/ecs_client/model/api/waiters-2.json new file mode 100644 index 00000000000..8866d15fdd8 --- /dev/null +++ b/agent/ecs_client/model/api/waiters-2.json @@ -0,0 +1,93 @@ +{ + "version": 2, + "waiters": { + "TasksRunning": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAny", + "state": "failure", + "argument": "tasks[].lastStatus" + }, + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "RUNNING", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "TasksStopped": { + "delay": 6, + "operation": "DescribeTasks", + "maxAttempts": 
100, + "acceptors": [ + { + "expected": "STOPPED", + "matcher": "pathAll", + "state": "success", + "argument": "tasks[].lastStatus" + } + ] + }, + "ServicesStable": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "DRAINING", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "failure", + "argument": "services[].status" + }, + { + "expected": true, + "matcher": "path", + "state": "success", + "argument": "services | [@[?length(deployments)!=`1`], @[?desiredCount!=runningCount]][] | length(@) == `0`" + } + ] + }, + "ServicesInactive": { + "delay": 15, + "operation": "DescribeServices", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "MISSING", + "matcher": "pathAny", + "state": "failure", + "argument": "failures[].reason" + }, + { + "expected": "INACTIVE", + "matcher": "pathAny", + "state": "success", + "argument": "services[].status" + } + ] + } + } +} diff --git a/agent/ecs_client/model/ecs/api.go b/agent/ecs_client/model/ecs/api.go new file mode 100644 index 00000000000..bd9ff756a23 --- /dev/null +++ b/agent/ecs_client/model/ecs/api.go @@ -0,0 +1,14565 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecs + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateCluster = "CreateCluster" + +// CreateClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCluster for more information on using the CreateCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateClusterRequest method. +// req, resp := client.CreateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { + op := &request.Operation{ + Name: opCreateCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClusterInput{} + } + + output = &CreateClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCluster API operation for Amazon Elastic Container Service. +// +// Creates a new Amazon ECS cluster. By default, your account receives a default +// cluster when you launch your first container instance. However, you can create +// your own cluster with a unique name with the CreateCluster action. 
+// +// When you call the CreateCluster API operation, Amazon ECS attempts to create +// the service-linked role for your account so that required resources in other +// AWS services can be managed on your behalf. However, if the IAM user that +// makes the call does not have permissions to create the service-linked role, +// it is not created. For more information, see Using Service-Linked Roles for +// Amazon ECS (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation CreateCluster for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + return out, req.Send() +} + +// CreateClusterWithContext is the same as CreateCluster with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) CreateClusterWithContext(ctx aws.Context, input *CreateClusterInput, opts ...request.Option) (*CreateClusterOutput, error) { + req, out := c.CreateClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateService = "CreateService" + +// CreateServiceRequest generates a "aws/request.Request" representing the +// client's request for the CreateService operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateService for more information on using the CreateService +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateServiceRequest method. +// req, resp := client.CreateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Request, output *CreateServiceOutput) { + op := &request.Operation{ + Name: opCreateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceInput{} + } + + output = &CreateServiceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateService API operation for Amazon Elastic Container Service. +// +// Runs and maintains a desired number of tasks from a specified task definition. +// If the number of tasks running in a service drops below desiredCount, Amazon +// ECS spawns another copy of the task in the specified cluster. 
To update an +// existing service, see UpdateService. +// +// In addition to maintaining the desired count of tasks in your service, you +// can optionally run your service behind a load balancer. The load balancer +// distributes traffic across the tasks that are associated with the service. +// For more information, see Service Load Balancing (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// You can optionally specify a deployment configuration for your service. During +// a deployment, the service scheduler uses the minimumHealthyPercent and maximumPercent +// parameters to determine the deployment strategy. The deployment is triggered +// by changing the task definition or the desired count of a service with an +// UpdateService operation. +// +// The minimumHealthyPercent represents a lower limit on the number of your +// service's tasks that must remain in the RUNNING state during a deployment, +// as a percentage of the desiredCount (rounded up to the nearest integer). +// This parameter enables you to deploy without using additional cluster capacity. +// For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent +// of 50%, the scheduler can stop two existing tasks to free up cluster capacity +// before starting two new tasks. Tasks for services that do not use a load +// balancer are considered healthy if they are in the RUNNING state. Tasks for +// services that do use a load balancer are considered healthy if they are in +// the RUNNING state and the container instance they are hosted on is reported +// as healthy by the load balancer. The default value for a replica service +// for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, +// the AWS SDKs, and the APIs. 
The default value for a daemon service for minimumHealthyPercent +// is 0% for the AWS CLI, the AWS SDKs, and the APIs and 50% for the console. +// +// The maximumPercent parameter represents an upper limit on the number of your +// service's tasks that are allowed in the RUNNING or PENDING state during a +// deployment, as a percentage of the desiredCount (rounded down to the nearest +// integer). This parameter enables you to define the deployment batch size. +// For example, if your replica service has a desiredCount of four tasks and +// a maximumPercent value of 200%, the scheduler can start four new tasks before +// stopping the four older tasks (provided that the cluster resources required +// to do this are available). The default value for a replica service for maximumPercent +// is 200%. If you are using a daemon service type, the maximumPercent should +// remain at 100%, which is the default value. +// +// When the service scheduler launches new tasks, it determines task placement +// in your cluster using the following logic: +// +// * Determine which of the container instances in your cluster can support +// your service's task definition (for example, they have the required CPU, +// memory, ports, and container instance attributes). +// +// * By default, the service scheduler attempts to balance tasks across Availability +// Zones in this manner (although you can choose a different placement strategy) +// with the placementStrategy parameter): Sort the valid container instances, +// giving priority to instances that have the fewest number of running tasks +// for this service in their respective Availability Zone. For example, if +// zone A has one running service task and zones B and C each have zero, +// valid container instances in either zone B or C are considered optimal +// for placement. 
Place the new service task on a valid container instance +// in an optimal Availability Zone (based on the previous steps), favoring +// container instances with the fewest number of running tasks for this service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation CreateService for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * UnsupportedFeatureException +// The specified task is not supported in this region. +// +// * PlatformUnknownException +// The specified platform version does not exist. +// +// * PlatformTaskDefinitionIncompatibilityException +// The specified platform version does not satisfy the task definition's required +// capabilities. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. +// +func (c *ECS) CreateService(input *CreateServiceInput) (*CreateServiceOutput, error) { + req, out := c.CreateServiceRequest(input) + return out, req.Send() +} + +// CreateServiceWithContext is the same as CreateService with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateService for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) CreateServiceWithContext(ctx aws.Context, input *CreateServiceInput, opts ...request.Option) (*CreateServiceOutput, error) { + req, out := c.CreateServiceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAccountSetting = "DeleteAccountSetting" + +// DeleteAccountSettingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountSetting operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAccountSetting for more information on using the DeleteAccountSetting +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAccountSettingRequest method. 
+// req, resp := client.DeleteAccountSettingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeleteAccountSettingRequest(input *DeleteAccountSettingInput) (req *request.Request, output *DeleteAccountSettingOutput) { + op := &request.Operation{ + Name: opDeleteAccountSetting, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountSettingInput{} + } + + output = &DeleteAccountSettingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteAccountSetting API operation for Amazon Elastic Container Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeleteAccountSetting for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) DeleteAccountSetting(input *DeleteAccountSettingInput) (*DeleteAccountSettingOutput, error) { + req, out := c.DeleteAccountSettingRequest(input) + return out, req.Send() +} + +// DeleteAccountSettingWithContext is the same as DeleteAccountSetting with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAccountSetting for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeleteAccountSettingWithContext(ctx aws.Context, input *DeleteAccountSettingInput, opts ...request.Option) (*DeleteAccountSettingOutput, error) { + req, out := c.DeleteAccountSettingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAttributes = "DeleteAttributes" + +// DeleteAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAttributes for more information on using the DeleteAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAttributesRequest method. +// req, resp := client.DeleteAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeleteAttributesRequest(input *DeleteAttributesInput) (req *request.Request, output *DeleteAttributesOutput) { + op := &request.Operation{ + Name: opDeleteAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAttributesInput{} + } + + output = &DeleteAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteAttributes API operation for Amazon Elastic Container Service. 
+// +// Deletes one or more custom attributes from an Amazon ECS resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeleteAttributes for usage and error information. +// +// Returned Error Types: +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * TargetNotFoundException +// The specified target could not be found. You can view your available container +// instances with ListContainerInstances. Amazon ECS container instances are +// cluster-specific and region-specific. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) DeleteAttributes(input *DeleteAttributesInput) (*DeleteAttributesOutput, error) { + req, out := c.DeleteAttributesRequest(input) + return out, req.Send() +} + +// DeleteAttributesWithContext is the same as DeleteAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeleteAttributesWithContext(ctx aws.Context, input *DeleteAttributesInput, opts ...request.Option) (*DeleteAttributesOutput, error) { + req, out := c.DeleteAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteCluster = "DeleteCluster" + +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCluster for more information on using the DeleteCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteClusterRequest method. +// req, resp := client.DeleteClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { + op := &request.Operation{ + Name: opDeleteCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClusterInput{} + } + + output = &DeleteClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCluster API operation for Amazon Elastic Container Service. +// +// Deletes the specified cluster. You must deregister all container instances +// from this cluster before you may delete it. You can list the container instances +// in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeleteCluster for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * ClusterContainsContainerInstancesException +// You cannot delete a cluster that has registered container instances. You +// must first deregister the container instances before you can delete the cluster. +// For more information, see DeregisterContainerInstance. +// +// * ClusterContainsServicesException +// You cannot delete a cluster that contains services. You must first update +// the service to reduce its desired task count to 0 and then delete the service. +// For more information, see UpdateService and DeleteService. +// +// * ClusterContainsTasksException +// You cannot delete a cluster that has active tasks. +// +func (c *ECS) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + return out, req.Send() +} + +// DeleteClusterWithContext is the same as DeleteCluster with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeleteClusterWithContext(ctx aws.Context, input *DeleteClusterInput, opts ...request.Option) (*DeleteClusterOutput, error) { + req, out := c.DeleteClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteService = "DeleteService" + +// DeleteServiceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteService operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteService for more information on using the DeleteService +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServiceRequest method. +// req, resp := client.DeleteServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Request, output *DeleteServiceOutput) { + op := &request.Operation{ + Name: opDeleteService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceInput{} + } + + output = &DeleteServiceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteService API operation for Amazon Elastic Container Service. +// +// Deletes a specified service within a cluster. You can delete a service if +// you have no running tasks in it and the desired task count is zero. 
If the +// service is actively maintaining tasks, you cannot delete it, and you must +// update the service to a desired task count of zero. For more information, +// see UpdateService. +// +// When you delete a service, if there are still running tasks that require +// cleanup, the service status moves from ACTIVE to DRAINING, and the service +// is no longer visible in the console or in ListServices API operations. After +// the tasks have stopped, then the service status moves from DRAINING to INACTIVE. +// Services in the DRAINING or INACTIVE status can still be viewed with DescribeServices +// API operations. However, in the future, INACTIVE services may be cleaned +// up and purged from Amazon ECS record keeping, and DescribeServices API operations +// on those services return a ServiceNotFoundException error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeleteService for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * ServiceNotFoundException +// The specified service could not be found. You can view your available services +// with ListServices. 
Amazon ECS services are cluster-specific and region-specific. +// +func (c *ECS) DeleteService(input *DeleteServiceInput) (*DeleteServiceOutput, error) { + req, out := c.DeleteServiceRequest(input) + return out, req.Send() +} + +// DeleteServiceWithContext is the same as DeleteService with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteService for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeleteServiceWithContext(ctx aws.Context, input *DeleteServiceInput, opts ...request.Option) (*DeleteServiceOutput, error) { + req, out := c.DeleteServiceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterContainerInstance = "DeregisterContainerInstance" + +// DeregisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterContainerInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterContainerInstance for more information on using the DeregisterContainerInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterContainerInstanceRequest method. 
+// req, resp := client.DeregisterContainerInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInstanceInput) (req *request.Request, output *DeregisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterContainerInstanceInput{} + } + + output = &DeregisterContainerInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterContainerInstance API operation for Amazon Elastic Container Service. +// +// Deregisters an Amazon ECS container instance from the specified cluster. +// This instance is no longer available to run tasks. +// +// If you intend to use the container instance for some other purpose after +// deregistration, you should stop all of the tasks running on the container +// instance before deregistration. That prevents any orphaned tasks from consuming +// resources. +// +// Deregistering a container instance removes the instance from a cluster, but +// it does not terminate the EC2 instance; if you are finished using the instance, +// be sure to terminate it in the Amazon EC2 console to stop billing. +// +// If you terminate a running container instance, Amazon ECS automatically deregisters +// the instance from your cluster (stopped container instances or instances +// with disconnected agents are not automatically deregistered when terminated). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeregisterContainerInstance for usage and error information. 
+// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInput) (*DeregisterContainerInstanceOutput, error) { + req, out := c.DeregisterContainerInstanceRequest(input) + return out, req.Send() +} + +// DeregisterContainerInstanceWithContext is the same as DeregisterContainerInstance with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterContainerInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeregisterContainerInstanceWithContext(ctx aws.Context, input *DeregisterContainerInstanceInput, opts ...request.Option) (*DeregisterContainerInstanceOutput, error) { + req, out := c.DeregisterContainerInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTaskDefinition = "DeregisterTaskDefinition" + +// DeregisterTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTaskDefinition operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterTaskDefinition for more information on using the DeregisterTaskDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTaskDefinitionRequest method. +// req, resp := client.DeregisterTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DeregisterTaskDefinitionRequest(input *DeregisterTaskDefinitionInput) (req *request.Request, output *DeregisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDeregisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTaskDefinitionInput{} + } + + output = &DeregisterTaskDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTaskDefinition API operation for Amazon Elastic Container Service. +// +// Deregisters the specified task definition by family and revision. Upon deregistration, +// the task definition is marked as INACTIVE. Existing tasks and services that +// reference an INACTIVE task definition continue to run without disruption. +// Existing services that reference an INACTIVE task definition can still scale +// up or down by modifying the service's desired count. 
+// +// You cannot use an INACTIVE task definition to run new tasks or create new +// services, and you cannot update an existing service to reference an INACTIVE +// task definition (although there may be up to a 10-minute window following +// deregistration where these restrictions have not yet taken effect). +// +// At this time, INACTIVE task definitions remain discoverable in your account +// indefinitely; however, this behavior is subject to change in the future, +// so you should not rely on INACTIVE task definitions persisting beyond the +// lifecycle of any associated tasks and services. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DeregisterTaskDefinition for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) DeregisterTaskDefinition(input *DeregisterTaskDefinitionInput) (*DeregisterTaskDefinitionOutput, error) { + req, out := c.DeregisterTaskDefinitionRequest(input) + return out, req.Send() +} + +// DeregisterTaskDefinitionWithContext is the same as DeregisterTaskDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTaskDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeregisterTaskDefinitionWithContext(ctx aws.Context, input *DeregisterTaskDefinitionInput, opts ...request.Option) (*DeregisterTaskDefinitionOutput, error) { + req, out := c.DeregisterTaskDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeClusters = "DescribeClusters" + +// DescribeClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClusters for more information on using the DescribeClusters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClustersRequest method. +// req, resp := client.DescribeClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { + op := &request.Operation{ + Name: opDescribeClusters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeClustersInput{} + } + + output = &DescribeClustersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClusters API operation for Amazon Elastic Container Service. +// +// Describes one or more of your clusters. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DescribeClusters for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + return out, req.Send() +} + +// DescribeClustersWithContext is the same as DescribeClusters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClusters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DescribeClustersWithContext(ctx aws.Context, input *DescribeClustersInput, opts ...request.Option) (*DescribeClustersOutput, error) { + req, out := c.DescribeClustersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeContainerInstances = "DescribeContainerInstances" + +// DescribeContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContainerInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContainerInstances for more information on using the DescribeContainerInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeContainerInstancesRequest method. +// req, resp := client.DescribeContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DescribeContainerInstancesRequest(input *DescribeContainerInstancesInput) (req *request.Request, output *DescribeContainerInstancesOutput) { + op := &request.Operation{ + Name: opDescribeContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContainerInstancesInput{} + } + + output = &DescribeContainerInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeContainerInstances API operation for Amazon Elastic Container Service. +// +// Describes Amazon Elastic Container Service container instances. Returns metadata +// about registered and remaining resources on each container instance requested. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DescribeContainerInstances for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) DescribeContainerInstances(input *DescribeContainerInstancesInput) (*DescribeContainerInstancesOutput, error) { + req, out := c.DescribeContainerInstancesRequest(input) + return out, req.Send() +} + +// DescribeContainerInstancesWithContext is the same as DescribeContainerInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContainerInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DescribeContainerInstancesWithContext(ctx aws.Context, input *DescribeContainerInstancesInput, opts ...request.Option) (*DescribeContainerInstancesOutput, error) { + req, out := c.DescribeContainerInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeServices = "DescribeServices" + +// DescribeServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeServices for more information on using the DescribeServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeServicesRequest method. +// req, resp := client.DescribeServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { + op := &request.Operation{ + Name: opDescribeServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeServicesInput{} + } + + output = &DescribeServicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeServices API operation for Amazon Elastic Container Service. +// +// Describes the specified services running in your cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DescribeServices for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) DescribeServices(input *DescribeServicesInput) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + return out, req.Send() +} + +// DescribeServicesWithContext is the same as DescribeServices with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DescribeServicesWithContext(ctx aws.Context, input *DescribeServicesInput, opts ...request.Option) (*DescribeServicesOutput, error) { + req, out := c.DescribeServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTaskDefinition = "DescribeTaskDefinition" + +// DescribeTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTaskDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTaskDefinition for more information on using the DescribeTaskDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTaskDefinitionRequest method. +// req, resp := client.DescribeTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DescribeTaskDefinitionRequest(input *DescribeTaskDefinitionInput) (req *request.Request, output *DescribeTaskDefinitionOutput) { + op := &request.Operation{ + Name: opDescribeTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTaskDefinitionInput{} + } + + output = &DescribeTaskDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTaskDefinition API operation for Amazon Elastic Container Service. +// +// Describes a task definition. You can specify a family and revision to find +// information about a specific task definition, or you can simply specify the +// family to find the latest ACTIVE revision in that family. +// +// You can only describe INACTIVE task definitions while an active task or service +// references them. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DescribeTaskDefinition for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) DescribeTaskDefinition(input *DescribeTaskDefinitionInput) (*DescribeTaskDefinitionOutput, error) { + req, out := c.DescribeTaskDefinitionRequest(input) + return out, req.Send() +} + +// DescribeTaskDefinitionWithContext is the same as DescribeTaskDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTaskDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DescribeTaskDefinitionWithContext(ctx aws.Context, input *DescribeTaskDefinitionInput, opts ...request.Option) (*DescribeTaskDefinitionOutput, error) { + req, out := c.DescribeTaskDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTasks = "DescribeTasks" + +// DescribeTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTasks for more information on using the DescribeTasks +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTasksRequest method. +// req, resp := client.DescribeTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DescribeTasksRequest(input *DescribeTasksInput) (req *request.Request, output *DescribeTasksOutput) { + op := &request.Operation{ + Name: opDescribeTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTasksInput{} + } + + output = &DescribeTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTasks API operation for Amazon Elastic Container Service. +// +// Describes a specified task or tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DescribeTasks for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. 
+// +func (c *ECS) DescribeTasks(input *DescribeTasksInput) (*DescribeTasksOutput, error) { + req, out := c.DescribeTasksRequest(input) + return out, req.Send() +} + +// DescribeTasksWithContext is the same as DescribeTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DescribeTasksWithContext(ctx aws.Context, input *DescribeTasksInput, opts ...request.Option) (*DescribeTasksOutput, error) { + req, out := c.DescribeTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDiscoverPollEndpoint = "DiscoverPollEndpoint" + +// DiscoverPollEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DiscoverPollEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DiscoverPollEndpoint for more information on using the DiscoverPollEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DiscoverPollEndpointRequest method. 
+// req, resp := client.DiscoverPollEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req *request.Request, output *DiscoverPollEndpointOutput) { + op := &request.Operation{ + Name: opDiscoverPollEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DiscoverPollEndpointInput{} + } + + output = &DiscoverPollEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// DiscoverPollEndpoint API operation for Amazon Elastic Container Service. +// +// +// This action is only used by the Amazon ECS agent, and it is not intended +// for use outside of the agent. +// +// Returns an endpoint for the Amazon ECS agent to poll for updates. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation DiscoverPollEndpoint for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverPollEndpointOutput, error) { + req, out := c.DiscoverPollEndpointRequest(input) + return out, req.Send() +} + +// DiscoverPollEndpointWithContext is the same as DiscoverPollEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DiscoverPollEndpoint for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DiscoverPollEndpointWithContext(ctx aws.Context, input *DiscoverPollEndpointInput, opts ...request.Option) (*DiscoverPollEndpointOutput, error) { + req, out := c.DiscoverPollEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAttributes = "ListAttributes" + +// ListAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAttributes for more information on using the ListAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAttributesRequest method. +// req, resp := client.ListAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListAttributesRequest(input *ListAttributesInput) (req *request.Request, output *ListAttributesOutput) { + op := &request.Operation{ + Name: opListAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAttributesInput{} + } + + output = &ListAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAttributes API operation for Amazon Elastic Container Service. 
+// +// Lists the attributes for Amazon ECS resources within a specified target type +// and cluster. When you specify a target type and cluster, ListAttributes returns +// a list of attribute objects, one for each attribute on each resource. You +// can filter the list of results to a single attribute name to only return +// results that have that name. You can also filter the results by attribute +// name and value, for example, to see which container instances in a cluster +// are running a Linux AMI (ecs.os-type=linux). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListAttributes for usage and error information. +// +// Returned Error Types: +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) ListAttributes(input *ListAttributesInput) (*ListAttributesOutput, error) { + req, out := c.ListAttributesRequest(input) + return out, req.Send() +} + +// ListAttributesWithContext is the same as ListAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See ListAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECS) ListAttributesWithContext(ctx aws.Context, input *ListAttributesInput, opts ...request.Option) (*ListAttributesOutput, error) { + req, out := c.ListAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListClusters = "ListClusters" + +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListClusters for more information on using the ListClusters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListClustersRequest method. +// req, resp := client.ListClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { + op := &request.Operation{ + Name: opListClusters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClustersInput{} + } + + output = &ListClustersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListClusters API operation for Amazon Elastic Container Service. +// +// Returns a list of existing clusters. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListClusters for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + return out, req.Send() +} + +// ListClustersWithContext is the same as ListClusters with the addition of +// the ability to pass a context and additional request options. +// +// See ListClusters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *ecs.ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(*ListClustersOutput, bool) bool) error { + return c.ListClustersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListClustersPagesWithContext same as ListClustersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersInput, fn func(*ListClustersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListClustersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListClustersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListContainerInstances = "ListContainerInstances" + +// ListContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListContainerInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListContainerInstances for more information on using the ListContainerInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListContainerInstancesRequest method. +// req, resp := client.ListContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListContainerInstancesRequest(input *ListContainerInstancesInput) (req *request.Request, output *ListContainerInstancesOutput) { + op := &request.Operation{ + Name: opListContainerInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContainerInstancesInput{} + } + + output = &ListContainerInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListContainerInstances API operation for Amazon Elastic Container Service. +// +// Returns a list of container instances in a specified cluster. You can filter +// the results of a ListContainerInstances operation with cluster query language +// statements inside the filter parameter. For more information, see Cluster +// Query Language (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListContainerInstances for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) ListContainerInstances(input *ListContainerInstancesInput) (*ListContainerInstancesOutput, error) { + req, out := c.ListContainerInstancesRequest(input) + return out, req.Send() +} + +// ListContainerInstancesWithContext is the same as ListContainerInstances with the addition of +// the ability to pass a context and additional request options. +// +// See ListContainerInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListContainerInstancesWithContext(ctx aws.Context, input *ListContainerInstancesInput, opts ...request.Option) (*ListContainerInstancesOutput, error) { + req, out := c.ListContainerInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListContainerInstancesPages iterates over the pages of a ListContainerInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContainerInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContainerInstances operation. +// pageNum := 0 +// err := client.ListContainerInstancesPages(params, +// func(page *ecs.ListContainerInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn func(*ListContainerInstancesOutput, bool) bool) error { + return c.ListContainerInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListContainerInstancesPagesWithContext same as ListContainerInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListContainerInstancesPagesWithContext(ctx aws.Context, input *ListContainerInstancesInput, fn func(*ListContainerInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListContainerInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListContainerInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListContainerInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListServices = "ListServices" + +// ListServicesRequest generates a "aws/request.Request" representing the +// client's request for the ListServices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListServices for more information on using the ListServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListServicesRequest method. +// req, resp := client.ListServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) { + op := &request.Operation{ + Name: opListServices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListServicesInput{} + } + + output = &ListServicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListServices API operation for Amazon Elastic Container Service. +// +// Lists the services that are running in a specified cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListServices for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) ListServices(input *ListServicesInput) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + return out, req.Send() +} + +// ListServicesWithContext is the same as ListServices with the addition of +// the ability to pass a context and additional request options. +// +// See ListServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListServicesWithContext(ctx aws.Context, input *ListServicesInput, opts ...request.Option) (*ListServicesOutput, error) { + req, out := c.ListServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListServicesPages iterates over the pages of a ListServices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListServices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServices operation. +// pageNum := 0 +// err := client.ListServicesPages(params, +// func(page *ecs.ListServicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(*ListServicesOutput, bool) bool) error { + return c.ListServicesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListServicesPagesWithContext same as ListServicesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListServicesPagesWithContext(ctx aws.Context, input *ListServicesInput, fn func(*ListServicesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListServicesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListServicesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListServicesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Elastic Container Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. 
+// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTaskDefinitionFamilies = "ListTaskDefinitionFamilies" + +// ListTaskDefinitionFamiliesRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitionFamilies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTaskDefinitionFamilies for more information on using the ListTaskDefinitionFamilies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ListTaskDefinitionFamiliesRequest method. +// req, resp := client.ListTaskDefinitionFamiliesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamiliesInput) (req *request.Request, output *ListTaskDefinitionFamiliesOutput) { + op := &request.Operation{ + Name: opListTaskDefinitionFamilies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionFamiliesInput{} + } + + output = &ListTaskDefinitionFamiliesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTaskDefinitionFamilies API operation for Amazon Elastic Container Service. +// +// Returns a list of task definition families that are registered to your account +// (which may include task definition families that no longer have any ACTIVE +// task definition revisions). +// +// You can filter out task definition families that do not contain any ACTIVE +// task definition revisions by setting the status parameter to ACTIVE. You +// can also filter the results with the familyPrefix parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListTaskDefinitionFamilies for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) ListTaskDefinitionFamilies(input *ListTaskDefinitionFamiliesInput) (*ListTaskDefinitionFamiliesOutput, error) { + req, out := c.ListTaskDefinitionFamiliesRequest(input) + return out, req.Send() +} + +// ListTaskDefinitionFamiliesWithContext is the same as ListTaskDefinitionFamilies with the addition of +// the ability to pass a context and additional request options. +// +// See ListTaskDefinitionFamilies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTaskDefinitionFamiliesWithContext(ctx aws.Context, input *ListTaskDefinitionFamiliesInput, opts ...request.Option) (*ListTaskDefinitionFamiliesOutput, error) { + req, out := c.ListTaskDefinitionFamiliesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTaskDefinitionFamiliesPages iterates over the pages of a ListTaskDefinitionFamilies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitionFamilies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitionFamilies operation. 
+// pageNum := 0 +// err := client.ListTaskDefinitionFamiliesPages(params, +// func(page *ecs.ListTaskDefinitionFamiliesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesInput, fn func(*ListTaskDefinitionFamiliesOutput, bool) bool) error { + return c.ListTaskDefinitionFamiliesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTaskDefinitionFamiliesPagesWithContext same as ListTaskDefinitionFamiliesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTaskDefinitionFamiliesPagesWithContext(ctx aws.Context, input *ListTaskDefinitionFamiliesInput, fn func(*ListTaskDefinitionFamiliesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTaskDefinitionFamiliesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTaskDefinitionFamiliesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTaskDefinitionFamiliesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTaskDefinitions = "ListTaskDefinitions" + +// ListTaskDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTaskDefinitions for more information on using the ListTaskDefinitions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTaskDefinitionsRequest method. +// req, resp := client.ListTaskDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListTaskDefinitionsRequest(input *ListTaskDefinitionsInput) (req *request.Request, output *ListTaskDefinitionsOutput) { + op := &request.Operation{ + Name: opListTaskDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTaskDefinitionsInput{} + } + + output = &ListTaskDefinitionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTaskDefinitions API operation for Amazon Elastic Container Service. +// +// Returns a list of task definitions that are registered to your account. You +// can filter the results by family name with the familyPrefix parameter or +// by status with the status parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListTaskDefinitions for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) ListTaskDefinitions(input *ListTaskDefinitionsInput) (*ListTaskDefinitionsOutput, error) { + req, out := c.ListTaskDefinitionsRequest(input) + return out, req.Send() +} + +// ListTaskDefinitionsWithContext is the same as ListTaskDefinitions with the addition of +// the ability to pass a context and additional request options. +// +// See ListTaskDefinitions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTaskDefinitionsWithContext(ctx aws.Context, input *ListTaskDefinitionsInput, opts ...request.Option) (*ListTaskDefinitionsOutput, error) { + req, out := c.ListTaskDefinitionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTaskDefinitionsPages iterates over the pages of a ListTaskDefinitions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitions operation. 
+// pageNum := 0 +// err := client.ListTaskDefinitionsPages(params, +// func(page *ecs.ListTaskDefinitionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func(*ListTaskDefinitionsOutput, bool) bool) error { + return c.ListTaskDefinitionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTaskDefinitionsPagesWithContext same as ListTaskDefinitionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTaskDefinitionsPagesWithContext(ctx aws.Context, input *ListTaskDefinitionsInput, fn func(*ListTaskDefinitionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTaskDefinitionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTaskDefinitionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTaskDefinitionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTasks = "ListTasks" + +// ListTasksRequest generates a "aws/request.Request" representing the +// client's request for the ListTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTasks for more information on using the ListTasks +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTasksRequest method. +// req, resp := client.ListTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, output *ListTasksOutput) { + op := &request.Operation{ + Name: opListTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTasksInput{} + } + + output = &ListTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTasks API operation for Amazon Elastic Container Service. +// +// Returns a list of tasks for a specified cluster. You can filter the results +// by family name, by a particular container instance, or by the desired status +// of the task with the family, containerInstance, and desiredStatus parameters. +// +// Recently stopped tasks might appear in the returned results. Currently, stopped +// tasks appear in the returned results for at least one hour. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation ListTasks for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * ServiceNotFoundException +// The specified service could not be found. You can view your available services +// with ListServices. Amazon ECS services are cluster-specific and region-specific. +// +func (c *ECS) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) { + req, out := c.ListTasksRequest(input) + return out, req.Send() +} + +// ListTasksWithContext is the same as ListTasks with the addition of +// the ability to pass a context and additional request options. +// +// See ListTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTasksWithContext(ctx aws.Context, input *ListTasksInput, opts ...request.Option) (*ListTasksOutput, error) { + req, out := c.ListTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTasksPages iterates over the pages of a ListTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListTasks operation. +// pageNum := 0 +// err := client.ListTasksPages(params, +// func(page *ecs.ListTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(*ListTasksOutput, bool) bool) error { + return c.ListTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTasksPagesWithContext same as ListTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) ListTasksPagesWithContext(ctx aws.Context, input *ListTasksInput, fn func(*ListTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutAccountSetting = "PutAccountSetting" + +// PutAccountSettingRequest generates a "aws/request.Request" representing the +// client's request for the PutAccountSetting operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAccountSetting for more information on using the PutAccountSetting +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutAccountSettingRequest method. +// req, resp := client.PutAccountSettingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) PutAccountSettingRequest(input *PutAccountSettingInput) (req *request.Request, output *PutAccountSettingOutput) { + op := &request.Operation{ + Name: opPutAccountSetting, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAccountSettingInput{} + } + + output = &PutAccountSettingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutAccountSetting API operation for Amazon Elastic Container Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation PutAccountSetting for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. 
+// +func (c *ECS) PutAccountSetting(input *PutAccountSettingInput) (*PutAccountSettingOutput, error) { + req, out := c.PutAccountSettingRequest(input) + return out, req.Send() +} + +// PutAccountSettingWithContext is the same as PutAccountSetting with the addition of +// the ability to pass a context and additional request options. +// +// See PutAccountSetting for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) PutAccountSettingWithContext(ctx aws.Context, input *PutAccountSettingInput, opts ...request.Option) (*PutAccountSettingOutput, error) { + req, out := c.PutAccountSettingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutAttributes = "PutAttributes" + +// PutAttributesRequest generates a "aws/request.Request" representing the +// client's request for the PutAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAttributes for more information on using the PutAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutAttributesRequest method. 
+// req, resp := client.PutAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) PutAttributesRequest(input *PutAttributesInput) (req *request.Request, output *PutAttributesOutput) { + op := &request.Operation{ + Name: opPutAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAttributesInput{} + } + + output = &PutAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutAttributes API operation for Amazon Elastic Container Service. +// +// Create or update an attribute on an Amazon ECS resource. If the attribute +// does not exist, it is created. If the attribute exists, its value is replaced +// with the specified value. To delete an attribute, use DeleteAttributes. For +// more information, see Attributes (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation PutAttributes for usage and error information. +// +// Returned Error Types: +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * TargetNotFoundException +// The specified target could not be found. You can view your available container +// instances with ListContainerInstances. Amazon ECS container instances are +// cluster-specific and region-specific. +// +// * AttributeLimitExceededException +// You can apply up to 10 custom attributes per resource. You can view the attributes +// of a resource with ListAttributes. 
You can remove existing attributes on +// a resource with DeleteAttributes. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) PutAttributes(input *PutAttributesInput) (*PutAttributesOutput, error) { + req, out := c.PutAttributesRequest(input) + return out, req.Send() +} + +// PutAttributesWithContext is the same as PutAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See PutAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) PutAttributesWithContext(ctx aws.Context, input *PutAttributesInput, opts ...request.Option) (*PutAttributesOutput, error) { + req, out := c.PutAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterContainerInstance = "RegisterContainerInstance" + +// RegisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterContainerInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterContainerInstance for more information on using the RegisterContainerInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the RegisterContainerInstanceRequest method. +// req, resp := client.RegisterContainerInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceInput) (req *request.Request, output *RegisterContainerInstanceOutput) { + op := &request.Operation{ + Name: opRegisterContainerInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterContainerInstanceInput{} + } + + output = &RegisterContainerInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterContainerInstance API operation for Amazon Elastic Container Service. +// +// +// This action is only used by the Amazon ECS agent, and it is not intended +// for use outside of the agent. +// +// Registers an EC2 instance into the specified cluster. This instance becomes +// available to place containers on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation RegisterContainerInstance for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. 
+// +func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) (*RegisterContainerInstanceOutput, error) { + req, out := c.RegisterContainerInstanceRequest(input) + return out, req.Send() +} + +// RegisterContainerInstanceWithContext is the same as RegisterContainerInstance with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterContainerInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) RegisterContainerInstanceWithContext(ctx aws.Context, input *RegisterContainerInstanceInput, opts ...request.Option) (*RegisterContainerInstanceOutput, error) { + req, out := c.RegisterContainerInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterTaskDefinition = "RegisterTaskDefinition" + +// RegisterTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTaskDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterTaskDefinition for more information on using the RegisterTaskDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterTaskDefinitionRequest method. 
+// req, resp := client.RegisterTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) (req *request.Request, output *RegisterTaskDefinitionOutput) { + op := &request.Operation{ + Name: opRegisterTaskDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTaskDefinitionInput{} + } + + output = &RegisterTaskDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterTaskDefinition API operation for Amazon Elastic Container Service. +// +// Registers a new task definition from the supplied family and containerDefinitions. +// Optionally, you can add data volumes to your containers with the volumes +// parameter. For more information about task definition parameters and defaults, +// see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// You can specify an IAM role for your task with the taskRoleArn parameter. +// When you specify an IAM role for a task, its containers can then use the +// latest versions of the AWS CLI or SDKs to make API requests to the AWS services +// that are specified in the IAM policy associated with the role. For more information, +// see IAM Roles for Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// You can specify a Docker networking mode for the containers in your task +// definition with the networkMode parameter. The available network modes correspond +// to those described in Network settings (https://docs.docker.com/engine/reference/run/#/network-settings) +// in the Docker run reference. 
If you specify the awsvpc network mode, the +// task is allocated an elastic network interface, and you must specify a NetworkConfiguration +// when you create a service or run a task with the task definition. For more +// information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation RegisterTaskDefinition for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*RegisterTaskDefinitionOutput, error) { + req, out := c.RegisterTaskDefinitionRequest(input) + return out, req.Send() +} + +// RegisterTaskDefinitionWithContext is the same as RegisterTaskDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTaskDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECS) RegisterTaskDefinitionWithContext(ctx aws.Context, input *RegisterTaskDefinitionInput, opts ...request.Option) (*RegisterTaskDefinitionOutput, error) { + req, out := c.RegisterTaskDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRunTask = "RunTask" + +// RunTaskRequest generates a "aws/request.Request" representing the +// client's request for the RunTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RunTask for more information on using the RunTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RunTaskRequest method. +// req, resp := client.RunTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output *RunTaskOutput) { + op := &request.Operation{ + Name: opRunTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunTaskInput{} + } + + output = &RunTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// RunTask API operation for Amazon Elastic Container Service. +// +// Starts a new task using the specified task definition. +// +// You can allow Amazon ECS to place tasks for you, or you can customize how +// Amazon ECS places tasks using placement constraints and placement strategies. 
+// For more information, see Scheduling Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Alternatively, you can use StartTask to use your own scheduler or place tasks +// manually on specific container instances. +// +// The Amazon ECS API follows an eventual consistency model, due to the distributed +// nature of the system supporting the API. This means that the result of an +// API command you run that affects your Amazon ECS resources might not be immediately +// visible to all subsequent commands you run. You should keep this in mind +// when you carry out an API command that immediately follows a previous API +// command. +// +// To manage eventual consistency, you can do the following: +// +// * Confirm the state of the resource before you run a command to modify +// it. Run the DescribeTasks command using an exponential backoff algorithm +// to ensure that you allow enough time for the previous command to propagate +// through the system. To do this, run the DescribeTasks command repeatedly, +// starting with a couple of seconds of wait time and increasing gradually +// up to five minutes of wait time. +// +// * Add wait time between subsequent commands, even if the DescribeTasks +// command returns an accurate response. Apply an exponential backoff algorithm +// starting with a couple of seconds of wait time, and increase gradually +// up to about five minutes of wait time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation RunTask for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * UnsupportedFeatureException +// The specified task is not supported in this region. +// +// * PlatformUnknownException +// The specified platform version does not exist. +// +// * PlatformTaskDefinitionIncompatibilityException +// The specified platform version does not satisfy the task definition's required +// capabilities. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. +// +// * BlockedException +// Your AWS account has been blocked. Contact AWS Support (http://aws.amazon.com/contact-us/) +// for more information. +// +func (c *ECS) RunTask(input *RunTaskInput) (*RunTaskOutput, error) { + req, out := c.RunTaskRequest(input) + return out, req.Send() +} + +// RunTaskWithContext is the same as RunTask with the addition of +// the ability to pass a context and additional request options. +// +// See RunTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) RunTaskWithContext(ctx aws.Context, input *RunTaskInput, opts ...request.Option) (*RunTaskOutput, error) { + req, out := c.RunTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartTask = "StartTask" + +// StartTaskRequest generates a "aws/request.Request" representing the +// client's request for the StartTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartTask for more information on using the StartTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartTaskRequest method. +// req, resp := client.StartTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) { + op := &request.Operation{ + Name: opStartTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartTaskInput{} + } + + output = &StartTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartTask API operation for Amazon Elastic Container Service. +// +// Starts a new task from the specified task definition on the specified container +// instance or instances. +// +// Alternatively, you can use RunTask to place tasks for you. For more information, +// see Scheduling Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation StartTask for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) StartTask(input *StartTaskInput) (*StartTaskOutput, error) { + req, out := c.StartTaskRequest(input) + return out, req.Send() +} + +// StartTaskWithContext is the same as StartTask with the addition of +// the ability to pass a context and additional request options. +// +// See StartTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) StartTaskWithContext(ctx aws.Context, input *StartTaskInput, opts ...request.Option) (*StartTaskOutput, error) { + req, out := c.StartTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopTask = "StopTask" + +// StopTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopTask for more information on using the StopTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopTaskRequest method. +// req, resp := client.StopTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) StopTaskRequest(input *StopTaskInput) (req *request.Request, output *StopTaskOutput) { + op := &request.Operation{ + Name: opStopTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopTaskInput{} + } + + output = &StopTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopTask API operation for Amazon Elastic Container Service. +// +// Stops a running task. +// +// When StopTask is called on a task, the equivalent of docker stop is issued +// to the containers running in the task. This results in a SIGTERM and a default +// 30-second timeout, after which SIGKILL is sent and the containers are forcibly +// stopped. If the container handles the SIGTERM gracefully and exits within +// 30 seconds from receiving it, no SIGKILL is sent. +// +// The default 30-second timeout can be configured on the Amazon ECS container +// agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, +// see Amazon ECS Container Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation StopTask for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) StopTask(input *StopTaskInput) (*StopTaskOutput, error) { + req, out := c.StopTaskRequest(input) + return out, req.Send() +} + +// StopTaskWithContext is the same as StopTask with the addition of +// the ability to pass a context and additional request options. +// +// See StopTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) StopTaskWithContext(ctx aws.Context, input *StopTaskInput, opts ...request.Option) (*StopTaskOutput, error) { + req, out := c.StopTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSubmitAttachmentStateChanges = "SubmitAttachmentStateChanges" + +// SubmitAttachmentStateChangesRequest generates a "aws/request.Request" representing the +// client's request for the SubmitAttachmentStateChanges operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SubmitAttachmentStateChanges for more information on using the SubmitAttachmentStateChanges +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SubmitAttachmentStateChangesRequest method. +// req, resp := client.SubmitAttachmentStateChangesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) SubmitAttachmentStateChangesRequest(input *SubmitAttachmentStateChangesInput) (req *request.Request, output *SubmitAttachmentStateChangesOutput) { + op := &request.Operation{ + Name: opSubmitAttachmentStateChanges, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitAttachmentStateChangesInput{} + } + + output = &SubmitAttachmentStateChangesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SubmitAttachmentStateChanges API operation for Amazon Elastic Container Service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation SubmitAttachmentStateChanges for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +func (c *ECS) SubmitAttachmentStateChanges(input *SubmitAttachmentStateChangesInput) (*SubmitAttachmentStateChangesOutput, error) { + req, out := c.SubmitAttachmentStateChangesRequest(input) + return out, req.Send() +} + +// SubmitAttachmentStateChangesWithContext is the same as SubmitAttachmentStateChanges with the addition of +// the ability to pass a context and additional request options. +// +// See SubmitAttachmentStateChanges for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) SubmitAttachmentStateChangesWithContext(ctx aws.Context, input *SubmitAttachmentStateChangesInput, opts ...request.Option) (*SubmitAttachmentStateChangesOutput, error) { + req, out := c.SubmitAttachmentStateChangesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSubmitContainerStateChange = "SubmitContainerStateChange" + +// SubmitContainerStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitContainerStateChange operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SubmitContainerStateChange for more information on using the SubmitContainerStateChange +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SubmitContainerStateChangeRequest method. +// req, resp := client.SubmitContainerStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChangeInput) (req *request.Request, output *SubmitContainerStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitContainerStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitContainerStateChangeInput{} + } + + output = &SubmitContainerStateChangeOutput{} + req = c.newRequest(op, input, output) + return +} + +// SubmitContainerStateChange API operation for Amazon Elastic Container Service. +// +// +// This action is only used by the Amazon ECS agent, and it is not intended +// for use outside of the agent. +// +// Sent to acknowledge that a container changed states. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation SubmitContainerStateChange for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. +// +func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) (*SubmitContainerStateChangeOutput, error) { + req, out := c.SubmitContainerStateChangeRequest(input) + return out, req.Send() +} + +// SubmitContainerStateChangeWithContext is the same as SubmitContainerStateChange with the addition of +// the ability to pass a context and additional request options. +// +// See SubmitContainerStateChange for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) SubmitContainerStateChangeWithContext(ctx aws.Context, input *SubmitContainerStateChangeInput, opts ...request.Option) (*SubmitContainerStateChangeOutput, error) { + req, out := c.SubmitContainerStateChangeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSubmitTaskStateChange = "SubmitTaskStateChange" + +// SubmitTaskStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitTaskStateChange operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See SubmitTaskStateChange for more information on using the SubmitTaskStateChange +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SubmitTaskStateChangeRequest method. +// req, resp := client.SubmitTaskStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (req *request.Request, output *SubmitTaskStateChangeOutput) { + op := &request.Operation{ + Name: opSubmitTaskStateChange, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SubmitTaskStateChangeInput{} + } + + output = &SubmitTaskStateChangeOutput{} + req = c.newRequest(op, input, output) + return +} + +// SubmitTaskStateChange API operation for Amazon Elastic Container Service. +// +// +// This action is only used by the Amazon ECS agent, and it is not intended +// for use outside of the agent. +// +// Sent to acknowledge that a task changed states. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation SubmitTaskStateChange for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. 
+// +func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) { + req, out := c.SubmitTaskStateChangeRequest(input) + return out, req.Send() +} + +// SubmitTaskStateChangeWithContext is the same as SubmitTaskStateChange with the addition of +// the ability to pass a context and additional request options. +// +// See SubmitTaskStateChange for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) SubmitTaskStateChangeWithContext(ctx aws.Context, input *SubmitTaskStateChangeInput, opts ...request.Option) (*SubmitTaskStateChangeOutput, error) { + req, out := c.SubmitTaskStateChangeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContainerAgent = "UpdateContainerAgent" + +// UpdateContainerAgentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContainerAgent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContainerAgent for more information on using the UpdateContainerAgent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateContainerAgentRequest method. 
+// req, resp := client.UpdateContainerAgentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req *request.Request, output *UpdateContainerAgentOutput) { + op := &request.Operation{ + Name: opUpdateContainerAgent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContainerAgentInput{} + } + + output = &UpdateContainerAgentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateContainerAgent API operation for Amazon Elastic Container Service. +// +// Updates the Amazon ECS container agent on a specified container instance. +// Updating the Amazon ECS container agent does not interrupt running tasks +// or services on the container instance. The process for updating the agent +// differs depending on whether your container instance was launched with the +// Amazon ECS-optimized AMI or another operating system. +// +// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux +// with the ecs-init service installed and running. For help updating the Amazon +// ECS container agent on other operating systems, see Manually Updating the +// Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent) +// in the Amazon Elastic Container Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation UpdateContainerAgent for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * UpdateInProgressException +// There is already a current Amazon ECS container agent update in progress +// on the specified container instance. If the container agent becomes disconnected +// while it is in a transitional stage, such as PENDING or STAGING, the update +// process can get stuck in that state. However, when the agent reconnects, +// it resumes where it stopped previously. +// +// * NoUpdateAvailableException +// There is no update available for this Amazon ECS container agent. This could +// be because the agent is already running the latest version, or it is so old +// that there is no update path to the current version. +// +// * MissingVersionException +// Amazon ECS is unable to determine the current version of the Amazon ECS container +// agent on the container instance and does not have enough information to proceed +// with an update. This could be because the agent running on the container +// instance is an older or custom version that does not use our version information. +// +func (c *ECS) UpdateContainerAgent(input *UpdateContainerAgentInput) (*UpdateContainerAgentOutput, error) { + req, out := c.UpdateContainerAgentRequest(input) + return out, req.Send() +} + +// UpdateContainerAgentWithContext is the same as UpdateContainerAgent with the addition of +// the ability to pass a context and additional request options. 
+// +// See UpdateContainerAgent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) UpdateContainerAgentWithContext(ctx aws.Context, input *UpdateContainerAgentInput, opts ...request.Option) (*UpdateContainerAgentOutput, error) { + req, out := c.UpdateContainerAgentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContainerInstancesState = "UpdateContainerInstancesState" + +// UpdateContainerInstancesStateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContainerInstancesState operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContainerInstancesState for more information on using the UpdateContainerInstancesState +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateContainerInstancesStateRequest method. 
+// req, resp := client.UpdateContainerInstancesStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) UpdateContainerInstancesStateRequest(input *UpdateContainerInstancesStateInput) (req *request.Request, output *UpdateContainerInstancesStateOutput) { + op := &request.Operation{ + Name: opUpdateContainerInstancesState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContainerInstancesStateInput{} + } + + output = &UpdateContainerInstancesStateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateContainerInstancesState API operation for Amazon Elastic Container Service. +// +// Modifies the status of an Amazon ECS container instance. +// +// You can change the status of a container instance to DRAINING to manually +// remove an instance from a cluster, for example to perform system updates, +// update the Docker daemon, or scale down the cluster size. +// +// When you set a container instance to DRAINING, Amazon ECS prevents new tasks +// from being scheduled for placement on the container instance and replacement +// service tasks are started on other container instances in the cluster if +// the resources are available. Service tasks on the container instance that +// are in the PENDING state are stopped immediately. +// +// Service tasks on the container instance that are in the RUNNING state are +// stopped and replaced according to the service's deployment configuration +// parameters, minimumHealthyPercent and maximumPercent. You can change the +// deployment configuration of your service using UpdateService. +// +// * If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount +// temporarily during task replacement. For example, desiredCount is four +// tasks, a minimum of 50% allows the scheduler to stop two existing tasks +// before starting two new tasks. 
If the minimum is 100%, the service scheduler +// can't remove existing tasks until the replacement tasks are considered +// healthy. Tasks for services that do not use a load balancer are considered +// healthy if they are in the RUNNING state. Tasks for services that use +// a load balancer are considered healthy if they are in the RUNNING state +// and the container instance they are hosted on is reported as healthy by +// the load balancer. +// +// * The maximumPercent parameter represents an upper limit on the number +// of running tasks during task replacement, which enables you to define +// the replacement batch size. For example, if desiredCount of four tasks, +// a maximum of 200% starts four new tasks before stopping the four tasks +// to be drained (provided that the cluster resources required to do this +// are available). If the maximum is 100%, then replacement tasks can't start +// until the draining tasks have stopped. +// +// Any PENDING or RUNNING tasks that do not belong to a service are not affected; +// you must wait for them to finish or stop them manually. +// +// A container instance has completed draining when it has no more RUNNING tasks. +// You can verify this using ListTasks. +// +// When you set a container instance to ACTIVE, the Amazon ECS scheduler can +// begin scheduling tasks on the instance again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation UpdateContainerInstancesState for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. 
+// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +func (c *ECS) UpdateContainerInstancesState(input *UpdateContainerInstancesStateInput) (*UpdateContainerInstancesStateOutput, error) { + req, out := c.UpdateContainerInstancesStateRequest(input) + return out, req.Send() +} + +// UpdateContainerInstancesStateWithContext is the same as UpdateContainerInstancesState with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContainerInstancesState for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) UpdateContainerInstancesStateWithContext(ctx aws.Context, input *UpdateContainerInstancesStateInput, opts ...request.Option) (*UpdateContainerInstancesStateOutput, error) { + req, out := c.UpdateContainerInstancesStateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateService = "UpdateService" + +// UpdateServiceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateService operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateService for more information on using the UpdateService +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateServiceRequest method. +// req, resp := client.UpdateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Request, output *UpdateServiceOutput) { + op := &request.Operation{ + Name: opUpdateService, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceInput{} + } + + output = &UpdateServiceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateService API operation for Amazon Elastic Container Service. +// +// Modifies the desired count, deployment configuration, network configuration, +// or task definition used in a service. +// +// You can add to or subtract from the number of instantiations of a task definition +// in a service by specifying the cluster that the service is running in and +// a new desiredCount parameter. +// +// If you have updated the Docker image of your application, you can create +// a new task definition with that image and deploy it to your service. The +// service scheduler uses the minimum healthy percent and maximum percent parameters +// (in the service's deployment configuration) to determine the deployment strategy. +// +// If your updated Docker image uses the same tag as what is in the existing +// task definition for your service (for example, my_image:latest), you do not +// need to create a new revision of your task definition. 
You can update the +// service using the forceNewDeployment option. The new tasks launched by the +// deployment pull the current image/tag combination from your repository when +// they start. +// +// You can also update the deployment configuration of a service. When a deployment +// is triggered by updating the task definition of a service, the service scheduler +// uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, +// to determine the deployment strategy. +// +// * If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount +// temporarily during a deployment. For example, if desiredCount is four +// tasks, a minimum of 50% allows the scheduler to stop two existing tasks +// before starting two new tasks. Tasks for services that do not use a load +// balancer are considered healthy if they are in the RUNNING state. Tasks +// for services that use a load balancer are considered healthy if they are +// in the RUNNING state and the container instance they are hosted on is +// reported as healthy by the load balancer. +// +// * The maximumPercent parameter represents an upper limit on the number +// of running tasks during a deployment, which enables you to define the +// deployment batch size. For example, if desiredCount is four tasks, a maximum +// of 200% starts four new tasks before stopping the four older tasks (provided +// that the cluster resources required to do this are available). +// +// When UpdateService stops a task during a deployment, the equivalent of docker +// stop is issued to the containers running in the task. This results in a SIGTERM +// and a 30-second timeout, after which SIGKILL is sent and the containers are +// forcibly stopped. If the container handles the SIGTERM gracefully and exits +// within 30 seconds from receiving it, no SIGKILL is sent. 
+// +// When the service scheduler launches new tasks, it determines task placement +// in your cluster with the following logic: +// +// * Determine which of the container instances in your cluster can support +// your service's task definition (for example, they have the required CPU, +// memory, ports, and container instance attributes). +// +// * By default, the service scheduler attempts to balance tasks across Availability +// Zones in this manner (although you can choose a different placement strategy): +// Sort the valid container instances by the fewest number of running tasks +// for this service in the same Availability Zone as the instance. For example, +// if zone A has one running service task and zones B and C each have zero, +// valid container instances in either zone B or C are considered optimal +// for placement. Place the new service task on a valid container instance +// in an optimal Availability Zone (based on the previous steps), favoring +// container instances with the fewest number of running tasks for this service. +// +// When the service scheduler stops running tasks, it attempts to maintain balance +// across the Availability Zones in your cluster using the following logic: +// +// * Sort the container instances by the largest number of running tasks +// for this service in the same Availability Zone as the instance. For example, +// if zone A has one running service task and zones B and C each have two, +// container instances in either zone B or C are considered optimal for termination. +// +// * Stop the task on a container instance in an optimal Availability Zone +// (based on the previous steps), favoring container instances with the largest +// number of running tasks for this service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Container Service's +// API operation UpdateService for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ClusterNotFoundException +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +// +// * ServiceNotFoundException +// The specified service could not be found. You can view your available services +// with ListServices. Amazon ECS services are cluster-specific and region-specific. +// +// * ServiceNotActiveException +// The specified service is not active. You can't update a service that is inactive. +// If you have previously deleted a service, you can re-create it with CreateService. +// +// * PlatformUnknownException +// The specified platform version does not exist. +// +// * PlatformTaskDefinitionIncompatibilityException +// The specified platform version does not satisfy the task definition's required +// capabilities. +// +// * AccessDeniedException +// You do not have authorization to perform the requested action. +// +func (c *ECS) UpdateService(input *UpdateServiceInput) (*UpdateServiceOutput, error) { + req, out := c.UpdateServiceRequest(input) + return out, req.Send() +} + +// UpdateServiceWithContext is the same as UpdateService with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateService for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) UpdateServiceWithContext(ctx aws.Context, input *UpdateServiceInput, opts ...request.Option) (*UpdateServiceOutput, error) { + req, out := c.UpdateServiceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You do not have authorization to perform the requested action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing a container instance or task attachment. +type Attachment struct { + _ struct{} `type:"structure"` + + // Details of the attachment. For elastic network interfaces, this includes + // the network interface ID, the MAC address, the subnet ID, and the private + // IPv4 address. + Details []*KeyValuePair `locationName:"details" type:"list"` + + // The unique identifier for the attachment. + Id *string `locationName:"id" type:"string"` + + // The status of the attachment. Valid values are PRECREATED, CREATED, ATTACHING, + // ATTACHED, DETACHING, DETACHED, and DELETED. + Status *string `locationName:"status" type:"string"` + + // The type of the attachment, such as ElasticNetworkInterface. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s Attachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attachment) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *Attachment) SetDetails(v []*KeyValuePair) *Attachment { + s.Details = v + return s +} + +// SetId sets the Id field's value. +func (s *Attachment) SetId(v string) *Attachment { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Attachment) SetStatus(v string) *Attachment { + s.Status = &v + return s +} + +// SetType sets the Type field's value. +func (s *Attachment) SetType(v string) *Attachment { + s.Type = &v + return s +} + +// An object representing a change in state for a task attachment. +type AttachmentStateChange struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the attachment. + // + // AttachmentArn is a required field + AttachmentArn *string `locationName:"attachmentArn" type:"string" required:"true"` + + // The status of the attachment. 
+ // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachmentStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachmentStateChange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachmentStateChange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachmentStateChange"} + if s.AttachmentArn == nil { + invalidParams.Add(request.NewErrParamRequired("AttachmentArn")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachmentArn sets the AttachmentArn field's value. +func (s *AttachmentStateChange) SetAttachmentArn(v string) *AttachmentStateChange { + s.AttachmentArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AttachmentStateChange) SetStatus(v string) *AttachmentStateChange { + s.Status = &v + return s +} + +// An attribute is a name-value pair associated with an Amazon ECS object. Attributes +// enable you to extend the Amazon ECS data model by adding custom metadata +// to your resources. For more information, see Attributes (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes) +// in the Amazon Elastic Container Service Developer Guide. +type Attribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. Up to 128 letters (uppercase and lowercase), numbers, + // hyphens, underscores, and periods are allowed. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The ID of the target. You can specify the short form ID for a resource or + // the full Amazon Resource Name (ARN). 
+ TargetId *string `locationName:"targetId" type:"string"` + + // The type of the target with which to attach the attribute. This parameter + // is required if you use the short form ID for a resource instead of the full + // ARN. + TargetType *string `locationName:"targetType" type:"string" enum:"TargetType"` + + // The value of the attribute. Up to 128 letters (uppercase and lowercase), + // numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, + // and spaces are allowed. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Attribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Attribute"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *Attribute) SetName(v string) *Attribute { + s.Name = &v + return s +} + +// SetTargetId sets the TargetId field's value. +func (s *Attribute) SetTargetId(v string) *Attribute { + s.TargetId = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *Attribute) SetTargetType(v string) *Attribute { + s.TargetType = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Attribute) SetValue(v string) *Attribute { + s.Value = &v + return s +} + +// You can apply up to 10 custom attributes per resource. You can view the attributes +// of a resource with ListAttributes. You can remove existing attributes on +// a resource with DeleteAttributes. 
+type AttributeLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AttributeLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeLimitExceededException) GoString() string { + return s.String() +} + +func newErrorAttributeLimitExceededException(v protocol.ResponseMetadata) error { + return &AttributeLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AttributeLimitExceededException) Code() string { + return "AttributeLimitExceededException" +} + +// Message returns the exception's message. +func (s *AttributeLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AttributeLimitExceededException) OrigErr() error { + return nil +} + +func (s *AttributeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AttributeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AttributeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing the networking details for a task or service. +type AwsVpcConfiguration struct { + _ struct{} `type:"structure"` + + // Whether the task's elastic network interface receives a public IP address. + AssignPublicIp *string `locationName:"assignPublicIp" type:"string" enum:"AssignPublicIp"` + + // The security groups associated with the task or service. 
If you do not specify + // a security group, the default security group for the VPC is used. There is + // a limit of 5 security groups able to be specified per AwsVpcConfiguration. + // + // All specified security groups must be from the same VPC. + SecurityGroups []*string `locationName:"securityGroups" type:"list"` + + // The subnets associated with the task or service. There is a limit of 10 subnets + // able to be specified per AwsVpcConfiguration. + // + // All specified subnets must be from the same VPC. + // + // Subnets is a required field + Subnets []*string `locationName:"subnets" type:"list" required:"true"` +} + +// String returns the string representation +func (s AwsVpcConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsVpcConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsVpcConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsVpcConfiguration"} + if s.Subnets == nil { + invalidParams.Add(request.NewErrParamRequired("Subnets")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssignPublicIp sets the AssignPublicIp field's value. +func (s *AwsVpcConfiguration) SetAssignPublicIp(v string) *AwsVpcConfiguration { + s.AssignPublicIp = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *AwsVpcConfiguration) SetSecurityGroups(v []*string) *AwsVpcConfiguration { + s.SecurityGroups = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *AwsVpcConfiguration) SetSubnets(v []*string) *AwsVpcConfiguration { + s.Subnets = v + return s +} + +// Your AWS account has been blocked. Contact AWS Support (http://aws.amazon.com/contact-us/) +// for more information. 
+type BlockedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BlockedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockedException) GoString() string { + return s.String() +} + +func newErrorBlockedException(v protocol.ResponseMetadata) error { + return &BlockedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BlockedException) Code() string { + return "BlockedException" +} + +// Message returns the exception's message. +func (s *BlockedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BlockedException) OrigErr() error { + return nil +} + +func (s *BlockedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BlockedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BlockedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. 
+type ClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientException) GoString() string { + return s.String() +} + +func newErrorClientException(v protocol.ResponseMetadata) error { + return &ClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ClientException) Code() string { + return "ClientException" +} + +// Message returns the exception's message. +func (s *ClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ClientException) OrigErr() error { + return nil +} + +func (s *ClientException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A regional grouping of one or more container instances on which you can run +// task requests. Each account receives a default cluster the first time you +// use the Amazon ECS service, but you may also create other clusters. Clusters +// may contain more than one instance type simultaneously. +type Cluster struct { + _ struct{} `type:"structure"` + + // The number of services that are running on the cluster in an ACTIVE state. + // You can view these services with ListServices. 
+ ActiveServicesCount *int64 `locationName:"activeServicesCount" type:"integer"` + + // The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains + // the arn:aws:ecs namespace, followed by the Region of the cluster, the AWS + // account ID of the cluster owner, the cluster namespace, and then the cluster + // name. For example, arn:aws:ecs:region:012345678910:cluster/test .. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // A user-generated string that you use to identify your cluster. + ClusterName *string `locationName:"clusterName" type:"string"` + + // The number of tasks in the cluster that are in the PENDING state. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The number of container instances registered into the cluster. + RegisteredContainerInstancesCount *int64 `locationName:"registeredContainerInstancesCount" type:"integer"` + + // The number of tasks in the cluster that are in the RUNNING state. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the cluster. The valid values are ACTIVE or INACTIVE. ACTIVE + // indicates that you can register container instances with the cluster and + // the associated instances can accept tasks. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Cluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cluster) GoString() string { + return s.String() +} + +// SetActiveServicesCount sets the ActiveServicesCount field's value. +func (s *Cluster) SetActiveServicesCount(v int64) *Cluster { + s.ActiveServicesCount = &v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *Cluster) SetClusterArn(v string) *Cluster { + s.ClusterArn = &v + return s +} + +// SetClusterName sets the ClusterName field's value. 
+func (s *Cluster) SetClusterName(v string) *Cluster { + s.ClusterName = &v + return s +} + +// SetPendingTasksCount sets the PendingTasksCount field's value. +func (s *Cluster) SetPendingTasksCount(v int64) *Cluster { + s.PendingTasksCount = &v + return s +} + +// SetRegisteredContainerInstancesCount sets the RegisteredContainerInstancesCount field's value. +func (s *Cluster) SetRegisteredContainerInstancesCount(v int64) *Cluster { + s.RegisteredContainerInstancesCount = &v + return s +} + +// SetRunningTasksCount sets the RunningTasksCount field's value. +func (s *Cluster) SetRunningTasksCount(v int64) *Cluster { + s.RunningTasksCount = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Cluster) SetStatus(v string) *Cluster { + s.Status = &v + return s +} + +// You cannot delete a cluster that has registered container instances. You +// must first deregister the container instances before you can delete the cluster. +// For more information, see DeregisterContainerInstance. +type ClusterContainsContainerInstancesException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClusterContainsContainerInstancesException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterContainsContainerInstancesException) GoString() string { + return s.String() +} + +func newErrorClusterContainsContainerInstancesException(v protocol.ResponseMetadata) error { + return &ClusterContainsContainerInstancesException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ClusterContainsContainerInstancesException) Code() string { + return "ClusterContainsContainerInstancesException" +} + +// Message returns the exception's message. 
+func (s *ClusterContainsContainerInstancesException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ClusterContainsContainerInstancesException) OrigErr() error { + return nil +} + +func (s *ClusterContainsContainerInstancesException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ClusterContainsContainerInstancesException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ClusterContainsContainerInstancesException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You cannot delete a cluster that contains services. You must first update +// the service to reduce its desired task count to 0 and then delete the service. +// For more information, see UpdateService and DeleteService. +type ClusterContainsServicesException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClusterContainsServicesException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterContainsServicesException) GoString() string { + return s.String() +} + +func newErrorClusterContainsServicesException(v protocol.ResponseMetadata) error { + return &ClusterContainsServicesException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ClusterContainsServicesException) Code() string { + return "ClusterContainsServicesException" +} + +// Message returns the exception's message. 
+func (s *ClusterContainsServicesException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ClusterContainsServicesException) OrigErr() error { + return nil +} + +func (s *ClusterContainsServicesException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ClusterContainsServicesException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ClusterContainsServicesException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You cannot delete a cluster that has active tasks. +type ClusterContainsTasksException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClusterContainsTasksException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterContainsTasksException) GoString() string { + return s.String() +} + +func newErrorClusterContainsTasksException(v protocol.ResponseMetadata) error { + return &ClusterContainsTasksException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ClusterContainsTasksException) Code() string { + return "ClusterContainsTasksException" +} + +// Message returns the exception's message. +func (s *ClusterContainsTasksException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ClusterContainsTasksException) OrigErr() error { + return nil +} + +func (s *ClusterContainsTasksException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ClusterContainsTasksException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ClusterContainsTasksException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified cluster could not be found. You can view your available clusters +// with ListClusters. Amazon ECS clusters are region-specific. +type ClusterNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClusterNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterNotFoundException) GoString() string { + return s.String() +} + +func newErrorClusterNotFoundException(v protocol.ResponseMetadata) error { + return &ClusterNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ClusterNotFoundException) Code() string { + return "ClusterNotFoundException" +} + +// Message returns the exception's message. +func (s *ClusterNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ClusterNotFoundException) OrigErr() error { + return nil +} + +func (s *ClusterNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ClusterNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ClusterNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A Docker container that is part of a task. +type Container struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the container. + ContainerArn *string `locationName:"containerArn" type:"string"` + + // The exit code returned from the container. + ExitCode *int64 `locationName:"exitCode" type:"integer"` + + FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` + + // The health status of the container. If health checks are not configured for + // this container in its task definition, then it reports health status as UNKNOWN. + HealthStatus *string `locationName:"healthStatus" type:"string" enum:"HealthStatus"` + + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The last known status of the container. + LastStatus *string `locationName:"lastStatus" type:"string"` + + ManagedAgents []*ManagedAgent `locationName:"managedAgents" type:"list"` + + // The name of the container. + Name *string `locationName:"name" type:"string"` + + // The network bindings associated with the container. + NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` + + // The network interfaces associated with the container. + NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaces" type:"list"` + + // A short (255 max characters) human-readable string to provide additional + // details about a running or stopped container. + Reason *string `locationName:"reason" type:"string"` + + RuntimeId *string `locationName:"runtimeId" type:"string"` + + // The ARN of the task. 
+ TaskArn *string `locationName:"taskArn" type:"string"` +} + +// String returns the string representation +func (s Container) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Container) GoString() string { + return s.String() +} + +// SetContainerArn sets the ContainerArn field's value. +func (s *Container) SetContainerArn(v string) *Container { + s.ContainerArn = &v + return s +} + +// SetExitCode sets the ExitCode field's value. +func (s *Container) SetExitCode(v int64) *Container { + s.ExitCode = &v + return s +} + +// SetFirelensConfiguration sets the FirelensConfiguration field's value. +func (s *Container) SetFirelensConfiguration(v *FirelensConfiguration) *Container { + s.FirelensConfiguration = v + return s +} + +// SetHealthStatus sets the HealthStatus field's value. +func (s *Container) SetHealthStatus(v string) *Container { + s.HealthStatus = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *Container) SetImageDigest(v string) *Container { + s.ImageDigest = &v + return s +} + +// SetLastStatus sets the LastStatus field's value. +func (s *Container) SetLastStatus(v string) *Container { + s.LastStatus = &v + return s +} + +// SetManagedAgents sets the ManagedAgents field's value. +func (s *Container) SetManagedAgents(v []*ManagedAgent) *Container { + s.ManagedAgents = v + return s +} + +// SetName sets the Name field's value. +func (s *Container) SetName(v string) *Container { + s.Name = &v + return s +} + +// SetNetworkBindings sets the NetworkBindings field's value. +func (s *Container) SetNetworkBindings(v []*NetworkBinding) *Container { + s.NetworkBindings = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *Container) SetNetworkInterfaces(v []*NetworkInterface) *Container { + s.NetworkInterfaces = v + return s +} + +// SetReason sets the Reason field's value. 
+func (s *Container) SetReason(v string) *Container { + s.Reason = &v + return s +} + +// SetRuntimeId sets the RuntimeId field's value. +func (s *Container) SetRuntimeId(v string) *Container { + s.RuntimeId = &v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *Container) SetTaskArn(v string) *Container { + s.TaskArn = &v + return s +} + +// Container definitions are used in task definitions to describe the different +// containers that are launched as part of a task. +type ContainerDefinition struct { + _ struct{} `type:"structure"` + + // The command that is passed to the container. This parameter maps to Cmd in + // the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/). + // For more information, see https://docs.docker.com/engine/reference/builder/#cmd + // (https://docs.docker.com/engine/reference/builder/#cmd). + Command []*string `locationName:"command" type:"list"` + + // The number of cpu units reserved for the container. This parameter maps to + // CpuShares in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This field is optional for tasks using the Fargate launch type, and the only + // requirement is that the total amount of CPU reserved for all containers within + // a task be lower than the task-level cpu value. 
+ // + // You can determine the number of CPU units that are available per EC2 instance + // type by multiplying the vCPUs listed for that instance type on the Amazon + // EC2 Instances (http://aws.amazon.com/ec2/instance-types/) detail page by + // 1,024. + // + // For example, if you run a single-container task on a single-core instance + // type with 512 CPU units specified for that container, and that is the only + // task running on the container instance, that container could use the full + // 1,024 CPU unit share at any given time. However, if you launched another + // copy of the same task on that container instance, each task would be guaranteed + // a minimum of 512 CPU units when needed, and each container could float to + // higher CPU usage if the other container was not using it, but if both tasks + // were 100% active all of the time, they would be limited to 512 CPU units. + // + // Linux containers share unallocated CPU units with other containers on the + // container instance with the same ratio as their allocated amount. For example, + // if you run a single-container task on a single-core instance type with 512 + // CPU units specified for that container, and that is the only task running + // on the container instance, that container could use the full 1,024 CPU unit + // share at any given time. However, if you launched another copy of the same + // task on that container instance, each task would be guaranteed a minimum + // of 512 CPU units when needed, and each container could float to higher CPU + // usage if the other container was not using it, but if both tasks were 100% + // active all of the time, they would be limited to 512 CPU units. + // + // On Linux container instances, the Docker daemon on the container instance + // uses the CPU value to calculate the relative CPU share ratios for running + // containers. 
For more information, see CPU share constraint (https://docs.docker.com/engine/reference/run/#cpu-share-constraint) + // in the Docker documentation. The minimum valid CPU share value that the Linux + // kernel allows is 2; however, the CPU parameter is not required, and you can + // use CPU values below 2 in your container definitions. For CPU values below + // 2 (including null), the behavior varies based on your Amazon ECS container + // agent version: + // + // * Agent versions less than or equal to 1.1.0: Null and zero CPU values + // are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. + // CPU values of 1 are passed to Docker as 1, which the Linux kernel converts + // to 2 CPU shares. + // + // * Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values + // of 1 are passed to Docker as 2. + // + // On Windows container instances, the CPU limit is enforced as an absolute + // limit, or a quota. Windows containers only have access to the specified amount + // of CPU that is described in the task definition. + Cpu *int64 `locationName:"cpu" type:"integer"` + + DependsOn []*ContainerDependency `locationName:"dependsOn" type:"list"` + + // When this parameter is true, networking is disabled within the container. + // This parameter maps to NetworkDisabled in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/). + // + // This parameter is not supported for Windows containers. + DisableNetworking *bool `locationName:"disableNetworking" type:"boolean"` + + // A list of DNS search domains that are presented to the container. 
This parameter + // maps to DnsSearch in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --dns-search option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This parameter is not supported for Windows containers. + DnsSearchDomains []*string `locationName:"dnsSearchDomains" type:"list"` + + // A list of DNS servers that are presented to the container. This parameter + // maps to Dns in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --dns option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This parameter is not supported for Windows containers. + DnsServers []*string `locationName:"dnsServers" type:"list"` + + // A key/value map of labels to add to the container. This parameter maps to + // Labels in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --label option to docker run (https://docs.docker.com/engine/reference/run/). + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version | grep "Server API version" + DockerLabels map[string]*string `locationName:"dockerLabels" type:"map"` + + // A list of strings to provide custom labels for SELinux and AppArmor multi-level + // security systems. 
This field is not valid for containers in tasks using the + // Fargate launch type. + // + // This parameter maps to SecurityOpt in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --security-opt option to docker run (https://docs.docker.com/engine/reference/run/). + // + // The Amazon ECS container agent running on a container instance must register + // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment + // variables before containers placed on that instance can use these security + // options. For more information, see Amazon ECS Container Agent Configuration + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // This parameter is not supported for Windows containers. + DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` + + // + // Early versions of the Amazon ECS container agent do not properly handle entryPoint + // parameters. If you have problems using entryPoint, update your container + // agent or enter your commands and arguments as command array items instead. + // + // The entry point that is passed to the container. This parameter maps to Entrypoint + // in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --entrypoint option to docker run (https://docs.docker.com/engine/reference/run/). + // For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint + // (https://docs.docker.com/engine/reference/builder/#entrypoint). 
+ EntryPoint []*string `locationName:"entryPoint" type:"list"` + + // The environment variables to pass to a container. This parameter maps to + // Env in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --env option to docker run (https://docs.docker.com/engine/reference/run/). + // + // We do not recommend using plaintext environment variables for sensitive information, + // such as credential data. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // If the essential parameter of a container is marked as true, and that container + // fails or stops for any reason, all other containers that are part of the + // task are stopped. If the essential parameter of a container is marked as + // false, then its failure does not affect the rest of the containers in a task. + // If this parameter is omitted, a container is assumed to be essential. + // + // All tasks must have at least one essential container. If you have an application + // that is composed of multiple containers, you should group containers that + // are used for a common purpose into components, and separate the different + // components into multiple task definitions. For more information, see Application + // Architecture (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) + // in the Amazon Elastic Container Service Developer Guide. + Essential *bool `locationName:"essential" type:"boolean"` + + // A list of hostnames and IP address mappings to append to the /etc/hosts file + // on the container. If using the Fargate launch type, this may be used to list + // non-Fargate hosts to which the container can talk. 
This parameter maps to + // ExtraHosts in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --add-host option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This parameter is not supported for Windows containers. + ExtraHosts []*HostEntry `locationName:"extraHosts" type:"list"` + + FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` + + // The health check command and associated configuration parameters for the + // container. This parameter maps to HealthCheck in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the HEALTHCHECK parameter of docker run (https://docs.docker.com/engine/reference/run/). + HealthCheck *HealthCheck `locationName:"healthCheck" type:"structure"` + + // The hostname to use for your container. This parameter maps to Hostname in + // the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --hostname option to docker run (https://docs.docker.com/engine/reference/run/). + // + // The hostname parameter is not supported if using the awsvpc networkMode. + Hostname *string `locationName:"hostname" type:"string"` + + // The image used to start a container. This string is passed directly to the + // Docker daemon. Images in the Docker Hub registry are available by default. + // Other repositories are specified with either repository-url/image:tag or + // repository-url/image@digest . 
Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, underscores, colons, periods, forward slashes, and number + // signs are allowed. This parameter maps to Image in the Create a container + // (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/). + // + // * When a new task starts, the Amazon ECS container agent pulls the latest + // version of the specified image and tag for the container to use. However, + // subsequent updates to a repository image are not propagated to already + // running tasks. + // + // * Images in Amazon ECR repositories can be specified by either using the + // full registry/repository:tag or registry/repository@digest. For example, + // 012345678910.dkr.ecr..amazonaws.com/:latest + // or 012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE. + // + // * Images in official repositories on Docker Hub use a single name (for + // example, ubuntu or mongo). + // + // * Images in other repositories on Docker Hub are qualified with an organization + // name (for example, amazon/amazon-ecs-agent). + // + // * Images in other online repositories are qualified further by a domain + // name (for example, quay.io/assemblyline/ubuntu). + Image *string `locationName:"image" type:"string"` + + InferenceDevices []*string `locationName:"inferenceDevices" type:"list"` + + Interactive *bool `locationName:"interactive" type:"boolean"` + + // The link parameter allows containers to communicate with each other without + // the need for port mappings. Only supported if the network mode of a task + // definition is set to bridge. The name:internalName construct is analogous + // to name:alias in Docker links. 
Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. For more information about + // linking Docker containers, go to https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/ + // (https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/). + // This parameter maps to Links in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --link option to docker run (https://docs.docker.com/engine/reference/commandline/run/). + // + // This parameter is not supported for Windows containers. + // + // Containers that are collocated on a single container instance may be able + // to communicate with each other without requiring links or host port mappings. + // Network isolation is achieved on the container instance using security groups + // and VPC settings. + Links []*string `locationName:"links" type:"list"` + + // Linux-specific modifications that are applied to the container, such as Linux + // KernelCapabilities. + // + // This parameter is not supported for Windows containers. + LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` + + // The log configuration specification for the container. + // + // If using the Fargate launch type, the only supported value is awslogs. + // + // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). 
+ // By default, containers use the same logging driver that the Docker daemon + // uses; however the container may use a different logging driver than the Docker + // daemon by specifying a log driver with this parameter in the container definition. + // To use a different logging driver for a container, the log system must be + // configured properly on the container instance (or on a different log server + // for remote logging options). For more information on the options for different + // supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + // in the Docker documentation. + // + // Amazon ECS currently supports a subset of the logging drivers available to + // the Docker daemon (shown in the LogConfiguration data type). Additional log + // drivers may be available in future releases of the Amazon ECS container agent. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon Elastic Container Service Developer Guide. + LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + + // The hard limit (in MiB) of memory to present to the container. If your container + // attempts to exceed the memory specified here, the container is killed. 
This + // parameter maps to Memory in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If your containers are part of a task using the Fargate launch type, this + // field is optional and the only requirement is that the total amount of memory + // reserved for all containers within a task be lower than the task memory value. + // + // For containers that are part of a task using the EC2 launch type, you must + // specify a non-zero integer for one or both of memory or memoryReservation + // in container definitions. If you specify both, memory must be greater than + // memoryReservation. If you specify memoryReservation, then that value is subtracted + // from the available memory resources for the container instance on which the + // container is placed; otherwise, the value of memory is used. + // + // The Docker daemon reserves a minimum of 4 MiB of memory for a container, + // so you should not specify fewer than 4 MiB of memory for your containers. + Memory *int64 `locationName:"memory" type:"integer"` + + // The soft limit (in MiB) of memory to reserve for the container. When system + // memory is under heavy contention, Docker attempts to keep the container memory + // to this soft limit; however, your container can consume more memory when + // it needs to, up to either the hard limit specified with the memory parameter + // (if applicable), or all of the available memory on the container instance, + // whichever comes first. 
This parameter maps to MemoryReservation in the Create + // a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --memory-reservation option to docker run (https://docs.docker.com/engine/reference/run/). + // + // You must specify a non-zero integer for one or both of memory or memoryReservation + // in container definitions. If you specify both, memory must be greater than + // memoryReservation. If you specify memoryReservation, then that value is subtracted + // from the available memory resources for the container instance on which the + // container is placed; otherwise, the value of memory is used. + // + // For example, if your container normally uses 128 MiB of memory, but occasionally + // bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation + // of 128 MiB, and a memory hard limit of 300 MiB. This configuration would + // allow the container to only reserve 128 MiB of memory from the remaining + // resources on the container instance, but also allow the container to consume + // more memory resources when needed. + // + // The Docker daemon reserves a minimum of 4 MiB of memory for a container, + // so you should not specify fewer than 4 MiB of memory for your containers. + MemoryReservation *int64 `locationName:"memoryReservation" type:"integer"` + + // The mount points for data volumes in your container. + // + // This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --volume option to docker run (https://docs.docker.com/engine/reference/run/). 
+ // + // Windows containers can mount whole directories on the same drive as $env:ProgramData. + // Windows containers cannot mount directories on a different drive, and mount + // point cannot be across drives. + MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` + + // The name of a container. If you are linking multiple containers together + // in a task definition, the name of one container can be entered in the links + // of another container to connect the containers. Up to 255 letters (uppercase + // and lowercase), numbers, hyphens, and underscores are allowed. This parameter + // maps to name in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --name option to docker run (https://docs.docker.com/engine/reference/run/). + Name *string `locationName:"name" type:"string"` + + // The list of port mappings for the container. Port mappings allow containers + // to access ports on the host container instance to send or receive traffic. + // + // For task definitions that use the awsvpc network mode, you should only specify + // the containerPort. The hostPort can be left blank or it must be the same + // value as the containerPort. + // + // Port mappings on Windows use the NetNAT gateway address rather than localhost. + // There is no loopback for port mappings on Windows, so you cannot access a + // container's mapped port from the host itself. + // + // This parameter maps to PortBindings in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --publish option to docker run (https://docs.docker.com/engine/reference/run/). 
+ // If the network mode of a task definition is set to none, then you can't specify + // port mappings. If the network mode of a task definition is set to host, then + // host ports must either be undefined or they must match the container port + // in the port mapping. + // + // After a task reaches the RUNNING status, manual and automatic host and container + // port assignments are visible in the Network Bindings section of a container + // description for a selected task in the Amazon ECS console. The assignments + // are also visible in the networkBindings section DescribeTasks responses. + PortMappings []*PortMapping `locationName:"portMappings" type:"list"` + + // When this parameter is true, the container is given elevated privileges on + // the host container instance (similar to the root user). This parameter maps + // to Privileged in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This parameter is not supported for Windows containers or tasks using the + // Fargate launch type. + Privileged *bool `locationName:"privileged" type:"boolean"` + + PseudoTerminal *bool `locationName:"pseudoTerminal" type:"boolean"` + + // When this parameter is true, the container is given read-only access to its + // root file system. This parameter maps to ReadonlyRootfs in the Create a container + // (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --read-only option to docker run. + // + // This parameter is not supported for Windows containers. 
+ ReadonlyRootFilesystem *bool `locationName:"readonlyRootFilesystem" type:"boolean"` + + // The private repository authentication credentials to use. + RepositoryCredentials *RepositoryCredentials `locationName:"repositoryCredentials" type:"structure"` + + ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` + + Secrets []*Secret `locationName:"secrets" type:"list"` + + StartTimeout *int64 `locationName:"startTimeout" type:"integer"` + + StopTimeout *int64 `locationName:"stopTimeout" type:"integer"` + + SystemControls []*SystemControl `locationName:"systemControls" type:"list"` + + // A list of ulimits to set in the container. This parameter maps to Ulimits + // in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/). + // Valid naming values are displayed in the Ulimit data type. This parameter + // requires version 1.18 of the Docker Remote API or greater on your container + // instance. To check the Docker Remote API version on your container instance, + // log in to your container instance and run the following command: sudo docker + // version | grep "Server API version" + // + // This parameter is not supported for Windows containers. + Ulimits []*Ulimit `locationName:"ulimits" type:"list"` + + // The user name to use inside the container. This parameter maps to User in + // the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --user option to docker run (https://docs.docker.com/engine/reference/run/). + // + // This parameter is not supported for Windows containers. 
+ User *string `locationName:"user" type:"string"` + + // Data volumes to mount from another container. This parameter maps to VolumesFrom + // in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --volumes-from option to docker run (https://docs.docker.com/engine/reference/run/). + VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` + + // The working directory in which to run commands inside the container. This + // parameter maps to WorkingDir in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --workdir option to docker run (https://docs.docker.com/engine/reference/run/). + WorkingDirectory *string `locationName:"workingDirectory" type:"string"` +} + +// String returns the string representation +func (s ContainerDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ContainerDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerDefinition"} + if s.DependsOn != nil { + for i, v := range s.DependsOn { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DependsOn", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ExtraHosts != nil { + for i, v := range s.ExtraHosts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtraHosts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.FirelensConfiguration != nil { + if err := s.FirelensConfiguration.Validate(); err != nil { + invalidParams.AddNested("FirelensConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.HealthCheck != nil { + if err := s.HealthCheck.Validate(); err != nil { + invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) + } + } + if s.LinuxParameters != nil { + if err := s.LinuxParameters.Validate(); err != nil { + invalidParams.AddNested("LinuxParameters", err.(request.ErrInvalidParams)) + } + } + if s.LogConfiguration != nil { + if err := s.LogConfiguration.Validate(); err != nil { + invalidParams.AddNested("LogConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.RepositoryCredentials != nil { + if err := s.RepositoryCredentials.Validate(); err != nil { + invalidParams.AddNested("RepositoryCredentials", err.(request.ErrInvalidParams)) + } + } + if s.Secrets != nil { + for i, v := range s.Secrets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Secrets", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Ulimits != nil { + for i, v := range s.Ulimits { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Ulimits", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + 
return invalidParams + } + return nil +} + +// SetCommand sets the Command field's value. +func (s *ContainerDefinition) SetCommand(v []*string) *ContainerDefinition { + s.Command = v + return s +} + +// SetCpu sets the Cpu field's value. +func (s *ContainerDefinition) SetCpu(v int64) *ContainerDefinition { + s.Cpu = &v + return s +} + +// SetDependsOn sets the DependsOn field's value. +func (s *ContainerDefinition) SetDependsOn(v []*ContainerDependency) *ContainerDefinition { + s.DependsOn = v + return s +} + +// SetDisableNetworking sets the DisableNetworking field's value. +func (s *ContainerDefinition) SetDisableNetworking(v bool) *ContainerDefinition { + s.DisableNetworking = &v + return s +} + +// SetDnsSearchDomains sets the DnsSearchDomains field's value. +func (s *ContainerDefinition) SetDnsSearchDomains(v []*string) *ContainerDefinition { + s.DnsSearchDomains = v + return s +} + +// SetDnsServers sets the DnsServers field's value. +func (s *ContainerDefinition) SetDnsServers(v []*string) *ContainerDefinition { + s.DnsServers = v + return s +} + +// SetDockerLabels sets the DockerLabels field's value. +func (s *ContainerDefinition) SetDockerLabels(v map[string]*string) *ContainerDefinition { + s.DockerLabels = v + return s +} + +// SetDockerSecurityOptions sets the DockerSecurityOptions field's value. +func (s *ContainerDefinition) SetDockerSecurityOptions(v []*string) *ContainerDefinition { + s.DockerSecurityOptions = v + return s +} + +// SetEntryPoint sets the EntryPoint field's value. +func (s *ContainerDefinition) SetEntryPoint(v []*string) *ContainerDefinition { + s.EntryPoint = v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *ContainerDefinition) SetEnvironment(v []*KeyValuePair) *ContainerDefinition { + s.Environment = v + return s +} + +// SetEssential sets the Essential field's value. 
// The fluent setters below are generated-style accessors: a scalar setter
// copies v into a fresh allocation and stores its address on the receiver,
// a slice/struct setter stores v directly, and every setter returns the
// receiver so calls can be chained.
func (s *ContainerDefinition) SetEssential(v bool) *ContainerDefinition {
	s.Essential = &v
	return s
}

// SetExtraHosts sets the ExtraHosts field's value.
func (s *ContainerDefinition) SetExtraHosts(v []*HostEntry) *ContainerDefinition {
	s.ExtraHosts = v
	return s
}

// SetFirelensConfiguration sets the FirelensConfiguration field's value.
func (s *ContainerDefinition) SetFirelensConfiguration(v *FirelensConfiguration) *ContainerDefinition {
	s.FirelensConfiguration = v
	return s
}

// SetHealthCheck sets the HealthCheck field's value.
func (s *ContainerDefinition) SetHealthCheck(v *HealthCheck) *ContainerDefinition {
	s.HealthCheck = v
	return s
}

// SetHostname sets the Hostname field's value.
func (s *ContainerDefinition) SetHostname(v string) *ContainerDefinition {
	s.Hostname = &v
	return s
}

// SetImage sets the Image field's value.
func (s *ContainerDefinition) SetImage(v string) *ContainerDefinition {
	s.Image = &v
	return s
}

// SetInferenceDevices sets the InferenceDevices field's value.
func (s *ContainerDefinition) SetInferenceDevices(v []*string) *ContainerDefinition {
	s.InferenceDevices = v
	return s
}

// SetInteractive sets the Interactive field's value.
func (s *ContainerDefinition) SetInteractive(v bool) *ContainerDefinition {
	s.Interactive = &v
	return s
}

// SetLinks sets the Links field's value.
func (s *ContainerDefinition) SetLinks(v []*string) *ContainerDefinition {
	s.Links = v
	return s
}

// SetLinuxParameters sets the LinuxParameters field's value.
func (s *ContainerDefinition) SetLinuxParameters(v *LinuxParameters) *ContainerDefinition {
	s.LinuxParameters = v
	return s
}

// SetLogConfiguration sets the LogConfiguration field's value.
func (s *ContainerDefinition) SetLogConfiguration(v *LogConfiguration) *ContainerDefinition {
	s.LogConfiguration = v
	return s
}

// SetMemory sets the Memory field's value.
func (s *ContainerDefinition) SetMemory(v int64) *ContainerDefinition {
	s.Memory = &v
	return s
}

// SetMemoryReservation sets the MemoryReservation field's value.
func (s *ContainerDefinition) SetMemoryReservation(v int64) *ContainerDefinition {
	s.MemoryReservation = &v
	return s
}

// SetMountPoints sets the MountPoints field's value.
func (s *ContainerDefinition) SetMountPoints(v []*MountPoint) *ContainerDefinition {
	s.MountPoints = v
	return s
}

// SetName sets the Name field's value.
func (s *ContainerDefinition) SetName(v string) *ContainerDefinition {
	s.Name = &v
	return s
}

// SetPortMappings sets the PortMappings field's value.
func (s *ContainerDefinition) SetPortMappings(v []*PortMapping) *ContainerDefinition {
	s.PortMappings = v
	return s
}

// SetPrivileged sets the Privileged field's value.
func (s *ContainerDefinition) SetPrivileged(v bool) *ContainerDefinition {
	s.Privileged = &v
	return s
}

// SetPseudoTerminal sets the PseudoTerminal field's value.
func (s *ContainerDefinition) SetPseudoTerminal(v bool) *ContainerDefinition {
	s.PseudoTerminal = &v
	return s
}

// SetReadonlyRootFilesystem sets the ReadonlyRootFilesystem field's value.
func (s *ContainerDefinition) SetReadonlyRootFilesystem(v bool) *ContainerDefinition {
	s.ReadonlyRootFilesystem = &v
	return s
}

// SetRepositoryCredentials sets the RepositoryCredentials field's value.
func (s *ContainerDefinition) SetRepositoryCredentials(v *RepositoryCredentials) *ContainerDefinition {
	s.RepositoryCredentials = v
	return s
}

// SetResourceRequirements sets the ResourceRequirements field's value.
func (s *ContainerDefinition) SetResourceRequirements(v []*ResourceRequirement) *ContainerDefinition {
	s.ResourceRequirements = v
	return s
}

// SetSecrets sets the Secrets field's value.
+func (s *ContainerDefinition) SetSecrets(v []*Secret) *ContainerDefinition { + s.Secrets = v + return s +} + +// SetStartTimeout sets the StartTimeout field's value. +func (s *ContainerDefinition) SetStartTimeout(v int64) *ContainerDefinition { + s.StartTimeout = &v + return s +} + +// SetStopTimeout sets the StopTimeout field's value. +func (s *ContainerDefinition) SetStopTimeout(v int64) *ContainerDefinition { + s.StopTimeout = &v + return s +} + +// SetSystemControls sets the SystemControls field's value. +func (s *ContainerDefinition) SetSystemControls(v []*SystemControl) *ContainerDefinition { + s.SystemControls = v + return s +} + +// SetUlimits sets the Ulimits field's value. +func (s *ContainerDefinition) SetUlimits(v []*Ulimit) *ContainerDefinition { + s.Ulimits = v + return s +} + +// SetUser sets the User field's value. +func (s *ContainerDefinition) SetUser(v string) *ContainerDefinition { + s.User = &v + return s +} + +// SetVolumesFrom sets the VolumesFrom field's value. +func (s *ContainerDefinition) SetVolumesFrom(v []*VolumeFrom) *ContainerDefinition { + s.VolumesFrom = v + return s +} + +// SetWorkingDirectory sets the WorkingDirectory field's value. +func (s *ContainerDefinition) SetWorkingDirectory(v string) *ContainerDefinition { + s.WorkingDirectory = &v + return s +} + +type ContainerDependency struct { + _ struct{} `type:"structure"` + + // Condition is a required field + Condition *string `locationName:"condition" type:"string" required:"true" enum:"ContainerCondition"` + + // ContainerName is a required field + ContainerName *string `locationName:"containerName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ContainerDependency) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerDependency) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ContainerDependency) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerDependency"} + if s.Condition == nil { + invalidParams.Add(request.NewErrParamRequired("Condition")) + } + if s.ContainerName == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *ContainerDependency) SetCondition(v string) *ContainerDependency { + s.Condition = &v + return s +} + +// SetContainerName sets the ContainerName field's value. +func (s *ContainerDependency) SetContainerName(v string) *ContainerDependency { + s.ContainerName = &v + return s +} + +// An EC2 instance that is running the Amazon ECS agent and has been registered +// with a cluster. +type ContainerInstance struct { + _ struct{} `type:"structure"` + + // This parameter returns true if the agent is connected to Amazon ECS. Registered + // instances with an agent that may be unhealthy or stopped return false. Only + // instances connected to an agent can accept placement requests. + AgentConnected *bool `locationName:"agentConnected" type:"boolean"` + + // The status of the most recent agent update. If an update has never been requested, + // this value is NULL. + AgentUpdateStatus *string `locationName:"agentUpdateStatus" type:"string" enum:"AgentUpdateStatus"` + + // The elastic network interfaces associated with the container instance. + Attachments []*Attachment `locationName:"attachments" type:"list"` + + // The attributes set for the container instance, either by the Amazon ECS container + // agent at instance registration or manually with the PutAttributes operation. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + ClientToken *string `locationName:"clientToken" type:"string"` + + // The Amazon Resource Name (ARN) of the container instance. 
The ARN contains + // the arn:aws:ecs namespace, followed by the Region of the container instance, + // the AWS account ID of the container instance owner, the container-instance + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID . + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The EC2 instance ID of the container instance. + Ec2InstanceId *string `locationName:"ec2InstanceId" type:"string"` + + // The number of tasks on the container instance that are in the PENDING status. + PendingTasksCount *int64 `locationName:"pendingTasksCount" type:"integer"` + + // The Unix time stamp for when the container instance was registered. + RegisteredAt *time.Time `locationName:"registeredAt" type:"timestamp"` + + // For CPU and memory resource types, this parameter describes the amount of + // each resource that was available on the container instance when the container + // agent registered it with Amazon ECS; this value represents the total amount + // of CPU and memory that can be allocated on this container instance to tasks. + // For port resource types, this parameter describes the ports that were reserved + // by the Amazon ECS container agent when it registered the container instance + // with Amazon ECS. + RegisteredResources []*Resource `locationName:"registeredResources" type:"list"` + + // For CPU and memory resource types, this parameter describes the remaining + // CPU and memory that has not already been allocated to tasks and is therefore + // available for new tasks. For port resource types, this parameter describes + // the ports that were reserved by the Amazon ECS container agent (at instance + // registration time) and any task containers that have reserved port mappings + // on the host (with the host or bridge network mode). Any port that is not + // specified here is available for new tasks. 
+ RemainingResources []*Resource `locationName:"remainingResources" type:"list"` + + // The number of tasks on the container instance that are in the RUNNING status. + RunningTasksCount *int64 `locationName:"runningTasksCount" type:"integer"` + + // The status of the container instance. The valid values are ACTIVE, INACTIVE, + // or DRAINING. ACTIVE indicates that the container instance can accept tasks. + // DRAINING indicates that new tasks are not placed on the container instance + // and any service tasks running on the container instance are removed if possible. + // For more information, see Container Instance Draining (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-draining.html) + // in the Amazon Elastic Container Service Developer Guide. + Status *string `locationName:"status" type:"string"` + + // The metadata that you apply to the container instance to help you categorize + // and organize them. Each tag consists of a key and an optional value, both + // of which you define. Tag keys can have a maximum character length of 128 + // characters, and tag values can have a maximum length of 256 characters. + Tags []*Tag `locationName:"tags" type:"list"` + + // The version counter for the container instance. Every time a container instance + // experiences a change that triggers a CloudWatch event, the version counter + // is incremented. If you are replicating your Amazon ECS container instance + // state with CloudWatch Events, you can compare the version of a container + // instance reported by the Amazon ECS APIs with the version reported in CloudWatch + // Events for the container instance (inside the detail object) to verify that + // the version in your event stream is current. + Version *int64 `locationName:"version" type:"long"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. 
+ VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s ContainerInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerInstance) GoString() string { + return s.String() +} + +// SetAgentConnected sets the AgentConnected field's value. +func (s *ContainerInstance) SetAgentConnected(v bool) *ContainerInstance { + s.AgentConnected = &v + return s +} + +// SetAgentUpdateStatus sets the AgentUpdateStatus field's value. +func (s *ContainerInstance) SetAgentUpdateStatus(v string) *ContainerInstance { + s.AgentUpdateStatus = &v + return s +} + +// SetAttachments sets the Attachments field's value. +func (s *ContainerInstance) SetAttachments(v []*Attachment) *ContainerInstance { + s.Attachments = v + return s +} + +// SetAttributes sets the Attributes field's value. +func (s *ContainerInstance) SetAttributes(v []*Attribute) *ContainerInstance { + s.Attributes = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ContainerInstance) SetClientToken(v string) *ContainerInstance { + s.ClientToken = &v + return s +} + +// SetContainerInstanceArn sets the ContainerInstanceArn field's value. +func (s *ContainerInstance) SetContainerInstanceArn(v string) *ContainerInstance { + s.ContainerInstanceArn = &v + return s +} + +// SetEc2InstanceId sets the Ec2InstanceId field's value. +func (s *ContainerInstance) SetEc2InstanceId(v string) *ContainerInstance { + s.Ec2InstanceId = &v + return s +} + +// SetPendingTasksCount sets the PendingTasksCount field's value. +func (s *ContainerInstance) SetPendingTasksCount(v int64) *ContainerInstance { + s.PendingTasksCount = &v + return s +} + +// SetRegisteredAt sets the RegisteredAt field's value. 
+func (s *ContainerInstance) SetRegisteredAt(v time.Time) *ContainerInstance { + s.RegisteredAt = &v + return s +} + +// SetRegisteredResources sets the RegisteredResources field's value. +func (s *ContainerInstance) SetRegisteredResources(v []*Resource) *ContainerInstance { + s.RegisteredResources = v + return s +} + +// SetRemainingResources sets the RemainingResources field's value. +func (s *ContainerInstance) SetRemainingResources(v []*Resource) *ContainerInstance { + s.RemainingResources = v + return s +} + +// SetRunningTasksCount sets the RunningTasksCount field's value. +func (s *ContainerInstance) SetRunningTasksCount(v int64) *ContainerInstance { + s.RunningTasksCount = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ContainerInstance) SetStatus(v string) *ContainerInstance { + s.Status = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ContainerInstance) SetTags(v []*Tag) *ContainerInstance { + s.Tags = v + return s +} + +// SetVersion sets the Version field's value. +func (s *ContainerInstance) SetVersion(v int64) *ContainerInstance { + s.Version = &v + return s +} + +// SetVersionInfo sets the VersionInfo field's value. +func (s *ContainerInstance) SetVersionInfo(v *VersionInfo) *ContainerInstance { + s.VersionInfo = v + return s +} + +// The overrides that should be sent to a container. +type ContainerOverride struct { + _ struct{} `type:"structure"` + + // The command to send to the container that overrides the default command from + // the Docker image or the task definition. You must also specify a container + // name. + Command []*string `locationName:"command" type:"list"` + + // The number of cpu units reserved for the container, instead of the default + // value from the task definition. You must also specify a container name. + Cpu *int64 `locationName:"cpu" type:"integer"` + + // The environment variables to send to the container. 
You can add new environment + // variables, which are added to the container at launch, or you can override + // the existing environment variables from the Docker image or the task definition. + // You must also specify a container name. + Environment []*KeyValuePair `locationName:"environment" type:"list"` + + // The hard limit (in MiB) of memory to present to the container, instead of + // the default value from the task definition. If your container attempts to + // exceed the memory specified here, the container is killed. You must also + // specify a container name. + Memory *int64 `locationName:"memory" type:"integer"` + + // The soft limit (in MiB) of memory to reserve for the container, instead of + // the default value from the task definition. You must also specify a container + // name. + MemoryReservation *int64 `locationName:"memoryReservation" type:"integer"` + + // The name of the container that receives the override. This parameter is required + // if any override is specified. + Name *string `locationName:"name" type:"string"` + + ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` +} + +// String returns the string representation +func (s ContainerOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerOverride) GoString() string { + return s.String() +} + +// SetCommand sets the Command field's value. +func (s *ContainerOverride) SetCommand(v []*string) *ContainerOverride { + s.Command = v + return s +} + +// SetCpu sets the Cpu field's value. +func (s *ContainerOverride) SetCpu(v int64) *ContainerOverride { + s.Cpu = &v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *ContainerOverride) SetEnvironment(v []*KeyValuePair) *ContainerOverride { + s.Environment = v + return s +} + +// SetMemory sets the Memory field's value. 
+func (s *ContainerOverride) SetMemory(v int64) *ContainerOverride { + s.Memory = &v + return s +} + +// SetMemoryReservation sets the MemoryReservation field's value. +func (s *ContainerOverride) SetMemoryReservation(v int64) *ContainerOverride { + s.MemoryReservation = &v + return s +} + +// SetName sets the Name field's value. +func (s *ContainerOverride) SetName(v string) *ContainerOverride { + s.Name = &v + return s +} + +// SetResourceRequirements sets the ResourceRequirements field's value. +func (s *ContainerOverride) SetResourceRequirements(v []*ResourceRequirement) *ContainerOverride { + s.ResourceRequirements = v + return s +} + +// An object representing a change in state for a container. +type ContainerStateChange struct { + _ struct{} `type:"structure"` + + // The name of the container. + ContainerName *string `locationName:"containerName" type:"string"` + + // The exit code for the container, if the state change is a result of the container + // exiting. + ExitCode *int64 `locationName:"exitCode" type:"integer"` + + ImageDigest *string `locationName:"imageDigest" type:"string"` + + ManagedAgents []*ManagedAgentStateChange `locationName:"managedAgents" type:"list"` + + // Any network bindings associated with the container. + NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"` + + // The reason for the state change. + Reason *string `locationName:"reason" type:"string"` + + RuntimeId *string `locationName:"runtimeId" type:"string"` + + // The status of the container. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s ContainerStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerStateChange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ContainerStateChange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerStateChange"} + if s.ManagedAgents != nil { + for i, v := range s.ManagedAgents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ManagedAgents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *ContainerStateChange) SetContainerName(v string) *ContainerStateChange { + s.ContainerName = &v + return s +} + +// SetExitCode sets the ExitCode field's value. +func (s *ContainerStateChange) SetExitCode(v int64) *ContainerStateChange { + s.ExitCode = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *ContainerStateChange) SetImageDigest(v string) *ContainerStateChange { + s.ImageDigest = &v + return s +} + +// SetManagedAgents sets the ManagedAgents field's value. +func (s *ContainerStateChange) SetManagedAgents(v []*ManagedAgentStateChange) *ContainerStateChange { + s.ManagedAgents = v + return s +} + +// SetNetworkBindings sets the NetworkBindings field's value. +func (s *ContainerStateChange) SetNetworkBindings(v []*NetworkBinding) *ContainerStateChange { + s.NetworkBindings = v + return s +} + +// SetReason sets the Reason field's value. +func (s *ContainerStateChange) SetReason(v string) *ContainerStateChange { + s.Reason = &v + return s +} + +// SetRuntimeId sets the RuntimeId field's value. +func (s *ContainerStateChange) SetRuntimeId(v string) *ContainerStateChange { + s.RuntimeId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ContainerStateChange) SetStatus(v string) *ContainerStateChange { + s.Status = &v + return s +} + +type CreateClusterInput struct { + _ struct{} `type:"structure"` + + // The name of your cluster. 
If you do not specify a name for your cluster, + // you create a cluster named default. Up to 255 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. + ClusterName *string `locationName:"clusterName" type:"string"` +} + +// String returns the string representation +func (s CreateClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterInput) GoString() string { + return s.String() +} + +// SetClusterName sets the ClusterName field's value. +func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { + s.ClusterName = &v + return s +} + +type CreateClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of your new cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s CreateClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClusterOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *CreateClusterOutput) SetCluster(v *Cluster) *CreateClusterOutput { + s.Cluster = v + return s +} + +type CreateServiceInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. Up to 32 ASCII characters are allowed. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your service. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // Optional deployment parameters that control how many tasks run during the + // deployment and the ordering of stopping and starting tasks. 
+ DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the specified task definition to place and + // keep running on your cluster. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to Elastic Load Balancing health checks, you can specify a health check grace + // period of up to 7,200 seconds during which the ECS service scheduler ignores + // health check status. This grace period can prevent the ECS service scheduler + // from marking tasks as unhealthy and stopping them before they have time to + // come up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + + // The launch type on which to run your service. + LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // A load balancer object representing the load balancer to use with your service. + // Currently, you are limited to one load balancer or target group per service. + // After you create a service, the load balancer name or target group ARN, container + // name, and container port specified in the service definition are immutable. + // + // For Classic Load Balancers, this object must contain the load balancer name, + // the container name (as it appears in a container definition), and the container + // port to access from the load balancer. When a task from this service is placed + // on a container instance, the container instance is registered with the load + // balancer specified here. 
+ // + // For Application Load Balancers and Network Load Balancers, this object must + // contain the load balancer target group ARN, the container name (as it appears + // in a container definition), and the container port to access from the load + // balancer. When a task from this service is placed on a container instance, + // the container instance and port combination is registered as a target in + // the target group specified here. + // + // Services with tasks that use the awsvpc network mode (for example, those + // with the Fargate launch type) only support Application Load Balancers and + // Network Load Balancers; Classic Load Balancers are not supported. Also, when + // you create any target groups for these services, you must choose ip as the + // target type, not instance, because tasks that use the awsvpc network mode + // are associated with an elastic network interface, not an Amazon EC2 instance. + LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` + + // The network configuration for the service. This parameter is required for + // task definitions that use the awsvpc network mode to receive their own Elastic + // Network Interface, and it is not supported for other network modes. For more + // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. + NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` + + // An array of placement constraint objects to use for tasks in your service. + // You can specify a maximum of 10 constraints per task (this limit includes + // constraints in the task definition and those specified at run time). + PlacementConstraints []*PlacementConstraint `locationName:"placementConstraints" type:"list"` + + // The placement strategy objects to use for tasks in your service. 
You can + // specify a maximum of five strategy rules per service. + PlacementStrategy []*PlacementStrategy `locationName:"placementStrategy" type:"list"` + + // The platform version on which to run your service. If one is not specified, + // the latest version is used by default. + PlatformVersion *string `locationName:"platformVersion" type:"string"` + + // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon + // ECS to make calls to your load balancer on your behalf. This parameter is + // only permitted if you are using a load balancer with your service and your + // task definition does not use the awsvpc network mode. If you specify the + // role parameter, you must also specify a load balancer object with the loadBalancers + // parameter. + // + // If your account has already created the Amazon ECS service-linked role, that + // role is used by default for your service unless you specify a role here. + // The service-linked role is required if your task definition uses the awsvpc + // network mode, in which case you should not specify a role here. For more + // information, see Using Service-Linked Roles for Amazon ECS (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // If your specified role has a path other than /, then you must either specify + // the full role ARN (this is recommended) or prefix the role name with the + // path. For example, if a role with the name bar has a path of /foo/ then you + // would specify /foo/bar as the role name. For more information, see Friendly + // Names and Paths (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) + // in the IAM User Guide. + Role *string `locationName:"role" type:"string"` + + // The scheduling strategy to use for the service. 
For more information, see + // Services (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). + // + // There are two service scheduler strategies available: + // + // * REPLICA-The replica scheduling strategy places and maintains the desired + // number of tasks across your cluster. By default, the service scheduler + // spreads tasks across Availability Zones. You can use task placement strategies + // and constraints to customize task placement decisions. + // + // * DAEMON-The daemon scheduling strategy deploys exactly one task on each + // active container instance that meets all of the task placement constraints + // that you specify in your cluster. When using this strategy, there is no + // need to specify a desired number of tasks, a task placement strategy, + // or use Service Auto Scaling policies. Fargate tasks do not support the + // DAEMON scheduling strategy. + SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` + + // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. Service names must be unique within + // a cluster, but you can have similarly named services in multiple clusters + // within a Region or across multiple Regions. + // + // ServiceName is a required field + ServiceName *string `locationName:"serviceName" type:"string" required:"true"` + + // The details of the service discovery registries to assign to this service. + // For more information, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). + // + // Service discovery is supported for Fargate tasks if using platform version + // v1.1.0 or later. For more information, see AWS Fargate Platform Versions + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). 
+ ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"` + + // The family and revision (family:revision) or full ARN of the task definition + // to run in your service. If a revision is not specified, the latest ACTIVE + // revision is used. + // + // TaskDefinition is a required field + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateServiceInput"} + if s.ServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceName")) + } + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateServiceInput) SetClientToken(v string) *CreateServiceInput { + s.ClientToken = &v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *CreateServiceInput) SetCluster(v string) *CreateServiceInput { + s.Cluster = &v + return s +} + +// SetDeploymentConfiguration sets the DeploymentConfiguration field's value. +func (s *CreateServiceInput) SetDeploymentConfiguration(v *DeploymentConfiguration) *CreateServiceInput { + s.DeploymentConfiguration = v + return s +} + +// SetDesiredCount sets the DesiredCount field's value. 
+func (s *CreateServiceInput) SetDesiredCount(v int64) *CreateServiceInput { + s.DesiredCount = &v + return s +} + +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *CreateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *CreateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. +func (s *CreateServiceInput) SetLaunchType(v string) *CreateServiceInput { + s.LaunchType = &v + return s +} + +// SetLoadBalancers sets the LoadBalancers field's value. +func (s *CreateServiceInput) SetLoadBalancers(v []*LoadBalancer) *CreateServiceInput { + s.LoadBalancers = v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *CreateServiceInput) SetNetworkConfiguration(v *NetworkConfiguration) *CreateServiceInput { + s.NetworkConfiguration = v + return s +} + +// SetPlacementConstraints sets the PlacementConstraints field's value. +func (s *CreateServiceInput) SetPlacementConstraints(v []*PlacementConstraint) *CreateServiceInput { + s.PlacementConstraints = v + return s +} + +// SetPlacementStrategy sets the PlacementStrategy field's value. +func (s *CreateServiceInput) SetPlacementStrategy(v []*PlacementStrategy) *CreateServiceInput { + s.PlacementStrategy = v + return s +} + +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *CreateServiceInput) SetPlatformVersion(v string) *CreateServiceInput { + s.PlatformVersion = &v + return s +} + +// SetRole sets the Role field's value. +func (s *CreateServiceInput) SetRole(v string) *CreateServiceInput { + s.Role = &v + return s +} + +// SetSchedulingStrategy sets the SchedulingStrategy field's value. +func (s *CreateServiceInput) SetSchedulingStrategy(v string) *CreateServiceInput { + s.SchedulingStrategy = &v + return s +} + +// SetServiceName sets the ServiceName field's value. 
+func (s *CreateServiceInput) SetServiceName(v string) *CreateServiceInput { + s.ServiceName = &v + return s +} + +// SetServiceRegistries sets the ServiceRegistries field's value. +func (s *CreateServiceInput) SetServiceRegistries(v []*ServiceRegistry) *CreateServiceInput { + s.ServiceRegistries = v + return s +} + +// SetTaskDefinition sets the TaskDefinition field's value. +func (s *CreateServiceInput) SetTaskDefinition(v string) *CreateServiceInput { + s.TaskDefinition = &v + return s +} + +type CreateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the create call. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s CreateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceOutput) GoString() string { + return s.String() +} + +// SetService sets the Service field's value. +func (s *CreateServiceOutput) SetService(v *Service) *CreateServiceOutput { + s.Service = v + return s +} + +type DeleteAccountSettingInput struct { + _ struct{} `type:"structure"` + + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true" enum:"SettingName"` + + PrincipalArn *string `locationName:"principalArn" type:"string"` +} + +// String returns the string representation +func (s DeleteAccountSettingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountSettingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAccountSettingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccountSettingInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteAccountSettingInput) SetName(v string) *DeleteAccountSettingInput { + s.Name = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *DeleteAccountSettingInput) SetPrincipalArn(v string) *DeleteAccountSettingInput { + s.PrincipalArn = &v + return s +} + +type DeleteAccountSettingOutput struct { + _ struct{} `type:"structure"` + + Setting *Setting `locationName:"setting" type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountSettingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountSettingOutput) GoString() string { + return s.String() +} + +// SetSetting sets the Setting field's value. +func (s *DeleteAccountSettingOutput) SetSetting(v *Setting) *DeleteAccountSettingOutput { + s.Setting = v + return s +} + +type DeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes to delete from your resource. You can specify up to 10 attributes + // per request. For custom attributes, specify the attribute name and target + // ID, but do not specify the value. If you specify the target ID using the + // short form, you must also specify the target type. + // + // Attributes is a required field + Attributes []*Attribute `locationName:"attributes" type:"list" required:"true"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that contains + // the resource to delete attributes. If you do not specify a cluster, the default + // cluster is assumed. 
+ Cluster *string `locationName:"cluster" type:"string"` +} + +// String returns the string representation +func (s DeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *DeleteAttributesInput) SetAttributes(v []*Attribute) *DeleteAttributesInput { + s.Attributes = v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *DeleteAttributesInput) SetCluster(v string) *DeleteAttributesInput { + s.Cluster = &v + return s +} + +type DeleteAttributesOutput struct { + _ struct{} `type:"structure"` + + // A list of attribute objects that were successfully deleted from your resource. + Attributes []*Attribute `locationName:"attributes" type:"list"` +} + +// String returns the string representation +func (s DeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. 
+func (s *DeleteAttributesOutput) SetAttributes(v []*Attribute) *DeleteAttributesOutput { + s.Attributes = v + return s +} + +type DeleteClusterInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster to delete. + // + // Cluster is a required field + Cluster *string `locationName:"cluster" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClusterInput"} + if s.Cluster == nil { + invalidParams.Add(request.NewErrParamRequired("Cluster")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *DeleteClusterInput) SetCluster(v string) *DeleteClusterInput { + s.Cluster = &v + return s +} + +type DeleteClusterOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted cluster. + Cluster *Cluster `locationName:"cluster" type:"structure"` +} + +// String returns the string representation +func (s DeleteClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *DeleteClusterOutput) SetCluster(v *Cluster) *DeleteClusterOutput { + s.Cluster = v + return s +} + +type DeleteServiceInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the service to delete. If you do not specify a cluster, the default cluster + // is assumed. 
+ Cluster *string `locationName:"cluster" type:"string"` + + // If true, allows you to delete a service even if it has not been scaled down + // to zero tasks. It is only necessary to use this if the service is using the + // REPLICA scheduling strategy. + Force *bool `locationName:"force" type:"boolean"` + + // The name of the service to delete. + // + // Service is a required field + Service *string `locationName:"service" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceInput"} + if s.Service == nil { + invalidParams.Add(request.NewErrParamRequired("Service")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *DeleteServiceInput) SetCluster(v string) *DeleteServiceInput { + s.Cluster = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteServiceInput) SetForce(v bool) *DeleteServiceInput { + s.Force = &v + return s +} + +// SetService sets the Service field's value. +func (s *DeleteServiceInput) SetService(v string) *DeleteServiceInput { + s.Service = &v + return s +} + +type DeleteServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deleted service. 
+ Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceOutput) GoString() string { + return s.String() +} + +// SetService sets the Service field's value. +func (s *DeleteServiceOutput) SetService(v *Service) *DeleteServiceOutput { + s.Service = v + return s +} + +// The details of an Amazon ECS service deployment. +type Deployment struct { + _ struct{} `type:"structure"` + + // The Unix time stamp for when the service was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The most recent desired count of tasks that was specified for the service + // to deploy or maintain. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // The ID of the deployment. + Id *string `locationName:"id" type:"string"` + + // The launch type on which your service is running. + LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // The VPC subnet and security group configuration for tasks that receive their + // own elastic network interface by using the awsvpc networking mode. + NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` + + // The number of tasks in the deployment that are in the PENDING status. + PendingCount *int64 `locationName:"pendingCount" type:"integer"` + + // The platform version on which your service is running. + PlatformVersion *string `locationName:"platformVersion" type:"string"` + + // The number of tasks in the deployment that are in the RUNNING status. + RunningCount *int64 `locationName:"runningCount" type:"integer"` + + // The status of the deployment. 
Valid values are PRIMARY (for the most recent + // deployment), ACTIVE (for previous deployments that still have tasks running, + // but are being replaced with the PRIMARY deployment), and INACTIVE (for deployments + // that have been completely replaced). + Status *string `locationName:"status" type:"string"` + + // The most recent task definition that was specified for the service to use. + TaskDefinition *string `locationName:"taskDefinition" type:"string"` + + // The Unix time stamp for when the service was last updated. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s Deployment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Deployment) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Deployment) SetCreatedAt(v time.Time) *Deployment { + s.CreatedAt = &v + return s +} + +// SetDesiredCount sets the DesiredCount field's value. +func (s *Deployment) SetDesiredCount(v int64) *Deployment { + s.DesiredCount = &v + return s +} + +// SetId sets the Id field's value. +func (s *Deployment) SetId(v string) *Deployment { + s.Id = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. +func (s *Deployment) SetLaunchType(v string) *Deployment { + s.LaunchType = &v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *Deployment) SetNetworkConfiguration(v *NetworkConfiguration) *Deployment { + s.NetworkConfiguration = v + return s +} + +// SetPendingCount sets the PendingCount field's value. +func (s *Deployment) SetPendingCount(v int64) *Deployment { + s.PendingCount = &v + return s +} + +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *Deployment) SetPlatformVersion(v string) *Deployment { + s.PlatformVersion = &v + return s +} + +// SetRunningCount sets the RunningCount field's value. 
+func (s *Deployment) SetRunningCount(v int64) *Deployment { + s.RunningCount = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Deployment) SetStatus(v string) *Deployment { + s.Status = &v + return s +} + +// SetTaskDefinition sets the TaskDefinition field's value. +func (s *Deployment) SetTaskDefinition(v string) *Deployment { + s.TaskDefinition = &v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *Deployment) SetUpdatedAt(v time.Time) *Deployment { + s.UpdatedAt = &v + return s +} + +// Optional deployment parameters that control how many tasks run during the +// deployment and the ordering of stopping and starting tasks. +type DeploymentConfiguration struct { + _ struct{} `type:"structure"` + + // The upper limit (as a percentage of the service's desiredCount) of the number + // of tasks that are allowed in the RUNNING or PENDING state in a service during + // a deployment. The maximum number of tasks during a deployment is the desiredCount + // multiplied by maximumPercent/100, rounded down to the nearest integer value. + MaximumPercent *int64 `locationName:"maximumPercent" type:"integer"` + + // The lower limit (as a percentage of the service's desiredCount) of the number + // of running tasks that must remain in the RUNNING state in a service during + // a deployment. The minimum number of healthy tasks during a deployment is + // the desiredCount multiplied by minimumHealthyPercent/100, rounded up to the + // nearest integer value. + MinimumHealthyPercent *int64 `locationName:"minimumHealthyPercent" type:"integer"` +} + +// String returns the string representation +func (s DeploymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeploymentConfiguration) GoString() string { + return s.String() +} + +// SetMaximumPercent sets the MaximumPercent field's value. 
+func (s *DeploymentConfiguration) SetMaximumPercent(v int64) *DeploymentConfiguration { + s.MaximumPercent = &v + return s +} + +// SetMinimumHealthyPercent sets the MinimumHealthyPercent field's value. +func (s *DeploymentConfiguration) SetMinimumHealthyPercent(v int64) *DeploymentConfiguration { + s.MinimumHealthyPercent = &v + return s +} + +type DeregisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instance to deregister. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full ARN of the container instance to deregister. + // The ARN contains the arn:aws:ecs namespace, followed by the Region of the + // container instance, the AWS account ID of the container instance owner, the + // container-instance namespace, and then the container instance ID. For example, + // arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID . + // + // ContainerInstance is a required field + ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` + + // Forces the deregistration of the container instance. If you have tasks running + // on the container instance when you deregister it with the force option, these + // tasks remain running until you terminate the instance or the tasks stop through + // some other means, but they are orphaned (no longer monitored or accounted + // for by Amazon ECS). If an orphaned task on your container instance is part + // of an Amazon ECS service, then the service scheduler starts another copy + // of that task, on a different container instance if possible. + // + // Any containers in orphaned service tasks that are registered with a Classic + // Load Balancer or an Application Load Balancer target group are deregistered. 
+ // They begin connection draining according to the settings on the load balancer + // or target group. + Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterContainerInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterContainerInstanceInput"} + if s.ContainerInstance == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerInstance")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *DeregisterContainerInstanceInput) SetCluster(v string) *DeregisterContainerInstanceInput { + s.Cluster = &v + return s +} + +// SetContainerInstance sets the ContainerInstance field's value. +func (s *DeregisterContainerInstanceInput) SetContainerInstance(v string) *DeregisterContainerInstanceInput { + s.ContainerInstance = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeregisterContainerInstanceInput) SetForce(v bool) *DeregisterContainerInstanceInput { + s.Force = &v + return s +} + +type DeregisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // The container instance that was deregistered. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s DeregisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterContainerInstanceOutput) GoString() string { + return s.String() +} + +// SetContainerInstance sets the ContainerInstance field's value. 
+func (s *DeregisterContainerInstanceOutput) SetContainerInstance(v *ContainerInstance) *DeregisterContainerInstanceOutput { + s.ContainerInstance = v + return s +} + +type DeregisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family and revision (family:revision) or full Amazon Resource Name (ARN) + // of the task definition to deregister. You must specify a revision. + // + // TaskDefinition is a required field + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterTaskDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterTaskDefinitionInput"} + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskDefinition sets the TaskDefinition field's value. +func (s *DeregisterTaskDefinitionInput) SetTaskDefinition(v string) *DeregisterTaskDefinitionInput { + s.TaskDefinition = &v + return s +} + +type DeregisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the deregistered task. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DeregisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +// SetTaskDefinition sets the TaskDefinition field's value. 
+func (s *DeregisterTaskDefinitionOutput) SetTaskDefinition(v *TaskDefinition) *DeregisterTaskDefinitionOutput { + s.TaskDefinition = v + return s +} + +type DescribeClustersInput struct { + _ struct{} `type:"structure"` + + // A list of up to 100 cluster names or full cluster Amazon Resource Name (ARN) + // entries. If you do not specify a cluster, the default cluster is assumed. + Clusters []*string `locationName:"clusters" type:"list"` + + // Additional information about your clusters to be separated by launch type, + // including: + // + // * runningEC2TasksCount + // + // * runningFargateTasksCount + // + // * pendingEC2TasksCount + // + // * pendingFargateTasksCount + // + // * activeEC2ServiceCount + // + // * activeFargateServiceCount + // + // * drainingEC2ServiceCount + // + // * drainingFargateServiceCount + Include []*string `locationName:"include" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersInput) GoString() string { + return s.String() +} + +// SetClusters sets the Clusters field's value. +func (s *DescribeClustersInput) SetClusters(v []*string) *DescribeClustersInput { + s.Clusters = v + return s +} + +// SetInclude sets the Include field's value. +func (s *DescribeClustersInput) SetInclude(v []*string) *DescribeClustersInput { + s.Include = v + return s +} + +type DescribeClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of clusters. + Clusters []*Cluster `locationName:"clusters" type:"list"` + + // Any failures associated with the call. 
+ Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s DescribeClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClustersOutput) GoString() string { + return s.String() +} + +// SetClusters sets the Clusters field's value. +func (s *DescribeClustersOutput) SetClusters(v []*Cluster) *DescribeClustersOutput { + s.Clusters = v + return s +} + +// SetFailures sets the Failures field's value. +func (s *DescribeClustersOutput) SetFailures(v []*Failure) *DescribeClustersOutput { + s.Failures = v + return s +} + +type DescribeContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to describe. If you do not specify a cluster, the + // default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) + // entries. + // + // ContainerInstances is a required field + ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContainerInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContainerInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContainerInstancesInput"} + if s.ContainerInstances == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. 
+func (s *DescribeContainerInstancesInput) SetCluster(v string) *DescribeContainerInstancesInput {
+	s.Cluster = &v
+	return s
+}
+
+// SetContainerInstances sets the ContainerInstances field's value.
+func (s *DescribeContainerInstancesInput) SetContainerInstances(v []*string) *DescribeContainerInstancesInput {
+	s.ContainerInstances = v
+	return s
+}
+
+type DescribeContainerInstancesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of container instances.
+	ContainerInstances []*ContainerInstance `locationName:"containerInstances" type:"list"`
+
+	// Any failures associated with the call.
+	Failures []*Failure `locationName:"failures" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeContainerInstancesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeContainerInstancesOutput) GoString() string {
+	return s.String()
+}
+
+// SetContainerInstances sets the ContainerInstances field's value.
+func (s *DescribeContainerInstancesOutput) SetContainerInstances(v []*ContainerInstance) *DescribeContainerInstancesOutput {
+	s.ContainerInstances = v
+	return s
+}
+
+// SetFailures sets the Failures field's value.
+func (s *DescribeContainerInstancesOutput) SetFailures(v []*Failure) *DescribeContainerInstancesOutput {
+	s.Failures = v
+	return s
+}
+
+type DescribeServicesInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
+	// the service to describe. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// A list of services to describe. You may specify up to 10 services to describe
+	// in a single operation.
+ // + // Services is a required field + Services []*string `locationName:"services" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeServicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeServicesInput"} + if s.Services == nil { + invalidParams.Add(request.NewErrParamRequired("Services")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *DescribeServicesInput) SetCluster(v string) *DescribeServicesInput { + s.Cluster = &v + return s +} + +// SetServices sets the Services field's value. +func (s *DescribeServicesInput) SetServices(v []*string) *DescribeServicesInput { + s.Services = v + return s +} + +type DescribeServicesOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of services described. + Services []*Service `locationName:"services" type:"list"` +} + +// String returns the string representation +func (s DescribeServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeServicesOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *DescribeServicesOutput) SetFailures(v []*Failure) *DescribeServicesOutput { + s.Failures = v + return s +} + +// SetServices sets the Services field's value. 
+func (s *DescribeServicesOutput) SetServices(v []*Service) *DescribeServicesOutput { + s.Services = v + return s +} + +type DescribeTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // The family for the latest ACTIVE revision, family and revision (family:revision) + // for a specific revision in the family, or full Amazon Resource Name (ARN) + // of the task definition to describe. + // + // TaskDefinition is a required field + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTaskDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTaskDefinitionInput"} + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskDefinition sets the TaskDefinition field's value. +func (s *DescribeTaskDefinitionInput) SetTaskDefinition(v string) *DescribeTaskDefinitionInput { + s.TaskDefinition = &v + return s +} + +type DescribeTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full task definition description. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s DescribeTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTaskDefinitionOutput) GoString() string { + return s.String() +} + +// SetTaskDefinition sets the TaskDefinition field's value. 
+func (s *DescribeTaskDefinitionOutput) SetTaskDefinition(v *TaskDefinition) *DescribeTaskDefinitionOutput { + s.TaskDefinition = v + return s +} + +type DescribeTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task to describe. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of up to 100 task IDs or full ARN entries. + // + // Tasks is a required field + Tasks []*string `locationName:"tasks" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTasksInput"} + if s.Tasks == nil { + invalidParams.Add(request.NewErrParamRequired("Tasks")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *DescribeTasksInput) SetCluster(v string) *DescribeTasksInput { + s.Cluster = &v + return s +} + +// SetTasks sets the Tasks field's value. +func (s *DescribeTasksInput) SetTasks(v []*string) *DescribeTasksInput { + s.Tasks = v + return s +} + +type DescribeTasksOutput struct { + _ struct{} `type:"structure"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` + + // The list of tasks. 
+ Tasks []*Task `locationName:"tasks" type:"list"` +} + +// String returns the string representation +func (s DescribeTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTasksOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *DescribeTasksOutput) SetFailures(v []*Failure) *DescribeTasksOutput { + s.Failures = v + return s +} + +// SetTasks sets the Tasks field's value. +func (s *DescribeTasksOutput) SetTasks(v []*Task) *DescribeTasksOutput { + s.Tasks = v + return s +} + +// An object representing a container instance host device. +type Device struct { + _ struct{} `type:"structure"` + + // The path inside the container at which to expose the host device. + ContainerPath *string `locationName:"containerPath" type:"string"` + + // The path for the device on the host container instance. + // + // HostPath is a required field + HostPath *string `locationName:"hostPath" type:"string" required:"true"` + + // The explicit permissions to provide to the container for the device. By default, + // the container has permissions for read, write, and mknod for the device. + Permissions []*string `locationName:"permissions" type:"list"` +} + +// String returns the string representation +func (s Device) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Device) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Device) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Device"} + if s.HostPath == nil { + invalidParams.Add(request.NewErrParamRequired("HostPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerPath sets the ContainerPath field's value. 
+func (s *Device) SetContainerPath(v string) *Device { + s.ContainerPath = &v + return s +} + +// SetHostPath sets the HostPath field's value. +func (s *Device) SetHostPath(v string) *Device { + s.HostPath = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *Device) SetPermissions(v []*string) *Device { + s.Permissions = v + return s +} + +type DiscoverPollEndpointInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that the + // container instance belongs to. + Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full ARN of the container instance. The ARN + // contains the arn:aws:ecs namespace, followed by the Region of the container + // instance, the AWS account ID of the container instance owner, the container-instance + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID . + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointInput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *DiscoverPollEndpointInput) SetCluster(v string) *DiscoverPollEndpointInput { + s.Cluster = &v + return s +} + +// SetContainerInstance sets the ContainerInstance field's value. +func (s *DiscoverPollEndpointInput) SetContainerInstance(v string) *DiscoverPollEndpointInput { + s.ContainerInstance = &v + return s +} + +type DiscoverPollEndpointOutput struct { + _ struct{} `type:"structure"` + + // The endpoint for the Amazon ECS agent to poll. + Endpoint *string `locationName:"endpoint" type:"string"` + + // The telemetry endpoint for the Amazon ECS agent. 
+ TelemetryEndpoint *string `locationName:"telemetryEndpoint" type:"string"` +} + +// String returns the string representation +func (s DiscoverPollEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiscoverPollEndpointOutput) GoString() string { + return s.String() +} + +// SetEndpoint sets the Endpoint field's value. +func (s *DiscoverPollEndpointOutput) SetEndpoint(v string) *DiscoverPollEndpointOutput { + s.Endpoint = &v + return s +} + +// SetTelemetryEndpoint sets the TelemetryEndpoint field's value. +func (s *DiscoverPollEndpointOutput) SetTelemetryEndpoint(v string) *DiscoverPollEndpointOutput { + s.TelemetryEndpoint = &v + return s +} + +type DockerVolumeConfiguration struct { + _ struct{} `type:"structure"` + + Autoprovision *bool `locationName:"autoprovision" type:"boolean"` + + Driver *string `locationName:"driver" type:"string"` + + DriverOpts map[string]*string `locationName:"driverOpts" type:"map"` + + Labels map[string]*string `locationName:"labels" type:"map"` + + // The scope for the Docker volume that determines its lifecycle. Docker volumes + // that are scoped to a task are automatically provisioned when the task starts + // and destroyed when the task stops. Docker volumes that are scoped as shared + // persist after the task stops. + Scope *string `locationName:"scope" type:"string" enum:"Scope"` +} + +// String returns the string representation +func (s DockerVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DockerVolumeConfiguration) GoString() string { + return s.String() +} + +// SetAutoprovision sets the Autoprovision field's value. +func (s *DockerVolumeConfiguration) SetAutoprovision(v bool) *DockerVolumeConfiguration { + s.Autoprovision = &v + return s +} + +// SetDriver sets the Driver field's value. 
+func (s *DockerVolumeConfiguration) SetDriver(v string) *DockerVolumeConfiguration { + s.Driver = &v + return s +} + +// SetDriverOpts sets the DriverOpts field's value. +func (s *DockerVolumeConfiguration) SetDriverOpts(v map[string]*string) *DockerVolumeConfiguration { + s.DriverOpts = v + return s +} + +// SetLabels sets the Labels field's value. +func (s *DockerVolumeConfiguration) SetLabels(v map[string]*string) *DockerVolumeConfiguration { + s.Labels = v + return s +} + +// SetScope sets the Scope field's value. +func (s *DockerVolumeConfiguration) SetScope(v string) *DockerVolumeConfiguration { + s.Scope = &v + return s +} + +type EFSAuthorizationConfig struct { + _ struct{} `type:"structure"` + + AccessPointId *string `locationName:"accessPointId" type:"string"` + + Iam *string `locationName:"iam" type:"string" enum:"EFSAuthorizationConfigIAM"` +} + +// String returns the string representation +func (s EFSAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSAuthorizationConfig) GoString() string { + return s.String() +} + +// SetAccessPointId sets the AccessPointId field's value. +func (s *EFSAuthorizationConfig) SetAccessPointId(v string) *EFSAuthorizationConfig { + s.AccessPointId = &v + return s +} + +// SetIam sets the Iam field's value. 
+func (s *EFSAuthorizationConfig) SetIam(v string) *EFSAuthorizationConfig { + s.Iam = &v + return s +} + +type EFSVolumeConfiguration struct { + _ struct{} `type:"structure"` + + AuthorizationConfig *EFSAuthorizationConfig `locationName:"authorizationConfig" type:"structure"` + + // FileSystemId is a required field + FileSystemId *string `locationName:"fileSystemId" type:"string" required:"true"` + + RootDirectory *string `locationName:"rootDirectory" type:"string"` + + TransitEncryption *string `locationName:"transitEncryption" type:"string" enum:"EFSTransitEncryption"` +} + +// String returns the string representation +func (s EFSVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSVolumeConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EFSVolumeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EFSVolumeConfiguration"} + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationConfig sets the AuthorizationConfig field's value. +func (s *EFSVolumeConfiguration) SetAuthorizationConfig(v *EFSAuthorizationConfig) *EFSVolumeConfiguration { + s.AuthorizationConfig = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *EFSVolumeConfiguration) SetFileSystemId(v string) *EFSVolumeConfiguration { + s.FileSystemId = &v + return s +} + +// SetRootDirectory sets the RootDirectory field's value. +func (s *EFSVolumeConfiguration) SetRootDirectory(v string) *EFSVolumeConfiguration { + s.RootDirectory = &v + return s +} + +// SetTransitEncryption sets the TransitEncryption field's value. 
+func (s *EFSVolumeConfiguration) SetTransitEncryption(v string) *EFSVolumeConfiguration { + s.TransitEncryption = &v + return s +} + +type FSxWindowsFileServerAuthorizationConfig struct { + _ struct{} `type:"structure"` + + // CredentialsParameter is a required field + CredentialsParameter *string `locationName:"credentialsParameter" type:"string" required:"true"` + + // Domain is a required field + Domain *string `locationName:"domain" type:"string" required:"true"` +} + +// String returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FSxWindowsFileServerAuthorizationConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FSxWindowsFileServerAuthorizationConfig"} + if s.CredentialsParameter == nil { + invalidParams.Add(request.NewErrParamRequired("CredentialsParameter")) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCredentialsParameter sets the CredentialsParameter field's value. +func (s *FSxWindowsFileServerAuthorizationConfig) SetCredentialsParameter(v string) *FSxWindowsFileServerAuthorizationConfig { + s.CredentialsParameter = &v + return s +} + +// SetDomain sets the Domain field's value. 
+func (s *FSxWindowsFileServerAuthorizationConfig) SetDomain(v string) *FSxWindowsFileServerAuthorizationConfig { + s.Domain = &v + return s +} + +type FSxWindowsFileServerVolumeConfiguration struct { + _ struct{} `type:"structure"` + + // AuthorizationConfig is a required field + AuthorizationConfig *FSxWindowsFileServerAuthorizationConfig `locationName:"authorizationConfig" type:"structure" required:"true"` + + // FileSystemId is a required field + FileSystemId *string `locationName:"fileSystemId" type:"string" required:"true"` + + // RootDirectory is a required field + RootDirectory *string `locationName:"rootDirectory" type:"string" required:"true"` +} + +// String returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FSxWindowsFileServerVolumeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FSxWindowsFileServerVolumeConfiguration"} + if s.AuthorizationConfig == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationConfig")) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.RootDirectory == nil { + invalidParams.Add(request.NewErrParamRequired("RootDirectory")) + } + if s.AuthorizationConfig != nil { + if err := s.AuthorizationConfig.Validate(); err != nil { + invalidParams.AddNested("AuthorizationConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationConfig sets the AuthorizationConfig field's value. 
+func (s *FSxWindowsFileServerVolumeConfiguration) SetAuthorizationConfig(v *FSxWindowsFileServerAuthorizationConfig) *FSxWindowsFileServerVolumeConfiguration { + s.AuthorizationConfig = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *FSxWindowsFileServerVolumeConfiguration) SetFileSystemId(v string) *FSxWindowsFileServerVolumeConfiguration { + s.FileSystemId = &v + return s +} + +// SetRootDirectory sets the RootDirectory field's value. +func (s *FSxWindowsFileServerVolumeConfiguration) SetRootDirectory(v string) *FSxWindowsFileServerVolumeConfiguration { + s.RootDirectory = &v + return s +} + +// A failed resource. +type Failure struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the failed resource. + Arn *string `locationName:"arn" type:"string"` + + // The reason for the failure. + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s Failure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Failure) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Failure) SetArn(v string) *Failure { + s.Arn = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *Failure) SetReason(v string) *Failure { + s.Reason = &v + return s +} + +type FirelensConfiguration struct { + _ struct{} `type:"structure"` + + Options map[string]*string `locationName:"options" type:"map"` + + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"FirelensConfigurationType"` +} + +// String returns the string representation +func (s FirelensConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirelensConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FirelensConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FirelensConfiguration"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOptions sets the Options field's value. +func (s *FirelensConfiguration) SetOptions(v map[string]*string) *FirelensConfiguration { + s.Options = v + return s +} + +// SetType sets the Type field's value. +func (s *FirelensConfiguration) SetType(v string) *FirelensConfiguration { + s.Type = &v + return s +} + +// An object representing a container health check. Health check parameters +// that are specified in a container definition override any Docker health checks +// that exist in the container image (such as those specified in a parent image +// or from the image's Dockerfile). +type HealthCheck struct { + _ struct{} `type:"structure"` + + // A string array representing the command that the container runs to determine + // if it is healthy. The string array must start with CMD to execute the command + // arguments directly, or CMD-SHELL to run the command with the container's + // default shell. For example: + // + // [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] + // + // An exit code of 0 indicates success, and non-zero exit code indicates failure. + // For more information, see HealthCheck in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/). + // + // Command is a required field + Command []*string `locationName:"command" type:"list" required:"true"` + + // The time period in seconds between each health check execution. You may specify + // between 5 and 300 seconds. The default value is 30 seconds. 
+ Interval *int64 `locationName:"interval" type:"integer"` + + // The number of times to retry a failed health check before the container is + // considered unhealthy. You may specify between 1 and 10 retries. The default + // value is 3. + Retries *int64 `locationName:"retries" type:"integer"` + + // The optional grace period within which to provide containers time to bootstrap + // before failed health checks count towards the maximum number of retries. + // You may specify between 0 and 300 seconds. The startPeriod is disabled by + // default. + // + // If a health check succeeds within the startPeriod, then the container is + // considered healthy and any subsequent failures count toward the maximum number + // of retries. + StartPeriod *int64 `locationName:"startPeriod" type:"integer"` + + // The time period in seconds to wait for a health check to succeed before it + // is considered a failure. You may specify between 2 and 60 seconds. The default + // value is 5. + Timeout *int64 `locationName:"timeout" type:"integer"` +} + +// String returns the string representation +func (s HealthCheck) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthCheck) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HealthCheck) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HealthCheck"} + if s.Command == nil { + invalidParams.Add(request.NewErrParamRequired("Command")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommand sets the Command field's value. +func (s *HealthCheck) SetCommand(v []*string) *HealthCheck { + s.Command = v + return s +} + +// SetInterval sets the Interval field's value. +func (s *HealthCheck) SetInterval(v int64) *HealthCheck { + s.Interval = &v + return s +} + +// SetRetries sets the Retries field's value. 
+func (s *HealthCheck) SetRetries(v int64) *HealthCheck { + s.Retries = &v + return s +} + +// SetStartPeriod sets the StartPeriod field's value. +func (s *HealthCheck) SetStartPeriod(v int64) *HealthCheck { + s.StartPeriod = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *HealthCheck) SetTimeout(v int64) *HealthCheck { + s.Timeout = &v + return s +} + +// Hostnames and IP address entries that are added to the /etc/hosts file of +// a container via the extraHosts parameter of its ContainerDefinition. +type HostEntry struct { + _ struct{} `type:"structure"` + + // The hostname to use in the /etc/hosts entry. + // + // Hostname is a required field + Hostname *string `locationName:"hostname" type:"string" required:"true"` + + // The IP address to use in the /etc/hosts entry. + // + // IpAddress is a required field + IpAddress *string `locationName:"ipAddress" type:"string" required:"true"` +} + +// String returns the string representation +func (s HostEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HostEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HostEntry"} + if s.Hostname == nil { + invalidParams.Add(request.NewErrParamRequired("Hostname")) + } + if s.IpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("IpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostname sets the Hostname field's value. +func (s *HostEntry) SetHostname(v string) *HostEntry { + s.Hostname = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *HostEntry) SetIpAddress(v string) *HostEntry { + s.IpAddress = &v + return s +} + +// Details on a container instance host volume. 
+type HostVolumeProperties struct { + _ struct{} `type:"structure"` + + // The path on the host container instance that is presented to the container. + // If this parameter is empty, then the Docker daemon has assigned a host path + // for you. If the host parameter contains a sourcePath file location, then + // the data volume persists at the specified location on the host container + // instance until you delete it manually. If the sourcePath value does not exist + // on the host container instance, the Docker daemon creates it. If the location + // does exist, the contents of the source path folder are exported. + // + // If you are using the Fargate launch type, the sourcePath parameter is not + // supported. + SourcePath *string `locationName:"sourcePath" type:"string"` +} + +// String returns the string representation +func (s HostVolumeProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostVolumeProperties) GoString() string { + return s.String() +} + +// SetSourcePath sets the SourcePath field's value. +func (s *HostVolumeProperties) SetSourcePath(v string) *HostVolumeProperties { + s.SourcePath = &v + return s +} + +type InferenceAccelerator struct { + _ struct{} `type:"structure"` + + // DeviceName is a required field + DeviceName *string `locationName:"deviceName" type:"string" required:"true"` + + // DeviceType is a required field + DeviceType *string `locationName:"deviceType" type:"string" required:"true"` +} + +// String returns the string representation +func (s InferenceAccelerator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceAccelerator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InferenceAccelerator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InferenceAccelerator"} + if s.DeviceName == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceName")) + } + if s.DeviceType == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeviceName sets the DeviceName field's value. +func (s *InferenceAccelerator) SetDeviceName(v string) *InferenceAccelerator { + s.DeviceName = &v + return s +} + +// SetDeviceType sets the DeviceType field's value. +func (s *InferenceAccelerator) SetDeviceType(v string) *InferenceAccelerator { + s.DeviceType = &v + return s +} + +// The specified parameter is invalid. Review the available parameters for the +// API request. +type InvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { + return &InvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidParameterException) Code() string { + return "InvalidParameterException" +} + +// Message returns the exception's message. +func (s *InvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidParameterException) OrigErr() error { + return nil +} + +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The Linux capabilities for the container that are added to or dropped from +// the default configuration provided by Docker. For more information on the +// default capabilities and the non-default available capabilities, see Runtime +// privilege and Linux capabilities (https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities) +// in the Docker run reference. For more detailed information on these Linux +// capabilities, see the capabilities(7) (http://man7.org/linux/man-pages/man7/capabilities.7.html) +// Linux manual page. +type KernelCapabilities struct { + _ struct{} `type:"structure"` + + // The Linux capabilities for the container that have been added to the default + // configuration provided by Docker. This parameter maps to CapAdd in the Create + // a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --cap-add option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the add parameter + // is not supported. 
+ // + // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" + // | "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" + // | "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" + // | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | "SYS_BOOT" + // | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | + // "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | + // "WAKE_ALARM" + Add []*string `locationName:"add" type:"list"` + + // The Linux capabilities for the container that have been removed from the + // default configuration provided by Docker. This parameter maps to CapDrop + // in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --cap-drop option to docker run (https://docs.docker.com/engine/reference/run/). 
+ // + // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | + // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" + // | "IPC_OWNER" | "KILL" | "LEASE" | "LINUX_IMMUTABLE" | "MAC_ADMIN" | "MAC_OVERRIDE" + // | "MKNOD" | "NET_ADMIN" | "NET_BIND_SERVICE" | "NET_BROADCAST" | "NET_RAW" + // | "SETFCAP" | "SETGID" | "SETPCAP" | "SETUID" | "SYS_ADMIN" | "SYS_BOOT" + // | "SYS_CHROOT" | "SYS_MODULE" | "SYS_NICE" | "SYS_PACCT" | "SYS_PTRACE" | + // "SYS_RAWIO" | "SYS_RESOURCE" | "SYS_TIME" | "SYS_TTY_CONFIG" | "SYSLOG" | + // "WAKE_ALARM" + Drop []*string `locationName:"drop" type:"list"` +} + +// String returns the string representation +func (s KernelCapabilities) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KernelCapabilities) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *KernelCapabilities) SetAdd(v []*string) *KernelCapabilities { + s.Add = v + return s +} + +// SetDrop sets the Drop field's value. +func (s *KernelCapabilities) SetDrop(v []*string) *KernelCapabilities { + s.Drop = v + return s +} + +// A key and value pair object. +type KeyValuePair struct { + _ struct{} `type:"structure"` + + // The name of the key value pair. For environment variables, this is the name + // of the environment variable. + Name *string `locationName:"name" type:"string"` + + // The value of the key value pair. For environment variables, this is the value + // of the environment variable. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s KeyValuePair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyValuePair) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. 
+func (s *KeyValuePair) SetName(v string) *KeyValuePair { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *KeyValuePair) SetValue(v string) *KeyValuePair { + s.Value = &v + return s +} + +// Linux-specific options that are applied to the container, such as Linux KernelCapabilities. +type LinuxParameters struct { + _ struct{} `type:"structure"` + + // The Linux capabilities for the container that are added to or dropped from + // the default configuration provided by Docker. + // + // If you are using tasks that use the Fargate launch type, capabilities is + // supported but the add parameter is not supported. + Capabilities *KernelCapabilities `locationName:"capabilities" type:"structure"` + + // Any host devices to expose to the container. This parameter maps to Devices + // in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.27/) + // and the --device option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the devices parameter + // is not supported. + Devices []*Device `locationName:"devices" type:"list"` + + // Run an init process inside the container that forwards signals and reaps + // processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/). + // This parameter requires version 1.25 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version | grep "Server API version" + InitProcessEnabled *bool `locationName:"initProcessEnabled" type:"boolean"` + + // The value for the size (in MiB) of the /dev/shm volume. 
This parameter maps + // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the sharedMemorySize + // parameter is not supported. + SharedMemorySize *int64 `locationName:"sharedMemorySize" type:"integer"` + + // The container path, mount options, and size (in MiB) of the tmpfs mount. + // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the tmpfs parameter + // is not supported. + Tmpfs []*Tmpfs `locationName:"tmpfs" type:"list"` +} + +// String returns the string representation +func (s LinuxParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LinuxParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LinuxParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LinuxParameters"} + if s.Devices != nil { + for i, v := range s.Devices { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Devices", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tmpfs != nil { + for i, v := range s.Tmpfs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tmpfs", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapabilities sets the Capabilities field's value. +func (s *LinuxParameters) SetCapabilities(v *KernelCapabilities) *LinuxParameters { + s.Capabilities = v + return s +} + +// SetDevices sets the Devices field's value. 
+func (s *LinuxParameters) SetDevices(v []*Device) *LinuxParameters { + s.Devices = v + return s +} + +// SetInitProcessEnabled sets the InitProcessEnabled field's value. +func (s *LinuxParameters) SetInitProcessEnabled(v bool) *LinuxParameters { + s.InitProcessEnabled = &v + return s +} + +// SetSharedMemorySize sets the SharedMemorySize field's value. +func (s *LinuxParameters) SetSharedMemorySize(v int64) *LinuxParameters { + s.SharedMemorySize = &v + return s +} + +// SetTmpfs sets the Tmpfs field's value. +func (s *LinuxParameters) SetTmpfs(v []*Tmpfs) *LinuxParameters { + s.Tmpfs = v + return s +} + +type ListAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute with which to filter the results. + AttributeName *string `locationName:"attributeName" type:"string"` + + // The value of the attribute with which to filter results. You must also specify + // an attribute name to use this parameter. + AttributeValue *string `locationName:"attributeValue" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster to list + // attributes. If you do not specify a cluster, the default cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The maximum number of cluster results returned by ListAttributes in paginated + // output. When this parameter is used, ListAttributes only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListAttributes + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter is not used, then ListAttributes returns up to 100 + // results and a nextToken value if applicable. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListAttributes request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The type of the target with which to list attributes. + // + // TargetType is a required field + TargetType *string `locationName:"targetType" type:"string" required:"true" enum:"TargetType"` +} + +// String returns the string representation +func (s ListAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttributesInput"} + if s.TargetType == nil { + invalidParams.Add(request.NewErrParamRequired("TargetType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *ListAttributesInput) SetAttributeName(v string) *ListAttributesInput { + s.AttributeName = &v + return s +} + +// SetAttributeValue sets the AttributeValue field's value. +func (s *ListAttributesInput) SetAttributeValue(v string) *ListAttributesInput { + s.AttributeValue = &v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *ListAttributesInput) SetCluster(v string) *ListAttributesInput { + s.Cluster = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListAttributesInput) SetMaxResults(v int64) *ListAttributesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAttributesInput) SetNextToken(v string) *ListAttributesInput { + s.NextToken = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *ListAttributesInput) SetTargetType(v string) *ListAttributesInput { + s.TargetType = &v + return s +} + +type ListAttributesOutput struct { + _ struct{} `type:"structure"` + + // A list of attribute objects that meet the criteria of the request. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The nextToken value to include in a future ListAttributes request. When the + // results of a ListAttributes request exceed maxResults, this value can be + // used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ListAttributesOutput) SetAttributes(v []*Attribute) *ListAttributesOutput { + s.Attributes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAttributesOutput) SetNextToken(v string) *ListAttributesOutput { + s.NextToken = &v + return s +} + +type ListClustersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of cluster results returned by ListClusters in paginated + // output. When this parameter is used, ListClusters only returns maxResults + // results in a single page along with a nextToken response element. 
The remaining + // results of the initial request can be seen by sending another ListClusters + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter is not used, then ListClusters returns up to 100 results + // and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListClusters request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersInput) GoString() string { + return s.String() +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput { + s.NextToken = &v + return s +} + +type ListClustersOutput struct { + _ struct{} `type:"structure"` + + // The list of full Amazon Resource Name (ARN) entries for each cluster associated + // with your account. + ClusterArns []*string `locationName:"clusterArns" type:"list"` + + // The nextToken value to include in a future ListClusters request. When the + // results of a ListClusters request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersOutput) GoString() string { + return s.String() +} + +// SetClusterArns sets the ClusterArns field's value. +func (s *ListClustersOutput) SetClusterArns(v []*string) *ListClustersOutput { + s.ClusterArns = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { + s.NextToken = &v + return s +} + +type ListContainerInstancesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instances to list. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // You can filter the results of a ListContainerInstances operation with cluster + // query language statements. For more information, see Cluster Query Language + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) + // in the Amazon Elastic Container Service Developer Guide. + Filter *string `locationName:"filter" type:"string"` + + // The maximum number of container instance results returned by ListContainerInstances + // in paginated output. When this parameter is used, ListContainerInstances + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListContainerInstances request with the returned nextToken value. + // This value can be between 1 and 100. If this parameter is not used, then + // ListContainerInstances returns up to 100 results and a nextToken value if + // applicable. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListContainerInstances + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the container instances by status. For example, if you specify the + // DRAINING status, the results include only container instances that have been + // set to DRAINING using UpdateContainerInstancesState. If you do not specify + // this parameter, the default is to include container instances set to ACTIVE + // and DRAINING. + Status *string `locationName:"status" type:"string" enum:"ContainerInstanceStatus"` +} + +// String returns the string representation +func (s ListContainerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesInput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *ListContainerInstancesInput) SetCluster(v string) *ListContainerInstancesInput { + s.Cluster = &v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ListContainerInstancesInput) SetFilter(v string) *ListContainerInstancesInput { + s.Filter = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListContainerInstancesInput) SetMaxResults(v int64) *ListContainerInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListContainerInstancesInput) SetNextToken(v string) *ListContainerInstancesInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListContainerInstancesInput) SetStatus(v string) *ListContainerInstancesInput { + s.Status = &v + return s +} + +type ListContainerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances with full ARN entries for each container + // instance associated with the specified cluster. + ContainerInstanceArns []*string `locationName:"containerInstanceArns" type:"list"` + + // The nextToken value to include in a future ListContainerInstances request. + // When the results of a ListContainerInstances request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListContainerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListContainerInstancesOutput) GoString() string { + return s.String() +} + +// SetContainerInstanceArns sets the ContainerInstanceArns field's value. +func (s *ListContainerInstancesOutput) SetContainerInstanceArns(v []*string) *ListContainerInstancesOutput { + s.ContainerInstanceArns = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListContainerInstancesOutput) SetNextToken(v string) *ListContainerInstancesOutput { + s.NextToken = &v + return s +} + +type ListServicesInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the services to list. If you do not specify a cluster, the default cluster + // is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The launch type for the services to list. 
+ LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // The maximum number of service results returned by ListServices in paginated + // output. When this parameter is used, ListServices only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another ListServices + // request with the returned nextToken value. This value can be between 1 and + // 10. If this parameter is not used, then ListServices returns up to 10 results + // and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListServices request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The scheduling strategy for services to list. + SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` +} + +// String returns the string representation +func (s ListServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesInput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *ListServicesInput) SetCluster(v string) *ListServicesInput { + s.Cluster = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. +func (s *ListServicesInput) SetLaunchType(v string) *ListServicesInput { + s.LaunchType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListServicesInput) SetMaxResults(v int64) *ListServicesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListServicesInput) SetNextToken(v string) *ListServicesInput { + s.NextToken = &v + return s +} + +// SetSchedulingStrategy sets the SchedulingStrategy field's value. +func (s *ListServicesInput) SetSchedulingStrategy(v string) *ListServicesInput { + s.SchedulingStrategy = &v + return s +} + +type ListServicesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListServices request. When the + // results of a ListServices request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of full ARN entries for each service associated with the specified + // cluster. + ServiceArns []*string `locationName:"serviceArns" type:"list"` +} + +// String returns the string representation +func (s ListServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServicesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListServicesOutput) SetNextToken(v string) *ListServicesOutput { + s.NextToken = &v + return s +} + +// SetServiceArns sets the ServiceArns field's value. +func (s *ListServicesOutput) SetServiceArns(v []*string) *ListServicesOutput { + s.ServiceArns = v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are Amazon ECS tasks, services, + // task definitions, clusters, and container instances. 
+ // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +type ListTaskDefinitionFamiliesInput struct { + _ struct{} `type:"structure"` + + // The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. + // If you specify a familyPrefix, only task definition family names that begin + // with the familyPrefix string are returned. 
+ FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition family results returned by ListTaskDefinitionFamilies + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitionFamilies request with the returned nextToken value. + // This value can be between 1 and 100. If this parameter is not used, then + // ListTaskDefinitionFamilies returns up to 100 results and a nextToken value + // if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitionFamilies + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The task definition family status with which to filter the ListTaskDefinitionFamilies + // results. By default, both ACTIVE and INACTIVE task definition families are + // listed. If this parameter is set to ACTIVE, only task definition families + // that have an ACTIVE task definition revision are returned. If this parameter + // is set to INACTIVE, only task definition families that do not have any ACTIVE + // task definition revisions are returned. If you paginate the resulting output, + // be sure to keep the status value constant in each subsequent request. 
+ Status *string `locationName:"status" type:"string" enum:"TaskDefinitionFamilyStatus"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesInput) GoString() string { + return s.String() +} + +// SetFamilyPrefix sets the FamilyPrefix field's value. +func (s *ListTaskDefinitionFamiliesInput) SetFamilyPrefix(v string) *ListTaskDefinitionFamiliesInput { + s.FamilyPrefix = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListTaskDefinitionFamiliesInput) SetMaxResults(v int64) *ListTaskDefinitionFamiliesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTaskDefinitionFamiliesInput) SetNextToken(v string) *ListTaskDefinitionFamiliesInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListTaskDefinitionFamiliesInput) SetStatus(v string) *ListTaskDefinitionFamiliesInput { + s.Status = &v + return s +} + +type ListTaskDefinitionFamiliesOutput struct { + _ struct{} `type:"structure"` + + // The list of task definition family names that match the ListTaskDefinitionFamilies + // request. + Families []*string `locationName:"families" type:"list"` + + // The nextToken value to include in a future ListTaskDefinitionFamilies request. + // When the results of a ListTaskDefinitionFamilies request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTaskDefinitionFamiliesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionFamiliesOutput) GoString() string { + return s.String() +} + +// SetFamilies sets the Families field's value. +func (s *ListTaskDefinitionFamiliesOutput) SetFamilies(v []*string) *ListTaskDefinitionFamiliesOutput { + s.Families = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTaskDefinitionFamiliesOutput) SetNextToken(v string) *ListTaskDefinitionFamiliesOutput { + s.NextToken = &v + return s +} + +type ListTaskDefinitionsInput struct { + _ struct{} `type:"structure"` + + // The full family name with which to filter the ListTaskDefinitions results. + // Specifying a familyPrefix limits the listed task definitions to task definition + // revisions that belong to that family. + FamilyPrefix *string `locationName:"familyPrefix" type:"string"` + + // The maximum number of task definition results returned by ListTaskDefinitions + // in paginated output. When this parameter is used, ListTaskDefinitions only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another ListTaskDefinitions request with the returned nextToken value. This + // value can be between 1 and 100. If this parameter is not used, then ListTaskDefinitions + // returns up to 100 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTaskDefinitions + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. 
+ // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The order in which to sort the results. Valid values are ASC and DESC. By + // default (ASC), task definitions are listed lexicographically by family name + // and in ascending numerical order by revision so that the newest task definitions + // in a family are listed last. Setting this parameter to DESC reverses the + // sort order on family name and revision so that the newest task definitions + // in a family are listed first. + Sort *string `locationName:"sort" type:"string" enum:"SortOrder"` + + // The task definition status with which to filter the ListTaskDefinitions results. + // By default, only ACTIVE task definitions are listed. By setting this parameter + // to INACTIVE, you can view task definitions that are INACTIVE as long as an + // active task or service still references them. If you paginate the resulting + // output, be sure to keep the status value constant in each subsequent request. + Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` +} + +// String returns the string representation +func (s ListTaskDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsInput) GoString() string { + return s.String() +} + +// SetFamilyPrefix sets the FamilyPrefix field's value. +func (s *ListTaskDefinitionsInput) SetFamilyPrefix(v string) *ListTaskDefinitionsInput { + s.FamilyPrefix = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListTaskDefinitionsInput) SetMaxResults(v int64) *ListTaskDefinitionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListTaskDefinitionsInput) SetNextToken(v string) *ListTaskDefinitionsInput { + s.NextToken = &v + return s +} + +// SetSort sets the Sort field's value. +func (s *ListTaskDefinitionsInput) SetSort(v string) *ListTaskDefinitionsInput { + s.Sort = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListTaskDefinitionsInput) SetStatus(v string) *ListTaskDefinitionsInput { + s.Status = &v + return s +} + +type ListTaskDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTaskDefinitions request. When + // the results of a ListTaskDefinitions request exceed maxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefinitions + // request. + TaskDefinitionArns []*string `locationName:"taskDefinitionArns" type:"list"` +} + +// String returns the string representation +func (s ListTaskDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTaskDefinitionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTaskDefinitionsOutput) SetNextToken(v string) *ListTaskDefinitionsOutput { + s.NextToken = &v + return s +} + +// SetTaskDefinitionArns sets the TaskDefinitionArns field's value. +func (s *ListTaskDefinitionsOutput) SetTaskDefinitionArns(v []*string) *ListTaskDefinitionsOutput { + s.TaskDefinitionArns = v + return s +} + +type ListTasksInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the tasks to list. If you do not specify a cluster, the default cluster is + // assumed. 
+ Cluster *string `locationName:"cluster" type:"string"` + + // The container instance ID or full ARN of the container instance with which + // to filter the ListTasks results. Specifying a containerInstance limits the + // results to tasks that belong to that container instance. + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + // The task desired status with which to filter the ListTasks results. Specifying + // a desiredStatus of STOPPED limits the results to tasks that Amazon ECS has + // set the desired status to STOPPED, which can be useful for debugging tasks + // that are not starting properly or have died or finished. The default status + // filter is RUNNING, which shows tasks that Amazon ECS has set the desired + // status to RUNNING. + // + // Although you can filter results based on a desired status of PENDING, this + // does not return any results because Amazon ECS never sets the desired status + // of a task to that value (only a task's lastStatus may have a value of PENDING). + DesiredStatus *string `locationName:"desiredStatus" type:"string" enum:"DesiredStatus"` + + // The name of the family with which to filter the ListTasks results. Specifying + // a family limits the results to tasks that belong to that family. + Family *string `locationName:"family" type:"string"` + + // The launch type for services to list. + LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // The maximum number of task results returned by ListTasks in paginated output. + // When this parameter is used, ListTasks only returns maxResults results in + // a single page along with a nextToken response element. The remaining results + // of the initial request can be seen by sending another ListTasks request with + // the returned nextToken value. This value can be between 1 and 100. If this + // parameter is not used, then ListTasks returns up to 100 results and a nextToken + // value if applicable. 
+ MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListTasks request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. + // + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the service with which to filter the ListTasks results. Specifying + // a serviceName limits the results to tasks that belong to that service. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The startedBy value with which to filter the task results. Specifying a startedBy + // value limits the results to tasks that were started with that value. + StartedBy *string `locationName:"startedBy" type:"string"` +} + +// String returns the string representation +func (s ListTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksInput) GoString() string { + return s.String() +} + +// SetCluster sets the Cluster field's value. +func (s *ListTasksInput) SetCluster(v string) *ListTasksInput { + s.Cluster = &v + return s +} + +// SetContainerInstance sets the ContainerInstance field's value. +func (s *ListTasksInput) SetContainerInstance(v string) *ListTasksInput { + s.ContainerInstance = &v + return s +} + +// SetDesiredStatus sets the DesiredStatus field's value. +func (s *ListTasksInput) SetDesiredStatus(v string) *ListTasksInput { + s.DesiredStatus = &v + return s +} + +// SetFamily sets the Family field's value. +func (s *ListTasksInput) SetFamily(v string) *ListTasksInput { + s.Family = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. 
+func (s *ListTasksInput) SetLaunchType(v string) *ListTasksInput { + s.LaunchType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListTasksInput) SetMaxResults(v int64) *ListTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTasksInput) SetNextToken(v string) *ListTasksInput { + s.NextToken = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ListTasksInput) SetServiceName(v string) *ListTasksInput { + s.ServiceName = &v + return s +} + +// SetStartedBy sets the StartedBy field's value. +func (s *ListTasksInput) SetStartedBy(v string) *ListTasksInput { + s.StartedBy = &v + return s +} + +type ListTasksOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListTasks request. When the results + // of a ListTasks request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of task ARN entries for the ListTasks request. + TaskArns []*string `locationName:"taskArns" type:"list"` +} + +// String returns the string representation +func (s ListTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTasksOutput) SetNextToken(v string) *ListTasksOutput { + s.NextToken = &v + return s +} + +// SetTaskArns sets the TaskArns field's value. +func (s *ListTasksOutput) SetTaskArns(v []*string) *ListTasksOutput { + s.TaskArns = v + return s +} + +// Details on a load balancer that is used with a service. 
+// +// Services with tasks that use the awsvpc network mode (for example, those +// with the Fargate launch type) only support Application Load Balancers and +// Network Load Balancers; Classic Load Balancers are not supported. Also, when +// you create any target groups for these services, you must choose ip as the +// target type, not instance, because tasks that use the awsvpc network mode +// are associated with an elastic network interface, not an Amazon EC2 instance. +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the container (as it appears in a container definition) to associate + // with the load balancer. + ContainerName *string `locationName:"containerName" type:"string"` + + // The port on the container to associate with the load balancer. This port + // must correspond to a containerPort in the service's task definition. Your + // container instances must allow ingress traffic on the hostPort of the port + // mapping. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The name of a load balancer. + LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` + + // The full Amazon Resource Name (ARN) of the Elastic Load Balancing target + // group associated with a service. + // + // If your service's task definition uses the awsvpc network mode (which is + // required for the Fargate launch type), you must choose ip as the target type, + // not instance, because tasks that use the awsvpc network mode are associated + // with an elastic network interface, not an Amazon EC2 instance. + TargetGroupArn *string `locationName:"targetGroupArn" type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// SetContainerName sets the ContainerName field's value. 
+func (s *LoadBalancer) SetContainerName(v string) *LoadBalancer { + s.ContainerName = &v + return s +} + +// SetContainerPort sets the ContainerPort field's value. +func (s *LoadBalancer) SetContainerPort(v int64) *LoadBalancer { + s.ContainerPort = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *LoadBalancer) SetLoadBalancerName(v string) *LoadBalancer { + s.LoadBalancerName = &v + return s +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *LoadBalancer) SetTargetGroupArn(v string) *LoadBalancer { + s.TargetGroupArn = &v + return s +} + +// Log configuration options to send to a custom log driver for the container. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // The log driver to use for the container. The valid values listed for this + // parameter are log drivers that the Amazon ECS container agent can communicate + // with by default. If using the Fargate launch type, the only supported value + // is awslogs. For more information about using the awslogs driver, see Using + // the awslogs Log Driver (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // If you have a custom driver that is not listed above that you would like + // to work with the Amazon ECS container agent, you can fork the Amazon ECS + // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) + // and customize it to work with that driver. We encourage you to submit pull + // requests for changes that you would like to have included. However, Amazon + // Web Services does not currently support running modified copies of this software. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. 
To check the Docker Remote API version on your + // container instance, log in to your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // LogDriver is a required field + LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` + + // The configuration options to send to the log driver. This parameter requires + // version 1.19 of the Docker Remote API or greater on your container instance. + // To check the Docker Remote API version on your container instance, log in + // to your container instance and run the following command: sudo docker version + // | grep "Server API version" + Options map[string]*string `locationName:"options" type:"map"` + + SecretOptions []*Secret `locationName:"secretOptions" type:"list"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogConfiguration"} + if s.LogDriver == nil { + invalidParams.Add(request.NewErrParamRequired("LogDriver")) + } + if s.SecretOptions != nil { + for i, v := range s.SecretOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecretOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogDriver sets the LogDriver field's value. +func (s *LogConfiguration) SetLogDriver(v string) *LogConfiguration { + s.LogDriver = &v + return s +} + +// SetOptions sets the Options field's value. 
+func (s *LogConfiguration) SetOptions(v map[string]*string) *LogConfiguration { + s.Options = v + return s +} + +// SetSecretOptions sets the SecretOptions field's value. +func (s *LogConfiguration) SetSecretOptions(v []*Secret) *LogConfiguration { + s.SecretOptions = v + return s +} + +type ManagedAgent struct { + _ struct{} `type:"structure"` + + LastStartedAt *time.Time `locationName:"lastStartedAt" type:"timestamp"` + + LastStatus *string `locationName:"lastStatus" type:"string"` + + Name *string `locationName:"name" type:"string" enum:"ManagedAgentName"` + + Reason *string `locationName:"reason" type:"string"` +} + +// String returns the string representation +func (s ManagedAgent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedAgent) GoString() string { + return s.String() +} + +// SetLastStartedAt sets the LastStartedAt field's value. +func (s *ManagedAgent) SetLastStartedAt(v time.Time) *ManagedAgent { + s.LastStartedAt = &v + return s +} + +// SetLastStatus sets the LastStatus field's value. +func (s *ManagedAgent) SetLastStatus(v string) *ManagedAgent { + s.LastStatus = &v + return s +} + +// SetName sets the Name field's value. +func (s *ManagedAgent) SetName(v string) *ManagedAgent { + s.Name = &v + return s +} + +// SetReason sets the Reason field's value. 
+func (s *ManagedAgent) SetReason(v string) *ManagedAgent { + s.Reason = &v + return s +} + +type ManagedAgentStateChange struct { + _ struct{} `type:"structure"` + + ContainerName *string `locationName:"containerName" type:"string"` + + ManagedAgentName *string `locationName:"managedAgentName" type:"string" enum:"ManagedAgentName"` + + Reason *string `locationName:"reason" type:"string"` + + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true"` +} + +// String returns the string representation +func (s ManagedAgentStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedAgentStateChange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ManagedAgentStateChange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ManagedAgentStateChange"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *ManagedAgentStateChange) SetContainerName(v string) *ManagedAgentStateChange { + s.ContainerName = &v + return s +} + +// SetManagedAgentName sets the ManagedAgentName field's value. +func (s *ManagedAgentStateChange) SetManagedAgentName(v string) *ManagedAgentStateChange { + s.ManagedAgentName = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *ManagedAgentStateChange) SetReason(v string) *ManagedAgentStateChange { + s.Reason = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *ManagedAgentStateChange) SetStatus(v string) *ManagedAgentStateChange { + s.Status = &v + return s +} + +// Amazon ECS is unable to determine the current version of the Amazon ECS container +// agent on the container instance and does not have enough information to proceed +// with an update. This could be because the agent running on the container +// instance is an older or custom version that does not use our version information. +type MissingVersionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s MissingVersionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MissingVersionException) GoString() string { + return s.String() +} + +func newErrorMissingVersionException(v protocol.ResponseMetadata) error { + return &MissingVersionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *MissingVersionException) Code() string { + return "MissingVersionException" +} + +// Message returns the exception's message. +func (s *MissingVersionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MissingVersionException) OrigErr() error { + return nil +} + +func (s *MissingVersionException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MissingVersionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MissingVersionException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Details on a volume mount point that is used in a container definition. 
+type MountPoint struct { + _ struct{} `type:"structure"` + + // The path on the container to mount the host volume at. + ContainerPath *string `locationName:"containerPath" type:"string"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of the volume to mount. + SourceVolume *string `locationName:"sourceVolume" type:"string"` +} + +// String returns the string representation +func (s MountPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MountPoint) GoString() string { + return s.String() +} + +// SetContainerPath sets the ContainerPath field's value. +func (s *MountPoint) SetContainerPath(v string) *MountPoint { + s.ContainerPath = &v + return s +} + +// SetReadOnly sets the ReadOnly field's value. +func (s *MountPoint) SetReadOnly(v bool) *MountPoint { + s.ReadOnly = &v + return s +} + +// SetSourceVolume sets the SourceVolume field's value. +func (s *MountPoint) SetSourceVolume(v string) *MountPoint { + s.SourceVolume = &v + return s +} + +// Details on the network bindings between a container and its host container +// instance. After a task reaches the RUNNING status, manual and automatic host +// and container port assignments are visible in the networkBindings section +// of DescribeTasks API responses. +type NetworkBinding struct { + _ struct{} `type:"structure"` + + // The IP address that the container is bound to on the container instance. + BindIP *string `locationName:"bindIP" type:"string"` + + // The port number on the container that is used with the network binding. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the host that is used with the network binding. 
+ HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the network binding. + Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s NetworkBinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkBinding) GoString() string { + return s.String() +} + +// SetBindIP sets the BindIP field's value. +func (s *NetworkBinding) SetBindIP(v string) *NetworkBinding { + s.BindIP = &v + return s +} + +// SetContainerPort sets the ContainerPort field's value. +func (s *NetworkBinding) SetContainerPort(v int64) *NetworkBinding { + s.ContainerPort = &v + return s +} + +// SetHostPort sets the HostPort field's value. +func (s *NetworkBinding) SetHostPort(v int64) *NetworkBinding { + s.HostPort = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *NetworkBinding) SetProtocol(v string) *NetworkBinding { + s.Protocol = &v + return s +} + +// An object representing the network configuration for a task or service. +type NetworkConfiguration struct { + _ struct{} `type:"structure"` + + // The VPC subnets and security groups associated with a task. + // + // All specified subnets and security groups must be from the same VPC. + AwsvpcConfiguration *AwsVpcConfiguration `locationName:"awsvpcConfiguration" type:"structure"` +} + +// String returns the string representation +func (s NetworkConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *NetworkConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NetworkConfiguration"} + if s.AwsvpcConfiguration != nil { + if err := s.AwsvpcConfiguration.Validate(); err != nil { + invalidParams.AddNested("AwsvpcConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsvpcConfiguration sets the AwsvpcConfiguration field's value. +func (s *NetworkConfiguration) SetAwsvpcConfiguration(v *AwsVpcConfiguration) *NetworkConfiguration { + s.AwsvpcConfiguration = v + return s +} + +// An object representing the elastic network interface for tasks that use the +// awsvpc network mode. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The attachment ID for the network interface. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // The private IPv6 address for the network interface. + Ipv6Address *string `locationName:"ipv6Address" type:"string"` + + // The private IPv4 address for the network interface. + PrivateIpv4Address *string `locationName:"privateIpv4Address" type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *NetworkInterface) SetAttachmentId(v string) *NetworkInterface { + s.AttachmentId = &v + return s +} + +// SetIpv6Address sets the Ipv6Address field's value. +func (s *NetworkInterface) SetIpv6Address(v string) *NetworkInterface { + s.Ipv6Address = &v + return s +} + +// SetPrivateIpv4Address sets the PrivateIpv4Address field's value. 
+func (s *NetworkInterface) SetPrivateIpv4Address(v string) *NetworkInterface { + s.PrivateIpv4Address = &v + return s +} + +// There is no update available for this Amazon ECS container agent. This could +// be because the agent is already running the latest version, or it is so old +// that there is no update path to the current version. +type NoUpdateAvailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s NoUpdateAvailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoUpdateAvailableException) GoString() string { + return s.String() +} + +func newErrorNoUpdateAvailableException(v protocol.ResponseMetadata) error { + return &NoUpdateAvailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *NoUpdateAvailableException) Code() string { + return "NoUpdateAvailableException" +} + +// Message returns the exception's message. +func (s *NoUpdateAvailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *NoUpdateAvailableException) OrigErr() error { + return nil +} + +func (s *NoUpdateAvailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *NoUpdateAvailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *NoUpdateAvailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object representing a constraint on task placement. 
For more information, +// see Task Placement Constraints (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) +// in the Amazon Elastic Container Service Developer Guide. +type PlacementConstraint struct { + _ struct{} `type:"structure"` + + // A cluster query language expression to apply to the constraint. You cannot + // specify an expression if the constraint type is distinctInstance. For more + // information, see Cluster Query Language (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) + // in the Amazon Elastic Container Service Developer Guide. + Expression *string `locationName:"expression" type:"string"` + + // The type of constraint. Use distinctInstance to ensure that each task in + // a particular group is running on a different container instance. Use memberOf + // to restrict the selection to a group of valid candidates. The value distinctInstance + // is not supported in task definitions. + Type *string `locationName:"type" type:"string" enum:"PlacementConstraintType"` +} + +// String returns the string representation +func (s PlacementConstraint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementConstraint) GoString() string { + return s.String() +} + +// SetExpression sets the Expression field's value. +func (s *PlacementConstraint) SetExpression(v string) *PlacementConstraint { + s.Expression = &v + return s +} + +// SetType sets the Type field's value. +func (s *PlacementConstraint) SetType(v string) *PlacementConstraint { + s.Type = &v + return s +} + +// The task placement strategy for a task or service. For more information, +// see Task Placement Strategies (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) +// in the Amazon Elastic Container Service Developer Guide. 
+type PlacementStrategy struct { + _ struct{} `type:"structure"` + + // The field to apply the placement strategy against. For the spread placement + // strategy, valid values are instanceId (or host, which has the same effect), + // or any platform or custom attribute that is applied to a container instance, + // such as attribute:ecs.availability-zone. For the binpack placement strategy, + // valid values are cpu and memory. For the random placement strategy, this + // field is not used. + Field *string `locationName:"field" type:"string"` + + // The type of placement strategy. The random placement strategy randomly places + // tasks on available candidates. The spread placement strategy spreads placement + // across available candidates evenly based on the field parameter. The binpack + // strategy places tasks on available candidates that have the least available + // amount of the resource that is specified with the field parameter. For example, + // if you binpack on memory, a task is placed on the instance with the least + // amount of remaining memory (but still enough to run the task). + Type *string `locationName:"type" type:"string" enum:"PlacementStrategyType"` +} + +// String returns the string representation +func (s PlacementStrategy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementStrategy) GoString() string { + return s.String() +} + +// SetField sets the Field field's value. +func (s *PlacementStrategy) SetField(v string) *PlacementStrategy { + s.Field = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *PlacementStrategy) SetType(v string) *PlacementStrategy { + s.Type = &v + return s +} + +type PlatformDevice struct { + _ struct{} `type:"structure"` + + Id *string `locationName:"id" type:"string"` + + Type *string `locationName:"type" type:"string" enum:"PlatformDeviceType"` +} + +// String returns the string representation +func (s PlatformDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformDevice) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *PlatformDevice) SetId(v string) *PlatformDevice { + s.Id = &v + return s +} + +// SetType sets the Type field's value. +func (s *PlatformDevice) SetType(v string) *PlatformDevice { + s.Type = &v + return s +} + +// The specified platform version does not satisfy the task definition's required +// capabilities. +type PlatformTaskDefinitionIncompatibilityException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PlatformTaskDefinitionIncompatibilityException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformTaskDefinitionIncompatibilityException) GoString() string { + return s.String() +} + +func newErrorPlatformTaskDefinitionIncompatibilityException(v protocol.ResponseMetadata) error { + return &PlatformTaskDefinitionIncompatibilityException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PlatformTaskDefinitionIncompatibilityException) Code() string { + return "PlatformTaskDefinitionIncompatibilityException" +} + +// Message returns the exception's message. 
+func (s *PlatformTaskDefinitionIncompatibilityException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PlatformTaskDefinitionIncompatibilityException) OrigErr() error { + return nil +} + +func (s *PlatformTaskDefinitionIncompatibilityException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PlatformTaskDefinitionIncompatibilityException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *PlatformTaskDefinitionIncompatibilityException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified platform version does not exist. +type PlatformUnknownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PlatformUnknownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformUnknownException) GoString() string { + return s.String() +} + +func newErrorPlatformUnknownException(v protocol.ResponseMetadata) error { + return &PlatformUnknownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PlatformUnknownException) Code() string { + return "PlatformUnknownException" +} + +// Message returns the exception's message. +func (s *PlatformUnknownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *PlatformUnknownException) OrigErr() error { + return nil +} + +func (s *PlatformUnknownException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PlatformUnknownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *PlatformUnknownException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Port mappings allow containers to access ports on the host container instance +// to send or receive traffic. Port mappings are specified as part of the container +// definition. +// +// If using containers in a task with the awsvpc or host network mode, exposed +// ports should be specified using containerPort. The hostPort can be left blank +// or it must be the same value as the containerPort. +// +// After a task reaches the RUNNING status, manual and automatic host and container +// port assignments are visible in the networkBindings section of DescribeTasks +// API responses. +type PortMapping struct { + _ struct{} `type:"structure"` + + // The port number on the container that is bound to the user-specified or automatically + // assigned host port. + // + // If using containers in a task with the awsvpc or host network mode, exposed + // ports should be specified using containerPort. + // + // If using containers in a task with the bridge network mode and you specify + // a container port and not a host port, your container automatically receives + // a host port in the ephemeral port range (for more information, see hostPort). + // Port mappings that are automatically assigned in this way do not count toward + // the 100 reserved ports limit of a container instance. + ContainerPort *int64 `locationName:"containerPort" type:"integer"` + + // The port number on the container instance to reserve for your container. 
+ // + // If using containers in a task with the awsvpc or host network mode, the hostPort + // can either be left blank or set to the same value as the containerPort. + // + // If using containers in a task with the bridge network mode, you can specify + // a non-reserved host port for your container port mapping, or you can omit + // the hostPort (or set it to 0) while specifying a containerPort and your container + // automatically receives a port in the ephemeral port range for your container + // instance operating system and Docker version. + // + // The default ephemeral port range for Docker version 1.6.0 and later is listed + // on the instance under /proc/sys/net/ipv4/ip_local_port_range; if this kernel + // parameter is unavailable, the default ephemeral port range from 49153 through + // 65535 is used. You should not attempt to specify a host port in the ephemeral + // port range as these are reserved for automatic assignment. In general, ports + // below 32768 are outside of the ephemeral port range. + // + // The default ephemeral port range from 49153 through 65535 is always used + // for Docker versions before 1.6.0. + // + // The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, + // and the Amazon ECS container agent ports 51678 and 51679. Any host port that + // was previously specified in a running task is also reserved while the task + // is running (after a task stops, the host port is released). The current reserved + // ports are displayed in the remainingResources of DescribeContainerInstances + // output, and a container instance may have up to 100 reserved ports at a time, + // including the default reserved ports (automatically assigned ports do not + // count toward the 100 reserved ports limit). + HostPort *int64 `locationName:"hostPort" type:"integer"` + + // The protocol used for the port mapping. Valid values are tcp and udp. The + // default is tcp. 
+ Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` +} + +// String returns the string representation +func (s PortMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortMapping) GoString() string { + return s.String() +} + +// SetContainerPort sets the ContainerPort field's value. +func (s *PortMapping) SetContainerPort(v int64) *PortMapping { + s.ContainerPort = &v + return s +} + +// SetHostPort sets the HostPort field's value. +func (s *PortMapping) SetHostPort(v int64) *PortMapping { + s.HostPort = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *PortMapping) SetProtocol(v string) *PortMapping { + s.Protocol = &v + return s +} + +type ProxyConfiguration struct { + _ struct{} `type:"structure"` + + // ContainerName is a required field + ContainerName *string `locationName:"containerName" type:"string" required:"true"` + + Properties []*KeyValuePair `locationName:"properties" type:"list"` + + Type *string `locationName:"type" type:"string" enum:"ProxyConfigurationType"` +} + +// String returns the string representation +func (s ProxyConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProxyConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProxyConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProxyConfiguration"} + if s.ContainerName == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *ProxyConfiguration) SetContainerName(v string) *ProxyConfiguration { + s.ContainerName = &v + return s +} + +// SetProperties sets the Properties field's value. 
+func (s *ProxyConfiguration) SetProperties(v []*KeyValuePair) *ProxyConfiguration { + s.Properties = v + return s +} + +// SetType sets the Type field's value. +func (s *ProxyConfiguration) SetType(v string) *ProxyConfiguration { + s.Type = &v + return s +} + +type PutAccountSettingInput struct { + _ struct{} `type:"structure"` + + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true" enum:"SettingName"` + + PrincipalArn *string `locationName:"principalArn" type:"string"` + + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutAccountSettingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAccountSettingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutAccountSettingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAccountSettingInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *PutAccountSettingInput) SetName(v string) *PutAccountSettingInput { + s.Name = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *PutAccountSettingInput) SetPrincipalArn(v string) *PutAccountSettingInput { + s.PrincipalArn = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *PutAccountSettingInput) SetValue(v string) *PutAccountSettingInput { + s.Value = &v + return s +} + +type PutAccountSettingOutput struct { + _ struct{} `type:"structure"` + + Setting *Setting `locationName:"setting" type:"structure"` +} + +// String returns the string representation +func (s PutAccountSettingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAccountSettingOutput) GoString() string { + return s.String() +} + +// SetSetting sets the Setting field's value. +func (s *PutAccountSettingOutput) SetSetting(v *Setting) *PutAccountSettingOutput { + s.Setting = v + return s +} + +type PutAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes to apply to your resource. You can specify up to 10 custom + // attributes per resource. You can specify up to 10 attributes in a single + // call. + // + // Attributes is a required field + Attributes []*Attribute `locationName:"attributes" type:"list" required:"true"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that contains + // the resource to apply attributes. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` +} + +// String returns the string representation +func (s PutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *PutAttributesInput) SetAttributes(v []*Attribute) *PutAttributesInput { + s.Attributes = v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *PutAttributesInput) SetCluster(v string) *PutAttributesInput { + s.Cluster = &v + return s +} + +type PutAttributesOutput struct { + _ struct{} `type:"structure"` + + // The attributes applied to your resource. + Attributes []*Attribute `locationName:"attributes" type:"list"` +} + +// String returns the string representation +func (s PutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *PutAttributesOutput) SetAttributes(v []*Attribute) *PutAttributesOutput { + s.Attributes = v + return s +} + +type RegisterContainerInstanceInput struct { + _ struct{} `type:"structure"` + + // The container instance attributes that this container instance supports. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + ClientToken *string `locationName:"clientToken" type:"string"` + + // The short name or full Amazon Resource Name (ARN) of the cluster with which + // to register your container instance. If you do not specify a cluster, the + // default cluster is assumed. 
+ Cluster *string `locationName:"cluster" type:"string"` + + // The ARN of the container instance (if it was previously registered). + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The instance identity document for the EC2 instance to register. This document + // can be found by running the following command from the instance: curl http://169.254.169.254/latest/dynamic/instance-identity/document/ + InstanceIdentityDocument *string `locationName:"instanceIdentityDocument" type:"string"` + + // The instance identity document signature for the EC2 instance to register. + // This signature can be found by running the following command from the instance: + // curl http://169.254.169.254/latest/dynamic/instance-identity/signature/ + InstanceIdentityDocumentSignature *string `locationName:"instanceIdentityDocumentSignature" type:"string"` + + PlatformDevices []*PlatformDevice `locationName:"platformDevices" type:"list"` + + Tags []*Tag `locationName:"tags" type:"list"` + + // The resources available on the instance. + TotalResources []*Resource `locationName:"totalResources" type:"list"` + + // The version information for the Amazon ECS container agent and Docker daemon + // running on the container instance. + VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` +} + +// String returns the string representation +func (s RegisterContainerInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterContainerInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterContainerInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterContainerInstanceInput"} + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *RegisterContainerInstanceInput) SetAttributes(v []*Attribute) *RegisterContainerInstanceInput { + s.Attributes = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *RegisterContainerInstanceInput) SetClientToken(v string) *RegisterContainerInstanceInput { + s.ClientToken = &v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *RegisterContainerInstanceInput) SetCluster(v string) *RegisterContainerInstanceInput { + s.Cluster = &v + return s +} + +// SetContainerInstanceArn sets the ContainerInstanceArn field's value. +func (s *RegisterContainerInstanceInput) SetContainerInstanceArn(v string) *RegisterContainerInstanceInput { + s.ContainerInstanceArn = &v + return s +} + +// SetInstanceIdentityDocument sets the InstanceIdentityDocument field's value. +func (s *RegisterContainerInstanceInput) SetInstanceIdentityDocument(v string) *RegisterContainerInstanceInput { + s.InstanceIdentityDocument = &v + return s +} + +// SetInstanceIdentityDocumentSignature sets the InstanceIdentityDocumentSignature field's value. 
+func (s *RegisterContainerInstanceInput) SetInstanceIdentityDocumentSignature(v string) *RegisterContainerInstanceInput { + s.InstanceIdentityDocumentSignature = &v + return s +} + +// SetPlatformDevices sets the PlatformDevices field's value. +func (s *RegisterContainerInstanceInput) SetPlatformDevices(v []*PlatformDevice) *RegisterContainerInstanceInput { + s.PlatformDevices = v + return s +} + +// SetTags sets the Tags field's value. +func (s *RegisterContainerInstanceInput) SetTags(v []*Tag) *RegisterContainerInstanceInput { + s.Tags = v + return s +} + +// SetTotalResources sets the TotalResources field's value. +func (s *RegisterContainerInstanceInput) SetTotalResources(v []*Resource) *RegisterContainerInstanceInput { + s.TotalResources = v + return s +} + +// SetVersionInfo sets the VersionInfo field's value. +func (s *RegisterContainerInstanceInput) SetVersionInfo(v *VersionInfo) *RegisterContainerInstanceInput { + s.VersionInfo = v + return s +} + +type RegisterContainerInstanceOutput struct { + _ struct{} `type:"structure"` + + // The container instance that was registered. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s RegisterContainerInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterContainerInstanceOutput) GoString() string { + return s.String() +} + +// SetContainerInstance sets the ContainerInstance field's value. +func (s *RegisterContainerInstanceOutput) SetContainerInstance(v *ContainerInstance) *RegisterContainerInstanceOutput { + s.ContainerInstance = v + return s +} + +type RegisterTaskDefinitionInput struct { + _ struct{} `type:"structure"` + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. 
+ // + // ContainerDefinitions is a required field + ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list" required:"true"` + + // The number of CPU units used by the task. It can be expressed as an integer + // using CPU units, for example 1024, or as a string using vCPUs, for example + // 1 vCPU or 1 vcpu, in a task definition. String values are converted to an + // integer indicating the CPU units when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If using the EC2 launch type, this field is optional. Supported values are + // between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). + // + // If using the Fargate launch type, this field is required and you must use + // one of the following values, which determines your range of supported values + // for the memory parameter: + // + // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), + // 2048 (2 GB) + // + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + // (3 GB), 4096 (4 GB) + // + // * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 + // (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 + // (16 GB) in increments of 1024 (1 GB) + // + // * 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 + // (30 GB) in increments of 1024 (1 GB) + Cpu *string `locationName:"cpu" type:"string"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon + // ECS container agent and the Docker daemon can assume. 
+ ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + + // You must specify a family for a task definition, which allows you to track + // multiple versions of the same task definition. The family is used as a name + // for your task definition. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. + // + // Family is a required field + Family *string `locationName:"family" type:"string" required:"true"` + + InferenceAccelerators []*InferenceAccelerator `locationName:"inferenceAccelerators" type:"list"` + + IpcMode *string `locationName:"ipcMode" type:"string" enum:"IpcMode"` + + // The amount of memory (in MiB) used by the task. It can be expressed as an + // integer using MiB, for example 1024, or as a string using GB, for example + // 1GB or 1 GB, in a task definition. String values are converted to an integer + // indicating the MiB when the task definition is registered. + // + // Task-level CPU and memory parameters are ignored for Windows containers. + // We recommend specifying container-level resources for Windows containers. + // + // If using the EC2 launch type, this field is optional. 
+ // + // If using the Fargate launch type, this field is required and you must use + // one of the following values, which determines your range of supported values + // for the cpu parameter: + // + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 + // vCPU) + // + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 + // (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + Memory *string `locationName:"memory" type:"string"` + + // The Docker networking mode to use for the containers in the task. The valid + // values are none, bridge, awsvpc, and host. The default Docker network mode + // is bridge. If using the Fargate launch type, the awsvpc network mode is required. + // If using the EC2 launch type, any network mode can be used. If the network + // mode is set to none, you can't specify port mappings in your container definitions, + // and the task's containers do not have external connectivity. The host and + // awsvpc network modes offer the highest networking performance for containers + // because they use the EC2 network stack instead of the virtualized network + // stack provided by the bridge mode. + // + // With the host and awsvpc network modes, exposed container ports are mapped + // directly to the corresponding host port (for the host network mode) or the + // attached elastic network interface port (for the awsvpc network mode), so + // you cannot take advantage of dynamic host port mappings. 
+ // + // If the network mode is awsvpc, the task is allocated an Elastic Network Interface, + // and you must specify a NetworkConfiguration when you create a service or + // run a task with the task definition. For more information, see Task Networking + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // If the network mode is host, you can't run multiple instantiations of the + // same task on a single container instance when port mappings are used. + // + // Docker for Windows uses different network modes than Docker for Linux. When + // you register a task definition with Windows containers, you must not specify + // a network mode. + // + // For more information, see Network settings (https://docs.docker.com/engine/reference/run/#network-settings) + // in the Docker run reference. + NetworkMode *string `locationName:"networkMode" type:"string" enum:"NetworkMode"` + + PidMode *string `locationName:"pidMode" type:"string" enum:"PidMode"` + + // An array of placement constraint objects to use for the task. You can specify + // a maximum of 10 constraints per task (this limit includes constraints in + // the task definition and those specified at run time). + PlacementConstraints []*TaskDefinitionPlacementConstraint `locationName:"placementConstraints" type:"list"` + + ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` + + // The launch type required by the task. If no value is specified, it defaults + // to EC2. + RequiresCompatibilities []*string `locationName:"requiresCompatibilities" type:"list"` + + Tags []*Tag `locationName:"tags" type:"list"` + + // The short name or full Amazon Resource Name (ARN) of the IAM role that containers + // in this task can assume. All containers in this task are granted the permissions + // that are specified in this role. 
For more information, see IAM Roles for + // Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) + // in the Amazon Elastic Container Service Developer Guide. + TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` + + // A list of volume definitions in JSON format that containers in your task + // may use. + Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterTaskDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterTaskDefinitionInput"} + if s.ContainerDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerDefinitions")) + } + if s.Family == nil { + invalidParams.Add(request.NewErrParamRequired("Family")) + } + if s.ContainerDefinitions != nil { + for i, v := range s.ContainerDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContainerDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InferenceAccelerators != nil { + for i, v := range s.InferenceAccelerators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InferenceAccelerators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProxyConfiguration != nil { + if err := s.ProxyConfiguration.Validate(); err != nil { + invalidParams.AddNested("ProxyConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", 
"Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Volumes != nil { + for i, v := range s.Volumes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Volumes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerDefinitions sets the ContainerDefinitions field's value. +func (s *RegisterTaskDefinitionInput) SetContainerDefinitions(v []*ContainerDefinition) *RegisterTaskDefinitionInput { + s.ContainerDefinitions = v + return s +} + +// SetCpu sets the Cpu field's value. +func (s *RegisterTaskDefinitionInput) SetCpu(v string) *RegisterTaskDefinitionInput { + s.Cpu = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *RegisterTaskDefinitionInput) SetExecutionRoleArn(v string) *RegisterTaskDefinitionInput { + s.ExecutionRoleArn = &v + return s +} + +// SetFamily sets the Family field's value. +func (s *RegisterTaskDefinitionInput) SetFamily(v string) *RegisterTaskDefinitionInput { + s.Family = &v + return s +} + +// SetInferenceAccelerators sets the InferenceAccelerators field's value. +func (s *RegisterTaskDefinitionInput) SetInferenceAccelerators(v []*InferenceAccelerator) *RegisterTaskDefinitionInput { + s.InferenceAccelerators = v + return s +} + +// SetIpcMode sets the IpcMode field's value. +func (s *RegisterTaskDefinitionInput) SetIpcMode(v string) *RegisterTaskDefinitionInput { + s.IpcMode = &v + return s +} + +// SetMemory sets the Memory field's value. +func (s *RegisterTaskDefinitionInput) SetMemory(v string) *RegisterTaskDefinitionInput { + s.Memory = &v + return s +} + +// SetNetworkMode sets the NetworkMode field's value. +func (s *RegisterTaskDefinitionInput) SetNetworkMode(v string) *RegisterTaskDefinitionInput { + s.NetworkMode = &v + return s +} + +// SetPidMode sets the PidMode field's value. 
+func (s *RegisterTaskDefinitionInput) SetPidMode(v string) *RegisterTaskDefinitionInput { + s.PidMode = &v + return s +} + +// SetPlacementConstraints sets the PlacementConstraints field's value. +func (s *RegisterTaskDefinitionInput) SetPlacementConstraints(v []*TaskDefinitionPlacementConstraint) *RegisterTaskDefinitionInput { + s.PlacementConstraints = v + return s +} + +// SetProxyConfiguration sets the ProxyConfiguration field's value. +func (s *RegisterTaskDefinitionInput) SetProxyConfiguration(v *ProxyConfiguration) *RegisterTaskDefinitionInput { + s.ProxyConfiguration = v + return s +} + +// SetRequiresCompatibilities sets the RequiresCompatibilities field's value. +func (s *RegisterTaskDefinitionInput) SetRequiresCompatibilities(v []*string) *RegisterTaskDefinitionInput { + s.RequiresCompatibilities = v + return s +} + +// SetTags sets the Tags field's value. +func (s *RegisterTaskDefinitionInput) SetTags(v []*Tag) *RegisterTaskDefinitionInput { + s.Tags = v + return s +} + +// SetTaskRoleArn sets the TaskRoleArn field's value. +func (s *RegisterTaskDefinitionInput) SetTaskRoleArn(v string) *RegisterTaskDefinitionInput { + s.TaskRoleArn = &v + return s +} + +// SetVolumes sets the Volumes field's value. +func (s *RegisterTaskDefinitionInput) SetVolumes(v []*Volume) *RegisterTaskDefinitionInput { + s.Volumes = v + return s +} + +type RegisterTaskDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The full description of the registered task definition. + TaskDefinition *TaskDefinition `locationName:"taskDefinition" type:"structure"` +} + +// String returns the string representation +func (s RegisterTaskDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskDefinitionOutput) GoString() string { + return s.String() +} + +// SetTaskDefinition sets the TaskDefinition field's value. 
+func (s *RegisterTaskDefinitionOutput) SetTaskDefinition(v *TaskDefinition) *RegisterTaskDefinitionOutput { + s.TaskDefinition = v + return s +} + +// The repository credentials for private registry authentication. +type RepositoryCredentials struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) or name of the secret containing the private + // repository credentials. + // + // CredentialsParameter is a required field + CredentialsParameter *string `locationName:"credentialsParameter" type:"string" required:"true"` +} + +// String returns the string representation +func (s RepositoryCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RepositoryCredentials) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RepositoryCredentials) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RepositoryCredentials"} + if s.CredentialsParameter == nil { + invalidParams.Add(request.NewErrParamRequired("CredentialsParameter")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCredentialsParameter sets the CredentialsParameter field's value. +func (s *RepositoryCredentials) SetCredentialsParameter(v string) *RepositoryCredentials { + s.CredentialsParameter = &v + return s +} + +// Describes the resources available for a container instance. +type Resource struct { + _ struct{} `type:"structure"` + + // When the doubleValue type is set, the value of the resource must be a double + // precision floating-point type. + DoubleValue *float64 `locationName:"doubleValue" type:"double"` + + // When the integerValue type is set, the value of the resource must be an integer. + IntegerValue *int64 `locationName:"integerValue" type:"integer"` + + // When the longValue type is set, the value of the resource must be an extended + // precision floating-point type. 
+ LongValue *int64 `locationName:"longValue" type:"long"` + + // The name of the resource, such as CPU, MEMORY, PORTS, PORTS_UDP, or a user-defined + // resource. + Name *string `locationName:"name" type:"string"` + + // When the stringSetValue type is set, the value of the resource must be a + // string type. + StringSetValue []*string `locationName:"stringSetValue" type:"list"` + + // The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s Resource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Resource) GoString() string { + return s.String() +} + +// SetDoubleValue sets the DoubleValue field's value. +func (s *Resource) SetDoubleValue(v float64) *Resource { + s.DoubleValue = &v + return s +} + +// SetIntegerValue sets the IntegerValue field's value. +func (s *Resource) SetIntegerValue(v int64) *Resource { + s.IntegerValue = &v + return s +} + +// SetLongValue sets the LongValue field's value. +func (s *Resource) SetLongValue(v int64) *Resource { + s.LongValue = &v + return s +} + +// SetName sets the Name field's value. +func (s *Resource) SetName(v string) *Resource { + s.Name = &v + return s +} + +// SetStringSetValue sets the StringSetValue field's value. +func (s *Resource) SetStringSetValue(v []*string) *Resource { + s.StringSetValue = v + return s +} + +// SetType sets the Type field's value. 
+func (s *Resource) SetType(v string) *Resource { + s.Type = &v + return s +} + +type ResourceRequirement struct { + _ struct{} `type:"structure"` + + Type *string `locationName:"type" type:"string" enum:"ResourceType"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ResourceRequirement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRequirement) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *ResourceRequirement) SetType(v string) *ResourceRequirement { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ResourceRequirement) SetValue(v string) *ResourceRequirement { + s.Value = &v + return s +} + +type RunTaskInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster on which + // to run your task. If you do not specify a cluster, the default cluster is + // assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // The number of instantiations of the specified task to place on your cluster. + // You can specify up to 10 tasks per call. + Count *int64 `locationName:"count" type:"integer"` + + // The name of the task group to associate with the task. The default value + // is the family name of the task definition (for example, family:my-family-name). + Group *string `locationName:"group" type:"string"` + + // The launch type on which to run your task. + LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // The network configuration for the task. This parameter is required for task + // definitions that use the awsvpc network mode to receive their own Elastic + // Network Interface, and it is not supported for other network modes. 
For more + // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. + NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` + + // A list of container overrides in JSON format that specify the name of a container + // in the specified task definition and the overrides it should receive. You + // can override the default command for a container (that is specified in the + // task definition or Docker image) with a command override. You can also override + // existing environment variables (that are specified in the task definition + // or Docker image) on a container or add new environment variables to it with + // an environment override. + // + // A total of 8192 characters are allowed for overrides. This limit includes + // the JSON formatting characters of the override structure. + Overrides *TaskOverride `locationName:"overrides" type:"structure"` + + // An array of placement constraint objects to use for the task. You can specify + // up to 10 constraints per task (including constraints in the task definition + // and those specified at run time). + PlacementConstraints []*PlacementConstraint `locationName:"placementConstraints" type:"list"` + + // The placement strategy objects to use for the task. You can specify a maximum + // of five strategy rules per task. + PlacementStrategy []*PlacementStrategy `locationName:"placementStrategy" type:"list"` + + // The platform version on which to run your task. If one is not specified, + // the latest version is used by default. + PlatformVersion *string `locationName:"platformVersion" type:"string"` + + // An optional tag specified when a task is started. For example if you automatically + // trigger a task to run a batch process job, you could apply a unique identifier + // for that job to your task with the startedBy parameter. 
You can then identify + // which tasks belong to that job by filtering the results of a ListTasks call + // with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. + // + // If a task is started by an Amazon ECS service, then the startedBy parameter + // contains the deployment ID of the service that starts it. + StartedBy *string `locationName:"startedBy" type:"string"` + + // The family and revision (family:revision) or full ARN of the task definition + // to run. If a revision is not specified, the latest ACTIVE revision is used. + // + // TaskDefinition is a required field + TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"` +} + +// String returns the string representation +func (s RunTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RunTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunTaskInput"} + if s.TaskDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("TaskDefinition")) + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *RunTaskInput) SetCluster(v string) *RunTaskInput { + s.Cluster = &v + return s +} + +// SetCount sets the Count field's value. +func (s *RunTaskInput) SetCount(v int64) *RunTaskInput { + s.Count = &v + return s +} + +// SetGroup sets the Group field's value. +func (s *RunTaskInput) SetGroup(v string) *RunTaskInput { + s.Group = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. 
func (s *RunTaskInput) SetLaunchType(v string) *RunTaskInput {
	s.LaunchType = &v
	return s
}

// SetNetworkConfiguration sets the NetworkConfiguration field's value.
func (s *RunTaskInput) SetNetworkConfiguration(v *NetworkConfiguration) *RunTaskInput {
	s.NetworkConfiguration = v
	return s
}

// SetOverrides sets the Overrides field's value.
func (s *RunTaskInput) SetOverrides(v *TaskOverride) *RunTaskInput {
	s.Overrides = v
	return s
}

// SetPlacementConstraints sets the PlacementConstraints field's value.
func (s *RunTaskInput) SetPlacementConstraints(v []*PlacementConstraint) *RunTaskInput {
	s.PlacementConstraints = v
	return s
}

// SetPlacementStrategy sets the PlacementStrategy field's value.
func (s *RunTaskInput) SetPlacementStrategy(v []*PlacementStrategy) *RunTaskInput {
	s.PlacementStrategy = v
	return s
}

// SetPlatformVersion sets the PlatformVersion field's value.
func (s *RunTaskInput) SetPlatformVersion(v string) *RunTaskInput {
	s.PlatformVersion = &v
	return s
}

// SetStartedBy sets the StartedBy field's value.
func (s *RunTaskInput) SetStartedBy(v string) *RunTaskInput {
	s.StartedBy = &v
	return s
}

// SetTaskDefinition sets the TaskDefinition field's value.
func (s *RunTaskInput) SetTaskDefinition(v string) *RunTaskInput {
	s.TaskDefinition = &v
	return s
}

// RunTaskOutput is the response returned by the RunTask operation.
type RunTaskOutput struct {
	_ struct{} `type:"structure"`

	// Any failures associated with the call.
	Failures []*Failure `locationName:"failures" type:"list"`

	// A full description of the tasks that were run. The tasks that were successfully
	// placed on your cluster are described here.
	Tasks []*Task `locationName:"tasks" type:"list"`
}

// String returns the string representation
func (s RunTaskOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RunTaskOutput) GoString() string {
	return s.String()
}

// SetFailures sets the Failures field's value.
func (s *RunTaskOutput) SetFailures(v []*Failure) *RunTaskOutput {
	s.Failures = v
	return s
}

// SetTasks sets the Tasks field's value.
func (s *RunTaskOutput) SetTasks(v []*Task) *RunTaskOutput {
	s.Tasks = v
	return s
}

// Secret is a named reference to a secret value; both fields are required
// (enforced by Validate). NOTE(review): ValueFrom presumably identifies the
// source of the secret's value (e.g. an ARN) — confirm against the ECS API docs.
type Secret struct {
	_ struct{} `type:"structure"`

	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// ValueFrom is a required field
	ValueFrom *string `locationName:"valueFrom" type:"string" required:"true"`
}

// String returns the string representation
func (s Secret) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Secret) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Secret) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Secret"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.ValueFrom == nil {
		invalidParams.Add(request.NewErrParamRequired("ValueFrom"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *Secret) SetName(v string) *Secret {
	s.Name = &v
	return s
}

// SetValueFrom sets the ValueFrom field's value.
func (s *Secret) SetValueFrom(v string) *Secret {
	s.ValueFrom = &v
	return s
}

// These errors are usually caused by a server issue.
type ServerException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServerException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServerException) GoString() string {
	return s.String()
}

// newErrorServerException constructs a ServerException from the response metadata.
func newErrorServerException(v protocol.ResponseMetadata) error {
	return &ServerException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *ServerException) Code() string {
	return "ServerException"
}

// Message returns the exception's message.
func (s *ServerException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ServerException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "<code>: <message>".
func (s *ServerException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ServerException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ServerException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Details on a service within a cluster.
type Service struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the cluster that hosts the service.
	ClusterArn *string `locationName:"clusterArn" type:"string"`

	// The Unix time stamp for when the service was created.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`

	// Optional deployment parameters that control how many tasks run during the
	// deployment and the ordering of stopping and starting tasks.
	DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"`

	// The current state of deployments for the service.
	Deployments []*Deployment `locationName:"deployments" type:"list"`

	// The desired number of instantiations of the task definition to keep running
	// on the service. This value is specified when the service is created with
	// CreateService, and it can be modified with UpdateService.
	DesiredCount *int64 `locationName:"desiredCount" type:"integer"`

	// The event stream for your service. A maximum of 100 of the latest events
	// are displayed.
	Events []*ServiceEvent `locationName:"events" type:"list"`

	// The period of time, in seconds, that the Amazon ECS service scheduler ignores
	// unhealthy Elastic Load Balancing target health checks after a task has first
	// started.
	HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"`

	// The launch type on which your service is running.
	LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"`

	// A list of Elastic Load Balancing load balancer objects, containing the load
	// balancer name, the container name (as it appears in a container definition),
	// and the container port to access from the load balancer.
	//
	// Services with tasks that use the awsvpc network mode (for example, those
	// with the Fargate launch type) only support Application Load Balancers and
	// Network Load Balancers; Classic Load Balancers are not supported. Also, when
	// you create any target groups for these services, you must choose ip as the
	// target type, not instance, because tasks that use the awsvpc network mode
	// are associated with an elastic network interface, not an Amazon EC2 instance.
	LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"`

	// The VPC subnet and security group configuration for tasks that receive their
	// own elastic network interface by using the awsvpc networking mode.
	NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"`

	// The number of tasks in the cluster that are in the PENDING state.
	PendingCount *int64 `locationName:"pendingCount" type:"integer"`

	// The placement constraints for the tasks in the service.
	PlacementConstraints []*PlacementConstraint `locationName:"placementConstraints" type:"list"`

	// The placement strategy that determines how tasks for the service are placed.
	PlacementStrategy []*PlacementStrategy `locationName:"placementStrategy" type:"list"`

	// The platform version on which your task is running. For more information,
	// see AWS Fargate Platform Versions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
	// in the Amazon Elastic Container Service Developer Guide.
	PlatformVersion *string `locationName:"platformVersion" type:"string"`

	// The ARN of the IAM role associated with the service that allows the Amazon
	// ECS container agent to register container instances with an Elastic Load
	// Balancing load balancer.
	RoleArn *string `locationName:"roleArn" type:"string"`

	// The number of tasks in the cluster that are in the RUNNING state.
	RunningCount *int64 `locationName:"runningCount" type:"integer"`

	// The scheduling strategy to use for the service. For more information, see
	// Services (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).
	//
	// There are two service scheduler strategies available:
	//
	//    * REPLICA-The replica scheduling strategy places and maintains the desired
	//    number of tasks across your cluster. By default, the service scheduler
	//    spreads tasks across Availability Zones. You can use task placement strategies
	//    and constraints to customize task placement decisions.
	//
	//    * DAEMON-The daemon scheduling strategy deploys exactly one task on each
	//    container instance in your cluster. When you are using this strategy,
	//    do not specify a desired number of tasks or any task placement strategies.
	//    Fargate tasks do not support the DAEMON scheduling strategy.
	SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"`

	// The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace,
	// followed by the Region of the service, the AWS account ID of the service
	// owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service .
	ServiceArn *string `locationName:"serviceArn" type:"string"`

	// The name of your service. Up to 255 letters (uppercase and lowercase), numbers,
	// hyphens, and underscores are allowed. Service names must be unique within
	// a cluster, but you can have similarly named services in multiple clusters
	// within a Region or across multiple Regions.
	ServiceName *string `locationName:"serviceName" type:"string"`

	// The service discovery registries associated with this service; see ServiceRegistry.
	ServiceRegistries []*ServiceRegistry `locationName:"serviceRegistries" type:"list"`

	// The status of the service. The valid values are ACTIVE, DRAINING, or INACTIVE.
	Status *string `locationName:"status" type:"string"`

	// The task definition to use for tasks in the service. This value is specified
	// when the service is created with CreateService, and it can be modified with
	// UpdateService.
	TaskDefinition *string `locationName:"taskDefinition" type:"string"`
}

// String returns the string representation
func (s Service) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Service) GoString() string {
	return s.String()
}

// SetClusterArn sets the ClusterArn field's value.
func (s *Service) SetClusterArn(v string) *Service {
	s.ClusterArn = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Service) SetCreatedAt(v time.Time) *Service {
	s.CreatedAt = &v
	return s
}

// SetDeploymentConfiguration sets the DeploymentConfiguration field's value.
func (s *Service) SetDeploymentConfiguration(v *DeploymentConfiguration) *Service {
	s.DeploymentConfiguration = v
	return s
}

// SetDeployments sets the Deployments field's value.
func (s *Service) SetDeployments(v []*Deployment) *Service {
	s.Deployments = v
	return s
}

// SetDesiredCount sets the DesiredCount field's value.
func (s *Service) SetDesiredCount(v int64) *Service {
	s.DesiredCount = &v
	return s
}

// SetEvents sets the Events field's value.
func (s *Service) SetEvents(v []*ServiceEvent) *Service {
	s.Events = v
	return s
}

// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value.
func (s *Service) SetHealthCheckGracePeriodSeconds(v int64) *Service {
	s.HealthCheckGracePeriodSeconds = &v
	return s
}

// SetLaunchType sets the LaunchType field's value.
func (s *Service) SetLaunchType(v string) *Service {
	s.LaunchType = &v
	return s
}

// SetLoadBalancers sets the LoadBalancers field's value.
func (s *Service) SetLoadBalancers(v []*LoadBalancer) *Service {
	s.LoadBalancers = v
	return s
}

// SetNetworkConfiguration sets the NetworkConfiguration field's value.
func (s *Service) SetNetworkConfiguration(v *NetworkConfiguration) *Service {
	s.NetworkConfiguration = v
	return s
}

// SetPendingCount sets the PendingCount field's value.
func (s *Service) SetPendingCount(v int64) *Service {
	s.PendingCount = &v
	return s
}

// SetPlacementConstraints sets the PlacementConstraints field's value.
func (s *Service) SetPlacementConstraints(v []*PlacementConstraint) *Service {
	s.PlacementConstraints = v
	return s
}

// SetPlacementStrategy sets the PlacementStrategy field's value.
func (s *Service) SetPlacementStrategy(v []*PlacementStrategy) *Service {
	s.PlacementStrategy = v
	return s
}

// SetPlatformVersion sets the PlatformVersion field's value.
func (s *Service) SetPlatformVersion(v string) *Service {
	s.PlatformVersion = &v
	return s
}

// SetRoleArn sets the RoleArn field's value.
func (s *Service) SetRoleArn(v string) *Service {
	s.RoleArn = &v
	return s
}

// SetRunningCount sets the RunningCount field's value.
func (s *Service) SetRunningCount(v int64) *Service {
	s.RunningCount = &v
	return s
}

// SetSchedulingStrategy sets the SchedulingStrategy field's value.
func (s *Service) SetSchedulingStrategy(v string) *Service {
	s.SchedulingStrategy = &v
	return s
}

// SetServiceArn sets the ServiceArn field's value.
func (s *Service) SetServiceArn(v string) *Service {
	s.ServiceArn = &v
	return s
}

// SetServiceName sets the ServiceName field's value.
func (s *Service) SetServiceName(v string) *Service {
	s.ServiceName = &v
	return s
}

// SetServiceRegistries sets the ServiceRegistries field's value.
func (s *Service) SetServiceRegistries(v []*ServiceRegistry) *Service {
	s.ServiceRegistries = v
	return s
}

// SetStatus sets the Status field's value.
func (s *Service) SetStatus(v string) *Service {
	s.Status = &v
	return s
}

// SetTaskDefinition sets the TaskDefinition field's value.
func (s *Service) SetTaskDefinition(v string) *Service {
	s.TaskDefinition = &v
	return s
}

// Details on an event associated with a service.
type ServiceEvent struct {
	_ struct{} `type:"structure"`

	// The Unix time stamp for when the event was triggered.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`

	// The ID string of the event.
	Id *string `locationName:"id" type:"string"`

	// The event message.
	Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServiceEvent) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServiceEvent) GoString() string {
	return s.String()
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *ServiceEvent) SetCreatedAt(v time.Time) *ServiceEvent {
	s.CreatedAt = &v
	return s
}

// SetId sets the Id field's value.
func (s *ServiceEvent) SetId(v string) *ServiceEvent {
	s.Id = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *ServiceEvent) SetMessage(v string) *ServiceEvent {
	s.Message = &v
	return s
}

// The specified service is not active. You can't update a service that is inactive.
// If you have previously deleted a service, you can re-create it with CreateService.
type ServiceNotActiveException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServiceNotActiveException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServiceNotActiveException) GoString() string {
	return s.String()
}

// newErrorServiceNotActiveException constructs a ServiceNotActiveException from
// the response metadata.
func newErrorServiceNotActiveException(v protocol.ResponseMetadata) error {
	return &ServiceNotActiveException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *ServiceNotActiveException) Code() string {
	return "ServiceNotActiveException"
}

// Message returns the exception's message.
func (s *ServiceNotActiveException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ServiceNotActiveException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "<code>: <message>".
func (s *ServiceNotActiveException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ServiceNotActiveException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ServiceNotActiveException) RequestID() string {
	return s.RespMetadata.RequestID
}

// The specified service could not be found. You can view your available services
// with ListServices. Amazon ECS services are cluster-specific and region-specific.
type ServiceNotFoundException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServiceNotFoundException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServiceNotFoundException) GoString() string {
	return s.String()
}

// newErrorServiceNotFoundException constructs a ServiceNotFoundException from
// the response metadata.
func newErrorServiceNotFoundException(v protocol.ResponseMetadata) error {
	return &ServiceNotFoundException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *ServiceNotFoundException) Code() string {
	return "ServiceNotFoundException"
}

// Message returns the exception's message.
func (s *ServiceNotFoundException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ServiceNotFoundException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "<code>: <message>".
func (s *ServiceNotFoundException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ServiceNotFoundException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ServiceNotFoundException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Details of the service registry.
type ServiceRegistry struct {
	_ struct{} `type:"structure"`

	// The container name value, already specified in the task definition, to be
	// used for your service discovery service. If the task definition that your
	// service task specifies uses the bridge or host network mode, you must specify
	// a containerName and containerPort combination from the task definition. If
	// the task definition that your service task specifies uses the awsvpc network
	// mode and a type SRV DNS record is used, you must specify either a containerName
	// and containerPort combination or a port value, but not both.
	ContainerName *string `locationName:"containerName" type:"string"`

	// The port value, already specified in the task definition, to be used for
	// your service discovery service. If the task definition your service task
	// specifies uses the bridge or host network mode, you must specify a containerName
	// and containerPort combination from the task definition. If the task definition
	// your service task specifies uses the awsvpc network mode and a type SRV DNS
	// record is used, you must specify either a containerName and containerPort
	// combination or a port value, but not both.
	ContainerPort *int64 `locationName:"containerPort" type:"integer"`

	// The port value used if your service discovery service specified an SRV record.
	// This field is required if both the awsvpc network mode and SRV records are
	// used.
	Port *int64 `locationName:"port" type:"integer"`

	// The Amazon Resource Name (ARN) of the service registry. The currently supported
	// service registry is Amazon Route 53 Auto Naming. For more information, see
	// Service (https://docs.aws.amazon.com/Route53/latest/APIReference/API_autonaming_Service.html).
	RegistryArn *string `locationName:"registryArn" type:"string"`
}

// String returns the string representation
func (s ServiceRegistry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServiceRegistry) GoString() string {
	return s.String()
}

// SetContainerName sets the ContainerName field's value.
func (s *ServiceRegistry) SetContainerName(v string) *ServiceRegistry {
	s.ContainerName = &v
	return s
}

// SetContainerPort sets the ContainerPort field's value.
func (s *ServiceRegistry) SetContainerPort(v int64) *ServiceRegistry {
	s.ContainerPort = &v
	return s
}

// SetPort sets the Port field's value.
func (s *ServiceRegistry) SetPort(v int64) *ServiceRegistry {
	s.Port = &v
	return s
}

// SetRegistryArn sets the RegistryArn field's value.
func (s *ServiceRegistry) SetRegistryArn(v string) *ServiceRegistry {
	s.RegistryArn = &v
	return s
}

// Setting describes a single named setting and, optionally, the principal it
// applies to.
type Setting struct {
	_ struct{} `type:"structure"`

	// The name of the setting; constrained to the SettingName enum values.
	Name *string `locationName:"name" type:"string" enum:"SettingName"`

	// NOTE(review): presumably the ARN of the IAM principal this setting applies
	// to — confirm against the ECS PutAccountSetting API docs.
	PrincipalArn *string `locationName:"principalArn" type:"string"`

	// The current value of the setting.
	Value *string `locationName:"value" type:"string"`
}

// String returns the string representation
func (s Setting) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Setting) GoString() string {
	return s.String()
}

// SetName sets the Name field's value.
func (s *Setting) SetName(v string) *Setting {
	s.Name = &v
	return s
}

// SetPrincipalArn sets the PrincipalArn field's value.
func (s *Setting) SetPrincipalArn(v string) *Setting {
	s.PrincipalArn = &v
	return s
}

// SetValue sets the Value field's value.
func (s *Setting) SetValue(v string) *Setting {
	s.Value = &v
	return s
}

// StartTaskInput is the request for the StartTask operation.
type StartTaskInput struct {
	_ struct{} `type:"structure"`

	// The short name or full Amazon Resource Name (ARN) of the cluster on which
	// to start your task. If you do not specify a cluster, the default cluster
	// is assumed.
	Cluster *string `locationName:"cluster" type:"string"`

	// The container instance IDs or full ARN entries for the container instances
	// on which you would like to place your task. You can specify up to 10 container
	// instances.
	//
	// ContainerInstances is a required field
	ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"`

	// The name of the task group to associate with the task. The default value
	// is the family name of the task definition (for example, family:my-family-name).
	Group *string `locationName:"group" type:"string"`

	// The VPC subnet and security group configuration for tasks that receive their
	// own elastic network interface by using the awsvpc networking mode.
	NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"`

	// A list of container overrides in JSON format that specify the name of a container
	// in the specified task definition and the overrides it should receive. You
	// can override the default command for a container (that is specified in the
	// task definition or Docker image) with a command override. You can also override
	// existing environment variables (that are specified in the task definition
	// or Docker image) on a container or add new environment variables to it with
	// an environment override.
	//
	// A total of 8192 characters are allowed for overrides. This limit includes
	// the JSON formatting characters of the override structure.
	Overrides *TaskOverride `locationName:"overrides" type:"structure"`

	// An optional tag specified when a task is started. For example, if you automatically
	// trigger a task to run a batch process job, you could apply a unique identifier
	// for that job to your task with the startedBy parameter. You can then identify
	// which tasks belong to that job by filtering the results of a ListTasks call
	// with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers,
	// hyphens, and underscores are allowed.
	//
	// If a task is started by an Amazon ECS service, then the startedBy parameter
	// contains the deployment ID of the service that starts it.
	StartedBy *string `locationName:"startedBy" type:"string"`

	// The family and revision (family:revision) or full ARN of the task definition
	// to start. If a revision is not specified, the latest ACTIVE revision is used.
	//
	// TaskDefinition is a required field
	TaskDefinition *string `locationName:"taskDefinition" type:"string" required:"true"`
}

// String returns the string representation
func (s StartTaskInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartTaskInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartTaskInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartTaskInput"}
	if s.ContainerInstances == nil {
		invalidParams.Add(request.NewErrParamRequired("ContainerInstances"))
	}
	if s.TaskDefinition == nil {
		invalidParams.Add(request.NewErrParamRequired("TaskDefinition"))
	}
	if s.NetworkConfiguration != nil {
		if err := s.NetworkConfiguration.Validate(); err != nil {
			invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCluster sets the Cluster field's value.
func (s *StartTaskInput) SetCluster(v string) *StartTaskInput {
	s.Cluster = &v
	return s
}

// SetContainerInstances sets the ContainerInstances field's value.
func (s *StartTaskInput) SetContainerInstances(v []*string) *StartTaskInput {
	s.ContainerInstances = v
	return s
}

// SetGroup sets the Group field's value.
func (s *StartTaskInput) SetGroup(v string) *StartTaskInput {
	s.Group = &v
	return s
}

// SetNetworkConfiguration sets the NetworkConfiguration field's value.
func (s *StartTaskInput) SetNetworkConfiguration(v *NetworkConfiguration) *StartTaskInput {
	s.NetworkConfiguration = v
	return s
}

// SetOverrides sets the Overrides field's value.
func (s *StartTaskInput) SetOverrides(v *TaskOverride) *StartTaskInput {
	s.Overrides = v
	return s
}

// SetStartedBy sets the StartedBy field's value.
func (s *StartTaskInput) SetStartedBy(v string) *StartTaskInput {
	s.StartedBy = &v
	return s
}

// SetTaskDefinition sets the TaskDefinition field's value.
func (s *StartTaskInput) SetTaskDefinition(v string) *StartTaskInput {
	s.TaskDefinition = &v
	return s
}

// StartTaskOutput is the response returned by the StartTask operation.
type StartTaskOutput struct {
	_ struct{} `type:"structure"`

	// Any failures associated with the call.
	Failures []*Failure `locationName:"failures" type:"list"`

	// A full description of the tasks that were started. Each task that was successfully
	// placed on your container instances is described.
	Tasks []*Task `locationName:"tasks" type:"list"`
}

// String returns the string representation
func (s StartTaskOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartTaskOutput) GoString() string {
	return s.String()
}

// SetFailures sets the Failures field's value.
func (s *StartTaskOutput) SetFailures(v []*Failure) *StartTaskOutput {
	s.Failures = v
	return s
}

// SetTasks sets the Tasks field's value.
func (s *StartTaskOutput) SetTasks(v []*Task) *StartTaskOutput {
	s.Tasks = v
	return s
}

// StopTaskInput is the request for the StopTask operation.
type StopTaskInput struct {
	_ struct{} `type:"structure"`

	// The short name or full Amazon Resource Name (ARN) of the cluster that hosts
	// the task to stop. If you do not specify a cluster, the default cluster is
	// assumed.
	Cluster *string `locationName:"cluster" type:"string"`

	// An optional message specified when a task is stopped. For example, if you
	// are using a custom scheduler, you can use this parameter to specify the reason
	// for stopping the task here, and the message appears in subsequent DescribeTasks
	// API operations on this task. Up to 255 characters are allowed in this message.
	Reason *string `locationName:"reason" type:"string"`

	// The task ID or full ARN entry of the task to stop.
	//
	// Task is a required field
	Task *string `locationName:"task" type:"string" required:"true"`
}

// String returns the string representation
func (s StopTaskInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopTaskInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StopTaskInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StopTaskInput"}
	if s.Task == nil {
		invalidParams.Add(request.NewErrParamRequired("Task"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCluster sets the Cluster field's value.
func (s *StopTaskInput) SetCluster(v string) *StopTaskInput {
	s.Cluster = &v
	return s
}

// SetReason sets the Reason field's value.
func (s *StopTaskInput) SetReason(v string) *StopTaskInput {
	s.Reason = &v
	return s
}

// SetTask sets the Task field's value.
func (s *StopTaskInput) SetTask(v string) *StopTaskInput {
	s.Task = &v
	return s
}

// StopTaskOutput is the response returned by the StopTask operation.
type StopTaskOutput struct {
	_ struct{} `type:"structure"`

	// The task that was stopped.
	Task *Task `locationName:"task" type:"structure"`
}

// String returns the string representation
func (s StopTaskOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopTaskOutput) GoString() string {
	return s.String()
}

// SetTask sets the Task field's value.
func (s *StopTaskOutput) SetTask(v *Task) *StopTaskOutput {
	s.Task = v
	return s
}

// SubmitAttachmentStateChangesInput is the request for the SubmitAttachmentStateChanges
// operation.
type SubmitAttachmentStateChangesInput struct {
	_ struct{} `type:"structure"`

	// The attachment state changes to report; each entry is validated by Validate.
	Attachments []*AttachmentStateChange `locationName:"attachments" type:"list"`

	// The short name or full ARN of the cluster that hosts the attachments.
	Cluster *string `locationName:"cluster" type:"string"`
}

// String returns the string representation
func (s SubmitAttachmentStateChangesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SubmitAttachmentStateChangesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *SubmitAttachmentStateChangesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "SubmitAttachmentStateChangesInput"}
	if s.Attachments != nil {
		for i, v := range s.Attachments {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attachments", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttachments sets the Attachments field's value.
func (s *SubmitAttachmentStateChangesInput) SetAttachments(v []*AttachmentStateChange) *SubmitAttachmentStateChangesInput {
	s.Attachments = v
	return s
}

// SetCluster sets the Cluster field's value.
func (s *SubmitAttachmentStateChangesInput) SetCluster(v string) *SubmitAttachmentStateChangesInput {
	s.Cluster = &v
	return s
}

// SubmitAttachmentStateChangesOutput is the response returned by the
// SubmitAttachmentStateChanges operation.
type SubmitAttachmentStateChangesOutput struct {
	_ struct{} `type:"structure"`

	// Acknowledgement of the state change.
	Acknowledgment *string `locationName:"acknowledgment" type:"string"`
}

// String returns the string representation
func (s SubmitAttachmentStateChangesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SubmitAttachmentStateChangesOutput) GoString() string {
	return s.String()
}

// SetAcknowledgment sets the Acknowledgment field's value.
func (s *SubmitAttachmentStateChangesOutput) SetAcknowledgment(v string) *SubmitAttachmentStateChangesOutput {
	s.Acknowledgment = &v
	return s
}

// SubmitContainerStateChangeInput is the request for the SubmitContainerStateChange
// operation.
type SubmitContainerStateChangeInput struct {
	_ struct{} `type:"structure"`

	// The short name or full ARN of the cluster that hosts the container.
	Cluster *string `locationName:"cluster" type:"string"`

	// The name of the container.
	ContainerName *string `locationName:"containerName" type:"string"`

	// The exit code returned for the state change request.
	ExitCode *int64 `locationName:"exitCode" type:"integer"`

	// State changes for managed agents running in the container; each entry is
	// validated by Validate.
	ManagedAgents []*ManagedAgentStateChange `locationName:"managedAgents" type:"list"`

	// The network bindings of the container.
	NetworkBindings []*NetworkBinding `locationName:"networkBindings" type:"list"`

	// The reason for the state change request.
	Reason *string `locationName:"reason" type:"string"`

	// NOTE(review): presumably the container runtime's ID for the container —
	// confirm against the ECS agent API docs.
	RuntimeId *string `locationName:"runtimeId" type:"string"`

	// The status of the state change request.
	Status *string `locationName:"status" type:"string"`

	// The task ID or full Amazon Resource Name (ARN) of the task that hosts the
	// container.
	Task *string `locationName:"task" type:"string"`
}

// String returns the string representation
func (s SubmitContainerStateChangeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SubmitContainerStateChangeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *SubmitContainerStateChangeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "SubmitContainerStateChangeInput"}
	if s.ManagedAgents != nil {
		for i, v := range s.ManagedAgents {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ManagedAgents", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCluster sets the Cluster field's value.
func (s *SubmitContainerStateChangeInput) SetCluster(v string) *SubmitContainerStateChangeInput {
	s.Cluster = &v
	return s
}

// SetContainerName sets the ContainerName field's value.
func (s *SubmitContainerStateChangeInput) SetContainerName(v string) *SubmitContainerStateChangeInput {
	s.ContainerName = &v
	return s
}

// SetExitCode sets the ExitCode field's value.
func (s *SubmitContainerStateChangeInput) SetExitCode(v int64) *SubmitContainerStateChangeInput {
	s.ExitCode = &v
	return s
}

// SetManagedAgents sets the ManagedAgents field's value.
func (s *SubmitContainerStateChangeInput) SetManagedAgents(v []*ManagedAgentStateChange) *SubmitContainerStateChangeInput {
	s.ManagedAgents = v
	return s
}

// SetNetworkBindings sets the NetworkBindings field's value.
func (s *SubmitContainerStateChangeInput) SetNetworkBindings(v []*NetworkBinding) *SubmitContainerStateChangeInput {
	s.NetworkBindings = v
	return s
}

// SetReason sets the Reason field's value.
+func (s *SubmitContainerStateChangeInput) SetReason(v string) *SubmitContainerStateChangeInput { + s.Reason = &v + return s +} + +// SetRuntimeId sets the RuntimeId field's value. +func (s *SubmitContainerStateChangeInput) SetRuntimeId(v string) *SubmitContainerStateChangeInput { + s.RuntimeId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SubmitContainerStateChangeInput) SetStatus(v string) *SubmitContainerStateChangeInput { + s.Status = &v + return s +} + +// SetTask sets the Task field's value. +func (s *SubmitContainerStateChangeInput) SetTask(v string) *SubmitContainerStateChangeInput { + s.Task = &v + return s +} + +type SubmitContainerStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. + Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitContainerStateChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitContainerStateChangeOutput) GoString() string { + return s.String() +} + +// SetAcknowledgment sets the Acknowledgment field's value. +func (s *SubmitContainerStateChangeOutput) SetAcknowledgment(v string) *SubmitContainerStateChangeOutput { + s.Acknowledgment = &v + return s +} + +type SubmitTaskStateChangeInput struct { + _ struct{} `type:"structure"` + + // Any attachments associated with the state change request. + Attachments []*AttachmentStateChange `locationName:"attachments" type:"list"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the task. + Cluster *string `locationName:"cluster" type:"string"` + + // Any containers associated with the state change request. + Containers []*ContainerStateChange `locationName:"containers" type:"list"` + + // The Unix time stamp for when the task execution stopped. 
+ ExecutionStoppedAt *time.Time `locationName:"executionStoppedAt" type:"timestamp"` + + ManagedAgents []*ManagedAgentStateChange `locationName:"managedAgents" type:"list"` + + // The Unix time stamp for when the container image pull began. + PullStartedAt *time.Time `locationName:"pullStartedAt" type:"timestamp"` + + // The Unix time stamp for when the container image pull completed. + PullStoppedAt *time.Time `locationName:"pullStoppedAt" type:"timestamp"` + + // The reason for the state change request. + Reason *string `locationName:"reason" type:"string"` + + // The status of the state change request. + Status *string `locationName:"status" type:"string"` + + // The task ID or full ARN of the task in the state change request. + Task *string `locationName:"task" type:"string"` +} + +// String returns the string representation +func (s SubmitTaskStateChangeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitTaskStateChangeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SubmitTaskStateChangeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SubmitTaskStateChangeInput"} + if s.Attachments != nil { + for i, v := range s.Attachments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attachments", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Containers != nil { + for i, v := range s.Containers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Containers", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ManagedAgents != nil { + for i, v := range s.ManagedAgents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ManagedAgents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachments sets the Attachments field's value. +func (s *SubmitTaskStateChangeInput) SetAttachments(v []*AttachmentStateChange) *SubmitTaskStateChangeInput { + s.Attachments = v + return s +} + +// SetCluster sets the Cluster field's value. +func (s *SubmitTaskStateChangeInput) SetCluster(v string) *SubmitTaskStateChangeInput { + s.Cluster = &v + return s +} + +// SetContainers sets the Containers field's value. +func (s *SubmitTaskStateChangeInput) SetContainers(v []*ContainerStateChange) *SubmitTaskStateChangeInput { + s.Containers = v + return s +} + +// SetExecutionStoppedAt sets the ExecutionStoppedAt field's value. +func (s *SubmitTaskStateChangeInput) SetExecutionStoppedAt(v time.Time) *SubmitTaskStateChangeInput { + s.ExecutionStoppedAt = &v + return s +} + +// SetManagedAgents sets the ManagedAgents field's value. 
+func (s *SubmitTaskStateChangeInput) SetManagedAgents(v []*ManagedAgentStateChange) *SubmitTaskStateChangeInput { + s.ManagedAgents = v + return s +} + +// SetPullStartedAt sets the PullStartedAt field's value. +func (s *SubmitTaskStateChangeInput) SetPullStartedAt(v time.Time) *SubmitTaskStateChangeInput { + s.PullStartedAt = &v + return s +} + +// SetPullStoppedAt sets the PullStoppedAt field's value. +func (s *SubmitTaskStateChangeInput) SetPullStoppedAt(v time.Time) *SubmitTaskStateChangeInput { + s.PullStoppedAt = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *SubmitTaskStateChangeInput) SetReason(v string) *SubmitTaskStateChangeInput { + s.Reason = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SubmitTaskStateChangeInput) SetStatus(v string) *SubmitTaskStateChangeInput { + s.Status = &v + return s +} + +// SetTask sets the Task field's value. +func (s *SubmitTaskStateChangeInput) SetTask(v string) *SubmitTaskStateChangeInput { + s.Task = &v + return s +} + +type SubmitTaskStateChangeOutput struct { + _ struct{} `type:"structure"` + + // Acknowledgement of the state change. + Acknowledgment *string `locationName:"acknowledgment" type:"string"` +} + +// String returns the string representation +func (s SubmitTaskStateChangeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubmitTaskStateChangeOutput) GoString() string { + return s.String() +} + +// SetAcknowledgment sets the Acknowledgment field's value. 
+func (s *SubmitTaskStateChangeOutput) SetAcknowledgment(v string) *SubmitTaskStateChangeOutput { + s.Acknowledgment = &v + return s +} + +type SystemControl struct { + _ struct{} `type:"structure"` + + Namespace *string `locationName:"namespace" type:"string"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s SystemControl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SystemControl) GoString() string { + return s.String() +} + +// SetNamespace sets the Namespace field's value. +func (s *SystemControl) SetNamespace(v string) *SystemControl { + s.Namespace = &v + return s +} + +// SetValue sets the Value field's value. +func (s *SystemControl) SetValue(v string) *SystemControl { + s.Value = &v + return s +} + +type Tag struct { + _ struct{} `type:"structure"` + + Key *string `locationName:"key" min:"1" type:"string"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// The specified target could not be found. You can view your available container +// instances with ListContainerInstances. 
Amazon ECS container instances are +// cluster-specific and region-specific. +type TargetNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TargetNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetNotFoundException) GoString() string { + return s.String() +} + +func newErrorTargetNotFoundException(v protocol.ResponseMetadata) error { + return &TargetNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TargetNotFoundException) Code() string { + return "TargetNotFoundException" +} + +// Message returns the exception's message. +func (s *TargetNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TargetNotFoundException) OrigErr() error { + return nil +} + +func (s *TargetNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TargetNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TargetNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Details on a task in a cluster. +type Task struct { + _ struct{} `type:"structure"` + + // The elastic network adapter associated with the task if the task uses the + // awsvpc network mode. + Attachments []*Attachment `locationName:"attachments" type:"list"` + + // The ARN of the cluster that hosts the task. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The connectivity status of a task. 
+ Connectivity *string `locationName:"connectivity" type:"string" enum:"Connectivity"` + + // The Unix time stamp for when the task last went into CONNECTED status. + ConnectivityAt *time.Time `locationName:"connectivityAt" type:"timestamp"` + + // The ARN of the container instances that host the task. + ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` + + // The containers associated with the task. + Containers []*Container `locationName:"containers" type:"list"` + + // The number of CPU units used by the task. It can be expressed as an integer + // using CPU units, for example 1024, or as a string using vCPUs, for example + // 1 vCPU or 1 vcpu, in a task definition. String values are converted to an + // integer indicating the CPU units when the task definition is registered. + // + // If using the EC2 launch type, this field is optional. Supported values are + // between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs). + // + // If using the Fargate launch type, this field is required and you must use + // one of the following values, which determines your range of supported values + // for the memory parameter: + // + // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), + // 2048 (2 GB) + // + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + // (3 GB), 4096 (4 GB) + // + // * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 + // (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 + // (16 GB) in increments of 1024 (1 GB) + // + // * 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 + // (30 GB) in increments of 1024 (1 GB) + Cpu *string `locationName:"cpu" type:"string"` + + // The Unix time stamp for when the task was created (the task entered the PENDING + // state). 
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The desired status of the task. + DesiredStatus *string `locationName:"desiredStatus" type:"string"` + + // The Unix time stamp for when the task execution stopped. + ExecutionStoppedAt *time.Time `locationName:"executionStoppedAt" type:"timestamp"` + + // The name of the task group associated with the task. + Group *string `locationName:"group" type:"string"` + + // The health status for the task, which is determined by the health of the + // essential containers in the task. If all essential containers in the task + // are reporting as HEALTHY, then the task status also reports as HEALTHY. If + // any essential containers in the task are reporting as UNHEALTHY or UNKNOWN, + // then the task status also reports as UNHEALTHY or UNKNOWN, accordingly. + // + // The Amazon ECS container agent does not monitor or report on Docker health + // checks that are embedded in a container image (such as those specified in + // a parent image or from the image's Dockerfile) and not specified in the container + // definition. Health check parameters that are specified in a container definition + // override any Docker health checks that exist in the container image. + HealthStatus *string `locationName:"healthStatus" type:"string" enum:"HealthStatus"` + + // The last known status of the task. + LastStatus *string `locationName:"lastStatus" type:"string"` + + // The launch type on which your task is running. + LaunchType *string `locationName:"launchType" type:"string" enum:"LaunchType"` + + // The amount of memory (in MiB) used by the task. It can be expressed as an + // integer using MiB, for example 1024, or as a string using GB, for example + // 1GB or 1 GB, in a task definition. String values are converted to an integer + // indicating the MiB when the task definition is registered. + // + // If using the EC2 launch type, this field is optional. 
+ // + // If using the Fargate launch type, this field is required and you must use + // one of the following values, which determines your range of supported values + // for the cpu parameter: + // + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 + // vCPU) + // + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 + // (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + Memory *string `locationName:"memory" type:"string"` + + // One or more container overrides. + Overrides *TaskOverride `locationName:"overrides" type:"structure"` + + // The platform version on which your task is running. For more information, + // see AWS Fargate Platform Versions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) + // in the Amazon Elastic Container Service Developer Guide. + PlatformVersion *string `locationName:"platformVersion" type:"string"` + + // The Unix time stamp for when the container image pull began. + PullStartedAt *time.Time `locationName:"pullStartedAt" type:"timestamp"` + + // The Unix time stamp for when the container image pull completed. + PullStoppedAt *time.Time `locationName:"pullStoppedAt" type:"timestamp"` + + // The Unix time stamp for when the task started (the task transitioned from + // the PENDING state to the RUNNING state). + StartedAt *time.Time `locationName:"startedAt" type:"timestamp"` + + // The tag specified when a task is started. If the task is started by an Amazon + // ECS service, then the startedBy parameter contains the deployment ID of the + // service that starts it. 
+ StartedBy *string `locationName:"startedBy" type:"string"` + + // The Unix time stamp for when the task was stopped (the task transitioned + // from the RUNNING state to the STOPPED state). + StoppedAt *time.Time `locationName:"stoppedAt" type:"timestamp"` + + // The reason the task was stopped. + StoppedReason *string `locationName:"stoppedReason" type:"string"` + + // The Unix time stamp for when the task stops (transitions from the RUNNING + // state to STOPPED). + StoppingAt *time.Time `locationName:"stoppingAt" type:"timestamp"` + + // The metadata that you apply to the task to help you categorize and organize + // them. Each tag consists of a key and an optional value, both of which you + // define. Tag keys can have a maximum character length of 128 characters, and + // tag values can have a maximum length of 256 characters. + Tags []*Tag `locationName:"tags" type:"list"` + + // The Amazon Resource Name (ARN) of the task. + TaskArn *string `locationName:"taskArn" type:"string"` + + // The ARN of the task definition that creates the task. + TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` + + // The version counter for the task. Every time a task experiences a change + // that triggers a CloudWatch event, the version counter is incremented. If + // you are replicating your Amazon ECS task state with CloudWatch Events, you + // can compare the version of a task reported by the Amazon ECS APIs with the + // version reported in CloudWatch Events for the task (inside the detail object) + // to verify that the version in your event stream is current. + Version *int64 `locationName:"version" type:"long"` +} + +// String returns the string representation +func (s Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Task) GoString() string { + return s.String() +} + +// SetAttachments sets the Attachments field's value. 
+func (s *Task) SetAttachments(v []*Attachment) *Task { + s.Attachments = v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *Task) SetClusterArn(v string) *Task { + s.ClusterArn = &v + return s +} + +// SetConnectivity sets the Connectivity field's value. +func (s *Task) SetConnectivity(v string) *Task { + s.Connectivity = &v + return s +} + +// SetConnectivityAt sets the ConnectivityAt field's value. +func (s *Task) SetConnectivityAt(v time.Time) *Task { + s.ConnectivityAt = &v + return s +} + +// SetContainerInstanceArn sets the ContainerInstanceArn field's value. +func (s *Task) SetContainerInstanceArn(v string) *Task { + s.ContainerInstanceArn = &v + return s +} + +// SetContainers sets the Containers field's value. +func (s *Task) SetContainers(v []*Container) *Task { + s.Containers = v + return s +} + +// SetCpu sets the Cpu field's value. +func (s *Task) SetCpu(v string) *Task { + s.Cpu = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Task) SetCreatedAt(v time.Time) *Task { + s.CreatedAt = &v + return s +} + +// SetDesiredStatus sets the DesiredStatus field's value. +func (s *Task) SetDesiredStatus(v string) *Task { + s.DesiredStatus = &v + return s +} + +// SetExecutionStoppedAt sets the ExecutionStoppedAt field's value. +func (s *Task) SetExecutionStoppedAt(v time.Time) *Task { + s.ExecutionStoppedAt = &v + return s +} + +// SetGroup sets the Group field's value. +func (s *Task) SetGroup(v string) *Task { + s.Group = &v + return s +} + +// SetHealthStatus sets the HealthStatus field's value. +func (s *Task) SetHealthStatus(v string) *Task { + s.HealthStatus = &v + return s +} + +// SetLastStatus sets the LastStatus field's value. +func (s *Task) SetLastStatus(v string) *Task { + s.LastStatus = &v + return s +} + +// SetLaunchType sets the LaunchType field's value. +func (s *Task) SetLaunchType(v string) *Task { + s.LaunchType = &v + return s +} + +// SetMemory sets the Memory field's value. 
+func (s *Task) SetMemory(v string) *Task { + s.Memory = &v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *Task) SetOverrides(v *TaskOverride) *Task { + s.Overrides = v + return s +} + +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *Task) SetPlatformVersion(v string) *Task { + s.PlatformVersion = &v + return s +} + +// SetPullStartedAt sets the PullStartedAt field's value. +func (s *Task) SetPullStartedAt(v time.Time) *Task { + s.PullStartedAt = &v + return s +} + +// SetPullStoppedAt sets the PullStoppedAt field's value. +func (s *Task) SetPullStoppedAt(v time.Time) *Task { + s.PullStoppedAt = &v + return s +} + +// SetStartedAt sets the StartedAt field's value. +func (s *Task) SetStartedAt(v time.Time) *Task { + s.StartedAt = &v + return s +} + +// SetStartedBy sets the StartedBy field's value. +func (s *Task) SetStartedBy(v string) *Task { + s.StartedBy = &v + return s +} + +// SetStoppedAt sets the StoppedAt field's value. +func (s *Task) SetStoppedAt(v time.Time) *Task { + s.StoppedAt = &v + return s +} + +// SetStoppedReason sets the StoppedReason field's value. +func (s *Task) SetStoppedReason(v string) *Task { + s.StoppedReason = &v + return s +} + +// SetStoppingAt sets the StoppingAt field's value. +func (s *Task) SetStoppingAt(v time.Time) *Task { + s.StoppingAt = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Task) SetTags(v []*Tag) *Task { + s.Tags = v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *Task) SetTaskArn(v string) *Task { + s.TaskArn = &v + return s +} + +// SetTaskDefinitionArn sets the TaskDefinitionArn field's value. +func (s *Task) SetTaskDefinitionArn(v string) *Task { + s.TaskDefinitionArn = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Task) SetVersion(v int64) *Task { + s.Version = &v + return s +} + +// Details of a task definition. 
+type TaskDefinition struct { + _ struct{} `type:"structure"` + + // The launch type to use with your task. For more information, see Amazon ECS + // Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // in the Amazon Elastic Container Service Developer Guide. + Compatibilities []*string `locationName:"compatibilities" type:"list"` + + // A list of container definitions in JSON format that describe the different + // containers that make up your task. For more information about container definition + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // in the Amazon Elastic Container Service Developer Guide. + ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"` + + // The number of cpu units used by the task. If using the EC2 launch type, this + // field is optional and any value can be used. If using the Fargate launch + // type, this field is required and you must use one of the following values, + // which determines your range of valid values for the memory parameter: + // + // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), + // 2048 (2 GB) + // + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + // (3 GB), 4096 (4 GB) + // + // * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 + // (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // + // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 + // (16 GB) in increments of 1024 (1 GB) + // + // * 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 + // (30 GB) in increments of 1024 (1 GB) + Cpu *string `locationName:"cpu" type:"string"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon + // ECS container agent and the Docker daemon can assume. 
+ ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + + // The family of your task definition, used as the definition name. + Family *string `locationName:"family" type:"string"` + + InferenceAccelerators []*InferenceAccelerator `locationName:"inferenceAccelerators" type:"list"` + + IpcMode *string `locationName:"ipcMode" type:"string" enum:"IpcMode"` + + // The amount (in MiB) of memory used by the task. If using the EC2 launch type, + // this field is optional and any value can be used. If using the Fargate launch + // type, this field is required and you must use one of the following values, + // which determines your range of valid values for the cpu parameter: + // + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 + // vCPU) + // + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: + // 512 (.5 vCPU) + // + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 + // (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) + // + // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) + // + // * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - + // Available cpu values: 4096 (4 vCPU) + Memory *string `locationName:"memory" type:"string"` + + // The Docker networking mode to use for the containers in the task. The valid + // values are none, bridge, awsvpc, and host. The default Docker network mode + // is bridge. If using the Fargate launch type, the awsvpc network mode is required. + // If using the EC2 launch type, any network mode can be used. If the network + // mode is set to none, you can't specify port mappings in your container definitions, + // and the task's containers do not have external connectivity. 
The host and + // awsvpc network modes offer the highest networking performance for containers + // because they use the EC2 network stack instead of the virtualized network + // stack provided by the bridge mode. + // + // With the host and awsvpc network modes, exposed container ports are mapped + // directly to the corresponding host port (for the host network mode) or the + // attached elastic network interface port (for the awsvpc network mode), so + // you cannot take advantage of dynamic host port mappings. + // + // If the network mode is awsvpc, the task is allocated an Elastic Network Interface, + // and you must specify a NetworkConfiguration when you create a service or + // run a task with the task definition. For more information, see Task Networking + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // Currently, only the Amazon ECS-optimized AMI, other Amazon Linux variants + // with the ecs-init package, or AWS Fargate infrastructure support the awsvpc + // network mode. + // + // If the network mode is host, you can't run multiple instantiations of the + // same task on a single container instance when port mappings are used. + // + // Docker for Windows uses different network modes than Docker for Linux. When + // you register a task definition with Windows containers, you must not specify + // a network mode. If you use the console to register a task definition with + // Windows containers, you must choose the network mode object. + // + // For more information, see Network settings (https://docs.docker.com/engine/reference/run/#network-settings) + // in the Docker run reference. + NetworkMode *string `locationName:"networkMode" type:"string" enum:"NetworkMode"` + + PidMode *string `locationName:"pidMode" type:"string" enum:"PidMode"` + + // An array of placement constraint objects to use for tasks. 
This field is + // not valid if using the Fargate launch type for your task. + PlacementConstraints []*TaskDefinitionPlacementConstraint `locationName:"placementConstraints" type:"list"` + + ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` + + // The container instance attributes required by your task. This field is not + // valid if using the Fargate launch type for your task. + RequiresAttributes []*Attribute `locationName:"requiresAttributes" type:"list"` + + // The launch type the task is using. + RequiresCompatibilities []*string `locationName:"requiresCompatibilities" type:"list"` + + // The revision of the task in a particular family. The revision is a version + // number of a task definition in a family. When you register a task definition + // for the first time, the revision is 1; each time you register a new revision + // of a task definition in the same family, the revision value always increases + // by one (even if you have deregistered previous revisions in this family). + Revision *int64 `locationName:"revision" type:"integer"` + + // The status of the task definition. + Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` + + // The full Amazon Resource Name (ARN) of the task definition. + TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` + + // The ARN of the IAM role that containers in this task can assume. All containers + // in this task are granted the permissions that are specified in this role. + // + // IAM roles for tasks on Windows require that the -EnableTaskIAMRole option + // is set when you launch the Amazon ECS-optimized Windows AMI. Your containers + // must also run some configuration code in order to take advantage of the feature. + // For more information, see Windows IAM Roles for Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) + // in the Amazon Elastic Container Service Developer Guide. 
+ TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` + + // The list of volumes in a task. + // + // If you are using the Fargate launch type, the host and sourcePath parameters + // are not supported. + // + // For more information about volume definition parameters and defaults, see + // Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // in the Amazon Elastic Container Service Developer Guide. + Volumes []*Volume `locationName:"volumes" type:"list"` +} + +// String returns the string representation +func (s TaskDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskDefinition) GoString() string { + return s.String() +} + +// SetCompatibilities sets the Compatibilities field's value. +func (s *TaskDefinition) SetCompatibilities(v []*string) *TaskDefinition { + s.Compatibilities = v + return s +} + +// SetContainerDefinitions sets the ContainerDefinitions field's value. +func (s *TaskDefinition) SetContainerDefinitions(v []*ContainerDefinition) *TaskDefinition { + s.ContainerDefinitions = v + return s +} + +// SetCpu sets the Cpu field's value. +func (s *TaskDefinition) SetCpu(v string) *TaskDefinition { + s.Cpu = &v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *TaskDefinition) SetExecutionRoleArn(v string) *TaskDefinition { + s.ExecutionRoleArn = &v + return s +} + +// SetFamily sets the Family field's value. +func (s *TaskDefinition) SetFamily(v string) *TaskDefinition { + s.Family = &v + return s +} + +// SetInferenceAccelerators sets the InferenceAccelerators field's value. +func (s *TaskDefinition) SetInferenceAccelerators(v []*InferenceAccelerator) *TaskDefinition { + s.InferenceAccelerators = v + return s +} + +// SetIpcMode sets the IpcMode field's value. 
+func (s *TaskDefinition) SetIpcMode(v string) *TaskDefinition { + s.IpcMode = &v + return s +} + +// SetMemory sets the Memory field's value. +func (s *TaskDefinition) SetMemory(v string) *TaskDefinition { + s.Memory = &v + return s +} + +// SetNetworkMode sets the NetworkMode field's value. +func (s *TaskDefinition) SetNetworkMode(v string) *TaskDefinition { + s.NetworkMode = &v + return s +} + +// SetPidMode sets the PidMode field's value. +func (s *TaskDefinition) SetPidMode(v string) *TaskDefinition { + s.PidMode = &v + return s +} + +// SetPlacementConstraints sets the PlacementConstraints field's value. +func (s *TaskDefinition) SetPlacementConstraints(v []*TaskDefinitionPlacementConstraint) *TaskDefinition { + s.PlacementConstraints = v + return s +} + +// SetProxyConfiguration sets the ProxyConfiguration field's value. +func (s *TaskDefinition) SetProxyConfiguration(v *ProxyConfiguration) *TaskDefinition { + s.ProxyConfiguration = v + return s +} + +// SetRequiresAttributes sets the RequiresAttributes field's value. +func (s *TaskDefinition) SetRequiresAttributes(v []*Attribute) *TaskDefinition { + s.RequiresAttributes = v + return s +} + +// SetRequiresCompatibilities sets the RequiresCompatibilities field's value. +func (s *TaskDefinition) SetRequiresCompatibilities(v []*string) *TaskDefinition { + s.RequiresCompatibilities = v + return s +} + +// SetRevision sets the Revision field's value. +func (s *TaskDefinition) SetRevision(v int64) *TaskDefinition { + s.Revision = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *TaskDefinition) SetStatus(v string) *TaskDefinition { + s.Status = &v + return s +} + +// SetTaskDefinitionArn sets the TaskDefinitionArn field's value. +func (s *TaskDefinition) SetTaskDefinitionArn(v string) *TaskDefinition { + s.TaskDefinitionArn = &v + return s +} + +// SetTaskRoleArn sets the TaskRoleArn field's value. 
+func (s *TaskDefinition) SetTaskRoleArn(v string) *TaskDefinition { + s.TaskRoleArn = &v + return s +} + +// SetVolumes sets the Volumes field's value. +func (s *TaskDefinition) SetVolumes(v []*Volume) *TaskDefinition { + s.Volumes = v + return s +} + +// An object representing a constraint on task placement in the task definition. +// +// If you are using the Fargate launch type, task placement constraints are +// not supported. +// +// For more information, see Task Placement Constraints (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) +// in the Amazon Elastic Container Service Developer Guide. +type TaskDefinitionPlacementConstraint struct { + _ struct{} `type:"structure"` + + // A cluster query language expression to apply to the constraint. For more + // information, see Cluster Query Language (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) + // in the Amazon Elastic Container Service Developer Guide. + Expression *string `locationName:"expression" type:"string"` + + // The type of constraint. The DistinctInstance constraint ensures that each + // task in a particular group is running on a different container instance. + // The MemberOf constraint restricts selection to be from a group of valid candidates. + Type *string `locationName:"type" type:"string" enum:"TaskDefinitionPlacementConstraintType"` +} + +// String returns the string representation +func (s TaskDefinitionPlacementConstraint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskDefinitionPlacementConstraint) GoString() string { + return s.String() +} + +// SetExpression sets the Expression field's value. +func (s *TaskDefinitionPlacementConstraint) SetExpression(v string) *TaskDefinitionPlacementConstraint { + s.Expression = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *TaskDefinitionPlacementConstraint) SetType(v string) *TaskDefinitionPlacementConstraint { + s.Type = &v + return s +} + +// The overrides associated with a task. +type TaskOverride struct { + _ struct{} `type:"structure"` + + // One or more container overrides sent to a task. + ContainerOverrides []*ContainerOverride `locationName:"containerOverrides" type:"list"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon + // ECS container agent and the Docker daemon can assume. + ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM role that containers in this task + // can assume. All containers in this task are granted the permissions that + // are specified in this role. + TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` +} + +// String returns the string representation +func (s TaskOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskOverride) GoString() string { + return s.String() +} + +// SetContainerOverrides sets the ContainerOverrides field's value. +func (s *TaskOverride) SetContainerOverrides(v []*ContainerOverride) *TaskOverride { + s.ContainerOverrides = v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *TaskOverride) SetExecutionRoleArn(v string) *TaskOverride { + s.ExecutionRoleArn = &v + return s +} + +// SetTaskRoleArn sets the TaskRoleArn field's value. +func (s *TaskOverride) SetTaskRoleArn(v string) *TaskOverride { + s.TaskRoleArn = &v + return s +} + +// The container path, mount options, and size of the tmpfs mount. +type Tmpfs struct { + _ struct{} `type:"structure"` + + // The absolute file path where the tmpfs volume is to be mounted. + // + // ContainerPath is a required field + ContainerPath *string `locationName:"containerPath" type:"string" required:"true"` + + // The list of tmpfs volume mount options. 
+ // + // Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" + // | "exec" | "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" | + // "nomand" | "atime" | "noatime" | "diratime" | "nodiratime" | "bind" | "rbind" + // | "unbindable" | "runbindable" | "private" | "rprivate" | "shared" | "rshared" + // | "slave" | "rslave" | "relatime" | "norelatime" | "strictatime" | "nostrictatime" + MountOptions []*string `locationName:"mountOptions" type:"list"` + + // The size (in MiB) of the tmpfs volume. + // + // Size is a required field + Size *int64 `locationName:"size" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tmpfs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tmpfs) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tmpfs) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tmpfs"} + if s.ContainerPath == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerPath")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerPath sets the ContainerPath field's value. +func (s *Tmpfs) SetContainerPath(v string) *Tmpfs { + s.ContainerPath = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *Tmpfs) SetMountOptions(v []*string) *Tmpfs { + s.MountOptions = v + return s +} + +// SetSize sets the Size field's value. +func (s *Tmpfs) SetSize(v int64) *Tmpfs { + s.Size = &v + return s +} + +// The ulimit settings to pass to the container. +type Ulimit struct { + _ struct{} `type:"structure"` + + // The hard limit for the ulimit type. 
+ // + // HardLimit is a required field + HardLimit *int64 `locationName:"hardLimit" type:"integer" required:"true"` + + // The type of the ulimit. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true" enum:"UlimitName"` + + // The soft limit for the ulimit type. + // + // SoftLimit is a required field + SoftLimit *int64 `locationName:"softLimit" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Ulimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ulimit) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Ulimit) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Ulimit"} + if s.HardLimit == nil { + invalidParams.Add(request.NewErrParamRequired("HardLimit")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SoftLimit == nil { + invalidParams.Add(request.NewErrParamRequired("SoftLimit")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHardLimit sets the HardLimit field's value. +func (s *Ulimit) SetHardLimit(v int64) *Ulimit { + s.HardLimit = &v + return s +} + +// SetName sets the Name field's value. +func (s *Ulimit) SetName(v string) *Ulimit { + s.Name = &v + return s +} + +// SetSoftLimit sets the SoftLimit field's value. +func (s *Ulimit) SetSoftLimit(v int64) *Ulimit { + s.SoftLimit = &v + return s +} + +// The specified task is not supported in this region. 
+type UnsupportedFeatureException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s UnsupportedFeatureException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UnsupportedFeatureException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnsupportedFeatureException(v protocol.ResponseMetadata) error {
+	return &UnsupportedFeatureException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnsupportedFeatureException) Code() string {
+	return "UnsupportedFeatureException"
+}
+
+// Message returns the exception's message.
+func (s *UnsupportedFeatureException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnsupportedFeatureException) OrigErr() error {
+	return nil
+}
+
+func (s *UnsupportedFeatureException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnsupportedFeatureException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnsupportedFeatureException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type UpdateContainerAgentInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// container instance is running on. If you do not specify a cluster, the default
+	// cluster is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// The container instance ID or full ARN entries for the container instance
+	// on which you would like to update the Amazon ECS container agent.
+ // + // ContainerInstance is a required field + ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateContainerAgentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerAgentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContainerAgentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContainerAgentInput"} + if s.ContainerInstance == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerInstance")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *UpdateContainerAgentInput) SetCluster(v string) *UpdateContainerAgentInput { + s.Cluster = &v + return s +} + +// SetContainerInstance sets the ContainerInstance field's value. +func (s *UpdateContainerAgentInput) SetContainerInstance(v string) *UpdateContainerAgentInput { + s.ContainerInstance = &v + return s +} + +type UpdateContainerAgentOutput struct { + _ struct{} `type:"structure"` + + // The container instance for which the container agent was updated. + ContainerInstance *ContainerInstance `locationName:"containerInstance" type:"structure"` +} + +// String returns the string representation +func (s UpdateContainerAgentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerAgentOutput) GoString() string { + return s.String() +} + +// SetContainerInstance sets the ContainerInstance field's value. 
+func (s *UpdateContainerAgentOutput) SetContainerInstance(v *ContainerInstance) *UpdateContainerAgentOutput { + s.ContainerInstance = v + return s +} + +type UpdateContainerInstancesStateInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the cluster that hosts + // the container instance to update. If you do not specify a cluster, the default + // cluster is assumed. + Cluster *string `locationName:"cluster" type:"string"` + + // A list of container instance IDs or full ARN entries. + // + // ContainerInstances is a required field + ContainerInstances []*string `locationName:"containerInstances" type:"list" required:"true"` + + // The container instance state with which to update the container instance. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"ContainerInstanceStatus"` +} + +// String returns the string representation +func (s UpdateContainerInstancesStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerInstancesStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContainerInstancesStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContainerInstancesStateInput"} + if s.ContainerInstances == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerInstances")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *UpdateContainerInstancesStateInput) SetCluster(v string) *UpdateContainerInstancesStateInput { + s.Cluster = &v + return s +} + +// SetContainerInstances sets the ContainerInstances field's value. 
+func (s *UpdateContainerInstancesStateInput) SetContainerInstances(v []*string) *UpdateContainerInstancesStateInput { + s.ContainerInstances = v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateContainerInstancesStateInput) SetStatus(v string) *UpdateContainerInstancesStateInput { + s.Status = &v + return s +} + +type UpdateContainerInstancesStateOutput struct { + _ struct{} `type:"structure"` + + // The list of container instances. + ContainerInstances []*ContainerInstance `locationName:"containerInstances" type:"list"` + + // Any failures associated with the call. + Failures []*Failure `locationName:"failures" type:"list"` +} + +// String returns the string representation +func (s UpdateContainerInstancesStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContainerInstancesStateOutput) GoString() string { + return s.String() +} + +// SetContainerInstances sets the ContainerInstances field's value. +func (s *UpdateContainerInstancesStateOutput) SetContainerInstances(v []*ContainerInstance) *UpdateContainerInstancesStateOutput { + s.ContainerInstances = v + return s +} + +// SetFailures sets the Failures field's value. +func (s *UpdateContainerInstancesStateOutput) SetFailures(v []*Failure) *UpdateContainerInstancesStateOutput { + s.Failures = v + return s +} + +// There is already a current Amazon ECS container agent update in progress +// on the specified container instance. If the container agent becomes disconnected +// while it is in a transitional stage, such as PENDING or STAGING, the update +// process can get stuck in that state. However, when the agent reconnects, +// it resumes where it stopped previously. 
+type UpdateInProgressException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateInProgressException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateInProgressException) GoString() string {
+	return s.String()
+}
+
+func newErrorUpdateInProgressException(v protocol.ResponseMetadata) error {
+	return &UpdateInProgressException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UpdateInProgressException) Code() string {
+	return "UpdateInProgressException"
+}
+
+// Message returns the exception's message.
+func (s *UpdateInProgressException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UpdateInProgressException) OrigErr() error {
+	return nil
+}
+
+func (s *UpdateInProgressException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UpdateInProgressException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UpdateInProgressException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type UpdateServiceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The short name or full Amazon Resource Name (ARN) of the cluster that your
+	// service is running on. If you do not specify a cluster, the default cluster
+	// is assumed.
+	Cluster *string `locationName:"cluster" type:"string"`
+
+	// Optional deployment parameters that control how many tasks run during the
+	// deployment and the ordering of stopping and starting tasks.
+ DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` + + // The number of instantiations of the task to place and keep running in your + // service. + DesiredCount *int64 `locationName:"desiredCount" type:"integer"` + + // Whether to force a new deployment of the service. Deployments are not forced + // by default. You can use this option to trigger a new deployment with no service + // definition changes. For example, you can update a service's tasks to use + // a newer Docker image with the same image/tag combination (my_image:latest) + // or to roll Fargate tasks onto a newer platform version. + ForceNewDeployment *bool `locationName:"forceNewDeployment" type:"boolean"` + + // The period of time, in seconds, that the Amazon ECS service scheduler should + // ignore unhealthy Elastic Load Balancing target health checks after a task + // has first started. This is only valid if your service is configured to use + // a load balancer. If your service's tasks take a while to start and respond + // to Elastic Load Balancing health checks, you can specify a health check grace + // period of up to 1,800 seconds during which the ECS service scheduler ignores + // the Elastic Load Balancing health check status. This grace period can prevent + // the ECS service scheduler from marking tasks as unhealthy and stopping them + // before they have time to come up. + HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` + + // The network configuration for the service. This parameter is required for + // task definitions that use the awsvpc network mode to receive their own elastic + // network interface, and it is not supported for other network modes. For more + // information, see Task Networking (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) + // in the Amazon Elastic Container Service Developer Guide. 
+ // + // Updating a service to add a subnet to a list of existing subnets does not + // trigger a service deployment. For example, if your network configuration + // change is to keep the existing subnets and simply add another subnet to the + // network configuration, this does not trigger a new service deployment. + NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` + + // The platform version that your service should run. + PlatformVersion *string `locationName:"platformVersion" type:"string"` + + // The name of the service to update. + // + // Service is a required field + Service *string `locationName:"service" type:"string" required:"true"` + + // The family and revision (family:revision) or full ARN of the task definition + // to run in your service. If a revision is not specified, the latest ACTIVE + // revision is used. If you modify the task definition with UpdateService, Amazon + // ECS spawns a task with the new version of the task definition and then stops + // an old task after the new version is running. + TaskDefinition *string `locationName:"taskDefinition" type:"string"` +} + +// String returns the string representation +func (s UpdateServiceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServiceInput"} + if s.Service == nil { + invalidParams.Add(request.NewErrParamRequired("Service")) + } + if s.NetworkConfiguration != nil { + if err := s.NetworkConfiguration.Validate(); err != nil { + invalidParams.AddNested("NetworkConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCluster sets the Cluster field's value. +func (s *UpdateServiceInput) SetCluster(v string) *UpdateServiceInput { + s.Cluster = &v + return s +} + +// SetDeploymentConfiguration sets the DeploymentConfiguration field's value. +func (s *UpdateServiceInput) SetDeploymentConfiguration(v *DeploymentConfiguration) *UpdateServiceInput { + s.DeploymentConfiguration = v + return s +} + +// SetDesiredCount sets the DesiredCount field's value. +func (s *UpdateServiceInput) SetDesiredCount(v int64) *UpdateServiceInput { + s.DesiredCount = &v + return s +} + +// SetForceNewDeployment sets the ForceNewDeployment field's value. +func (s *UpdateServiceInput) SetForceNewDeployment(v bool) *UpdateServiceInput { + s.ForceNewDeployment = &v + return s +} + +// SetHealthCheckGracePeriodSeconds sets the HealthCheckGracePeriodSeconds field's value. +func (s *UpdateServiceInput) SetHealthCheckGracePeriodSeconds(v int64) *UpdateServiceInput { + s.HealthCheckGracePeriodSeconds = &v + return s +} + +// SetNetworkConfiguration sets the NetworkConfiguration field's value. +func (s *UpdateServiceInput) SetNetworkConfiguration(v *NetworkConfiguration) *UpdateServiceInput { + s.NetworkConfiguration = v + return s +} + +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *UpdateServiceInput) SetPlatformVersion(v string) *UpdateServiceInput { + s.PlatformVersion = &v + return s +} + +// SetService sets the Service field's value. 
+func (s *UpdateServiceInput) SetService(v string) *UpdateServiceInput { + s.Service = &v + return s +} + +// SetTaskDefinition sets the TaskDefinition field's value. +func (s *UpdateServiceInput) SetTaskDefinition(v string) *UpdateServiceInput { + s.TaskDefinition = &v + return s +} + +type UpdateServiceOutput struct { + _ struct{} `type:"structure"` + + // The full description of your service following the update call. + Service *Service `locationName:"service" type:"structure"` +} + +// String returns the string representation +func (s UpdateServiceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceOutput) GoString() string { + return s.String() +} + +// SetService sets the Service field's value. +func (s *UpdateServiceOutput) SetService(v *Service) *UpdateServiceOutput { + s.Service = v + return s +} + +// The Docker and Amazon ECS container agent version information about a container +// instance. +type VersionInfo struct { + _ struct{} `type:"structure"` + + // The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent + // (https://github.com/aws/amazon-ecs-agent/commits/master) GitHub repository. + AgentHash *string `locationName:"agentHash" type:"string"` + + // The version number of the Amazon ECS container agent. + AgentVersion *string `locationName:"agentVersion" type:"string"` + + // The Docker version running on the container instance. + DockerVersion *string `locationName:"dockerVersion" type:"string"` +} + +// String returns the string representation +func (s VersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersionInfo) GoString() string { + return s.String() +} + +// SetAgentHash sets the AgentHash field's value. +func (s *VersionInfo) SetAgentHash(v string) *VersionInfo { + s.AgentHash = &v + return s +} + +// SetAgentVersion sets the AgentVersion field's value. 
+func (s *VersionInfo) SetAgentVersion(v string) *VersionInfo { + s.AgentVersion = &v + return s +} + +// SetDockerVersion sets the DockerVersion field's value. +func (s *VersionInfo) SetDockerVersion(v string) *VersionInfo { + s.DockerVersion = &v + return s +} + +// A data volume used in a task definition. +type Volume struct { + _ struct{} `type:"structure"` + + DockerVolumeConfiguration *DockerVolumeConfiguration `locationName:"dockerVolumeConfiguration" type:"structure"` + + EfsVolumeConfiguration *EFSVolumeConfiguration `locationName:"efsVolumeConfiguration" type:"structure"` + + FsxWindowsFileServerVolumeConfiguration *FSxWindowsFileServerVolumeConfiguration `locationName:"fsxWindowsFileServerVolumeConfiguration" type:"structure"` + + // The contents of the host parameter determine whether your data volume persists + // on the host container instance and where it is stored. If the host parameter + // is empty, then the Docker daemon assigns a host path for your data volume, + // but the data is not guaranteed to persist after the containers associated + // with it stop running. + // + // Windows containers can mount whole directories on the same drive as $env:ProgramData. + // Windows containers cannot mount directories on a different drive, and mount + // point cannot be across drives. For example, you can mount C:\my\path:C:\my\path + // and D:\:D:\, but not D:\my\path:C:\my\path or D:\:C:\my\path. + Host *HostVolumeProperties `locationName:"host" type:"structure"` + + // The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. This name is referenced in the sourceVolume + // parameter of container definition mountPoints. 
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s Volume) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Volume) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Volume) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Volume"} + if s.EfsVolumeConfiguration != nil { + if err := s.EfsVolumeConfiguration.Validate(); err != nil { + invalidParams.AddNested("EfsVolumeConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.FsxWindowsFileServerVolumeConfiguration != nil { + if err := s.FsxWindowsFileServerVolumeConfiguration.Validate(); err != nil { + invalidParams.AddNested("FsxWindowsFileServerVolumeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDockerVolumeConfiguration sets the DockerVolumeConfiguration field's value. +func (s *Volume) SetDockerVolumeConfiguration(v *DockerVolumeConfiguration) *Volume { + s.DockerVolumeConfiguration = v + return s +} + +// SetEfsVolumeConfiguration sets the EfsVolumeConfiguration field's value. +func (s *Volume) SetEfsVolumeConfiguration(v *EFSVolumeConfiguration) *Volume { + s.EfsVolumeConfiguration = v + return s +} + +// SetFsxWindowsFileServerVolumeConfiguration sets the FsxWindowsFileServerVolumeConfiguration field's value. +func (s *Volume) SetFsxWindowsFileServerVolumeConfiguration(v *FSxWindowsFileServerVolumeConfiguration) *Volume { + s.FsxWindowsFileServerVolumeConfiguration = v + return s +} + +// SetHost sets the Host field's value. +func (s *Volume) SetHost(v *HostVolumeProperties) *Volume { + s.Host = v + return s +} + +// SetName sets the Name field's value. 
+func (s *Volume) SetName(v string) *Volume { + s.Name = &v + return s +} + +// Details on a data volume from another container in the same task definition. +type VolumeFrom struct { + _ struct{} `type:"structure"` + + // If this value is true, the container has read-only access to the volume. + // If this value is false, then the container can write to the volume. The default + // value is false. + ReadOnly *bool `locationName:"readOnly" type:"boolean"` + + // The name of another container within the same task definition to mount volumes + // from. + SourceContainer *string `locationName:"sourceContainer" type:"string"` +} + +// String returns the string representation +func (s VolumeFrom) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeFrom) GoString() string { + return s.String() +} + +// SetReadOnly sets the ReadOnly field's value. +func (s *VolumeFrom) SetReadOnly(v bool) *VolumeFrom { + s.ReadOnly = &v + return s +} + +// SetSourceContainer sets the SourceContainer field's value. 
+func (s *VolumeFrom) SetSourceContainer(v string) *VolumeFrom { + s.SourceContainer = &v + return s +} + +const ( + // AgentUpdateStatusPending is a AgentUpdateStatus enum value + AgentUpdateStatusPending = "PENDING" + + // AgentUpdateStatusStaging is a AgentUpdateStatus enum value + AgentUpdateStatusStaging = "STAGING" + + // AgentUpdateStatusStaged is a AgentUpdateStatus enum value + AgentUpdateStatusStaged = "STAGED" + + // AgentUpdateStatusUpdating is a AgentUpdateStatus enum value + AgentUpdateStatusUpdating = "UPDATING" + + // AgentUpdateStatusUpdated is a AgentUpdateStatus enum value + AgentUpdateStatusUpdated = "UPDATED" + + // AgentUpdateStatusFailed is a AgentUpdateStatus enum value + AgentUpdateStatusFailed = "FAILED" +) + +// AgentUpdateStatus_Values returns all elements of the AgentUpdateStatus enum +func AgentUpdateStatus_Values() []string { + return []string{ + AgentUpdateStatusPending, + AgentUpdateStatusStaging, + AgentUpdateStatusStaged, + AgentUpdateStatusUpdating, + AgentUpdateStatusUpdated, + AgentUpdateStatusFailed, + } +} + +const ( + // AssignPublicIpEnabled is a AssignPublicIp enum value + AssignPublicIpEnabled = "ENABLED" + + // AssignPublicIpDisabled is a AssignPublicIp enum value + AssignPublicIpDisabled = "DISABLED" +) + +// AssignPublicIp_Values returns all elements of the AssignPublicIp enum +func AssignPublicIp_Values() []string { + return []string{ + AssignPublicIpEnabled, + AssignPublicIpDisabled, + } +} + +const ( + // ClusterFieldStatistics is a ClusterField enum value + ClusterFieldStatistics = "STATISTICS" +) + +// ClusterField_Values returns all elements of the ClusterField enum +func ClusterField_Values() []string { + return []string{ + ClusterFieldStatistics, + } +} + +const ( + // CompatibilityEc2 is a Compatibility enum value + CompatibilityEc2 = "EC2" + + // CompatibilityFargate is a Compatibility enum value + CompatibilityFargate = "FARGATE" +) + +// Compatibility_Values returns all elements of the Compatibility enum 
+func Compatibility_Values() []string { + return []string{ + CompatibilityEc2, + CompatibilityFargate, + } +} + +const ( + // ConnectivityConnected is a Connectivity enum value + ConnectivityConnected = "CONNECTED" + + // ConnectivityDisconnected is a Connectivity enum value + ConnectivityDisconnected = "DISCONNECTED" +) + +// Connectivity_Values returns all elements of the Connectivity enum +func Connectivity_Values() []string { + return []string{ + ConnectivityConnected, + ConnectivityDisconnected, + } +} + +const ( + // ContainerConditionStart is a ContainerCondition enum value + ContainerConditionStart = "START" + + // ContainerConditionComplete is a ContainerCondition enum value + ContainerConditionComplete = "COMPLETE" + + // ContainerConditionSuccess is a ContainerCondition enum value + ContainerConditionSuccess = "SUCCESS" + + // ContainerConditionHealthy is a ContainerCondition enum value + ContainerConditionHealthy = "HEALTHY" +) + +// ContainerCondition_Values returns all elements of the ContainerCondition enum +func ContainerCondition_Values() []string { + return []string{ + ContainerConditionStart, + ContainerConditionComplete, + ContainerConditionSuccess, + ContainerConditionHealthy, + } +} + +const ( + // ContainerInstanceStatusActive is a ContainerInstanceStatus enum value + ContainerInstanceStatusActive = "ACTIVE" + + // ContainerInstanceStatusDraining is a ContainerInstanceStatus enum value + ContainerInstanceStatusDraining = "DRAINING" +) + +// ContainerInstanceStatus_Values returns all elements of the ContainerInstanceStatus enum +func ContainerInstanceStatus_Values() []string { + return []string{ + ContainerInstanceStatusActive, + ContainerInstanceStatusDraining, + } +} + +const ( + // DesiredStatusRunning is a DesiredStatus enum value + DesiredStatusRunning = "RUNNING" + + // DesiredStatusPending is a DesiredStatus enum value + DesiredStatusPending = "PENDING" + + // DesiredStatusStopped is a DesiredStatus enum value + DesiredStatusStopped = 
"STOPPED" +) + +// DesiredStatus_Values returns all elements of the DesiredStatus enum +func DesiredStatus_Values() []string { + return []string{ + DesiredStatusRunning, + DesiredStatusPending, + DesiredStatusStopped, + } +} + +const ( + // DeviceCgroupPermissionRead is a DeviceCgroupPermission enum value + DeviceCgroupPermissionRead = "read" + + // DeviceCgroupPermissionWrite is a DeviceCgroupPermission enum value + DeviceCgroupPermissionWrite = "write" + + // DeviceCgroupPermissionMknod is a DeviceCgroupPermission enum value + DeviceCgroupPermissionMknod = "mknod" +) + +// DeviceCgroupPermission_Values returns all elements of the DeviceCgroupPermission enum +func DeviceCgroupPermission_Values() []string { + return []string{ + DeviceCgroupPermissionRead, + DeviceCgroupPermissionWrite, + DeviceCgroupPermissionMknod, + } +} + +const ( + // EFSAuthorizationConfigIAMEnabled is a EFSAuthorizationConfigIAM enum value + EFSAuthorizationConfigIAMEnabled = "ENABLED" + + // EFSAuthorizationConfigIAMDisabled is a EFSAuthorizationConfigIAM enum value + EFSAuthorizationConfigIAMDisabled = "DISABLED" +) + +// EFSAuthorizationConfigIAM_Values returns all elements of the EFSAuthorizationConfigIAM enum +func EFSAuthorizationConfigIAM_Values() []string { + return []string{ + EFSAuthorizationConfigIAMEnabled, + EFSAuthorizationConfigIAMDisabled, + } +} + +const ( + // EFSTransitEncryptionEnabled is a EFSTransitEncryption enum value + EFSTransitEncryptionEnabled = "ENABLED" + + // EFSTransitEncryptionDisabled is a EFSTransitEncryption enum value + EFSTransitEncryptionDisabled = "DISABLED" +) + +// EFSTransitEncryption_Values returns all elements of the EFSTransitEncryption enum +func EFSTransitEncryption_Values() []string { + return []string{ + EFSTransitEncryptionEnabled, + EFSTransitEncryptionDisabled, + } +} + +const ( + // FirelensConfigurationTypeFluentd is a FirelensConfigurationType enum value + FirelensConfigurationTypeFluentd = "fluentd" + + // 
FirelensConfigurationTypeFluentbit is a FirelensConfigurationType enum value + FirelensConfigurationTypeFluentbit = "fluentbit" +) + +// FirelensConfigurationType_Values returns all elements of the FirelensConfigurationType enum +func FirelensConfigurationType_Values() []string { + return []string{ + FirelensConfigurationTypeFluentd, + FirelensConfigurationTypeFluentbit, + } +} + +const ( + // HealthStatusHealthy is a HealthStatus enum value + HealthStatusHealthy = "HEALTHY" + + // HealthStatusUnhealthy is a HealthStatus enum value + HealthStatusUnhealthy = "UNHEALTHY" + + // HealthStatusUnknown is a HealthStatus enum value + HealthStatusUnknown = "UNKNOWN" +) + +// HealthStatus_Values returns all elements of the HealthStatus enum +func HealthStatus_Values() []string { + return []string{ + HealthStatusHealthy, + HealthStatusUnhealthy, + HealthStatusUnknown, + } +} + +const ( + // IpcModeHost is a IpcMode enum value + IpcModeHost = "host" + + // IpcModeTask is a IpcMode enum value + IpcModeTask = "task" + + // IpcModeNone is a IpcMode enum value + IpcModeNone = "none" +) + +// IpcMode_Values returns all elements of the IpcMode enum +func IpcMode_Values() []string { + return []string{ + IpcModeHost, + IpcModeTask, + IpcModeNone, + } +} + +const ( + // LaunchTypeEc2 is a LaunchType enum value + LaunchTypeEc2 = "EC2" + + // LaunchTypeFargate is a LaunchType enum value + LaunchTypeFargate = "FARGATE" +) + +// LaunchType_Values returns all elements of the LaunchType enum +func LaunchType_Values() []string { + return []string{ + LaunchTypeEc2, + LaunchTypeFargate, + } +} + +const ( + // LogDriverJsonFile is a LogDriver enum value + LogDriverJsonFile = "json-file" + + // LogDriverSyslog is a LogDriver enum value + LogDriverSyslog = "syslog" + + // LogDriverJournald is a LogDriver enum value + LogDriverJournald = "journald" + + // LogDriverGelf is a LogDriver enum value + LogDriverGelf = "gelf" + + // LogDriverFluentd is a LogDriver enum value + LogDriverFluentd = "fluentd" 
+ + // LogDriverAwslogs is a LogDriver enum value + LogDriverAwslogs = "awslogs" + + // LogDriverSplunk is a LogDriver enum value + LogDriverSplunk = "splunk" + + // LogDriverAwsfirelens is a LogDriver enum value + LogDriverAwsfirelens = "awsfirelens" +) + +// LogDriver_Values returns all elements of the LogDriver enum +func LogDriver_Values() []string { + return []string{ + LogDriverJsonFile, + LogDriverSyslog, + LogDriverJournald, + LogDriverGelf, + LogDriverFluentd, + LogDriverAwslogs, + LogDriverSplunk, + LogDriverAwsfirelens, + } +} + +const ( + // ManagedAgentNameExecuteCommandAgent is a ManagedAgentName enum value + ManagedAgentNameExecuteCommandAgent = "ExecuteCommandAgent" +) + +// ManagedAgentName_Values returns all elements of the ManagedAgentName enum +func ManagedAgentName_Values() []string { + return []string{ + ManagedAgentNameExecuteCommandAgent, + } +} + +const ( + // NetworkModeBridge is a NetworkMode enum value + NetworkModeBridge = "bridge" + + // NetworkModeHost is a NetworkMode enum value + NetworkModeHost = "host" + + // NetworkModeAwsvpc is a NetworkMode enum value + NetworkModeAwsvpc = "awsvpc" + + // NetworkModeNone is a NetworkMode enum value + NetworkModeNone = "none" +) + +// NetworkMode_Values returns all elements of the NetworkMode enum +func NetworkMode_Values() []string { + return []string{ + NetworkModeBridge, + NetworkModeHost, + NetworkModeAwsvpc, + NetworkModeNone, + } +} + +const ( + // PidModeHost is a PidMode enum value + PidModeHost = "host" + + // PidModeTask is a PidMode enum value + PidModeTask = "task" +) + +// PidMode_Values returns all elements of the PidMode enum +func PidMode_Values() []string { + return []string{ + PidModeHost, + PidModeTask, + } +} + +const ( + // PlacementConstraintTypeDistinctInstance is a PlacementConstraintType enum value + PlacementConstraintTypeDistinctInstance = "distinctInstance" + + // PlacementConstraintTypeMemberOf is a PlacementConstraintType enum value + PlacementConstraintTypeMemberOf 
= "memberOf" +) + +// PlacementConstraintType_Values returns all elements of the PlacementConstraintType enum +func PlacementConstraintType_Values() []string { + return []string{ + PlacementConstraintTypeDistinctInstance, + PlacementConstraintTypeMemberOf, + } +} + +const ( + // PlacementStrategyTypeRandom is a PlacementStrategyType enum value + PlacementStrategyTypeRandom = "random" + + // PlacementStrategyTypeSpread is a PlacementStrategyType enum value + PlacementStrategyTypeSpread = "spread" + + // PlacementStrategyTypeBinpack is a PlacementStrategyType enum value + PlacementStrategyTypeBinpack = "binpack" +) + +// PlacementStrategyType_Values returns all elements of the PlacementStrategyType enum +func PlacementStrategyType_Values() []string { + return []string{ + PlacementStrategyTypeRandom, + PlacementStrategyTypeSpread, + PlacementStrategyTypeBinpack, + } +} + +const ( + // PlatformDeviceTypeGpu is a PlatformDeviceType enum value + PlatformDeviceTypeGpu = "GPU" +) + +// PlatformDeviceType_Values returns all elements of the PlatformDeviceType enum +func PlatformDeviceType_Values() []string { + return []string{ + PlatformDeviceTypeGpu, + } +} + +const ( + // ProxyConfigurationTypeAppmesh is a ProxyConfigurationType enum value + ProxyConfigurationTypeAppmesh = "APPMESH" +) + +// ProxyConfigurationType_Values returns all elements of the ProxyConfigurationType enum +func ProxyConfigurationType_Values() []string { + return []string{ + ProxyConfigurationTypeAppmesh, + } +} + +const ( + // ResourceTypeGpu is a ResourceType enum value + ResourceTypeGpu = "GPU" +) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeGpu, + } +} + +const ( + // SchedulingStrategyReplica is a SchedulingStrategy enum value + SchedulingStrategyReplica = "REPLICA" + + // SchedulingStrategyDaemon is a SchedulingStrategy enum value + SchedulingStrategyDaemon = "DAEMON" +) + +// 
SchedulingStrategy_Values returns all elements of the SchedulingStrategy enum +func SchedulingStrategy_Values() []string { + return []string{ + SchedulingStrategyReplica, + SchedulingStrategyDaemon, + } +} + +const ( + // ScopeTask is a Scope enum value + ScopeTask = "task" + + // ScopeShared is a Scope enum value + ScopeShared = "shared" +) + +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeTask, + ScopeShared, + } +} + +const ( + // SettingNameServiceLongArnFormat is a SettingName enum value + SettingNameServiceLongArnFormat = "serviceLongArnFormat" + + // SettingNameTaskLongArnFormat is a SettingName enum value + SettingNameTaskLongArnFormat = "taskLongArnFormat" + + // SettingNameContainerInstanceLongArnFormat is a SettingName enum value + SettingNameContainerInstanceLongArnFormat = "containerInstanceLongArnFormat" +) + +// SettingName_Values returns all elements of the SettingName enum +func SettingName_Values() []string { + return []string{ + SettingNameServiceLongArnFormat, + SettingNameTaskLongArnFormat, + SettingNameContainerInstanceLongArnFormat, + } +} + +const ( + // SortOrderAsc is a SortOrder enum value + SortOrderAsc = "ASC" + + // SortOrderDesc is a SortOrder enum value + SortOrderDesc = "DESC" +) + +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAsc, + SortOrderDesc, + } +} + +const ( + // TargetTypeContainerInstance is a TargetType enum value + TargetTypeContainerInstance = "container-instance" +) + +// TargetType_Values returns all elements of the TargetType enum +func TargetType_Values() []string { + return []string{ + TargetTypeContainerInstance, + } +} + +const ( + // TaskDefinitionFamilyStatusActive is a TaskDefinitionFamilyStatus enum value + TaskDefinitionFamilyStatusActive = "ACTIVE" + + // TaskDefinitionFamilyStatusInactive is a TaskDefinitionFamilyStatus enum value + 
TaskDefinitionFamilyStatusInactive = "INACTIVE" + + // TaskDefinitionFamilyStatusAll is a TaskDefinitionFamilyStatus enum value + TaskDefinitionFamilyStatusAll = "ALL" +) + +// TaskDefinitionFamilyStatus_Values returns all elements of the TaskDefinitionFamilyStatus enum +func TaskDefinitionFamilyStatus_Values() []string { + return []string{ + TaskDefinitionFamilyStatusActive, + TaskDefinitionFamilyStatusInactive, + TaskDefinitionFamilyStatusAll, + } +} + +const ( + // TaskDefinitionPlacementConstraintTypeMemberOf is a TaskDefinitionPlacementConstraintType enum value + TaskDefinitionPlacementConstraintTypeMemberOf = "memberOf" +) + +// TaskDefinitionPlacementConstraintType_Values returns all elements of the TaskDefinitionPlacementConstraintType enum +func TaskDefinitionPlacementConstraintType_Values() []string { + return []string{ + TaskDefinitionPlacementConstraintTypeMemberOf, + } +} + +const ( + // TaskDefinitionStatusActive is a TaskDefinitionStatus enum value + TaskDefinitionStatusActive = "ACTIVE" + + // TaskDefinitionStatusInactive is a TaskDefinitionStatus enum value + TaskDefinitionStatusInactive = "INACTIVE" +) + +// TaskDefinitionStatus_Values returns all elements of the TaskDefinitionStatus enum +func TaskDefinitionStatus_Values() []string { + return []string{ + TaskDefinitionStatusActive, + TaskDefinitionStatusInactive, + } +} + +const ( + // TransportProtocolTcp is a TransportProtocol enum value + TransportProtocolTcp = "tcp" + + // TransportProtocolUdp is a TransportProtocol enum value + TransportProtocolUdp = "udp" +) + +// TransportProtocol_Values returns all elements of the TransportProtocol enum +func TransportProtocol_Values() []string { + return []string{ + TransportProtocolTcp, + TransportProtocolUdp, + } +} + +const ( + // UlimitNameCore is a UlimitName enum value + UlimitNameCore = "core" + + // UlimitNameCpu is a UlimitName enum value + UlimitNameCpu = "cpu" + + // UlimitNameData is a UlimitName enum value + UlimitNameData = "data" + + // 
UlimitNameFsize is a UlimitName enum value + UlimitNameFsize = "fsize" + + // UlimitNameLocks is a UlimitName enum value + UlimitNameLocks = "locks" + + // UlimitNameMemlock is a UlimitName enum value + UlimitNameMemlock = "memlock" + + // UlimitNameMsgqueue is a UlimitName enum value + UlimitNameMsgqueue = "msgqueue" + + // UlimitNameNice is a UlimitName enum value + UlimitNameNice = "nice" + + // UlimitNameNofile is a UlimitName enum value + UlimitNameNofile = "nofile" + + // UlimitNameNproc is a UlimitName enum value + UlimitNameNproc = "nproc" + + // UlimitNameRss is a UlimitName enum value + UlimitNameRss = "rss" + + // UlimitNameRtprio is a UlimitName enum value + UlimitNameRtprio = "rtprio" + + // UlimitNameRttime is a UlimitName enum value + UlimitNameRttime = "rttime" + + // UlimitNameSigpending is a UlimitName enum value + UlimitNameSigpending = "sigpending" + + // UlimitNameStack is a UlimitName enum value + UlimitNameStack = "stack" +) + +// UlimitName_Values returns all elements of the UlimitName enum +func UlimitName_Values() []string { + return []string{ + UlimitNameCore, + UlimitNameCpu, + UlimitNameData, + UlimitNameFsize, + UlimitNameLocks, + UlimitNameMemlock, + UlimitNameMsgqueue, + UlimitNameNice, + UlimitNameNofile, + UlimitNameNproc, + UlimitNameRss, + UlimitNameRtprio, + UlimitNameRttime, + UlimitNameSigpending, + UlimitNameStack, + } +} diff --git a/agent/ecs_client/model/ecs/errors.go b/agent/ecs_client/model/ecs/errors.go new file mode 100644 index 00000000000..0a06c5366ec --- /dev/null +++ b/agent/ecs_client/model/ecs/errors.go @@ -0,0 +1,184 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecs + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have authorization to perform the requested action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAttributeLimitExceededException for service response error code + // "AttributeLimitExceededException". + // + // You can apply up to 10 custom attributes per resource. You can view the attributes + // of a resource with ListAttributes. You can remove existing attributes on + // a resource with DeleteAttributes. + ErrCodeAttributeLimitExceededException = "AttributeLimitExceededException" + + // ErrCodeBlockedException for service response error code + // "BlockedException". + // + // Your AWS account has been blocked. Contact AWS Support (http://aws.amazon.com/contact-us/) + // for more information. + ErrCodeBlockedException = "BlockedException" + + // ErrCodeClientException for service response error code + // "ClientException". + // + // These errors are usually caused by a client action, such as using an action + // or resource on behalf of a user that doesn't have permissions to use the + // action or resource, or specifying an identifier that is not valid. + ErrCodeClientException = "ClientException" + + // ErrCodeClusterContainsContainerInstancesException for service response error code + // "ClusterContainsContainerInstancesException". + // + // You cannot delete a cluster that has registered container instances. You + // must first deregister the container instances before you can delete the cluster. 
+ // For more information, see DeregisterContainerInstance. + ErrCodeClusterContainsContainerInstancesException = "ClusterContainsContainerInstancesException" + + // ErrCodeClusterContainsServicesException for service response error code + // "ClusterContainsServicesException". + // + // You cannot delete a cluster that contains services. You must first update + // the service to reduce its desired task count to 0 and then delete the service. + // For more information, see UpdateService and DeleteService. + ErrCodeClusterContainsServicesException = "ClusterContainsServicesException" + + // ErrCodeClusterContainsTasksException for service response error code + // "ClusterContainsTasksException". + // + // You cannot delete a cluster that has active tasks. + ErrCodeClusterContainsTasksException = "ClusterContainsTasksException" + + // ErrCodeClusterNotFoundException for service response error code + // "ClusterNotFoundException". + // + // The specified cluster could not be found. You can view your available clusters + // with ListClusters. Amazon ECS clusters are region-specific. + ErrCodeClusterNotFoundException = "ClusterNotFoundException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // The specified parameter is invalid. Review the available parameters for the + // API request. + ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeMissingVersionException for service response error code + // "MissingVersionException". + // + // Amazon ECS is unable to determine the current version of the Amazon ECS container + // agent on the container instance and does not have enough information to proceed + // with an update. This could be because the agent running on the container + // instance is an older or custom version that does not use our version information. 
+ ErrCodeMissingVersionException = "MissingVersionException" + + // ErrCodeNoUpdateAvailableException for service response error code + // "NoUpdateAvailableException". + // + // There is no update available for this Amazon ECS container agent. This could + // be because the agent is already running the latest version, or it is so old + // that there is no update path to the current version. + ErrCodeNoUpdateAvailableException = "NoUpdateAvailableException" + + // ErrCodePlatformTaskDefinitionIncompatibilityException for service response error code + // "PlatformTaskDefinitionIncompatibilityException". + // + // The specified platform version does not satisfy the task definition's required + // capabilities. + ErrCodePlatformTaskDefinitionIncompatibilityException = "PlatformTaskDefinitionIncompatibilityException" + + // ErrCodePlatformUnknownException for service response error code + // "PlatformUnknownException". + // + // The specified platform version does not exist. + ErrCodePlatformUnknownException = "PlatformUnknownException" + + // ErrCodeServerException for service response error code + // "ServerException". + // + // These errors are usually caused by a server issue. + ErrCodeServerException = "ServerException" + + // ErrCodeServiceNotActiveException for service response error code + // "ServiceNotActiveException". + // + // The specified service is not active. You can't update a service that is inactive. + // If you have previously deleted a service, you can re-create it with CreateService. + ErrCodeServiceNotActiveException = "ServiceNotActiveException" + + // ErrCodeServiceNotFoundException for service response error code + // "ServiceNotFoundException". + // + // The specified service could not be found. You can view your available services + // with ListServices. Amazon ECS services are cluster-specific and region-specific. 
+ ErrCodeServiceNotFoundException = "ServiceNotFoundException" + + // ErrCodeTargetNotFoundException for service response error code + // "TargetNotFoundException". + // + // The specified target could not be found. You can view your available container + // instances with ListContainerInstances. Amazon ECS container instances are + // cluster-specific and region-specific. + ErrCodeTargetNotFoundException = "TargetNotFoundException" + + // ErrCodeUnsupportedFeatureException for service response error code + // "UnsupportedFeatureException". + // + // The specified task is not supported in this region. + ErrCodeUnsupportedFeatureException = "UnsupportedFeatureException" + + // ErrCodeUpdateInProgressException for service response error code + // "UpdateInProgressException". + // + // There is already a current Amazon ECS container agent update in progress + // on the specified container instance. If the container agent becomes disconnected + // while it is in a transitional stage, such as PENDING or STAGING, the update + // process can get stuck in that state. However, when the agent reconnects, + // it resumes where it stopped previously. 
+ ErrCodeUpdateInProgressException = "UpdateInProgressException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "AttributeLimitExceededException": newErrorAttributeLimitExceededException, + "BlockedException": newErrorBlockedException, + "ClientException": newErrorClientException, + "ClusterContainsContainerInstancesException": newErrorClusterContainsContainerInstancesException, + "ClusterContainsServicesException": newErrorClusterContainsServicesException, + "ClusterContainsTasksException": newErrorClusterContainsTasksException, + "ClusterNotFoundException": newErrorClusterNotFoundException, + "InvalidParameterException": newErrorInvalidParameterException, + "MissingVersionException": newErrorMissingVersionException, + "NoUpdateAvailableException": newErrorNoUpdateAvailableException, + "PlatformTaskDefinitionIncompatibilityException": newErrorPlatformTaskDefinitionIncompatibilityException, + "PlatformUnknownException": newErrorPlatformUnknownException, + "ServerException": newErrorServerException, + "ServiceNotActiveException": newErrorServiceNotActiveException, + "ServiceNotFoundException": newErrorServiceNotFoundException, + "TargetNotFoundException": newErrorTargetNotFoundException, + "UnsupportedFeatureException": newErrorUnsupportedFeatureException, + "UpdateInProgressException": newErrorUpdateInProgressException, +} diff --git a/agent/ecs_client/model/ecs/service.go b/agent/ecs_client/model/ecs/service.go new file mode 100644 index 00000000000..0dc4c59775c --- /dev/null +++ b/agent/ecs_client/model/ecs/service.go @@ -0,0 +1,116 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// ECS provides the API operation methods for making requests to +// Amazon Elastic Container Service. See this package's package overview docs +// for details on the service. +// +// ECS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type ECS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ecs" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "ECS" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the ECS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a ECS client from just a session. 
+// svc := ecs.New(mySession) +// +// // Create a ECS client with additional configuration +// svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECS { + svc := &ECS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2014-11-13", + JSONVersion: "1.1", + TargetPrefix: "AmazonEC2ContainerServiceV20141113", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ECS operation and runs any +// custom request initialization. 
+func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/ecs_client/model/generate.go b/agent/ecs_client/model/generate.go new file mode 100644 index 00000000000..80d6bb73084 --- /dev/null +++ b/agent/ecs_client/model/generate.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package model + +// codegen tag required by AWS SDK generators +//go:generate go run -tags codegen ../../gogenerate/awssdk.go -typesOnly=false -copyright_file ../../../scripts/copyright_file diff --git a/agent/ecscni/generate_mocks.go b/agent/ecscni/generate_mocks.go new file mode 100644 index 00000000000..6b23ef31819 --- /dev/null +++ b/agent/ecscni/generate_mocks.go @@ -0,0 +1,18 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package ecscni + +//go:generate mockgen -destination=mocks/ecscni_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ecscni CNIClient +//go:generate mockgen -destination=mocks_libcni/libcni_mocks.go -copyright_file=../../scripts/copyright_file github.com/containernetworking/cni/libcni CNI +//go:generate mockgen -destination=mocks_cnitypes/result_mocks.go -copyright_file=../../scripts/copyright_file github.com/containernetworking/cni/pkg/types Result diff --git a/agent/ecscni/mocks/ecscni_mocks.go b/agent/ecscni/mocks/ecscni_mocks.go new file mode 100644 index 00000000000..99da37cc5b9 --- /dev/null +++ b/agent/ecscni/mocks/ecscni_mocks.go @@ -0,0 +1,125 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ecscni (interfaces: CNIClient) + +// Package mock_ecscni is a generated GoMock package. 
+package mock_ecscni + +import ( + context "context" + reflect "reflect" + time "time" + + ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni" + current "github.com/containernetworking/cni/pkg/types/current" + gomock "github.com/golang/mock/gomock" +) + +// MockCNIClient is a mock of CNIClient interface +type MockCNIClient struct { + ctrl *gomock.Controller + recorder *MockCNIClientMockRecorder +} + +// MockCNIClientMockRecorder is the mock recorder for MockCNIClient +type MockCNIClientMockRecorder struct { + mock *MockCNIClient +} + +// NewMockCNIClient creates a new mock instance +func NewMockCNIClient(ctrl *gomock.Controller) *MockCNIClient { + mock := &MockCNIClient{ctrl: ctrl} + mock.recorder = &MockCNIClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCNIClient) EXPECT() *MockCNIClientMockRecorder { + return m.recorder +} + +// Capabilities mocks base method +func (m *MockCNIClient) Capabilities(arg0 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Capabilities", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Capabilities indicates an expected call of Capabilities +func (mr *MockCNIClientMockRecorder) Capabilities(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Capabilities", reflect.TypeOf((*MockCNIClient)(nil).Capabilities), arg0) +} + +// CleanupNS mocks base method +func (m *MockCNIClient) CleanupNS(arg0 context.Context, arg1 *ecscni.Config, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CleanupNS", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CleanupNS indicates an expected call of CleanupNS +func (mr *MockCNIClientMockRecorder) CleanupNS(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupNS", 
reflect.TypeOf((*MockCNIClient)(nil).CleanupNS), arg0, arg1, arg2) +} + +// ReleaseIPResource mocks base method +func (m *MockCNIClient) ReleaseIPResource(arg0 context.Context, arg1 *ecscni.Config, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseIPResource", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReleaseIPResource indicates an expected call of ReleaseIPResource +func (mr *MockCNIClientMockRecorder) ReleaseIPResource(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseIPResource", reflect.TypeOf((*MockCNIClient)(nil).ReleaseIPResource), arg0, arg1, arg2) +} + +// SetupNS mocks base method +func (m *MockCNIClient) SetupNS(arg0 context.Context, arg1 *ecscni.Config, arg2 time.Duration) (*current.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetupNS", arg0, arg1, arg2) + ret0, _ := ret[0].(*current.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetupNS indicates an expected call of SetupNS +func (mr *MockCNIClientMockRecorder) SetupNS(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupNS", reflect.TypeOf((*MockCNIClient)(nil).SetupNS), arg0, arg1, arg2) +} + +// Version mocks base method +func (m *MockCNIClient) Version(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version +func (mr *MockCNIClientMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockCNIClient)(nil).Version), arg0) +} diff --git a/agent/ecscni/mocks_cnitypes/result_mocks.go b/agent/ecscni/mocks_cnitypes/result_mocks.go new file mode 100644 index 
00000000000..a2d18b09d11 --- /dev/null +++ b/agent/ecscni/mocks_cnitypes/result_mocks.go @@ -0,0 +1,121 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/containernetworking/cni/pkg/types (interfaces: Result) + +// Package mock_types is a generated GoMock package. +package mock_types + +import ( + io "io" + reflect "reflect" + + types "github.com/containernetworking/cni/pkg/types" + gomock "github.com/golang/mock/gomock" +) + +// MockResult is a mock of Result interface +type MockResult struct { + ctrl *gomock.Controller + recorder *MockResultMockRecorder +} + +// MockResultMockRecorder is the mock recorder for MockResult +type MockResultMockRecorder struct { + mock *MockResult +} + +// NewMockResult creates a new mock instance +func NewMockResult(ctrl *gomock.Controller) *MockResult { + mock := &MockResult{ctrl: ctrl} + mock.recorder = &MockResultMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockResult) EXPECT() *MockResultMockRecorder { + return m.recorder +} + +// GetAsVersion mocks base method +func (m *MockResult) GetAsVersion(arg0 string) (types.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAsVersion", arg0) + ret0, _ := ret[0].(types.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAsVersion indicates an expected call of GetAsVersion +func (mr 
*MockResultMockRecorder) GetAsVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAsVersion", reflect.TypeOf((*MockResult)(nil).GetAsVersion), arg0) +} + +// Print mocks base method +func (m *MockResult) Print() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Print") + ret0, _ := ret[0].(error) + return ret0 +} + +// Print indicates an expected call of Print +func (mr *MockResultMockRecorder) Print() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockResult)(nil).Print)) +} + +// PrintTo mocks base method +func (m *MockResult) PrintTo(arg0 io.Writer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrintTo", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrintTo indicates an expected call of PrintTo +func (mr *MockResultMockRecorder) PrintTo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrintTo", reflect.TypeOf((*MockResult)(nil).PrintTo), arg0) +} + +// String mocks base method +func (m *MockResult) String() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "String") + ret0, _ := ret[0].(string) + return ret0 +} + +// String indicates an expected call of String +func (mr *MockResultMockRecorder) String() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockResult)(nil).String)) +} + +// Version mocks base method +func (m *MockResult) Version() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(string) + return ret0 +} + +// Version indicates an expected call of Version +func (mr *MockResultMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockResult)(nil).Version)) +} diff --git a/agent/ecscni/mocks_libcni/libcni_mocks.go 
b/agent/ecscni/mocks_libcni/libcni_mocks.go new file mode 100644 index 00000000000..e75ceb98aa4 --- /dev/null +++ b/agent/ecscni/mocks_libcni/libcni_mocks.go @@ -0,0 +1,197 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/containernetworking/cni/libcni (interfaces: CNI) + +// Package mock_libcni is a generated GoMock package. +package mock_libcni + +import ( + context "context" + reflect "reflect" + + libcni "github.com/containernetworking/cni/libcni" + types "github.com/containernetworking/cni/pkg/types" + gomock "github.com/golang/mock/gomock" +) + +// MockCNI is a mock of CNI interface +type MockCNI struct { + ctrl *gomock.Controller + recorder *MockCNIMockRecorder +} + +// MockCNIMockRecorder is the mock recorder for MockCNI +type MockCNIMockRecorder struct { + mock *MockCNI +} + +// NewMockCNI creates a new mock instance +func NewMockCNI(ctrl *gomock.Controller) *MockCNI { + mock := &MockCNI{ctrl: ctrl} + mock.recorder = &MockCNIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCNI) EXPECT() *MockCNIMockRecorder { + return m.recorder +} + +// AddNetwork mocks base method +func (m *MockCNI) AddNetwork(arg0 context.Context, arg1 *libcni.NetworkConfig, arg2 *libcni.RuntimeConf) (types.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNetwork", arg0, arg1, arg2) + ret0, _ := 
ret[0].(types.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddNetwork indicates an expected call of AddNetwork +func (mr *MockCNIMockRecorder) AddNetwork(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNetwork", reflect.TypeOf((*MockCNI)(nil).AddNetwork), arg0, arg1, arg2) +} + +// AddNetworkList mocks base method +func (m *MockCNI) AddNetworkList(arg0 context.Context, arg1 *libcni.NetworkConfigList, arg2 *libcni.RuntimeConf) (types.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddNetworkList", arg0, arg1, arg2) + ret0, _ := ret[0].(types.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddNetworkList indicates an expected call of AddNetworkList +func (mr *MockCNIMockRecorder) AddNetworkList(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNetworkList", reflect.TypeOf((*MockCNI)(nil).AddNetworkList), arg0, arg1, arg2) +} + +// CheckNetwork mocks base method +func (m *MockCNI) CheckNetwork(arg0 context.Context, arg1 *libcni.NetworkConfig, arg2 *libcni.RuntimeConf) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckNetwork", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CheckNetwork indicates an expected call of CheckNetwork +func (mr *MockCNIMockRecorder) CheckNetwork(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckNetwork", reflect.TypeOf((*MockCNI)(nil).CheckNetwork), arg0, arg1, arg2) +} + +// CheckNetworkList mocks base method +func (m *MockCNI) CheckNetworkList(arg0 context.Context, arg1 *libcni.NetworkConfigList, arg2 *libcni.RuntimeConf) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckNetworkList", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CheckNetworkList indicates an expected call of 
CheckNetworkList +func (mr *MockCNIMockRecorder) CheckNetworkList(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckNetworkList", reflect.TypeOf((*MockCNI)(nil).CheckNetworkList), arg0, arg1, arg2) +} + +// DelNetwork mocks base method +func (m *MockCNI) DelNetwork(arg0 context.Context, arg1 *libcni.NetworkConfig, arg2 *libcni.RuntimeConf) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DelNetwork", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DelNetwork indicates an expected call of DelNetwork +func (mr *MockCNIMockRecorder) DelNetwork(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DelNetwork", reflect.TypeOf((*MockCNI)(nil).DelNetwork), arg0, arg1, arg2) +} + +// DelNetworkList mocks base method +func (m *MockCNI) DelNetworkList(arg0 context.Context, arg1 *libcni.NetworkConfigList, arg2 *libcni.RuntimeConf) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DelNetworkList", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DelNetworkList indicates an expected call of DelNetworkList +func (mr *MockCNIMockRecorder) DelNetworkList(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DelNetworkList", reflect.TypeOf((*MockCNI)(nil).DelNetworkList), arg0, arg1, arg2) +} + +// GetNetworkCachedResult mocks base method +func (m *MockCNI) GetNetworkCachedResult(arg0 *libcni.NetworkConfig, arg1 *libcni.RuntimeConf) (types.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkCachedResult", arg0, arg1) + ret0, _ := ret[0].(types.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkCachedResult indicates an expected call of GetNetworkCachedResult +func (mr *MockCNIMockRecorder) GetNetworkCachedResult(arg0, arg1 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkCachedResult", reflect.TypeOf((*MockCNI)(nil).GetNetworkCachedResult), arg0, arg1) +} + +// GetNetworkListCachedResult mocks base method +func (m *MockCNI) GetNetworkListCachedResult(arg0 *libcni.NetworkConfigList, arg1 *libcni.RuntimeConf) (types.Result, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkListCachedResult", arg0, arg1) + ret0, _ := ret[0].(types.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNetworkListCachedResult indicates an expected call of GetNetworkListCachedResult +func (mr *MockCNIMockRecorder) GetNetworkListCachedResult(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkListCachedResult", reflect.TypeOf((*MockCNI)(nil).GetNetworkListCachedResult), arg0, arg1) +} + +// ValidateNetwork mocks base method +func (m *MockCNI) ValidateNetwork(arg0 context.Context, arg1 *libcni.NetworkConfig) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateNetwork", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateNetwork indicates an expected call of ValidateNetwork +func (mr *MockCNIMockRecorder) ValidateNetwork(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetwork", reflect.TypeOf((*MockCNI)(nil).ValidateNetwork), arg0, arg1) +} + +// ValidateNetworkList mocks base method +func (m *MockCNI) ValidateNetworkList(arg0 context.Context, arg1 *libcni.NetworkConfigList) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateNetworkList", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateNetworkList indicates an expected call of ValidateNetworkList +func (mr *MockCNIMockRecorder) ValidateNetworkList(arg0, arg1 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkList", reflect.TypeOf((*MockCNI)(nil).ValidateNetworkList), arg0, arg1) +} diff --git a/agent/ecscni/netconfig.go b/agent/ecscni/netconfig.go new file mode 100644 index 00000000000..a72599223bd --- /dev/null +++ b/agent/ecscni/netconfig.go @@ -0,0 +1,189 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecscni + +import ( + "encoding/json" + "net" + + "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + + "github.com/cihub/seelog" + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/pkg/errors" +) + +// NewBridgeNetworkConfig creates the config of bridge for ADD command, where +// bridge plugin acquires the IP and route information from IPAM. +func NewBridgeNetworkConfig(cfg *Config, includeIPAM bool) (string, *libcni.NetworkConfig, error) { + // Create the bridge config first. + bridgeName := defaultBridgeName + if len(cfg.BridgeName) != 0 { + bridgeName = cfg.BridgeName + } + + bridgeConfig := BridgeConfig{ + Type: ECSBridgePluginName, + BridgeName: bridgeName, + } + + // Create the IPAM config if requested. 
+ if includeIPAM { + ipamConfig, err := newIPAMConfig(cfg) + if err != nil { + return "", nil, errors.Wrap(err, "NewBridgeNetworkConfig: create ipam configuration failed") + } + + bridgeConfig.IPAM = ipamConfig + } + + networkConfig, err := newNetworkConfig(bridgeConfig, ECSBridgePluginName, cfg.MinSupportedCNIVersion) + if err != nil { + return "", nil, errors.Wrap(err, "NewBridgeNetworkConfig: construct bridge and ipam network configuration failed") + } + + return defaultVethName, networkConfig, nil +} + +// newNetworkConfig converts a network config to libcni's NetworkConfig. +func newNetworkConfig(netcfg interface{}, plugin string, cniVersion string) (*libcni.NetworkConfig, error) { + configBytes, err := json.Marshal(netcfg) + if err != nil { + seelog.Errorf("[ECSCNI] Marshal configuration for plugin %s failed, error: %v", plugin, err) + return nil, err + } + + netConfig := &libcni.NetworkConfig{ + Network: &cnitypes.NetConf{ + Type: plugin, + CNIVersion: cniVersion, + }, + Bytes: configBytes, + } + + return netConfig, nil +} + +// NewIPAMNetworkConfig creates the IPAM configuration accepted by libcni. 
+func NewIPAMNetworkConfig(cfg *Config) (string, *libcni.NetworkConfig, error) { + ipamConfig, err := newIPAMConfig(cfg) + if err != nil { + return defaultVethName, nil, errors.Wrap(err, "NewIPAMNetworkConfig: create ipam network configuration failed") + } + + ipamNetworkConfig := IPAMNetworkConfig{ + Name: ECSIPAMPluginName, + Type: ECSIPAMPluginName, + IPAM: ipamConfig, + } + + networkConfig, err := newNetworkConfig(ipamNetworkConfig, ECSIPAMPluginName, cfg.MinSupportedCNIVersion) + if err != nil { + return "", nil, errors.Wrap(err, "NewIPAMNetworkConfig: construct ipam network configuration failed") + } + + return defaultVethName, networkConfig, nil +} + +func newIPAMConfig(cfg *Config) (IPAMConfig, error) { + _, dst, err := net.ParseCIDR(TaskIAMRoleEndpoint) + if err != nil { + return IPAMConfig{}, err + } + + routes := []*cnitypes.Route{ + { + Dst: *dst, + }, + } + + for _, route := range cfg.AdditionalLocalRoutes { + seelog.Debugf("[ECSCNI] Adding an additional route for %s", route) + ipNetRoute := (net.IPNet)(route) + routes = append(routes, &cnitypes.Route{Dst: ipNetRoute}) + } + + ipamConfig := IPAMConfig{ + Type: ECSIPAMPluginName, + IPV4Subnet: ecsSubnet, + IPV4Address: cfg.IPAMV4Address, + ID: cfg.ID, + IPV4Routes: routes, + } + + return ipamConfig, nil +} + +// NewENINetworkConfig creates a new ENI CNI network configuration. 
+func NewENINetworkConfig(eni *eni.ENI, cfg *Config) (string, *libcni.NetworkConfig, error) { + eniConf := ENIConfig{ + Type: ECSENIPluginName, + ENIID: eni.ID, + MACAddress: eni.MacAddress, + IPAddresses: eni.GetIPAddressesWithPrefixLength(), + GatewayIPAddresses: []string{eni.GetSubnetGatewayIPv4Address()}, + BlockInstanceMetadata: cfg.BlockInstanceMetadata, + } + + networkConfig, err := newNetworkConfig(eniConf, ECSENIPluginName, cfg.MinSupportedCNIVersion) + if err != nil { + return "", nil, errors.Wrap(err, "cni config: failed to create configuration") + } + + return defaultENIName, networkConfig, nil +} + +// NewBranchENINetworkConfig creates a new branch ENI CNI network configuration. +func NewBranchENINetworkConfig(eni *eni.ENI, cfg *Config) (string, *libcni.NetworkConfig, error) { + eniConf := BranchENIConfig{ + Type: ECSBranchENIPluginName, + TrunkMACAddress: eni.InterfaceVlanProperties.TrunkInterfaceMacAddress, + BranchVlanID: eni.InterfaceVlanProperties.VlanID, + BranchMACAddress: eni.MacAddress, + IPAddresses: eni.GetIPAddressesWithPrefixLength(), + GatewayIPAddresses: []string{eni.GetSubnetGatewayIPv4Address()}, + BlockInstanceMetadata: cfg.BlockInstanceMetadata, + InterfaceType: vpcCNIPluginInterfaceType, + } + + networkConfig, err := newNetworkConfig(eniConf, ECSBranchENIPluginName, cfg.MinSupportedCNIVersion) + if err != nil { + return "", nil, errors.Wrap(err, "NewBranchENINetworkConfig: construct the eni network configuration failed") + } + + return defaultENIName, networkConfig, nil +} + +// NewAppMeshConfig creates a new AppMesh CNI network configuration. 
+func NewAppMeshConfig(appMesh *appmesh.AppMesh, cfg *Config) (string, *libcni.NetworkConfig, error) { + appMeshConfig := AppMeshConfig{ + Type: ECSAppMeshPluginName, + IgnoredUID: appMesh.IgnoredUID, + IgnoredGID: appMesh.IgnoredGID, + ProxyIngressPort: appMesh.ProxyIngressPort, + ProxyEgressPort: appMesh.ProxyEgressPort, + AppPorts: appMesh.AppPorts, + EgressIgnoredPorts: appMesh.EgressIgnoredPorts, + EgressIgnoredIPs: appMesh.EgressIgnoredIPs, + } + + networkConfig, err := newNetworkConfig(appMeshConfig, ECSAppMeshPluginName, cfg.MinSupportedCNIVersion) + if err != nil { + return "", nil, errors.Wrap(err, "NewAppMeshConfig: construct the app mesh network configuration failed") + } + + return defaultAppMeshIfName, networkConfig, nil +} diff --git a/agent/ecscni/plugin.go b/agent/ecscni/plugin.go new file mode 100644 index 00000000000..192cfccaac8 --- /dev/null +++ b/agent/ecscni/plugin.go @@ -0,0 +1,290 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecscni + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/aws/amazon-ecs-agent/agent/logger" + "github.com/cihub/seelog" + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/pkg/errors" +) + +const ( + currentCNISpec = "0.3.1" + // ECSCNIVersion, ECSCNIGitHash, VPCCNIGitHash needs to be updated every time CNI plugin is updated + currentECSCNIVersion = "2020.09.0" + currentECSCNIGitHash = "55b2ae77ee0bf22321b14f2d4ebbcc04f77322e1" + currentVPCCNIGitHash = "a21d3a41f922e14c19387713df66be3e4ee1e1f6" + vpcCNIPluginPath = "/log/vpc-branch-eni.log" + vpcCNIPluginInterfaceType = "vlan" +) + +// CNIClient defines the method of setting/cleaning up container namespace +type CNIClient interface { + // Version returns the version of the plugin + Version(string) (string, error) + // Capabilities returns the capabilities supported by a plugin + Capabilities(string) ([]string, error) + // SetupNS sets up the namespace of container + SetupNS(context.Context, *Config, time.Duration) (*current.Result, error) + // CleanupNS cleans up the container namespace + CleanupNS(context.Context, *Config, time.Duration) error + // ReleaseIPResource marks the ip available in the ipam db + ReleaseIPResource(context.Context, *Config, time.Duration) error +} + +// cniClient is the client to call plugin and setup the network +type cniClient struct { + pluginsPath string + libcni libcni.CNI +} + +// NewClient creates a client of ecscni which is used to invoke the plugin +func NewClient(pluginsPath string) CNIClient { + libcniConfig := &libcni.CNIConfig{ + Path: []string{pluginsPath}, + } + + cniClient := &cniClient{ + pluginsPath: pluginsPath, + libcni: libcniConfig, + } + cniClient.init() + return cniClient +} + +func (client *cniClient) init() { + // Set environment variables for CNI plugins. 
+ os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel()) + os.Setenv("VPC_CNI_LOG_LEVEL", logger.GetLevel()) + os.Setenv("VPC_CNI_LOG_FILE", vpcCNIPluginPath) +} + +// SetupNS sets up the network namespace of a task by invoking the given CNI network configurations. +// It returns the result of the bridge plugin invocation as that result is used to parse the IPv4 +// address allocated to the veth device attached to the task by the task engine. +func (client *cniClient) SetupNS( + ctx context.Context, + cfg *Config, + timeout time.Duration) (*current.Result, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + return client.setupNS(ctx, cfg) +} + +func (client *cniClient) setupNS(ctx context.Context, cfg *Config) (*current.Result, error) { + seelog.Debugf("[ECSCNI] Setting up the container namespace %s", cfg.ContainerID) + + var bridgeResult cnitypes.Result + runtimeConfig := libcni.RuntimeConf{ + ContainerID: cfg.ContainerID, + NetNS: fmt.Sprintf(NetnsFormat, cfg.ContainerPID), + } + + // Execute all CNI network configurations serially, in the given order. + for _, networkConfig := range cfg.NetworkConfigs { + cniNetworkConfig := networkConfig.CNINetworkConfig + seelog.Debugf("[ECSCNI] Adding network %s type %s in the container namespace %s", + cniNetworkConfig.Network.Name, + cniNetworkConfig.Network.Type, + cfg.ContainerID) + runtimeConfig.IfName = networkConfig.IfName + result, err := client.libcni.AddNetwork(ctx, cniNetworkConfig, &runtimeConfig) + if err != nil { + return nil, errors.Wrap(err, "add network failed") + } + // Save the result object from the bridge plugin execution. We need this later + // for inferring what IPv4 address was used to bring up the veth pair for task. 
+ if cniNetworkConfig.Network.Type == ECSBridgePluginName { + bridgeResult = result + } + + seelog.Debugf("[ECSCNI] Completed adding network %s type %s in the container namespace %s", + cniNetworkConfig.Network.Name, + cniNetworkConfig.Network.Type, + cfg.ContainerID) + } + + seelog.Debugf("[ECSCNI] Completed setting up the container namespace: %s", bridgeResult.String()) + + if _, err := bridgeResult.GetAsVersion(currentCNISpec); err != nil { + seelog.Warnf("[ECSCNI] Unable to convert result to spec version %s; error: %v; result is of version: %s", + currentCNISpec, err, bridgeResult.Version()) + return nil, err + } + var curResult *current.Result + curResult, ok := bridgeResult.(*current.Result) + if !ok { + return nil, errors.Errorf( + "cni setup: unable to convert result to expected version '%s'", + bridgeResult.String()) + } + + return curResult, nil +} + +// CleanupNS will clean up the container namespace, including remove the veth +// pair and stop the dhclient +func (client *cniClient) CleanupNS( + ctx context.Context, + cfg *Config, + timeout time.Duration) error { + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + return client.cleanupNS(ctx, cfg) +} + +func (client *cniClient) cleanupNS(ctx context.Context, cfg *Config) error { + seelog.Debugf("[ECSCNI] Cleaning up the container namespace %s", cfg.ContainerID) + + runtimeConfig := libcni.RuntimeConf{ + ContainerID: cfg.ContainerID, + NetNS: fmt.Sprintf(NetnsFormat, cfg.ContainerPID), + } + + // Execute all CNI network configurations serially, in the reverse order. 
+ for i := len(cfg.NetworkConfigs) - 1; i >= 0; i-- { + networkConfig := cfg.NetworkConfigs[i] + cniNetworkConfig := networkConfig.CNINetworkConfig + seelog.Debugf("[ECSCNI] Deleting network %s type %s in the container namespace %s", + cniNetworkConfig.Network.Name, + cniNetworkConfig.Network.Type, + cfg.ContainerID) + runtimeConfig.IfName = networkConfig.IfName + err := client.libcni.DelNetwork(ctx, cniNetworkConfig, &runtimeConfig) + if err != nil { + return errors.Wrap(err, "delete network failed") + } + + seelog.Debugf("[ECSCNI] Completed deleting network %s type %s in the container namespace %s", + cniNetworkConfig.Network.Name, + cniNetworkConfig.Network.Type, + cfg.ContainerID) + } + + seelog.Debugf("[ECSCNI] Completed cleaning up the container namespace %s", cfg.ContainerID) + + return nil +} + +// ReleaseIPResource marks the ip available in the ipam db +func (client *cniClient) ReleaseIPResource(ctx context.Context, cfg *Config, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + runtimeConfig := libcni.RuntimeConf{ + ContainerID: cfg.ContainerID, + NetNS: fmt.Sprintf(NetnsFormat, cfg.ContainerPID), + } + + seelog.Debugf("[ECSCNI] Releasing the ip resource from ipam db, id: [%s], ip: [%v]", cfg.ID, cfg.IPAMV4Address) + os.Setenv("ECS_CNI_LOGLEVEL", logger.GetLevel()) + defer os.Unsetenv("ECS_CNI_LOGLEVEL") + + ifName, networkConfig, err := NewIPAMNetworkConfig(cfg) + if err != nil { + return err + } + + runtimeConfig.IfName = ifName + + return client.libcni.DelNetwork(ctx, networkConfig, &runtimeConfig) +} + +// Version returns the version of the plugin +func (client *cniClient) Version(name string) (string, error) { + file := filepath.Join(client.pluginsPath, name) + + // Check if the plugin file exists before executing it + _, err := os.Stat(file) + if err != nil { + return "", err + } + + cmd := exec.Command(file, versionCommand) + versionInfo, err := cmd.Output() + if err != nil { + return "", err + } 
+ + version := &cniPluginVersion{} + // versionInfo is of the format + // {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"} + // Unmarshal this + err = json.Unmarshal(versionInfo, version) + if err != nil { + return "", errors.Wrapf(err, "ecscni: unmarshal version from string: %s", versionInfo) + } + + return version.str(), nil +} + +// cniPluginVersion is used to convert the JSON output of the +// '--version' command into a string +type cniPluginVersion struct { + Version string `json:"version"` + Dirty bool `json:"dirty"` + Hash string `json:"gitShortHash"` +} + +// str generates a string version of the CNI plugin version +// Example: +// {"version":"2017.06.0","dirty":true,"gitShortHash":"226db36"} => @226db36-2017.06.0 +// {"version":"2017.06.0","dirty":false,"gitShortHash":"326db36"} => 326db36-2017.06.0 +func (version *cniPluginVersion) str() string { + ver := "" + if version.Dirty { + ver = "@" + } + return ver + version.Hash + "-" + version.Version +} + +// Capabilities returns the capabilities supported by a plugin +func (client *cniClient) Capabilities(name string) ([]string, error) { + file := filepath.Join(client.pluginsPath, name) + + // Check if the plugin file exists before executing it + _, err := os.Stat(file) + if err != nil { + return nil, errors.Wrapf(err, "ecscni: unable to describe file info for '%s'", file) + } + + cmd := exec.Command(file, capabilitiesCommand) + capabilitiesInfo, err := cmd.Output() + if err != nil { + return nil, errors.Wrapf(err, "ecscni: failed invoking capabilities command for '%s'", name) + } + + capabilities := &struct { + Capabilities []string `json:"capabilities"` + }{} + err = json.Unmarshal(capabilitiesInfo, capabilities) + if err != nil { + return nil, errors.Wrapf(err, "ecscni: failed to unmarshal capabilities for '%s' from string: %s", name, capabilitiesInfo) + } + + return capabilities.Capabilities, nil +} diff --git a/agent/ecscni/plugin_test.go b/agent/ecscni/plugin_test.go new file mode 100644 
index 00000000000..d109827baa9 --- /dev/null +++ b/agent/ecscni/plugin_test.go @@ -0,0 +1,647 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecscni + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + mock_libcni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks_libcni" + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + eniID = "eni-12345678" + eniIPV4Address = "172.31.21.40" + eniIPV6Address = "abcd:dcba:1234:4321::" + eniIPV4AddressWithBlockSize = "172.31.21.40/20" + eniIPV6AddressWithBlockSize = "abcd:dcba:1234:4321::/64" + eniMACAddress = "02:7b:64:49:b1:40" + eniSubnetGatewayIPV4Address = "172.31.1.1/20" + eniSubnetGatewayIPV4AddressWithoutBlockSize = "172.31.1.1" + trunkENIMACAddress = "02:7b:64:49:b2:40" + branchENIVLANID = "42" +) + +func TestSetupNS(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + 
additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + + gomock.InOrder( + // ENI plugin was called first + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSENIPluginName, net.Network.Type, "first plugin should be eni") + }), + // Bridge plugin was called second + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBridgePluginName, net.Network.Type, "second plugin should be bridge") + var bridgeConfig BridgeConfig + err := json.Unmarshal(net.Bytes, &bridgeConfig) + assert.NoError(t, err, "unmarshal BridgeConfig") + assert.Len(t, bridgeConfig.IPAM.IPV4Routes, 3, "default route plus two extra routes") + }), + ) + + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + + _, err = ecscniClient.SetupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func eniNetworkConfig(config *Config) *NetworkConfig { + _, eniNetworkConfig, _ := NewENINetworkConfig( + &eni.ENI{ + ID: eniID, + IPV4Addresses: []*eni.ENIIPV4Address{ + {Address: eniIPV4Address, Primary: true}, + }, + MacAddress: eniMACAddress, + SubnetGatewayIPV4Address: eniSubnetGatewayIPV4Address, + }, + config, + ) + return &NetworkConfig{CNINetworkConfig: eniNetworkConfig} +} + +func bridgeConfigWithIPAM(config *Config) *NetworkConfig { + _, bridgeNetworkConfig, _ := NewBridgeNetworkConfig(config, true) + return 
&NetworkConfig{CNINetworkConfig: bridgeNetworkConfig} +} + +func TestSetupNSTrunk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + + gomock.InOrder( + // ENI plugin was called first + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBranchENIPluginName, net.Network.Type, "first plugin should be eni") + }), + // Bridge plugin was called last + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBridgePluginName, net.Network.Type, "second plugin should be bridge") + var bridgeConfig BridgeConfig + err := json.Unmarshal(net.Bytes, &bridgeConfig) + assert.NoError(t, err, "unmarshal BridgeConfig") + assert.Len(t, bridgeConfig.IPAM.IPV4Routes, 3, "default route plus two extra routes") + }), + ) + + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, branchENINetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + _, err = ecscniClient.SetupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func branchENINetworkConfig(config *Config) *NetworkConfig { + _, eniNetworkConfig, _ := NewBranchENINetworkConfig( + &eni.ENI{ + ID: eniID, + IPV4Addresses: []*eni.ENIIPV4Address{ + {Address: eniIPV4Address, Primary: true}, + }, + MacAddress: 
eniMACAddress, + SubnetGatewayIPV4Address: eniSubnetGatewayIPV4Address, + InterfaceVlanProperties: &eni.InterfaceVlanProperties{ + TrunkInterfaceMacAddress: trunkENIMACAddress, + VlanID: branchENIVLANID, + }, + }, + config) + return &NetworkConfig{CNINetworkConfig: eniNetworkConfig} +} + +func TestSetupNSAppMeshEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + + gomock.InOrder( + // ENI plugin was called first + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSENIPluginName, net.Network.Type, "first plugin should be eni") + }), + // Bridge plugin was called second + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBridgePluginName, net.Network.Type, "second plugin should be bridge") + var bridgeConfig BridgeConfig + err := json.Unmarshal(net.Bytes, &bridgeConfig) + assert.NoError(t, err, "unmarshal BridgeConfig") + assert.Len(t, bridgeConfig.IPAM.IPV4Routes, 3, "default route plus two extra routes") + }), + // AppMesh plugin was called third + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSAppMeshPluginName, net.Network.Type, "third plugin should be app mesh") + }), + ) + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + 
NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + config.NetworkConfigs = append(config.NetworkConfigs, appMeshNetworkConfig(config)) + _, err = ecscniClient.SetupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func appMeshNetworkConfig(config *Config) *NetworkConfig { + _, appMeshNetworkConfig, _ := NewAppMeshConfig(&appmesh.AppMesh{ + IgnoredUID: "1337", + IgnoredGID: "1448", + ProxyIngressPort: "15000", + ProxyEgressPort: "15001", + AppPorts: []string{ + "9000", + }, + EgressIgnoredPorts: []string{ + "9001", + }, + EgressIgnoredIPs: []string{ + "169.254.169.254", + }, + }, config) + return &NetworkConfig{CNINetworkConfig: appMeshNetworkConfig} +} + +func TestSetupNSTimeout(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + gomock.InOrder( + // ENI plugin was called first + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, errors.New("timeout")).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + }).MaxTimes(1), + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).MaxTimes(1), + libcniClient.EXPECT().AddNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(¤t.Result{}, nil).MaxTimes(1), + ) + + config := &Config{ + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + config.NetworkConfigs = append(config.NetworkConfigs, appMeshNetworkConfig(config)) + _, err := ecscniClient.SetupNS(context.TODO(), config, time.Millisecond) + assert.Error(t, err) 
+} + +func TestCleanupNS(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + // This will be called for both bridge and eni plugin + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + err = ecscniClient.CleanupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func TestCleanupNSTrunk(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + gomock.InOrder( + // Bridge plugin was called first + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBridgePluginName, net.Network.Type, "first plugin should be bridge") + var bridgeConfig BridgeConfig + err := json.Unmarshal(net.Bytes, &bridgeConfig) + assert.NoError(t, err, "unmarshal BridgeConfig") + }), + // ENI plugin was called second + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Do( + func(ctx context.Context, net *libcni.NetworkConfig, rt *libcni.RuntimeConf) { + assert.Equal(t, ECSBranchENIPluginName, net.Network.Type, "second plugin should be eni") + }), + ) + + additionalRoutesJson := 
`["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, branchENINetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + err = ecscniClient.CleanupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func TestCleanupNSAppMeshEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + // This will be called for both bridge and eni plugin + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3) + + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + config.NetworkConfigs = append(config.NetworkConfigs, appMeshNetworkConfig(config)) + err = ecscniClient.CleanupNS(context.TODO(), config, time.Second) + assert.NoError(t, err) +} + +func TestCleanupNSTimeout(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + // This will be called for both bridge and eni plugin + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(x 
interface{}, y interface{}, z interface{}) { + }).Return(errors.New("timeout")).MaxTimes(3) + + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + ctx, cancel := context.WithTimeout(context.TODO(), 1*time.Millisecond) + defer cancel() + config := &Config{ + AdditionalLocalRoutes: additionalRoutes, + NetworkConfigs: []*NetworkConfig{}, + } + config.NetworkConfigs = append(config.NetworkConfigs, eniNetworkConfig(config)) + config.NetworkConfigs = append(config.NetworkConfigs, bridgeConfigWithIPAM(config)) + err = ecscniClient.CleanupNS(ctx, config, time.Millisecond) + assert.Error(t, err) +} + +func TestReleaseIPInIPAM(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ecscniClient := NewClient("") + libcniClient := mock_libcni.NewMockCNI(ctrl) + ecscniClient.(*cniClient).libcni = libcniClient + + libcniClient.EXPECT().DelNetwork(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + err := ecscniClient.ReleaseIPResource(context.TODO(), &Config{}, time.Second) + assert.NoError(t, err) +} + +// TestConstructENINetworkConfig tests createENINetworkConfig creates the correct +// configuration for eni plugin +func TestConstructENINetworkConfig(t *testing.T) { + config := &Config{ + ContainerID: "containerid12", + ContainerPID: "pid", + BlockInstanceMetadata: true, + } + + eniName, eniNetworkConfig, err := NewENINetworkConfig( + &eni.ENI{ + ID: eniID, + IPV4Addresses: []*eni.ENIIPV4Address{ + {Address: eniIPV4Address, Primary: true}, + }, + IPV6Addresses: []*eni.ENIIPV6Address{ + { + Address: eniIPV6Address, + }, + }, + MacAddress: eniMACAddress, + SubnetGatewayIPV4Address: eniSubnetGatewayIPV4Address, + }, + config) + require.NoError(t, err, "Failed to construct eni network config") + assert.Equal(t, "eth0", eniName) + eniConfig := &ENIConfig{} + err = json.Unmarshal(eniNetworkConfig.Bytes, 
eniConfig) + require.NoError(t, err, "unmarshal config from bytes failed") + assert.Equal(t, &ENIConfig{ + Type: "ecs-eni", + ENIID: eniID, + IPAddresses: []string{eniIPV4AddressWithBlockSize, eniIPV6AddressWithBlockSize}, + MACAddress: eniMACAddress, + BlockInstanceMetadata: true, + GatewayIPAddresses: []string{eniSubnetGatewayIPV4AddressWithoutBlockSize}, + }, eniConfig) +} + +// TestConstructBranchENINetworkConfig tests createBranchENINetworkConfig creates the correct +// configuration for eni plugin +func TestConstructBranchENINetworkConfig(t *testing.T) { + config := &Config{ + ContainerID: "containerid12", + ContainerPID: "pid", + BlockInstanceMetadata: true, + } + + eniName, eniNetworkConfig, err := NewBranchENINetworkConfig( + &eni.ENI{ + ID: eniID, + IPV4Addresses: []*eni.ENIIPV4Address{ + {Address: eniIPV4Address, Primary: true}, + }, + IPV6Addresses: []*eni.ENIIPV6Address{ + { + Address: eniIPV6Address, + }, + }, + MacAddress: eniMACAddress, + SubnetGatewayIPV4Address: eniSubnetGatewayIPV4Address, + InterfaceVlanProperties: &eni.InterfaceVlanProperties{ + TrunkInterfaceMacAddress: trunkENIMACAddress, + VlanID: branchENIVLANID, + }, + }, + config) + require.NoError(t, err, "Failed to construct eni network config") + assert.Equal(t, "eth0", eniName) + branchENIConfig := &BranchENIConfig{} + err = json.Unmarshal(eniNetworkConfig.Bytes, branchENIConfig) + require.NoError(t, err, "unmarshal config from bytes failed") + assert.Equal(t, &BranchENIConfig{ + Type: "vpc-branch-eni", + IPAddresses: []string{eniIPV4AddressWithBlockSize, eniIPV6AddressWithBlockSize}, + BranchMACAddress: eniMACAddress, + BlockInstanceMetadata: true, + GatewayIPAddresses: []string{eniSubnetGatewayIPV4AddressWithoutBlockSize}, + TrunkMACAddress: trunkENIMACAddress, + BranchVlanID: branchENIVLANID, + InterfaceType: "vlan", + }, branchENIConfig) +} + +// TestConstructBridgeNetworkConfigWithoutIPAM tests createBridgeNetworkConfigWithoutIPAM creates the right configuration for bridge plugin 
+func TestConstructBridgeNetworkConfigWithoutIPAM(t *testing.T) { + config := &Config{ + ContainerID: "containerid12", + ContainerPID: "pid", + BridgeName: "bridge-test1", + } + + vethName, bridgeNetworkConfig, err := NewBridgeNetworkConfig(config, false) + require.NoError(t, err, "Failed to construct bridge network config") + assert.Equal(t, "ecs-eth0", vethName) + bridgeConfig := &BridgeConfig{} + err = json.Unmarshal(bridgeNetworkConfig.Bytes, bridgeConfig) + require.NoError(t, err, "unmarshal bridge config from bytes failed") + assert.Equal(t, config.BridgeName, bridgeConfig.BridgeName) + assert.Equal(t, IPAMConfig{}, bridgeConfig.IPAM) +} + +// TestConstructAppMeshNetworkConfig tests createAppMeshConfig creates the correct +// configuration for app mesh plugin +func TestConstructAppMeshNetworkConfig(t *testing.T) { + config := &appmesh.AppMesh{ + IgnoredUID: "1337", + IgnoredGID: "1448", + ProxyIngressPort: "15000", + ProxyEgressPort: "15001", + AppPorts: []string{ + "9000", + }, + EgressIgnoredPorts: []string{ + "9001", + }, + EgressIgnoredIPs: []string{ + "169.254.169.254", + }, + } + + appMeshIfName, appMeshNetworkConfig, err := NewAppMeshConfig(config, &Config{}) + require.NoError(t, err, "Failed to construct app mesh network config") + assert.Equal(t, "aws-appmesh", appMeshIfName) + appMeshConfig := &AppMeshConfig{} + err = json.Unmarshal(appMeshNetworkConfig.Bytes, appMeshConfig) + require.NoError(t, err, "unmarshal config from bytes failed") + + assert.Equal(t, config.IgnoredUID, appMeshConfig.IgnoredUID) + assert.Equal(t, config.IgnoredGID, appMeshConfig.IgnoredGID) + assert.Equal(t, config.ProxyIngressPort, appMeshConfig.ProxyIngressPort) + assert.Equal(t, config.ProxyEgressPort, appMeshConfig.ProxyEgressPort) + assert.Equal(t, len(config.ProxyEgressPort), len(appMeshConfig.ProxyEgressPort)) + assert.Equal(t, config.ProxyEgressPort[0], appMeshConfig.ProxyEgressPort[0]) + assert.Equal(t, len(config.EgressIgnoredIPs), 
len(appMeshConfig.EgressIgnoredIPs)) + assert.Equal(t, config.EgressIgnoredIPs[0], appMeshConfig.EgressIgnoredIPs[0]) +} + +func TestConstructIPAMNetworkConfig(t *testing.T) { + config := &Config{ + ID: eniMACAddress, + ContainerID: "containerid12", + ContainerPID: "pid", + BlockInstanceMetadata: true, + } + + vethName, networkConfig, err := NewIPAMNetworkConfig(config) + require.NoError(t, err, "Failed to construct network config") + assert.Equal(t, "ecs-eth0", vethName) + ipamNetworkConfig := &IPAMNetworkConfig{} + err = json.Unmarshal(networkConfig.Bytes, ipamNetworkConfig) + require.NoError(t, err, "unmarshal config from bytes failed") + _, dst, _ := net.ParseCIDR("169.254.170.2/32") + expectedConfig := &IPAMNetworkConfig{ + Type: "ecs-ipam", + Name: "ecs-ipam", + IPAM: IPAMConfig{ + Type: "ecs-ipam", + ID: eniMACAddress, + IPV4Subnet: "169.254.172.0/22", + IPV4Routes: []*cnitypes.Route{{Dst: *dst}}, + }, + } + expectedConfigBytes, _ := json.Marshal(expectedConfig) + assert.Equal(t, expectedConfigBytes, networkConfig.Bytes) +} + +// TestConstructBridgeNetworkConfigWithIPAM tests createBridgeNetworkConfigWithIPAM +// creates the correct configuration for bridge and ipam plugin +func TestConstructNetworkConfig(t *testing.T) { + additionalRoutesJson := `["169.254.172.1/32", "10.11.12.13/32"]` + var additionalRoutes []cnitypes.IPNet + err := json.Unmarshal([]byte(additionalRoutesJson), &additionalRoutes) + assert.NoError(t, err) + + config := &Config{ + ContainerID: "containerid12", + ContainerPID: "pid", + BridgeName: "bridge-test1", + AdditionalLocalRoutes: additionalRoutes, + } + + vethName, bridgeNetworkConfig, err := NewBridgeNetworkConfig(config, true) + require.NoError(t, err, "construct bridge plugins configuration failed") + assert.Equal(t, "ecs-eth0", vethName) + bridgeConfig := &BridgeConfig{} + err = json.Unmarshal(bridgeNetworkConfig.Bytes, bridgeConfig) + require.NoError(t, err, "unmarshal bridge config from bytes failed: %s", + 
string(bridgeNetworkConfig.Bytes)) + assert.Equal(t, config.BridgeName, bridgeConfig.BridgeName) + assert.Equal(t, ecsSubnet, bridgeConfig.IPAM.IPV4Subnet) + assert.Equal(t, TaskIAMRoleEndpoint, bridgeConfig.IPAM.IPV4Routes[0].Dst.String()) +} + +func TestCNIPluginVersion(t *testing.T) { + testCases := []struct { + version *cniPluginVersion + str string + }{ + { + version: &cniPluginVersion{ + Version: "1", + Dirty: false, + Hash: "hash", + }, + str: "hash-1", + }, + { + version: &cniPluginVersion{ + Version: "1", + Dirty: true, + Hash: "hash", + }, + str: "@hash-1", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("version string %s", tc.str), func(t *testing.T) { + assert.Equal(t, tc.str, tc.version.str()) + }) + } +} + +// Asserts that CNI plugin version matches the expected version +func TestCNIPluginVersionNumber(t *testing.T) { + versionStr := getCNIVersionString(t) + assert.Equal(t, versionStr, currentECSCNIVersion) +} + +// Asserts that CNI plugin version is upgraded when new commits are made to CNI plugin submodule +func TestCNIPluginVersionUpgrade(t *testing.T) { + versionStr := getCNIVersionString(t) + cmd := exec.Command("git", "submodule") + versionInfo, err := cmd.Output() + assert.NoError(t, err, "Error running the command: git submodule") + versionInfoStrList := strings.Split(string(versionInfo), "\n") + // If a new commit is added, version should be upgraded + if currentECSCNIGitHash != strings.Split(versionInfoStrList[0], " ")[1] { + assert.NotEqual(t, currentECSCNIVersion, versionStr) + } + assert.Equal(t, currentVPCCNIGitHash, strings.Split(versionInfoStrList[1], " ")[1]) +} + +// Returns the version in CNI plugin VERSION file as a string +func getCNIVersionString(t *testing.T) string { + // ../../amazon-ecs-cni-plugins/VERSION + versionFilePath := filepath.Clean(filepath.Join("..", "..", "amazon-ecs-cni-plugins", "VERSION")) + versionStr, err := ioutil.ReadFile(versionFilePath) + assert.NoError(t, err, "Error reading the CNI 
plugin version file") + return strings.TrimSpace(string(versionStr)) +} diff --git a/agent/ecscni/types.go b/agent/ecscni/types.go new file mode 100644 index 00000000000..8561eb51be4 --- /dev/null +++ b/agent/ecscni/types.go @@ -0,0 +1,214 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecscni + +import ( + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" +) + +const ( + // versionCommand is the command used to get the version of plugin + versionCommand = "--version" + // capabilitiesCommand is the command used to get the capabilities of a + // CNI plugin + capabilitiesCommand = "--capabilities" + // defaultVethName is the name of veth pair name in the container namespace + defaultVethName = "ecs-eth0" + // defaultENIName is the name of eni interface name in the container namespace + defaultENIName = "eth0" + // defaultBridgeName is the default name of bridge created for container to + // communicate with ecs-agent + defaultBridgeName = "ecs-bridge" + // defaultAppMeshIfName is the default name of app mesh to setup iptable rules + // for app mesh container. IfName is mandatory field to invoke CNI plugin. 
+ defaultAppMeshIfName = "aws-appmesh" + // ecsSubnet is the available ip addresses to use for task networking + ecsSubnet = "169.254.172.0/22" + + // NetnsFormat is used to construct the path to container network namespace + NetnsFormat = "/host/proc/%s/ns/net" + // ECSIPAMPluginName is the binary of the ipam plugin + ECSIPAMPluginName = "ecs-ipam" + // ECSBridgePluginName is the binary of the bridge plugin + ECSBridgePluginName = "ecs-bridge" + // ECSENIPluginName is the binary of the eni plugin + ECSENIPluginName = "ecs-eni" + // ECSAppMeshPluginName is the binary of aws-appmesh plugin + ECSAppMeshPluginName = "aws-appmesh" + // ECSBranchENIPluginName is the binary of the branch-eni plugin + ECSBranchENIPluginName = "vpc-branch-eni" + // TaskIAMRoleEndpoint is the endpoint where ecs-agent exposes credentials for + // task IAM role + TaskIAMRoleEndpoint = "169.254.170.2/32" + // CapabilityAWSVPCNetworkingMode is the capability string, which when + // present in the output of the '--capabilities' command of a CNI plugin + // indicates that the plugin can support the ECS "awsvpc" network mode + CapabilityAWSVPCNetworkingMode = "awsvpc-network-mode" +) + +// IPAMNetworkConfig is the config format accepted by the plugin +type IPAMNetworkConfig struct { + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + CNIVersion string `json:"cniVersion,omitempty"` + IPAM IPAMConfig `json:"ipam"` +} + +// IPAMConfig contains all the information needed to invoke the ipam plugin +type IPAMConfig struct { + // Type is the cni plugin name + Type string `json:"type,omitempty"` + // ID is the information stored in the ipam along with ip as key-value pair + ID string `json:"id,omitempty"` + // CNIVersion is the cni spec version to use + CNIVersion string `json:"cniVersion,omitempty"` + // IPV4Subnet is the ip address range managed by ipam + IPV4Subnet string `json:"ipv4-subnet,omitempty"` + // IPV4Address is the ip address to deal with (assign or release) in ipam + 
IPV4Address *cnitypes.IPNet `json:"ipv4-address,omitempty"` + // IPV4Gateway is the gateway returned by ipam, default is the '.1' in the subnet + IPV4Gateway string `json:"ipv4-gateway,omitempty"` + // IPV4Routes is the routes to be added in the container namespace + IPV4Routes []*cnitypes.Route `json:"ipv4-routes,omitempty"` +} + +// BridgeConfig contains all the information needed to invoke the bridge plugin +type BridgeConfig struct { + // Type is the cni plugin name + Type string `json:"type,omitempty"` + // CNIVersion is the cni spec version to use + CNIVersion string `json:"cniVersion,omitempty"` + // BridgeName is the name of bridge + BridgeName string `json:"bridge"` + // IsGW indicates whether the bridge acts as a gateway, it determines whether + // an ip address needs to be assigned to the bridge + IsGW bool `json:"isGateway"` + // IsDefaultGW indicates whether the bridge is the gateway of the container + IsDefaultGW bool `json:"isDefaultGateway"` + // ForceAddress indicates whether a new ip should be assigned if the bridge + // has already a different ip + ForceAddress bool `json:"forceAddress"` + // IPMasq indicates whether to set up the IP Masquerade for traffic originating + // from this network + IPMasq bool `json:"ipMasq"` + // MTU sets MTU of the bridge interface + MTU int `json:"mtu"` + // HairpinMode sets the hairpin mode of interface on the bridge + HairpinMode bool `json:"hairpinMode"` + // IPAM is the configuration to acquire ip/route from ipam plugin + IPAM IPAMConfig `json:"ipam,omitempty"` +} + +// ENIConfig contains all the information needed to invoke the eni plugin +type ENIConfig struct { + // Type is the cni plugin name + Type string `json:"type,omitempty"` + // CNIVersion is the cni spec version to use + CNIVersion string `json:"cniVersion,omitempty"` + // ENIID is the id of ec2 eni + ENIID string `json:"eni"` + // MacAddress is the mac address of eni + MACAddress string `json:"mac"` + // IPAddresses contains the ip addresses of the ENI. 
+ IPAddresses []string `json:"ip-addresses"` + // GatewayIPAddresses specifies the addresses of the subnet gateway for the ENI. + GatewayIPAddresses []string `json:"gateway-ip-addresses"` + // BlockInstanceMetadata specifies if InstanceMetadata endpoint should be blocked + BlockInstanceMetadata bool `json:"block-instance-metadata"` +} + +// AppMeshConfig contains all the information needed to invoke the app mesh plugin +type AppMeshConfig struct { + // Type is the cni plugin name + Type string `json:"type,omitempty"` + // CNIVersion is the cni spec version to use + CNIVersion string `json:"cniVersion,omitempty"` + // IgnoredUID specifies egress traffic from the processes owned by the UID will be ignored + IgnoredUID string `json:"ignoredUID,omitempty"` + // IgnoredGID specifies egress traffic from the processes owned by the GID will be ignored + IgnoredGID string `json:"ignoredGID,omitempty"` + // ProxyIngressPort is the ingress port number that proxy is listening on + ProxyIngressPort string `json:"proxyIngressPort"` + // ProxyEgressPort is the egress port number that proxy is listening on + ProxyEgressPort string `json:"proxyEgressPort"` + // AppPorts specifies port numbers that application is listening on + AppPorts []string `json:"appPorts"` + // EgressIgnoredPorts is the list of ports for which egress traffic will be ignored + EgressIgnoredPorts []string `json:"egressIgnoredPorts,omitempty"` + // EgressIgnoredIPs is the list of IPs for which egress traffic will be ignored + EgressIgnoredIPs []string `json:"egressIgnoredIPs,omitempty"` +} + +// BranchENIConfig contains all the information needed to invoke the vpc-branch-eni plugin +type BranchENIConfig struct { + // CNIVersion is the CNI spec version to use + CNIVersion string `json:"cniVersion,omitempty"` + // Name is the CNI network name + Name string `json:"name,omitempty"` + // Type is the CNI plugin name + Type string `json:"type,omitempty"` + + // TrunkMACAddress is the MAC address of the trunk ENI + 
TrunkMACAddress string `json:"trunkMACAddress,omitempty"` + // BranchVlanID is the VLAN ID of the branch ENI + BranchVlanID string `json:"branchVlanID,omitempty"` + // BranchMacAddress is the MAC address of the branch ENI + BranchMACAddress string `json:"branchMACAddress"` + // IPAddresses contains the IP addresses of the branch ENI. + IPAddresses []string `json:"ipAddresses"` + // GatewayIPAddresses contains the IP addresses of the default gateway in the subnet. + GatewayIPAddresses []string `json:"gatewayIPAddresses"` + // BlockInstanceMetdata specifies if InstanceMetadata endpoint should be blocked. + BlockInstanceMetadata bool `json:"blockInstanceMetadata"` + // InterfaceType is the type of the interface to connect the branch ENI to + InterfaceType string `json:"interfaceType,omitempty"` +} + +// Config contains all the information to set up the container namespace using +// the plugins +type Config struct { + // PluginsPath indicates the path where cni plugins are located + PluginsPath string + // MinSupportedCNIVersion is the minimum cni spec version supported + MinSupportedCNIVersion string + // ContainerID is the id of container of which to set up the network namespace + ContainerID string + // ContainerPID is the pid of the container + ContainerPID string + // BridgeName is the name used to create the bridge + BridgeName string + // IPAMV4Address is the ipv4 used to assign from ipam + IPAMV4Address *cnitypes.IPNet + // ID is the information associate with ip in ipam + ID string + // BlockInstanceMetadata specifies if InstanceMetadata endpoint should be blocked + BlockInstanceMetadata bool + // AdditionalLocalRoutes specifies additional routes to be added to the task namespace + AdditionalLocalRoutes []cnitypes.IPNet + // NetworkConfigs is the list of CNI network configurations to be invoked + NetworkConfigs []*NetworkConfig +} + +// NetworkConfig wraps CNI library's NetworkConfig object. 
It tracks the interface device +// name (the IfName param required to invoke AddNetwork) along with libcni's NetworkConfig +// object. The IfName is required to be set to invoke `AddNetwork` method when invoking +// plugins to set up the network namespace. +type NetworkConfig struct { + // IfName is the name of the network interface device, to be set within the + // network namespace. + IfName string + // CNINetworkConfig is the network configuration required to invoke the CNI plugin + CNINetworkConfig *libcni.NetworkConfig +} diff --git a/agent/engine/common_integ_test.go b/agent/engine/common_integ_test.go new file mode 100644 index 00000000000..1664f103a73 --- /dev/null +++ b/agent/engine/common_integ_test.go @@ -0,0 +1,295 @@ +// +build sudo integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/statechange" + log "github.com/cihub/seelog" + "github.com/stretchr/testify/assert" +) + +var ( + sdkClientFactory sdkclientfactory.Factory +) + +func init() { + sdkClientFactory = sdkclientfactory.NewFactory(context.TODO(), dockerEndpoint) +} + +func defaultTestConfigIntegTest() *config.Config { + cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + cfg.ImagePullBehavior = config.ImagePullPreferCachedBehavior + return cfg +} + +func createTestTask(arn string) *apitask.Task { + return &apitask.Task{ + Arn: arn, + Family: "family", + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{createTestContainer()}, + } +} + +func setupIntegTestLogs(t *testing.T) string { + // Create a directory for storing test logs. 
+ testLogDir, err := ioutil.TempDir("", "ecs-integ-test") + require.NoError(t, err, "Unable to create directory for storing test logs") + + logger, err := log.LoggerFromConfigAsString(loggerConfigIntegrationTest(testLogDir)) + assert.NoError(t, err, "initialisation failed") + + err = log.ReplaceLogger(logger) + assert.NoError(t, err, "unable to replace logger") + + return testLogDir +} + +func loggerConfigIntegrationTest(logfile string) string { + config := fmt.Sprintf(` + + + + + + + + + + `, logfile) + + return config +} + +func verifyContainerRunningStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, + "Expected container to be RUNNING") +} + +func verifyContainerRunningStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, + "Expected container to be RUNNING") + assert.NotEqual(t, "", event.(api.ContainerStateChange).RuntimeID, + "Expected container runtimeID should not empty") +} + +func verifyExecAgentStateChange(t *testing.T, taskEngine TaskEngine, + expectedStatus apicontainerstatus.ManagedAgentStatus, waitDone chan<- struct{}) { + stateChangeEvents := taskEngine.StateChangeEvents() + for event := range stateChangeEvents { + if managedAgentEvent, ok := event.(api.ManagedAgentStateChange); ok { + if managedAgentEvent.Status == expectedStatus { + close(waitDone) + return + } + + } + } +} + +func verifyContainerStoppedStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped, + "Expected container to be STOPPED") +} + +func 
verifyContainerStoppedStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped, + "Expected container to be STOPPED") + assert.NotEqual(t, "", event.(api.ContainerStateChange).RuntimeID, + "Expected container runtimeID should not empty") +} + +func setup(cfg *config.Config, state dockerstate.TaskEngineState, t *testing.T) (TaskEngine, func(), credentials.Manager) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + skipIntegTestIfApplicable(t) + + sdkClientFactory := sdkclientfactory.NewFactory(ctx, dockerEndpoint) + dockerClient, err := dockerapi.NewDockerGoClient(sdkClientFactory, cfg, context.Background()) + if err != nil { + t.Fatalf("Error creating Docker client: %v", err) + } + credentialsManager := credentials.NewManager() + if state == nil { + state = dockerstate.NewTaskEngineState() + } + imageManager := NewImageManager(cfg, dockerClient, state) + imageManager.SetDataClient(data.NewNoopClient()) + metadataManager := containermetadata.NewManager(dockerClient, cfg) + + taskEngine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager, + eventstream.NewEventStream("ENGINEINTEGTEST", context.Background()), imageManager, state, metadataManager, + nil, execcmd.NewManager()) + taskEngine.MustInit(context.TODO()) + return taskEngine, func() { + taskEngine.Shutdown() + }, credentialsManager +} + +func skipIntegTestIfApplicable(t *testing.T) { + if os.Getenv("ECS_SKIP_ENGINE_INTEG_TEST") != "" { + t.Skip("ECS_SKIP_ENGINE_INTEG_TEST") + } + if !isDockerRunning() { + t.Skip("Docker not running") + } +} + +func createTestContainerWithImageAndName(image string, name string) *apicontainer.Container { + return &apicontainer.Container{ + Name: name, + Image: image, + Command: []string{}, + Essential: true, + DesiredStatusUnsafe: 
apicontainerstatus.ContainerRunning, + CPU: 1024, + Memory: 128, + } +} + +func waitForTaskCleanup(t *testing.T, taskEngine TaskEngine, taskArn string, seconds int) { + for i := 0; i < seconds; i++ { + _, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn) + if !ok { + return + } + time.Sleep(1 * time.Second) + } + t.Fatalf("timed out waiting for task to be clean up, task: %s", taskArn) +} + +// A map that stores statusChangeEvents for both Tasks and Containers +// Organized first by EventType (Task or Container), +// then by StatusType (i.e. RUNNING, STOPPED, etc) +// then by Task/Container identifying string (TaskARN or ContainerName) +// EventType +// / \ +// TaskEvent ContainerEvent +// / \ / \ +// RUNNING STOPPED RUNNING STOPPED +// / \ / \ | | +// ARN1 ARN2 ARN3 ARN4 ARN:Cont1 ARN:Cont2 +type EventSet map[statechange.EventType]statusToName + +// Type definition for mapping a Status to a TaskARN/ContainerName +type statusToName map[string]nameSet + +// Type definition for a generic set implemented as a map +type nameSet map[string]bool + +// Holds the Events Map described above with a RW mutex +type TestEvents struct { + RecordedEvents EventSet + StateChangeEvents <-chan statechange.Event +} + +// Initializes the TestEvents using the TaskEngine. Abstracts the overhead required to set up +// collecting TaskEngine stateChangeEvents. +// We must use the Golang assert library and NOT the require library to ensure the Go routine is +// stopped at the end of our tests +func InitEventCollection(taskEngine TaskEngine) *TestEvents { + stateChangeEvents := taskEngine.StateChangeEvents() + recordedEvents := make(EventSet) + testEvents := &TestEvents{ + RecordedEvents: recordedEvents, + StateChangeEvents: stateChangeEvents, + } + return testEvents +} + +// This method queries the TestEvents struct to check a Task Status. 
+// This method will block if there are no more stateChangeEvents from the DockerTaskEngine but is expected +func VerifyTaskStatus(status apitaskstatus.TaskStatus, taskARN string, testEvents *TestEvents, t *testing.T) error { + for { + if _, found := testEvents.RecordedEvents[statechange.TaskEvent][status.String()][taskARN]; found { + return nil + } + event := <-testEvents.StateChangeEvents + RecordEvent(testEvents, event) + } +} + +// This method queries the TestEvents struct to check a Task Status. +// This method will block if there are no more stateChangeEvents from the DockerTaskEngine but is expected +func VerifyContainerStatus(status apicontainerstatus.ContainerStatus, ARNcontName string, testEvents *TestEvents, t *testing.T) error { + for { + if _, found := testEvents.RecordedEvents[statechange.ContainerEvent][status.String()][ARNcontName]; found { + return nil + } + event := <-testEvents.StateChangeEvents + RecordEvent(testEvents, event) + } +} + +// Will record the event that was just collected into the TestEvents struct's RecordedEvents map +func RecordEvent(testEvents *TestEvents, event statechange.Event) { + switch event.GetEventType() { + case statechange.TaskEvent: + taskEvent := event.(api.TaskStateChange) + if _, exists := testEvents.RecordedEvents[statechange.TaskEvent]; !exists { + testEvents.RecordedEvents[statechange.TaskEvent] = make(statusToName) + } + if _, exists := testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()]; !exists { + testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()] = make(map[string]bool) + } + testEvents.RecordedEvents[statechange.TaskEvent][taskEvent.Status.String()][taskEvent.TaskARN] = true + case statechange.ContainerEvent: + containerEvent := event.(api.ContainerStateChange) + if _, exists := testEvents.RecordedEvents[statechange.ContainerEvent]; !exists { + testEvents.RecordedEvents[statechange.ContainerEvent] = make(statusToName) + } + if _, exists := 
testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()]; !exists { + testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()] = make(map[string]bool) + } + testEvents.RecordedEvents[statechange.ContainerEvent][containerEvent.Status.String()][containerEvent.TaskArn+":"+containerEvent.ContainerName] = true + } +} diff --git a/agent/engine/common_test.go b/agent/engine/common_test.go new file mode 100644 index 00000000000..b6945e04248 --- /dev/null +++ b/agent/engine/common_test.go @@ -0,0 +1,366 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// Package engine contains the core logic for managing tasks +package engine + +import ( + "context" + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/statechange" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + "github.com/cihub/seelog" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" +) + +const ( + containerID = "containerID" + waitTaskStateChangeDuration = 2 * time.Minute +) + +var ( + defaultDockerClientAPIVersion = dockerclient.Version_1_17 + // some unassigned ports to use for tests + // see https://www.speedguide.net/port.php?port=24685 + unassignedPort int32 = 24685 +) + +// getUnassignedPort returns a NEW unassigned port each time it's called. 
+func getUnassignedPort() uint16 { + return uint16(atomic.AddInt32(&unassignedPort, 1)) +} + +func discardEvents(from interface{}) func() { + done := make(chan bool) + + go func() { + for { + ndx, _, _ := reflect.Select([]reflect.SelectCase{ + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(from), + }, + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(done), + }, + }) + if ndx == 1 { + break + } + } + }() + return func() { + done <- true + } +} + +// TODO: Move integ tests away from relying on the statechange channel for +// determining if a task is running/stopped or not +func verifyTaskIsRunning(stateChangeEvents <-chan statechange.Event, task *apitask.Task) error { + for { + event := <-stateChangeEvents + if event.GetEventType() != statechange.TaskEvent { + continue + } + + taskEvent := event.(api.TaskStateChange) + if taskEvent.TaskARN != task.Arn { + continue + } + if taskEvent.Status == apitaskstatus.TaskRunning { + return nil + } + if taskEvent.Status > apitaskstatus.TaskRunning { + return fmt.Errorf("Task went straight to %s without running, task: %s", taskEvent.Status.String(), task.Arn) + } + } +} + +func verifyTaskIsStopped(stateChangeEvents <-chan statechange.Event, task *apitask.Task) { + for { + event := <-stateChangeEvents + if event.GetEventType() != statechange.TaskEvent { + continue + } + taskEvent := event.(api.TaskStateChange) + if taskEvent.TaskARN == task.Arn && taskEvent.Status >= apitaskstatus.TaskStopped { + return + } + } +} + +// waitForTaskStoppedByCheckStatus verify the task is in stopped status by checking the KnownStatusUnsafe field of the task +func waitForTaskStoppedByCheckStatus(task *apitask.Task) { + for { + if task.GetKnownStatus() == apitaskstatus.TaskStopped { + return + } + time.Sleep(50 * time.Millisecond) + } +} + +// validateContainerRunWorkflow validates the container create and start workflow +// for a test task without any resources (such as ENIs). 
+// +// The createdContainerName channel is used to emit the container name from the +// create operation. It can be used to validate that the name of the container +// removed matches with the generated container name during cleanup operation in the +// test. +func validateContainerRunWorkflow(t *testing.T, + container *apicontainer.Container, + task *apitask.Task, + imageManager *mock_engine.MockImageManager, + client *mock_dockerapi.MockDockerClient, + roleCredentials *credentials.TaskIAMRoleCredentials, + containerEventsWG *sync.WaitGroup, + eventStream chan dockerapi.DockerContainerChangeEvent, + createdContainerName chan<- string, + assertions func(), +) { + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + imageManager.EXPECT().RecordContainerReference(container).Return(nil) + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false) + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) + dockerConfig, err := task.DockerConfig(container, defaultDockerClientAPIVersion) + if err != nil { + t.Fatal(err) + } + if roleCredentials != nil { + // Container config should get updated with this during PostUnmarshalTask + credentialsEndpointEnvValue := roleCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI() + dockerConfig.Env = append(dockerConfig.Env, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI="+credentialsEndpointEnvValue) + } + + v3EndpointID := container.GetV3EndpointID() + if v3EndpointID == "" { + // if container's v3 endpoint id is not specified, set it here so it's not randomly generated + // in execution; and then we can check whether the endpoint's value is expected + v3EndpointID = uuid.New() + container.SetV3EndpointID(v3EndpointID) + metadataEndpointEnvValue := fmt.Sprintf(apicontainer.MetadataURIFormat, v3EndpointID) + dockerConfig.Env = 
append(dockerConfig.Env, "ECS_CONTAINER_METADATA_URI="+metadataEndpointEnvValue) + metadataEndpointEnvValueV4 := fmt.Sprintf(apicontainer.MetadataURIFormatV4, v3EndpointID) + dockerConfig.Env = append(dockerConfig.Env, "ECS_CONTAINER_METADATA_URI_V4="+metadataEndpointEnvValueV4) + } + // Container config should get updated with this during CreateContainer + dockerConfig.Labels["com.amazonaws.ecs.task-arn"] = task.Arn + dockerConfig.Labels["com.amazonaws.ecs.container-name"] = container.Name + dockerConfig.Labels["com.amazonaws.ecs.task-definition-family"] = task.Family + dockerConfig.Labels["com.amazonaws.ecs.task-definition-version"] = task.Version + dockerConfig.Labels["com.amazonaws.ecs.cluster"] = "" + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, config *dockercontainer.Config, y interface{}, containerName string, z time.Duration) { + checkDockerConfigsExceptEnv(t, dockerConfig, config) + checkDockerConfigsEnv(t, dockerConfig, config) + // sleep5 task contains only one container. Just assign + // the containerName to createdContainerName + createdContainerName <- containerName + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}) + defaultConfig := config.DefaultConfig() + client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}) + assertions() +} + +// checkDockerConfigsExceptEnv checks whether the contents in the docker config are expected +// except for the Env field. 
Checking for Env field is seperated because when agent converts +// its container config to docker config, it iterates over the container's env map and +// append them into docker config's env slice. So the sequence for the env slice is undetermined, +// and it needs other logic to check equality. +func checkDockerConfigsExceptEnv(t *testing.T, expectedConfig *dockercontainer.Config, config *dockercontainer.Config) { + expectedConfigEnvList := expectedConfig.Env + configEnvList := config.Env + expectedConfig.Env = nil + config.Env = nil + + assert.True(t, reflect.DeepEqual(expectedConfig, config), + "Mismatch in container config; expected: %v, got: %v", expectedConfig, config) + + expectedConfig.Env = expectedConfigEnvList + config.Env = configEnvList +} + +// checkDockerConfigsEnv checks whether two docker configs have same list of environment +// variables and each has same value, ignoring the order. +func checkDockerConfigsEnv(t *testing.T, expectedConfig *dockercontainer.Config, config *dockercontainer.Config) { + expectedConfigEnvList := expectedConfig.Env + configEnvList := config.Env + + assert.ElementsMatchf(t, expectedConfigEnvList, configEnvList, + "Mismatch in container config env; expected: %v, got: %v", expectedConfigEnvList, configEnvList) +} + +// addTaskToEngine adds a test task to the engine. It waits for a task to reach the +// steady state before returning. 
Hence, this should not be used for tests, which +// expect container stops to be invoked before a task reaches its steady state +func addTaskToEngine(t *testing.T, + ctx context.Context, + taskEngine TaskEngine, + sleepTask *apitask.Task, + mockTime *mock_ttime.MockTime, + createStartEventsReported *sync.WaitGroup) { + // steadyStateCheckWait is used to force the test to wait until the steady-state check + // has been invoked at least once + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + taskEngine.AddTask(sleepTask) + waitForRunningEvents(t, taskEngine.StateChangeEvents()) + + // Wait for all events to be consumed prior to moving it towards stopped; we + // don't want to race the below with these or we'll end up with the "going + // backwards in state" stop and we haven't 'expect'd for that + + // Wait for container create and start events to be processed + createStartEventsReported.Wait() +} + +func createDockerEvent(status apicontainerstatus.ContainerStatus) dockerapi.DockerContainerChangeEvent { + meta := dockerapi.DockerContainerMetadata{ + DockerID: containerID, + } + return dockerapi.DockerContainerChangeEvent{Status: status, DockerContainerMetadata: meta} +} + +// waitForRunningEvents waits for a task to emit 'RUNNING' events for a container +// and the task +func waitForRunningEvents(t *testing.T, stateChangeEvents <-chan statechange.Event) { + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, + "Expected container to be RUNNING") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, + "Expected task to be RUNNING") + + select { + case <-stateChangeEvents: + t.Fatal("Should be out of events") + default: + } +} + +// waitForStopEvents waits for a task to emit 'STOPPED' events for a container +// and the task +func waitForStopEvents(t *testing.T, 
stateChangeEvents <-chan statechange.Event, verifyExitCode, execEnabled bool) { + if execEnabled { + event := <-stateChangeEvents + if masc := event.(api.ManagedAgentStateChange); masc.Status != apicontainerstatus.ManagedAgentStopped { + t.Fatal("Expected managed agent to stop first") + } + } + + event := <-stateChangeEvents + cont := event.(api.ContainerStateChange) + if cont.Status != apicontainerstatus.ContainerStopped { + t.Fatal("Expected container to stop first") + } + // Verify the exit code on the success path. Previously this assertion was + // nested after t.Fatal inside the failure branch and was unreachable dead code; + // also pass (expected, actual) in testify's argument order. + if verifyExitCode { + assert.Equal(t, 1, *cont.ExitCode, "Exit code should be present") + } + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, "Expected task to be STOPPED") + + select { + case <-stateChangeEvents: + t.Fatal("Should be out of events") + default: + } +} + +func waitForContainerHealthStatus(t *testing.T, testTask *apitask.Task) { + ctx, cancel := context.WithTimeout(context.TODO(), waitTaskStateChangeDuration) + defer cancel() + + for { + select { + case <-ctx.Done(): + t.Error("Timed out waiting for container health status") + default: + healthStatus := testTask.Containers[0].GetHealthStatus() + if healthStatus.Status.BackendStatus() == "UNKNOWN" { + time.Sleep(time.Second) + continue + } + return + } + } +} + +// sorts through stateChangeEvents to locate and assert that the ManagedAgent event matches the expectedManagedAgent event.
+// expectContainerEvent field is a boolean to allow us to ignore an expected empty channel +func checkManagedAgentEvents(t *testing.T, expectContainerEvent bool, stateChangeEvents <-chan statechange.Event, + expectedManagedAgent apicontainer.ManagedAgent, waitDone chan<- struct{}) { + if expectContainerEvent { + for event := range stateChangeEvents { + if managedAgentEvent, ok := event.(api.ManagedAgentStateChange); ok { + // there is currently only ever a single managed agent + assert.Equal(t, expectedManagedAgent.Status, managedAgentEvent.Status, + "expected managedAgent container state change event did not match actual event") + assert.Equal(t, expectedManagedAgent.Reason, managedAgentEvent.Reason, + "expected managedAgent container state change event reports the wrong reason") + close(waitDone) + return + } + seelog.Debugf("processed errant event: %v", event) + } + } else { + assert.Empty(t, stateChangeEvents, "expected empty stateChangeEvents channel, but found an event") + close(waitDone) + } +} + +func enableExecCommandAgentForContainer(container *apicontainer.Container, state apicontainer.ManagedAgentState) { + container.ManagedAgentsUnsafe = []apicontainer.ManagedAgent{ + { + Name: execcmd.ExecuteCommandAgentName, + Properties: make(map[string]string), + ManagedAgentState: state, + }, + } +} diff --git a/agent/engine/common_unix_integ_test.go b/agent/engine/common_unix_integ_test.go new file mode 100644 index 00000000000..de895d7aea1 --- /dev/null +++ b/agent/engine/common_unix_integ_test.go @@ -0,0 +1,43 @@ +// +build linux +// +build sudo integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "os" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" +) + +const ( + testRegistryImage = "127.0.0.1:51670/amazon/amazon-ecs-netkitten:latest" + dockerEndpoint = "unix:///var/run/docker.sock" +) + +func createTestContainer() *apicontainer.Container { + return createTestContainerWithImageAndName(testRegistryImage, "netcat") +} + +// getLongRunningCommand returns the command that keeps the container running for the container +// that uses the default integ test image (amazon/amazon-ecs-netkitten for unix) +func getLongRunningCommand() []string { + return []string{"-loop=true"} +} + +func isDockerRunning() bool { + _, err := os.Stat("/var/run/docker.sock") + return err == nil +} diff --git a/agent/engine/data.go b/agent/engine/data.go new file mode 100644 index 00000000000..cb500f2c4d9 --- /dev/null +++ b/agent/engine/data.go @@ -0,0 +1,262 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + "github.com/aws/amazon-ecs-agent/agent/utils" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// LoadState populates the task engine state with data in db. +func (engine *DockerTaskEngine) LoadState() error { + if err := engine.loadTasks(); err != nil { + return err + } + + if err := engine.loadContainers(); err != nil { + return err + } + + if err := engine.loadImageStates(); err != nil { + return err + } + + return engine.loadENIAttachments() +} + +func (engine *DockerTaskEngine) loadTasks() error { + tasks, err := engine.dataClient.GetTasks() + if err != nil { + return err + } + + for _, task := range tasks { + engine.state.AddTask(task) + + // Populate ip <-> task mapping if task has a local ip. This mapping is needed for serving v2 task metadata. + if ip := task.GetLocalIPAddress(); ip != "" { + engine.state.AddTaskIPAddress(ip, task.Arn) + } + } + return nil +} + +func (engine *DockerTaskEngine) loadContainers() error { + containers, err := engine.dataClient.GetContainers() + if err != nil { + return err + } + + for _, container := range containers { + task, ok := engine.state.TaskByArn(container.Container.GetTaskARN()) + if !ok { + // A task is saved to task table before its containers saved to container table. It is not expected + // that we have a container from container table whose task is not in the task table. 
+ return errors.Errorf("did not find the task of container %s: %s", container.Container.Name, + container.Container.GetTaskARN()) + } + if container.Container.GetKnownStatus() == apicontainerstatus.ContainerPulled { + engine.state.AddPulledContainer(container, task) + } else { + engine.state.AddContainer(container, task) + } + } + + // Update containers in task from data in container table. It stores more updated data of container + // than the task table. + tasks := engine.state.AllTasks() + for _, task := range tasks { + // A task's container map contains containers loaded from container table. It is ok + // that there isn't any container in the map, because we first save a container to container table + // when finished a transition on it, and before that the container is just stored as part of the task. + dockerContainers, _ := engine.state.ContainerMapByArn(task.Arn) + pulledContainers, _ := engine.state.PulledContainerMapByArn(task.Arn) + + for idx, container := range task.Containers { + for _, pulledContainer := range pulledContainers { + if container.Name == pulledContainer.Container.Name { + task.Containers[idx] = pulledContainer.Container + } + } + for _, dockerContainer := range dockerContainers { + if container.Name == dockerContainer.Container.Name { + task.Containers[idx] = dockerContainer.Container + } + } + } + } + return nil +} + +func (engine *DockerTaskEngine) loadImageStates() error { + images, err := engine.dataClient.GetImageStates() + if err != nil { + return err + } + + for _, image := range images { + engine.state.AddImageState(image) + } + return nil +} + +func (engine *DockerTaskEngine) loadENIAttachments() error { + eniAttachments, err := engine.dataClient.GetENIAttachments() + if err != nil { + return err + } + + for _, eniAttachment := range eniAttachments { + engine.state.AddENIAttachment(eniAttachment) + } + return nil +} + +// SaveState saves all the data in task engine state to db. 
+func (engine *DockerTaskEngine) SaveState() error { + state := engine.state + ensureCompatibleWithBoltDB(state) + + for _, task := range state.AllTasks() { + if err := engine.dataClient.SaveTask(task); err != nil { + return err + } + } + + for _, containerID := range state.GetAllContainerIDs() { + container, ok := state.ContainerByID(containerID) + if !ok { + return errors.Errorf("container not found: %s", containerID) + } + if err := engine.dataClient.SaveDockerContainer(container); err != nil { + return err + } + } + + for _, imageState := range state.AllImageStates() { + if err := engine.dataClient.SaveImageState(imageState); err != nil { + return err + } + } + + for _, eniAttachment := range state.AllENIAttachments() { + if err := engine.dataClient.SaveENIAttachment(eniAttachment); err != nil { + return err + } + } + return nil +} + +// ensureCompatibleWithBoltDB ensures that the data in the state is compatible with what we need +// in order to save into boltdb. Specifically, we need: (1) container to have its taskARN field populated; +// (2) if task has a local ip address, it's stored in the localIPAddressUnsafe field. +// Task launched with new agent will have those fields populated already, but task loaded +// from state file generated by old agent will not, which is why this method is needed. 
+func ensureCompatibleWithBoltDB(state dockerstate.TaskEngineState) { + tasks := state.AllTasks() + for _, t := range tasks { + for _, c := range t.Containers { + c.SetTaskARN(t.Arn) + } + + addr, ok := state.GetIPAddressByTaskARN(t.Arn) + if ok { + t.SetLocalIPAddress(addr) + } + } +} + +func (engine *DockerTaskEngine) saveTaskData(task *apitask.Task) { + err := engine.dataClient.SaveTask(task) + if err != nil { + seelog.Errorf("Failed to save data for task %s: %v", task.Arn, err) + } +} + +func (engine *DockerTaskEngine) saveContainerData(container *apicontainer.Container) { + err := engine.dataClient.SaveContainer(container) + if err != nil { + seelog.Errorf("Failed to save data for container %s: %v", container.Name, err) + } +} + +func (engine *DockerTaskEngine) saveDockerContainerData(container *apicontainer.DockerContainer) { + err := engine.dataClient.SaveDockerContainer(container) + if err != nil { + seelog.Errorf("Failed to save data for docker container %s: %v", container.Container.Name, err) + } +} + +func (engine *DockerTaskEngine) removeTaskData(task *apitask.Task) { + id, err := utils.GetTaskID(task.Arn) + if err != nil { + seelog.Errorf("Failed to get task id from task ARN %s: %v", task.Arn, err) + return + } + err = engine.dataClient.DeleteTask(id) + if err != nil { + seelog.Errorf("Failed to remove data for task %s: %v", task.Arn, err) + } + + for _, c := range task.Containers { + id, err := data.GetContainerID(c) + if err != nil { + seelog.Errorf("Failed to get container id from container %s: %v", c.Name, err) + continue + } + err = engine.dataClient.DeleteContainer(id) + if err != nil { + seelog.Errorf("Failed to remove data for container %s: %v", c.Name, err) + } + } +} + +func (engine *DockerTaskEngine) removeENIAttachmentData(mac string) { + attachmentToRemove, ok := engine.state.ENIByMac(mac) + if !ok { + seelog.Warnf("Unable to retrieve ENI Attachment for mac address %s: ", mac) + return + } + attachmentId, err := 
utils.GetENIAttachmentId(attachmentToRemove.AttachmentARN) + if err != nil { + seelog.Errorf("Failed to get attachment id for %s: %v", attachmentToRemove.AttachmentARN, err) + } else { + err = engine.dataClient.DeleteENIAttachment(attachmentId) + if err != nil { + seelog.Errorf("Failed to remove data for eni attachment %s: %v", attachmentId, err) + } + } +} + +func (imageManager *dockerImageManager) saveImageStateData(imageState *image.ImageState) { + err := imageManager.dataClient.SaveImageState(imageState) + if err != nil { + seelog.Errorf("Failed to save data for image state %s:, %v", imageState.GetImageID(), err) + } +} + +func (imageManager *dockerImageManager) removeImageStateData(imageId string) { + err := imageManager.dataClient.DeleteImageState(imageId) + if err != nil { + seelog.Errorf("Failed to remove data for image state %s:, %v", imageId, err) + } +} diff --git a/agent/engine/data_test.go b/agent/engine/data_test.go new file mode 100644 index 00000000000..6485c2050a9 --- /dev/null +++ b/agent/engine/data_test.go @@ -0,0 +1,291 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "io/ioutil" + "os" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testContainerName = "test-name" + testImageId = "test-imageId" + testMac = "test-mac" + testAttachmentArn = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" + testDockerID = "test-docker-id" + testTaskIP = "10.1.2.3" +) + +var ( + testContainer = &apicontainer.Container{ + Name: testContainerName, + TaskARNUnsafe: testTaskARN, + } + testPulledContainer = &apicontainer.Container{ + Name: testContainerName + "-pulled", + TaskARNUnsafe: testTaskARN, + KnownStatusUnsafe: apicontainerstatus.ContainerPulled, + } + testDockerContainer = &apicontainer.DockerContainer{ + DockerID: testDockerID, + Container: testContainer, + } + testPulledDockerContainer = &apicontainer.DockerContainer{ + DockerID: testDockerID, + Container: testPulledContainer, + } + testTask = &apitask.Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{testContainer}, + LocalIPAddressUnsafe: testTaskIP, + } + testTaskWithPulledContainer = &apitask.Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{testContainer, testPulledContainer}, + LocalIPAddressUnsafe: testTaskIP, + } + testImageState = &image.ImageState{ + Image: testImage, + PullSucceeded: false, + } + testImage = &image.Image{ + ImageID: testImageId, + } + + testENIAttachment = &eni.ENIAttachment{ + AttachmentARN: testAttachmentArn, + AttachStatusSent: false, + MACAddress: testMac, + } +) + +func newTestDataClient(t *testing.T) (data.Client, 
func()) { + testDir, err := ioutil.TempDir("", "agent_engine_unit_test") + require.NoError(t, err) + + testClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} + +func TestLoadState(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + state: dockerstate.NewTaskEngineState(), + dataClient: dataClient, + } + require.NoError(t, dataClient.SaveTask(testTaskWithPulledContainer)) + testDockerContainer.Container.SetKnownStatus(apicontainerstatus.ContainerRunning) + require.NoError(t, dataClient.SaveDockerContainer(testDockerContainer)) + require.NoError(t, dataClient.SaveDockerContainer(testPulledDockerContainer)) + require.NoError(t, dataClient.SaveENIAttachment(testENIAttachment)) + require.NoError(t, dataClient.SaveImageState(testImageState)) + + require.NoError(t, engine.LoadState()) + task, ok := engine.state.TaskByArn(testTaskARN) + assert.True(t, ok) + pulledContainers, ok := engine.state.PulledContainerMapByArn(testTaskARN) + assert.True(t, ok) + assert.Len(t, pulledContainers, 1) + // Also check that the container in the task has the updated status from container table. + assert.Equal(t, apicontainerstatus.ContainerRunning, task.Containers[0].GetKnownStatus()) + assert.Equal(t, apicontainerstatus.ContainerPulled, task.Containers[1].GetKnownStatus()) + _, ok = engine.state.ContainerByID(testDockerID) + assert.True(t, ok) + assert.Len(t, engine.state.AllImageStates(), 1) + assert.Len(t, engine.state.AllENIAttachments(), 1) + + // Check ip <-> task arn mapping is loaded in state. 
+ ip, ok := engine.state.GetIPAddressByTaskARN(testTaskARN) + require.True(t, ok) + assert.Equal(t, testTaskIP, ip) + arn, ok := engine.state.GetTaskByIPAddress(testTaskIP) + require.True(t, ok) + assert.Equal(t, testTaskARN, arn) +} + +func TestSaveState(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + state: dockerstate.NewTaskEngineState(), + dataClient: dataClient, + } + engine.state.AddTask(testTask) + engine.state.AddContainer(testDockerContainer, testTask) + engine.state.AddImageState(testImageState) + engine.state.AddENIAttachment(testENIAttachment) + + require.NoError(t, engine.SaveState()) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + + images, err := dataClient.GetImageStates() + require.NoError(t, err) + assert.Len(t, images, 1) + + eniAttachments, err := dataClient.GetENIAttachments() + require.NoError(t, err) + assert.Len(t, eniAttachments, 1) +} + +func TestSaveStateEnsureBoltDBCompatibility(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + state: dockerstate.NewTaskEngineState(), + dataClient: dataClient, + } + + // Save a container without task ARN populated in taskARNUnsafe field and a task without ip address + // populated in localIPAddressUnsafe field. Check they are populated upon saving to boltdb. 
+ testContainer := &apicontainer.Container{ + Name: testContainerName, + } + testDockerContainer := &apicontainer.DockerContainer{ + DockerID: testDockerID, + Container: testContainer, + } + testTask := &apitask.Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{testContainer}, + } + engine.state.AddTask(testTask) + engine.state.AddContainer(testDockerContainer, testTask) + engine.state.AddTaskIPAddress(testTaskIP, testTaskARN) + + require.NoError(t, engine.SaveState()) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + assert.Equal(t, testTaskIP, tasks[0].GetLocalIPAddress()) + + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + assert.Equal(t, testTaskARN, containers[0].Container.GetTaskARN()) +} + +func TestSaveAndRemoveTaskData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + dataClient: dataClient, + } + engine.saveTaskData(testTask) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + + engine.removeTaskData(testTask) + tasks, err = dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 0) +} + +func TestSaveContainerData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + dataClient: dataClient, + } + engine.saveContainerData(testContainer) + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) +} + +func TestSaveDockerContainerData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + dataClient: dataClient, + } + engine.saveDockerContainerData(&apicontainer.DockerContainer{ + DockerName: "test-docker-name", + Container: testContainer, + }) + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) +} + +func 
TestSaveAndRemoveImageStateData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + imageManager := &dockerImageManager{ + dataClient: dataClient, + } + imageManager.saveImageStateData(testImageState) + res, err := dataClient.GetImageStates() + require.NoError(t, err) + assert.Len(t, res, 1) + + imageManager.removeImageStateData(testImageId) + res, err = dataClient.GetImageStates() + require.NoError(t, err) + assert.Len(t, res, 0) +} + +func TestRemoveENIAttachmentData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + engine := &DockerTaskEngine{ + state: dockerstate.NewTaskEngineState(), + dataClient: dataClient, + } + + engine.state.AddENIAttachment(testENIAttachment) + dataClient.SaveENIAttachment(testENIAttachment) + res, err := dataClient.GetENIAttachments() + require.NoError(t, err) + assert.Len(t, res, 1) + + engine.removeENIAttachmentData(testMac) + res, err = dataClient.GetENIAttachments() + require.NoError(t, err) + assert.Len(t, res, 0) +} diff --git a/agent/engine/default.go b/agent/engine/default.go new file mode 100644 index 00000000000..27ef92eb28c --- /dev/null +++ b/agent/engine/default.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package engine contains code for interacting with container-running backends and handling events from them. +// It supports Docker as the sole task engine type. 
+package engine + +import ( + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/taskresource" +) + +// NewTaskEngine returns a default TaskEngine +func NewTaskEngine(cfg *config.Config, client dockerapi.DockerClient, + credentialsManager credentials.Manager, + containerChangeEventStream *eventstream.EventStream, + imageManager ImageManager, state dockerstate.TaskEngineState, + metadataManager containermetadata.Manager, + resourceFields *taskresource.ResourceFields, + execCmdMgr execcmd.Manager) TaskEngine { + + taskEngine := NewDockerTaskEngine(cfg, client, credentialsManager, + containerChangeEventStream, imageManager, + state, metadataManager, resourceFields, execCmdMgr) + + return taskEngine +} diff --git a/agent/engine/dependencygraph/graph.go b/agent/engine/dependencygraph/graph.go new file mode 100644 index 00000000000..4444a0dcb98 --- /dev/null +++ b/agent/engine/dependencygraph/graph.go @@ -0,0 +1,510 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dependencygraph + +import ( + "fmt" + "strings" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + log "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + // CreateCondition ensures that a container progresses to next state only when dependency container has started + createCondition = "CREATE" + // StartCondition ensures that a container progresses to next state only when dependency container is running + startCondition = "START" + // SuccessCondition ensures that a container progresses to next state only when + // dependency container has successfully completed with exit code 0 + successCondition = "SUCCESS" + // CompleteCondition ensures that a container progresses to next state only when dependency container has completed + completeCondition = "COMPLETE" + // HealthyCondition ensures that a container progresses to next state only when dependency container is healthy + healthyCondition = "HEALTHY" + // 0 is the standard exit code for success. 
+ successExitCode = 0 +) + +var ( + // CredentialsNotResolvedErr is the error where a container needs to wait for + // credentials before it can process by agent + CredentialsNotResolvedErr = errors.New("dependency graph: container execution credentials not available") + // DependentContainerNotResolvedErr is the error where a dependent container isn't in expected state + DependentContainerNotResolvedErr = errors.New("dependency graph: dependent container not in expected state") + // ContainerPastDesiredStatusErr is the error where the container status is bigger than desired status + ContainerPastDesiredStatusErr = errors.New("container transition: container status is equal or greater than desired status") + // ErrContainerDependencyNotResolved is when the container's dependencies + // on other containers are not resolved + ErrContainerDependencyNotResolved = errors.New("dependency graph: dependency on containers not resolved") + // ErrResourceDependencyNotResolved is when the container's dependencies + // on task resources are not resolved + ErrResourceDependencyNotResolved = errors.New("dependency graph: dependency on resources not resolved") + // ResourcePastDesiredStatusErr is the error where the task resource known status is bigger than desired status + ResourcePastDesiredStatusErr = errors.New("task resource transition: task resource status is equal or greater than desired status") + // ErrContainerDependencyNotResolvedForResource is when the resource's dependencies + // on other containers are not resolved + ErrContainerDependencyNotResolvedForResource = errors.New("dependency graph: resource's dependency on containers not resolved") +) + +// ValidDependencies takes a task and verifies that it is possible to allow all +// containers within it to reach the desired status by proceeding in some +// order. 
+func ValidDependencies(task *apitask.Task, cfg *config.Config) bool { + unresolved := make([]*apicontainer.Container, len(task.Containers)) + resolved := make([]*apicontainer.Container, 0, len(task.Containers)) + + copy(unresolved, task.Containers) + +OuterLoop: + for len(unresolved) > 0 { + for i, tryResolve := range unresolved { + if dependenciesCanBeResolved(tryResolve, resolved, cfg) { + resolved = append(resolved, tryResolve) + unresolved = append(unresolved[:i], unresolved[i+1:]...) + // Break out of the inner loop now that we modified the slice + // we're looping over + continue OuterLoop + } + } + log.Warnf("Could not resolve some containers: [%v] for task %v", unresolved, task) + return false + } + + return true +} + +// DependenciesCanBeResolved verifies that it's possible to transition a `target` +// given a group of already handled containers, `by`. Essentially, it asks "is +// `target` resolved by `by`". It assumes that everything in `by` has reached +// DesiredStatus and that `target` is also trying to get there +// +// This function is used for verifying that a state should be resolvable, not +// for actually deciding what to do. `DependenciesAreResolved` should be used for +// that purpose instead. +func dependenciesCanBeResolved(target *apicontainer.Container, by []*apicontainer.Container, cfg *config.Config) bool { + nameMap := make(map[string]*apicontainer.Container) + for _, cont := range by { + nameMap[cont.Name] = cont + } + + if _, err := verifyContainerOrderingStatusResolvable(target, nameMap, cfg, containerOrderingDependenciesCanResolve); err != nil { + return false + } + return verifyStatusResolvable(target, nameMap, target.SteadyStateDependencies, onSteadyStateCanResolve) +} + +// DependenciesAreResolved validates that the `target` container can be +// transitioned given the current known state of the containers in `by`. If +// this function returns true, `target` should be technically able to launch +// without issues. 
+// Transitions are between known statuses (whether the container can move to +// the next known status), not desired statuses; the desired status typically +// is either RUNNING or STOPPED. +func DependenciesAreResolved(target *apicontainer.Container, + by []*apicontainer.Container, + id string, + manager credentials.Manager, + resources []taskresource.TaskResource, + cfg *config.Config) (*apicontainer.DependsOn, error) { + if !executionCredentialsResolved(target, id, manager) { + return nil, CredentialsNotResolvedErr + } + + nameMap := make(map[string]*apicontainer.Container) + for _, cont := range by { + nameMap[cont.Name] = cont + } + neededVolumeContainers := make([]string, len(target.VolumesFrom)) + for i, volume := range target.VolumesFrom { + neededVolumeContainers[i] = volume.SourceContainer + } + + resourcesMap := make(map[string]taskresource.TaskResource) + for _, resource := range resources { + resourcesMap[resource.GetName()] = resource + } + + if blocked, err := verifyContainerOrderingStatusResolvable(target, nameMap, cfg, containerOrderingDependenciesIsResolved); err != nil { + return blocked, err + } + + if !verifyStatusResolvable(target, nameMap, target.SteadyStateDependencies, onSteadyStateIsResolved) { + return nil, DependentContainerNotResolvedErr + } + if err := verifyTransitionDependenciesResolved(target, nameMap, resourcesMap); err != nil { + return nil, err + } + + // If the target is desired terminal and isn't stopped, we should validate that it doesn't have any containers + // that are dependent on it that need to shut down first. + if target.DesiredTerminal() && !target.KnownTerminal() { + if err := verifyShutdownOrder(target, nameMap); err != nil { + return nil, err + } + } + + return nil, nil +} + +// TaskResourceDependenciesAreResolved validates that the `target` resource can be +// transitioned given the current known state of the containers in `by`. 
If +// this function returns true, `target` should be technically able to transit +// without issues. +// Transitions are between known statuses (whether the resource can move to +// the next known status), not desired statuses; the desired status typically +// is either CREATED or REMOVED. +func TaskResourceDependenciesAreResolved(target taskresource.TaskResource, + by []*apicontainer.Container) error { + + nameMap := make(map[string]*apicontainer.Container) + for _, cont := range by { + nameMap[cont.Name] = cont + } + + if !verifyContainerDependenciesResolvedForResource(target, nameMap) { + return ErrContainerDependencyNotResolvedForResource + } + + return nil +} + +func verifyContainerDependenciesResolvedForResource(target taskresource.TaskResource, existingContainers map[string]*apicontainer.Container) bool { + targetNext := target.NextKnownState() + // For task resource without dependency map, containerDependencies will just be nil + containerDependencies := target.GetContainerDependencies(targetNext) + for _, containerDependency := range containerDependencies { + dep, exists := existingContainers[containerDependency.ContainerName] + if !exists { + return false + } + if dep.GetKnownStatus() < containerDependency.SatisfiedStatus { + return false + } + } + return true +} + +func executionCredentialsResolved(target *apicontainer.Container, id string, manager credentials.Manager) bool { + if target.GetKnownStatus() >= apicontainerstatus.ContainerPulled || + !target.ShouldPullWithExecutionRole() || + target.GetDesiredStatus() >= apicontainerstatus.ContainerStopped { + return true + } + + _, ok := manager.GetTaskCredentials(id) + return ok +} + +// verifyStatusResolvable validates that `target` can be resolved given that +// target depends on `dependencies` (which are container names) and there are +// `existingContainers` (map from name to container). The `resolves` function +// passed should return true if the named container is resolved. 
+func verifyStatusResolvable(target *apicontainer.Container, existingContainers map[string]*apicontainer.Container, + dependencies []string, resolves func(*apicontainer.Container, *apicontainer.Container) bool) bool { + targetGoal := target.GetDesiredStatus() + if targetGoal != target.GetSteadyStateStatus() && targetGoal != apicontainerstatus.ContainerCreated { + // A container can always stop, die, or reach whatever other state it + // wants regardless of what dependencies it has + return true + } + + for _, dependency := range dependencies { + maybeResolves, exists := existingContainers[dependency] + if !exists { + return false + } + if !resolves(target, maybeResolves) { + return false + } + } + return true +} + +// verifyContainerOrderingStatusResolvable validates that `target` can be resolved given that +// the dependsOn containers are resolved and there are `existingContainers` +// (map from name to container). The `resolves` function passed should return true if the named container is resolved. 
+ +func verifyContainerOrderingStatusResolvable(target *apicontainer.Container, existingContainers map[string]*apicontainer.Container, + cfg *config.Config, resolves func(*apicontainer.Container, *apicontainer.Container, string, *config.Config) bool) (*apicontainer.DependsOn, error) { + + targetGoal := target.GetDesiredStatus() + targetKnown := target.GetKnownStatus() + if targetGoal != target.GetSteadyStateStatus() && targetGoal != apicontainerstatus.ContainerCreated { + // A container can always stop, die, or reach whatever other state it + // wants regardless of what dependencies it has + return nil, nil + } + + var blockedDependency *apicontainer.DependsOn + + targetDependencies := target.GetDependsOn() + for _, dependency := range targetDependencies { + dependencyContainer, ok := existingContainers[dependency.ContainerName] + if !ok { + return nil, fmt.Errorf("dependency graph: container ordering dependency [%v] for target [%v] does not exist.", dependencyContainer, target) + } + + // We want to check whether the dependency container has timed out only if target has not been created yet. + // If the target is already created, then everything is normal and dependency can be and is resolved. + // However, if dependency container has already stopped, then it cannot time out. 
+ if targetKnown < apicontainerstatus.ContainerCreated && dependencyContainer.GetKnownStatus() != apicontainerstatus.ContainerStopped { + if hasDependencyTimedOut(dependencyContainer, dependency.Condition) { + return nil, fmt.Errorf("dependency graph: container ordering dependency [%v] for target [%v] has timed out.", dependencyContainer, target) + } + } + + // We want to fail fast if the dependency container has stopped but did not exit successfully because target container + // can then never progress to its desired state when the dependency condition is 'SUCCESS' + if dependency.Condition == successCondition && dependencyContainer.GetKnownStatus() == apicontainerstatus.ContainerStopped && + !hasDependencyStoppedSuccessfully(dependencyContainer) { + return nil, fmt.Errorf("dependency graph: failed to resolve container ordering dependency [%v] for target [%v] as dependency did not exit successfully.", dependencyContainer, target) + } + + // For any of the dependency conditions - START/COMPLETE/SUCCESS/HEALTHY, if the dependency container has + // not started and will not start in the future, this dependency can never be resolved. 
+ if dependencyContainer.HasNotAndWillNotStart() { + return nil, fmt.Errorf("dependency graph: failed to resolve container ordering dependency [%v] for target [%v] because dependency will never start", dependencyContainer, target) + } + + if !resolves(target, dependencyContainer, dependency.Condition, cfg) { + blockedDependency = &dependency + } + } + if blockedDependency != nil { + return blockedDependency, fmt.Errorf("dependency graph: failed to resolve the container ordering dependency [%v] for target [%v]", blockedDependency, target) + } + return nil, nil +} + +func verifyTransitionDependenciesResolved(target *apicontainer.Container, + existingContainers map[string]*apicontainer.Container, + existingResources map[string]taskresource.TaskResource) error { + + if !verifyContainerDependenciesResolved(target, existingContainers) { + return ErrContainerDependencyNotResolved + } + if !verifyResourceDependenciesResolved(target, existingResources) { + return ErrResourceDependencyNotResolved + } + return nil +} + +func verifyContainerDependenciesResolved(target *apicontainer.Container, existingContainers map[string]*apicontainer.Container) bool { + targetNext := target.GetNextKnownStateProgression() + containerDependencies := target.TransitionDependenciesMap[targetNext].ContainerDependencies + for _, containerDependency := range containerDependencies { + dep, exists := existingContainers[containerDependency.ContainerName] + if !exists { + return false + } + if dep.GetKnownStatus() < containerDependency.SatisfiedStatus { + return false + } + } + return true +} + +func verifyResourceDependenciesResolved(target *apicontainer.Container, existingResources map[string]taskresource.TaskResource) bool { + targetNext := target.GetNextKnownStateProgression() + resourceDependencies := target.TransitionDependenciesMap[targetNext].ResourceDependencies + for _, resourceDependency := range resourceDependencies { + dep, exists := existingResources[resourceDependency.Name] + if !exists { 
+ return false + } + if dep.GetKnownStatus() < resourceDependency.GetRequiredStatus() { + return false + } + } + return true +} + +func containerOrderingDependenciesCanResolve(target *apicontainer.Container, + dependsOnContainer *apicontainer.Container, + dependsOnStatus string, + cfg *config.Config) bool { + + targetDesiredStatus := target.GetDesiredStatus() + dependsOnContainerDesiredStatus := dependsOnContainer.GetDesiredStatus() + + switch dependsOnStatus { + case createCondition: + return verifyContainerOrderingStatus(dependsOnContainer) + + case startCondition: + if targetDesiredStatus == apicontainerstatus.ContainerCreated { + // The 'target' container desires to be moved to 'Created' state. + // Allow this only if the desired status of the dependency container is + // 'Created' or if the linked container is in 'steady state' + return dependsOnContainerDesiredStatus == apicontainerstatus.ContainerCreated || + dependsOnContainerDesiredStatus == dependsOnContainer.GetSteadyStateStatus() + + } else if targetDesiredStatus == target.GetSteadyStateStatus() { + // The 'target' container desires to be moved to its 'steady' state. 
+ // Allow this only if the dependency container also desires to be in 'steady state' + return dependsOnContainerDesiredStatus == dependsOnContainer.GetSteadyStateStatus() + } + return false + + case successCondition: + var dependencyStoppedSuccessfully bool + + if dependsOnContainer.GetKnownExitCode() != nil { + dependencyStoppedSuccessfully = dependsOnContainer.GetKnownStatus() == apicontainerstatus.ContainerStopped && + *dependsOnContainer.GetKnownExitCode() == successExitCode + } + return verifyContainerOrderingStatus(dependsOnContainer) || dependencyStoppedSuccessfully + + case completeCondition: + return verifyContainerOrderingStatus(dependsOnContainer) + + case healthyCondition: + return verifyContainerOrderingStatus(dependsOnContainer) && dependsOnContainer.HealthStatusShouldBeReported() + + default: + return false + } +} + +func containerOrderingDependenciesIsResolved(target *apicontainer.Container, + dependsOnContainer *apicontainer.Container, + dependsOnStatus string, + cfg *config.Config) bool { + + targetDesiredStatus := target.GetDesiredStatus() + targetContainerKnownStatus := target.GetKnownStatus() + dependsOnContainerKnownStatus := dependsOnContainer.GetKnownStatus() + + // The 'target' container desires to be moved to 'Created' or the 'steady' state. + // Allow this only if the environment variable ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT is enabled and + // known status of the `target` container state has not reached to 'Pulled' state; + if cfg.DependentContainersPullUpfront.Enabled() && targetContainerKnownStatus < apicontainerstatus.ContainerPulled { + return true + } + + switch dependsOnStatus { + case createCondition: + // The 'target' container desires to be moved to 'Created' or the 'steady' state. 
+ // Allow this only if the known status of the dependency container state is already started + // i.e it's state is any of 'Created', 'steady state' or 'Stopped' + return dependsOnContainerKnownStatus >= apicontainerstatus.ContainerCreated + + case startCondition: + if targetDesiredStatus == apicontainerstatus.ContainerCreated { + // The 'target' container desires to be moved to 'Created' state. + // Allow this only if the known status of the linked container is + // 'Created' or if the dependency container is in 'steady state' + return dependsOnContainerKnownStatus == apicontainerstatus.ContainerCreated || dependsOnContainer.IsKnownSteadyState() + } else if targetDesiredStatus == target.GetSteadyStateStatus() { + // The 'target' container desires to be moved to its 'steady' state. + // Allow this only if the dependency container is in 'steady state' as well + return dependsOnContainer.IsKnownSteadyState() + } + return false + + case successCondition: + // The 'target' container desires to be moved to 'Created' or the 'steady' state. + // Allow this only if the known status of the dependency container state is stopped with an exit code of 0 + if dependsOnContainer.GetKnownExitCode() != nil { + return dependsOnContainerKnownStatus == apicontainerstatus.ContainerStopped && + *dependsOnContainer.GetKnownExitCode() == successExitCode + } + return false + + case completeCondition: + // The 'target' container desires to be moved to 'Created' or the 'steady' state. 
+ // Allow this only if the known status of the dependency container state is stopped with any exit code + return dependsOnContainerKnownStatus == apicontainerstatus.ContainerStopped && dependsOnContainer.GetKnownExitCode() != nil + + case healthyCondition: + return dependsOnContainer.HealthStatusShouldBeReported() && + dependsOnContainer.GetHealthStatus().Status == apicontainerstatus.ContainerHealthy + + default: + return false + } +} + +func hasDependencyTimedOut(dependOnContainer *apicontainer.Container, dependencyCondition string) bool { + if dependOnContainer.GetStartedAt().IsZero() || dependOnContainer.GetStartTimeout() <= 0 { + return false + } + switch dependencyCondition { + case successCondition, completeCondition, healthyCondition: + return time.Now().After(dependOnContainer.GetStartedAt().Add(dependOnContainer.GetStartTimeout())) + default: + return false + } +} + +func hasDependencyStoppedSuccessfully(dependency *apicontainer.Container) bool { + p := dependency.GetKnownExitCode() + return p != nil && *p == 0 +} + +func verifyContainerOrderingStatus(dependsOnContainer *apicontainer.Container) bool { + dependsOnContainerDesiredStatus := dependsOnContainer.GetDesiredStatus() + // The 'target' container desires to be moved to 'Created' or the 'steady' state. + // Allow this only if the dependency container also desires to be started + // i.e it's status is any of 'Created', 'steady state' or 'Stopped' + return dependsOnContainerDesiredStatus == apicontainerstatus.ContainerCreated || + dependsOnContainerDesiredStatus == apicontainerstatus.ContainerStopped || + dependsOnContainerDesiredStatus == dependsOnContainer.GetSteadyStateStatus() +} + +func verifyShutdownOrder(target *apicontainer.Container, existingContainers map[string]*apicontainer.Container) error { + // We considered adding this to the task state, but this will be at most 45 loops, + // so we err'd on the side of having less state. 
+ missingShutdownDependencies := []string{} + + for _, existingContainer := range existingContainers { + dependencies := existingContainer.GetDependsOn() + for _, dependency := range dependencies { + // If another container declares a dependency on our target, we will want to verify that the container is + // stopped. + if dependency.ContainerName == target.Name { + if !existingContainer.KnownTerminal() { + missingShutdownDependencies = append(missingShutdownDependencies, existingContainer.Name) + } + } + } + } + + if len(missingShutdownDependencies) == 0 { + return nil + } + + return fmt.Errorf("dependency graph: target %s needs other containers stopped before it can stop: [%s]", + target.Name, strings.Join(missingShutdownDependencies, "], [")) +} + +func onSteadyStateCanResolve(target *apicontainer.Container, run *apicontainer.Container) bool { + return target.GetDesiredStatus() >= apicontainerstatus.ContainerCreated && + run.GetDesiredStatus() >= run.GetSteadyStateStatus() +} + +// onSteadyStateIsResolved defines a relationship where a target cannot be +// created until 'dependency' has reached the steady state. Transitions include pulling. +func onSteadyStateIsResolved(target *apicontainer.Container, run *apicontainer.Container) bool { + return target.GetDesiredStatus() >= apicontainerstatus.ContainerCreated && + run.GetKnownStatus() >= run.GetSteadyStateStatus() +} diff --git a/agent/engine/dependencygraph/graph_test.go b/agent/engine/dependencygraph/graph_test.go new file mode 100644 index 00000000000..db966f4a60d --- /dev/null +++ b/agent/engine/dependencygraph/graph_test.go @@ -0,0 +1,1397 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dependencygraph + +import ( + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + mock_taskresource "github.com/aws/amazon-ecs-agent/agent/taskresource/mocks" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func volumeStrToVol(vols []string) []apicontainer.VolumeFrom { + ret := make([]apicontainer.VolumeFrom, len(vols)) + for i, v := range vols { + ret[i] = apicontainer.VolumeFrom{SourceContainer: v, ReadOnly: false} + } + return ret +} + +func steadyStateContainer(name string, dependsOn []apicontainer.DependsOn, desiredState apicontainerstatus.ContainerStatus, steadyState apicontainerstatus.ContainerStatus) *apicontainer.Container { + container := apicontainer.NewContainerWithSteadyState(steadyState) + container.Name = name + container.DependsOnUnsafe = dependsOn + container.DesiredStatusUnsafe = desiredState + return container +} + +func createdContainer(name string, dependsOn []apicontainer.DependsOn, steadyState apicontainerstatus.ContainerStatus) *apicontainer.Container { + container := apicontainer.NewContainerWithSteadyState(steadyState) + container.Name = name + container.DependsOnUnsafe = dependsOn + container.DesiredStatusUnsafe = apicontainerstatus.ContainerCreated + return container +} + +func TestValidDependencies(t *testing.T) { + // Empty task + task := &apitask.Task{} + cfg := 
config.Config{} + resolveable := ValidDependencies(task, &cfg) + assert.True(t, resolveable, "The zero dependency graph should resolve") + + task = &apitask.Task{ + Containers: []*apicontainer.Container{ + { + Name: "redis", + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + } + resolveable = ValidDependencies(task, &cfg) + assert.True(t, resolveable, "One container should resolve trivially") + + // Webserver stack + php := steadyStateContainer("php", []apicontainer.DependsOn{{ContainerName: "db", Condition: startCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + db := steadyStateContainer("db", []apicontainer.DependsOn{{ContainerName: "dbdatavolume", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + dbdata := createdContainer("dbdatavolume", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + webserver := steadyStateContainer("webserver", []apicontainer.DependsOn{{ContainerName: "php", Condition: startCondition}, {ContainerName: "htmldata", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + htmldata := steadyStateContainer("htmldata", []apicontainer.DependsOn{{ContainerName: "sharedcssfiles", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + sharedcssfiles := createdContainer("sharedcssfiles", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + + task = &apitask.Task{ + Containers: []*apicontainer.Container{ + php, db, dbdata, webserver, htmldata, sharedcssfiles, + }, + } + + resolveable = ValidDependencies(task, &cfg) + assert.True(t, resolveable, "The webserver group should resolve just fine") +} + +func TestValidDependenciesWithCycles(t *testing.T) { + // Unresolveable: cycle + task := &apitask.Task{ + Containers: []*apicontainer.Container{ + steadyStateContainer("a", []apicontainer.DependsOn{{ContainerName: 
"b", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning), + steadyStateContainer("b", []apicontainer.DependsOn{{ContainerName: "a", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning), + }, + } + cfg := config.Config{} + resolveable := ValidDependencies(task, &cfg) + assert.False(t, resolveable, "Cycle should not be resolveable") +} + +func TestValidDependenciesWithUnresolvedReference(t *testing.T) { + // Unresolveable, reference doesn't exist + task := &apitask.Task{ + Containers: []*apicontainer.Container{ + steadyStateContainer("php", []apicontainer.DependsOn{{ContainerName: "db", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning), + }, + } + cfg := config.Config{} + resolveable := ValidDependencies(task, &cfg) + assert.False(t, resolveable, "Nonexistent reference shouldn't resolve") +} + +func TestDependenciesAreResolvedWhenSteadyStateIsRunning(t *testing.T) { + task := &apitask.Task{ + Containers: []*apicontainer.Container{ + { + Name: "redis", + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + } + cfg := config.Config{} + _, err := DependenciesAreResolved(task.Containers[0], task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "One container should resolve trivially") + + // Webserver stack + php := steadyStateContainer("php", []apicontainer.DependsOn{{ContainerName: "db", Condition: startCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + db := steadyStateContainer("db", []apicontainer.DependsOn{{ContainerName: "dbdatavolume", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + dbdata := createdContainer("dbdatavolume", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + webserver := steadyStateContainer("webserver", []apicontainer.DependsOn{{ContainerName: "php", 
Condition: startCondition}, {ContainerName: "htmldata", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + htmldata := steadyStateContainer("htmldata", []apicontainer.DependsOn{{ContainerName: "sharedcssfiles", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + sharedcssfiles := createdContainer("sharedcssfiles", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + + task = &apitask.Task{ + Containers: []*apicontainer.Container{ + php, db, dbdata, webserver, htmldata, sharedcssfiles, + }, + } + + _, err = DependenciesAreResolved(php, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Shouldn't be resolved; db isn't running") + + _, err = DependenciesAreResolved(db, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Shouldn't be resolved; dbdatavolume isn't created") + + _, err = DependenciesAreResolved(dbdata, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "data volume with no deps should resolve") + + dbdata.SetKnownStatus(apicontainerstatus.ContainerCreated) + _, err = DependenciesAreResolved(php, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Php shouldn't run, db is not created") + + db.SetKnownStatus(apicontainerstatus.ContainerCreated) + _, err = DependenciesAreResolved(php, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Php shouldn't run, db is not running") + + _, err = DependenciesAreResolved(db, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "db should be resolved, dbdata volume is Created") + db.SetKnownStatus(apicontainerstatus.ContainerRunning) + + _, err = DependenciesAreResolved(php, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "Php should resolve") +} + +func TestRunDependencies(t *testing.T) { + c1 := &apicontainer.Container{ + Name: "a", + KnownStatusUnsafe: apicontainerstatus.ContainerStatusNone, + } + c2 := 
&apicontainer.Container{ + Name: "b", + KnownStatusUnsafe: apicontainerstatus.ContainerStatusNone, + DesiredStatusUnsafe: apicontainerstatus.ContainerCreated, + SteadyStateDependencies: []string{"a"}, + } + task := &apitask.Task{Containers: []*apicontainer.Container{c1, c2}} + cfg := config.Config{} + _, err := DependenciesAreResolved(c2, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Dependencies should not be resolved") + + task.Containers[1].SetDesiredStatus(apicontainerstatus.ContainerRunning) + _, err = DependenciesAreResolved(c2, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Dependencies should not be resolved") + + task.Containers[0].SetKnownStatus(apicontainerstatus.ContainerRunning) + _, err = DependenciesAreResolved(c2, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "Dependencies should be resolved") + + task.Containers[1].SetDesiredStatus(apicontainerstatus.ContainerCreated) + _, err = DependenciesAreResolved(c1, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "Dependencies should be resolved") +} + +func TestRunDependenciesWhenSteadyStateIsResourcesProvisionedForOneContainer(t *testing.T) { + // Webserver stack + php := steadyStateContainer("php", []apicontainer.DependsOn{{ContainerName: "db", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + db := steadyStateContainer("db", []apicontainer.DependsOn{{ContainerName: "dbdatavolume", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + dbdata := createdContainer("dbdatavolume", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + webserver := steadyStateContainer("webserver", []apicontainer.DependsOn{{ContainerName: "php", Condition: createCondition}, {ContainerName: "htmldata", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + htmldata := steadyStateContainer("htmldata", 
[]apicontainer.DependsOn{{ContainerName: "sharedcssfiles", Condition: createCondition}}, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning) + sharedcssfiles := createdContainer("sharedcssfiles", []apicontainer.DependsOn{}, apicontainerstatus.ContainerRunning) + // The Pause container, being added to the webserver stack + pause := steadyStateContainer("pause", []apicontainer.DependsOn{}, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerResourcesProvisioned) + + task := &apitask.Task{ + Containers: []*apicontainer.Container{ + php, db, dbdata, webserver, htmldata, sharedcssfiles, pause, + }, + } + + cfg := config.Config{} + + // Add a dependency on the pause container for all containers in the webserver stack + for _, container := range task.Containers { + if container.Name == "pause" { + continue + } + container.SteadyStateDependencies = []string{"pause"} + _, err := DependenciesAreResolved(container, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Shouldn't be resolved; pause isn't running") + } + + _, err := DependenciesAreResolved(pause, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "Pause container's dependencies should be resolved") + + // Transition pause container to RUNNING + pause.KnownStatusUnsafe = apicontainerstatus.ContainerRunning + // Transition dependencies in webserver stack to CREATED/RUNNING state + dbdata.KnownStatusUnsafe = apicontainerstatus.ContainerCreated + db.KnownStatusUnsafe = apicontainerstatus.ContainerRunning + for _, container := range task.Containers { + if container.Name == "pause" { + continue + } + // Assert that dependencies remain unresolved until the pause container reaches + // RESOURCES_PROVISIONED + _, err = DependenciesAreResolved(container, task.Containers, "", nil, nil, &cfg) + assert.Error(t, err, "Shouldn't be resolved; pause isn't running") + } + pause.KnownStatusUnsafe = apicontainerstatus.ContainerResourcesProvisioned + // Dependecies 
should be resolved now that the 'pause' container has + // transitioned into RESOURCES_PROVISIONED + _, err = DependenciesAreResolved(php, task.Containers, "", nil, nil, &cfg) + assert.NoError(t, err, "Php should resolve") +} + +func TestOnSteadyStateIsResolved(t *testing.T) { + testcases := []struct { + TargetDesired apicontainerstatus.ContainerStatus + RunKnown apicontainerstatus.ContainerStatus + Resolved bool + }{ + { + TargetDesired: apicontainerstatus.ContainerStatusNone, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerPulled, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + RunKnown: apicontainerstatus.ContainerCreated, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + RunKnown: apicontainerstatus.ContainerRunning, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + RunKnown: apicontainerstatus.ContainerStopped, + Resolved: true, + }, + } + for _, tc := range testcases { + t.Run(fmt.Sprintf("T:%s+R:%s", tc.TargetDesired.String(), tc.RunKnown.String()), + assertResolved(onSteadyStateIsResolved, tc.TargetDesired, tc.RunKnown, tc.Resolved)) + } +} + +func assertResolved(f func(target *apicontainer.Container, dep *apicontainer.Container) bool, targetDesired, depKnown apicontainerstatus.ContainerStatus, expectedResolved bool) func(t *testing.T) { + return func(t *testing.T) { + target := &apicontainer.Container{ + DesiredStatusUnsafe: targetDesired, + } + dep := &apicontainer.Container{ + KnownStatusUnsafe: depKnown, + } + resolved := f(target, dep) + assert.Equal(t, expectedResolved, resolved) + } +} + +func TestVerifyTransitionDependenciesResolved(t *testing.T) { + testcases := []struct { + Name string + TargetKnown apicontainerstatus.ContainerStatus + TargetDesired apicontainerstatus.ContainerStatus + TargetNext apicontainerstatus.ContainerStatus + DependencyName string + DependencyKnown apicontainerstatus.ContainerStatus + 
SatisfiedStatus apicontainerstatus.ContainerStatus + ResolvedErr error + }{ + { + Name: "Nothing running, pull depends on running", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerRunning, + ResolvedErr: ErrContainerDependencyNotResolved, + }, + + { + Name: "Nothing running, pull depends on resources provisioned", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: ErrContainerDependencyNotResolved, + }, + { + Name: "Nothing running, create depends on running", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerCreated, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerRunning, + }, + { + Name: "Dependency created, pull depends on running", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerCreated, + SatisfiedStatus: apicontainerstatus.ContainerRunning, + ResolvedErr: ErrContainerDependencyNotResolved, + }, + { + Name: "Dependency created, pull depends on resources provisioned", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + 
DependencyKnown: apicontainerstatus.ContainerCreated, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: ErrContainerDependencyNotResolved, + }, + { + Name: "Dependency running, pull depends on running", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerRunning, + SatisfiedStatus: apicontainerstatus.ContainerRunning, + }, + { + Name: "Dependency running, pull depends on resources provisioned", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerRunning, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: ErrContainerDependencyNotResolved, + }, + { + Name: "Dependency resources provisioned, pull depends on resources provisioned", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerPulled, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerResourcesProvisioned, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + }, + { + Name: "Dependency running, create depends on created", + TargetKnown: apicontainerstatus.ContainerPulled, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerCreated, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerRunning, + SatisfiedStatus: apicontainerstatus.ContainerCreated, + }, + { + Name: "Target running, create depends on running", + TargetKnown: apicontainerstatus.ContainerRunning, + TargetDesired: apicontainerstatus.ContainerRunning, + TargetNext: apicontainerstatus.ContainerRunning, + 
DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerRunning, + SatisfiedStatus: apicontainerstatus.ContainerCreated, + }, + { + Name: "Target pulled, desired stopped", + TargetKnown: apicontainerstatus.ContainerPulled, + TargetDesired: apicontainerstatus.ContainerStopped, + TargetNext: apicontainerstatus.ContainerRunning, + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerCreated, + }, + // Note: Not all possible situations are tested here. The only situations tested here are ones that are + // expected to reasonably happen at the time this code was written. Other behavior is not expected to occur, + // so it is not tested. + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + target := &apicontainer.Container{ + KnownStatusUnsafe: tc.TargetKnown, + DesiredStatusUnsafe: tc.TargetDesired, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + target.BuildContainerDependency(tc.DependencyName, tc.SatisfiedStatus, tc.TargetNext) + dep := &apicontainer.Container{ + Name: tc.DependencyName, + KnownStatusUnsafe: tc.DependencyKnown, + } + containers := make(map[string]*apicontainer.Container) + containers[dep.Name] = dep + resolved := verifyTransitionDependenciesResolved(target, containers, nil) + assert.Equal(t, tc.ResolvedErr, resolved) + }) + } +} + +func TestVerifyResourceDependenciesResolved(t *testing.T) { + testcases := []struct { + Name string + TargetKnown apicontainerstatus.ContainerStatus + TargetDep apicontainerstatus.ContainerStatus + DependencyKnown resourcestatus.ResourceStatus + RequiredStatus resourcestatus.ResourceStatus + + ExpectedResolved bool + }{ + { + Name: "resource none,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: 
resourcestatus.ResourceStatus(0), + RequiredStatus: resourcestatus.ResourceStatus(1), + ExpectedResolved: false, + }, + { + Name: "resource created,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: resourcestatus.ResourceStatus(1), + RequiredStatus: resourcestatus.ResourceStatus(1), + ExpectedResolved: true, + }, + { + Name: "resource none,container create depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerCreated, + DependencyKnown: resourcestatus.ResourceStatus(0), + RequiredStatus: resourcestatus.ResourceStatus(1), + ExpectedResolved: true, + }, + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + resourceName := "resource" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + gomock.InOrder( + mockResource.EXPECT().SetKnownStatus(tc.DependencyKnown), + mockResource.EXPECT().GetKnownStatus().Return(tc.DependencyKnown).AnyTimes(), + ) + mockResource.SetKnownStatus(tc.DependencyKnown) + target := &apicontainer.Container{ + KnownStatusUnsafe: tc.TargetKnown, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + target.BuildResourceDependency(resourceName, tc.RequiredStatus, tc.TargetDep) + resources := make(map[string]taskresource.TaskResource) + resources[resourceName] = mockResource + resolved := verifyResourceDependenciesResolved(target, resources) + assert.Equal(t, tc.ExpectedResolved, resolved) + }) + } +} + +func TestVerifyTransitionResourceDependenciesResolved(t *testing.T) { + testcases := []struct { + Name string + TargetKnown apicontainerstatus.ContainerStatus + TargetDep apicontainerstatus.ContainerStatus + DependencyKnown resourcestatus.ResourceStatus + RequiredStatus resourcestatus.ResourceStatus + ResolvedErr 
error + }{ + { + Name: "resource none,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: resourcestatus.ResourceStatus(0), + RequiredStatus: resourcestatus.ResourceStatus(1), + ResolvedErr: ErrResourceDependencyNotResolved, + }, + { + Name: "resource created,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: resourcestatus.ResourceStatus(1), + RequiredStatus: resourcestatus.ResourceStatus(1), + }, + { + Name: "resource none,container create depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerCreated, + DependencyKnown: resourcestatus.ResourceStatus(0), + RequiredStatus: resourcestatus.ResourceStatus(1), + }, + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + resourceName := "resource" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + gomock.InOrder( + mockResource.EXPECT().SetKnownStatus(tc.DependencyKnown), + mockResource.EXPECT().GetKnownStatus().Return(tc.DependencyKnown).AnyTimes(), + ) + mockResource.SetKnownStatus(tc.DependencyKnown) + target := &apicontainer.Container{ + KnownStatusUnsafe: tc.TargetKnown, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + target.BuildResourceDependency(resourceName, tc.RequiredStatus, tc.TargetDep) + resources := make(map[string]taskresource.TaskResource) + resources[resourceName] = mockResource + resolved := verifyTransitionDependenciesResolved(target, nil, resources) + assert.Equal(t, tc.ResolvedErr, resolved) + }) + } +} + +func TestTransitionDependencyResourceNotFound(t *testing.T) { + // this test verifies if error is thrown when the resource dependency is 
not + // found in the list of task resources + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + gomock.InOrder( + mockResource.EXPECT().SetKnownStatus(resourcestatus.ResourceStatus(1)), + ) + mockResource.SetKnownStatus(resourcestatus.ResourceStatus(1)) + target := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerStatusNone, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + target.BuildResourceDependency("resource", resourcestatus.ResourceStatus(1), apicontainerstatus.ContainerPulled) + + resources := make(map[string]taskresource.TaskResource) + resources["resource1"] = mockResource // different resource name + resolved := verifyTransitionDependenciesResolved(target, nil, resources) + assert.Equal(t, ErrResourceDependencyNotResolved, resolved) +} + +// TestVerifyTransitionDependenciesResolvedForResource verifies the logic of function TaskResourceDependenciesAreResolved +func TestVerifyTransitionDependenciesResolvedForResource(t *testing.T) { + testcases := []struct { + Name string + TargetKnown resourcestatus.ResourceStatus + TargetDesired resourcestatus.ResourceStatus + TargetNext resourcestatus.ResourceStatus + DependencyName string + DependencyKnown apicontainerstatus.ContainerStatus + SatisfiedStatus apicontainerstatus.ContainerStatus + ResolvedErr error + }{ + { + Name: "desired created, resource created depends on container resources provisioned", + TargetKnown: resourcestatus.ResourceStatus(0), + TargetDesired: resourcestatus.ResourceStatus(1), + TargetNext: resourcestatus.ResourceStatus(1), + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: ErrContainerDependencyNotResolvedForResource, + }, + { + Name: "desired removed, resource created depends on container resources 
provisioned", + TargetKnown: resourcestatus.ResourceStatus(0), + TargetDesired: resourcestatus.ResourceStatus(2), + TargetNext: resourcestatus.ResourceStatus(1), + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerStatusNone, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: ErrContainerDependencyNotResolvedForResource, + }, + { + Name: "desired created, resource created depends on resourcesProvisioned, dependency resourcesProvisioned", + TargetKnown: resourcestatus.ResourceStatus(0), + TargetDesired: resourcestatus.ResourceStatus(1), + TargetNext: resourcestatus.ResourceStatus(1), + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerResourcesProvisioned, + SatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + ResolvedErr: nil, + }, + { + Name: "desired created, resource created depends on created, dependency passed created", + TargetKnown: resourcestatus.ResourceStatus(0), + TargetDesired: resourcestatus.ResourceStatus(1), + TargetNext: resourcestatus.ResourceStatus(1), + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerRunning, + SatisfiedStatus: apicontainerstatus.ContainerCreated, + ResolvedErr: nil, + }, + { + Name: "desired removed, resource removed depends on stopped, dependency resourceProvisioned", + TargetKnown: resourcestatus.ResourceStatus(1), + TargetDesired: resourcestatus.ResourceStatus(2), + TargetNext: resourcestatus.ResourceStatus(2), + DependencyName: "container", + DependencyKnown: apicontainerstatus.ContainerResourcesProvisioned, + SatisfiedStatus: apicontainerstatus.ContainerStopped, + ResolvedErr: ErrContainerDependencyNotResolvedForResource, + }, + { + Name: "desired removed, resource removed depends on stopped, dependency stopped", + TargetKnown: resourcestatus.ResourceStatus(1), + TargetDesired: resourcestatus.ResourceStatus(2), + TargetNext: resourcestatus.ResourceStatus(2), + DependencyName: "container", + 
DependencyKnown: apicontainerstatus.ContainerStopped, + SatisfiedStatus: apicontainerstatus.ContainerStopped, + ResolvedErr: nil, + }, + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + con := &apicontainer.Container{ + Name: tc.DependencyName, + KnownStatusUnsafe: tc.DependencyKnown, + } + containers := []*apicontainer.Container{ + con, + } + containerDep := []apicontainer.ContainerDependency{ + { + ContainerName: tc.DependencyName, + SatisfiedStatus: tc.SatisfiedStatus, + }, + } + gomock.InOrder( + mockResource.EXPECT().NextKnownState().Return(tc.TargetNext), + mockResource.EXPECT().GetContainerDependencies(tc.TargetNext).Return(containerDep), + ) + resolved := TaskResourceDependenciesAreResolved(mockResource, containers) + assert.Equal(t, tc.ResolvedErr, resolved) + }) + } + +} + +// TestResourceTransitionDependencyNotFound verifies if error is thrown when the container dependency for resource +// is not found in the list of task containers +func TestResourceTransitionDependencyNotFound(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + containerDep := []apicontainer.ContainerDependency{ + { + ContainerName: "container2", + SatisfiedStatus: apicontainerstatus.ContainerStopped, + }, + } + con := &apicontainer.Container{ + Name: "container1", + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + existingContainers := []*apicontainer.Container{ + con, + } + gomock.InOrder( + mockResource.EXPECT().NextKnownState().Return(resourcestatus.ResourceStatus(2)), + mockResource.EXPECT().GetContainerDependencies(resourcestatus.ResourceStatus(2)).Return(containerDep), + ) + resolved := 
TaskResourceDependenciesAreResolved(mockResource, existingContainers) + assert.Equal(t, ErrContainerDependencyNotResolvedForResource, resolved) +} + +func TestResourceTransitionDependencyNoDependency(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + con := &apicontainer.Container{ + Name: "container1", + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + existingContainers := []*apicontainer.Container{ + con, + } + gomock.InOrder( + mockResource.EXPECT().NextKnownState().Return(resourcestatus.ResourceStatus(2)), + mockResource.EXPECT().GetContainerDependencies(resourcestatus.ResourceStatus(2)).Return(nil), + ) + assert.Nil(t, TaskResourceDependenciesAreResolved(mockResource, existingContainers)) +} + +func TestContainerOrderingCanResolve(t *testing.T) { + testcases := []struct { + TargetDesired apicontainerstatus.ContainerStatus + DependencyDesired apicontainerstatus.ContainerStatus + DependencyKnown apicontainerstatus.ContainerStatus + DependencyCondition string + ExitCode int + Resolvable bool + }{ + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyDesired: apicontainerstatus.ContainerStatusNone, + DependencyCondition: createCondition, + Resolvable: false, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyDesired: apicontainerstatus.ContainerStopped, + DependencyCondition: createCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyDesired: apicontainerstatus.ContainerZombie, + DependencyCondition: createCondition, + Resolvable: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerStatusNone, + DependencyCondition: createCondition, + Resolvable: false, + }, + { + TargetDesired: 
apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerCreated, + DependencyCondition: createCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerRunning, + DependencyCondition: createCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerStopped, + DependencyCondition: createCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyDesired: apicontainerstatus.ContainerCreated, + DependencyCondition: startCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerRunning, + DependencyCondition: startCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyDesired: apicontainerstatus.ContainerRunning, + DependencyCondition: startCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerZombie, + DependencyCondition: startCondition, + Resolvable: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerStopped, + DependencyCondition: successCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerRunning, + DependencyCondition: successCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerStopped, + ExitCode: 0, + DependencyCondition: successCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + ExitCode: 0, + 
DependencyCondition: successCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + ExitCode: 1, + DependencyCondition: successCondition, + Resolvable: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerStopped, + DependencyCondition: completeCondition, + Resolvable: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyDesired: apicontainerstatus.ContainerRunning, + DependencyCondition: completeCondition, + Resolvable: true, + }, + } + cfg := config.Config{} + for _, tc := range testcases { + t.Run(fmt.Sprintf("T:%s+V:%s", tc.TargetDesired.String(), tc.DependencyDesired.String()), + assertContainerOrderingCanResolve(containerOrderingDependenciesCanResolve, tc.TargetDesired, tc.DependencyDesired, tc.DependencyKnown, tc.DependencyCondition, tc.ExitCode, tc.Resolvable, &cfg)) + } +} + +func TestContainerOrderingIsResolved(t *testing.T) { + testcases := []struct { + TargetDesired apicontainerstatus.ContainerStatus + DependencyKnown apicontainerstatus.ContainerStatus + DependencyCondition string + Resolved bool + ExitCode int + }{ + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: createCondition, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerCreated, + DependencyCondition: createCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: createCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: createCondition, + Resolved: true, + }, + { + TargetDesired: 
apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: createCondition, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerCreated, + DependencyCondition: createCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerCreated, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerZombie, + DependencyCondition: startCondition, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerZombie, + DependencyCondition: startCondition, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: successCondition, + ExitCode: 0, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: successCondition, + ExitCode: 1, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: completeCondition, + ExitCode: 0, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: completeCondition, + ExitCode: 1, + Resolved: true, + }, + { + 
TargetDesired: apicontainerstatus.ContainerRunning, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyCondition: completeCondition, + Resolved: false, + }, + } + cfg := config.Config{} + for _, tc := range testcases { + t.Run(fmt.Sprintf("T:%s+V:%s", tc.TargetDesired.String(), tc.DependencyKnown.String()), + assertContainerOrderingResolved(containerOrderingDependenciesIsResolved, tc.TargetDesired, tc.DependencyKnown, tc.DependencyCondition, tc.ExitCode, tc.Resolved, &cfg)) + } +} + +func TestContainerOrderingIsResolvedWithDependentContainersPullUpfront(t *testing.T) { + testcases := []struct { + TargetKnown apicontainerstatus.ContainerStatus + DependencyKnown apicontainerstatus.ContainerStatus + DependencyCondition string + Resolved bool + ExitCode int + }{ + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerPulled, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: startCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerCreated, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: startCondition, + Resolved: false, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: successCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerPulled, + DependencyCondition: successCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerCreated, + DependencyCondition: 
successCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyCondition: successCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStopped, + ExitCode: 1, + DependencyCondition: successCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerPulled, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: successCondition, + Resolved: false, + }, + { + TargetKnown: apicontainerstatus.ContainerPulled, + DependencyKnown: apicontainerstatus.ContainerStopped, + ExitCode: 1, + DependencyCondition: successCondition, + Resolved: false, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: completeCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerPulled, + DependencyCondition: completeCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerCreated, + DependencyCondition: completeCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerRunning, + DependencyCondition: completeCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnown: apicontainerstatus.ContainerStopped, + DependencyCondition: completeCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerPulled, + DependencyKnown: apicontainerstatus.ContainerStatusNone, + DependencyCondition: completeCondition, + Resolved: false, + }, + } + + cfg := config.Config{ + DependentContainersPullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + 
for _, tc := range testcases { + t.Run(fmt.Sprintf("DependencyCondition:%s+T:%s+V:%s", tc.DependencyCondition, tc.TargetKnown.String(), tc.DependencyKnown.String()), + assertContainerTargetOrderingResolved(containerOrderingDependenciesIsResolved, tc.TargetKnown, tc.DependencyKnown, tc.DependencyCondition, tc.ExitCode, tc.Resolved, &cfg)) + } +} + +func assertContainerTargetOrderingResolved(f func(target *apicontainer.Container, dep *apicontainer.Container, depCond string, cfg *config.Config) bool, targetKnown, depKnown apicontainerstatus.ContainerStatus, depCond string, exitcode int, expectedResolvable bool, cfg *config.Config) func(t *testing.T) { + return func(t *testing.T) { + target := &apicontainer.Container{ + KnownStatusUnsafe: targetKnown, + } + dep := &apicontainer.Container{ + KnownStatusUnsafe: depKnown, + KnownExitCodeUnsafe: aws.Int(exitcode), + } + + resolvable := f(target, dep, depCond, cfg) + assert.Equal(t, expectedResolvable, resolvable) + } +} + +func assertContainerOrderingCanResolve(f func(target *apicontainer.Container, dep *apicontainer.Container, depCond string, cfg *config.Config) bool, targetDesired, depDesired, depKnown apicontainerstatus.ContainerStatus, depCond string, exitcode int, expectedResolvable bool, cfg *config.Config) func(t *testing.T) { + return func(t *testing.T) { + target := &apicontainer.Container{ + DesiredStatusUnsafe: targetDesired, + } + dep := &apicontainer.Container{ + DesiredStatusUnsafe: depDesired, + KnownStatusUnsafe: depKnown, + KnownExitCodeUnsafe: aws.Int(exitcode), + } + resolvable := f(target, dep, depCond, cfg) + assert.Equal(t, expectedResolvable, resolvable) + } +} + +func assertContainerOrderingResolved(f func(target *apicontainer.Container, dep *apicontainer.Container, depCond string, cfg *config.Config) bool, targetDesired, depKnown apicontainerstatus.ContainerStatus, depCond string, exitcode int, expectedResolved bool, cfg *config.Config) func(t *testing.T) { + return func(t *testing.T) { + target := 
&apicontainer.Container{ + DesiredStatusUnsafe: targetDesired, + } + dep := &apicontainer.Container{ + KnownStatusUnsafe: depKnown, + KnownExitCodeUnsafe: aws.Int(exitcode), + } + resolved := f(target, dep, depCond, cfg) + assert.Equal(t, expectedResolved, resolved) + } +} + +func TestContainerOrderingHealthyConditionIsResolved(t *testing.T) { + testcases := []struct { + TargetDesired apicontainerstatus.ContainerStatus + TargetKnown apicontainerstatus.ContainerStatus + DependencyKnownHealthStatus apicontainerstatus.ContainerHealthStatus + HealthCheckType string + DependencyKnownHealthExitCode int + DependencyCondition string + Resolved bool + }{ + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnownHealthStatus: apicontainerstatus.ContainerHealthy, + HealthCheckType: "docker", + DependencyCondition: healthyCondition, + Resolved: true, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + DependencyKnownHealthStatus: apicontainerstatus.ContainerUnhealthy, + HealthCheckType: "docker", + DependencyKnownHealthExitCode: 1, + DependencyCondition: healthyCondition, + Resolved: false, + }, + { + TargetDesired: apicontainerstatus.ContainerCreated, + Resolved: false, + }, + } + cfg := config.Config{} + for _, tc := range testcases { + t.Run(fmt.Sprintf("DependencyKnownHealthStatus:%s+T:%s+V:%s", tc.DependencyKnownHealthStatus, tc.TargetDesired.String(), tc.DependencyKnownHealthStatus.String()), + assertContainerOrderingHealthyConditionResolved(containerOrderingDependenciesIsResolved, tc.TargetDesired, tc.TargetKnown, tc.DependencyKnownHealthStatus, tc.HealthCheckType, tc.DependencyKnownHealthExitCode, tc.DependencyCondition, tc.Resolved, &cfg)) + } +} + +func TestContainerOrderingHealthyConditionIsResolvedWithDependentContainersPullUpfront(t *testing.T) { + testcases := []struct { + TargetDesired apicontainerstatus.ContainerStatus + TargetKnown apicontainerstatus.ContainerStatus + DependencyKnownHealthStatus 
apicontainerstatus.ContainerHealthStatus + HealthCheckType string + DependencyKnownHealthExitCode int + DependencyCondition string + Resolved bool + }{ + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnownHealthStatus: apicontainerstatus.ContainerHealthy, + HealthCheckType: "docker", + DependencyCondition: healthyCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerStatusNone, + DependencyKnownHealthStatus: apicontainerstatus.ContainerUnhealthy, + HealthCheckType: "docker", + DependencyKnownHealthExitCode: 1, + DependencyCondition: healthyCondition, + Resolved: true, + }, + { + TargetKnown: apicontainerstatus.ContainerPulled, + DependencyKnownHealthStatus: apicontainerstatus.ContainerUnhealthy, + HealthCheckType: "docker", + DependencyKnownHealthExitCode: 1, + DependencyCondition: healthyCondition, + Resolved: false, + }, + } + cfg := config.Config{ + DependentContainersPullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + } + for _, tc := range testcases { + t.Run(fmt.Sprintf("DependencyKnownHealthStatus:%s+T:%s+V:%s", tc.DependencyKnownHealthStatus, tc.TargetKnown.String(), tc.DependencyKnownHealthStatus.String()), + assertContainerOrderingHealthyConditionResolved(containerOrderingDependenciesIsResolved, tc.TargetDesired, tc.TargetKnown, tc.DependencyKnownHealthStatus, tc.HealthCheckType, tc.DependencyKnownHealthExitCode, tc.DependencyCondition, tc.Resolved, &cfg)) + } +} + +func assertContainerOrderingHealthyConditionResolved(f func(target *apicontainer.Container, dep *apicontainer.Container, depCond string, cfg *config.Config) bool, targetDesired apicontainerstatus.ContainerStatus, targetKnown apicontainerstatus.ContainerStatus, depHealthKnown apicontainerstatus.ContainerHealthStatus, healthCheckEnabled string, depHealthKnownExitCode int, depCond string, expectedResolved bool, cfg *config.Config) func(t *testing.T) { + return func(t *testing.T) { + target := &apicontainer.Container{ + 
DesiredStatusUnsafe: targetDesired, + KnownStatusUnsafe: targetKnown, + } + dep := &apicontainer.Container{ + Health: apicontainer.HealthStatus{ + Status: depHealthKnown, + ExitCode: depHealthKnownExitCode, + }, + HealthCheckType: healthCheckEnabled, + } + resolved := f(target, dep, depCond, cfg) + assert.Equal(t, expectedResolved, resolved) + } +} + +func dependsOn(vals ...string) []apicontainer.DependsOn { + d := make([]apicontainer.DependsOn, len(vals)) + for i, val := range vals { + d[i] = apicontainer.DependsOn{ContainerName: val} + } + return d +} + +// TestVerifyShutdownOrder validates that the shutdown graph works in the inverse order of the startup graph +// This test always uses a running target +func TestVerifyShutdownOrder(t *testing.T) { + + // dependencies aren't transitive, so we can check multiple levels within here + others := map[string]*apicontainer.Container{ + "A": { + Name: "A", + DependsOnUnsafe: dependsOn("B"), + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + "B": { + Name: "B", + DependsOnUnsafe: dependsOn("C", "D"), + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + "C": { + Name: "C", + DependsOnUnsafe: dependsOn("E", "F"), + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + "D": { + Name: "D", + DependsOnUnsafe: dependsOn("E"), + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + } + + testCases := []struct { + TestName string + TargetName string + ShouldResolve bool + }{ + { + TestName: "Dependency is already stopped", + TargetName: "F", + ShouldResolve: true, + }, + { + TestName: "Running with a running dependency", + TargetName: "D", + ShouldResolve: false, + }, + { + TestName: "One dependency running, one stopped", + TargetName: "E", + ShouldResolve: false, + }, + { + TestName: "No dependencies declared", + TargetName: "Z", + ShouldResolve: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.TestName, func(t *testing.T) { + // create a target container + target := 
&apicontainer.Container{ + Name: tc.TargetName, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + } + + // Validation + if tc.ShouldResolve { + assert.NoError(t, verifyShutdownOrder(target, others)) + } else { + assert.Error(t, verifyShutdownOrder(target, others)) + } + }) + } +} + +func TestStartTimeoutForContainerOrdering(t *testing.T) { + testcases := []struct { + DependencyStartedAt time.Time + DependencyStartTimeout uint + DependencyCondition string + ExpectedTimedOut bool + }{ + { + DependencyStartedAt: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + DependencyStartTimeout: 10, + DependencyCondition: healthyCondition, + ExpectedTimedOut: true, + }, + { + DependencyStartedAt: time.Date(4000, 1, 1, 0, 0, 0, 0, time.UTC), + DependencyStartTimeout: 10, + DependencyCondition: successCondition, + ExpectedTimedOut: false, + }, + { + DependencyStartedAt: time.Time{}, + DependencyStartTimeout: 10, + DependencyCondition: successCondition, + ExpectedTimedOut: false, + }, + } + + for _, tc := range testcases { + t.Run(fmt.Sprintf("T:dependency time out: %v", tc.ExpectedTimedOut), + assertContainerOrderingNotTimedOut(hasDependencyTimedOut, tc.DependencyStartedAt, tc.DependencyStartTimeout, tc.DependencyCondition, tc.ExpectedTimedOut)) + } +} + +func assertContainerOrderingNotTimedOut(f func(dep *apicontainer.Container, depCond string) bool, startedAt time.Time, timeout uint, depCond string, expectedTimedOut bool) func(t *testing.T) { + return func(t *testing.T) { + dep := &apicontainer.Container{ + Name: "dep", + StartTimeout: timeout, + } + + dep.SetStartedAt(startedAt) + + timedOut := f(dep, depCond) + assert.Equal(t, expectedTimedOut, timedOut) + } +} + +func TestVerifyContainerOrderingStatusResolvableFailOnDependencyWontStart(t *testing.T) { + targetName := "target" + dependencyName := "dependency" + target := &apicontainer.Container{ + Name: targetName, + KnownStatusUnsafe: 
apicontainerstatus.ContainerPulled, + DesiredStatusUnsafe: apicontainerstatus.ContainerCreated, + DependsOnUnsafe: []apicontainer.DependsOn{ + { + ContainerName: dependencyName, + }, + }, + } + dep := &apicontainer.Container{ + Name: dependencyName, + KnownStatusUnsafe: apicontainerstatus.ContainerPulled, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + AppliedStatus: apicontainerstatus.ContainerStatusNone, + } + contMap := map[string]*apicontainer.Container{ + targetName: target, + dependencyName: dep, + } + dummyResolves := func(*apicontainer.Container, *apicontainer.Container, string, *config.Config) bool { + return true + } + _, err := verifyContainerOrderingStatusResolvable(target, contMap, &config.Config{}, dummyResolves) + assert.Error(t, err) +} diff --git a/agent/engine/dependencygraph/graph_unix_test.go b/agent/engine/dependencygraph/graph_unix_test.go new file mode 100644 index 00000000000..bcaa4c566ce --- /dev/null +++ b/agent/engine/dependencygraph/graph_unix_test.go @@ -0,0 +1,80 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dependencygraph + +import ( + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/stretchr/testify/assert" +) + +func TestVerifyCgroupDependenciesResolved(t *testing.T) { + testcases := []struct { + Name string + TargetKnown apicontainerstatus.ContainerStatus + TargetDep apicontainerstatus.ContainerStatus + DependencyKnown resourcestatus.ResourceStatus + RequiredStatus resourcestatus.ResourceStatus + + ExpectedResolved bool + }{ + { + Name: "resource none,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + RequiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + + ExpectedResolved: false, + }, + { + Name: "resource created,container pull depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerPulled, + DependencyKnown: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + RequiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + ExpectedResolved: true, + }, + { + Name: "resource none,container create depends on resource created", + TargetKnown: apicontainerstatus.ContainerStatusNone, + TargetDep: apicontainerstatus.ContainerCreated, + DependencyKnown: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + RequiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + ExpectedResolved: true, + }, + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + cgroupResource := &cgroup.CgroupResource{} + cgroupResource.SetKnownStatus(tc.DependencyKnown) 
+ target := &apicontainer.Container{ + KnownStatusUnsafe: tc.TargetKnown, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + target.BuildResourceDependency("cgroup", tc.RequiredStatus, tc.TargetDep) + resources := make(map[string]taskresource.TaskResource) + resources[cgroupResource.GetName()] = cgroupResource + resolved := verifyResourceDependenciesResolved(target, resources) + assert.Equal(t, tc.ExpectedResolved, resolved) + }) + } +} diff --git a/agent/engine/docker_image_manager.go b/agent/engine/docker_image_manager.go new file mode 100644 index 00000000000..3031cbbadef --- /dev/null +++ b/agent/engine/docker_image_manager.go @@ -0,0 +1,680 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + "github.com/cihub/seelog" +) + +const ( + imageNotFoundForDeletionError = "no such image" +) + +// ImageManager is responsible for saving the Image states, +// adding and removing container references to ImageStates +type ImageManager interface { + RecordContainerReference(container *apicontainer.Container) error + RemoveContainerReferenceFromImageState(container *apicontainer.Container) error + AddAllImageStates(imageStates []*image.ImageState) + GetImageStateFromImageName(containerImageName string) (*image.ImageState, bool) + StartImageCleanupProcess(ctx context.Context) + SetDataClient(dataClient data.Client) +} + +// dockerImageManager accounts all the images and their states in the instance. +// It also has the cleanup policy configuration. 
+type dockerImageManager struct { + imageStates []*image.ImageState + client dockerapi.DockerClient + dataClient data.Client + updateLock sync.RWMutex + imageCleanupTicker *time.Ticker + state dockerstate.TaskEngineState + imageStatesConsideredForDeletion map[string]*image.ImageState + minimumAgeBeforeDeletion time.Duration + numImagesToDelete int + imageCleanupTimeInterval time.Duration + imagePullBehavior config.ImagePullBehaviorType + imageCleanupExclusionList []string + deleteNonECSImagesEnabled config.BooleanDefaultFalse + nonECSContainerCleanupWaitDuration time.Duration + numNonECSContainersToDelete int + nonECSMinimumAgeBeforeDeletion time.Duration +} + +// ImageStatesForDeletion is used for implementing the sort interface +type ImageStatesForDeletion []*image.ImageState + +// NewImageManager returns a new ImageManager +func NewImageManager(cfg *config.Config, client dockerapi.DockerClient, state dockerstate.TaskEngineState) ImageManager { + return &dockerImageManager{ + client: client, + state: state, + minimumAgeBeforeDeletion: cfg.MinimumImageDeletionAge, + numImagesToDelete: cfg.NumImagesToDeletePerCycle, + imageCleanupTimeInterval: cfg.ImageCleanupInterval, + imagePullBehavior: cfg.ImagePullBehavior, + imageCleanupExclusionList: buildImageCleanupExclusionList(cfg), + deleteNonECSImagesEnabled: cfg.DeleteNonECSImagesEnabled, + nonECSContainerCleanupWaitDuration: cfg.TaskCleanupWaitDuration, + numNonECSContainersToDelete: cfg.NumNonECSContainersToDeletePerCycle, + nonECSMinimumAgeBeforeDeletion: cfg.NonECSMinimumImageDeletionAge, + } +} + +// SetDataClient sets the saver that is used by the ImageManager. 
+func (imageManager *dockerImageManager) SetDataClient(dataClient data.Client) { + imageManager.dataClient = dataClient +} + +func buildImageCleanupExclusionList(cfg *config.Config) []string { + // append known cached internal images to imageCleanupExclusionList + excludedImages := append(cfg.ImageCleanupExclusionList, + cfg.PauseContainerImageName+":"+cfg.PauseContainerTag, + config.DefaultPauseContainerImageName+":"+config.DefaultPauseContainerTag, + config.CachedImageNameAgentContainer, + ) + for _, image := range excludedImages { + seelog.Infof("Image excluded from cleanup: %s", image) + } + return excludedImages +} + +func (imageManager *dockerImageManager) AddAllImageStates(imageStates []*image.ImageState) { + imageManager.updateLock.Lock() + defer imageManager.updateLock.Unlock() + for _, imageState := range imageStates { + imageManager.addImageState(imageState) + } +} + +func (imageManager *dockerImageManager) GetImageStatesCount() int { + imageManager.updateLock.RLock() + defer imageManager.updateLock.RUnlock() + return len(imageManager.imageStates) +} + +// RecordContainerReference adds container reference to the corresponding imageState object +func (imageManager *dockerImageManager) RecordContainerReference(container *apicontainer.Container) error { + // On agent restart, container ID was retrieved from agent state file + // TODO add setter and getter for modifying this + if container.ImageID != "" { + if !imageManager.addContainerReferenceToExistingImageState(container) { + return fmt.Errorf("Failed to add container to existing image state") + } + return nil + } + + if container.Image == "" { + return fmt.Errorf("Invalid container reference: Empty image name") + } + + // Inspect image for obtaining Container's Image ID + imageInspected, err := imageManager.client.InspectImage(container.Image) + if err != nil { + seelog.Errorf("Error inspecting image %v: %v", container.Image, err) + return err + } + container.ImageID = imageInspected.ID + imageDigest := 
imageManager.fetchRepoDigest(imageInspected, container) + container.SetImageDigest(imageDigest) + added := imageManager.addContainerReferenceToExistingImageState(container) + if !added { + imageManager.addContainerReferenceToNewImageState(container, imageInspected.Size) + } + return nil +} + +// check whether image pull from ECR +func (imageManager *dockerImageManager) isImagePullFromECR(container *apicontainer.Container) bool { + return container.RegistryAuthentication != nil && container.RegistryAuthentication.ECRAuthData != nil && container.RegistryAuthentication.Type == apicontainer.AuthTypeECR +} + +// The helper function to fetch the RepoImageDigest when inspect the image +func (imageManager *dockerImageManager) fetchRepoDigest(imageInspected *types.ImageInspect, container *apicontainer.Container) string { + resultRepoDigest := "" + if imageManager.isImagePullFromECR(container) { + imageRepoDigests := imageInspected.RepoDigests + imagePrefix := strings.Split(container.Image, "/")[0] + for _, imageRepoDigest := range imageRepoDigests { + if strings.HasPrefix(imageRepoDigest, imagePrefix) { + repoDigestSplitList := strings.Split(imageRepoDigest, "@") + if len(repoDigestSplitList) > 1 { + resultRepoDigest = repoDigestSplitList[1] + return resultRepoDigest + } else { + seelog.Warnf("ImageRepoDigest doesn't have the right format: %v", imageRepoDigest) + return "" + } + } + } + } + return resultRepoDigest +} + +func (imageManager *dockerImageManager) addContainerReferenceToExistingImageState(container *apicontainer.Container) bool { + // this lock is used for reading the image states in the image manager + imageManager.updateLock.RLock() + defer imageManager.updateLock.RUnlock() + imageManager.removeExistingImageNameOfDifferentID(container.Image, container.ImageID) + imageState, ok := imageManager.getImageState(container.ImageID) + if ok { + imageState.UpdateImageState(container) + imageManager.saveImageStateData(imageState) + } + return ok +} + +func (imageManager 
*dockerImageManager) addContainerReferenceToNewImageState(container *apicontainer.Container, imageSize int64) { + // this lock is used while creating and adding new image state to image manager + imageManager.updateLock.Lock() + defer imageManager.updateLock.Unlock() + imageManager.removeExistingImageNameOfDifferentID(container.Image, container.ImageID) + // check to see if a different thread added image state for same image ID + imageState, ok := imageManager.getImageState(container.ImageID) + if ok { + imageState.UpdateImageState(container) + imageManager.saveImageStateData(imageState) + } else { + sourceImage := &image.Image{ + ImageID: container.ImageID, + Size: imageSize, + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now(), + LastUsedAt: time.Now(), + } + sourceImageState.UpdateImageState(container) + imageManager.addImageState(sourceImageState) + } +} + +// RemoveContainerReferenceFromImageState removes container reference from the corresponding imageState object +func (imageManager *dockerImageManager) RemoveContainerReferenceFromImageState(container *apicontainer.Container) error { + // this lock is for reading image states and finding the one that the container belongs to + imageManager.updateLock.RLock() + defer imageManager.updateLock.RUnlock() + if container.ImageID == "" { + return fmt.Errorf("Invalid container reference: Empty image id") + } + + // Find image state that this container is part of, and remove the reference + imageState, ok := imageManager.getImageState(container.ImageID) + if !ok { + return fmt.Errorf("Cannot find image state for the container to be removed") + } + // Found matching ImageState + err := imageState.RemoveContainerReference(container) + if err != nil { + return err + } + imageManager.saveImageStateData(imageState) + return nil +} + +func (imageManager *dockerImageManager) addImageState(imageState *image.ImageState) { + imageManager.imageStates = append(imageManager.imageStates, 
imageState) + imageManager.saveImageStateData(imageState) + +} + +// getAllImageStates returns the list of imageStates in the instance +func (imageManager *dockerImageManager) getAllImageStates() []*image.ImageState { + return imageManager.imageStates +} + +// getImageState returns the ImageState object that the container is referenced at +func (imageManager *dockerImageManager) getImageState(containerImageID string) (*image.ImageState, bool) { + for _, imageState := range imageManager.getAllImageStates() { + if imageState.Image.ImageID == containerImageID { + return imageState, true + } + } + return nil, false +} + +// removeImageState removes the imageState from the list of imageState objects in ImageManager +func (imageManager *dockerImageManager) removeImageState(imageStateToBeRemoved *image.ImageState) { + for i, imageState := range imageManager.imageStates { + if imageState.Image.ImageID == imageStateToBeRemoved.Image.ImageID { + // Image State found; hence remove it + seelog.Infof("Removing Image State: [%s] from Image Manager", imageState.String()) + imageManager.imageStates = append(imageManager.imageStates[:i], imageManager.imageStates[i+1:]...) 
+ imageManager.removeImageStateData(imageState.Image.ImageID) + return + } + } +} + +func (imageManager *dockerImageManager) getCandidateImagesForDeletion() []*image.ImageState { + if len(imageManager.imageStatesConsideredForDeletion) < 1 { + seelog.Debugf("Image Manager: Empty state!") + // no image states present in image manager + return nil + } + var imagesForDeletion []*image.ImageState + for _, imageState := range imageManager.imageStatesConsideredForDeletion { + if imageManager.isImageOldEnough(imageState) && imageState.HasNoAssociatedContainers() { + seelog.Infof("Candidate image for deletion: [%s]", imageState.String()) + imagesForDeletion = append(imagesForDeletion, imageState) + } + } + return imagesForDeletion +} + +func (imageManager *dockerImageManager) isImageOldEnough(imageState *image.ImageState) bool { + ageOfImage := time.Since(imageState.PulledAt) + return ageOfImage > imageManager.minimumAgeBeforeDeletion +} + +//TODO: change image createdTime to image lastUsedTime when docker support it in the future +func (imageManager *dockerImageManager) nonECSImageOldEnough(NonECSImage ImageWithSizeID) bool { + ageOfImage := time.Since(NonECSImage.createdTime) + return ageOfImage > imageManager.nonECSMinimumAgeBeforeDeletion +} + +// Implementing sort interface based on last used times of the images +func (imageStates ImageStatesForDeletion) Len() int { + return len(imageStates) +} + +func (imageStates ImageStatesForDeletion) Less(i, j int) bool { + return imageStates[i].LastUsedAt.Before(imageStates[j].LastUsedAt) +} + +func (imageStates ImageStatesForDeletion) Swap(i, j int) { + imageStates[i], imageStates[j] = imageStates[j], imageStates[i] +} + +func (imageManager *dockerImageManager) getLeastRecentlyUsedImage(imagesForDeletion []*image.ImageState) *image.ImageState { + var candidateImages ImageStatesForDeletion + for _, imageState := range imagesForDeletion { + candidateImages = append(candidateImages, imageState) + } + // sort images in the order of 
last used times + sort.Sort(candidateImages) + // return only the top LRU image for deletion + return candidateImages[0] +} + +func (imageManager *dockerImageManager) removeExistingImageNameOfDifferentID(containerImageName string, inspectedImageID string) { + for _, imageState := range imageManager.getAllImageStates() { + // image with same name pulled in the instance. Untag the already existing image name + if imageState.Image.ImageID != inspectedImageID { + if imageNameRemoved := imageState.RemoveImageName(containerImageName); imageNameRemoved { + imageManager.saveImageStateData(imageState) + } + } + } +} + +func (imageManager *dockerImageManager) StartImageCleanupProcess(ctx context.Context) { + // If the image pull behavior is prefer cached, don't clean up the image, + // because the cached image is needed. + if imageManager.imagePullBehavior == config.ImagePullPreferCachedBehavior { + seelog.Info("Pull behavior is set to always use cache. Disabling cleanup") + return + } + // passing the cleanup interval as argument which would help during testing + imageManager.performPeriodicImageCleanup(ctx, imageManager.imageCleanupTimeInterval) +} + +func (imageManager *dockerImageManager) performPeriodicImageCleanup(ctx context.Context, imageCleanupInterval time.Duration) { + imageManager.imageCleanupTicker = time.NewTicker(imageCleanupInterval) + for { + select { + case <-imageManager.imageCleanupTicker.C: + go imageManager.removeUnusedImages(ctx) + case <-ctx.Done(): + imageManager.imageCleanupTicker.Stop() + return + } + } +} + +func (imageManager *dockerImageManager) removeUnusedImages(ctx context.Context) { + seelog.Debug("Attempting to obtain ImagePullDeleteLock for removing images") + ImagePullDeleteLock.Lock() + seelog.Debug("Obtained ImagePullDeleteLock for removing images") + defer seelog.Debug("Released ImagePullDeleteLock after removing images") + defer ImagePullDeleteLock.Unlock() + + imageManager.updateLock.Lock() + defer imageManager.updateLock.Unlock() + 
+ var numECSImagesDeleted int + imageManager.imageStatesConsideredForDeletion = imageManager.imagesConsiderForDeletion(imageManager.getAllImageStates()) + + for i := 0; i < imageManager.numImagesToDelete; i++ { + err := imageManager.removeLeastRecentlyUsedImage(ctx) + numECSImagesDeleted = i + if err != nil { + seelog.Infof("End of eligible images for deletion: %v; Still have %d image states being managed", err, len(imageManager.getAllImageStates())) + break + } + } + if imageManager.deleteNonECSImagesEnabled.Enabled() { + // remove nonecs containers + imageManager.removeNonECSContainers(ctx) + // remove nonecs images + var nonECSImagesNumToDelete = imageManager.numImagesToDelete - numECSImagesDeleted + imageManager.removeNonECSImages(ctx, nonECSImagesNumToDelete) + } +} + +func (imageManager *dockerImageManager) removeNonECSContainers(ctx context.Context) { + nonECSContainersIDs, err := imageManager.getNonECSContainerIDs(ctx) + if err != nil { + seelog.Errorf("Error getting non-ECS container IDs: %v", err) + } + var nonECSContainerRemoveAvailableIDs []string + for _, id := range nonECSContainersIDs { + response, icErr := imageManager.client.InspectContainer(ctx, id, dockerclient.InspectContainerTimeout) + if icErr != nil { + seelog.Errorf("Error inspecting non-ECS container id: %s - %v", id, icErr) + continue + } + + seelog.Debugf("Inspecting Non-ECS Container ID [%s] for removal, Finished [%s] Status [%s]", id, response.State.FinishedAt, response.State.Status) + finishedTime, err := time.Parse(time.RFC3339Nano, response.State.FinishedAt) + if err != nil { + seelog.Errorf("Error parsing time string for container. 
id: %s, time: %s err: %s", id, response.State.FinishedAt, err) + continue + } + + if (response.State.Status == "exited" || + response.State.Status == "dead" || + response.State.Status == "created") && + time.Since(finishedTime) > imageManager.nonECSContainerCleanupWaitDuration { + nonECSContainerRemoveAvailableIDs = append(nonECSContainerRemoveAvailableIDs, id) + } + } + var numNonECSContainerDeleted = 0 + for _, id := range nonECSContainerRemoveAvailableIDs { + if numNonECSContainerDeleted == imageManager.numNonECSContainersToDelete { + break + } + seelog.Debugf("Removing non-ECS Container ID %s", id) + err := imageManager.client.RemoveContainer(ctx, id, dockerclient.RemoveContainerTimeout) + if err == nil { + seelog.Infof("Removed Container ID: %s", id) + numNonECSContainerDeleted++ + } else { + seelog.Errorf("Error Removing Container ID %s - %s", id, err) + continue + } + } +} + +func (imageManager *dockerImageManager) getNonECSContainerIDs(ctx context.Context) ([]string, error) { + var allContainersIDs []string + listContainersResponse := imageManager.client.ListContainers(ctx, true, dockerclient.ListContainersTimeout) + if listContainersResponse.Error != nil { + return nil, fmt.Errorf("Error listing containers: %v", listContainersResponse.Error) + } + allContainersIDs = append(allContainersIDs, listContainersResponse.DockerIDs...) 
+ ECSContainersIDs := imageManager.state.GetAllContainerIDs() + nonECSContainersIDs := exclude(allContainersIDs, ECSContainersIDs) + return nonECSContainersIDs, nil +} + +type ImageWithSizeID struct { + RepoTags []string + ImageID string + Size int64 + createdTime time.Time +} + +func (imageManager *dockerImageManager) removeNonECSImages(ctx context.Context, nonECSImagesNumToDelete int) { + if nonECSImagesNumToDelete == 0 { + return + } + nonECSImages := imageManager.getNonECSImages(ctx) + + // we want to sort images with size ascending + sort.Slice(nonECSImages, func(i, j int) bool { + return nonECSImages[i].Size < nonECSImages[j].Size + }) + + // we will remove the remaining nonECSImages in each performPeriodicImageCleanup call() + var numImagesAlreadyDeleted = 0 + for _, image := range nonECSImages { + if numImagesAlreadyDeleted >= nonECSImagesNumToDelete { + break + } + // use current time - image creation time to determine if image is old enough to be deleted. + if !imageManager.nonECSImageOldEnough(image) { + continue + } + if len(image.RepoTags) > 1 { + seelog.Debugf("Non-ECS image has more than one tag Image: %s (Tags: %s)", image.ImageID, image.RepoTags) + for _, tag := range image.RepoTags { + err := imageManager.client.RemoveImage(ctx, tag, dockerclient.RemoveImageTimeout) + if err != nil { + seelog.Errorf("Error removing RepoTag (ImageID: %s, Tag: %s) %v", image.ImageID, tag, err) + } else { + seelog.Infof("Image Tag Removed: %s (ImageID: %s)", tag, image.ImageID) + numImagesAlreadyDeleted++ + } + } + } else { + seelog.Debugf("Removing non-ECS Image: %s (Tags: %s)", image.ImageID, image.RepoTags) + err := imageManager.client.RemoveImage(ctx, image.ImageID, dockerclient.RemoveImageTimeout) + if err != nil { + seelog.Errorf("Error removing Image %s (Tags: %s) - %v", image.ImageID, image.RepoTags, err) + } else { + seelog.Infof("Image removed: %s (Tags: %s)", image.ImageID, image.RepoTags) + numImagesAlreadyDeleted++ + } + } + } +} + +// getNonECSImages 
returns type ImageWithSizeID with all fields populated. +func (imageManager *dockerImageManager) getNonECSImages(ctx context.Context) []ImageWithSizeID { + r := imageManager.client.ListImages(ctx, dockerclient.ListImagesTimeout) + var allImages []ImageWithSizeID + // inspect all images + for _, imageID := range r.ImageIDs { + resp, err := imageManager.client.InspectImage(imageID) + if err != nil { + seelog.Errorf("Error inspecting non-ECS image: (ImageID: %s), %s", imageID, err) + continue + } + createTime := time.Time{} + createTime, err = time.Parse(time.RFC3339, resp.Created) + if err != nil { + seelog.Warnf("Error parse the inspected non-ECS image created time (ImageID: %s), %v", imageID, err) + } + allImages = append(allImages, + ImageWithSizeID{ + ImageID: imageID, + Size: resp.Size, + RepoTags: resp.RepoTags, + createdTime: createTime, + }) + } + + // get all 'ecs' image IDs + var ecsImageIDs []string + for _, imageState := range imageManager.getAllImageStates() { + ecsImageIDs = append(ecsImageIDs, imageState.Image.ImageID) + } + + // exclude 'ecs' image IDs and image IDs with an explicitly excluded tag + var nonECSImages []ImageWithSizeID + for _, image := range allImages { + // check image ID is not excluded + if isInExclusionList(image.ImageID, ecsImageIDs) { + continue + } + // check image TAG(s) is not excluded + if !anyIsInExclusionList(image.RepoTags, imageManager.imageCleanupExclusionList) { + nonECSImages = append(nonECSImages, image) + } + } + return nonECSImages +} + +func isInExclusionList(imageName string, imageExclusionList []string) bool { + for _, exclusionName := range imageExclusionList { + if imageName == exclusionName { + return true + } + } + return false +} + +// anyIsInExclusionList returns true if any name is in the exclusion list. 
+func anyIsInExclusionList(imageNames []string, nameExclusionList []string) bool { + for _, name := range imageNames { + if isInExclusionList(name, nameExclusionList) { + return true + } + } + return false +} + +func exclude(allList []string, exclusionList []string) []string { + var ret []string + var allMap = make(map[string]bool) + for _, a := range allList { + allMap[a] = true + } + for _, b := range exclusionList { + allMap[b] = false + } + for k := range allMap { + if allMap[k] { + ret = append(ret, k) + } + } + return ret +} + +func (imageManager *dockerImageManager) imagesConsiderForDeletion(allImageStates []*image.ImageState) map[string]*image.ImageState { + var imagesConsiderForDeletionMap = make(map[string]*image.ImageState) + seelog.Info("Begin building map of eligible unused images for deletion") + for _, imageState := range allImageStates { + if imageManager.isExcludedFromCleanup(imageState) { + //imageState that we want to keep + seelog.Debugf("Image excluded from deletion: [%s]", imageState.String()) + } else { + seelog.Debugf("Image going to be considered for deletion: [%s]", imageState.String()) + imagesConsiderForDeletionMap[imageState.Image.ImageID] = imageState + } + } + return imagesConsiderForDeletionMap +} + +func (imageManager *dockerImageManager) isExcludedFromCleanup(imageState *image.ImageState) bool { + for _, ecsName := range imageState.Image.Names { + for _, exclusionName := range imageManager.imageCleanupExclusionList { + if ecsName == exclusionName { + return true + } + } + } + return false +} + +func (imageManager *dockerImageManager) removeLeastRecentlyUsedImage(ctx context.Context) error { + leastRecentlyUsedImage := imageManager.getUnusedImageForDeletion() + if leastRecentlyUsedImage == nil { + return fmt.Errorf("No more eligible images for deletion") + } + imageManager.removeImage(ctx, leastRecentlyUsedImage) + return nil +} + +func (imageManager *dockerImageManager) getUnusedImageForDeletion() *image.ImageState { + 
candidateImageStatesForDeletion := imageManager.getCandidateImagesForDeletion() + if len(candidateImageStatesForDeletion) < 1 { + seelog.Infof("No eligible images for deletion for this cleanup cycle") + return nil + } + seelog.Infof("Found %d eligible images for deletion", len(candidateImageStatesForDeletion)) + return imageManager.getLeastRecentlyUsedImage(candidateImageStatesForDeletion) +} + +func (imageManager *dockerImageManager) removeImage(ctx context.Context, leastRecentlyUsedImage *image.ImageState) { + // Handling deleting while traversing a slice + imageNames := make([]string, len(leastRecentlyUsedImage.Image.Names)) + copy(imageNames, leastRecentlyUsedImage.Image.Names) + if len(imageNames) == 0 { + // potentially untagged image of format :; remove by ID + imageManager.deleteImage(ctx, leastRecentlyUsedImage.Image.ImageID, leastRecentlyUsedImage) + } else { + // Image has multiple tags/repos. Untag each name and delete the final reference to image + for _, imageName := range imageNames { + imageManager.deleteImage(ctx, imageName, leastRecentlyUsedImage) + } + } +} + +func (imageManager *dockerImageManager) deleteImage(ctx context.Context, imageID string, imageState *image.ImageState) { + if imageID == "" { + seelog.Errorf("Image ID to be deleted is null") + return + } + seelog.Infof("Removing Image: %s", imageID) + err := imageManager.client.RemoveImage(ctx, imageID, dockerclient.RemoveImageTimeout) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), imageNotFoundForDeletionError) { + seelog.Errorf("Image already removed from the instance: %v", err) + } else { + seelog.Errorf("Error removing Image %v - %v", imageID, err) + delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID) + return + } + } + seelog.Infof("Image removed: %v", imageID) + imageState.RemoveImageName(imageID) + if len(imageState.Image.Names) == 0 { + seelog.Infof("Cleaning up all tracking information for image %s as it has zero references", 
imageID) + delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID) + imageManager.removeImageState(imageState) + imageManager.state.RemoveImageState(imageState) + } +} + +func (imageManager *dockerImageManager) GetImageStateFromImageName(containerImageName string) (*image.ImageState, bool) { + imageManager.updateLock.Lock() + defer imageManager.updateLock.Unlock() + for _, imageState := range imageManager.getAllImageStates() { + for _, imageName := range imageState.Image.Names { + if imageName == containerImageName { + return imageState, true + } + } + } + return nil, false +} diff --git a/agent/engine/docker_image_manager_data_test.go b/agent/engine/docker_image_manager_data_test.go new file mode 100644 index 00000000000..1624edf75a4 --- /dev/null +++ b/agent/engine/docker_image_manager_data_test.go @@ -0,0 +1,124 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/stretchr/testify/assert" +) + +/* +This file contains unit tests that specifically check the data persisting behavior of the image manager +*/ + +const ( + testImageID = "test-id" + testImageID2 = "test-id2" + testImageName = "test-name" +) + +var ( + testImageStateData = &image.ImageState{ + Image: &image.Image{ + ImageID: testImageID, + }, + } + testContainerData = &apicontainer.Container{ + Name: testContainerName, + ImageID: testImageID, + } +) + +func TestAddImageStateSaveData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + imageManager := &dockerImageManager{ + dataClient: dataClient, + } + + imageManager.addImageState(testImageStateData) + imageStates, err := dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 1) +} + +func TestAddContainerReferenceToNewImageStateSaveData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + imageManager := &dockerImageManager{ + dataClient: dataClient, + } + + imageManager.addContainerReferenceToNewImageState(testContainerData, 0) + imageStates, err := dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 1) +} + +func TestAddContainerReferenceToExistingImageStateSaveData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + imageManager := &dockerImageManager{ + dataClient: dataClient, + } + imageManager.imageStates = append(imageManager.imageStates, testImageStateData) + imageManager.addContainerReferenceToExistingImageState(testContainerData) + imageStates, err := dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 1) +} + +func TestRemoveExistingImageNameOfDifferentID(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + 
imageManager := &dockerImageManager{ + dataClient: dataClient, + } + + testImageStateData.Image.Names = []string{testImageName} + imageManager.addImageState(testImageStateData) + imageStates, err := dataClient.GetImageStates() + assert.Len(t, imageStates[0].Image.Names, 1) + + imageManager.removeExistingImageNameOfDifferentID(testImageName, testImageID2) + imageStates, err = dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 1) + assert.Len(t, imageStates[0].Image.Names, 0) +} + +func TestRemoveImageStateSaveData(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + imageManager := &dockerImageManager{ + dataClient: dataClient, + } + + imageManager.addImageState(testImageStateData) + imageManager.removeImageStateData(testImageID) + imageStates, err := dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 0) +} diff --git a/agent/engine/docker_image_manager_integ_test.go b/agent/engine/docker_image_manager_integ_test.go new file mode 100644 index 00000000000..4f1f194e6a3 --- /dev/null +++ b/agent/engine/docker_image_manager_integ_test.go @@ -0,0 +1,687 @@ +// +build integration +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// The DockerTaskEngine is an abstraction over the DockerGoClient so that +// it does not have to know about tasks, only containers + +package engine + +import ( + "context" + "errors" + "fmt" + "runtime" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + imageRemovalTimeout = 30 * time.Second + taskCleanupTimeoutSeconds = 30 +) + +// Deletion of images in the order of LRU time: Happy path +// a. This includes starting up agent, pull images, start containers, +// account them in image manager,  stop containers, remove containers, account this in image manager, +// b. Simulate the pulled time (so that it passes the minimum age criteria +// for getting chosen for deletion ) +// c. Start image cleanup , ensure that ONLY the top 2 eligible LRU images +// are removed from the instance,  and those deleted images’ image states are removed from image manager. +// d. Ensure images that do not pass the ‘minimumAgeForDeletion’ criteria are not removed. +// e. Image has not passed the ‘hasNoAssociatedContainers’ criteria. +// f. Ensure that that if not eligible, image is not deleted from the instance and image reference in ImageManager is not removed. 
+func TestIntegImageCleanupHappyCase(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip(`Skipping this test because of error: level=error time=2020-05-27T20:20:03Z msg="Error removing` + + ` Image amazon/image-cleanup-test-image2:make - Error response from daemon: hcsshim::GetComputeSystems:` + + ` The requested compute system operation is not valid in the current state." module=log.go`) + } + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 5 * time.Second + + // Set low values so this test can complete in a sane amout of time + cfg.MinimumImageDeletionAge = 1 * time.Second + cfg.NumImagesToDeletePerCycle = 2 + // start agent + taskEngine, done, _ := setup(cfg, nil, t) + + imageManager := taskEngine.(*DockerTaskEngine).imageManager.(*dockerImageManager) + imageManager.SetDataClient(data.NewNoopClient()) + + defer func() { + done() + // Force cleanup all test images and containers + cleanupImagesHappy(imageManager) + }() + + stateChangeEvents := taskEngine.StateChangeEvents() + + // Create test Task + taskName := "imgClean" + testTask := createImageCleanupHappyTestTask(taskName) + + go taskEngine.AddTask(testTask) + + // Verify that Task is running + err := verifyTaskIsRunning(stateChangeEvents, testTask) + if err != nil { + t.Fatal(err) + } + + imageState1, ok := imageManager.GetImageStateFromImageName(test1Image1Name) + require.True(t, ok, "Could not find image state for %s", test1Image1Name) + t.Logf("Found image state for %s", test1Image1Name) + + imageState2, ok := imageManager.GetImageStateFromImageName(test1Image2Name) + require.True(t, ok, "Could not find image state for %s", test1Image2Name) + t.Logf("Found image state for %s", test1Image2Name) + + imageState3, ok := imageManager.GetImageStateFromImageName(test1Image3Name) + require.True(t, ok, "Could not find image state for %s", test1Image3Name) + t.Logf("Found image state for %s", test1Image3Name) + + imageState1ImageID := imageState1.Image.ImageID + imageState2ImageID := 
imageState2.Image.ImageID + imageState3ImageID := imageState3.Image.ImageID + + // Set the ImageState.LastUsedAt to a value far in the past to ensure the test images are deleted. + // This will make these test images the LRU images. + imageState1.LastUsedAt = imageState1.LastUsedAt.Add(-99995 * time.Hour) + imageState2.LastUsedAt = imageState2.LastUsedAt.Add(-99994 * time.Hour) + imageState3.LastUsedAt = imageState3.LastUsedAt.Add(-99993 * time.Hour) + + // Verify Task is stopped. + verifyTaskIsStopped(stateChangeEvents, testTask) + testTask.SetSentStatus(apitaskstatus.TaskStopped) + + // Allow Task cleanup to occur + time.Sleep(5 * time.Second) + + // Verify Task is cleaned up + err = verifyTaskIsCleanedUp(taskName, taskEngine) + if err != nil { + t.Fatal(err) + } + + // Call Image removal + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.removeUnusedImages(ctx) + + // Verify top 2 LRU images are deleted from image manager + err = verifyImagesAreRemoved(imageManager, imageState1ImageID, imageState2ImageID) + if err != nil { + t.Fatal(err) + } + + // Verify 3rd LRU image is not removed + err = verifyImagesAreNotRemoved(imageManager, imageState3ImageID) + if err != nil { + t.Fatal(err) + } + + // Verify top 2 LRU images are removed from docker + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState1ImageID) + if !client.IsErrNotFound(err) { + t.Fatalf("Image was not removed successfully") + } + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState2ImageID) + if !client.IsErrNotFound(err) { + t.Fatalf("Image was not removed successfully") + } + + // Verify 3rd LRU image has not been removed from Docker + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState3ImageID) + if err != nil { + t.Fatalf("Image should not have been removed from Docker") + } +} + +// Test that images not falling in the image deletion eligibility criteria are not removed: +// a. 
Ensure images that do not pass the ‘minimumAgeForDeletion’ criteria are not removed. +// b. Image has not passed the ‘hasNoAssociatedContainers’ criteria. +// c. Ensure that the image is not deleted from the instance and image reference in ImageManager is not removed. +func TestIntegImageCleanupThreshold(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 1 * time.Second + + // Set low values so this test can complete in a sane amout of time + cfg.MinimumImageDeletionAge = 15 * time.Minute + // Set to delete three images, but in this test we expect only two images to be removed + cfg.NumImagesToDeletePerCycle = 3 + // start agent + taskEngine, done, _ := setup(cfg, nil, t) + + imageManager := taskEngine.(*DockerTaskEngine).imageManager.(*dockerImageManager) + imageManager.SetDataClient(data.NewNoopClient()) + + defer func() { + done() + // Force cleanup all test images and containers + cleanupImagesThreshold(imageManager) + }() + + stateChangeEvents := taskEngine.StateChangeEvents() + + // Create test Task + taskName := "imgClean" + testTask := createImageCleanupThresholdTestTask(taskName) + + // Start Task + go taskEngine.AddTask(testTask) + + // Verify that Task is running + err := verifyTaskIsRunning(stateChangeEvents, testTask) + if err != nil { + t.Fatal(err) + } + + imageState1, ok := imageManager.GetImageStateFromImageName(test2Image1Name) + require.True(t, ok, "Could not find image state for %s", test2Image1Name) + t.Logf("Found image state for %s", test2Image1Name) + + imageState2, ok := imageManager.GetImageStateFromImageName(test2Image2Name) + require.True(t, ok, "Could not find image state for %s", test2Image2Name) + t.Logf("Found image state for %s", test2Image2Name) + + imageState3, ok := imageManager.GetImageStateFromImageName(test2Image3Name) + require.True(t, ok, "Could not find image state for %s", test2Image3Name) + t.Logf("Found image state for %s", test2Image3Name) + + imageState1ImageID := 
imageState1.Image.ImageID + imageState2ImageID := imageState2.Image.ImageID + imageState3ImageID := imageState3.Image.ImageID + + // Set the ImageState.LastUsedAt to a value far in the past to ensure the test images are deleted. + // This will make these the LRU images so they are deleted. + imageState1.LastUsedAt = imageState1.LastUsedAt.Add(-99995 * time.Hour) + imageState2.LastUsedAt = imageState2.LastUsedAt.Add(-99994 * time.Hour) + imageState3.LastUsedAt = imageState3.LastUsedAt.Add(-99993 * time.Hour) + + // Set two containers to have pull time > threshold + imageState1.PulledAt = imageState1.PulledAt.Add(-20 * time.Minute) + imageState2.PulledAt = imageState2.PulledAt.Add(-10 * time.Minute) + imageState3.PulledAt = imageState3.PulledAt.Add(-25 * time.Minute) + + // Verify Task is stopped + verifyTaskIsStopped(stateChangeEvents, testTask) + testTask.SetSentStatus(apitaskstatus.TaskStopped) + + // Allow Task cleanup to occur + time.Sleep(5 * time.Second) + + // Verify Task is cleaned up + err = verifyTaskIsCleanedUp(taskName, taskEngine) + if err != nil { + t.Fatal(err) + } + + // Call Image removal + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.removeUnusedImages(ctx) + + // Verify Image1 & Image3 are removed from ImageManager as they are beyond the minimumAge threshold + err = verifyImagesAreRemoved(imageManager, imageState1ImageID, imageState3ImageID) + if err != nil { + t.Fatal(err) + } + + // Verify Image2 is not removed, below threshold for minimumAge + err = verifyImagesAreNotRemoved(imageManager, imageState2ImageID) + if err != nil { + t.Fatal(err) + } + + // Verify Image1 & Image3 are removed from docker + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState1ImageID) + if !client.IsErrNotFound(err) { + t.Fatalf("Image was not removed successfully") + } + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState3ImageID) + if !client.IsErrNotFound(err) { + t.Fatalf("Image was not 
removed successfully") + } + + // Verify Image2 has not been removed from Docker + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageState2ImageID) + if err != nil { + t.Fatalf("Image should not have been removed from Docker") + } +} + +// TestImageWithSameNameAndDifferentID tests image can be correctly removed when tasks +// are running with the same image name, but different image id. +func TestImageWithSameNameAndDifferentID(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 1 * time.Second + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Set low values so this test can complete in a sane amout of time + cfg.MinimumImageDeletionAge = 15 * time.Minute + + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + dockerClient := taskEngine.(*DockerTaskEngine).client + + // DockerClient doesn't implement TagImage, create a go docker client + sdkDockerClient, err := client.NewClientWithOpts(client.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating SDK docker client failed") + + imageManager := taskEngine.(*DockerTaskEngine).imageManager.(*dockerImageManager) + imageManager.SetDataClient(data.NewNoopClient()) + + stateChangeEvents := taskEngine.StateChangeEvents() + + // Pull the images needed for the test + if _, err = dockerClient.InspectImage(test3Image1Name); client.IsErrNotFound(err) { + metadata := dockerClient.PullImage(ctx, test3Image1Name, nil, dockerclient.LoadImageTimeout) + assert.NoError(t, metadata.Error, "Failed to pull image %s", test3Image1Name) + } + if _, err = dockerClient.InspectImage(test3Image2Name); client.IsErrNotFound(err) { + metadata := dockerClient.PullImage(ctx, test3Image2Name, nil, dockerclient.LoadImageTimeout) + assert.NoError(t, metadata.Error, "Failed to pull image %s", test3Image2Name) + } + if _, err = dockerClient.InspectImage(test3Image3Name); client.IsErrNotFound(err) { + metadata := 
dockerClient.PullImage(ctx, test3Image3Name, nil, dockerclient.LoadImageTimeout) + assert.NoError(t, metadata.Error, "Failed to pull image %s", test3Image3Name) + } + + // The same image name used by all tasks in this test + identicalImageName := "testimagewithsamenameanddifferentid:latest" + // Create three tasks which use the image with same name but different ID + task1 := createTestTask("task1") + task2 := createTestTask("task2") + task3 := createTestTask("task3") + task1.Containers[0].Image = identicalImageName + task2.Containers[0].Image = identicalImageName + task3.Containers[0].Image = identicalImageName + + err = renameImage(test3Image1Name, task1.Containers[0].Image, sdkDockerClient) + assert.NoError(t, err, "Renaming the image failed") + + // start and wait for task1 to be running + go taskEngine.AddTask(task1) + err = verifyTaskIsRunning(stateChangeEvents, task1) + require.NoError(t, err, "task1") + + // Verify image state is updated correctly + imageState1, ok := imageManager.GetImageStateFromImageName(identicalImageName) + require.True(t, ok, "Could not find image state for %s", identicalImageName) + t.Logf("Found image state for %s", identicalImageName) + imageID1 := imageState1.Image.ImageID + + // Using another image but rename to the same name as task1 for task2 + err = renameImage(test3Image2Name, task2.Containers[0].Image, sdkDockerClient) + require.NoError(t, err, "Renaming the image failed") + + // Start and wait for task2 to be running + go taskEngine.AddTask(task2) + err = verifyTaskIsRunning(stateChangeEvents, task2) + require.NoError(t, err, "task2") + + // Verify image state is updated correctly + imageState2, ok := imageManager.GetImageStateFromImageName(identicalImageName) + require.True(t, ok, "Could not find image state for %s", identicalImageName) + t.Logf("Found image state for %s", identicalImageName) + imageID2 := imageState2.Image.ImageID + require.NotEqual(t, imageID2, imageID1, "The image id in task 2 should be different from 
image in task 1") + + // Using a different image for task3 and rename it to the same name as task1 and task2 + err = renameImage(test3Image3Name, task3.Containers[0].Image, sdkDockerClient) + require.NoError(t, err, "Renaming the image failed") + + // Start and wait for task3 to be running + go taskEngine.AddTask(task3) + err = verifyTaskIsRunning(stateChangeEvents, task3) + require.NoError(t, err, "task3") + + // Verify image state is updated correctly + imageState3, ok := imageManager.GetImageStateFromImageName(identicalImageName) + require.True(t, ok, "Could not find image state for %s", identicalImageName) + t.Logf("Found image state for %s", identicalImageName) + imageID3 := imageState3.Image.ImageID + require.NotEqual(t, imageID3, imageID1, "The image id in task3 should be different from image in task1") + require.NotEqual(t, imageID3, imageID2, "The image id in task3 should be different from image in task2") + + // Modify image state sothat the image is eligible for deletion + imageState1.LastUsedAt = imageState1.LastUsedAt.Add(-99995 * time.Hour) + imageState2.LastUsedAt = imageState2.LastUsedAt.Add(-99994 * time.Hour) + imageState3.LastUsedAt = imageState3.LastUsedAt.Add(-99993 * time.Hour) + + imageState1.PulledAt = imageState1.PulledAt.Add(-20 * time.Minute) + imageState2.PulledAt = imageState2.PulledAt.Add(-19 * time.Minute) + imageState3.PulledAt = imageState3.PulledAt.Add(-18 * time.Minute) + + go discardEvents(stateChangeEvents) + // Wait for task to be stopped + waitForTaskStoppedByCheckStatus(task1) + waitForTaskStoppedByCheckStatus(task2) + waitForTaskStoppedByCheckStatus(task3) + + task1.SetSentStatus(apitaskstatus.TaskStopped) + task2.SetSentStatus(apitaskstatus.TaskStopped) + task3.SetSentStatus(apitaskstatus.TaskStopped) + + // Allow Task cleanup to occur + time.Sleep(5 * time.Second) + + err = verifyTaskIsCleanedUp("task1", taskEngine) + assert.NoError(t, err, "task1") + err = verifyTaskIsCleanedUp("task2", taskEngine) + assert.NoError(t, 
err, "task2") + err = verifyTaskIsCleanedUp("task3", taskEngine) + assert.NoError(t, err, "task3") + + imageManager.removeUnusedImages(ctx) + + // Verify all the three images are removed from image manager + err = verifyImagesAreRemoved(imageManager, imageID1, imageID2, imageID3) + require.NoError(t, err) + + // Verify images are removed by docker + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageID1) + assert.True(t, client.IsErrNotFound(err), "Image was not removed successfully, image: %s", imageID1) + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageID2) + assert.True(t, client.IsErrNotFound(err), "Image was not removed successfully, image: %s", imageID2) + _, err = taskEngine.(*DockerTaskEngine).client.InspectImage(imageID3) + assert.True(t, client.IsErrNotFound(err), "Image was not removed successfully, image: %s", imageID3) +} + +// TestImageWithSameIDAndDifferentNames tests images can be correctly removed if +// tasks are running with the same image id but different image name +func TestImageWithSameIDAndDifferentNames(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 1 * time.Second + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Set low values so this test can complete in a sane amout of time + cfg.MinimumImageDeletionAge = 15 * time.Minute + + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + dockerClient := taskEngine.(*DockerTaskEngine).client + + // DockerClient doesn't implement TagImage, so create a go docker client + sdkDockerClient, err := client.NewClientWithOpts(client.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating docker client failed") + + imageManager := taskEngine.(*DockerTaskEngine).imageManager.(*dockerImageManager) + imageManager.SetDataClient(data.NewNoopClient()) + + stateChangeEvents := taskEngine.StateChangeEvents() + + // Start three tasks which using the image with same ID and 
different Name + task1 := createTestTask("task1") + task2 := createTestTask("task2") + task3 := createTestTask("task3") + task1.Containers[0].Image = "testimagewithsameidanddifferentnames-1:latest" + task2.Containers[0].Image = "testimagewithsameidanddifferentnames-2:latest" + task3.Containers[0].Image = "testimagewithsameidanddifferentnames-3:latest" + + // Pull the images needed for the test + if _, err = dockerClient.InspectImage(test4Image1Name); client.IsErrNotFound(err) { + metadata := dockerClient.PullImage(ctx, test4Image1Name, nil, defaultTestConfigIntegTest().ImagePullTimeout) + assert.NoError(t, metadata.Error, "Failed to pull image %s", test4Image1Name) + } + + // Using testImage1Name for all the tasks but with different name + err = renameImage(test4Image1Name, task1.Containers[0].Image, sdkDockerClient) + require.NoError(t, err, "Renaming image failed") + + // Start and wait for task1 to be running + go taskEngine.AddTask(task1) + err = verifyTaskIsRunning(stateChangeEvents, task1) + require.NoError(t, err) + + imageState1, ok := imageManager.GetImageStateFromImageName(task1.Containers[0].Image) + require.True(t, ok, "Could not find image state for %s", task1.Containers[0].Image) + t.Logf("Found image state for %s", task1.Containers[0].Image) + imageID1 := imageState1.Image.ImageID + + // copy the image for task2 to run with same image but different name + err = sdkDockerClient.ImageTag(ctx, task1.Containers[0].Image, task2.Containers[0].Image) + require.NoError(t, err, "Trying to copy image failed") + + // Start and wait for task2 to be running + go taskEngine.AddTask(task2) + err = verifyTaskIsRunning(stateChangeEvents, task2) + require.NoError(t, err) + + imageState2, ok := imageManager.GetImageStateFromImageName(task2.Containers[0].Image) + require.True(t, ok, "Could not find image state for %s", task2.Containers[0].Image) + t.Logf("Found image state for %s", task2.Containers[0].Image) + imageID2 := imageState2.Image.ImageID + require.Equal(t, 
imageID2, imageID1, "The image id in task2 should be same as in task1") + + // make task3 use the same image name but different image id + err = sdkDockerClient.ImageTag(ctx, task1.Containers[0].Image, task3.Containers[0].Image) + require.NoError(t, err, "Trying to copy image failed") + + // Start and wait for task3 to be running + go taskEngine.AddTask(task3) + err = verifyTaskIsRunning(stateChangeEvents, task3) + assert.NoError(t, err) + + imageState3, ok := imageManager.GetImageStateFromImageName(task3.Containers[0].Image) + require.True(t, ok, "Could not find image state for %s", task3.Containers[0].Image) + t.Logf("Found image state for %s", task3.Containers[0].Image) + imageID3 := imageState3.Image.ImageID + require.Equal(t, imageID3, imageID1, "The image id in task3 should be the same as in task1") + + // Modify the image state so that the image is eligible for deletion + // all the three tasks has the same imagestate + imageState1.LastUsedAt = imageState1.LastUsedAt.Add(-99995 * time.Hour) + imageState1.PulledAt = imageState1.PulledAt.Add(-20 * time.Minute) + + go discardEvents(stateChangeEvents) + // Wait for the Task to be stopped + waitForTaskStoppedByCheckStatus(task1) + waitForTaskStoppedByCheckStatus(task2) + waitForTaskStoppedByCheckStatus(task3) + + task1.SetSentStatus(apitaskstatus.TaskStopped) + task2.SetSentStatus(apitaskstatus.TaskStopped) + task3.SetSentStatus(apitaskstatus.TaskStopped) + + // Allow Task cleanup to occur + time.Sleep(5 * time.Second) + + err = verifyTaskIsCleanedUp("task1", taskEngine) + assert.NoError(t, err, "task1") + err = verifyTaskIsCleanedUp("task2", taskEngine) + assert.NoError(t, err, "task2") + err = verifyTaskIsCleanedUp("task3", taskEngine) + assert.NoError(t, err, "task3") + + imageManager.removeUnusedImages(ctx) + + // Verify all the images are removed from image manager + err = verifyImagesAreRemoved(imageManager, imageID1) + assert.NoError(t, err, "imageID1") + + // Verify images are removed by docker + _, err = 
taskEngine.(*DockerTaskEngine).client.InspectImage(imageID1) + assert.True(t, client.IsErrNotFound(err), "Image was not removed successfully") +} + +// renameImage retag the image with the target tag and delete the source tag +func renameImage(source string, target string, client *client.Client) error { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + err := client.ImageTag(ctx, source, target) + if err != nil { + return fmt.Errorf("Trying to tag image failed, err: %v", err) + } + + // delete the source tag + _, err = client.ImageRemove(ctx, source, types.ImageRemoveOptions{}) + if err != nil { + return fmt.Errorf("Failed to remove the source tag of the image: %s", source) + } + + return nil +} + +func createImageCleanupHappyTestTask(taskName string) *apitask.Task { + return &apitask.Task{ + Arn: taskName, + Family: taskName, + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{ + { + Name: "test1", + Image: test1Image1Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + }, + { + Name: "test2", + Image: test1Image2Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + }, + { + Name: "test3", + Image: test1Image3Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + }, + }, + } +} + +func createImageCleanupThresholdTestTask(taskName string) *apitask.Task { + return &apitask.Task{ + Arn: taskName, + Family: taskName, + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{ + { + Name: "test1", + Image: test2Image1Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + }, + { + Name: "test2", + Image: test2Image2Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 
512, + Memory: 256, + }, + { + Name: "test3", + Image: test2Image3Name, + Essential: false, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + }, + }, + } +} + +func verifyTaskIsCleanedUp(taskName string, taskEngine TaskEngine) error { + for i := 0; i < taskCleanupTimeoutSeconds; i++ { + _, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskName) + if !ok { + break + } + time.Sleep(1 * time.Second) + if i == (taskCleanupTimeoutSeconds - 1) { + return errors.New("Expected Task to have been swept but was not") + } + } + return nil +} + +func verifyImagesAreRemoved(imageManager *dockerImageManager, imageIDs ...string) error { + imagesNotRemovedList := []string{} + for _, imageID := range imageIDs { + imageState, ok := imageManager.getImageState(imageID) + if ok { + imagesNotRemovedList = append(imagesNotRemovedList, imageState.Image.String()) + } + } + if len(imagesNotRemovedList) > 0 { + return fmt.Errorf("Image states still exist for: %s", imagesNotRemovedList) + } + return nil +} + +func verifyImagesAreNotRemoved(imageManager *dockerImageManager, imageIDs ...string) error { + imagesRemovedList := []string{} + for _, imageID := range imageIDs { + imageState, ok := imageManager.getImageState(imageID) + if !ok { + imagesRemovedList = append(imagesRemovedList, imageState.Image.String()) + } + } + if len(imagesRemovedList) > 0 { + return fmt.Errorf("Could not find images: %s in ImageManager", imagesRemovedList) + } + return nil +} + +func cleanupImagesHappy(imageManager *dockerImageManager) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.client.RemoveContainer(ctx, "test1", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveContainer(ctx, "test2", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveContainer(ctx, "test3", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveImage(ctx, test1Image1Name, imageRemovalTimeout) + 
imageManager.client.RemoveImage(ctx, test1Image2Name, imageRemovalTimeout) + imageManager.client.RemoveImage(ctx, test1Image3Name, imageRemovalTimeout) +} + +func cleanupImagesThreshold(imageManager *dockerImageManager) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.client.RemoveContainer(ctx, "test1", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveContainer(ctx, "test2", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveContainer(ctx, "test3", dockerclient.RemoveContainerTimeout) + imageManager.client.RemoveImage(ctx, test2Image1Name, imageRemovalTimeout) + imageManager.client.RemoveImage(ctx, test2Image2Name, imageRemovalTimeout) + imageManager.client.RemoveImage(ctx, test2Image3Name, imageRemovalTimeout) +} diff --git a/agent/engine/docker_image_manager_test.go b/agent/engine/docker_image_manager_test.go new file mode 100644 index 00000000000..945ab44ba7d --- /dev/null +++ b/agent/engine/docker_image_manager_test.go @@ -0,0 +1,1975 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "errors" + "reflect" + "sync" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func defaultTestConfig() *config.Config { + cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient()) + return cfg +} + +func TestNewImageManagerExcludesCachedImages(t *testing.T) { + cfg := defaultTestConfig() + cfg.PauseContainerImageName = "pause-name" + cfg.PauseContainerTag = "pause-tag" + cfg.ImageCleanupExclusionList = []string{"excluded:1"} + expected := []string{ + "excluded:1", + "pause-name:pause-tag", + config.DefaultPauseContainerImageName + ":" + config.DefaultPauseContainerTag, + config.CachedImageNameAgentContainer, + } + imageManager := NewImageManager(cfg, nil, nil) + dockerImageManager, ok := imageManager.(*dockerImageManager) + require.True(t, ok, "imageManager must be *dockerImageManager") + assert.ElementsMatch(t, expected, dockerImageManager.imageCleanupExclusionList) +} + +// TestImagePullRemoveDeadlock tests if there's a deadlock when trying to +// pull an image while image clean up is in progress +func TestImagePullRemoveDeadlock(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + cfg := defaultTestConfig() + imageManager := NewImageManager(cfg, client, 
dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + sleepContainer := &apicontainer.Container{ + Name: "sleep", + Image: "busybox", + } + sleepContainerImageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty"}, + } + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + + // Cause a fake delay when recording container reference so that the + // race condition between ImagePullLock and updateLock gets exercised + // If updateLock precedes ImagePullLock, it can cause a deadlock + client.EXPECT().InspectImage(sleepContainer.Image).Do(func(image string) { + time.Sleep(time.Second) + }).Return(sleepContainerImageInspected, nil) + + var wg sync.WaitGroup + wg.Add(2) + go func() { + ImagePullDeleteLock.Lock() + defer ImagePullDeleteLock.Unlock() + err := imageManager.RecordContainerReference(sleepContainer) + assert.NoError(t, err) + wg.Done() + }() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + go func() { + imageManager.(*dockerImageManager).removeUnusedImages(ctx) + wg.Done() + }() + wg.Wait() +} + +func TestAddAndRemoveContainerToImageStateReferenceHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), client, dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now().AddDate(0, -2, 0), + } + sourceImageState.AddImageName(container.Image) + imageManager.(*dockerImageManager).addImageState(sourceImageState) + imageInspected := 
&types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil) + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + imageState, ok := imageManager.(*dockerImageManager).getImageState(imageInspected.ID) + if !ok { + t.Error("Error in retrieving existing Image State for the Container") + } + if !reflect.DeepEqual(sourceImageState, imageState) { + t.Error("Mismatch between added and retrieved image state") + } + err = imageManager.RemoveContainerReferenceFromImageState(container) + if err != nil { + t.Error("Error removing container reference from image state") + } + imageState, _ = imageManager.(*dockerImageManager).getImageState(imageInspected.ID) + if len(imageState.Containers) != 0 { + t.Error("Error removing container reference from image state") + } +} + +func TestRecordContainerReferenceInspectError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + dataClient: data.NewNoopClient(), + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now(), + } + sourceImageState.AddImageName(container.Image) + imageManager.addImageState(sourceImageState) + client.EXPECT().InspectImage(container.Image).Return(nil, errors.New("error inspecting")).AnyTimes() + err := imageManager.RecordContainerReference(container) + if 
err == nil { + t.Error("Expected error in inspecting image while adding container to image state") + } +} + +func TestRecordContainerReferenceWithNoImageName(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now(), + } + imageManager.addImageState(sourceImageState) + + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + imageState, ok := imageManager.getImageState(imageInspected.ID) + if !ok { + t.Error("Error in retrieving existing Image State for the Container") + } + for _, imageName := range imageState.Image.Names { + if imageName != container.Image { + t.Error("Error while adding image name to image state") + } + } +} + +func TestAddInvalidContainerReferenceToImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), client, dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Image: "", + } + err := imageManager.RecordContainerReference(container) + if 
err == nil { + t.Error("Expected error adding container reference with no image name to image state") + } +} + +func TestAddContainerReferenceToExistingImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()} + imageManager.SetDataClient(data.NewNoopClient()) + imageID := "sha256:qwerty" + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + ImageID: imageID, + } + sourceImage := &image.Image{ + ImageID: imageID, + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + } + sourceImage1 := &image.Image{ + ImageID: "sha256:asdfg", + } + sourceImageState1 := &image.ImageState{ + Image: sourceImage1, + } + sourceImageState1.AddImageName("testContainerImage") + imageManager.addImageState(sourceImageState) + imageManager.addImageState(sourceImageState1) + if !imageManager.addContainerReferenceToExistingImageState(container) { + t.Error("Error in adding container to an already existing image state") + } + if !reflect.DeepEqual(sourceImageState.Containers[0], container) { + t.Error("Incorrect container added to an already existing image state") + } + if len(sourceImageState1.Image.Names) != 0 { + t.Error("Error removing existing image name of different ID") + } +} + +func TestFetchRepoDigest(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()} + + testCases := []struct { + container *apicontainer.Container + imageInspected *types.ImageInspect + }{ + { + container: &apicontainer.Container{ + Name: "testContainer1", + Image: "repo1", + ImageDigest: "", + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{"repo1@digest1", "repo2@digest2", "repo3@digest3"}, + }, + }, + { + 
container: &apicontainer.Container{ + Name: "testContainer2", + Image: "repo1:latest", + ImageDigest: "", + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{"repo1@digest1", "repo2@digest2", "repo3@digest3"}, + }, + }, + { + container: &apicontainer.Container{ + Name: "testContainer3", + Image: "repo1@sha256:12345", + ImageDigest: "", + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{"repo1@sha256:12345", "repo2@digest2", "repo3"}, + }, + }, + { + container: &apicontainer.Container{ + Name: "testContainer4", + Image: "mysql123", + ImageDigest: "", + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{}, + }, + }, + { + container: &apicontainer.Container{ + Name: "testContainer5", + Image: "mysql", + ImageDigest: "", + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{"mysql", "repo2@digest2"}, + }, + }, + { + container: &apicontainer.Container{ + Name: "testContainer6", + Image: "123456781234.dkr.ecr.us-west-2.amazonaws.com/test-rci@sha256:d1c14fcf2e9476ed58ebc4251b211f403f271e96b6c3d9ada0f1c5454ca4d230", + ImageDigest: "sha256:d1c14fcf2e9476ed58ebc4251b211f403f271e96b6c3d9ada0f1c5454ca4d230", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: "123456781234", + }, + }, + }, + imageInspected: &types.ImageInspect{ + RepoDigests: []string{"repo1@digest1", "repo2@digest2", "123456781234.dkr.ecr.us-west-2.amazonaws.com/test-rci@sha256:d1c14fcf2e9476ed58ebc4251b211f403f271e96b6c3d9ada0f1c5454ca4d230"}, + }, + }, + { + container: &apicontainer.Container{ + Name: "testContainer7", + Image: "123456781234.dkr.ecr.us-west-2.amazonaws.com/ubuntu:trusty", + ImageDigest: "sha256:2feffff9eeca4e736f9f8e57813a97fe930554f474f7795ffa5a9261adeaaf44", + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + RegistryID: "123456781234", + }, + }, + }, + imageInspected: 
&types.ImageInspect{ + RepoDigests: []string{"repo1@digest1", "repo2@digest2", "", "123456781234.dkr.ecr.us-west-2.amazonaws.com/ubuntu@sha256:2feffff9eeca4e736f9f8e57813a97fe930554f474f7795ffa5a9261adeaaf44"}, + }, + }, + } + + for i, test := range testCases { + resultRepoDigest := imageManager.fetchRepoDigest(test.imageInspected, test.container) + assert.Equal(t, test.container.ImageDigest, resultRepoDigest, "incorrect repoDigest", i) + } +} + +func TestAddContainerReferenceToExistingImageStateNoState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()} + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + ImageID: "sha256:qwerty", + } + if imageManager.addContainerReferenceToExistingImageState(container) { + t.Error("Error adding container to an incorrect existing image state") + } +} + +func TestAddContainerReferenceToNewImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()} + imageManager.SetDataClient(data.NewNoopClient()) + imageID := "sha256:qwerty" + var imageSize int64 + imageSize = 18767 + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + ImageID: imageID, + } + imageManager.addContainerReferenceToNewImageState(container, imageSize) + _, ok := imageManager.getImageState(imageID) + if !ok { + t.Error("Error adding container reference to new image state") + } +} + +func TestAddContainerReferenceToNewImageStateAddedState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{client: client, state: 
dockerstate.NewTaskEngineState()} + imageManager.SetDataClient(data.NewNoopClient()) + imageID := "sha256:qwerty" + var imageSize int64 + imageSize = 18767 + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + ImageID: imageID, + } + sourceImage := &image.Image{ + ImageID: imageID, + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + } + sourceImage1 := &image.Image{ + ImageID: "sha256:asdfg", + } + sourceImageState1 := &image.ImageState{ + Image: sourceImage1, + } + sourceImageState1.AddImageName("testContainerImage") + imageManager.addImageState(sourceImageState) + imageManager.addImageState(sourceImageState1) + imageManager.addContainerReferenceToNewImageState(container, imageSize) + if !reflect.DeepEqual(sourceImageState.Containers[0], container) { + t.Error("Incorrect container added to an already existing image state") + } + if len(sourceImageState1.Image.Names) != 0 { + t.Error("Error removing existing image name of different ID") + } +} + +func TestRemoveContainerReferenceFromInvalidImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), client, dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Image: "myContainerImage", + } + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RemoveContainerReferenceFromImageState(container) + if err == nil { + t.Error("Expected error while adding container to an invalid image state") + } +} + +func TestRemoveInvalidContainerReferenceFromImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), 
client, dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Image: "", + } + err := imageManager.RemoveContainerReferenceFromImageState(container) + if err == nil { + t.Error("Expected error removing container reference with no image name from image state") + } +} + +func TestRemoveContainerReferenceFromImageStateInspectError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), client, dockerstate.NewTaskEngineState()) + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Image: "myContainerImage", + } + client.EXPECT().InspectImage(container.Image).Return(nil, errors.New("error inspecting")).AnyTimes() + err := imageManager.RemoveContainerReferenceFromImageState(container) + if err == nil { + t.Error("Expected error in inspecting image while adding container to image state") + } +} + +func TestRemoveContainerReferenceFromImageStateWithNoReference(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now(), + } + imageManager.addImageState(sourceImageState) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + 
client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RemoveContainerReferenceFromImageState(container) + if err == nil { + t.Error("Expected error removing non-existing container reference from image state") + } +} + +func TestGetCandidateImagesForDeletionImageNoImageState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + + imageStates := imageManager.getCandidateImagesForDeletion() + + if imageStates != nil { + t.Error("Expected no image state to be returned for deletion") + } +} + +func TestGetCandidateImagesForDeletionImageJustPulled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + sourceImage := &image.Image{} + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now(), + } + imageManager.addImageState(sourceImageState) + imageStates := imageManager.getCandidateImagesForDeletion() + if len(imageStates) > 0 { + t.Error("Expected no image state to be returned for deletion") + } +} + +func TestGetCandidateImagesForDeletionImageHasContainerReference(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := 
&dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImage.Names = append(sourceImage.Names, container.Image) + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now().AddDate(0, -2, 0), + } + imageManager.addImageState(sourceImageState) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + imageStates := imageManager.getCandidateImagesForDeletion() + if len(imageStates) > 0 { + t.Error("Expected no image state to be returned for deletion") + } +} + +func TestGetCandidateImagesForDeletionImageHasMoreContainerReferences(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + container2 := &apicontainer.Container{ + Name: "testContainer2", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + 
sourceImage.Names = append(sourceImage.Names, container.Image) + sourceImageState := &image.ImageState{ + Image: sourceImage, + PulledAt: time.Now().AddDate(0, -2, 0), + } + imageManager.addImageState(sourceImageState) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err = imageManager.RecordContainerReference(container2) + if err != nil { + t.Error("Error in adding container2 to an existing image state") + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err = imageManager.RemoveContainerReferenceFromImageState(container) + if err != nil { + t.Error("Error removing container reference from image state") + } + imageStates := imageManager.getCandidateImagesForDeletion() + if len(imageStates) > 0 { + t.Error("Expected no image state to be returned for deletion") + } +} + +func TestImageCleanupExclusionListWithSingleName(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + sourceImageA := &image.Image{ + ImageID: "sha256:qwerty1", + Names: []string{"a"}, + } + sourceImageB := &image.Image{ + ImageID: "sha256:qwerty2", + Names: []string{"b"}, + } + sourceImageC := &image.Image{ + ImageID: "sha256:qwerty3", + Names: []string{"c"}, + } + ImageStateA := &image.ImageState{ + Image: sourceImageA, + PulledAt: time.Now().AddDate(0, -2, 0), + } + ImageStateB := &image.ImageState{ + Image: sourceImageB, + PulledAt: time.Now().AddDate(0, -2, 0), + } + ImageStateC := &image.ImageState{ + Image: sourceImageC, + PulledAt: time.Now().AddDate(0, -2, 0), + } + imageManager := &dockerImageManager{ + client: client, + state: 
dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + imageCleanupExclusionList: []string{"a", "c"}, + imageStatesConsideredForDeletion: map[string]*image.ImageState{ + "sha256:qwerty2": ImageStateB, + }, + } + listImagesResponse := dockerapi.ListImagesResponse{ + RepoTags: []string{"b"}, + } + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + var testImageStates = []*image.ImageState{ImageStateA, ImageStateB, ImageStateC} + var testResult = imageManager.imagesConsiderForDeletion(testImageStates) + assert.Equal(t, 1, len(testResult), "Expected 1 image state to be returned for deletion") + if !reflect.DeepEqual(imageManager.imageStatesConsideredForDeletion, testResult) { + t.Error("Incorrect image return from getCandidateImagesForDeletionHelper function") + } +} + +func TestImageCleanupExclusionListWithMultipleNames(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + sourceImageA := &image.Image{ + ImageID: "sha256:qwerty1", + Names: []string{"a", "b", "c"}, + } + sourceImageB := &image.Image{ + ImageID: "sha256:qwerty2", + Names: []string{"d", "e", "f"}, + } + sourceImageC := &image.Image{ + ImageID: "sha256:qwerty3", + Names: []string{"g", "h", "i"}, + } + sourceImageD := &image.Image{ + ImageID: "sha256:qwerty4", + Names: []string{"x", "y", "z"}, + } + ImageStateA := &image.ImageState{ + Image: sourceImageA, + PulledAt: time.Now().AddDate(0, -2, 0), + } + ImageStateB := &image.ImageState{ + Image: sourceImageB, + PulledAt: time.Now().AddDate(0, -2, 0), + } + ImageStateC := &image.ImageState{ + Image: sourceImageC, + PulledAt: time.Now().AddDate(0, -2, 0), + } + ImageStateD := &image.ImageState{ + Image: sourceImageD, + PulledAt: time.Now().AddDate(0, -2, 0), 
+ } + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + imageCleanupExclusionList: []string{"a", "d", "g"}, + imageStatesConsideredForDeletion: map[string]*image.ImageState{ + "sha256:qwerty4": ImageStateD, + }, + } + listImagesResponse := dockerapi.ListImagesResponse{ + RepoTags: []string{"x", "y", "z"}, + } + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + var testImageStates = []*image.ImageState{ImageStateA, ImageStateB, ImageStateC, ImageStateD} + var testResult = imageManager.imagesConsiderForDeletion(testImageStates) + assert.Equal(t, 1, len(testResult), "Expected 1 image state to be returned for deletion") + if !reflect.DeepEqual(imageManager.imageStatesConsideredForDeletion, testResult) { + t.Error("Incorrect image return from getCandidateImagesForDeletionHelper function") + } +} + +func TestGetLeastRecentlyUsedImages(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := NewImageManager(defaultTestConfig(), client, dockerstate.NewTaskEngineState()) + + imageStateA := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -5, 0), + } + imageStateB := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -3, 0), + } + imageStateC := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -2, 0), + } + imageStateD := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -6, 0), + } + imageStateE := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -4, 0), + } + imageStateF := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -1, 0), + } + + candidateImagesForDeletion := []*image.ImageState{ + imageStateA, imageStateB, imageStateC, imageStateD, imageStateE, 
imageStateF, + } + expectedLeastRecentlyUsedImages := []*image.ImageState{ + imageStateD, imageStateA, imageStateE, imageStateB, imageStateC, + } + leastRecentlyUsedImage := imageManager.(*dockerImageManager).getLeastRecentlyUsedImage(candidateImagesForDeletion) + if !reflect.DeepEqual(leastRecentlyUsedImage, expectedLeastRecentlyUsedImages[0]) { + t.Error("Incorrect order of least recently used images") + } +} + +func TestGetLeastRecentlyUsedImagesLessThanFive(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + + imageStateA := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -5, 0), + } + imageStateB := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -3, 0), + } + imageStateC := &image.ImageState{ + LastUsedAt: time.Now().AddDate(0, -2, 0), + } + candidateImagesForDeletion := []*image.ImageState{ + imageStateA, imageStateB, imageStateC, + } + expectedLeastRecentlyUsedImages := []*image.ImageState{ + imageStateA, imageStateB, imageStateC, + } + leastRecentlyUsedImage := imageManager.getLeastRecentlyUsedImage(candidateImagesForDeletion) + if !reflect.DeepEqual(leastRecentlyUsedImage, expectedLeastRecentlyUsedImages[0]) { + t.Error("Incorrect order of least recently used images") + } +} + +func TestRemoveAlreadyExistingImageNameWithDifferentID(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + 
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + imageManager.SetDataClient(data.NewNoopClient()) + + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImage.Names = append(sourceImage.Names, container.Image) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil) + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + container1 := &apicontainer.Container{ + Name: "testContainer1", + Image: "testContainerImage", + } + imageInspected1 := &types.ImageInspect{ + ID: "sha256:asdfg", + } + client.EXPECT().InspectImage(container.Image).Return(imageInspected1, nil) + err = imageManager.RecordContainerReference(container1) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + imageState, ok := imageManager.getImageState(imageInspected.ID) + if !ok { + t.Error("Error in retrieving existing Image State for the Container") + } + if len(imageState.Image.Names) != 0 { + t.Error("Error in removing already existing image name with different ID") + } +} + +func TestImageCleanupHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: 1 * time.Millisecond, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + + imageManager.SetDataClient(data.NewNoopClient()) + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + listImagesResponse 
:= dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty"}, + } + + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + + err = imageManager.RemoveContainerReferenceFromImageState(container) + if err != nil { + t.Error("Error removing container reference from image state") + } + + imageState, _ := imageManager.getImageState(imageInspected.ID) + imageState.PulledAt = time.Now().AddDate(0, -2, 0) + imageState.LastUsedAt = time.Now().AddDate(0, -2, 0) + imageState.AddImageName("anotherImage") + + client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(nil) + client.EXPECT().RemoveImage(gomock.Any(), "anotherImage", dockerclient.RemoveImageTimeout).Return(nil) + parent := context.Background() + ctx, cancel := context.WithCancel(parent) + go imageManager.performPeriodicImageCleanup(ctx, 2*time.Millisecond) + time.Sleep(1 * time.Second) + cancel() + if imageState.GetImageNamesCount() != 0 { + t.Error("Error removing image name from state after the image is removed") + } + if imageManager.GetImageStatesCount() != 0 { + t.Error("Error removing image state after the image is removed") + } +} + +func TestImageCleanupCannotRemoveImage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + + imageManager.SetDataClient(data.NewNoopClient()) + container := 
&apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImage.Names = append(sourceImage.Names, container.Image) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty"}, + } + + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + + err = imageManager.RemoveContainerReferenceFromImageState(container) + if err != nil { + t.Error("Error removing container reference from image state") + } + + imageState, _ := imageManager.getImageState(imageInspected.ID) + imageState.PulledAt = time.Now().AddDate(0, -2, 0) + imageState.LastUsedAt = time.Now().AddDate(0, -2, 0) + + client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return( + errors.New("error removing image")).AnyTimes() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.removeUnusedImages(ctx) + if len(imageState.Image.Names) == 0 { + t.Error("Error: image name should not be removed") + } + if len(imageManager.imageStates) == 0 { + t.Error("Error: image state should not be removed") + } +} + +func TestImageCleanupRemoveImageById(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + } + + 
imageManager.SetDataClient(data.NewNoopClient()) + container := &apicontainer.Container{ + Name: "testContainer", + Image: "testContainerImage", + } + sourceImage := &image.Image{ + ImageID: "sha256:qwerty", + } + sourceImage.Names = append(sourceImage.Names, container.Image) + imageInspected := &types.ImageInspect{ + ID: "sha256:qwerty", + } + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty"}, + } + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes() + err := imageManager.RecordContainerReference(container) + if err != nil { + t.Error("Error in adding container to an existing image state") + } + + err = imageManager.RemoveContainerReferenceFromImageState(container) + if err != nil { + t.Error("Error removing container reference from image state") + } + + imageState, _ := imageManager.getImageState(imageInspected.ID) + imageState.RemoveImageName(container.Image) + imageState.PulledAt = time.Now().AddDate(0, -2, 0) + imageState.LastUsedAt = time.Now().AddDate(0, -2, 0) + + client.EXPECT().RemoveImage(gomock.Any(), sourceImage.ImageID, dockerclient.RemoveImageTimeout).Return(nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + imageManager.removeUnusedImages(ctx) + if len(imageManager.imageStates) != 0 { + t.Error("Error removing image state after the image is removed") + } +} + +func TestNonECSImageAndContainersCleanupRemoveImage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: 
config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, + } + imageManager.SetDataClient(data.NewNoopClient()) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ + Size: 4096, + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + imageManager.removeUnusedImages(ctx) + + assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs") + assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids") + assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed") +} + +func TestNonECSImageAndContainersCleanupRemoveImage_OneImageThreeTags(t 
*testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, + } + imageManager.SetDataClient(data.NewNoopClient()) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ + Size: 4096, + RepoTags: []string{"tester", "foo", "bar"}, + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester", "foo", "bar"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.RepoTags[0], 
dockerclient.RemoveImageTimeout).Return(nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.RepoTags[1], dockerclient.RemoveImageTimeout).Return(nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.RepoTags[2], dockerclient.RemoveImageTimeout).Return(nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(0) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + imageManager.removeUnusedImages(ctx) + + assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs") + assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids") + assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed") +} + +func TestNonECSImageAndContainersCleanupRemoveImage_DontDeleteExcludedImage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, + imageCleanupExclusionList: []string{"tester"}, + } + imageManager.SetDataClient(data.NewNoopClient()) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ 
+ Size: 4096, + RepoTags: []string{"tester"}, + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(0) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + imageManager.removeUnusedImages(ctx) + + assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs") + assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids") + assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed") +} + +func TestNonECSImageAndContainerCleanupRemoveImage_DontDeleteNotOldEnoughImage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, 
+ nonECSMinimumAgeBeforeDeletion: time.Hour * 100, + } + imageManager.SetDataClient(data.NewNoopClient()) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ + Size: 4096, + RepoTags: []string{"tester"}, + Created: time.Now().AddDate(0, 0, -1).Format(time.RFC3339), + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + imageManager.removeUnusedImages(ctx) +} + +func TestNonECSImageAndContainerCleanupRemoveImage_DeleteOldEnoughImage(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: 
config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, + nonECSMinimumAgeBeforeDeletion: time.Hour * 3, + } + imageManager.SetDataClient(data.NewNoopClient()) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ + Size: 4096, + RepoTags: []string{"tester"}, + Created: time.Now().AddDate(0, 0, -1).Format(time.RFC3339), + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + imageManager.removeUnusedImages(ctx) +} + +func TestNonECSImageAndContainersCleanupRemoveImage_DontDeleteECSImages(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + 
imageManager := &dockerImageManager{ + client: client, + state: dockerstate.NewTaskEngineState(), + minimumAgeBeforeDeletion: config.DefaultImageDeletionAge, + numImagesToDelete: config.DefaultNumImagesToDeletePerCycle, + numNonECSContainersToDelete: 10, + imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval, + deleteNonECSImagesEnabled: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + nonECSContainerCleanupWaitDuration: time.Hour * 3, + } + imageManager.SetDataClient(data.NewNoopClient()) + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "sha256:qwerty1"}, + PulledAt: time.Now(), + } + imageManager.addImageState(imageState) + + listContainersResponse := dockerapi.ListContainersResponse{ + DockerIDs: []string{"1"}, + } + + inspectContainerResponse := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "1", + State: &types.ContainerState{ + Status: "exited", + FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano), + }, + }, + } + + inspectImageResponse := &types.ImageInspect{ + Size: 4096, + RepoTags: []string{"tester"}, + ID: "sha256:qwerty1", + } + + listImagesResponse := dockerapi.ListImagesResponse{ + ImageIDs: []string{"sha256:qwerty1"}, + RepoTags: []string{"tester"}, + } + + client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes() + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1) + client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes() + client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1) + client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], 
	dockerclient.RemoveImageTimeout).Return(nil).Times(0)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	imageManager.removeUnusedImages(ctx)

	assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs")
	assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids")
	assert.Len(t, imageManager.imageStates, 1, "there should still be an image state")
}

// Dead containers should be cleaned up.
// The real verification is done through the gomock Times(...) expectations:
// a non-ECS container in "dead" status whose FinishedAt is older than the
// configured nonECSContainerCleanupWaitDuration (3h here; FinishedAt is set
// two months in the past) must be removed exactly once, and its image removed
// exactly once.
func TestNonECSImageAndContainers_RemoveDeadContainer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{
		client:                             client,
		state:                              dockerstate.NewTaskEngineState(),
		minimumAgeBeforeDeletion:           config.DefaultImageDeletionAge,
		numImagesToDelete:                  config.DefaultNumImagesToDeletePerCycle,
		numNonECSContainersToDelete:        10,
		imageCleanupTimeInterval:           config.DefaultImageCleanupTimeInterval,
		deleteNonECSImagesEnabled:          config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
		nonECSContainerCleanupWaitDuration: time.Hour * 3,
	}
	imageManager.SetDataClient(data.NewNoopClient())

	listContainersResponse := dockerapi.ListContainersResponse{
		DockerIDs: []string{"1"},
	}

	// "dead" container that finished two months ago, i.e. well past the
	// 3-hour cleanup wait duration configured above.
	inspectContainerResponse := &types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID: "1",
			State: &types.ContainerState{
				Status:     "dead",
				FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano),
			},
		},
	}

	inspectImageResponse := &types.ImageInspect{
		Size: 4096,
	}

	listImagesResponse := dockerapi.ListImagesResponse{
		ImageIDs: []string{"sha256:qwerty1"},
		RepoTags: []string{"tester"},
	}

	// NOTE(review): ListContainers is stubbed with dockerclient.ListImagesTimeout
	// rather than a ListContainers-specific timeout — presumably mirrors the
	// production call site; confirm against dockerImageManager.
	client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes()
	client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes()
	// Times(1): the dead container and its image must each be removed exactly once.
	client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(1)
	client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes()
	client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1)
	client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	imageManager.removeUnusedImages(ctx)

	// NOTE(review): the first two assertions inspect the stubbed response
	// structs, not manager state, so they are trivially true; the mock
	// expectations above carry the actual verification.
	assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs")
	assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids")
	assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed")
}

// Old 'Created' containers should be cleaned up: a container stuck in
// "created" status whose FinishedAt is past the cleanup wait duration is
// removed (RemoveContainer Times(1)), along with its image.
func TestNonECSImageAndContainersCleanup_RemoveOldCreatedContainer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{
		client:                             client,
		state:                              dockerstate.NewTaskEngineState(),
		minimumAgeBeforeDeletion:           config.DefaultImageDeletionAge,
		numImagesToDelete:                  config.DefaultNumImagesToDeletePerCycle,
		numNonECSContainersToDelete:        10,
		imageCleanupTimeInterval:           config.DefaultImageCleanupTimeInterval,
		deleteNonECSImagesEnabled:          config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
		nonECSContainerCleanupWaitDuration: time.Hour * 3,
	}
	imageManager.SetDataClient(data.NewNoopClient())

	listContainersResponse := dockerapi.ListContainersResponse{
		DockerIDs: []string{"1"},
	}

	// "created" container that finished two months ago — old enough to clean up.
	inspectContainerResponse := &types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID: "1",
			State: &types.ContainerState{
				Status:     "created",
				FinishedAt: time.Now().AddDate(0, -2, 0).Format(time.RFC3339Nano),
			},
		},
	}

	inspectImageResponse := &types.ImageInspect{
		Size: 4096,
	}

	listImagesResponse := dockerapi.ListImagesResponse{
		ImageIDs: []string{"sha256:qwerty1"},
		RepoTags: []string{"tester"},
	}

	client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes()
	client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes()
	client.EXPECT().RemoveContainer(gomock.Any(), "1", gomock.Any()).Return(nil).Times(1)
	client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes()
	client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1)
	client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	imageManager.removeUnusedImages(ctx)

	assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs")
	assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids")
	assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed")
}

// A container whose FinishedAt cannot be parsed must NOT be removed
// (RemoveContainer Times(0)); the unused image is still cleaned up.
func TestNonECSImageAndContainersCleanup_DontRemoveContainerWithInvalidFinishedTime(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{
		client:                             client,
		state:                              dockerstate.NewTaskEngineState(),
		minimumAgeBeforeDeletion:           config.DefaultImageDeletionAge,
		numImagesToDelete:                  config.DefaultNumImagesToDeletePerCycle,
		numNonECSContainersToDelete:        10,
		imageCleanupTimeInterval:           config.DefaultImageCleanupTimeInterval,
		deleteNonECSImagesEnabled:          config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
		nonECSContainerCleanupWaitDuration: time.Hour * 3,
	}

	imageManager.SetDataClient(data.NewNoopClient())

	listContainersResponse := dockerapi.ListContainersResponse{
		DockerIDs: []string{"1"},
	}

	// FinishedAt is deliberately not a valid RFC3339Nano timestamp.
	inspectContainerResponse := &types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID: "1",
			State: &types.ContainerState{
				Status:     "created",
				FinishedAt: "Hello! I am an invalid timestamp!!",
			},
		},
	}

	inspectImageResponse := &types.ImageInspect{
		Size: 4096,
	}

	listImagesResponse := dockerapi.ListImagesResponse{
		ImageIDs: []string{"sha256:qwerty1"},
		RepoTags: []string{"tester"},
	}

	client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes()
	client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes()
	// Times(0): the container with the unparseable timestamp must be left alone.
	client.EXPECT().RemoveContainer(gomock.Any(), "1", gomock.Any()).Return(nil).Times(0)
	client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes()
	client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1)
	client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	imageManager.removeUnusedImages(ctx)

	assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs")
	assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids")
	assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed")
}

// New 'Created' containers should NOT be cleaned up.
// A "created" container whose FinishedAt is now (i.e. newer than the 3-hour
// nonECSContainerCleanupWaitDuration) must NOT be removed: RemoveContainer is
// expected Times(0). The unused image is still removed once.
func TestNonECSImageAndContainersCleanup_DoNotRemoveNewlyCreatedContainer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{
		client:                             client,
		state:                              dockerstate.NewTaskEngineState(),
		minimumAgeBeforeDeletion:           config.DefaultImageDeletionAge,
		numImagesToDelete:                  config.DefaultNumImagesToDeletePerCycle,
		numNonECSContainersToDelete:        10,
		imageCleanupTimeInterval:           config.DefaultImageCleanupTimeInterval,
		deleteNonECSImagesEnabled:          config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled},
		nonECSContainerCleanupWaitDuration: time.Hour * 3,
	}
	imageManager.SetDataClient(data.NewNoopClient())

	listContainersResponse := dockerapi.ListContainersResponse{
		DockerIDs: []string{"1"},
	}

	// FinishedAt is "now", so the container is too young to be cleaned up.
	inspectContainerResponse := &types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID: "1",
			State: &types.ContainerState{
				Status:     "created",
				FinishedAt: time.Now().Format(time.RFC3339Nano),
			},
		},
	}

	inspectImageResponse := &types.ImageInspect{
		Size: 4096,
	}

	listImagesResponse := dockerapi.ListImagesResponse{
		ImageIDs: []string{"sha256:qwerty1"},
		RepoTags: []string{"tester"},
	}

	client.EXPECT().ListContainers(gomock.Any(), gomock.Any(), dockerclient.ListImagesTimeout).Return(listContainersResponse).AnyTimes()
	client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), dockerclient.InspectContainerTimeout).Return(inspectContainerResponse, nil).AnyTimes()
	client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), dockerclient.RemoveContainerTimeout).Return(nil).Times(0)
	client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes()
	client.EXPECT().InspectImage(listImagesResponse.ImageIDs[0]).Return(inspectImageResponse, nil).Times(1)
	client.EXPECT().RemoveImage(gomock.Any(), listImagesResponse.ImageIDs[0], dockerclient.RemoveImageTimeout).Return(nil).Times(1)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	imageManager.removeUnusedImages(ctx)

	// NOTE(review): the first two assertions inspect the stubbed responses and
	// are trivially true; the Times(...) expectations above do the real work.
	assert.Len(t, listContainersResponse.DockerIDs, 1, "Error removing container IDs")
	assert.Len(t, inspectContainerResponse.ID, 1, "Error inspecting containers ids")
	assert.Len(t, imageManager.imageStates, 0, "Error removing image state after the image is removed")
}

// TestDeleteImage verifies the happy path of deleteImage: after a successful
// RemoveImage, the image name is dropped from the image state and the state
// itself is removed from the manager.
func TestDeleteImage(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	// Recording the container reference creates the image state under test.
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	imageState, _ := imageManager.getImageState(imageInspected.ID)
	client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(nil)
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.deleteImage(ctx, container.Image, imageState)
	if len(imageState.Image.Names) != 0 {
		t.Error("Error removing Image name from image state")
	}
	if len(imageManager.getAllImageStates()) != 0 {
		t.Error("Error removing image state from image manager after deletion")
	}
}

// This test verifies that the agent correctly detects, on older versions of
// docker, an attempt to delete an image that does not exist.
// Older docker daemons return the bare error string "no such image"; the
// manager must treat it as "image already gone" and still clean up its state.
func TestDeleteImageNotFoundOldDockerMessageError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	imageState, _ := imageManager.getImageState(imageInspected.ID)
	// Old-style daemon error message.
	client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(
		errors.New("no such image"))
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.deleteImage(ctx, container.Image, imageState)
	if len(imageState.Image.Names) != 0 {
		t.Error("Error removing Image name from image state")
	}
	if len(imageManager.getAllImageStates()) != 0 {
		t.Error("Error removing image state from image manager")
	}
}

// Newer docker daemons return "No such image: <name>"; same expectation as
// the old-message variant above — state is cleaned up despite the error.
func TestDeleteImageNotFoundError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	imageState, _ := imageManager.getImageState(imageInspected.ID)
	client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(
		errors.New("No such image: " + container.Image))
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.deleteImage(ctx, container.Image, imageState)
	if len(imageState.Image.Names) != 0 {
		t.Error("Error removing Image name from image state")
	}
	if len(imageManager.getAllImageStates()) != 0 {
		t.Error("Error removing image state from image manager")
	}
}

// Any RemoveImage error other than "not found" (here: image still in use)
// must leave the image state intact so deletion can be retried later.
func TestDeleteImageOtherRemoveImageErrors(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	imageState, _ := imageManager.getImageState(imageInspected.ID)
	client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(
		errors.New("container for this image exists"))
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.deleteImage(ctx, container.Image, imageState)
	// Note the inverted checks relative to the not-found tests: name and state
	// must STILL be present after the failed delete.
	if len(imageState.Image.Names) == 0 {
		t.Error("Incorrectly removed Image name from image state")
	}
	if len(imageManager.getAllImageStates()) == 0 {
		t.Error("Incorrecting removed image state from image manager before deletion")
	}
}

// deleteImage with an empty image ID and nil state must be a safe no-op
// (no panic, no mock calls expected).
func TestDeleteImageIDNull(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.deleteImage(ctx, "", nil)
}

// With no image states tracked, removeLeastRecentlyUsedImage must return an
// error rather than pretending to have deleted something.
func TestRemoveLeastRecentlyUsedImageNoImage(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	err := imageManager.removeLeastRecentlyUsedImage(ctx)
	if err == nil {
		t.Error("Expected Error for no LRU image to remove")
	}
}

// removeUnusedImages on an empty manager must be a safe no-op.
func TestRemoveUnusedImagesNoImages(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.removeUnusedImages(ctx)
}

// Looking up an image state by a recorded image name must succeed.
func TestGetImageStateFromImageName(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	_, ok := imageManager.GetImageStateFromImageName(container.Image)
	if !ok {
		t.Error("Error retrieving image state by image name")
	}
}

// Looking up an image state by an unknown image name must report not-found.
func TestGetImageStateFromImageNameNoImageState(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)
	imageManager := &dockerImageManager{client: client, state: dockerstate.NewTaskEngineState()}
	imageManager.SetDataClient(data.NewNoopClient())
	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	_, ok := imageManager.GetImageStateFromImageName("noSuchImage")
	if ok {
		t.Error("Incorrect image state retrieved by image name")
	}
}

// TestConcurrentRemoveUnusedImages checks for concurrent map writes
// in the imageManager
func TestConcurrentRemoveUnusedImages(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)

	imageManager := &dockerImageManager{
		client:                   client,
		state:                    dockerstate.NewTaskEngineState(),
		minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
		numImagesToDelete:        config.DefaultNumImagesToDeletePerCycle,
		imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
	}
	imageManager.SetDataClient(data.NewNoopClient())

	container := &apicontainer.Container{
		Name:  "testContainer",
		Image: "testContainerImage",
	}
	sourceImage := &image.Image{
		ImageID: "sha256:qwerty",
	}
	sourceImage.Names = append(sourceImage.Names, container.Image)
	imageInspected := &types.ImageInspect{
		ID: "sha256:qwerty",
	}
	listImagesResponse := dockerapi.ListImagesResponse{
		ImageIDs: []string{"sha256:qwerty"},
	}

	client.EXPECT().ListImages(gomock.Any(), dockerclient.ListImagesTimeout).Return(listImagesResponse).AnyTimes()
	client.EXPECT().InspectImage(container.Image).Return(imageInspected, nil).AnyTimes()
	err := imageManager.RecordContainerReference(container)
	if err != nil {
		t.Error("Error in adding container to an existing image state")
	}
	require.Equal(t, 1, len(imageManager.imageStates))

	// Remove container reference from image state to trigger cleanup
	err = imageManager.RemoveContainerReferenceFromImageState(container)
	assert.NoError(t, err)

	// Back-date the state so it is eligible for deletion by age.
	imageState, _ := imageManager.getImageState(imageInspected.ID)
	imageState.PulledAt = time.Now().AddDate(0, -2, 0)
	imageState.LastUsedAt = time.Now().AddDate(0, -2, 0)

	// RemoveImage is expected exactly once even though 1000 goroutines race.
	client.EXPECT().RemoveImage(gomock.Any(), container.Image, dockerclient.RemoveImageTimeout).Return(nil)
	require.Equal(t, 1, len(imageManager.imageStates))

	// We create 1000 goroutines and then perform a channel close
	// to simulate the concurrent map write problem
	numRoutines := 1000
	var waitGroup sync.WaitGroup
	waitGroup.Add(numRoutines)

	ok := make(chan bool)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	for i := 0; i < numRoutines; i++ {
		go func() {
			// Block until the channel is closed so all goroutines start together.
			<-ok
			imageManager.removeUnusedImages(ctx)
			waitGroup.Done()
		}()
	}

	close(ok)
	waitGroup.Wait()
	require.Equal(t, 0, len(imageManager.imageStates))
}

// With ImagePullPreferCachedBehavior configured, StartImageCleanupProcess
// must not start the cleanup loop; no mock calls are expected.
func TestImageCleanupProcessNotStart(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	client := mock_dockerapi.NewMockDockerClient(ctrl)

	cfg := defaultTestConfig()
	cfg.ImagePullBehavior = config.ImagePullPreferCachedBehavior
	imageManager := NewImageManager(cfg, client, dockerstate.NewTaskEngineState())
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	imageManager.StartImageCleanupProcess(ctx)
	// Nothing should happen.
+} diff --git a/agent/engine/docker_image_manager_unix_integ_test.go b/agent/engine/docker_image_manager_unix_integ_test.go new file mode 100644 index 00000000000..3cb5ed6d536 --- /dev/null +++ b/agent/engine/docker_image_manager_unix_integ_test.go @@ -0,0 +1,34 @@ +// +build !windows,integration +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// The DockerTaskEngine is an abstraction over the DockerGoClient so that +// it does not have to know about tasks, only containers + +package engine + +const ( + test1Image1Name = "127.0.0.1:51670/amazon/image-cleanup-test-image1:latest" + test1Image2Name = "127.0.0.1:51670/amazon/image-cleanup-test-image2:latest" + test1Image3Name = "127.0.0.1:51670/amazon/image-cleanup-test-image3:latest" + + test2Image1Name = "127.0.0.1:51670/amazon/image-cleanup-test-image1:latest" + test2Image2Name = "127.0.0.1:51670/amazon/image-cleanup-test-image2:latest" + test2Image3Name = "127.0.0.1:51670/amazon/image-cleanup-test-image3:latest" + + test3Image1Name = "127.0.0.1:51670/amazon/image-cleanup-test-image1:latest" + test3Image2Name = "127.0.0.1:51670/amazon/image-cleanup-test-image2:latest" + test3Image3Name = "127.0.0.1:51670/amazon/image-cleanup-test-image3:latest" + + test4Image1Name = "127.0.0.1:51670/amazon/image-cleanup-test-image1:latest" +) diff --git a/agent/engine/docker_image_manager_windows_integ_test.go b/agent/engine/docker_image_manager_windows_integ_test.go new file mode 100644 index 
00000000000..2e00bdff307 --- /dev/null +++ b/agent/engine/docker_image_manager_windows_integ_test.go @@ -0,0 +1,37 @@ +// +build windows,integration +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// The DockerTaskEngine is an abstraction over the DockerGoClient so that +// it does not have to know about tasks, only containers + +package engine + +// These constants are a hack around being unable to run a local registry on +// Windows. We're unable to run a local registry due to the lack of loopback +// networking support in Windows Net NAT. +const ( + test1Image1Name = "amazon/image-cleanup-test-image1:make" + test1Image2Name = "amazon/image-cleanup-test-image2:make" + test1Image3Name = "amazon/image-cleanup-test-image3:make" + + test2Image1Name = "amazon/image-cleanup-test-image4:make" + test2Image2Name = "amazon/image-cleanup-test-image5:make" + test2Image3Name = "amazon/image-cleanup-test-image6:make" + + test3Image1Name = "amazon/image-cleanup-test-image7:make" + test3Image2Name = "amazon/image-cleanup-test-image8:make" + test3Image3Name = "amazon/image-cleanup-test-image9:make" + + test4Image1Name = "amazon/image-cleanup-test-image10:make" +) diff --git a/agent/engine/docker_task_engine.go b/agent/engine/docker_task_engine.go new file mode 100644 index 00000000000..57f604e1ad9 --- /dev/null +++ b/agent/engine/docker_task_engine.go @@ -0,0 +1,1686 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package engine contains the core logic for managing tasks +package engine + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/metrics" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + 
"github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + dockercontainer "github.com/docker/docker/api/types/container" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +const ( + //DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint + DockerEndpointEnvVariable = "DOCKER_HOST" + // DockerDefaultEndpoint is the default value for the Docker endpoint + DockerDefaultEndpoint = "unix:///var/run/docker.sock" + labelPrefix = "com.amazonaws.ecs." + labelTaskARN = labelPrefix + "task-arn" + labelContainerName = labelPrefix + "container-name" + labelTaskDefinitionFamily = labelPrefix + "task-definition-family" + labelTaskDefinitionVersion = labelPrefix + "task-definition-version" + labelCluster = labelPrefix + "cluster" + cniSetupTimeout = 1 * time.Minute + cniCleanupTimeout = 30 * time.Second + minGetIPBridgeTimeout = time.Second + maxGetIPBridgeTimeout = 10 * time.Second + getIPBridgeRetryJitterMultiplier = 0.2 + getIPBridgeRetryDelayMultiplier = 2 + ipamCleanupTmeout = 5 * time.Second + minEngineConnectRetryDelay = 2 * time.Second + maxEngineConnectRetryDelay = 200 * time.Second + engineConnectRetryJitterMultiplier = 0.20 + engineConnectRetryDelayMultiplier = 1.5 + // logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs. 
+ logDriverTypeFirelens = "awsfirelens" + logDriverTypeFluentd = "fluentd" + logDriverTag = "tag" + logDriverFluentdAddress = "fluentd-address" + dataLogDriverPath = "/data/firelens/" + logDriverAsyncConnect = "fluentd-async-connect" + logDriverSubSecondPrecision = "fluentd-sub-second-precision" + dataLogDriverSocketPath = "/socket/fluent.sock" + socketPathPrefix = "unix://" + + // fluentTagDockerFormat is the format for the log tag, which is "containerName-firelens-taskID" + fluentTagDockerFormat = "%s-firelens-%s" + + // Environment variables are needed for firelens + fluentNetworkHost = "FLUENT_HOST" + fluentNetworkPort = "FLUENT_PORT" + FluentNetworkPortValue = "24224" + FluentAWSVPCHostValue = "127.0.0.1" + + defaultMonitorExecAgentsInterval = 15 * time.Minute +) + +// DockerTaskEngine is a state machine for managing a task and its containers +// in ECS. +// +// DockerTaskEngine implements an abstraction over the DockerGoClient so that +// it does not have to know about tasks, only containers +// The DockerTaskEngine interacts with Docker to implement a TaskEngine +type DockerTaskEngine struct { + // implements TaskEngine + + cfg *config.Config + + ctx context.Context + initialized bool + mustInitLock sync.Mutex + + // state stores all tasks this task engine is aware of, including their + // current state and mappings to/from dockerId and name. + // This is used to checkpoint state to disk so tasks may survive agent + // failures or updates + state dockerstate.TaskEngineState + managedTasks map[string]*managedTask + + taskStopGroup *utilsync.SequentialWaitGroup + + events <-chan dockerapi.DockerContainerChangeEvent + stateChangeEvents chan statechange.Event + + client dockerapi.DockerClient + dataClient data.Client + cniClient ecscni.CNIClient + + containerChangeEventStream *eventstream.EventStream + + stopEngine context.CancelFunc + + // tasksLock is a mutex that the task engine must acquire before changing + // any task's state which it manages. 
Since this is a lock that encompasses + // all tasks, it must not acquire it for any significant duration + // The write mutex should be taken when adding and removing tasks from managedTasks. + tasksLock sync.RWMutex + + credentialsManager credentials.Manager + _time ttime.Time + _timeOnce sync.Once + imageManager ImageManager + containerStatusToTransitionFunction map[apicontainerstatus.ContainerStatus]transitionApplyFunc + metadataManager containermetadata.Manager + + // taskSteadyStatePollInterval is the duration that a managed task waits + // once the task gets into steady state before polling the state of all of + // the task's containers to re-evaluate if the task is still in steady state + // This is set to defaultTaskSteadyStatePollInterval in production code. + // This can be used by tests that are looking to ensure that the steady state + // verification logic gets executed to set it to a low interval + taskSteadyStatePollInterval time.Duration + taskSteadyStatePollIntervalJitter time.Duration + + resourceFields *taskresource.ResourceFields + + // handleDelay is a function used to delay cleanup. Implementation is + // swappable for testing + handleDelay func(duration time.Duration) + monitorExecAgentsTicker *time.Ticker + execCmdMgr execcmd.Manager + monitorExecAgentsInterval time.Duration +} + +// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine. +// The distinction between created and initialized is that when created it may +// be serialized/deserialized, but it will not communicate with docker until it +// is also initialized. 
+func NewDockerTaskEngine(cfg *config.Config, + client dockerapi.DockerClient, + credentialsManager credentials.Manager, + containerChangeEventStream *eventstream.EventStream, + imageManager ImageManager, + state dockerstate.TaskEngineState, + metadataManager containermetadata.Manager, + resourceFields *taskresource.ResourceFields, + execCmdMgr execcmd.Manager) *DockerTaskEngine { + dockerTaskEngine := &DockerTaskEngine{ + cfg: cfg, + client: client, + dataClient: data.NewNoopClient(), + + state: state, + managedTasks: make(map[string]*managedTask), + taskStopGroup: utilsync.NewSequentialWaitGroup(), + stateChangeEvents: make(chan statechange.Event), + + credentialsManager: credentialsManager, + + containerChangeEventStream: containerChangeEventStream, + imageManager: imageManager, + cniClient: ecscni.NewClient(cfg.CNIPluginsPath), + + metadataManager: metadataManager, + taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval, + taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter, + resourceFields: resourceFields, + handleDelay: time.Sleep, + execCmdMgr: execCmdMgr, + monitorExecAgentsInterval: defaultMonitorExecAgentsInterval, + } + + dockerTaskEngine.initializeContainerStatusToTransitionFunction() + + return dockerTaskEngine +} + +func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() { + containerStatusToTransitionFunction := map[apicontainerstatus.ContainerStatus]transitionApplyFunc{ + apicontainerstatus.ContainerPulled: engine.pullContainer, + apicontainerstatus.ContainerCreated: engine.createContainer, + apicontainerstatus.ContainerRunning: engine.startContainer, + apicontainerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources, + apicontainerstatus.ContainerStopped: engine.stopContainer, + } + engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction +} + +// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run 
// at the same time for docker >= 1.11.1.
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
var ImagePullDeleteLock sync.RWMutex

// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
	return engine.state.UnmarshalJSON(data)
}

// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
	return engine.state.MarshalJSON()
}

// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
	derivedCtx, cancel := context.WithCancel(ctx)
	engine.stopEngine = cancel
	engine.ctx = derivedCtx

	// Open the event stream before we sync state so that e.g. if a container
	// goes from running to stopped after we sync with it as "running" we still
	// have the "went to stopped" event pending so we can be up to date.
	err := engine.openEventstream(derivedCtx)
	if err != nil {
		return err
	}
	engine.synchronizeState()
	// Now catch up and start processing new events per normal
	go engine.handleDockerEvents(derivedCtx)
	engine.initialized = true
	go engine.startPeriodicExecAgentsMonitoring(derivedCtx)
	return nil
}

// startPeriodicExecAgentsMonitoring kicks off a monitoring pass for all
// managed tasks' exec-command agents on a fixed ticker until ctx is cancelled.
func (engine *DockerTaskEngine) startPeriodicExecAgentsMonitoring(ctx context.Context) {
	engine.monitorExecAgentsTicker = time.NewTicker(engine.monitorExecAgentsInterval)
	for {
		select {
		case <-engine.monitorExecAgentsTicker.C:
			// Each pass runs in its own goroutine so a slow pass cannot delay
			// the ticker loop.
			go engine.monitorExecAgentProcesses(ctx)
		case <-ctx.Done():
			engine.monitorExecAgentsTicker.Stop()
			return
		}
	}
}

// monitorExecAgentProcesses scans every RUNNING managed task and spawns a
// monitor goroutine for each exec-enabled container whose managed agent did
// not fail initialization.
func (engine *DockerTaskEngine) monitorExecAgentProcesses(ctx context.Context) {
	// TODO: [ecs-exec]add jitter between containers to not overload docker with top calls
	engine.tasksLock.RLock()
	defer engine.tasksLock.RUnlock()
	for _, mTask := range engine.managedTasks {
		task := mTask.Task

		// Only RUNNING tasks can have a live exec agent worth monitoring.
		if task.GetKnownStatus() != apitaskstatus.TaskRunning {
			continue
		}
		for _, c := range task.Containers {
			if execcmd.IsExecEnabledContainer(c) {
				if ma, _ := c.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
					go engine.monitorExecAgentRunning(ctx, mTask, c)
				}
			}
		}
	}
}

// monitorExecAgentRunning restarts the container's ExecCommandAgent process if
// it has stopped, emitting a managed-agent event on both failure and restart.
func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context,
	mTask *managedTask, c *apicontainer.Container) {
	if !c.IsRunning() {
		return
	}
	task := mTask.Task
	dockerID, err := engine.getDockerID(task, c)
	if err != nil {
		seelog.Errorf("Task engine [%s]: Could not retrieve docker id for container", task.Arn)
		return
	}
	// Sleeping here so that all the containers do not call inspect/start exec agent process
	// at the same time.
	// The max sleep is 50% of the monitor interval to allow enough buffer time
	// to finish monitoring.
	// This is inspired from containers streaming stats from Docker.
	time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2))
	status, err := engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID)
	if err != nil {
		seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err)
		mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent cannot be restarted")
	}
	if status == execcmd.Restarted {
		mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent restarted")
	}

}

// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
	if engine.initialized {
		return
	}
	engine.mustInitLock.Lock()
	defer engine.mustInitLock.Unlock()

	errorOnce := sync.Once{}
	taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay,
		engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier)
	retry.RetryWithBackoff(taskEngineConnectBackoff, func() error {
		if engine.initialized {
			return nil
		}
		err := engine.Init(ctx)
		if err != nil {
			// Log the connection failure only once to avoid flooding the logs
			// while retrying with backoff.
			errorOnce.Do(func() {
				seelog.Errorf("Task engine: could not connect to docker daemon: %v", err)
			})
		}
		return err
	})
}

// SetDataClient sets the saver that is used by the DockerTaskEngine.
func (engine *DockerTaskEngine) SetDataClient(client data.Client) {
	engine.dataClient = client
}

// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
	engine.stopEngine()
	engine.Disable()
}

// Disable prevents this engine from managing any additional tasks.
+func (engine *DockerTaskEngine) Disable() { + engine.tasksLock.Lock() +} + +// isTaskManaged checks if task for the corresponding arn is present +func (engine *DockerTaskEngine) isTaskManaged(arn string) bool { + engine.tasksLock.RLock() + defer engine.tasksLock.RUnlock() + _, ok := engine.managedTasks[arn] + return ok +} + +// synchronizeState explicitly goes through each docker container stored in +// "state" and updates its KnownStatus appropriately, as well as queueing up +// events to push upstream. It also initializes some fields of task resources and eni attachments that won't be populated +// from loading state file. +func (engine *DockerTaskEngine) synchronizeState() { + engine.tasksLock.Lock() + defer engine.tasksLock.Unlock() + imageStates := engine.state.AllImageStates() + if len(imageStates) != 0 { + engine.imageManager.AddAllImageStates(imageStates) + } + eniAttachments := engine.state.AllENIAttachments() + for _, eniAttachment := range eniAttachments { + timeoutFunc := func() { + eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress) + if !ok { + seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress) + return + } + if !eniAttachment.IsSent() { + seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record %s", eniAttachment.String()) + engine.removeENIAttachmentData(eniAttachment.MACAddress) + engine.state.RemoveENIAttachment(eniAttachment.MACAddress) + } + } + err := eniAttachment.Initialize(timeoutFunc) + if err != nil { + // The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired + // attachment from state. + seelog.Warnf("ENI attachment has expired. Removing it from state. 
%s", eniAttachment.String()) + engine.removeENIAttachmentData(eniAttachment.MACAddress) + engine.state.RemoveENIAttachment(eniAttachment.MACAddress) + } + } + + tasks := engine.state.AllTasks() + tasksToStart := engine.filterTasksToStartUnsafe(tasks) + for _, task := range tasks { + task.InitializeResources(engine.resourceFields) + engine.saveTaskData(task) + } + + for _, task := range tasksToStart { + engine.startTask(task) + } +} + +// filterTasksToStartUnsafe filters only the tasks that need to be started after +// the agent has been restarted. It also synchronizes states of all of the containers +// in tasks that need to be started. +func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task { + var tasksToStart []*apitask.Task + for _, task := range tasks { + conts, ok := engine.state.ContainerMapByArn(task.Arn) + if !ok { + // task hasn't started processing, no need to check container status + tasksToStart = append(tasksToStart, task) + continue + } + + for _, cont := range conts { + engine.synchronizeContainerStatus(cont, task) + engine.saveDockerContainerData(cont) // persist the container with the updated information. 
+ } + + tasksToStart = append(tasksToStart, task) + + // Put tasks that are stopped by acs but hasn't been stopped in wait group + if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 { + engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1) + } + } + + return tasksToStart +} + +// updateContainerMetadata sets the container metadata from the docker inspect +func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) { + container.SetCreatedAt(metadata.CreatedAt) + container.SetStartedAt(metadata.StartedAt) + container.SetFinishedAt(metadata.FinishedAt) + + // Set the labels if it's not set + if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 { + container.SetLabels(metadata.Labels) + } + + // Update volume for empty volume container + if metadata.Volumes != nil { + if container.IsInternal() { + task.UpdateMountPoints(container, metadata.Volumes) + } else { + container.SetVolumes(metadata.Volumes) + } + } + + // Set Exitcode if it's not set + if metadata.ExitCode != nil { + container.SetKnownExitCode(metadata.ExitCode) + } + + // Set port mappings + if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 { + container.SetKnownPortBindings(metadata.PortBindings) + } + // update the container health information + if container.HealthStatusShouldBeReported() { + container.SetHealthStatus(metadata.Health) + } + container.SetNetworkMode(metadata.NetworkMode) + container.SetNetworkSettings(metadata.NetworkSettings) +} + +// synchronizeContainerStatus checks and updates the container status with docker +func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) { + if container.DockerID == "" { + seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s", + task.Arn, container.DockerName) + // Figure out the dockerid + describedContainer, err := 
engine.client.InspectContainer(engine.ctx, + container.DockerName, dockerclient.InspectContainerTimeout) + if err != nil { + seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v", + task.Arn, container.DockerName, err) + } else { + // update the container metadata in case the container was created during agent restart + metadata := dockerapi.MetadataFromContainer(describedContainer) + updateContainerMetadata(&metadata, container.Container, task) + container.DockerID = describedContainer.ID + + container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State)) + // update mappings that need dockerid + engine.state.AddContainer(container, task) + err := engine.imageManager.RecordContainerReference(container.Container) + if err != nil { + seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", + task.Arn, err) + } + } + return + } + + currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID) + if metadata.Error != nil { + currentState = apicontainerstatus.ContainerStopped + // If this is a Docker API error + if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName { + seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v", + task.Arn, container.DockerID, container.DockerName, metadata.Error) + if !container.Container.KnownTerminal() { + container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{}) + err := engine.imageManager.RemoveContainerReferenceFromImageState(container.Container) + if err != nil { + seelog.Warnf("Task engine [%s]: could not remove container reference for image state %s: %v", + container.Container.Image, err) + } + } + } else { + // If this is a container state error + updateContainerMetadata(&metadata, container.Container, task) + container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error) + } + } else { 
+ // update the container metadata in case the container status/metadata changed during agent restart + updateContainerMetadata(&metadata, container.Container, task) + err := engine.imageManager.RecordContainerReference(container.Container) + if err != nil { + seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v", + task.Arn, err) + } + if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.Container.IsMetadataFileUpdated() { + go engine.updateMetadataFile(task, container) + } + } + if currentState > container.Container.GetKnownStatus() { + // update the container known status + container.Container.SetKnownStatus(currentState) + } + // Update task ExecutionStoppedAt timestamp + task.RecordExecutionStoppedAt(container.Container) +} + +// checkTaskState inspects the state of all containers within a task and writes +// their state to the managed task's container channel. +func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) { + defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")() + for _, container := range task.Containers { + dockerID, err := engine.getDockerID(task, container) + if err != nil { + continue + } + status, metadata := engine.client.DescribeContainer(engine.ctx, dockerID) + engine.tasksLock.RLock() + managedTask, ok := engine.managedTasks[task.Arn] + engine.tasksLock.RUnlock() + + if ok { + managedTask.emitDockerContainerChange(dockerContainerChange{ + container: container, + event: dockerapi.DockerContainerChangeEvent{ + Status: status, + DockerContainerMetadata: metadata, + }, + }) + } + } +} + +// sweepTask deletes all the containers associated with a task +func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) { + for _, cont := range task.Containers { + err := engine.removeContainer(task, cont) + if err != nil { + seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v", + task.Arn, cont.Name, err) + } + // Internal container(created by ecs-agent) 
state isn't recorded + if cont.IsInternal() { + continue + } + err = engine.imageManager.RemoveContainerReferenceFromImageState(cont) + if err != nil { + seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v", + task.Arn, cont.Name, err) + } + } + + // Clean metadata directory for task + if engine.cfg.ContainerMetadataEnabled.Enabled() { + err := engine.metadataManager.Clean(task.Arn) + if err != nil { + seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err) + } + } +} + +var removeAll = os.RemoveAll + +func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) { + for _, resource := range task.GetResources() { + err := resource.Cleanup() + if err != nil { + seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v", + task.Arn, resource.GetName(), err) + } else { + seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn, + resource.GetName()) + } + } + + if execcmd.IsExecEnabledTask(task) { + // cleanup host exec agent log dirs + if tID, err := task.GetID(); err != nil { + seelog.Warnf("Task Engine[%s]: error getting task ID for ExecAgent logs cleanup: %v", task.Arn, err) + } else { + if err := removeAll(filepath.Join(execcmd.ECSAgentExecLogDir, tID)); err != nil { + seelog.Warnf("Task Engine[%s]: unable to remove ExecAgent host logs for task: %v", task.Arn, err) + } + } + } + + // Now remove ourselves from the global state and cleanup channels + engine.tasksLock.Lock() + engine.state.RemoveTask(task) + + taskENIs := task.GetTaskENIs() + for _, taskENI := range taskENIs { + // ENIs that exist only as logical associations on another interface do not have + // attachments that need to be removed. 
+ if taskENI.IsStandardENI() { + seelog.Debugf("Task engine [%s]: removing eni %s from agent state", + task.Arn, taskENI.ID) + engine.removeENIAttachmentData(taskENI.MacAddress) + engine.state.RemoveENIAttachment(taskENI.MacAddress) + } else { + seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state", + task.Arn, taskENI.ID) + } + } + + // Remove task and container data from database. + engine.removeTaskData(task) + + seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn) + delete(engine.managedTasks, task.Arn) + engine.tasksLock.Unlock() +} + +func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) { + event, err := api.NewTaskStateChangeEvent(task, reason) + if err != nil { + seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err) + return + } + + seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String()) + engine.stateChangeEvents <- event +} + +// startTask creates a managedTask construct to track the task and then begins +// pushing it towards its desired state when allowed startTask is protected by +// the tasksLock lock of 'AddTask'. It should not be called from anywhere +// else and should exit quickly to allow AddTask to do more work. 
func (engine *DockerTaskEngine) startTask(task *apitask.Task) {
	// Create a channel that may be used to communicate with this task, survey
	// what tasks need to be waited for for this one to start, and then spin off
	// a goroutine to oversee this task

	thisTask := engine.newManagedTask(task)
	thisTask._time = engine.time()

	go thisTask.overseeTask()
}

// time returns the engine's time source, lazily defaulting to the real clock
// on first use; tests may inject a fake via _time before this is called.
func (engine *DockerTaskEngine) time() ttime.Time {
	engine._timeOnce.Do(func() {
		if engine._time == nil {
			engine._time = &ttime.DefaultTime{}
		}
	})
	return engine._time
}

// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
	events, err := engine.client.ContainerEvents(ctx)
	if err != nil {
		return err
	}
	engine.events = events
	return nil
}

// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case event := <-engine.events:
			engine.handleDockerEvent(event)
		}
	}
}

// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) {
	seelog.Debugf("Task engine: handling a docker event: %s", event.String())

	task, ok := engine.state.TaskByID(event.DockerID)
	if !ok {
		seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task",
			event.DockerID)
		return
	}
	cont, ok := engine.state.ContainerByID(event.DockerID)
	if !ok {
		seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container",
			event.DockerID)
		return
	}

	// Container health status change does not affect the container status
	// no need to process this in task manager
	if event.Type == apicontainer.ContainerHealthEvent {
		if cont.Container.HealthStatusShouldBeReported() {
			seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v",
				cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health)
			cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health)
		}
		return
	}

	engine.tasksLock.RLock()
	managedTask, ok := engine.managedTasks[task.Arn]
	engine.tasksLock.RUnlock()
	if !ok {
		// The state knows about the task but no managedTask exists; this
		// indicates internal inconsistency, hence the critical log level.
		seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s",
			task.Arn, event.String())
		return
	}
	seelog.Debugf("Task engine [%s]: writing docker event to the task: %s",
		task.Arn, event.String())
	managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event})
	seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s",
		task.Arn, event.String())
}

// StateChangeEvents returns channels to read task and container state changes. These
// changes should be read as soon as possible as them not being read will block
// processing the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
	return engine.stateChangeEvents
}

// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *apitask.Task) {
	defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")()
	err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager,
		engine.resourceFields, engine.client, engine.ctx)
	if err != nil {
		seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err)
		task.SetKnownStatus(apitaskstatus.TaskStopped)
		task.SetDesiredStatus(apitaskstatus.TaskStopped)
		engine.emitTaskEvent(task, err.Error())
		return
	}

	engine.tasksLock.Lock()
	defer engine.tasksLock.Unlock()

	existingTask, exists := engine.state.TaskByArn(task.Arn)
	if !exists {
		// This will update the container desired status
		task.UpdateDesiredStatus()

		engine.state.AddTask(task)
		if dependencygraph.ValidDependencies(task, engine.cfg) {
			engine.startTask(task)
		} else {
			// Circular container dependencies can never make progress; stop
			// the task immediately and report why.
			seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn)
			task.SetKnownStatus(apitaskstatus.TaskStopped)
			task.SetDesiredStatus(apitaskstatus.TaskStopped)
			err := TaskDependencyError{task.Arn}
			engine.emitTaskEvent(task, err.Error())
		}
		return
	}

	// Update task
	engine.updateTaskUnsafe(existingTask, task)
}

// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) {
	return engine.state.AllTasks(), nil
}

// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) {
	return engine.state.TaskByArn(arn)
}

// pullContainer decides whether the container's image must be pulled and either
// performs the (serialized) pull or registers the cached image for use.
func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	switch container.Type {
	case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause:
		// pause images are managed at startup
		return dockerapi.DockerContainerMetadata{}
	}

	if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) {
		// Record the pullStoppedAt timestamp
		defer func() {
			timestamp := engine.time().Now()
			task.SetPullStoppedAt(timestamp)
		}()

		seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name)
		return engine.concurrentPull(task, container)

	}

	// No pull image is required, the cached image will be used.
	// Add the container that uses the cached image to the pulled container state.
	dockerContainer := &apicontainer.DockerContainer{
		Container: container,
	}
	engine.state.AddPulledContainer(dockerContainer, task)

	// No pull image is required, just update container reference and use cached image.
	engine.updateContainerReference(false, container, task.Arn)
	// Return the metadata without any error
	return dockerapi.DockerContainerMetadata{Error: nil}
}

// imagePullRequired returns true if pulling image is required, or return false if local image cache
// should be used, by inspecting the agent pull behavior variable defined in config. The caller has
// to make sure the container passed in is not an internal container.
func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType,
	container *apicontainer.Container,
	taskArn string) bool {
	switch imagePullBehavior {
	case config.ImagePullOnceBehavior:
		// If this image has been pulled successfully before, don't pull the image,
		// otherwise pull the image as usual, regardless whether the image exists or not
		// (the image can be prepopulated with the AMI and never be pulled).
		imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
		if ok && imageState.GetPullSucceeded() {
			seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again",
				taskArn, container.Image, container.Name)
			return false
		}
		return true
	case config.ImagePullPreferCachedBehavior:
		// If the behavior is prefer cached, don't pull if we found cached image
		// by inspecting the image.
		_, err := engine.client.InspectImage(container.Image)
		if err != nil {
			return true
		}
		seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
			taskArn, container.Image, container.Name)
		return false
	default:
		// Need to pull the image for always and default agent pull behavior
		return true
	}
}

// concurrentPull serializes the pull against image deletion via
// ImagePullDeleteLock (read-locked, so pulls may run concurrently with each
// other) and records the task's pull-started timestamp.
func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s",
		task.Arn, container.Image, container.Name)
	ImagePullDeleteLock.RLock()
	seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s",
		task.Arn, container.Image, container.Name)
	defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s",
		task.Arn, container.Image, container.Name)
	defer ImagePullDeleteLock.RUnlock()

	// Record the task pull_started_at timestamp
	pullStart := engine.time().Now()
	ok := task.SetPullStartedAt(pullStart)
	if ok {
		seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s",
			task.Arn, pullStart)
	}
	metadata := engine.pullAndUpdateContainerReference(task, container)
	if metadata.Error == nil {
		seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s",
			task.Arn, container.Image, container.Name, time.Since(pullStart).String())
	} else {
		seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v",
			task.Arn, container.Image, container.Name, metadata.Error)
	}
	return metadata
}

// pullAndUpdateContainerReference performs the actual image pull (with ECR/ASM
// credentials when required), falls back to a locally cached image when
// configured to, and records the container's image reference.
func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
	// If a task is blocked here for some time, and before it starts pulling image,
	// the task's desired status is set to stopped, then don't pull the image
	if task.GetDesiredStatus() == apitaskstatus.TaskStopped {
		seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s",
			task.Arn, container.Image, container.Name)
		container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
		return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
	}

	// Set the credentials for pull from ECR if necessary
	if container.ShouldPullWithExecutionRole() {
		executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID())
		if !ok {
			seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s",
				task.Arn, container.Image, container.Name)
			return dockerapi.DockerContainerMetadata{
				Error: dockerapi.CannotPullECRContainerError{
					FromError: errors.New("engine ecr credentials: not found"),
				},
			}
		}

		iamCredentials := executionCredentials.GetIAMRoleCredentials()
		container.SetRegistryAuthCredentials(iamCredentials)
		// Clean up the ECR pull credentials after pulling
		defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
	}

	// Apply registry auth data from ASM if required
	if container.ShouldPullWithASMAuth() {
		if err := task.PopulateASMAuthData(container); err != nil {
			seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s",
				task.Arn, container.Image, container.Name)
			return dockerapi.DockerContainerMetadata{
				Error: dockerapi.CannotPullContainerAuthError{
					FromError: errors.New("engine docker private registry credentials: not found"),
				},
			}
		}
		defer container.SetASMDockerAuthConfig(types.AuthConfig{})
	}

	metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, engine.cfg.ImagePullTimeout)

	// Don't add internal images(created by ecs-agent) into imagemanger state
	if container.IsInternal() {
		return metadata
	}
	pullSucceeded := metadata.Error == nil
	findCachedImage := false
	if !pullSucceeded {
		// If Agent failed to pull an image when
		// 1. DependentContainersPullUpfront is enabled
		// 2. ImagePullBehavior is not set to always
		// search the image in local cached images
		if engine.cfg.DependentContainersPullUpfront.Enabled() && engine.cfg.ImagePullBehavior != config.ImagePullAlwaysBehavior {
			if _, err := engine.client.InspectImage(container.Image); err != nil {
				seelog.Errorf("Task engine [%s]: failed to find cached image %s for container %s",
					task.Arn, container.Image, container.Name)
				// Stop the task if the container is an essential container,
				// and the image is not available in both remote and local caches
				if container.IsEssential() {
					task.SetDesiredStatus(apitaskstatus.TaskStopped)
					engine.emitTaskEvent(task, fmt.Sprintf("%s: %s", metadata.Error.ErrorName(), metadata.Error.Error()))
				}
				return dockerapi.DockerContainerMetadata{Error: metadata.Error}
			}
			seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
				task.Arn, container.Image, container.Name)
			findCachedImage = true
		}
	}

	if pullSucceeded || findCachedImage {
		dockerContainer := &apicontainer.DockerContainer{
			Container: container,
		}
		engine.state.AddPulledContainer(dockerContainer, task)
	}

	engine.updateContainerReference(pullSucceeded, container, task.Arn)
	return metadata
}

func (engine *DockerTaskEngine)
updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) { + err := engine.imageManager.RecordContainerReference(container) + if err != nil { + seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v", + taskArn, err) + } + imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) + if ok && pullSucceeded { + // Only need to update the pullSucceeded flag of the image state when its not yet set to true. + if !imageState.GetPullSucceeded() { + imageState.SetPullSucceeded(true) + err = engine.dataClient.SaveImageState(imageState) + if err != nil { + seelog.Warnf("Task engine [%s]: unable to save image state: %v", + taskArn, err) + } + } + } + engine.state.AddImageState(imageState) +} + +func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { + seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name) + client := engine.client + if container.DockerConfig.Version != nil { + client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) + } + + dockerContainerName := "" + containerMap, ok := engine.state.ContainerMapByArn(task.Arn) + if !ok { + containerMap = make(map[string]*apicontainer.DockerContainer) + } else { + // looking for container that has docker name but not created + for _, v := range containerMap { + if v.Container.Name == container.Name { + dockerContainerName = v.DockerName + break + } + } + } + + // Resolve HostConfig + // we have to do this in create, not start, because docker no longer handles + // merging create config with start hostconfig the same; e.g. 
memory limits + // get lost + dockerClientVersion, versionErr := client.APIVersion() + if versionErr != nil { + return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}} + } + hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg) + if hcerr != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)} + } + + if container.AWSLogAuthExecutionRole() { + err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager) + if err != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + } + + firelensConfig := container.GetFirelensConfig() + if firelensConfig != nil { + err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg) + if err != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + + cerr := task.PopulateSecretLogOptionsToFirelensContainer(container) + if cerr != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)} + } + + if firelensConfig.Type == firelens.FirelensConfigTypeFluentd { + // For fluentd router, needs to specify FLUENT_UID to root in order for the fluentd process to access + // the socket created by Docker. + container.MergeEnvironmentVariables(map[string]string{ + "FLUENT_UID": "0", + }) + } + } + + // If the container is using a special log driver type "awsfirelens", it means the container wants to use + // the firelens container to send logs. In this case, override the log driver type to be fluentd + // and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container. + // Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge + // and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/. 
+ if hostConfig.LogConfig.Type == logDriverTypeFirelens { + hostConfig.LogConfig = getFirelensLogConfig(task, container, hostConfig, engine.cfg) + if task.IsNetworkModeAWSVPC() { + container.MergeEnvironmentVariables(map[string]string{ + fluentNetworkHost: FluentAWSVPCHostValue, + fluentNetworkPort: FluentNetworkPortValue, + }) + } else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode { + ipAddress, ok := getContainerHostIP(task.GetFirelensContainer().GetNetworkSettings()) + if !ok { + err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"} + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)} + } + container.MergeEnvironmentVariables(map[string]string{ + fluentNetworkHost: ipAddress, + fluentNetworkPort: FluentNetworkPortValue, + }) + } + } + + //Apply the log driver secret into container's LogConfig and Env secrets to container.Environment + hasSecretAsEnvOrLogDriver := func(s apicontainer.Secret) bool { + return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver + } + if container.HasSecret(hasSecretAsEnvOrLogDriver) { + err := task.PopulateSecrets(hostConfig, container) + + if err != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + } + + // Populate credentialspec resource + if container.RequiresCredentialSpec() { + seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn) + var credSpecResource *credentialspec.CredentialSpecResource + resource, ok := task.GetCredentialSpecResource() + if !ok || len(resource) <= 0 { + resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"} + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)} + } + credSpecResource = resource[0].(*credentialspec.CredentialSpecResource) + + 
containerCredSpec, err := container.GetCredentialSpec() + if err == nil && containerCredSpec != "" { + // CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json + desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec) + if err != nil || desiredCredSpecInjection == "" { + missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"} + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)} + } + + // Inject containers' hostConfig.SecurityOpt with the credentialspec resource + seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection) + if len(hostConfig.SecurityOpt) == 0 { + hostConfig.SecurityOpt = []string{desiredCredSpecInjection} + } else { + for idx, opt := range hostConfig.SecurityOpt { + if strings.HasPrefix(opt, "credentialspec:") { + hostConfig.SecurityOpt[idx] = desiredCredSpecInjection + } + } + } + + } else { + emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()} + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)} + } + } + + if container.ShouldCreateWithEnvFiles() { + err := task.MergeEnvVarsFromEnvfiles(container) + if err != nil { + seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name) + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + } + + if execcmd.IsExecEnabledContainer(container) { + tID, err := task.GetID() + if err != nil { + herr := &apierrors.HostConfigError{Msg: err.Error()} + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)} + } + err = engine.execCmdMgr.InitializeContainer(tID, container, hostConfig) + if err != nil { + seelog.Warnf("Exec Agent initialization: %v . 
Continuing to start container without enabling exec feature.", err) + + // Emit a managed agent state change event if exec agent initialization fails + engine.tasksLock.RLock() + mTask, ok := engine.managedTasks[task.Arn] + engine.tasksLock.RUnlock() + if ok { + mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", err)) + } else { + seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name) + } + } + } + + config, err := task.DockerConfig(container, dockerClientVersion) + if err != nil { + return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} + } + + // Augment labels with some metadata from the agent. Explicitly do this last + // such that it will always override duplicates in the provided raw config + // data. + config.Labels[labelTaskARN] = task.Arn + config.Labels[labelContainerName] = container.Name + config.Labels[labelTaskDefinitionFamily] = task.Family + config.Labels[labelTaskDefinitionVersion] = task.Version + config.Labels[labelCluster] = engine.cfg.Cluster + + if dockerContainerName == "" { + // only alphanumeric and hyphen characters are allowed + reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+") + name := reInvalidChars.ReplaceAllString(container.Name, "") + + dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex() + + // Pre-add the container in case we stop before the next, more useful, + // AddContainer call. 
This ensures we have a way to get the container if + // we die before 'createContainer' returns because we can inspect by + // name + engine.state.AddContainer(&apicontainer.DockerContainer{ + DockerName: dockerContainerName, + Container: container, + }, task) + seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s", + task.Arn, container.Name, dockerContainerName) + } + + // Create metadata directory and file then populate it with common metadata of all containers of this task + // Afterwards add this directory to the container's mounts if file creation was successful + if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { + info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout) + if infoErr != nil { + seelog.Warnf("Task engine [%s]: unable to get docker info : %v", + task.Arn, infoErr) + } + mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions) + if mderr != nil { + seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v", + task.Arn, container.Name, mderr) + } + } + + createContainerBegin := time.Now() + metadata := client.CreateContainer(engine.ctx, config, hostConfig, + dockerContainerName, engine.cfg.ContainerCreateTimeout) + if metadata.DockerID != "" { + seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s", + task.Arn, container.Name, metadata.DockerID) + dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID, + DockerName: dockerContainerName, + Container: container} + engine.state.AddContainer(dockerContainer, task) + engine.saveDockerContainerData(dockerContainer) + } + container.SetLabels(config.Labels) + seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s", + task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin)) + container.SetRuntimeID(metadata.DockerID) + return metadata +} + +func getFirelensLogConfig(task 
*apitask.Task, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig { + fields := strings.Split(task.Arn, "/") + taskID := fields[len(fields)-1] + tag := fmt.Sprintf(fluentTagDockerFormat, container.Name, taskID) + fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath) + logConfig := hostConfig.LogConfig + logConfig.Type = logDriverTypeFluentd + logConfig.Config = make(map[string]string) + logConfig.Config[logDriverTag] = tag + logConfig.Config[logDriverFluentdAddress] = fluentd + logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true) + logConfig.Config[logDriverSubSecondPrecision] = strconv.FormatBool(true) + seelog.Debugf("Applying firelens log config for container %s: %v", container.Name, logConfig) + return logConfig +} + +func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { + seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID()) + client := engine.client + if container.DockerConfig.Version != nil { + client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) + } + + dockerID, err := engine.getDockerID(task, container) + if err != nil { + return dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStartContainerError{ + FromError: err, + }, + } + } + + startContainerBegin := time.Now() + dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout) + if dockerContainerMD.Error != nil { + return dockerContainerMD + } + + seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s", + task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin)) + + // Get metadata through container inspection and available task information then write this to the 
metadata file + // Performs this in the background to avoid delaying container start + // TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and + // add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted + if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() { + go func() { + err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name) + if err != nil { + seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v", + task.Arn, container.Name, err) + return + } + container.SetMetadataFileUpdated() + seelog.Debugf("Task engine [%s]: updated metadata file for container %s", + task.Arn, container.Name) + }() + } + + // If container is a firelens container, fluent host is needed to be added to the environment variable for the task. + // For the supported network mode - bridge and awsvpc, the awsvpc take the host 127.0.0.1 but in bridge mode, + // there is a need to wait for the IP to be present before the container using the firelens can be created. 
+ if container.GetFirelensConfig() != nil { + if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) { + _, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings) + if !gotContainerIP { + getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier) + contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute) + defer cancel() + err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error { + inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID, + dockerclient.InspectContainerTimeout) + if err != nil { + return err + } + _, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings) + if gotIPBridge { + dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings + return nil + } else { + return errors.New("Bridge IP not available to use for firelens") + } + }) + if err != nil { + return dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStartContainerError{FromError: err}, + } + } + } + + } + } + if execcmd.IsExecEnabledContainer(container) { + if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed { + reason := "ExecuteCommandAgent started" + if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil { + reason = err.Error() + seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err) + } + + engine.tasksLock.RLock() + mTask, ok := engine.managedTasks[task.Arn] + engine.tasksLock.RUnlock() + // whether we started or failed to start, we'll want to emit a state change event + // redundant state change events like RUNNING->RUNNING are allowed + if ok { + mTask.emitManagedAgentEvent(mTask.Task, 
container, execcmd.ExecuteCommandAgentName, reason) + } else { + seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name) + } + } + } + return dockerContainerMD +} + +func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { + seelog.Infof("Task engine [%s]: setting up container resources for container [%s]", + task.Arn, container.Name) + containerInspectOutput, err := engine.inspectContainer(task, container) + if err != nil { + return dockerapi.DockerContainerMetadata{ + Error: ContainerNetworkingError{ + fromError: errors.Wrap(err, + "container resource provisioning: cannot setup task network namespace due to error inspecting pause container"), + }, + } + } + + task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid)) + + cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true) + if err != nil { + return dockerapi.DockerContainerMetadata{ + Error: ContainerNetworkingError{ + fromError: errors.Wrap(err, + "container resource provisioning: unable to build cni configuration"), + }, + } + } + + // Invoke the libcni to config the network namespace for the container + result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout) + if err != nil { + seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v", + task.Arn, err) + return dockerapi.DockerContainerMetadata{ + DockerID: cniConfig.ContainerID, + Error: ContainerNetworkingError{errors.Wrap(err, + "container resource provisioning: failed to setup network namespace")}, + } + } + + taskIP := result.IPs[0].Address.IP.String() + seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP) + engine.state.AddTaskIPAddress(taskIP, task.Arn) + task.SetLocalIPAddress(taskIP) + engine.saveTaskData(task) + 
return dockerapi.DockerContainerMetadata{ + DockerID: cniConfig.ContainerID, + } +} + +// checkTearDownPauseContainer idempotently tears down the pause container network when the pause container's known +//or desired status is stopped. +func (engine *DockerTaskEngine) checkTearDownPauseContainer(task *apitask.Task) { + if !task.IsNetworkModeAWSVPC() { + return + } + for _, container := range task.Containers { + // Cleanup the pause container network namespace before stop the container + if container.Type == apicontainer.ContainerCNIPause { + // Clean up if the pause container has stopped or will stop + if container.KnownTerminal() || container.DesiredTerminal() { + err := engine.cleanupPauseContainerNetwork(task, container) + if err != nil { + seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err) + } + } + return + } + } +} + +// cleanupPauseContainerNetwork will clean up the network namespace of pause container +func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error { + // This operation is idempotent + if container.IsContainerTornDown() { + return nil + } + delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second + if engine.handleDelay != nil && delay > 0 { + seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay) + engine.handleDelay(delay) + } + containerInspectOutput, err := engine.inspectContainer(task, container) + if err != nil { + return errors.Wrap(err, "engine: cannot cleanup task network namespace due to error inspecting pause container") + } + + seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn) + cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false) + if err != nil { + return errors.Wrapf(err, + "engine: failed cleanup task network namespace, task: %s", task.String()) + } + + err = 
engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout) + if err != nil { + return err + } + + container.SetContainerTornDown(true) + seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn) + return nil +} + +// buildCNIConfigFromTaskContainer builds a CNI config for the task and container. +func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer( + task *apitask.Task, + containerInspectOutput *types.ContainerJSON, + includeIPAMConfig bool) (*ecscni.Config, error) { + cniConfig := &ecscni.Config{ + BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata.Enabled(), + MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, + } + if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil && + len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 && + len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 { + cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address + } + if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 { + cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes + } + + cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid) + cniConfig.ContainerID = containerInspectOutput.ID + + cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig) + if err != nil { + return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task") + } + + return cniConfig, nil +} + +func (engine *DockerTaskEngine) inspectContainer(task *apitask.Task, container *apicontainer.Container) (*types.ContainerJSON, error) { + dockerID, err := engine.getDockerID(task, container) + if err != nil { + return nil, err + } + + return engine.client.InspectContainer(engine.ctx, dockerID, dockerclient.InspectContainerTimeout) +} + +func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { + seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name) + + dockerID, 
err := engine.getDockerID(task, container) + if err != nil { + return dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStopContainerError{ + FromError: err, + }, + } + } + + // Cleanup the pause container network namespace before stop the container + if container.Type == apicontainer.ContainerCNIPause { + err := engine.cleanupPauseContainerNetwork(task, container) + if err != nil { + seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", + task.Arn, err) + } + } + + apiTimeoutStopContainer := container.GetStopTimeout() + if apiTimeoutStopContainer <= 0 { + apiTimeoutStopContainer = engine.cfg.DockerStopTimeout + } + + return engine.client.StopContainer(engine.ctx, dockerID, apiTimeoutStopContainer) +} + +func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error { + seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name) + + dockerID, err := engine.getDockerID(task, container) + if err != nil { + return err + } + + return engine.client.RemoveContainer(engine.ctx, dockerID, dockerclient.RemoveContainerTimeout) +} + +// updateTaskUnsafe determines if a new transition needs to be applied to the +// referenced task, and if needed applies it. It should not be called anywhere +// but from 'AddTask' and is protected by the tasksLock lock there. +func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) { + managedTask, ok := engine.managedTasks[task.Arn] + if !ok { + seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! 
Aborting.", + task.Arn) + return + } + // Keep the lock because sequence numbers cannot be correct unless they are + // also read in the order AddTask was called + // This does block the engine's ability to ingest any new events (including + // stops for past tasks, ack!), but this is necessary for correctness + updateDesiredStatus := update.GetDesiredStatus() + seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]", + task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) + managedTask.emitACSTransition(acsTransition{ + desiredStatus: updateDesiredStatus, + seqnum: update.StopSequenceNumber, + }) + seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]", + task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) +} + +// transitionContainer calls applyContainerState, and then notifies the managed +// task of the change. transitionContainer is called by progressTask and +// by handleStoppedToRunningContainerTransition. 
+func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to apicontainerstatus.ContainerStatus) { + // Let docker events operate async so that we can continue to handle ACS / other requests + // This is safe because 'applyContainerState' will not mutate the task + metadata := engine.applyContainerState(task, container, to) + + engine.tasksLock.RLock() + managedTask, ok := engine.managedTasks[task.Arn] + engine.tasksLock.RUnlock() + if ok { + managedTask.emitDockerContainerChange(dockerContainerChange{ + container: container, + event: dockerapi.DockerContainerChangeEvent{ + Status: to, + DockerContainerMetadata: metadata, + }, + }) + } +} + +// applyContainerState moves the container to the given state by calling the +// function defined in the transitionFunctionMap for the state +func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState apicontainerstatus.ContainerStatus) dockerapi.DockerContainerMetadata { + transitionFunction, ok := engine.transitionFunctionMap()[nextState] + if !ok { + seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s", + task.Arn, container.Name, nextState.String()) + return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}} + } + metadata := transitionFunction(task, container) + if metadata.Error != nil { + seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v", + task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error) + } else { + seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]", + task.Arn, container.Name, container.GetRuntimeID(), nextState.String()) + } + return metadata +} + +// transitionFunctionMap provides the logic for the simple state machine of the +// DockerTaskEngine. 
Each desired state maps to a function that can be called +// to try and move the task to that desired state. +func (engine *DockerTaskEngine) transitionFunctionMap() map[apicontainerstatus.ContainerStatus]transitionApplyFunc { + return engine.containerStatusToTransitionFunction +} + +type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata) + +// State is a function primarily meant for testing usage; it is explicitly not +// part of the TaskEngine interface and should not be relied upon. +// It returns an internal representation of the state of this DockerTaskEngine. +func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState { + return engine.state +} + +// Version returns the underlying docker version. +func (engine *DockerTaskEngine) Version() (string, error) { + return engine.client.Version(engine.ctx, dockerclient.VersionTimeout) +} + +func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) { + err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name) + if err != nil { + seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v", + task.Arn, cont.Container.Name, err) + } else { + cont.Container.SetMetadataFileUpdated() + seelog.Debugf("Task engine [%s]: updated metadata file for container %s", + task.Arn, cont.Container.Name) + } +} + +func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) { + if networkSettings == nil { + return "", false + } else if networkSettings.IPAddress != "" { + return networkSettings.IPAddress, true + } else if len(networkSettings.Networks) > 0 { + for mode, network := range networkSettings.Networks { + if mode == apitask.BridgeNetworkMode && network.IPAddress != "" { + return network.IPAddress, true + } + } + } + return "", false +} + +func (engine *DockerTaskEngine) getDockerID(task *apitask.Task, container *apicontainer.Container) (string, 
error) { + runtimeID := container.GetRuntimeID() + if runtimeID != "" { + return runtimeID, nil + } + containerMap, ok := engine.state.ContainerMapByArn(task.Arn) + if !ok { + return "", errors.Errorf("container name=%s belongs to unrecognized task taskArn=%s", container.Name, task.Arn) + } + + dockerContainer, ok := containerMap[container.Name] + if !ok { + return "", errors.Errorf("container name=%s not recognized by agent", container.Name) + } + + if dockerContainer.DockerID == "" { + return dockerContainer.DockerName, nil + } + return dockerContainer.DockerID, nil +} diff --git a/agent/engine/docker_task_engine_linux_test.go b/agent/engine/docker_task_engine_linux_test.go new file mode 100644 index 00000000000..8fc411af503 --- /dev/null +++ b/agent/engine/docker_task_engine_linux_test.go @@ -0,0 +1,535 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package engine + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/testdata" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/mock_control" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + cgroupMountPath = "/sys/fs/cgroup" + + testTaskDefFamily = "testFamily" + testTaskDefVersion = "1" +) + +func init() { + defaultConfig = config.DefaultConfig() + defaultConfig.TaskCPUMemLimit.Value = config.ExplicitlyDisabled +} + +// TestResourceContainerProgression tests the container progression based on a +// resource dependency +func 
TestResourceContainerProgression(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + sleepContainer := sleepTask.Containers[0] + + sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepContainer.BuildResourceDependency("cgroup", resourcestatus.ResourceCreated, apicontainerstatus.ContainerPulled) + + mockControl := mock_control.NewMockControl(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + taskID, err := sleepTask.GetID() + assert.NoError(t, err) + cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + cgroupResource := cgroup.NewCgroupResource(sleepTask.Arn, mockControl, mockIO, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + + sleepTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + sleepTask.AddResource("cgroup", cgroupResource) + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + // containerEventsWG is used to force the test to wait until the container created and started + // events are processed + containerEventsWG := sync.WaitGroup{} + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + gomock.InOrder( + // Ensure that the resource is created first + mockControl.EXPECT().Exists(gomock.Any()).Return(false), + mockControl.EXPECT().Create(gomock.Any()).Return(nil, nil), + mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil), + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(), + client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil), + 
imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false), + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) { + assert.True(t, strings.Contains(containerName, sleepContainer.Name)) + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}), + // Next, the sleep container is started + client.EXPECT().StartContainer(gomock.Any(), containerID+":"+sleepContainer.Name, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}), + ) + + addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG) + + cleanup := make(chan time.Time, 1) + mockTime.EXPECT().After(gomock.Any()).Return(cleanup).AnyTimes() + + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: containerID + ":" + sleepContainer.Name, + ExitCode: aws.Int(exitCode), + }, + } + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false) +} + +func TestDeleteTask(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + mockControl := mock_control.NewMockControl(ctrl) + cgroupResource 
:= cgroup.NewCgroupResource("", mockControl, nil, "cgroupRoot", "", specs.LinuxResources{}) + task := &apitask.Task{ + Arn: testTaskARN, + ENIs: []*apieni.ENI{ + { + MacAddress: mac, + }, + }, + } + task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + task.AddResource("cgroup", cgroupResource) + require.NoError(t, dataClient.SaveTask(task)) + + cfg := defaultConfig + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + + taskEngine := &DockerTaskEngine{ + state: mockState, + dataClient: dataClient, + cfg: &cfg, + } + + attachment := &apieni.ENIAttachment{ + TaskARN: "TaskARN", + AttachmentARN: testAttachmentArn, + MACAddress: "MACAddress", + Status: apieni.ENIAttachmentNone, + AttachStatusSent: true, + } + + gomock.InOrder( + mockControl.EXPECT().Remove("cgroupRoot").Return(nil), + mockState.EXPECT().RemoveTask(task), + mockState.EXPECT().ENIByMac(gomock.Any()).Return(attachment, true), + mockState.EXPECT().RemoveENIAttachment(mac), + ) + + assert.NoError(t, taskEngine.dataClient.SaveENIAttachment(attachment)) + attachments, err := taskEngine.dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, attachments, 1) + + taskEngine.deleteTask(task) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 0) + attachments, err = taskEngine.dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, attachments, 0) +} + +func TestDeleteTaskBranchENIEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + cgroupResource := cgroup.NewCgroupResource("", mockControl, nil, "cgroupRoot", "", specs.LinuxResources{}) + task := &apitask.Task{ + Arn: testTaskARN, + ENIs: []*apieni.ENI{ + { + MacAddress: mac, + InterfaceAssociationProtocol: apieni.VLANInterfaceAssociationProtocol, + }, + }, + } + task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) 
+ task.AddResource("cgroup", cgroupResource) + cfg := defaultConfig + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + + taskEngine := &DockerTaskEngine{ + state: mockState, + cfg: &cfg, + dataClient: data.NewNoopClient(), + } + + gomock.InOrder( + mockControl.EXPECT().Remove("cgroupRoot").Return(nil), + mockState.EXPECT().RemoveTask(task), + ) + + taskEngine.deleteTask(task) +} + +// TestResourceContainerProgressionFailure ensures that task moves to STOPPED when +// resource creation fails +func TestResourceContainerProgressionFailure(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + sleepTask := testdata.LoadTask("sleep5") + sleepContainer := sleepTask.Containers[0] + + sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepContainer.BuildResourceDependency("cgroup", resourcestatus.ResourceCreated, apicontainerstatus.ContainerPulled) + + mockControl := mock_control.NewMockControl(ctrl) + taskID, err := sleepTask.GetID() + assert.NoError(t, err) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + cgroupResource := cgroup.NewCgroupResource(sleepTask.Arn, mockControl, nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + + sleepTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + sleepTask.AddResource("cgroup", cgroupResource) + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + gomock.InOrder( + // resource creation failure + mockControl.EXPECT().Exists(gomock.Any()).Return(false), + mockControl.EXPECT().Create(gomock.Any()).Return(nil, errors.New("cgroup create error")), + ) + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + + err = taskEngine.Init(ctx) + assert.NoError(t, 
err) + + taskEngine.AddTask(sleepTask) + cleanup := make(chan time.Time, 1) + mockTime.EXPECT().After(gomock.Any()).Return(cleanup).AnyTimes() + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false) +} + +func TestTaskCPULimitHappyPath(t *testing.T) { + testcases := []struct { + name string + metadataCreateError error + metadataUpdateError error + metadataCleanError error + taskCPULimit config.BooleanDefaultTrue + }{ + { + name: "Task CPU Limit Succeeds", + metadataCreateError: nil, + metadataUpdateError: nil, + metadataCleanError: nil, + taskCPULimit: config.BooleanDefaultTrue{Value: config.ExplicitlyEnabled}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + metadataConfig := defaultConfig + metadataConfig.TaskCPUMemLimit = tc.taskCPULimit + metadataConfig.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, metadataManager := mocks( + t, ctx, &metadataConfig) + defer ctrl.Finish() + + roleCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"}, + } + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(roleCredentials, true).AnyTimes() + credentialsManager.EXPECT().RemoveCredentials(credentialsID) + + sleepTask := testdata.LoadTask("sleep5") + sleepTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + sleepContainer := sleepTask.Containers[0] + sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepTask.SetCredentialsID(credentialsID) + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + // containerEventsWG is used to force the test to wait until the container created and started + // events are processed + containerEventsWG := sync.WaitGroup{} + + 
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + containerName := make(chan string) + go func() { + name := <-containerName + setCreatedContainerName(name) + }() + mockControl := mock_control.NewMockControl(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + taskID, err := sleepTask.GetID() + assert.NoError(t, err) + cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID) + if tc.taskCPULimit.Enabled() { + // TODO Currently, the resource Setup() method gets invoked multiple + // times for a task. This is really a bug and a fortunate occurrence + // that cgroup creation APIs behave idempotently. + // + // This should be modified so that 'Setup' is invoked exactly once + // by moving the cgroup creation to a "resource setup" step in the + // task life-cycle and performing the setup only in this stage + taskEngine.(*DockerTaskEngine).resourceFields = &taskresource.ResourceFields{ + Control: mockControl, + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + IOUtil: mockIO, + }, + } + mockControl.EXPECT().Exists(gomock.Any()).Return(false) + mockControl.EXPECT().Create(gomock.Any()).Return(nil, nil) + mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil) + } + + for _, container := range sleepTask.Containers { + validateContainerRunWorkflow(t, container, sleepTask, imageManager, + client, &roleCredentials, &containerEventsWG, + eventStream, containerName, func() { + metadataManager.EXPECT().Create(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return(tc.metadataCreateError) + metadataManager.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return(tc.metadataUpdateError) + }) + } + + client.EXPECT().Info(gomock.Any(), gomock.Any()).Return( + types.Info{}, nil) + addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG) + cleanup := make(chan time.Time, 1) + defer close(cleanup) + 
mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).AnyTimes() + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: containerID, + ExitCode: aws.Int(exitCode), + }, + } + + // StopContainer might be invoked if the test execution is slow, during + // the cleanup phase. Account for that. + client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: containerID}).AnyTimes() + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false) + // This ensures that managedTask.waitForStopReported makes progress + sleepTask.SetSentStatus(apitaskstatus.TaskStopped) + // Extra events should not block forever; duplicate acs and docker events are possible + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + + sleepTaskStop := testdata.LoadTask("sleep5") + sleepTaskStop.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + sleepContainer = sleepTaskStop.Containers[0] + sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepTaskStop.SetCredentialsID(credentialsID) + sleepTaskStop.SetDesiredStatus(apitaskstatus.TaskStopped) + taskEngine.AddTask(sleepTaskStop) + // As above, duplicate events should not be a problem + taskEngine.AddTask(sleepTaskStop) + taskEngine.AddTask(sleepTaskStop) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + if tc.taskCPULimit.Enabled() { + mockControl.EXPECT().Remove(cgroupRoot).Return(nil) + } + // Expect a bunch of steady state 'poll' describes when we trigger cleanup + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), 
gomock.Any()).Do( + func(ctx interface{}, removedContainerName string, timeout time.Duration) { + assert.Equal(t, containerID, removedContainerName, + "Container name mismatch") + }).Return(nil) + + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()) + metadataManager.EXPECT().Clean(gomock.Any()).Return(tc.metadataCleanError) + // trigger cleanup + cleanup <- time.Now() + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + // Wait for the task to actually be dead; if we just fallthrough immediately, + // the remove might not have happened (expectation failure) + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + time.Sleep(5 * time.Millisecond) + } + }) + } +} + +func TestCreateFirelensContainer(t *testing.T) { + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: logDriverTypeFirelens, + Config: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + + getTask := func(firelensConfigType string) *apitask.Task { + task := &apitask.Task{ + Arn: testTaskARN, + Family: testTaskDefFamily, + Version: testTaskDefVersion, + Containers: []*apicontainer.Container{ + { + Name: "firelens", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: firelensConfigType, + Options: map[string]string{ + "enable-ecs-log-metadata": "true", + "config-file-type": "s3", + "config-file-value": "arn:aws:s3:::bucket/key", + }, + }, + }, + { + Name: "logsender", + Secrets: []apicontainer.Secret{ + { + Name: "secret-name", + ValueFrom: "secret-value-from", + Provider: apicontainer.SecretProviderSSM, + Target: apicontainer.SecretTargetLogDriver, + Region: "us-west-2", + }, + }, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: func(s string) *string { + return &s + }(string(rawHostConfig)), + }, + }, + }, + } + + 
task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + ssmRes := &ssmsecret.SSMSecretResource{} + ssmRes.SetCachedSecretValue("secret-value-from_us-west-2", "secret-val") + task.AddResource(ssmsecret.ResourceName, ssmRes) + return task + } + + testCases := []struct { + name string + task *apitask.Task + expectedGeneratedConfigBind string + expectedS3ConfigBind string + expectedSocketBind string + expectedLogOptionEnv string + }{ + { + name: "test create fluentd firelens container", + task: getTask(firelens.FirelensConfigTypeFluentd), + expectedGeneratedConfigBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/config/fluent.conf:/fluentd/etc/fluent.conf", + expectedS3ConfigBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/config/external.conf:/fluentd/etc/external.conf", + expectedSocketBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/socket/:/var/run/", + expectedLogOptionEnv: "secret-name_1=secret-val", + }, + { + name: "test create fluentbit firelens container", + task: getTask(firelens.FirelensConfigTypeFluentbit), + expectedGeneratedConfigBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/config/fluent.conf:/fluent-bit/etc/fluent-bit.conf", + expectedS3ConfigBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/config/external.conf:/fluent-bit/etc/external.conf", + expectedSocketBind: defaultConfig.DataDirOnHost + "/data/firelens/task-id/socket/:/var/run/", + expectedLogOptionEnv: "secret-name_1=secret-val", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx 
context.Context, + config *dockercontainer.Config, + hostConfig *dockercontainer.HostConfig, + name string, + timeout time.Duration) { + assert.Contains(t, hostConfig.Binds, tc.expectedGeneratedConfigBind) + assert.Contains(t, hostConfig.Binds, tc.expectedS3ConfigBind) + assert.Contains(t, hostConfig.Binds, tc.expectedSocketBind) + assert.Contains(t, config.Env, tc.expectedLogOptionEnv) + }) + ret := taskEngine.(*DockerTaskEngine).createContainer(tc.task, tc.task.Containers[0]) + assert.NoError(t, ret.Error) + }) + } +} diff --git a/agent/engine/docker_task_engine_test.go b/agent/engine/docker_task_engine_test.go new file mode 100644 index 00000000000..90651df3c27 --- /dev/null +++ b/agent/engine/docker_task_engine_test.go @@ -0,0 +1,3890 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/api/appmesh" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/asm" + mock_asm_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + mock_containermetadata "github.com/aws/amazon-ecs-agent/agent/containermetadata/mocks" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + mock_ecscni "github.com/aws/amazon-ecs-agent/agent/ecscni/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + mock_execcmdagent "github.com/aws/amazon-ecs-agent/agent/engine/execcmd/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/testdata" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + mock_ssm_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + mock_ssmiface 
"github.com/aws/amazon-ecs-agent/agent/ssm/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + mock_taskresource "github.com/aws/amazon-ecs-agent/agent/taskresource/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + credentialsID = "credsid" + ipv4 = "10.0.0.1" + gatewayIPv4 = "10.0.0.2/20" + mac = "1.2.3.4" + ipv6 = "f0:234:23" + dockerContainerName = "docker-container-name" + containerPid = 123 + taskIP = "169.254.170.3" + exitCode = 1 + labelsTaskARN = "arn:aws:ecs:us-east-1:012345678910:task/c09f0188-7f87-4b0f-bfc3-16296622b6fe" + taskSteadyStatePollInterval = time.Millisecond + secretID = "meaning-of-life" + region = "us-west-2" + username = "irene" + password = "sher" + ignoredUID = "1337" + proxyIngressPort = "15000" + proxyEgressPort = "15001" + appPort = "9000" + egressIgnoredIP = "169.254.169.254" + expectedDelaySeconds = 10 + expectedDelay = expectedDelaySeconds * time.Second + networkBridgeIP = "bridgeIP" + networkModeBridge = "bridge" + networkModeAWSVPC = "awsvpc" + testTaskARN = "arn:aws:ecs:region:account-id:task/task-id" +) + +var ( + defaultConfig config.Config + nsResult = mockSetupNSResult() + + mockENI = &apieni.ENI{ + ID: "eni-id", + IPV4Addresses: 
[]*apieni.ENIIPV4Address{ + { + Primary: true, + Address: ipv4, + }, + }, + MacAddress: mac, + IPV6Addresses: []*apieni.ENIIPV6Address{ + { + Address: ipv6, + }, + }, + SubnetGatewayIPV4Address: gatewayIPv4, + } + + // createdContainerName is used to save the name of the created + // container from the validateContainerRunWorkflow method. This + // variable should never be accessed directly. + // The `getCreatedContainerName` and `setCreatedContainerName` + // methods should be used instead. + createdContainerName string + // createdContainerNameLock guards access to the createdContainerName + // var. + createdContainerNameLock sync.Mutex +) + +func init() { + defaultConfig = config.DefaultConfig() + defaultConfig.TaskCPUMemLimit.Value = config.ExplicitlyDisabled +} + +func getCreatedContainerName() string { + createdContainerNameLock.Lock() + defer createdContainerNameLock.Unlock() + + return createdContainerName +} + +func setCreatedContainerName(name string) { + createdContainerNameLock.Lock() + defer createdContainerNameLock.Unlock() + + createdContainerName = name +} + +func mocks(t *testing.T, ctx context.Context, cfg *config.Config) (*gomock.Controller, + *mock_dockerapi.MockDockerClient, *mock_ttime.MockTime, TaskEngine, + *mock_credentials.MockManager, *mock_engine.MockImageManager, *mock_containermetadata.MockManager) { + ctrl := gomock.NewController(t) + client := mock_dockerapi.NewMockDockerClient(ctrl) + mockTime := mock_ttime.NewMockTime(ctrl) + credentialsManager := mock_credentials.NewMockManager(ctrl) + + containerChangeEventStream := eventstream.NewEventStream("TESTTASKENGINE", ctx) + containerChangeEventStream.StartListening() + imageManager := mock_engine.NewMockImageManager(ctrl) + metadataManager := mock_containermetadata.NewMockManager(ctrl) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + + taskEngine := NewTaskEngine(cfg, client, credentialsManager, containerChangeEventStream, + imageManager, dockerstate.NewTaskEngineState(), 
metadataManager, nil, execCmdMgr) + taskEngine.(*DockerTaskEngine)._time = mockTime + taskEngine.(*DockerTaskEngine).ctx = ctx + + return ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, metadataManager +} + +func mockSetupNSResult() *current.Result { + _, ip, _ := net.ParseCIDR(taskIP + "/32") + return ¤t.Result{ + IPs: []*current.IPConfig{ + { + Address: *ip, + }, + }, + } +} + +func TestBatchContainerHappyPath(t *testing.T) { + testcases := []struct { + name string + metadataCreateError error + metadataUpdateError error + metadataCleanError error + taskCPULimit config.Conditional + execCommandAgentEnabled bool + }{ + { + name: "Metadata Manager Succeeds", + metadataCreateError: nil, + metadataUpdateError: nil, + metadataCleanError: nil, + taskCPULimit: config.ExplicitlyDisabled, + }, + { + name: "ExecCommandAgent is started", + metadataCreateError: nil, + metadataUpdateError: nil, + metadataCleanError: nil, + taskCPULimit: config.ExplicitlyDisabled, + execCommandAgentEnabled: true, + }, + { + name: "Metadata Manager Fails to Create, Update and Cleanup", + metadataCreateError: errors.New("create metadata error"), + metadataUpdateError: errors.New("update metadata error"), + metadataCleanError: errors.New("clean metadata error"), + taskCPULimit: config.ExplicitlyDisabled, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + metadataConfig := defaultConfig + metadataConfig.TaskCPUMemLimit.Value = tc.taskCPULimit + metadataConfig.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, metadataManager := mocks( + t, ctx, &metadataConfig) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + taskEngine.(*DockerTaskEngine).execCmdMgr = execCmdMgr + defer ctrl.Finish() + + roleCredentials := credentials.TaskIAMRoleCredentials{ + 
IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: "credsid"}, + } + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(roleCredentials, true).AnyTimes() + credentialsManager.EXPECT().RemoveCredentials(credentialsID) + + sleepTask := testdata.LoadTask("sleep5") + if tc.execCommandAgentEnabled && len(sleepTask.Containers) > 0 { + enableExecCommandAgentForContainer(sleepTask.Containers[0], apicontainer.ManagedAgentState{}) + } + sleepTask.SetCredentialsID(credentialsID) + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + // containerEventsWG is used to force the test to wait until the container created and started + // events are processed + containerEventsWG := sync.WaitGroup{} + + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + containerName := make(chan string) + go func() { + name := <-containerName + setCreatedContainerName(name) + }() + + for _, container := range sleepTask.Containers { + validateContainerRunWorkflow(t, container, sleepTask, imageManager, + client, &roleCredentials, &containerEventsWG, + eventStream, containerName, func() { + metadataManager.EXPECT().Create(gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any()).Return(tc.metadataCreateError) + metadataManager.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return(tc.metadataUpdateError) + + if tc.execCommandAgentEnabled { + execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), container, gomock.Any()).Times(1) + // TODO: [ecs-exec] validate call control plane to report ExecCommandAgent SUCCESS/FAIL here + execCmdMgr.EXPECT().StartAgent(gomock.Any(), client, sleepTask, sleepTask.Containers[0], containerID) + } + }) + } + + client.EXPECT().Info(gomock.Any(), gomock.Any()).Return( + types.Info{}, nil) + addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG) + cleanup := make(chan time.Time, 1) + defer close(cleanup) + 
mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).AnyTimes() + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: containerID, + ExitCode: aws.Int(exitCode), + }, + } + + // StopContainer might be invoked if the test execution is slow, during + // the cleanup phase. Account for that. + client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: containerID}).AnyTimes() + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, tc.execCommandAgentEnabled) + // This ensures that managedTask.waitForStopReported makes progress + sleepTask.SetSentStatus(apitaskstatus.TaskStopped) + // Extra events should not block forever; duplicate acs and docker events are possible + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + + sleepTaskStop := testdata.LoadTask("sleep5") + + sleepTaskStop.SetCredentialsID(credentialsID) + sleepTaskStop.SetDesiredStatus(apitaskstatus.TaskStopped) + taskEngine.AddTask(sleepTaskStop) + // As above, duplicate events should not be a problem + taskEngine.AddTask(sleepTaskStop) + taskEngine.AddTask(sleepTaskStop) + // Expect a bunch of steady state 'poll' describes when we trigger cleanup + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, removedContainerName string, timeout time.Duration) { + assert.Equal(t, containerID, removedContainerName, + "Container name mismatch") + }).Return(nil) + + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()) + metadataManager.EXPECT().Clean(gomock.Any()).Return(tc.metadataCleanError) + // trigger 
cleanup + cleanup <- time.Now() + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) }() + // Wait for the task to actually be dead; if we just fallthrough immediately, + // the remove might not have happened (expectation failure) + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + time.Sleep(5 * time.Millisecond) + } + }) + } +} + +// TestTaskWithSteadyStateResourcesProvisioned tests container and task transitions +// when the steady state for the pause container is set to RESOURCES_PROVISIONED and +// the steady state for the normal container is set to RUNNING +func TestTaskWithSteadyStateResourcesProvisioned(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + // sleep5 contains a single 'sleep' container, with DesiredStatus == RUNNING + sleepTask := testdata.LoadTask("sleep5") + sleepContainer := sleepTask.Containers[0] + sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepContainer.BuildContainerDependency("pause", apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled) + // Add a second container with DesiredStatus == RESOURCES_PROVISIONED and + // steadyState == RESOURCES_PROVISIONED + pauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerResourcesProvisioned) + pauseContainer.Name = "pause" + pauseContainer.Image = "pause" + pauseContainer.CPU = 10 + pauseContainer.Memory = 10 + pauseContainer.Essential = true + pauseContainer.Type = apicontainer.ContainerCNIPause + sleepTask.Containers = append(sleepTask.Containers, pauseContainer) + eventStream := make(chan 
dockerapi.DockerContainerChangeEvent) + // containerEventsWG is used to force the test to wait until the container created and started + // events are processed + containerEventsWG := sync.WaitGroup{} + + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + // We cannot rely on the order of pulls between images as they can still be downloaded in + // parallel. The dependency graph enforcement comes into effect for CREATED transitions. + // Hence, do not enforce the order of invocation of these calls + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil) + imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false) + + gomock.InOrder( + // Ensure that the pause container is created first + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) { + sleepTask.AddTaskENI(mockENI) + sleepTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + assert.Equal(t, "none", string(hostConfig.NetworkMode)) + assert.True(t, strings.Contains(containerName, pauseContainer.Name)) + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + pauseContainer.Name}), + // Ensure that the pause container is started after it's created + 
client.EXPECT().StartContainer(gomock.Any(), containerID+":"+pauseContainer.Name, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + pauseContainer.Name}), + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: 23}, + }, + }, nil), + // Then setting up the pause container network namespace + mockCNIClient.EXPECT().SetupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nsResult, nil), + + // Once the pause container is started, sleep container will be created + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) { + assert.True(t, strings.Contains(containerName, sleepContainer.Name)) + assert.Equal(t, "container:"+containerID+":"+pauseContainer.Name, string(hostConfig.NetworkMode)) + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}), + // Next, the sleep container is started + client.EXPECT().StartContainer(gomock.Any(), containerID+":"+sleepContainer.Name, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + containerEventsWG.Done() + }() 
+ }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}), + ) + + addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG) + taskARNByIP, ok := taskEngine.(*DockerTaskEngine).state.GetTaskByIPAddress(taskIP) + assert.True(t, ok) + assert.Equal(t, sleepTask.Arn, taskARNByIP) + cleanup := make(chan time.Time, 1) + mockTime.EXPECT().After(gomock.Any()).Return(cleanup).AnyTimes() + client.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: 23}, + }, + }, nil) + mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + client.EXPECT().StopContainer(gomock.Any(), containerID+":"+pauseContainer.Name, gomock.Any()).MinTimes(1) + mockCNIClient.EXPECT().ReleaseIPResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1) + + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: containerID + ":" + sleepContainer.Name, + ExitCode: aws.Int(exitCode), + }, + } + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false) +} + +// TestRemoveEvents tests if the task engine can handle task events while the task is being +// cleaned up. 
This test ensures that there's no regression in the task engine and ensures +// there's no deadlock as seen in #313 +func TestRemoveEvents(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + // containerEventsWG is used to force the test to wait until the container created and started + // events are processed + containerEventsWG := sync.WaitGroup{} + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + containerName := make(chan string) + go func() { + name := <-containerName + setCreatedContainerName(name) + }() + + for _, container := range sleepTask.Containers { + validateContainerRunWorkflow(t, container, sleepTask, imageManager, + client, nil, &containerEventsWG, + eventStream, containerName, func() { + }) + } + + addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG) + cleanup := make(chan time.Time, 1) + defer close(cleanup) + mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).AnyTimes() + + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: containerID, + ExitCode: aws.Int(exitCode), + }, + } + + waitForStopEvents(t, taskEngine.StateChangeEvents(), true, false) + sleepTaskStop := testdata.LoadTask("sleep5") + sleepTaskStop.SetDesiredStatus(apitaskstatus.TaskStopped) + taskEngine.AddTask(sleepTaskStop) + + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, removedContainerName string, 
timeout time.Duration) { + assert.Equal(t, containerID, removedContainerName, + "Container name mismatch") + + // Emit a couple of events for the task before cleanup finishes. This forces + // discardEventsUntil to be invoked and should test the code path that + // caused the deadlock, which was fixed with #320 + eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) + eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) + }).Return(nil) + + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()) + + // This ensures that managedTask.waitForStopReported makes progress + sleepTask.SetSentStatus(apitaskstatus.TaskStopped) + // trigger cleanup + cleanup <- time.Now() + // Wait for the task to actually be dead; if we just fallthrough immediately, + // the remove might not have happened (expectation failure) + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + time.Sleep(5 * time.Millisecond) + } +} + +func TestStartTimeoutThenStart(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + testTime.EXPECT().Now().Return(time.Now()).AnyTimes() + testTime.EXPECT().After(gomock.Any()) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) + for _, container := range sleepTask.Containers { + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false) + 
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, x, y, z, timeout interface{}) { + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}) + + client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Return(dockerapi.DockerContainerMetadata{ + Error: &dockerapi.DockerTimeoutError{}, + }) + } + + // Start timeout triggers a container stop as we force stop containers + // when startcontainer times out. See #1043 for details + client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStartContainerError{fmt.Errorf("cannot start container")}, + }).AnyTimes() + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + stateChangeEvents := taskEngine.StateChangeEvents() + taskEngine.AddTask(sleepTask) + waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) + + // Now surprise surprise, it actually did start! 
+ eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + // However, if it starts again, we should not see it be killed; no additional expect + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + + select { + case <-stateChangeEvents: + t.Fatal("Should be out of events") + default: + } +} + +func TestSteadyStatePoll(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + taskEngine.(*DockerTaskEngine).taskSteadyStatePollInterval = taskSteadyStatePollInterval + containerEventsWG := sync.WaitGroup{} + sleepTask := testdata.LoadTask("sleep5") + sleepTask.Arn = uuid.New() + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + containerName := make(chan string) + go func() { + <-containerName + }() + + // set up expectations for each container in the task calling create + start + for _, container := range sleepTask.Containers { + validateContainerRunWorkflow(t, container, sleepTask, imageManager, + client, nil, &containerEventsWG, + eventStream, containerName, func() { + }) + } + testTime.EXPECT().Now().Return(time.Now()).MinTimes(1) + + var wg sync.WaitGroup + wg.Add(1) + + client.EXPECT().DescribeContainer(gomock.Any(), containerID).Return( + apicontainerstatus.ContainerStopped, + dockerapi.DockerContainerMetadata{ + DockerID: containerID, + }).Do(func(ctx interface{}, x interface{}) { + wg.Done() + }) + client.EXPECT().DescribeContainer(gomock.Any(), containerID).Return( + apicontainerstatus.ContainerStopped, + dockerapi.DockerContainerMetadata{ + DockerID: containerID, + }).AnyTimes() + client.EXPECT().StopContainer(gomock.Any(), containerID, 30*time.Second).AnyTimes() + + err := taskEngine.Init(ctx) // start the task engine + 
assert.NoError(t, err) + taskEngine.AddTask(sleepTask) // actually add the task we created + waitForRunningEvents(t, taskEngine.StateChangeEvents()) + containerMap, ok := taskEngine.(*DockerTaskEngine).State().ContainerMapByArn(sleepTask.Arn) + assert.True(t, ok) + dockerContainer, ok := containerMap[sleepTask.Containers[0].Name] + assert.True(t, ok) + + // Wait for container create and start events to be processed + containerEventsWG.Wait() + wg.Wait() + + cleanup := make(chan time.Time) + defer close(cleanup) + testTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + client.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerID, dockerclient.RemoveContainerTimeout).Return(nil) + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()).Return(nil) + + waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) + // trigger cleanup, this ensures all the goroutines were finished + sleepTask.SetSentStatus(apitaskstatus.TaskStopped) + cleanup <- time.Now() + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + time.Sleep(5 * time.Millisecond) + } +} + +func TestStopWithPendingStops(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + testTime.EXPECT().Now().Return(time.Now()).AnyTimes() + testTime.EXPECT().After(gomock.Any()).AnyTimes() + + sleepTask1 := testdata.LoadTask("sleep5") + sleepTask1.StartSequenceNumber = 5 + sleepTask2 := testdata.LoadTask("sleep5") + sleepTask2.Arn = "arn2" + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + err := taskEngine.Init(ctx) + assert.NoError(t, err) + stateChangeEvents := taskEngine.StateChangeEvents() + + defer discardEvents(stateChangeEvents)() + + pullDone := make(chan bool) + pullInvoked := make(chan 
bool) + client.EXPECT().PullImage(gomock.Any(), gomock.Any(), nil, gomock.Any()).Do(func(w, x, y, z interface{}) { + pullInvoked <- true + <-pullDone + }).MaxTimes(2) + + imageManager.EXPECT().RecordContainerReference(gomock.Any()).AnyTimes() + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).AnyTimes() + + taskEngine.AddTask(sleepTask2) + <-pullInvoked + stopSleep2 := testdata.LoadTask("sleep5") + stopSleep2.Arn = "arn2" + stopSleep2.SetDesiredStatus(apitaskstatus.TaskStopped) + stopSleep2.StopSequenceNumber = 4 + taskEngine.AddTask(stopSleep2) + + taskEngine.AddTask(sleepTask1) + stopSleep1 := testdata.LoadTask("sleep5") + stopSleep1.SetDesiredStatus(apitaskstatus.TaskStopped) + stopSleep1.StopSequenceNumber = 5 + taskEngine.AddTask(stopSleep1) + pullDone <- true + // this means the PullImage is only called once due to the task is stopped before it + // gets the pull image lock +} + +func TestCreateContainerSaveDockerIDAndName(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine.SetDataClient(dataClient) + + sleepTask := testdata.LoadTask("sleep5") + sleepTask.Arn = testTaskARN + sleepContainer, _ := sleepTask.ContainerByName("sleep5") + sleepContainer.TaskARNUnsafe = testTaskARN + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{ + DockerID: testDockerID, + }) + metadata := taskEngine.createContainer(sleepTask, sleepContainer) + require.NoError(t, metadata.Error) + + containers, err := dataClient.GetContainers() + require.NoError(t, err) + require.Len(t, containers, 1) + assert.Equal(t, testDockerID, 
containers[0].DockerID) + assert.Contains(t, containers[0].DockerName, sleepContainer.Name) +} + +func TestCreateContainerMetadata(t *testing.T) { + testcases := []struct { + name string + info types.Info + error error + }{ + { + name: "Selinux Security Option", + info: types.Info{SecurityOptions: []string{"selinux"}}, + error: nil, + }, + { + name: "Docker Info Error", + info: types.Info{}, + error: errors.New("Error getting docker info"), + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, _, metadataManager := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine.cfg.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + + sleepTask := testdata.LoadTask("sleep5") + sleepContainer, _ := sleepTask.ContainerByName("sleep5") + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) + client.EXPECT().Info(ctx, dockerclient.InfoTimeout).Return(tc.info, tc.error) + metadataManager.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), tc.info.SecurityOptions) + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + + metadata := taskEngine.createContainer(sleepTask, sleepContainer) + assert.NoError(t, metadata.Error) + }) + } +} + +func TestCreateContainerMergesLabels(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + testTask := &apitask.Task{ + Arn: labelsTaskARN, + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "c1", + DockerConfig: apicontainer.DockerConfig{ + Config: aws.String(`{"Labels":{"key":"value"}}`), + }, + }, + }, + } + expectedConfig, err := 
testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err != nil { + t.Fatal(err) + } + expectedConfig.Labels = map[string]string{ + "com.amazonaws.ecs.task-arn": labelsTaskARN, + "com.amazonaws.ecs.container-name": "c1", + "com.amazonaws.ecs.task-definition-family": "myFamily", + "com.amazonaws.ecs.task-definition-version": "1", + "com.amazonaws.ecs.cluster": "", + "key": "value", + } + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any()) + taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0]) +} + +// TestCreateContainerAddV3EndpointIDToState tests that in createContainer, when the +// container's v3 endpoint id is set, we will add mappings to engine state +func TestCreateContainerAddV3EndpointIDToState(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + + testContainer := &apicontainer.Container{ + Name: "c1", + V3EndpointID: "v3EndpointID", + } + + testTask := &apitask.Task{ + Arn: "myTaskArn", + Family: "myFamily", + Version: "1", + Containers: []*apicontainer.Container{ + testContainer, + }, + } + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + // V3EndpointID mappings are only added to state when dockerID is available. So return one here. 
+ client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{ + DockerID: "dockerID", + }) + taskEngine.createContainer(testTask, testContainer) + + // check that we have added v3 endpoint mappings to state + state := taskEngine.state + + addedTaskARN, ok := state.TaskARNByV3EndpointID("v3EndpointID") + assert.True(t, ok) + assert.Equal(t, testTask.Arn, addedTaskARN) + + addedDockerID, ok := state.DockerIDByV3EndpointID("v3EndpointID") + assert.True(t, ok) + assert.Equal(t, "dockerID", addedDockerID) +} + +// TestTaskTransitionWhenStopContainerTimesout tests that task transitions to stopped +// only when terminal events are received from docker event stream when +// StopContainer times out +func TestTaskTransitionWhenStopContainerTimesout(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + containerStopTimeoutError := dockerapi.DockerContainerMetadata{ + Error: &dockerapi.DockerTimeoutError{ + Transition: "stop", + Duration: 30 * time.Second, + }, + } + dockerEventSent := make(chan int) + for _, container := range sleepTask.Containers { + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false) + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) + + 
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, x, y, z, timeout interface{}) { + go func() { eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}) + + gomock.InOrder( + client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}), + + // StopContainer times out + client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(containerStopTimeoutError), + // Since task is not in steady state, progressContainers causes + // another invocation of StopContainer. Return a timeout error + // for that as well. + client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Do( + func(ctx interface{}, id string, timeout time.Duration) { + go func() { + dockerEventSent <- 1 + // Emit 'ContainerStopped' event to the container event stream + // This should cause the container and the task to transition + // to 'STOPPED' + eventStream <- createDockerEvent(apicontainerstatus.ContainerStopped) + }() + }).Return(containerStopTimeoutError).MinTimes(1), + ) + } + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + stateChangeEvents := taskEngine.StateChangeEvents() + + go taskEngine.AddTask(sleepTask) + // wait for task running + waitForRunningEvents(t, taskEngine.StateChangeEvents()) + // Set the task desired status to be stopped and StopContainer will be called + updateSleepTask := testdata.LoadTask("sleep5") + updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(updateSleepTask) + + // StopContainer timeout error shouldn't cause container/task status change + // until a stop event is received from 
docker event stream + select { + case <-stateChangeEvents: + t.Error("Should not get task events") + case <-dockerEventSent: + t.Logf("Send docker stop event") + go func() { + for { + select { + case <-dockerEventSent: + case <-ctx.Done(): + return + } + } + }() + } + + // StopContainer was called again and received stop event from docker event stream + // Expect it to go to stopped + waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) +} + +// TestTaskTransitionWhenStopContainerReturnsUnretriableError tests if the task transitions +// to stopped without retrying stopping the container in the task when the initial +// stop container call returns an unretriable error from docker, specifically the +// NoSuchContainer error +func TestTaskTransitionWhenStopContainerReturnsUnretriableError(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + containerEventsWG := sync.WaitGroup{} + for _, container := range sleepTask.Containers { + gomock.InOrder( + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(), + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + imageManager.EXPECT().RecordContainerReference(container), + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false), + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + // Simulate successful create container + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, x, y, 
z, timeout interface{}) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}), + // Simulate successful start container + client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Do( + func(ctx interface{}, id string, timeout time.Duration) { + containerEventsWG.Add(1) + go func() { + eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning) + containerEventsWG.Done() + }() + }).Return(dockerapi.DockerContainerMetadata{DockerID: containerID}), + // StopContainer errors out. However, since this is a known unretriable error, + // the task engine should not retry stopping the container and move on. + // If there's a delay in task engine's processing of the ContainerRunning + // event, StopContainer will be invoked again as the engine considers it + // as a stopped container coming back. MinTimes() should guarantee that + // StopContainer is invoked at least once and protects against a test + // failure when there's a delay in task engine processing the ContainerRunning + // event. 
+ client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStopContainerError{dockerapi.NoSuchContainerError{}}, + }).MinTimes(1), + ) + } + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + go taskEngine.AddTask(sleepTask) + // wait for task running + waitForRunningEvents(t, taskEngine.StateChangeEvents()) + containerEventsWG.Wait() + // Set the task desired status to be stopped and StopContainer will be called + updateSleepTask := testdata.LoadTask("sleep5") + updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(updateSleepTask) + // StopContainer was called again and received stop event from docker event stream + // Expect it to go to stopped + waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) +} + +// TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding tests if the task +// transitions to stopped only after receiving the container stopped event from docker when +// the initial stop container call fails with an unknown error. 
+func TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + sleepTask := testdata.LoadTask("sleep5") + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + containerStoppingError := dockerapi.DockerContainerMetadata{ + Error: dockerapi.CannotStopContainerError{errors.New("Error stopping container")}, + } + for _, container := range sleepTask.Containers { + gomock.InOrder( + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(), + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + imageManager.EXPECT().RecordContainerReference(container), + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false), + // Simulate successful create container + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: containerID}), + // Simulate successful start container + client.EXPECT().StartContainer(gomock.Any(), containerID, defaultConfig.ContainerStartTimeout).Return( + dockerapi.DockerContainerMetadata{DockerID: containerID}), + // StopContainer errors out a couple of times + client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(containerStoppingError).Times(2), + // Since task is not in steady state, progressContainers causes + // another invocation of StopContainer. 
Return the 'succeed' response, + // which should cause the task engine to stop invoking this again and + // transition the task to stopped. + client.EXPECT().StopContainer(gomock.Any(), containerID, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + ) + } + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + go taskEngine.AddTask(sleepTask) + // wait for task running + waitForRunningEvents(t, taskEngine.StateChangeEvents()) + // Set the task desired status to be stopped and StopContainer will be called + updateSleepTask := testdata.LoadTask("sleep5") + updateSleepTask.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(updateSleepTask) + // StopContainer invocation should have caused it to stop eventually. + waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) +} + +func TestGetTaskByArn(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + // Need a mock client as AddTask not only adds a task to the engine, but + // also causes the engine to progress the task. 
+ ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + imageManager.EXPECT().RecordContainerReference(gomock.Any()).AnyTimes() + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).AnyTimes() + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + defer taskEngine.Disable() + sleepTask := testdata.LoadTask("sleep5") + sleepTask.SetDesiredStatus(apitaskstatus.TaskStopped) + sleepTaskArn := sleepTask.Arn + sleepTask.SetDesiredStatus(apitaskstatus.TaskStopped) + taskEngine.AddTask(sleepTask) + + _, found := taskEngine.GetTaskByArn(sleepTaskArn) + assert.True(t, found, "Task %s not found", sleepTaskArn) + + _, found = taskEngine.GetTaskByArn(sleepTaskArn + "arn") + assert.False(t, found, "Task with invalid arn found in the task engine") +} + +func TestPauseContainerHappyPath(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, dockerClient, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + cniClient := mock_ecscni.NewMockCNIClient(ctrl) + taskEngine.(*DockerTaskEngine).cniClient = cniClient + taskEngine.(*DockerTaskEngine).taskSteadyStatePollInterval = taskSteadyStatePollInterval + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + sleepTask := testdata.LoadTask("sleep5TwoContainers") + sleepContainer1 := sleepTask.Containers[0] + sleepContainer1.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + sleepContainer2 := sleepTask.Containers[1] + sleepContainer2.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + + // Add 
eni information to the task so the task can add dependency of pause container + sleepTask.AddTaskENI(mockENI) + + sleepTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + + dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + + sleepContainerID1 := containerID + "1" + sleepContainerID2 := containerID + "2" + pauseContainerID := "pauseContainerID" + // Pause container will be launched first + gomock.InOrder( + dockerClient.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + dockerClient.EXPECT().CreateContainer( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, config *dockercontainer.Config, x, y, z interface{}) { + name, ok := config.Labels[labelPrefix+"container-name"] + assert.True(t, ok) + assert.Equal(t, apitask.NetworkPauseContainerName, name) + }).Return(dockerapi.DockerContainerMetadata{DockerID: "pauseContainerID"}), + dockerClient.EXPECT().StartContainer(gomock.Any(), pauseContainerID, defaultConfig.ContainerStartTimeout).Return( + dockerapi.DockerContainerMetadata{DockerID: "pauseContainerID"}), + dockerClient.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: pauseContainerID, + State: &types.ContainerState{Pid: containerPid}, + }, + }, nil), + cniClient.EXPECT().SetupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nsResult, nil), + ) + + // For the other container + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + dockerClient.EXPECT().PullImage(gomock.Any(), gomock.Any(), nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}).Times(2) + imageManager.EXPECT().RecordContainerReference(gomock.Any()).Return(nil).Times(2) + 
imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).Times(2) + dockerClient.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).Times(2) + + dockerClient.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{DockerID: sleepContainerID1}) + dockerClient.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any()).Return(dockerapi.DockerContainerMetadata{DockerID: sleepContainerID2}) + + dockerClient.EXPECT().StartContainer(gomock.Any(), sleepContainerID1, defaultConfig.ContainerStartTimeout).Return( + dockerapi.DockerContainerMetadata{DockerID: sleepContainerID1}) + dockerClient.EXPECT().StartContainer(gomock.Any(), sleepContainerID2, defaultConfig.ContainerStartTimeout).Return( + dockerapi.DockerContainerMetadata{DockerID: sleepContainerID2}) + + cleanup := make(chan time.Time) + defer close(cleanup) + mockTime.EXPECT().Now().Return(time.Now()).MinTimes(1) + dockerClient.EXPECT().DescribeContainer(gomock.Any(), sleepContainerID1).AnyTimes() + dockerClient.EXPECT().DescribeContainer(gomock.Any(), sleepContainerID2).AnyTimes() + dockerClient.EXPECT().DescribeContainer(gomock.Any(), pauseContainerID).AnyTimes() + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + taskEngine.AddTask(sleepTask) + stateChangeEvents := taskEngine.StateChangeEvents() + verifyTaskIsRunning(stateChangeEvents, sleepTask) + + var wg sync.WaitGroup + wg.Add(1) + mockTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + + gomock.InOrder( + dockerClient.EXPECT().StopContainer(gomock.Any(), sleepContainerID2, gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: sleepContainerID2}), + + dockerClient.EXPECT().InspectContainer(gomock.Any(), pauseContainerID, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: pauseContainerID, + State: &types.ContainerState{Pid: 
containerPid}, + }, + }, nil), + cniClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), + + dockerClient.EXPECT().StopContainer(gomock.Any(), pauseContainerID, gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: pauseContainerID}), + + cniClient.EXPECT().ReleaseIPResource(gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx context.Context, cfg *ecscni.Config, timeout time.Duration) { + wg.Done() + }).Return(nil), + ) + + dockerClient.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3) + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()).Return(nil).Times(2) + + // Simulate a container stop event from docker + eventStream <- dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: sleepContainerID1, + ExitCode: aws.Int(exitCode), + }, + } + + verifyTaskIsStopped(stateChangeEvents, sleepTask) + sleepTask.SetSentStatus(apitaskstatus.TaskStopped) + cleanup <- time.Now() + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + t.Logf("Found %d tasks in the engine; first task arn: %s", len(tasks), tasks[0].Arn) + fmt.Printf("Found %d tasks in the engine; first task arn: %s\n", len(tasks), tasks[0].Arn) + time.Sleep(5 * time.Millisecond) + } + wg.Wait() +} + +func TestBuildCNIConfigFromTaskContainer(t *testing.T) { + config := defaultConfig + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &config) + defer ctrl.Finish() + + testTask := testdata.LoadTask("sleep5") + testTask.AddTaskENI(mockENI) + testTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + containerInspectOutput 
:= &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + } + + cniConfig, err := taskEngine.(*DockerTaskEngine).buildCNIConfigFromTaskContainer(testTask, containerInspectOutput, true) + assert.NoError(t, err) + assert.Equal(t, containerID, cniConfig.ContainerID) + assert.Equal(t, strconv.Itoa(containerPid), cniConfig.ContainerPID) + assert.Equal(t, mac, cniConfig.ID, "ID should be set to the mac of eni") + // We expect 3 NetworkConfig objects in the cni Config wrapper object: + // ENI, Bridge and Appmesh + require.Len(t, cniConfig.NetworkConfigs, 3) +} + +func TestProvisionContainerResources(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + taskEngine.SetDataClient(dataClient) + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + testTask := testdata.LoadTask("sleep5") + pauseContainer := &apicontainer.Container{ + Name: "pausecontainer", + Type: apicontainer.ContainerCNIPause, + } + testTask.Containers = append(testTask.Containers, pauseContainer) + testTask.AddTaskENI(mockENI) + volRes := &taskresourcevolume.VolumeResource{} + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{ + "dockerVolume": {volRes}, + } + taskEngine.(*DockerTaskEngine).State().AddTask(testTask) + taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: dockerContainerName, + Container: pauseContainer, + }, testTask) + + gomock.InOrder( + dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: 
containerPid}, + }, + }, nil), + mockCNIClient.EXPECT().SetupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nsResult, nil), + ) + + require.Nil(t, taskEngine.(*DockerTaskEngine).provisionContainerResources(testTask, pauseContainer).Error) + assert.Equal(t, strconv.Itoa(containerPid), volRes.GetPauseContainerPID()) + assert.Equal(t, taskIP, testTask.GetLocalIPAddress()) + savedTasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, savedTasks, 1) +} + +func TestProvisionContainerResourcesInspectError(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + testTask := testdata.LoadTask("sleep5") + pauseContainer := &apicontainer.Container{ + Name: "pausecontainer", + Type: apicontainer.ContainerCNIPause, + } + testTask.Containers = append(testTask.Containers, pauseContainer) + testTask.AddTaskENI(mockENI) + taskEngine.(*DockerTaskEngine).State().AddTask(testTask) + taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: dockerContainerName, + Container: pauseContainer, + }, testTask) + + dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(nil, errors.New("test error")) + + assert.NotNil(t, taskEngine.(*DockerTaskEngine).provisionContainerResources(testTask, pauseContainer).Error) +} + +// TestStopPauseContainerCleanupCalled tests when stopping the pause container +// its network namespace should be cleaned up first +func TestStopPauseContainerCleanupCalled(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + 
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + testTask := testdata.LoadTask("sleep5") + pauseContainer := &apicontainer.Container{ + Name: "pausecontainer", + Type: apicontainer.ContainerCNIPause, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + } + testTask.Containers = append(testTask.Containers, pauseContainer) + testTask.AddTaskENI(mockENI) + testTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + taskEngine.(*DockerTaskEngine).State().AddTask(testTask) + taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: dockerContainerName, + Container: pauseContainer, + }, testTask) + + gomock.InOrder( + dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + }, nil), + mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), + dockerClient.EXPECT().StopContainer(gomock.Any(), + containerID, + defaultConfig.DockerStopTimeout, + ).Return(dockerapi.DockerContainerMetadata{}), + ) + + taskEngine.(*DockerTaskEngine).stopContainer(testTask, pauseContainer) + require.True(t, pauseContainer.IsContainerTornDown()) +} + +// TestStopPauseContainerCleanupCalled tests when stopping the pause container +// its network namespace should be cleaned up first +func TestStopPauseContainerCleanupDelay(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + cfg := config.DefaultConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + cfg.ENIPauseContainerCleanupDelaySeconds = expectedDelaySeconds + + delayedChan := make(chan time.Duration, 1) + ctrl, dockerClient, _, 
taskEngine, _, _, _ := mocks(t, ctx, &cfg) + taskEngine.(*DockerTaskEngine).handleDelay = func(d time.Duration) { + delayedChan <- d + } + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + testTask := testdata.LoadTask("sleep5") + pauseContainer := &apicontainer.Container{ + Name: "pausecontainer", + Type: apicontainer.ContainerCNIPause, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + } + testTask.Containers = append(testTask.Containers, pauseContainer) + testTask.AddTaskENI(mockENI) + taskEngine.(*DockerTaskEngine).State().AddTask(testTask) + taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: dockerContainerName, + Container: pauseContainer, + }, testTask) + + gomock.InOrder( + dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + }, nil), + mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), + dockerClient.EXPECT().StopContainer(gomock.Any(), + containerID, + defaultConfig.DockerStopTimeout, + ).Return(dockerapi.DockerContainerMetadata{}), + ) + + taskEngine.(*DockerTaskEngine).stopContainer(testTask, pauseContainer) + + select { + case actualDelay := <-delayedChan: + assert.Equal(t, expectedDelay, actualDelay) + require.True(t, pauseContainer.IsContainerTornDown()) + default: + assert.Fail(t, "engine.handleDelay wasn't called") + } +} + +// TestCheckTearDownPauseContainer that the pause container teardown works and is idempotent +func TestCheckTearDownPauseContainer(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, dockerClient, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockCNIClient := mock_ecscni.NewMockCNIClient(ctrl) + 
taskEngine.(*DockerTaskEngine).cniClient = mockCNIClient + testTask := testdata.LoadTask("sleep5") + pauseContainer := &apicontainer.Container{ + Name: "pausecontainer", + Type: apicontainer.ContainerCNIPause, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + } + testTask.Containers = append(testTask.Containers, pauseContainer) + testTask.AddTaskENI(mockENI) + testTask.SetAppMesh(&appmesh.AppMesh{ + IgnoredUID: ignoredUID, + ProxyIngressPort: proxyIngressPort, + ProxyEgressPort: proxyEgressPort, + AppPorts: []string{ + appPort, + }, + EgressIgnoredIPs: []string{ + egressIgnoredIP, + }, + }) + taskEngine.(*DockerTaskEngine).State().AddTask(testTask) + taskEngine.(*DockerTaskEngine).State().AddContainer(&apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: dockerContainerName, + Container: pauseContainer, + }, testTask) + + gomock.InOrder( + dockerClient.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + }, nil).MaxTimes(1), + mockCNIClient.EXPECT().CleanupNS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).MaxTimes(1), + ) + + taskEngine.(*DockerTaskEngine).checkTearDownPauseContainer(testTask) + require.True(t, pauseContainer.IsContainerTornDown()) + + // Invoke one more time to check for idempotency (mocks configured with maxTimes = 1) + taskEngine.(*DockerTaskEngine).checkTearDownPauseContainer(testTask) +} + +// TestTaskWithCircularDependency tests the task with containers of which the +// dependencies can't be resolved +func TestTaskWithCircularDependency(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + client.EXPECT().ContainerEvents(gomock.Any()) + + task := testdata.LoadTask("circular_dependency") + + err := taskEngine.Init(ctx) 
+ assert.NoError(t, err) + + events := taskEngine.StateChangeEvents() + go taskEngine.AddTask(task) + event := <-events + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, "Expected task to move to stopped directly") + _, ok := taskEngine.(*DockerTaskEngine).state.TaskByArn(task.Arn) + assert.True(t, ok, "Task state should be added to the agent state") + + _, ok = taskEngine.(*DockerTaskEngine).managedTasks[task.Arn] + assert.False(t, ok, "Task should not be added to task manager for processing") +} + +// TestCreateContainerOnAgentRestart tests when agent restarts it should use the +// docker container name restored from agent state file to create the container +func TestCreateContainerOnAgentRestart(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + state := taskEngine.State() + sleepTask := testdata.LoadTask("sleep5") + sleepContainer, _ := sleepTask.ContainerByName("sleep5") + // Store the generated container name to state + state.AddContainer(&apicontainer.DockerContainer{DockerID: "dockerID", DockerName: "docker_container_name", Container: sleepContainer}, sleepTask) + + gomock.InOrder( + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), "docker_container_name", gomock.Any()), + ) + + metadata := taskEngine.createContainer(sleepTask, sleepContainer) + if metadata.Error != nil { + t.Error("Unexpected error", metadata.Error) + } +} + +func TestPullCNIImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, privateTaskEngine, _, _, _ := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + + container := &apicontainer.Container{ + Type: 
apicontainer.ContainerCNIPause, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + metadata := taskEngine.pullContainer(task, container) + assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata") +} + +func TestPullNormalImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: imageName, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + } + + client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any()) + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true) + metadata := taskEngine.pullContainer(task, container) + assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata") +} + +func TestPullImageWithImagePullOnceBehavior(t *testing.T) { + testcases := []struct { + name string + pullSucceeded bool + }{ + { + name: "PullSucceeded is true", + pullSucceeded: true, + }, + { + name: "PullSucceeded is false", + pullSucceeded: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullOnceBehavior}) + defer ctrl.Finish() + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: 
imageName, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + PullSucceeded: tc.pullSucceeded, + } + if !tc.pullSucceeded { + client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any()) + } + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true).Times(2) + metadata := taskEngine.pullContainer(task, container) + assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata") + }) + } +} + +func TestPullImageWithImagePullPreferCachedBehaviorWithCachedImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullPreferCachedBehavior}) + defer ctrl.Finish() + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: imageName, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + } + client.EXPECT().InspectImage(imageName).Return(nil, nil) + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true) + metadata := taskEngine.pullContainer(task, container) + assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata") +} + +func TestPullImageWithImagePullPreferCachedBehaviorWithoutCachedImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{ImagePullBehavior: config.ImagePullPreferCachedBehavior}) + defer ctrl.Finish() + 
taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: imageName, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + } + client.EXPECT().InspectImage(imageName).Return(nil, errors.New("error")) + client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any()) + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true) + metadata := taskEngine.pullContainer(task, container) + assert.Equal(t, dockerapi.DockerContainerMetadata{}, metadata, "expected empty metadata") +} + +func TestUpdateContainerReference(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: imageName, + } + task := &apitask.Task{ + Containers: []*apicontainer.Container{container}, + } + imageState := &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + } + + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(imageState, true) + taskEngine.updateContainerReference(true, container, task.Arn) + assert.True(t, imageState.PullSucceeded, "PullSucceeded set to false") +} + +// TestPullAndUpdateContainerReference checks whether a container is added to task engine state when +// Test # | Image availability | DependentContainersPullUpfront | ImagePullBehavior +// ----------------------------------------------------------------------------------- +// 1 | remote | enabled | 
default +// 2 | remote | disabled | default +// 3 | local | enabled | default +// 4 | local | enabled | once +// 5 | local | enabled | prefer-cached +// 6 | local | enabled | always +func TestPullAndUpdateContainerReference(t *testing.T) { + testcases := []struct { + Name string + ImagePullUpfront config.BooleanDefaultFalse + ImagePullBehavior config.ImagePullBehaviorType + ImageState *image.ImageState + ImageInspect *types.ImageInspect + InspectImage bool + NumOfPulledContainer int + PullImageErr apierrors.NamedError + }{ + { + Name: "DependentContainersPullUpfrontEnabledWithRemoteImage", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ImagePullBehavior: config.ImagePullDefaultBehavior, + ImageState: &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + }, + InspectImage: false, + NumOfPulledContainer: 1, + PullImageErr: nil, + }, + { + Name: "DependentContainersPullUpfrontDisabledWithRemoteImage", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, + ImagePullBehavior: config.ImagePullDefaultBehavior, + ImageState: &image.ImageState{ + Image: &image.Image{ImageID: "id"}, + }, + InspectImage: false, + NumOfPulledContainer: 1, + PullImageErr: nil, + }, + { + Name: "DependentContainersPullUpfrontEnabledWithCachedImage", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ImagePullBehavior: config.ImagePullDefaultBehavior, + ImageState: nil, + ImageInspect: nil, + InspectImage: true, + NumOfPulledContainer: 1, + PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")}, + }, + { + Name: "DependentContainersPullUpfrontEnabledAndImagePullOnceBehavior", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ImagePullBehavior: config.ImagePullOnceBehavior, + ImageState: nil, + ImageInspect: nil, + InspectImage: true, + NumOfPulledContainer: 1, + PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")}, + }, + { + Name: 
"DependentContainersPullUpfrontEnabledAndImagePullPreferCachedBehavior", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ImagePullBehavior: config.ImagePullPreferCachedBehavior, + ImageState: nil, + ImageInspect: nil, + InspectImage: true, + NumOfPulledContainer: 1, + PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")}, + }, + { + Name: "DependentContainersPullUpfrontEnabledAndImagePullAlwaysBehavior", + ImagePullUpfront: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + ImagePullBehavior: config.ImagePullAlwaysBehavior, + ImageState: nil, + ImageInspect: nil, + InspectImage: false, + NumOfPulledContainer: 0, + PullImageErr: dockerapi.CannotPullContainerError{fmt.Errorf("error")}, + }, + } + + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + cfg := &config.Config{ + DependentContainersPullUpfront: tc.ImagePullUpfront, + ImagePullBehavior: tc.ImagePullBehavior, + } + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, cfg) + defer ctrl.Finish() + + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + taskEngine._time = nil + imageName := "image" + taskArn := "taskArn" + container := &apicontainer.Container{ + Type: apicontainer.ContainerNormal, + Image: imageName, + Essential: true, + } + + task := &apitask.Task{ + Arn: taskArn, + Containers: []*apicontainer.Container{container}, + } + + client.EXPECT().PullImage(gomock.Any(), imageName, nil, gomock.Any()). 
+ Return(dockerapi.DockerContainerMetadata{Error: tc.PullImageErr}) + + if tc.InspectImage { + client.EXPECT().InspectImage(imageName).Return(tc.ImageInspect, nil) + } + + imageManager.EXPECT().RecordContainerReference(container) + imageManager.EXPECT().GetImageStateFromImageName(imageName).Return(tc.ImageState, false) + metadata := taskEngine.pullAndUpdateContainerReference(task, container) + pulledContainersMap, _ := taskEngine.State().PulledContainerMapByArn(taskArn) + require.Len(t, pulledContainersMap, tc.NumOfPulledContainer) + assert.Equal(t, dockerapi.DockerContainerMetadata{Error: tc.PullImageErr}, + metadata, "expected metadata with error") + }) + } +} + +// TestMetadataFileUpdatedAgentRestart checks whether metadataManager.Update(...) is +// invoked in the path DockerTaskEngine.Init() -> .synchronizeState() -> .updateMetadataFile(...) +// for the following case: +// agent starts, container created, metadata file created, agent restarted, container recovered +// during task engine init, metadata file updated +func TestMetadataFileUpdatedAgentRestart(t *testing.T) { + conf := &defaultConfig + conf.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, metadataManager := mocks(t, ctx, conf) + defer ctrl.Finish() + + var metadataUpdateWG sync.WaitGroup + metadataUpdateWG.Add(1) + taskEngine, _ := privateTaskEngine.(*DockerTaskEngine) + assert.True(t, taskEngine.cfg.ContainerMetadataEnabled.Enabled(), "ContainerMetadataEnabled set to false.") + + taskEngine._time = nil + state := taskEngine.State() + task := testdata.LoadTask("sleep5") + container, _ := task.ContainerByName("sleep5") + assert.False(t, container.MetadataFileUpdated) + container.SetKnownStatus(apicontainerstatus.ContainerRunning) + dockerContainer := &apicontainer.DockerContainer{DockerID: containerID, Container: container} + expectedTaskARN 
:= task.Arn + expectedDockerID := dockerContainer.DockerID + expectedContainerName := container.Name + + state.AddTask(task) + state.AddContainer(dockerContainer, task) + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()) + imageManager.EXPECT().RecordContainerReference(gomock.Any()) + + metadataManager.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, dockerID string, task *apitask.Task, containerName string) { + assert.Equal(t, expectedTaskARN, task.Arn) + assert.Equal(t, expectedContainerName, containerName) + assert.Equal(t, expectedDockerID, dockerID) + metadataUpdateWG.Done() + }) + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + defer taskEngine.Disable() + metadataUpdateWG.Wait() +} + +// TestTaskUseExecutionRolePullECRImage tests the agent will use the execution role +// credentials to pull from an ECR repository +func TestTaskUseExecutionRolePullECRImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, _ := mocks( + t, ctx, &defaultConfig) + defer ctrl.Finish() + + credentialsID := "execution role" + accessKeyID := "akid" + secretAccessKey := "sakid" + sessionToken := "token" + executionRoleCredentials := credentials.IAMRoleCredentials{ + CredentialsID: credentialsID, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + } + + testTask := testdata.LoadTask("sleep5") + // Configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + UseExecutionRole: true, + }, + } + container := testTask.Containers[0] + + 
mockTime.EXPECT().Now().AnyTimes() + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(credentials.TaskIAMRoleCredentials{ + ARN: "", + IAMRoleCredentials: executionRoleCredentials, + }, true) + client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, image string, auth *apicontainer.RegistryAuthenticationData, timeout interface{}) { + assert.Equal(t, container.Image, image) + assert.Equal(t, auth.ECRAuthData.GetPullCredentials(), executionRoleCredentials) + }).Return(dockerapi.DockerContainerMetadata{}) + imageManager.EXPECT().RecordContainerReference(container).Return(nil) + imageManager.EXPECT().GetImageStateFromImageName(container.Image) + + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) +} + +// TestTaskUseExecutionRolePullPrivateRegistryImage tests the agent will use the +// execution role credentials to pull from a private repository +func TestTaskUseExecutionRolePullPrivateRegistryImage(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, imageManager, _ := mocks( + t, ctx, &defaultConfig) + defer ctrl.Finish() + + credentialsID := "execution role" + accessKeyID := "akid" + secretAccessKey := "sakid" + sessionToken := "token" + executionRoleCredentials := credentials.IAMRoleCredentials{ + CredentialsID: credentialsID, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + } + testTask := testdata.LoadTask("sleep5") + // Configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + asmAuthData := &apicontainer.ASMAuthData{ + CredentialsParameter: secretID, + Region: region, + } + testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: asmAuthData, + } + requiredASMResources := []*apicontainer.ASMAuthData{asmAuthData} + 
asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + asmAuthRes := asmauth.NewASMAuthResource(testTask.Arn, requiredASMResources, + credentialsID, credentialsManager, asmClientCreator) + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{ + asmauth.ResourceName: {asmAuthRes}, + } + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + asmAuthDataBytes, _ := json.Marshal(&asm.AuthDataValue{ + Username: aws.String(username), + Password: aws.String(password), + }) + asmAuthDataVal := string(asmAuthDataBytes) + asmSecretValue := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmAuthDataVal), + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return( + credentials.TaskIAMRoleCredentials{ + ARN: "", + IAMRoleCredentials: executionRoleCredentials, + }, true), + asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Return(asmSecretValue, nil), + ) + require.NoError(t, asmAuthRes.Create()) + container := testTask.Containers[0] + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, image string, auth *apicontainer.RegistryAuthenticationData, timeout interface{}) { + assert.Equal(t, container.Image, image) + dac := auth.ASMAuthData.GetDockerAuthConfig() + assert.Equal(t, username, dac.Username) + assert.Equal(t, password, dac.Password) + }).Return(dockerapi.DockerContainerMetadata{}) + imageManager.EXPECT().RecordContainerReference(container).Return(nil) + imageManager.EXPECT().GetImageStateFromImageName(container.Image) + + ret := taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + assert.Nil(t, ret.Error) +} + +// TestTaskUseExecutionRolePullPrivateRegistryImageNoASMResource tests the +// docker task engine code path for returning error for missing ASM resource 
+func TestTaskUseExecutionRolePullPrivateRegistryImageNoASMResource(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, mockTime, taskEngine, _, _, _ := mocks( + t, ctx, &defaultConfig) + defer ctrl.Finish() + + testTask := testdata.LoadTask("sleep5") + // Configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + asmAuthData := &apicontainer.ASMAuthData{ + CredentialsParameter: secretID, + Region: region, + } + testTask.Containers[0].RegistryAuthentication = &apicontainer.RegistryAuthenticationData{ + Type: "asm", + ASMAuthData: asmAuthData, + } + + // no asm auth resource in task + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{} + + container := testTask.Containers[0] + mockTime.EXPECT().Now().AnyTimes() + + // ensure pullContainer returns error + ret := taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + assert.NotNil(t, ret.Error) +} + +// TestNewTaskTransitionOnRestart tests the agent will process the task recorded in +// the state file on restart +func TestNewTaskTransitionOnRestart(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockTime.EXPECT().Now().AnyTimes() + mockTime.EXPECT().After(gomock.Any()).AnyTimes() + client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1) + client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1) + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + state := dockerTaskEngine.State() + testTask := testdata.LoadTask("sleep5") + // add the task to the state to simulate the agent restored the state on restart + state.AddTask(testTask) + // Set the task to be stopped so that the process can done quickly + testTask.SetDesiredStatus(apitaskstatus.TaskStopped) + 
dockerTaskEngine.synchronizeState() + _, ok := dockerTaskEngine.managedTasks[testTask.Arn] + assert.True(t, ok, "task wasnot started") +} + +// TestTaskWaitForHostResourceOnRestart tests task stopped by acs but hasn't +// reached stopped should block the later task to start +func TestTaskWaitForHostResourceOnRestart(t *testing.T) { + // Task 1 stopped by backend + taskStoppedByACS := testdata.LoadTask("sleep5") + taskStoppedByACS.SetDesiredStatus(apitaskstatus.TaskStopped) + taskStoppedByACS.SetStopSequenceNumber(1) + taskStoppedByACS.SetKnownStatus(apitaskstatus.TaskRunning) + // Task 2 has essential container stopped + taskEssentialContainerStopped := testdata.LoadTask("sleep5") + taskEssentialContainerStopped.Arn = "task_Essential_Container_Stopped" + taskEssentialContainerStopped.SetDesiredStatus(apitaskstatus.TaskStopped) + taskEssentialContainerStopped.SetKnownStatus(apitaskstatus.TaskRunning) + // Normal task 3 needs to be started + taskNotStarted := testdata.LoadTask("sleep5") + taskNotStarted.Arn = "task_Not_started" + + conf := &defaultConfig + conf.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled} + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, privateTaskEngine, _, imageManager, _ := mocks(t, ctx, conf) + defer ctrl.Finish() + + client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1) + client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1) + + err := privateTaskEngine.Init(ctx) + assert.NoError(t, err) + + taskEngine := privateTaskEngine.(*DockerTaskEngine) + taskEngine.State().AddTask(taskStoppedByACS) + taskEngine.State().AddTask(taskNotStarted) + taskEngine.State().AddTask(taskEssentialContainerStopped) + + taskEngine.State().AddContainer(&apicontainer.DockerContainer{ + Container: taskStoppedByACS.Containers[0], + DockerID: containerID + "1", + DockerName: dockerContainerName + "1", + }, taskStoppedByACS) + 
taskEngine.State().AddContainer(&apicontainer.DockerContainer{ + Container: taskNotStarted.Containers[0], + DockerID: containerID + "2", + DockerName: dockerContainerName + "2", + }, taskNotStarted) + taskEngine.State().AddContainer(&apicontainer.DockerContainer{ + Container: taskEssentialContainerStopped.Containers[0], + DockerID: containerID + "3", + DockerName: dockerContainerName + "3", + }, taskEssentialContainerStopped) + + // these are performed in synchronizeState on restart + client.EXPECT().DescribeContainer(gomock.Any(), gomock.Any()).Return(apicontainerstatus.ContainerRunning, dockerapi.DockerContainerMetadata{ + DockerID: containerID, + }).Times(3) + imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3) + + // start the two tasks + taskEngine.synchronizeState() + + var waitStopWG sync.WaitGroup + waitStopWG.Add(1) + go func() { + // This is to confirm the other task is waiting + time.Sleep(1 * time.Second) + // Remove the task sequence number 1 from waitgroup + taskEngine.taskStopGroup.Done(1) + waitStopWG.Done() + }() + + // task with sequence number 2 should wait until 1 is removed from the waitgroup + taskEngine.taskStopGroup.Wait(2) + waitStopWG.Wait() +} + +// TestPullStartedStoppedAtWasSetCorrectly tests the PullStartedAt and PullStoppedAt +// was set correctly +func TestPullStartedStoppedAtWasSetCorrectly(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + testTask := &apitask.Task{ + Arn: "taskArn", + } + container := &apicontainer.Container{ + Image: "image1", + } + startTime1 := time.Now() + startTime2 := startTime1.Add(time.Second) + startTime3 := startTime2.Add(time.Second) + stopTime1 := startTime3.Add(time.Second) + stopTime2 := stopTime1.Add(time.Second) + stopTime3 := stopTime2.Add(time.Second) + + client.EXPECT().PullImage(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()).Times(3) + imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3) + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).Times(3) + + gomock.InOrder( + // three container pull start timestamp + mockTime.EXPECT().Now().Return(startTime1), + mockTime.EXPECT().Now().Return(startTime2), + mockTime.EXPECT().Now().Return(startTime3), + + // three container pull stop timestamp + mockTime.EXPECT().Now().Return(stopTime1), + mockTime.EXPECT().Now().Return(stopTime2), + mockTime.EXPECT().Now().Return(stopTime3), + ) + + // Pull three images, the PullStartedAt should be the pull of the first container + // and PullStoppedAt should be the pull completion of the last container + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + + assert.Equal(t, testTask.PullStartedAtUnsafe, startTime1) + assert.Equal(t, testTask.PullStoppedAtUnsafe, stopTime3) +} + +// TestPullStoppedAtWasSetCorrectlyWhenPullFail tests the PullStoppedAt was set +// correctly when the pull failed +func TestPullStoppedAtWasSetCorrectlyWhenPullFail(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + testTask := &apitask.Task{ + Arn: "taskArn", + } + container := &apicontainer.Container{ + Image: "image1", + } + + startTime1 := time.Now() + startTime2 := startTime1.Add(time.Second) + startTime3 := startTime2.Add(time.Second) + stopTime1 := startTime3.Add(time.Second) + stopTime2 := stopTime1.Add(time.Second) + stopTime3 := stopTime2.Add(time.Second) + + gomock.InOrder( + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, 
gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return( + dockerapi.DockerContainerMetadata{Error: dockerapi.CannotPullContainerError{fmt.Errorf("error")}}), + ) + imageManager.EXPECT().RecordContainerReference(gomock.Any()).Times(3) + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).Times(3) + gomock.InOrder( + // three container pull start timestamp + mockTime.EXPECT().Now().Return(startTime1), + mockTime.EXPECT().Now().Return(startTime2), + mockTime.EXPECT().Now().Return(startTime3), + + // three container pull stop timestamp + mockTime.EXPECT().Now().Return(stopTime1), + mockTime.EXPECT().Now().Return(stopTime2), + mockTime.EXPECT().Now().Return(stopTime3), + ) + + // Pull three images, the PullStartedAt should be the pull of the first container + // and PullStoppedAt should be the pull completion of the last container + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + + assert.Equal(t, testTask.PullStartedAtUnsafe, startTime1) + assert.Equal(t, testTask.PullStoppedAtUnsafe, stopTime3) +} + +func TestSynchronizeContainerStatus(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + dockerID := "1234" + dockerContainer := &apicontainer.DockerContainer{ + DockerID: dockerID, + DockerName: "c1", + Container: &apicontainer.Container{}, + } + labels := map[string]string{ + "name": "metadata", + } + volumes := []types.MountPoint{ + { + Name: "volume", + Source: "/src/vol", + Destination: "/vol", + }, + } + created := time.Now() + gomock.InOrder( + client.EXPECT().DescribeContainer(gomock.Any(), dockerID).Return(apicontainerstatus.ContainerRunning, + 
dockerapi.DockerContainerMetadata{ + Labels: labels, + DockerID: dockerID, + CreatedAt: created, + Volumes: volumes, + }), + imageManager.EXPECT().RecordContainerReference(dockerContainer.Container), + ) + taskEngine.(*DockerTaskEngine).synchronizeContainerStatus(dockerContainer, nil) + assert.Equal(t, created, dockerContainer.Container.GetCreatedAt()) + assert.Equal(t, labels, dockerContainer.Container.GetLabels()) + assert.Equal(t, volumes, dockerContainer.Container.GetVolumes()) +} + +// TestHandleDockerHealthEvent tests the docker health event will only cause the +// container health status change +func TestHandleDockerHealthEvent(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + state := taskEngine.(*DockerTaskEngine).State() + testTask := testdata.LoadTask("sleep5") + testContainer := testTask.Containers[0] + testContainer.HealthCheckType = "docker" + + state.AddTask(testTask) + state.AddContainer(&apicontainer.DockerContainer{DockerID: "id", + DockerName: "container_name", + Container: testContainer, + }, testTask) + + taskEngine.(*DockerTaskEngine).handleDockerEvent(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + Type: apicontainer.ContainerHealthEvent, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: "id", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + }, + }, + }) + assert.Equal(t, testContainer.Health.Status, apicontainerstatus.ContainerHealthy) +} + +func TestContainerMetadataUpdatedOnRestart(t *testing.T) { + dockerID := "dockerID_created" + labels := map[string]string{ + "name": "metadata", + } + testCases := []struct { + stage string + status apicontainerstatus.ContainerStatus + created time.Time + started time.Time + finished time.Time + portBindings []apicontainer.PortBinding + exitCode *int + err 
dockerapi.DockerStateError + }{ + { + stage: "created", + status: apicontainerstatus.ContainerCreated, + created: time.Now(), + }, + { + stage: "started", + status: apicontainerstatus.ContainerRunning, + started: time.Now(), + portBindings: []apicontainer.PortBinding{ + { + ContainerPort: 80, + HostPort: 80, + BindIP: "0.0.0.0/0", + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + }, + { + stage: "stopped", + finished: time.Now(), + exitCode: aws.Int(1), + }, + { + stage: "failed", + status: apicontainerstatus.ContainerStopped, + err: dockerapi.NewDockerStateError("error"), + exitCode: aws.Int(1), + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Agent restarted during container: %s", tc.stage), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + ctrl, client, _, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + dockerContainer := &apicontainer.DockerContainer{ + DockerID: dockerID, + DockerName: fmt.Sprintf("docker%s", tc.stage), + Container: &apicontainer.Container{}, + } + task := &apitask.Task{} + + if tc.stage == "created" { + dockerContainer.DockerID = "" + task.Volumes = []apitask.TaskVolume{ + { + Name: "empty", + Volume: &taskresourcevolume.LocalDockerVolume{}, + }, + } + client.EXPECT().InspectContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: dockerID, + Created: (tc.created).Format(time.RFC3339), + State: &types.ContainerState{ + Health: &types.Health{}, + }, + }, + Config: &dockercontainer.Config{ + Labels: labels, + }, + }, nil) + imageManager.EXPECT().RecordContainerReference(dockerContainer.Container).AnyTimes() + } else { + client.EXPECT().DescribeContainer(gomock.Any(), dockerID).Return(tc.status, dockerapi.DockerContainerMetadata{ + Labels: labels, + DockerID: dockerID, + CreatedAt: tc.created, + StartedAt: tc.started, + FinishedAt: tc.finished, + 
PortBindings: tc.portBindings, + ExitCode: tc.exitCode, + Error: tc.err, + }) + imageManager.EXPECT().RecordContainerReference(dockerContainer.Container).AnyTimes() + } + + taskEngine.(*DockerTaskEngine).synchronizeContainerStatus(dockerContainer, task) + assert.Equal(t, labels, dockerContainer.Container.GetLabels()) + assert.Equal(t, (tc.created).Format(time.RFC3339), (dockerContainer.Container.GetCreatedAt()).Format(time.RFC3339)) + assert.Equal(t, (tc.started).Format(time.RFC3339), (dockerContainer.Container.GetStartedAt()).Format(time.RFC3339)) + assert.Equal(t, (tc.finished).Format(time.RFC3339), (dockerContainer.Container.GetFinishedAt()).Format(time.RFC3339)) + if tc.stage == "started" { + assert.Equal(t, uint16(80), dockerContainer.Container.KnownPortBindingsUnsafe[0].ContainerPort) + } + if tc.stage == "finished" { + assert.False(t, task.GetExecutionStoppedAt().IsZero()) + assert.Equal(t, tc.exitCode, dockerContainer.Container.GetKnownExitCode()) + } + if tc.stage == "failed" { + assert.Equal(t, tc.exitCode, dockerContainer.Container.GetKnownExitCode()) + assert.NotNil(t, dockerContainer.Container.ApplyingError) + } + }) + } +} + +// TestContainerProgressParallize tests the container can be processed parallelly +func TestContainerProgressParallize(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, testTime, taskEngine, _, imageManager, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + stateChangeEvents := taskEngine.StateChangeEvents() + eventStream := make(chan dockerapi.DockerContainerChangeEvent) + state := taskEngine.(*DockerTaskEngine).State() + + fastPullImage := "fast-pull-image" + slowPullImage := "slow-pull-image" + + testTask := testdata.LoadTask("sleep5") + + containerTwo := &apicontainer.Container{ + Name: fastPullImage, + Image: fastPullImage, + } + + testTask.Containers = append(testTask.Containers, containerTwo) + testTask.Containers[0].Image = slowPullImage + 
testTask.Containers[0].Name = slowPullImage + + var fastContainerDockerName string + var slowContainerDockerName string + fastContainerDockerID := "fast-pull-container-id" + slowContainerDockerID := "slow-pull-container-id" + + var waitForFastPullContainer sync.WaitGroup + waitForFastPullContainer.Add(1) + + client.EXPECT().Version(gomock.Any(), gomock.Any()).Return("17.12.0", nil).AnyTimes() + testTime.EXPECT().Now().Return(time.Now()).AnyTimes() + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() + imageManager.EXPECT().RecordContainerReference(gomock.Any()).Return(nil).AnyTimes() + imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false).AnyTimes() + client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil) + client.EXPECT().PullImage(gomock.Any(), fastPullImage, gomock.Any(), gomock.Any()) + client.EXPECT().PullImage(gomock.Any(), slowPullImage, gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, image interface{}, auth interface{}, timeout interface{}) { + waitForFastPullContainer.Wait() + }) + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx interface{}, cfg interface{}, hostconfig interface{}, name string, duration interface{}) { + if strings.Contains(name, slowPullImage) { + slowContainerDockerName = name + state.AddContainer(&apicontainer.DockerContainer{ + DockerID: slowContainerDockerID, + DockerName: slowContainerDockerName, + Container: testTask.Containers[0], + }, testTask) + go func() { + event := createDockerEvent(apicontainerstatus.ContainerCreated) + event.DockerID = slowContainerDockerID + eventStream <- event + }() + } else if strings.Contains(name, fastPullImage) { + fastContainerDockerName = name + state.AddTask(testTask) + state.AddContainer(&apicontainer.DockerContainer{ + DockerID: fastContainerDockerID, + DockerName: 
fastContainerDockerName, + Container: testTask.Containers[1], + }, testTask) + go func() { + event := createDockerEvent(apicontainerstatus.ContainerCreated) + event.DockerID = fastContainerDockerID + eventStream <- event + }() + } else { + t.Fatalf("Got unexpected name for creating container: %s", name) + } + }).Times(2) + client.EXPECT().StartContainer(gomock.Any(), fastContainerDockerID, gomock.Any()).Do( + func(ctx interface{}, id string, duration interface{}) { + go func() { + event := createDockerEvent(apicontainerstatus.ContainerRunning) + event.DockerID = fastContainerDockerID + eventStream <- event + }() + }) + client.EXPECT().StartContainer(gomock.Any(), slowContainerDockerID, gomock.Any()).Do( + func(ctx interface{}, id string, duration interface{}) { + go func() { + event := createDockerEvent(apicontainerstatus.ContainerRunning) + event.DockerID = slowContainerDockerID + eventStream <- event + }() + }) + + taskEngine.Init(ctx) + taskEngine.AddTask(testTask) + + // Expect the fast pulled container to be running first + fastPullContainerRunning := false + for event := range stateChangeEvents { + containerEvent, ok := event.(api.ContainerStateChange) + if ok && containerEvent.Status == apicontainerstatus.ContainerRunning { + if containerEvent.ContainerName == fastPullImage { + fastPullContainerRunning = true + // The second container should start processing now + waitForFastPullContainer.Done() + continue + } + assert.True(t, fastPullContainerRunning, "got the slower pulled container running events first") + continue + } + + taskEvent, ok := event.(api.TaskStateChange) + if ok && taskEvent.Status == apitaskstatus.TaskRunning { + break + } + t.Errorf("Got unexpected task event: %v", taskEvent.String()) + } + defer discardEvents(stateChangeEvents)() + // stop and clean up the task + cleanup := make(chan time.Time) + client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: 
fastContainerDockerID}).AnyTimes() + client.EXPECT().StopContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: slowContainerDockerID}).AnyTimes() + testTime.EXPECT().After(gomock.Any()).Return(cleanup).MinTimes(1) + client.EXPECT().RemoveContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + imageManager.EXPECT().RemoveContainerReferenceFromImageState(gomock.Any()).Return(nil).Times(2) + + containerStoppedEvent := createDockerEvent(apicontainerstatus.ContainerStopped) + containerStoppedEvent.DockerID = slowContainerDockerID + eventStream <- containerStoppedEvent + + testTask.SetSentStatus(apitaskstatus.TaskStopped) + cleanup <- time.Now() + for { + tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks() + if len(tasks) == 0 { + break + } + time.Sleep(5 * time.Millisecond) + } +} + +func TestSynchronizeResource(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1) + client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1) + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + state := dockerTaskEngine.State() + cgroupResource := mock_taskresource.NewMockTaskResource(ctrl) + testTask := testdata.LoadTask("sleep5") + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{ + "cgroup": { + cgroupResource, + }, + } + // add the task to the state to simulate the agent restored the state on restart + state.AddTask(testTask) + cgroupResource.EXPECT().Initialize(gomock.Any(), gomock.Any(), gomock.Any()) + cgroupResource.EXPECT().SetDesiredStatus(gomock.Any()).MaxTimes(1) + cgroupResource.EXPECT().GetDesiredStatus().MaxTimes(2) + cgroupResource.EXPECT().TerminalStatus().MaxTimes(1) + 
cgroupResource.EXPECT().SteadyState().MaxTimes(1) + cgroupResource.EXPECT().GetKnownStatus().MaxTimes(1) + cgroupResource.EXPECT().GetName().AnyTimes().Return("cgroup") + cgroupResource.EXPECT().StatusString(gomock.Any()).AnyTimes() + + // Set the task to be stopped so that the process can be done quickly + testTask.SetDesiredStatus(apitaskstatus.TaskStopped) + dockerTaskEngine.synchronizeState() +} + +func TestSynchronizeENIAttachment(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().Version(gomock.Any(), gomock.Any()).MaxTimes(1) + client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1) + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + state := dockerTaskEngine.State() + testTask := testdata.LoadTask("sleep5") + expiresAt := time.Now().Unix() + 1 + attachment := &apieni.ENIAttachment{ + TaskARN: "TaskARN", + AttachmentARN: "AttachmentARN", + MACAddress: "MACAddress", + Status: apieni.ENIAttachmentNone, + ExpiresAt: time.Unix(expiresAt, 0), + } + state.AddENIAttachment(attachment) + + state.AddTask(testTask) + testTask.SetDesiredStatus(apitaskstatus.TaskStopped) + dockerTaskEngine.synchronizeState() + + // If the below call doesn't panic on NPE, it means the ENI attachment has been properly initialized in synchronizeState. 
+ attachment.StopAckTimer() +} + +func TestSynchronizeENIAttachmentRemoveData(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + client.EXPECT().ContainerEvents(gomock.Any()).MaxTimes(1) + + err := taskEngine.Init(ctx) + assert.NoError(t, err) + + taskEngine.(*DockerTaskEngine).dataClient = dataClient + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + + attachment := &apieni.ENIAttachment{ + TaskARN: "TaskARN", + AttachmentARN: testAttachmentArn, + MACAddress: "MACAddress", + Status: apieni.ENIAttachmentNone, + AttachStatusSent: false, + } + + // eni attachment data is removed if AttachStatusSent is unset + dockerTaskEngine.state.AddENIAttachment(attachment) + assert.NoError(t, dockerTaskEngine.dataClient.SaveENIAttachment(attachment)) + + dockerTaskEngine.synchronizeState() + attachments, err := dockerTaskEngine.dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, attachments, 0) +} + +func TestTaskSecretsEnvironmentVariables(t *testing.T) { + // metadata required for createContainer workflow validation + taskARN := "secretsTask" + taskFamily := "secretsTaskFamily" + taskVersion := "1" + taskContainerName := "secretsContainer" + + // metadata required for ssm secret resource validation + ssmSecretName := "mySSMSecret" + ssmSecretValueFrom := "ssm/mySecret" + ssmSecretRetrievedValue := "mySSMSecretValue" + ssmSecretRegion := "us-west-2" + + // metadata required for asm secret resource validation + asmSecretName := "myASMSecret" + asmSecretValueFrom := "arn:aws:secretsmanager:region:account-id:secret:" + asmSecretName + asmSecretRetrievedValue := "myASMSecretValue" + asmSecretRegion := "us-west-2" + asmSecretKey := asmSecretValueFrom + "_" + asmSecretRegion + + ssmExpectedEnvVar := ssmSecretName + "=" + ssmSecretRetrievedValue + asmExpectedEnvVar := 
asmSecretName + "=" + asmSecretRetrievedValue + + testCases := []struct { + name string + secrets []apicontainer.Secret + ssmSecret apicontainer.Secret + asmSecret apicontainer.Secret + expectedEnv []string + }{ + { + name: "ASMSecretAsEnv", + secrets: []apicontainer.Secret{ + { + Name: ssmSecretName, + ValueFrom: ssmSecretValueFrom, + Region: ssmSecretRegion, + Target: "LOG_DRIVER", + Provider: "ssm", + }, + { + Name: asmSecretName, + ValueFrom: asmSecretValueFrom, + Region: asmSecretRegion, + Type: "ENVIRONMENT_VARIABLE", + Provider: "asm", + }, + }, + ssmSecret: apicontainer.Secret{ + Name: ssmSecretName, + ValueFrom: ssmSecretValueFrom, + Region: ssmSecretRegion, + Target: "LOG_DRIVER", + Provider: "ssm", + }, + asmSecret: apicontainer.Secret{ + Name: asmSecretName, + ValueFrom: asmSecretValueFrom, + Region: asmSecretRegion, + Type: "ENVIRONMENT_VARIABLE", + Provider: "asm", + }, + expectedEnv: []string{asmExpectedEnvVar}, + }, + { + name: "SSMSecretAsEnv", + secrets: []apicontainer.Secret{ + { + Name: ssmSecretName, + ValueFrom: ssmSecretValueFrom, + Region: ssmSecretRegion, + Type: "ENVIRONMENT_VARIABLE", + Provider: "ssm", + }, + { + Name: asmSecretName, + ValueFrom: asmSecretValueFrom, + Region: asmSecretRegion, + Target: "LOG_DRIVER", + Provider: "asm", + }, + }, + ssmSecret: apicontainer.Secret{ + Name: ssmSecretName, + ValueFrom: ssmSecretValueFrom, + Region: ssmSecretRegion, + Type: "ENVIRONMENT_VARIABLE", + Provider: "ssm", + }, + asmSecret: apicontainer.Secret{ + Name: asmSecretName, + ValueFrom: asmSecretValueFrom, + Region: asmSecretRegion, + Target: "LOG_DRIVER", + Provider: "asm", + }, + expectedEnv: []string{ssmExpectedEnvVar}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + // sample test + testTask := &apitask.Task{ + 
Arn: taskARN, + Family: taskFamily, + Version: taskVersion, + Containers: []*apicontainer.Container{ + { + Name: taskContainerName, + Secrets: tc.secrets, + }, + }, + } + + // metadata required for execution role authentication workflow + credentialsID := "execution role" + executionRoleCredentials := credentials.IAMRoleCredentials{ + CredentialsID: credentialsID, + } + taskIAMcreds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: executionRoleCredentials, + } + + // configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + + // validate base config + expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err != nil { + t.Fatal(err) + } + + expectedConfig.Labels = map[string]string{ + "com.amazonaws.ecs.task-arn": taskARN, + "com.amazonaws.ecs.container-name": taskContainerName, + "com.amazonaws.ecs.task-definition-family": taskFamily, + "com.amazonaws.ecs.task-definition-version": taskVersion, + "com.amazonaws.ecs.cluster": "", + } + + // required to validate container config includes secrets as environment variables + expectedConfig.Env = tc.expectedEnv + + // required for validating ssm workflows + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + + ssmRequirements := map[string][]apicontainer.Secret{ + ssmSecretRegion: []apicontainer.Secret{ + tc.ssmSecret, + }, + } + + ssmSecretRes := ssmsecret.NewSSMSecretResource( + testTask.Arn, + ssmRequirements, + credentialsID, + credentialsManager, + ssmClientCreator) + + // required for validating asm workflows + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + asmRequirements := map[string]apicontainer.Secret{ + asmSecretKey: tc.asmSecret, + } + + asmSecretRes := asmsecret.NewASMSecretResource( + testTask.Arn, + asmRequirements, + 
credentialsID, + credentialsManager, + asmClientCreator) + + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{ + ssmsecret.ResourceName: {ssmSecretRes}, + asmsecret.ResourceName: {asmSecretRes}, + } + + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String(ssmSecretValueFrom), + Value: aws.String(ssmSecretRetrievedValue), + }, + }, + } + + asmClientOutput := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmSecretRetrievedValue), + } + + reqSecretNames := []*string{aws.String(ssmSecretValueFrom)} + + credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(taskIAMcreds, true).Times(2) + ssmClientCreator.EXPECT().NewSSMClient(region, executionRoleCredentials).Return(mockSSMClient) + asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient) + + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) { + assert.Equal(t, in.Names, reqSecretNames) + }).Return(ssmClientOutput, nil).Times(1) + + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, asmSecretValueFrom, aws.StringValue(in.SecretId)) + }).Return(asmClientOutput, nil).Times(1) + + require.NoError(t, ssmSecretRes.Create()) + require.NoError(t, asmSecretRes.Create()) + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + + // test validates that the expectedConfig includes secrets are appended as + // environment variables + client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any()) + ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0]) + assert.Nil(t, ret.Error) + + }) + } +} + +// TestCreateContainerAddFirelensLogDriverConfig tests that in createContainer, when the +// container is using 
firelens log driver, its logConfig is properly set. +func TestCreateContainerAddFirelensLogDriverConfig(t *testing.T) { + taskName := "logSenderTask" + taskARN := "arn:aws:ecs:region:account-id:task/task-id" + taskID := "task-id" + taskFamily := "logSenderTaskFamily" + taskVersion := "1" + logDriverTypeFirelens := "awsfirelens" + dataLogDriverPath := "/data/firelens/" + dataLogDriverSocketPath := "/socket/fluent.sock" + socketPathPrefix := "unix://" + networkModeBridge := "bridge" + networkModeAWSVPC := "awsvpc" + bridgeIPAddr := "bridgeIP" + envVarBridgeMode := "FLUENT_HOST=bridgeIP" + envVarPort := "FLUENT_PORT=24224" + envVarAWSVPCMode := "FLUENT_HOST=127.0.0.1" + eniIPv4Address := "10.0.0.2" + getTask := func(logDriverType string, networkMode string) *apitask.Task { + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: logDriverType, + Config: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + return &apitask.Task{ + Arn: taskARN, + Version: taskVersion, + Family: taskFamily, + Containers: []*apicontainer.Container{ + { + Name: taskName, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: func() *string { + s := string(rawHostConfig) + return &s + }(), + }, + NetworkModeUnsafe: networkMode, + }, + { + Name: "test-container", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: "fluentd", + }, + NetworkModeUnsafe: networkMode, + NetworkSettingsUnsafe: &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: bridgeIPAddr, + }, + }, + }, + }, + } + } + getTaskWithENI := func(logDriverType string, networkMode string) *apitask.Task { + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: logDriverType, + Config: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + rawHostConfig, err := 
json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + return &apitask.Task{ + Arn: taskARN, + Version: taskVersion, + Family: taskFamily, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + Containers: []*apicontainer.Container{ + { + Name: taskName, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: func() *string { + s := string(rawHostConfig) + return &s + }(), + }, + NetworkModeUnsafe: networkMode, + }, + { + Name: "test-container", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: "fluentd", + }, + NetworkModeUnsafe: networkMode, + NetworkSettingsUnsafe: &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: bridgeIPAddr, + }, + }, + }, + }, + } + } + testCases := []struct { + name string + task *apitask.Task + expectedLogConfigType string + expectedLogConfigTag string + expectedLogConfigFluentAddress string + expectedFluentdAsyncConnect string + expectedSubSecondPrecision string + expectedIPAddress string + expectedPort string + }{ + { + name: "test container that uses firelens log driver with default mode", + task: getTask(logDriverTypeFirelens, ""), + expectedLogConfigType: logDriverTypeFluentd, + expectedLogConfigTag: taskName + "-firelens-" + taskID, + expectedFluentdAsyncConnect: strconv.FormatBool(true), + expectedSubSecondPrecision: strconv.FormatBool(true), + expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath), + expectedIPAddress: envVarBridgeMode, + expectedPort: envVarPort, + }, + { + name: "test container that uses firelens log driver with bridge mode", + task: getTask(logDriverTypeFirelens, networkModeBridge), + expectedLogConfigType: logDriverTypeFluentd, + expectedLogConfigTag: taskName + "-firelens-" + taskID, + expectedFluentdAsyncConnect: strconv.FormatBool(true), + expectedSubSecondPrecision: strconv.FormatBool(true), + 
expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath), + expectedIPAddress: envVarBridgeMode, + expectedPort: envVarPort, + }, + { + name: "test container that uses firelens log driver with awsvpc mode", + task: getTaskWithENI(logDriverTypeFirelens, networkModeAWSVPC), + expectedLogConfigType: logDriverTypeFluentd, + expectedLogConfigTag: taskName + "-firelens-" + taskID, + expectedFluentdAsyncConnect: strconv.FormatBool(true), + expectedSubSecondPrecision: strconv.FormatBool(true), + expectedLogConfigFluentAddress: socketPathPrefix + filepath.Join(defaultConfig.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath), + expectedIPAddress: envVarAWSVPCMode, + expectedPort: envVarPort, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx context.Context, + config *dockercontainer.Config, + hostConfig *dockercontainer.HostConfig, + name string, + timeout time.Duration) { + assert.Equal(t, tc.expectedLogConfigType, hostConfig.LogConfig.Type) + assert.Equal(t, tc.expectedLogConfigTag, hostConfig.LogConfig.Config["tag"]) + assert.Equal(t, tc.expectedLogConfigFluentAddress, hostConfig.LogConfig.Config["fluentd-address"]) + assert.Equal(t, tc.expectedFluentdAsyncConnect, hostConfig.LogConfig.Config["fluentd-async-connect"]) + assert.Equal(t, tc.expectedSubSecondPrecision, hostConfig.LogConfig.Config["fluentd-sub-second-precision"]) + assert.Contains(t, config.Env, tc.expectedIPAddress) + assert.Contains(t, config.Env, tc.expectedPort) + }) + ret := 
taskEngine.(*DockerTaskEngine).createContainer(tc.task, tc.task.Containers[0]) + assert.NoError(t, ret.Error) + }) + + } +} + +func TestCreateFirelensContainerSetFluentdUID(t *testing.T) { + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: "fluentd", + }, + }, + }, + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx context.Context, + config *dockercontainer.Config, + hostConfig *dockercontainer.HostConfig, + name string, + timeout time.Duration) { + assert.Contains(t, config.Env, "FLUENT_UID=0") + }) + ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0]) + assert.NoError(t, ret.Error) +} + +func TestGetBridgeIP(t *testing.T) { + networkDefaultIP := "defaultIP" + getNetwork := func(defaultIP string, bridgeIP string, networkMode string) *types.NetworkSettings { + endPoint := network.EndpointSettings{ + IPAddress: bridgeIP, + } + return &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: defaultIP, + }, + Networks: map[string]*network.EndpointSettings{ + networkMode: &endPoint, + }, + } + } + testCases := []struct { + defaultIP string + bridgeIP string + networkMode string + expectedOk bool + expectedIPAddress string + }{ + { + defaultIP: networkDefaultIP, + bridgeIP: networkBridgeIP, + networkMode: networkModeBridge, + expectedOk: true, + expectedIPAddress: networkDefaultIP, + }, + { + defaultIP: "", + bridgeIP: networkBridgeIP, + networkMode: networkModeBridge, + expectedOk: true, + expectedIPAddress: 
networkBridgeIP, + }, + { + defaultIP: "", + bridgeIP: networkBridgeIP, + networkMode: networkModeAWSVPC, + expectedOk: false, + expectedIPAddress: "", + }, + { + defaultIP: "", + bridgeIP: "", + networkMode: networkModeBridge, + expectedOk: false, + expectedIPAddress: "", + }, + } + + for _, tc := range testCases { + IPAddress, ok := getContainerHostIP(getNetwork(tc.defaultIP, tc.bridgeIP, tc.networkMode)) + assert.Equal(t, tc.expectedOk, ok) + assert.Equal(t, tc.expectedIPAddress, IPAddress) + } +} + +func TestStartFirelensContainerRetryForContainerIP(t *testing.T) { + dockerMetaDataWithoutNetworkSettings := dockerapi.DockerContainerMetadata{ + DockerID: containerID, + Volumes: []types.MountPoint{ + { + Name: "volume", + Source: "/src/vol", + Destination: "/vol", + }, + }, + } + rawHostConfigInput := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: "fluentd", + Config: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + jsonBaseWithoutNetwork := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + } + + jsonBaseWithNetwork := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: containerID, + State: &types.ContainerState{Pid: containerPid}, + }, + NetworkSettings: &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: networkBridgeIP, + }, + Networks: map[string]*network.EndpointSettings{ + apitask.BridgeNetworkMode: &network.EndpointSettings{ + IPAddress: networkBridgeIP, + }, + }, + }, + } + rawHostConfig, err := json.Marshal(&rawHostConfigInput) + require.NoError(t, err) + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/task-id", + Version: "1", + Family: "logSenderTaskFamily", + Containers: []*apicontainer.Container{ + { + Name: "logSenderTask", + DockerConfig: apicontainer.DockerConfig{ + HostConfig: func() *string { + s := 
string(rawHostConfig) + return &s + }(), + }, + NetworkModeUnsafe: apitask.BridgeNetworkMode, + }, + { + Name: "test-container", + FirelensConfig: &apicontainer.FirelensConfig{ + Type: "fluentd", + }, + NetworkModeUnsafe: apitask.BridgeNetworkMode, + }, + }, + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + taskEngine.(*DockerTaskEngine).state.AddTask(testTask) + taskEngine.(*DockerTaskEngine).state.AddContainer(&apicontainer.DockerContainer{ + Container: testTask.Containers[1], + DockerName: dockerContainerName, + DockerID: containerID, + }, testTask) + + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + client.EXPECT().StartContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(dockerMetaDataWithoutNetworkSettings).AnyTimes() + gomock.InOrder( + client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()). + Return(jsonBaseWithoutNetwork, nil), + client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()). + Return(jsonBaseWithoutNetwork, nil), + client.EXPECT().InspectContainer(gomock.Any(), containerID, gomock.Any()). 
+ Return(jsonBaseWithNetwork, nil), + ) + ret := taskEngine.(*DockerTaskEngine).startContainer(testTask, testTask.Containers[1]) + assert.NoError(t, ret.Error) + assert.Equal(t, jsonBaseWithNetwork.NetworkSettings, ret.NetworkSettings) +} + +func TestStartExecAgent(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + nowTime := time.Now() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + dockerTaskEngine.execCmdMgr = execCmdMgr + defer ctrl.Finish() + const ( + testContainerId = "123" + ) + testCases := []struct { + execCommandAgentEnabled bool + expectContainerEvent bool + execAgentStatus apicontainerstatus.ManagedAgentStatus + execAgentInitFailed bool + execAgentStartError error + }{ + { + execCommandAgentEnabled: false, + expectContainerEvent: false, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + execAgentInitFailed: false, + }, + { + execCommandAgentEnabled: true, + expectContainerEvent: true, + execAgentStatus: apicontainerstatus.ManagedAgentRunning, + execAgentInitFailed: false, + }, + { + execCommandAgentEnabled: true, + expectContainerEvent: true, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + execAgentStartError: errors.New("mock error"), + }, + { + execCommandAgentEnabled: true, + expectContainerEvent: false, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + execAgentInitFailed: true, + }, + } + for _, tc := range testCases { + stateChangeEvents := taskEngine.StateChangeEvents() + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + RuntimeID: testContainerId, + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + }, + } + + if tc.execCommandAgentEnabled { + enableExecCommandAgentForContainer(testTask.Containers[0], 
apicontainer.ManagedAgentState{ + LastStartedAt: nowTime, + Status: tc.execAgentStatus, + InitFailed: tc.execAgentInitFailed, + }) + } + mTestTask := &managedTask{ + Task: testTask, + engine: dockerTaskEngine, + ctx: ctx, + stateChangeEvents: stateChangeEvents, + } + + dockerTaskEngine.state.AddTask(testTask) + dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask + + // check for expected taskEvent in stateChangeEvents + waitDone := make(chan struct{}) + var reason string + if tc.expectContainerEvent { + reason = "ExecuteCommandAgent started" + } + if tc.execAgentStartError != nil { + reason = tc.execAgentStartError.Error() + } + expectedManagedAgent := apicontainer.ManagedAgent{ + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: tc.execAgentStatus, + InitFailed: tc.execAgentInitFailed, + Reason: reason, + }, + } + go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone) + + client.EXPECT().StartContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return( + dockerapi.DockerContainerMetadata{DockerID: containerID}).AnyTimes() + if tc.execCommandAgentEnabled { + execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), testTask.Containers[0], gomock.Any()).AnyTimes() + if !tc.execAgentInitFailed { + execCmdMgr.EXPECT().StartAgent(gomock.Any(), client, testTask, testTask.Containers[0], testContainerId). + Return(tc.execAgentStartError). 
+ AnyTimes() + } + } + ret := taskEngine.(*DockerTaskEngine).startContainer(testTask, testTask.Containers[0]) + assert.NoError(t, ret.Error) + + timeout := false + select { + case <-waitDone: + case <-time.After(time.Second): + timeout = true + } + assert.False(t, timeout) + } +} + +func TestMonitorExecAgentRunning(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + dockerTaskEngine.execCmdMgr = execCmdMgr + dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond + defer ctrl.Finish() + const ( + testContainerId = "123" + ) + testCases := []struct { + containerStatus apicontainerstatus.ContainerStatus + execCommandAgentState apicontainer.ManagedAgentState + execAgentStatus apicontainerstatus.ManagedAgentStatus + restartStatus execcmd.RestartStatus + simulateBadContainerId bool + expectedRestartInUnhealthyCall bool + expectContainerEvent bool + }{ + { + containerStatus: apicontainerstatus.ContainerStopped, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + restartStatus: execcmd.NotRestarted, + expectContainerEvent: false, + }, + { + containerStatus: apicontainerstatus.ContainerRunning, + simulateBadContainerId: true, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + restartStatus: execcmd.NotRestarted, + expectContainerEvent: false, + }, + { + containerStatus: apicontainerstatus.ContainerRunning, + execAgentStatus: apicontainerstatus.ManagedAgentRunning, + restartStatus: execcmd.NotRestarted, + expectContainerEvent: false, + }, + { + containerStatus: apicontainerstatus.ContainerRunning, + execAgentStatus: apicontainerstatus.ManagedAgentRunning, + restartStatus: execcmd.Restarted, + expectContainerEvent: true, + }, + } + for _, tc := range testCases { + nowTime := time.Now() + stateChangeEvents := taskEngine.StateChangeEvents() + 
testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + RuntimeID: testContainerId, + KnownStatusUnsafe: tc.containerStatus, + }, + }, + } + + enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{ + LastStartedAt: nowTime, + Status: tc.execAgentStatus, + }) + + mTestTask := &managedTask{ + Task: testTask, + engine: dockerTaskEngine, + ctx: ctx, + stateChangeEvents: stateChangeEvents, + } + + dockerTaskEngine.state.AddTask(testTask) + + if tc.simulateBadContainerId { + testTask.Containers[0].RuntimeID = "" + } + if tc.containerStatus == apicontainerstatus.ContainerRunning && !tc.simulateBadContainerId { + execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask, + testTask.Containers[0], testContainerId). + Return(tc.restartStatus, nil). + Times(1) + } + + // check for expected containerEvent in stateChangeEvents + waitDone := make(chan struct{}) + expectedManagedAgent := apicontainer.ManagedAgent{ + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + Reason: "ExecuteCommandAgent restarted", + }, + } + // only if we expect restart will we also expect a managed agent container event + go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone) + + taskEngine.(*DockerTaskEngine).monitorExecAgentRunning(ctx, mTestTask, testTask.Containers[0]) + + timeout := false + select { + case <-waitDone: + case <-time.After(time.Second): + timeout = true + } + assert.False(t, timeout) + } +} + +func TestMonitorExecAgentProcesses(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + nowTime := time.Now() + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) 
+ dockerTaskEngine.execCmdMgr = execCmdMgr + dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond + defer ctrl.Finish() + + testCases := []struct { + execAgentStatus apicontainerstatus.ManagedAgentStatus + expectContainerEvent bool + execAgentInitfailed bool + }{ + { + execAgentStatus: apicontainerstatus.ManagedAgentRunning, + expectContainerEvent: true, + execAgentInitfailed: false, + }, + { + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + expectContainerEvent: false, + execAgentInitfailed: true, + }, + } + for _, tc := range testCases { + stateChangeEvents := taskEngine.StateChangeEvents() + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + RuntimeID: "runtime-ID", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + } + enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{ + LastStartedAt: nowTime, + Status: apicontainerstatus.ManagedAgentRunning, + InitFailed: tc.execAgentInitfailed, + }) + mTestTask := &managedTask{ + Task: testTask, + engine: dockerTaskEngine, + ctx: ctx, + stateChangeEvents: stateChangeEvents, + } + dockerTaskEngine.state.AddTask(testTask) + dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask + restartCtx, restartCancel := context.WithTimeout(context.Background(), time.Second) + defer restartCancel() + // return execcmd.Restarted to ensure container event emission + + if !tc.execAgentInitfailed { + execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID). + DoAndReturn( + func(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (execcmd.RestartStatus, error) { + defer restartCancel() + return execcmd.Restarted, nil + }). 
+ Times(1) + } + + expectContainerEvent := tc.expectContainerEvent + waitDone := make(chan struct{}) + expectedManagedAgent := apicontainer.ManagedAgent{ + Name: execcmd.ExecuteCommandAgentName, + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: tc.execAgentStatus, + Reason: "ExecuteCommandAgent restarted", + LastStartedAt: nowTime, + }, + } + + go checkManagedAgentEvents(t, expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone) + + dockerTaskEngine.monitorExecAgentProcesses(dockerTaskEngine.ctx) + <-restartCtx.Done() + time.Sleep(5 * time.Millisecond) + + timeout := false + select { + case <-waitDone: + case <-time.After(time.Second): + timeout = true + } + + assert.False(t, timeout) + } +} + +func TestMonitorExecAgentProcessExecDisabled(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + dockerTaskEngine.execCmdMgr = execCmdMgr + defer ctrl.Finish() + tt := []struct { + execCommandAgentEnabled bool + taskStatus apitaskstatus.TaskStatus + }{ + { + execCommandAgentEnabled: false, + taskStatus: apitaskstatus.TaskRunning, + }, + { + execCommandAgentEnabled: true, + taskStatus: apitaskstatus.TaskStopped, + }, + } + for _, test := range tt { + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + RuntimeID: "runtime-ID", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + KnownStatusUnsafe: test.taskStatus, + } + if test.execCommandAgentEnabled { + enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{}) + } + dockerTaskEngine.state.AddTask(testTask) + dockerTaskEngine.managedTasks[testTask.Arn] = &managedTask{Task: testTask} + dockerTaskEngine.monitorExecAgentProcesses(ctx) + 
// absence of top container expect call indicates it shouldn't have been called + time.Sleep(10 * time.Millisecond) + } +} +func TestMonitorExecAgentsMultipleContainers(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, _, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + dockerTaskEngine.execCmdMgr = execCmdMgr + dockerTaskEngine.monitorExecAgentsInterval = 2 * time.Millisecond + defer ctrl.Finish() + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container1", + RuntimeID: "runtime-ID1", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + { + Name: "test-container2", + RuntimeID: "runtime-ID2", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + } + + for _, c := range testTask.Containers { + enableExecCommandAgentForContainer(c, apicontainer.ManagedAgentState{}) + } + + mTestTask := &managedTask{ + Task: testTask, + engine: dockerTaskEngine, + ctx: ctx, + stateChangeEvents: stateChangeEvents, + } + + dockerTaskEngine.state.AddTask(testTask) + dockerTaskEngine.managedTasks[testTask.Arn] = mTestTask + wg := &sync.WaitGroup{} + numContainers := len(testTask.Containers) + wg.Add(numContainers) + + for i := 0; i < numContainers; i++ { + execCmdMgr.EXPECT().RestartAgentIfStopped(dockerTaskEngine.ctx, dockerTaskEngine.client, testTask, testTask.Containers[i], testTask.Containers[i].RuntimeID). + DoAndReturn( + func(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (execcmd.RestartStatus, error) { + defer wg.Done() + defer discardEvents(stateChangeEvents)() + return execcmd.NotRestarted, nil + }). 
+ Times(1) + + } + taskEngine.(*DockerTaskEngine).monitorExecAgentProcesses(dockerTaskEngine.ctx) + + waitDone := make(chan struct{}) + go func() { + wg.Wait() + close(waitDone) + }() + + timeout := false + select { + case <-waitDone: + case <-time.After(time.Second): + timeout = true + } + assert.False(t, timeout) + +} + +func TestPeriodicExecAgentsMonitoring(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, taskEngine, _, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + execAgentPID := "1234" + resp := &dockercontainer.ContainerTopOKBody{ + Processes: [][]string{{"root", execAgentPID}}, + } + testTask := &apitask.Task{ + Arn: "arn:aws:ecs:region:account-id:task/test-task-arn", + Containers: []*apicontainer.Container{ + { + Name: "test-container", + RuntimeID: "runtime-ID", + }, + }, + } + enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{ + Metadata: map[string]interface{}{ + "PID": execAgentPID, + }}) + taskEngine.(*DockerTaskEngine).monitorExecAgentsInterval = 2 * time.Millisecond + taskEngine.(*DockerTaskEngine).state.AddTask(testTask) + taskEngine.(*DockerTaskEngine).managedTasks[testTask.Arn] = &managedTask{Task: testTask} + topCtx, topCancel := context.WithTimeout(context.Background(), time.Second) + defer topCancel() + client.EXPECT().TopContainer(gomock.Any(), testTask.Containers[0].RuntimeID, 30*time.Second, execAgentPID).DoAndReturn( + func(ctx context.Context, containerID string, timeout time.Duration, psArgs ...string) (*dockercontainer.ContainerTopOKBody, error) { + defer topCancel() + return resp, nil + }).AnyTimes() + go taskEngine.(*DockerTaskEngine).startPeriodicExecAgentsMonitoring(ctx) + <-topCtx.Done() + time.Sleep(5 * time.Millisecond) + execCmdAgent, ok := testTask.Containers[0].GetManagedAgentByName(execcmd.ExecuteCommandAgentName) + assert.True(t, ok) + execMD := execcmd.MapToAgentMetadata(execCmdAgent.Metadata) + assert.Equal(t, 
execAgentPID, execMD.PID) +} + +func TestCreateContainerWithExecAgent(t *testing.T) { + testcases := []struct { + name string + error error + expectContainerEvent bool + execAgentInitFailed bool + execAgentStatus apicontainerstatus.ManagedAgentStatus + }{ + { + name: "ExecAgent config mount success", + error: nil, + expectContainerEvent: false, + execAgentInitFailed: false, + }, + { + name: "ExecAgent config mount Error", + error: errors.New("mount error"), + expectContainerEvent: true, + execAgentInitFailed: true, + execAgentStatus: apicontainerstatus.ManagedAgentStopped, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, _, engine, _, _, _ := mocks(t, ctx, &config.Config{}) + defer ctrl.Finish() + taskEngine, _ := engine.(*DockerTaskEngine) + stateChangeEvents := engine.StateChangeEvents() + execCmdMgr := mock_execcmdagent.NewMockManager(ctrl) + taskEngine.execCmdMgr = execCmdMgr + sleepTask := testdata.LoadTask("sleep5") + sleepContainer, _ := sleepTask.ContainerByName("sleep5") + enableExecCommandAgentForContainer(sleepContainer, apicontainer.ManagedAgentState{ + Status: tc.execAgentStatus, + InitFailed: tc.execAgentInitFailed, + }) + + mTestTask := &managedTask{ + Task: sleepTask, + engine: taskEngine, + ctx: ctx, + stateChangeEvents: stateChangeEvents, + } + + taskEngine.state.AddTask(sleepTask) + taskEngine.managedTasks[sleepTask.Arn] = mTestTask + + waitDone := make(chan struct{}) + var reason string + if tc.error != nil { + reason = fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", tc.error) + } + expectedManagedAgent := apicontainer.ManagedAgent{ + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentStopped, + Reason: reason, + }, + } + + go checkManagedAgentEvents(t, tc.expectContainerEvent, stateChangeEvents, expectedManagedAgent, waitDone) + 
execCmdMgr.EXPECT().InitializeContainer(gomock.Any(), sleepContainer, gomock.Any()).Return(tc.error) + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) + client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + metadata := taskEngine.createContainer(sleepTask, sleepContainer) + assert.NoError(t, metadata.Error) + + timeout := false + select { + case <-waitDone: + case <-time.After(time.Second): + timeout = true + } + assert.False(t, timeout) + }) + } +} diff --git a/agent/engine/docker_task_engine_windows_test.go b/agent/engine/docker_task_engine_windows_test.go new file mode 100644 index 00000000000..07b8205e8c0 --- /dev/null +++ b/agent/engine/docker_task_engine_windows_test.go @@ -0,0 +1,220 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package engine + +import ( + "context" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_s3_factory "github.com/aws/amazon-ecs-agent/agent/s3/factory/mocks" + mock_ssm_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDeleteTask(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + task := &apitask.Task{ + Arn: testTaskARN, + } + require.NoError(t, dataClient.SaveTask(task)) + + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + state: mockState, + dataClient: dataClient, + cfg: &defaultConfig, + ctx: ctx, + } + + gomock.InOrder( + mockState.EXPECT().RemoveTask(task), + ) + + taskEngine.deleteTask(task) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 0) +} + +func TestCredentialSpecResourceTaskFile(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + // metadata required for createContainer workflow validation + credentialSpecTaskARN := "credentialSpecTask" + credentialSpecTaskFamily := "credentialSpecFamily" + credentialSpecTaskVersion := "1" + credentialSpecTaskContainerName := "credentialSpecContainer" + + c := &apicontainer.Container{ + Name: credentialSpecTaskContainerName, + } + credentialspecFile := 
"credentialspec:file://gmsa_gmsa-acct.json" + targetCredentialspecFile := "credentialspec=file://gmsa_gmsa-acct.json" + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + c.DockerConfig.HostConfig = &hostConfig + + // sample test + testTask := &apitask.Task{ + Arn: credentialSpecTaskARN, + Family: credentialSpecTaskFamily, + Version: credentialSpecTaskVersion, + Containers: []*apicontainer.Container{c}, + } + + // metadata required for execution role authentication workflow + credentialsID := "execution role" + + // configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + + // validate base config + expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err != nil { + t.Fatal(err) + } + + expectedConfig.Labels = map[string]string{ + "com.amazonaws.ecs.task-arn": credentialSpecTaskARN, + "com.amazonaws.ecs.container-name": credentialSpecTaskContainerName, + "com.amazonaws.ecs.task-definition-family": credentialSpecTaskFamily, + "com.amazonaws.ecs.task-definition-version": credentialSpecTaskVersion, + "com.amazonaws.ecs.cluster": "", + } + + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + + credentialSpecReq := []string{credentialspecFile} + + credentialSpecRes, cerr := credentialspec.NewCredentialSpecResource( + testTask.Arn, + defaultConfig.AWSRegion, + credentialSpecReq, + credentialsID, + credentialsManager, + ssmClientCreator, + s3ClientCreator) + assert.NoError(t, cerr) + + credSpecdata := map[string]string{ + credentialspecFile: targetCredentialspecFile, + } + credentialSpecRes.CredSpecMap = credSpecdata + + testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{ + credentialspec.ResourceName: {credentialSpecRes}, + } + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, 
nil).AnyTimes() + + client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any()) + + ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0]) + assert.Nil(t, ret.Error) +} + +func TestCredentialSpecResourceTaskFileErr(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig) + defer ctrl.Finish() + + // metadata required for createContainer workflow validation + credentialSpecTaskARN := "credentialSpecTask" + credentialSpecTaskFamily := "credentialSpecFamily" + credentialSpecTaskVersion := "1" + credentialSpecTaskContainerName := "credentialSpecContainer" + + c := &apicontainer.Container{ + Name: credentialSpecTaskContainerName, + } + credentialspecFile := "credentialspec:file://gmsa_gmsa-acct.json" + targetCredentialspecFile := "credentialspec=file://gmsa_gmsa-acct.json" + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://gmsa_gmsa-acct.json\"]}" + c.DockerConfig.HostConfig = &hostConfig + + // sample test + testTask := &apitask.Task{ + Arn: credentialSpecTaskARN, + Family: credentialSpecTaskFamily, + Version: credentialSpecTaskVersion, + Containers: []*apicontainer.Container{c}, + } + + // metadata required for execution role authentication workflow + credentialsID := "execution role" + + // configure the task and container to use execution role + testTask.SetExecutionRoleCredentialsID(credentialsID) + + // validate base config + expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion) + if err != nil { + t.Fatal(err) + } + + expectedConfig.Labels = map[string]string{ + "com.amazonaws.ecs.task-arn": credentialSpecTaskARN, + "com.amazonaws.ecs.container-name": credentialSpecTaskContainerName, + "com.amazonaws.ecs.task-definition-family": credentialSpecTaskFamily, + "com.amazonaws.ecs.task-definition-version": 
credentialSpecTaskVersion, + "com.amazonaws.ecs.cluster": "", + } + + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + + credentialSpecReq := []string{credentialspecFile} + + credentialSpecRes, cerr := credentialspec.NewCredentialSpecResource( + testTask.Arn, + defaultConfig.AWSRegion, + credentialSpecReq, + credentialsID, + credentialsManager, + ssmClientCreator, + s3ClientCreator) + assert.NoError(t, cerr) + + credSpecdata := map[string]string{ + credentialspecFile: targetCredentialspecFile, + } + credentialSpecRes.CredSpecMap = credSpecdata + + mockTime.EXPECT().Now().AnyTimes() + client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes() + + ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0]) + assert.Error(t, ret.Error) +} diff --git a/agent/engine/dockerstate/docker_task_engine_state.go b/agent/engine/dockerstate/docker_task_engine_state.go new file mode 100644 index 00000000000..32817de8d89 --- /dev/null +++ b/agent/engine/dockerstate/docker_task_engine_state.go @@ -0,0 +1,583 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
package dockerstate

import (
	"encoding/json"
	"strings"
	"sync"

	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
	apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
	apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
	"github.com/aws/amazon-ecs-agent/agent/engine/image"
	"github.com/cihub/seelog"
)

// TaskEngineState keeps track of all mappings between tasks we know about
// and containers docker runs
type TaskEngineState interface {
	// AllTasks returns all of the tasks
	AllTasks() []*apitask.Task
	// AllENIAttachments returns all of the eni attachments
	AllENIAttachments() []*apieni.ENIAttachment
	// AllImageStates returns all of the image.ImageStates
	AllImageStates() []*image.ImageState
	// GetAllContainerIDs returns all of the Container Ids
	GetAllContainerIDs() []string
	// ContainerByID returns an apicontainer.DockerContainer for a given container ID
	ContainerByID(id string) (*apicontainer.DockerContainer, bool)
	// ContainerMapByArn returns a map of containers belonging to a particular task ARN
	ContainerMapByArn(arn string) (map[string]*apicontainer.DockerContainer, bool)
	// PulledContainerMapByArn returns a map of pulled containers belonging to a particular task ARN
	PulledContainerMapByArn(arn string) (map[string]*apicontainer.DockerContainer, bool)
	// TaskByShortID retrieves the task of a given docker short container id
	TaskByShortID(cid string) ([]*apitask.Task, bool)
	// TaskByID returns an apitask.Task for a given container ID
	TaskByID(cid string) (*apitask.Task, bool)
	// TaskByArn returns a task for a given ARN
	TaskByArn(arn string) (*apitask.Task, bool)
	// AddTask adds a task to the state to be stored
	AddTask(task *apitask.Task)
	// AddPulledContainer adds a pulled container to the state to be stored for a given task
	AddPulledContainer(container *apicontainer.DockerContainer, task *apitask.Task)
	// AddContainer adds a container to the state to be stored for a given task
	AddContainer(container *apicontainer.DockerContainer, task *apitask.Task)
	// AddImageState adds an image.ImageState to be stored
	AddImageState(imageState *image.ImageState)
	// AddENIAttachment adds an eni attachment from acs to be stored
	AddENIAttachment(eni *apieni.ENIAttachment)
	// RemoveENIAttachment removes an eni attachment to stop tracking
	RemoveENIAttachment(mac string)
	// ENIByMac returns the specific ENIAttachment of the given mac address
	ENIByMac(mac string) (*apieni.ENIAttachment, bool)
	// RemoveTask removes a task from the state
	RemoveTask(task *apitask.Task)
	// Reset resets all the fields in the state
	Reset()
	// RemoveImageState removes an image.ImageState
	RemoveImageState(imageState *image.ImageState)
	// AddTaskIPAddress adds ip address for a task arn into the state
	AddTaskIPAddress(addr string, taskARN string)
	// GetTaskByIPAddress gets the task arn for an IP address
	GetTaskByIPAddress(addr string) (string, bool)
	// GetIPAddressByTaskARN gets the local ip address of a task.
	GetIPAddressByTaskARN(taskARN string) (string, bool)
	// DockerIDByV3EndpointID returns a docker ID for a given v3 endpoint ID
	DockerIDByV3EndpointID(v3EndpointID string) (string, bool)
	// TaskARNByV3EndpointID returns a taskARN for a given v3 endpoint ID
	TaskARNByV3EndpointID(v3EndpointID string) (string, bool)

	json.Marshaler
	json.Unmarshaler
}

// DockerTaskEngineState keeps track of all mappings between tasks we know about
// and containers docker runs
// It contains a mutex that can be used to ensure out-of-date state cannot be
// accessed before an update comes and to ensure multiple goroutines can safely
// work with it.
//
// The methods on it will acquire the read lock, but not all acquire the write
// lock (sometimes it is up to the caller).
This is because the write lock for +// containers should encapsulate the creation of the resource as well as adding, +// and creating the resource (docker container) is outside the scope of this +// package. This isn't ideal usage and I'm open to this being reworked/improved. +// +// Some information is duplicated in the interest of having efficient lookups +type DockerTaskEngineState struct { + lock sync.RWMutex + + tasks map[string]*apitask.Task // taskarn -> apitask.Task + idToTask map[string]string // DockerId -> taskarn + taskToID map[string]map[string]*apicontainer.DockerContainer // taskarn -> (containername -> c.DockerContainer) + taskToPulledContainer map[string]map[string]*apicontainer.DockerContainer // taskarn -> (containername -> c.DockerContainer) + idToContainer map[string]*apicontainer.DockerContainer // DockerId -> c.DockerContainer + eniAttachments map[string]*apieni.ENIAttachment // ENIMac -> apieni.ENIAttachment + imageStates map[string]*image.ImageState + ipToTask map[string]string // ip address -> task arn + v3EndpointIDToTask map[string]string // container's v3 endpoint id -> taskarn + v3EndpointIDToDockerID map[string]string // container's v3 endpoint id -> DockerId +} + +// NewTaskEngineState returns a new TaskEngineState +func NewTaskEngineState() TaskEngineState { + return newDockerTaskEngineState() +} + +func newDockerTaskEngineState() *DockerTaskEngineState { + state := &DockerTaskEngineState{} + state.initializeDockerTaskEngineState() + return state +} + +func (state *DockerTaskEngineState) initializeDockerTaskEngineState() { + state.lock.Lock() + defer state.lock.Unlock() + + state.tasks = make(map[string]*apitask.Task) + state.idToTask = make(map[string]string) + state.taskToID = make(map[string]map[string]*apicontainer.DockerContainer) + state.taskToPulledContainer = make(map[string]map[string]*apicontainer.DockerContainer) + state.idToContainer = make(map[string]*apicontainer.DockerContainer) + state.imageStates = 
make(map[string]*image.ImageState) + state.eniAttachments = make(map[string]*apieni.ENIAttachment) + state.ipToTask = make(map[string]string) + state.v3EndpointIDToTask = make(map[string]string) + state.v3EndpointIDToDockerID = make(map[string]string) +} + +// Reset resets all the states +func (state *DockerTaskEngineState) Reset() { + state.initializeDockerTaskEngineState() +} + +// AllTasks returns all of the tasks +func (state *DockerTaskEngineState) AllTasks() []*apitask.Task { + state.lock.RLock() + defer state.lock.RUnlock() + + return state.allTasksUnsafe() +} + +func (state *DockerTaskEngineState) allTasksUnsafe() []*apitask.Task { + ret := make([]*apitask.Task, len(state.tasks)) + ndx := 0 + for _, task := range state.tasks { + ret[ndx] = task + ndx++ + } + return ret +} + +// AllImageStates returns all of the image.ImageStates +func (state *DockerTaskEngineState) AllImageStates() []*image.ImageState { + state.lock.RLock() + defer state.lock.RUnlock() + + return state.allImageStatesUnsafe() +} + +func (state *DockerTaskEngineState) allImageStatesUnsafe() []*image.ImageState { + var allImageStates []*image.ImageState + for _, imageState := range state.imageStates { + allImageStates = append(allImageStates, imageState) + } + return allImageStates +} + +// AllENIAttachments returns all the enis managed by ecs on the instance +func (state *DockerTaskEngineState) AllENIAttachments() []*apieni.ENIAttachment { + state.lock.RLock() + defer state.lock.RUnlock() + + return state.allENIAttachmentsUnsafe() +} + +func (state *DockerTaskEngineState) allENIAttachmentsUnsafe() []*apieni.ENIAttachment { + var allENIAttachments []*apieni.ENIAttachment + for _, v := range state.eniAttachments { + allENIAttachments = append(allENIAttachments, v) + } + + return allENIAttachments +} + +// ENIByMac returns the eni object that match the give mac address +func (state *DockerTaskEngineState) ENIByMac(mac string) (*apieni.ENIAttachment, bool) { + state.lock.RLock() + defer 
state.lock.RUnlock() + + eni, ok := state.eniAttachments[mac] + return eni, ok +} + +// AddENIAttachment adds the eni into the state +func (state *DockerTaskEngineState) AddENIAttachment(eniAttachment *apieni.ENIAttachment) { + if eniAttachment == nil { + seelog.Debug("Cannot add empty eni attachment information") + return + } + + state.lock.Lock() + defer state.lock.Unlock() + + if _, ok := state.eniAttachments[eniAttachment.MACAddress]; !ok { + state.eniAttachments[eniAttachment.MACAddress] = eniAttachment + } else { + seelog.Debugf("Duplicate eni attachment information: %v", eniAttachment) + } + +} + +// RemoveENIAttachment removes the eni from state and stop managing +func (state *DockerTaskEngineState) RemoveENIAttachment(mac string) { + if mac == "" { + seelog.Debug("Cannot remove empty eni attachment information") + return + } + state.lock.Lock() + defer state.lock.Unlock() + if _, ok := state.eniAttachments[mac]; ok { + delete(state.eniAttachments, mac) + } else { + seelog.Debugf("Delete non-existed eni attachment: %v", mac) + } +} + +// GetAllContainerIDs returns all of the Container Ids +func (state *DockerTaskEngineState) GetAllContainerIDs() []string { + state.lock.RLock() + defer state.lock.RUnlock() + + var ids []string + for id := range state.idToTask { + ids = append(ids, id) + } + + return ids +} + +// ContainerByID returns an apicontainer.DockerContainer for a given container ID +func (state *DockerTaskEngineState) ContainerByID(id string) (*apicontainer.DockerContainer, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + c, ok := state.idToContainer[id] + return c, ok +} + +// ContainerMapByArn returns a map of containers belonging to a particular task ARN +func (state *DockerTaskEngineState) ContainerMapByArn(arn string) (map[string]*apicontainer.DockerContainer, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + ret, ok := state.taskToID[arn] + + if !ok { + return ret, ok + } + // Copy the map to avoid data race + mc := 
make(map[string]*apicontainer.DockerContainer) + for k, v := range ret { + mc[k] = v + } + return mc, ok +} + +// PulledContainerMapByArn returns a map of pulled containers belonging to a particular task ARN +func (state *DockerTaskEngineState) PulledContainerMapByArn(arn string) (map[string]*apicontainer.DockerContainer, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + ret, ok := state.taskToPulledContainer[arn] + if !ok { + return ret, ok + } + // Copy the map to avoid data race + mc := make(map[string]*apicontainer.DockerContainer) + for k, v := range ret { + mc[k] = v + } + return mc, ok +} + +// TaskByShortID retrieves the task of a given docker short container id +func (state *DockerTaskEngineState) TaskByShortID(cid string) ([]*apitask.Task, bool) { + containerIDs := state.GetAllContainerIDs() + var tasks []*apitask.Task + for _, id := range containerIDs { + if strings.HasPrefix(id, cid) { + if task, ok := state.TaskByID(id); ok { + tasks = append(tasks, task) + } + } + } + return tasks, len(tasks) > 0 +} + +// TaskByID retrieves the task of a given docker container id +func (state *DockerTaskEngineState) TaskByID(cid string) (*apitask.Task, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + arn, found := state.idToTask[cid] + if !found { + return nil, false + } + return state.taskByArn(arn) +} + +// TaskByArn returns a task for a given ARN +func (state *DockerTaskEngineState) TaskByArn(arn string) (*apitask.Task, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + return state.taskByArn(arn) +} + +func (state *DockerTaskEngineState) taskByArn(arn string) (*apitask.Task, bool) { + t, ok := state.tasks[arn] + return t, ok +} + +// AddTask adds a new task to the state +func (state *DockerTaskEngineState) AddTask(task *apitask.Task) { + state.lock.Lock() + defer state.lock.Unlock() + + state.tasks[task.Arn] = task +} + +// AddPulledContainer adds a pulled container to the state +func (state *DockerTaskEngineState) 
AddPulledContainer(container *apicontainer.DockerContainer, task *apitask.Task) { + state.lock.Lock() + defer state.lock.Unlock() + if task == nil || container == nil { + seelog.Critical("AddPulledContainer called with nil task/container") + return + } + + _, exists := state.tasks[task.Arn] + if !exists { + seelog.Debugf("AddPulledContainer called with unknown task; adding", "arn", task.Arn) + state.tasks[task.Arn] = task + } + + existingMap, exists := state.taskToPulledContainer[task.Arn] + if !exists { + existingMap = make(map[string]*apicontainer.DockerContainer, len(task.Containers)) + state.taskToPulledContainer[task.Arn] = existingMap + } + existingMap[container.Container.Name] = container +} + +// AddContainer adds a container to the state. +// If the container has been added with only a name and no docker-id, this +// updates the state to include the docker id +func (state *DockerTaskEngineState) AddContainer(container *apicontainer.DockerContainer, task *apitask.Task) { + state.lock.Lock() + defer state.lock.Unlock() + if task == nil || container == nil { + seelog.Critical("AddContainer called with nil task/container") + return + } + + _, exists := state.tasks[task.Arn] + if !exists { + seelog.Debugf("AddContainer called with unknown task; adding", "arn", task.Arn) + state.tasks[task.Arn] = task + } + + _, pulledExist := state.taskToPulledContainer[task.Arn] + if pulledExist { + seelog.Debugf("Delete a pulled container named %s from the pulled container map associated with the"+ + " task ARN %s since AddContainer is called", container.Container.Name, task.Arn) + delete(state.taskToPulledContainer[task.Arn], container.Container.Name) + if len(state.taskToPulledContainer[task.Arn]) == 0 { + delete(state.taskToPulledContainer, task.Arn) + } + } + + state.storeIDToContainerTaskUnsafe(container, task) + + dockerID := container.DockerID + v3EndpointID := container.Container.V3EndpointID + // stores the v3EndpointID mappings only if container's dockerID exists 
and container's v3EndpointID has been generated + if dockerID != "" && v3EndpointID != "" { + state.storeV3EndpointIDToTaskUnsafe(v3EndpointID, task.Arn) + state.storeV3EndpointIDToDockerIDUnsafe(v3EndpointID, dockerID) + } + + existingMap, exists := state.taskToID[task.Arn] + if !exists { + existingMap = make(map[string]*apicontainer.DockerContainer, len(task.Containers)) + state.taskToID[task.Arn] = existingMap + } + existingMap[container.Container.Name] = container +} + +// AddImageState adds an image.ImageState to be stored +func (state *DockerTaskEngineState) AddImageState(imageState *image.ImageState) { + if imageState == nil { + seelog.Debug("Cannot add empty image state") + return + } + if imageState.Image.ImageID == "" { + seelog.Debug("Cannot add image state with empty image id") + return + } + state.lock.Lock() + defer state.lock.Unlock() + + state.imageStates[imageState.Image.ImageID] = imageState +} + +// RemoveTask removes a task from this state. It removes all containers and +// other associated metadata. It does acquire the write lock. 
+func (state *DockerTaskEngineState) RemoveTask(task *apitask.Task) { + state.lock.Lock() + defer state.lock.Unlock() + + task, ok := state.tasks[task.Arn] + if !ok { + seelog.Warnf("Failed to locate task %s for removal from state", task.Arn) + return + } + delete(state.tasks, task.Arn) + if ip, ok := state.taskToIPUnsafe(task.Arn); ok { + delete(state.ipToTask, ip) + } + + containerMap, ok := state.taskToID[task.Arn] + if !ok { + seelog.Warnf("Failed to locate containerMap for task %s for removal from state", task.Arn) + return + } + delete(state.taskToID, task.Arn) + + for _, dockerContainer := range containerMap { + state.removeIDToContainerTaskUnsafe(dockerContainer) + // remove v3 endpoint mappings + state.removeV3EndpointIDToTaskContainerUnsafe(dockerContainer.Container.V3EndpointID) + } + delete(state.taskToPulledContainer, task.Arn) +} + +// taskToIPUnsafe gets the ip address for a given task arn +func (state *DockerTaskEngineState) taskToIPUnsafe(arn string) (string, bool) { + for ip, taskARN := range state.ipToTask { + if arn == taskARN { + return ip, true + } + } + + return "", false +} + +// storeIDToContainerTaskUnsafe stores the container in the idToContainer and idToTask maps. The key to the maps is +// either the Docker-generated ID or the agent-generated name (if the ID is not available). If the container is updated +// with an ID, a subsequent call to this function will update the map to use the ID as the key. 
+func (state *DockerTaskEngineState) storeIDToContainerTaskUnsafe(container *apicontainer.DockerContainer, task *apitask.Task) { + if container.DockerID != "" { + // Update the container id to the state + state.idToContainer[container.DockerID] = container + state.idToTask[container.DockerID] = task.Arn + + // Remove the previously added name mapping + delete(state.idToContainer, container.DockerName) + delete(state.idToTask, container.DockerName) + } else if container.DockerName != "" { + // Update the container name mapping to the state when the ID isn't available + state.idToContainer[container.DockerName] = container + state.idToTask[container.DockerName] = task.Arn + } +} + +// removeIDToContainerTaskUnsafe removes the container from the idToContainer and idToTask maps. They key to the maps +// is either the Docker-generated ID or the agent-generated name (if the ID is not available). This function assumes +// that the ID takes precedence and will delete by the ID when the ID is available. +func (state *DockerTaskEngineState) removeIDToContainerTaskUnsafe(container *apicontainer.DockerContainer) { + // The key to these maps is either the Docker ID or agent-generated name. We use the agent-generated name + // before a Docker ID is available. 
+ key := container.DockerID + if key == "" { + key = container.DockerName + } + delete(state.idToTask, key) + delete(state.idToContainer, key) +} + +// removeV3EndpointIDToTaskContainerUnsafe removes the container from v3EndpointIDToTask and v3EndpointIDToDockerID maps +func (state *DockerTaskEngineState) removeV3EndpointIDToTaskContainerUnsafe(v3EndpointID string) { + if v3EndpointID != "" { + delete(state.v3EndpointIDToTask, v3EndpointID) + delete(state.v3EndpointIDToDockerID, v3EndpointID) + } +} + +// RemoveImageState removes an image.ImageState +func (state *DockerTaskEngineState) RemoveImageState(imageState *image.ImageState) { + if imageState == nil { + seelog.Debug("Cannot remove empty image state") + return + } + state.lock.Lock() + defer state.lock.Unlock() + + imageState, ok := state.imageStates[imageState.Image.ImageID] + if !ok { + seelog.Debug("Image State is not found. Cannot be removed") + return + } + delete(state.imageStates, imageState.Image.ImageID) +} + +// AddTaskIPAddress adds ip adddress for a task arn into the state +func (state *DockerTaskEngineState) AddTaskIPAddress(addr string, taskARN string) { + state.lock.Lock() + defer state.lock.Unlock() + + state.ipToTask[addr] = taskARN +} + +// GetTaskByIPAddress gets the task arn for an IP address +func (state *DockerTaskEngineState) GetTaskByIPAddress(addr string) (string, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + taskARN, ok := state.ipToTask[addr] + return taskARN, ok +} + +// GetIPAddressByTaskARN gets the local ip address of a task. 
+func (state *DockerTaskEngineState) GetIPAddressByTaskARN(taskARN string) (string, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + for addr, arn := range state.ipToTask { + if arn == taskARN { + return addr, true + } + } + return "", false +} + +// storeV3EndpointIDToTaskUnsafe adds v3EndpointID -> taskARN mapping to state +func (state *DockerTaskEngineState) storeV3EndpointIDToTaskUnsafe(v3EndpointID, taskARN string) { + state.v3EndpointIDToTask[v3EndpointID] = taskARN +} + +// storeV3EndpointIDToContainerNameUnsafe adds v3EndpointID -> dockerID mapping to state +func (state *DockerTaskEngineState) storeV3EndpointIDToDockerIDUnsafe(v3EndpointID, dockerID string) { + state.v3EndpointIDToDockerID[v3EndpointID] = dockerID +} + +// DockerIDByV3EndpointID returns a docker ID for a given v3 endpoint ID +func (state *DockerTaskEngineState) DockerIDByV3EndpointID(v3EndpointID string) (string, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + dockerID, ok := state.v3EndpointIDToDockerID[v3EndpointID] + return dockerID, ok +} + +// TaskARNByV3EndpointID returns a taskARN for a given v3 endpoint ID +func (state *DockerTaskEngineState) TaskARNByV3EndpointID(v3EndpointID string) (string, bool) { + state.lock.RLock() + defer state.lock.RUnlock() + + taskArn, ok := state.v3EndpointIDToTask[v3EndpointID] + return taskArn, ok +} diff --git a/agent/engine/dockerstate/dockerstate_test.go b/agent/engine/dockerstate/dockerstate_test.go new file mode 100644 index 00000000000..ecee94b3c76 --- /dev/null +++ b/agent/engine/dockerstate/dockerstate_test.go @@ -0,0 +1,532 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerstate + +import ( + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/stretchr/testify/assert" +) + +func TestCreateDockerTaskEngineState(t *testing.T) { + state := NewTaskEngineState() + + if _, ok := state.ContainerByID("test"); ok { + t.Error("Empty state should not have a test container") + } + + if _, ok := state.ContainerMapByArn("test"); ok { + t.Error("Empty state should not have a test container map") + } + + if _, ok := state.PulledContainerMapByArn("test"); ok { + t.Error("Empty state should not have a test pulled container map") + } + + if _, ok := state.TaskByShortID("test"); ok { + t.Error("Empty state should not have a test taskid") + } + + if _, ok := state.TaskByID("test"); ok { + t.Error("Empty state should not have a test taskid") + } + + if len(state.AllTasks()) != 0 { + t.Error("Empty state should have no tasks") + } + + if len(state.AllImageStates()) != 0 { + t.Error("Empty state should have no image states") + } + + assert.Len(t, state.(*DockerTaskEngineState).AllENIAttachments(), 0) + task, ok := state.TaskByShortID("test") + if assert.Empty(t, ok, "Empty state should have no tasks") { + assert.Empty(t, task, "Empty state should have no tasks") + } + + assert.Empty(t, state.GetAllContainerIDs(), "Empty state should have no containers") +} + +func TestAddTask(t *testing.T) { + state := NewTaskEngineState() + + testTask := &apitask.Task{Arn: "test"} + state.AddTask(testTask) + + if len(state.AllTasks()) != 1 { + t.Error("Should have 1 task") + } + + task, ok := state.TaskByArn("test") + 
if !ok { + t.Error("Couldn't find the test task") + } + if task.Arn != "test" { + t.Error("Wrong task retrieved") + } +} + +func TestAddRemoveENIAttachment(t *testing.T) { + state := NewTaskEngineState() + + attachment := &apieni.ENIAttachment{ + TaskARN: "taskarn", + AttachmentARN: "eni1", + MACAddress: "mac1", + } + + state.AddENIAttachment(attachment) + assert.Len(t, state.(*DockerTaskEngineState).AllENIAttachments(), 1) + eni, ok := state.ENIByMac("mac1") + assert.True(t, ok) + assert.Equal(t, eni.TaskARN, attachment.TaskARN) + + eni, ok = state.ENIByMac("non-mac") + assert.False(t, ok) + assert.Nil(t, eni) + + // Remove the attachment from state + state.RemoveENIAttachment(attachment.MACAddress) + assert.Len(t, state.AllImageStates(), 0) + eni, ok = state.ENIByMac("mac1") + assert.False(t, ok) + assert.Nil(t, eni) +} + +func TestTwophaseAddContainer(t *testing.T) { + state := NewTaskEngineState() + testTask := &apitask.Task{Arn: "test", Containers: []*apicontainer.Container{{ + Name: "testContainer", + }}} + state.AddTask(testTask) + + state.AddContainer(&apicontainer.DockerContainer{DockerName: "dockerName", Container: testTask.Containers[0]}, testTask) + + if len(state.AllTasks()) != 1 { + t.Fatal("Should have 1 task") + } + + task, ok := state.TaskByArn("test") + if !ok { + t.Error("Couldn't find the test task") + } + if task.Arn != "test" { + t.Error("Wrong task retrieved") + } + + containerMap, ok := state.ContainerMapByArn("test") + if !ok { + t.Fatal("Could not get container map") + } + + container, ok := containerMap["testContainer"] + if !ok { + t.Fatal("Could not get container") + } + if container.DockerName != "dockerName" { + t.Fatal("Incorrect docker name") + } + if container.DockerID != "" { + t.Fatal("DockerID Should be blank") + } + + state.AddContainer(&apicontainer.DockerContainer{DockerName: "dockerName", Container: testTask.Containers[0], DockerID: "did"}, testTask) + + containerMap, ok = state.ContainerMapByArn("test") + if !ok { + 
t.Fatal("Could not get container map") + } + + container, ok = containerMap["testContainer"] + if !ok { + t.Fatal("Could not get container") + } + if container.DockerName != "dockerName" { + t.Fatal("Incorrect docker name") + } + if container.DockerID != "did" { + t.Fatal("DockerID should have been updated") + } + + container, ok = state.ContainerByID("did") + if !ok { + t.Fatal("Could not get container by id") + } + if container.DockerName != "dockerName" || container.DockerID != "did" { + t.Fatal("Incorrect container fetched") + } +} + +func TestRemoveTask(t *testing.T) { + state := NewTaskEngineState() + testContainer1 := &apicontainer.Container{ + Name: "c1", + } + + containerID := "did" + testDockerContainer1 := &apicontainer.DockerContainer{ + DockerID: containerID, + Container: testContainer1, + } + testContainer2 := &apicontainer.Container{ + Name: "c2", + } + testDockerContainer2 := &apicontainer.DockerContainer{ + // DockerName is used before the DockerID is assigned + DockerName: "docker-name-2", + Container: testContainer2, + } + testPulledContainer := &apicontainer.Container{ + Name: "pulled", + } + testPulledDockerContainer := &apicontainer.DockerContainer{ + Container: testPulledContainer, + } + testTask := &apitask.Task{ + Arn: "t1", + Containers: []*apicontainer.Container{testContainer1, testContainer2}, + } + + state.AddTask(testTask) + state.AddContainer(testDockerContainer1, testTask) + state.AddContainer(testDockerContainer2, testTask) + state.AddPulledContainer(testPulledDockerContainer, testTask) + addr := "169.254.170.3" + state.AddTaskIPAddress(addr, testTask.Arn) + engineState := state.(*DockerTaskEngineState) + + assert.Len(t, state.AllTasks(), 1, "Expected one task") + assert.Len(t, engineState.idToTask, 2, "idToTask map should have two entries") + assert.Len(t, engineState.idToContainer, 2, "idToContainer map should have two entries") + assert.Len(t, engineState.taskToPulledContainer, 1, "taskToPulledContainer map should have one 
entry") + taskARNFromIP, ok := state.GetTaskByIPAddress(addr) + assert.True(t, ok) + assert.Equal(t, testTask.Arn, taskARNFromIP) + + state.RemoveTask(testTask) + + assert.Len(t, state.AllTasks(), 0, "Expected task to be removed") + assert.Len(t, engineState.idToTask, 0, "idToTask map should be empty") + assert.Len(t, engineState.idToContainer, 0, "idToContainer map should be empty") + assert.Len(t, engineState.taskToPulledContainer, 0, "taskToPulledContainer map should be empty") + _, ok = state.GetTaskByIPAddress(addr) + assert.False(t, ok) +} + +func TestAddImageState(t *testing.T) { + state := NewTaskEngineState() + + testImage := &image.Image{ImageID: "sha256:imagedigest"} + testImageState := &image.ImageState{Image: testImage} + state.AddImageState(testImageState) + + if len(state.AllImageStates()) != 1 { + t.Error("Error adding image state") + } + + for _, imageState := range state.AllImageStates() { + if imageState.Image.ImageID != testImage.ImageID { + t.Error("Error in retrieving image state added") + } + } +} + +func TestAddEmptyImageState(t *testing.T) { + state := NewTaskEngineState() + state.AddImageState(nil) + + if len(state.AllImageStates()) != 0 { + t.Error("Error adding empty image state") + } +} + +func TestAddEmptyIdImageState(t *testing.T) { + state := NewTaskEngineState() + + testImage := &image.Image{ImageID: ""} + testImageState := &image.ImageState{Image: testImage} + state.AddImageState(testImageState) + + if len(state.AllImageStates()) != 0 { + t.Error("Error adding image state with empty Image Id") + } +} + +func TestRemoveImageState(t *testing.T) { + state := NewTaskEngineState() + + testImage := &image.Image{ImageID: "sha256:imagedigest"} + testImageState := &image.ImageState{Image: testImage} + state.AddImageState(testImageState) + + if len(state.AllImageStates()) != 1 { + t.Error("Error adding image state") + } + state.RemoveImageState(testImageState) + if len(state.AllImageStates()) != 0 { + t.Error("Error removing image state") + 
} +} + +func TestRemoveEmptyImageState(t *testing.T) { + state := NewTaskEngineState() + + testImage := &image.Image{ImageID: "sha256:imagedigest"} + testImageState := &image.ImageState{Image: testImage} + state.AddImageState(testImageState) + + if len(state.AllImageStates()) != 1 { + t.Error("Error adding image state") + } + state.RemoveImageState(nil) + if len(state.AllImageStates()) == 0 { + t.Error("Error removing empty image state") + } +} + +func TestRemoveNonExistingImageState(t *testing.T) { + state := NewTaskEngineState() + + testImage := &image.Image{ImageID: "sha256:imagedigest"} + testImageState := &image.ImageState{Image: testImage} + state.AddImageState(testImageState) + + if len(state.AllImageStates()) != 1 { + t.Error("Error adding image state") + } + testImage1 := &image.Image{ImageID: "sha256:imagedigest1"} + testImageState1 := &image.ImageState{Image: testImage1} + state.RemoveImageState(testImageState1) + if len(state.AllImageStates()) == 0 { + t.Error("Error removing incorrect image state") + } +} + +// TestAddContainer tests first add container with docker name and +// then add the container with dockerID +func TestAddContainerNameAndID(t *testing.T) { + state := NewTaskEngineState() + + task := &apitask.Task{ + Arn: "taskArn", + } + container := &apicontainer.DockerContainer{ + DockerName: "ecs-test-container-1", + Container: &apicontainer.Container{ + Name: "test", + }, + } + state.AddTask(task) + state.AddContainer(container, task) + containerMap, ok := state.ContainerMapByArn(task.Arn) + assert.True(t, ok) + assert.Len(t, containerMap, 1) + + assert.Len(t, state.GetAllContainerIDs(), 1) + + _, ok = state.ContainerByID(container.DockerName) + assert.True(t, ok, "container with DockerName should be added to the state") + + container = &apicontainer.DockerContainer{ + DockerName: "ecs-test-container-1", + DockerID: "dockerid", + Container: &apicontainer.Container{ + Name: "test", + }, + } + state.AddContainer(container, task) + assert.Len(t, 
containerMap, 1) + assert.Len(t, state.GetAllContainerIDs(), 1) + _, ok = state.ContainerByID(container.DockerID) + assert.True(t, ok, "container with DockerName should be added to the state") + _, ok = state.ContainerByID(container.DockerName) + assert.False(t, ok, "container with DockerName should be added to the state") +} + +// TestAddPulledContainer tests add a pulled container. +// A pulled container should exist in the pulled container map, +// but should not exist in the container map +func TestAddPulledContainer(t *testing.T) { + state := NewTaskEngineState() + + task := &apitask.Task{ + Arn: "taskArn", + } + pulledContainer := &apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test", + }, + } + state.AddTask(task) + state.AddPulledContainer(pulledContainer, task) + pulledContainerMap, ok := state.PulledContainerMapByArn(task.Arn) + assert.True(t, ok) + assert.Len(t, pulledContainerMap, 1) + _, exist := state.ContainerMapByArn(task.Arn) + assert.False(t, exist) + assert.Len(t, state.GetAllContainerIDs(), 0) +} + +// TestPulledContainerToAddContainer tests a pulled container +// is removed from the pulled container map when it transits +// from PULLED state to CREATED state +func TestPulledContainerToAddContainer(t *testing.T) { + state := NewTaskEngineState() + + task := &apitask.Task{ + Arn: "taskArn", + } + pulledContainer := &apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test", + }, + } + state.AddTask(task) + state.AddPulledContainer(pulledContainer, task) + pulledContainerMap, ok := state.PulledContainerMapByArn(task.Arn) + assert.True(t, ok) + assert.Len(t, pulledContainerMap, 1) + assert.Len(t, state.GetAllContainerIDs(), 0) + pulledContainer.DockerID = "dockerid" + state.AddContainer(pulledContainer, task) + containerMap, exist := state.ContainerMapByArn(task.Arn) + assert.True(t, exist) + assert.Len(t, containerMap, 1) + _, ok = state.ContainerByID(pulledContainer.DockerID) + assert.True(t, 
ok, "container with DockerID should be added to the state") + postPulledContainerMap, _ := state.PulledContainerMapByArn(task.Arn) + assert.Len(t, postPulledContainerMap, 0) +} + +func TestTaskIPAddress(t *testing.T) { + state := newDockerTaskEngineState() + addr := "169.254.170.3" + taskARN := "t1" + state.AddTaskIPAddress(addr, taskARN) + taskARNFromIP, ok := state.GetTaskByIPAddress(addr) + assert.True(t, ok) + assert.Equal(t, taskARN, taskARNFromIP) + ipFromTaskARN, ok := state.GetIPAddressByTaskARN(taskARN) + assert.True(t, ok) + assert.Equal(t, addr, ipFromTaskARN) + taskIP, ok := state.taskToIPUnsafe(taskARN) + assert.True(t, ok) + assert.Equal(t, addr, taskIP) +} + +// TestAddContainerAddV3EndpointID tests that when we add a container, containers' v3EndpointID mappings +// will be added to state +func TestAddContainerAddV3EndpointID(t *testing.T) { + state := newDockerTaskEngineState() + assert.NotNil(t, state.v3EndpointIDToTask) + assert.NotNil(t, state.v3EndpointIDToDockerID) + + container1 := &apicontainer.Container{ + Name: "containerName1", + V3EndpointID: "new-uuid-1", + } + dockerContainer1 := &apicontainer.DockerContainer{ + DockerID: "dockerID1", + Container: container1, + } + container2 := &apicontainer.Container{ + Name: "containerName2", + V3EndpointID: "new-uuid-2", + } + dockerContainer2 := &apicontainer.DockerContainer{ + DockerID: "dockerID2", + Container: container2, + } + + task := &apitask.Task{ + Arn: "taskArn", + Containers: []*apicontainer.Container{ + container1, + container2, + }, + } + + state.AddTask(task) + state.AddContainer(dockerContainer1, task) + state.AddContainer(dockerContainer2, task) + + // check that v3EndpointID mappings have been added to state + addedTaskArn1, _ := state.v3EndpointIDToTask["new-uuid-1"] + assert.Equal(t, addedTaskArn1, "taskArn") + + addedDockerID1, _ := state.v3EndpointIDToDockerID["new-uuid-1"] + assert.Equal(t, addedDockerID1, "dockerID1") + + addedTaskArn2, _ := 
state.v3EndpointIDToTask["new-uuid-2"] + assert.Equal(t, addedTaskArn2, "taskArn") + + addedDockerID2, _ := state.v3EndpointIDToDockerID["new-uuid-2"] + assert.Equal(t, addedDockerID2, "dockerID2") +} + +// TestRemoveTaskRemoveV3EndpointID tests that when we remove the task, containers' v3EndpointIDs will be +// removed from state +func TestRemoveTaskRemoveV3EndpointID(t *testing.T) { + state := newDockerTaskEngineState() + assert.NotNil(t, state.v3EndpointIDToTask) + assert.NotNil(t, state.v3EndpointIDToDockerID) + + container1 := &apicontainer.Container{ + Name: "containerName1", + V3EndpointID: "new-uuid-1", + } + dockerContainer1 := &apicontainer.DockerContainer{ + DockerID: "dockerID1", + Container: container1, + } + container2 := &apicontainer.Container{ + Name: "containerName2", + V3EndpointID: "new-uuid-2", + } + dockerContainer2 := &apicontainer.DockerContainer{ + DockerName: "dockerID2", + Container: container2, + } + + task := &apitask.Task{ + Arn: "taskArn", + Containers: []*apicontainer.Container{ + container1, + container2, + }, + } + + state.AddTask(task) + state.AddContainer(dockerContainer1, task) + state.AddContainer(dockerContainer2, task) + state.RemoveTask(task) + + // check that v3EndpointIDToTask and v3EndpointIDToDockerID maps have been cleaned up + _, ok := state.v3EndpointIDToTask["new-uuid-1"] + assert.False(t, ok) + _, ok = state.v3EndpointIDToDockerID["new-uuid-1"] + assert.False(t, ok) + _, ok = state.v3EndpointIDToTask["new-uuid-2"] + assert.False(t, ok) + _, ok = state.v3EndpointIDToDockerID["new-uuid-2"] + assert.False(t, ok) +} diff --git a/agent/engine/dockerstate/generate_mocks.go b/agent/engine/dockerstate/generate_mocks.go new file mode 100644 index 00000000000..26e9a41d765 --- /dev/null +++ b/agent/engine/dockerstate/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package dockerstate + +//go:generate mockgen -destination=mocks/dockerstate_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/engine/dockerstate TaskEngineState diff --git a/agent/engine/dockerstate/json.go b/agent/engine/dockerstate/json.go new file mode 100644 index 00000000000..1c2e12b692c --- /dev/null +++ b/agent/engine/dockerstate/json.go @@ -0,0 +1,102 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dockerstate + +import ( + "encoding/json" + "errors" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/engine/image" +) + +// These bits of information should be enough to reconstruct the entire +// DockerTaskEngine state +type savedState struct { + Tasks []*apitask.Task + IdToContainer map[string]*apicontainer.DockerContainer `json:"IdToContainer"` // DockerId -> apicontainer.DockerContainer + IdToTask map[string]string `json:"IdToTask"` // DockerId -> taskarn + ImageStates []*image.ImageState + ENIAttachments []*apieni.ENIAttachment `json:"ENIAttachments"` + IPToTask map[string]string `json:"IPToTask"` +} + +func (state *DockerTaskEngineState) MarshalJSON() ([]byte, error) { + state.lock.RLock() + defer state.lock.RUnlock() + toSave := savedState{ + Tasks: state.allTasksUnsafe(), + IdToContainer: state.idToContainer, + IdToTask: state.idToTask, + ImageStates: state.allImageStatesUnsafe(), + ENIAttachments: state.allENIAttachmentsUnsafe(), + IPToTask: state.ipToTask, + } + return json.Marshal(toSave) +} + +func (state *DockerTaskEngineState) UnmarshalJSON(data []byte) error { + var saved savedState + + err := json.Unmarshal(data, &saved) + if err != nil { + return err + } + + // run precheck to shake out all the errors before resetting state. + precheckState := newDockerTaskEngineState() + for _, task := range saved.Tasks { + precheckState.AddTask(task) + } + for id, container := range saved.IdToContainer { + taskArn, ok := saved.IdToTask[id] + if !ok { + return errors.New("Could not unmarshal state; incomplete save. There was no task for docker id " + id) + } + task, ok := precheckState.TaskByArn(taskArn) + if !ok { + return errors.New("Could not unmarshal state; incomplete save. 
There was no task for arn " + taskArn) + } + _, ok = task.ContainerByName(container.Container.Name) + if !ok { + return errors.New("Could not resolve a container into a task based on name: " + task.String() + " -- " + container.String()) + } + } + + // reset state and safely populate with saved + state.Reset() + for _, task := range saved.Tasks { + state.AddTask(task) + } + for _, imageState := range saved.ImageStates { + state.AddImageState(imageState) + } + for id, container := range saved.IdToContainer { + taskArn := saved.IdToTask[id] + task, _ := state.TaskByArn(taskArn) + taskContainer, _ := task.ContainerByName(container.Container.Name) + container.Container = taskContainer + state.AddContainer(container, task) + } + for _, eniAttachment := range saved.ENIAttachments { + state.AddENIAttachment(eniAttachment) + } + for ipAddr, taskARN := range saved.IPToTask { + state.AddTaskIPAddress(ipAddr, taskARN) + } + + return nil +} diff --git a/agent/engine/dockerstate/json_test.go b/agent/engine/dockerstate/json_test.go new file mode 100644 index 00000000000..e3ba6323f7b --- /dev/null +++ b/agent/engine/dockerstate/json_test.go @@ -0,0 +1,323 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dockerstate + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + stateFileContents = ` +{ + "Tasks": [ + { + "Arn": "task1", + "Family": "mytask", + "Version": "1", + "Containers": [ + { + "Name": "foo", + "V3EndpointID": "fooV3EndpointID", + "Image": "myimage:latest", + "ImageID": "sha256:invalid", + "Command": null, + "Cpu": 0, + "Memory": 256, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": null, + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.18" + }, + "registryAuthentication": { + "type": "ecr", + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "2013" + } + }, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "ContainerDependencies": [ + { + "ContainerName": "~internal~ecs~pause", + "SatisfiedStatus": "RESOURCES_PROVISIONED", + "DependentStatus": "PULLED" + } + ] + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "AppliedStatus": "NONE", + "ApplyingError": null, + "SentStatus": "NONE", + "KnownPortBindingsUnsafe": [] + }, + { + "Name": "~internal~ecs~pause", + "V3EndpointID": "pauseV3EndpointID", + "Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageID": "", + "Command": null, + "Cpu": 0, + "Memory": 0, + "Links": null, + "volumesFrom": null, + "mountPoints": null, + "portMappings": null, + "Essential": true, + "EntryPoint": null, + "environment": null, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": null, + "hostConfig": null, + "version": null + }, + "registryAuthentication": null, + "desiredStatus": "RESOURCES_PROVISIONED", + "KnownStatus": "RESOURCES_PROVISIONED", + "TransitionDependencySet": { + "ContainerDependencies": null + }, + "RunDependencies": null, + "IsInternal": "CNI_PAUSE", + 
"AppliedStatus": "NONE", + "ApplyingError": null, + "SentStatus": "NONE", + "KnownPortBindingsUnsafe": [], + "SteadyStateStatus": "RESOURCES_PROVISIONED" + } + ], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2017-11-01T20:24:21.449897483Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 9, + "StopSequenceNumber": 0, + "ENI": { + "ec2Id": "eni-abcd", + "IPV4Addresses": [ + { + "Primary": true, + "Address": "10.0.0.142" + } + ], + "IPV6Addresses": null, + "MacAddress": "0a:1b:2c:3d:4e:5f" + }, + "associations": [ + { + "containers": ["foo"], + "content": { + "encoding": "base64", + "value": "val" + }, + "name": "dev1", + "type": "elastic-inference" + } + ] + } + ], + "IdToContainer": { + "042fd42cf3016526ccff43ef6ccb2c6a4b6f112bb74d5175cbebca75059b0c38": { + "DockerId": "042fd42cf3016526ccff43ef6ccb2c6a4b6f112bb74d5175cbebca75059b0c38", + "DockerName": "ecs-mytask-1-internalecspause-a4d2fdeaf1eed2cf8001", + "Container": { + "Name": "~internal~ecs~pause", + "Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageID": "", + "Command": null, + "Cpu": 0, + "Memory": 0, + "Links": null, + "volumesFrom": null, + "mountPoints": null, + "portMappings": null, + "Essential": true, + "EntryPoint": null, + "environment": null, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": null, + "hostConfig": null, + "version": null + }, + "registryAuthentication": null, + "desiredStatus": "RESOURCES_PROVISIONED", + "KnownStatus": "RESOURCES_PROVISIONED", + "TransitionDependencySet": { + "ContainerDependencies": null + }, + "RunDependencies": null, + "IsInternal": "CNI_PAUSE", + "AppliedStatus": "NONE", + "ApplyingError": null, + "SentStatus": "NONE", + "KnownPortBindingsUnsafe": [], + "SteadyStateStatus": "RESOURCES_PROVISIONED" + } + }, + "40109a71187ddd35effcd4e20067f97c140cd8ca7bef6a62028743c7f5b88a53": { + "DockerId": "40109a71187ddd35effcd4e20067f97c140cd8ca7bef6a62028743c7f5b88a53", + "DockerName": 
"ecs-mytask-1-foo-96ec97fe92a5e9dc4f00", + "Container": { + "Name": "foo", + "Image": "746413922013.dkr.ecr.us-west-2.amazonaws.com/fortune-server:latest", + "ImageID": "sha256:invalid", + "Command": null, + "Cpu": 0, + "Memory": 256, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": null, + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.18" + }, + "registryAuthentication": { + "type": "ecr", + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "746413922013" + } + }, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "ContainerDependencies": [ + { + "ContainerName": "~internal~ecs~pause", + "SatisfiedStatus": "RESOURCES_PROVISIONED", + "DependentStatus": "PULLED" + } + ] + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "AppliedStatus": "NONE", + "ApplyingError": null, + "SentStatus": "NONE", + "KnownPortBindingsUnsafe": [] + } + } + }, + "IdToTask": { + "042fd42cf3016526ccff43ef6ccb2c6a4b6f112bb74d5175cbebca75059b0c38": "task1", + "40109a71187ddd35effcd4e20067f97c140cd8ca7bef6a62028743c7f5b88a53": "task1" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:invalid", + "Names": [ + "myimage:latest" + ], + "Size": 150947595 + }, + "PulledAt": "2017-11-01T15:26:56.610709526Z", + "LastUsedAt": "2017-11-01T15:26:56.610709991Z", + "PullSucceeded": true + } + ], + "ENIAttachments": [ + { + "taskArn": "task1", + "attachmentArn": "attachment1", + "attachSent": true, + "macAddress": "0a:1b:2c:3d:4e:5f", + "status": 1, + "expiresAt": "2017-11-01T15:29:39.239357758Z" + } + ], + "IPToTask": { + "169.254.172.2": "task1" + } +} +` +) + +func TestUnmarshalMarshal(t *testing.T) { + // Create a new state object + state := newDockerTaskEngineState() + // Validate all the upper 
level fields unmarshaled + validateUnmarshaledState(t, state, []byte(stateFileContents)) + // Marshal the state again + stateContents, err := state.MarshalJSON() + assert.NoError(t, err) + t.Logf("Validating marshaled state by unmarshaling it again") + validateUnmarshaledState(t, state, stateContents) +} + +func validateUnmarshaledState(t *testing.T, state *DockerTaskEngineState, contents []byte) { + state.initializeDockerTaskEngineState() + // Load state from string + err := state.UnmarshalJSON(contents) + assert.NoError(t, err) + tasks := state.AllTasks() + assert.Len(t, tasks, 1) + assert.Equal(t, "task1", tasks[0].Arn) + _, ok := state.idToContainer["042fd42cf3016526ccff43ef6ccb2c6a4b6f112bb74d5175cbebca75059b0c38"] + assert.True(t, ok) + _, ok = state.idToContainer["40109a71187ddd35effcd4e20067f97c140cd8ca7bef6a62028743c7f5b88a53"] + assert.True(t, ok) + images := state.AllImageStates() + assert.Len(t, images, 1) + assert.Equal(t, "sha256:invalid", images[0].Image.ImageID) + attachments := state.AllENIAttachments() + assert.Len(t, attachments, 1) + assert.Equal(t, "attachment1", attachments[0].AttachmentARN) + _, ok = state.ipToTask["169.254.172.2"] + assert.True(t, ok, fmt.Sprintf("%s", state.ipToTask)) + + // verify v3EndpointID mappings have been correctly added + fooTaskArn, ok := state.v3EndpointIDToTask["fooV3EndpointID"] + assert.Equal(t, fooTaskArn, "task1") + pauseTaskArn, ok := state.v3EndpointIDToTask["pauseV3EndpointID"] + assert.Equal(t, pauseTaskArn, "task1") + fooContainerID, ok := state.v3EndpointIDToDockerID["fooV3EndpointID"] + assert.Equal(t, fooContainerID, "40109a71187ddd35effcd4e20067f97c140cd8ca7bef6a62028743c7f5b88a53") + pauseContainerID, ok := state.v3EndpointIDToDockerID["pauseV3EndpointID"] + assert.Equal(t, pauseContainerID, "042fd42cf3016526ccff43ef6ccb2c6a4b6f112bb74d5175cbebca75059b0c38") +} diff --git a/agent/engine/dockerstate/mocks/dockerstate_mocks.go b/agent/engine/dockerstate/mocks/dockerstate_mocks.go new file mode 100644 
index 00000000000..c690c2d7c6b --- /dev/null +++ b/agent/engine/dockerstate/mocks/dockerstate_mocks.go @@ -0,0 +1,422 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/engine/dockerstate (interfaces: TaskEngineState) + +// Package mock_dockerstate is a generated GoMock package. +package mock_dockerstate + +import ( + reflect "reflect" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + eni "github.com/aws/amazon-ecs-agent/agent/api/eni" + task "github.com/aws/amazon-ecs-agent/agent/api/task" + image "github.com/aws/amazon-ecs-agent/agent/engine/image" + gomock "github.com/golang/mock/gomock" +) + +// MockTaskEngineState is a mock of TaskEngineState interface +type MockTaskEngineState struct { + ctrl *gomock.Controller + recorder *MockTaskEngineStateMockRecorder +} + +// MockTaskEngineStateMockRecorder is the mock recorder for MockTaskEngineState +type MockTaskEngineStateMockRecorder struct { + mock *MockTaskEngineState +} + +// NewMockTaskEngineState creates a new mock instance +func NewMockTaskEngineState(ctrl *gomock.Controller) *MockTaskEngineState { + mock := &MockTaskEngineState{ctrl: ctrl} + mock.recorder = &MockTaskEngineStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTaskEngineState) EXPECT() *MockTaskEngineStateMockRecorder { + return 
m.recorder +} + +// AddContainer mocks base method +func (m *MockTaskEngineState) AddContainer(arg0 *container.DockerContainer, arg1 *task.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddContainer", arg0, arg1) +} + +// AddContainer indicates an expected call of AddContainer +func (mr *MockTaskEngineStateMockRecorder) AddContainer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddContainer", reflect.TypeOf((*MockTaskEngineState)(nil).AddContainer), arg0, arg1) +} + +// AddENIAttachment mocks base method +func (m *MockTaskEngineState) AddENIAttachment(arg0 *eni.ENIAttachment) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddENIAttachment", arg0) +} + +// AddENIAttachment indicates an expected call of AddENIAttachment +func (mr *MockTaskEngineStateMockRecorder) AddENIAttachment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddENIAttachment", reflect.TypeOf((*MockTaskEngineState)(nil).AddENIAttachment), arg0) +} + +// AddImageState mocks base method +func (m *MockTaskEngineState) AddImageState(arg0 *image.ImageState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddImageState", arg0) +} + +// AddImageState indicates an expected call of AddImageState +func (mr *MockTaskEngineStateMockRecorder) AddImageState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddImageState", reflect.TypeOf((*MockTaskEngineState)(nil).AddImageState), arg0) +} + +// AddPulledContainer mocks base method +func (m *MockTaskEngineState) AddPulledContainer(arg0 *container.DockerContainer, arg1 *task.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddPulledContainer", arg0, arg1) +} + +// AddPulledContainer indicates an expected call of AddPulledContainer +func (mr *MockTaskEngineStateMockRecorder) AddPulledContainer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPulledContainer", reflect.TypeOf((*MockTaskEngineState)(nil).AddPulledContainer), arg0, arg1) +} + +// AddTask mocks base method +func (m *MockTaskEngineState) AddTask(arg0 *task.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTask", arg0) +} + +// AddTask indicates an expected call of AddTask +func (mr *MockTaskEngineStateMockRecorder) AddTask(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTask", reflect.TypeOf((*MockTaskEngineState)(nil).AddTask), arg0) +} + +// AddTaskIPAddress mocks base method +func (m *MockTaskEngineState) AddTaskIPAddress(arg0, arg1 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTaskIPAddress", arg0, arg1) +} + +// AddTaskIPAddress indicates an expected call of AddTaskIPAddress +func (mr *MockTaskEngineStateMockRecorder) AddTaskIPAddress(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTaskIPAddress", reflect.TypeOf((*MockTaskEngineState)(nil).AddTaskIPAddress), arg0, arg1) +} + +// AllENIAttachments mocks base method +func (m *MockTaskEngineState) AllENIAttachments() []*eni.ENIAttachment { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllENIAttachments") + ret0, _ := ret[0].([]*eni.ENIAttachment) + return ret0 +} + +// AllENIAttachments indicates an expected call of AllENIAttachments +func (mr *MockTaskEngineStateMockRecorder) AllENIAttachments() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllENIAttachments", reflect.TypeOf((*MockTaskEngineState)(nil).AllENIAttachments)) +} + +// AllImageStates mocks base method +func (m *MockTaskEngineState) AllImageStates() []*image.ImageState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllImageStates") + ret0, _ := ret[0].([]*image.ImageState) + return ret0 +} + +// AllImageStates indicates an expected call of AllImageStates +func (mr 
*MockTaskEngineStateMockRecorder) AllImageStates() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllImageStates", reflect.TypeOf((*MockTaskEngineState)(nil).AllImageStates)) +} + +// AllTasks mocks base method +func (m *MockTaskEngineState) AllTasks() []*task.Task { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllTasks") + ret0, _ := ret[0].([]*task.Task) + return ret0 +} + +// AllTasks indicates an expected call of AllTasks +func (mr *MockTaskEngineStateMockRecorder) AllTasks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllTasks", reflect.TypeOf((*MockTaskEngineState)(nil).AllTasks)) +} + +// ContainerByID mocks base method +func (m *MockTaskEngineState) ContainerByID(arg0 string) (*container.DockerContainer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerByID", arg0) + ret0, _ := ret[0].(*container.DockerContainer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// ContainerByID indicates an expected call of ContainerByID +func (mr *MockTaskEngineStateMockRecorder) ContainerByID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerByID", reflect.TypeOf((*MockTaskEngineState)(nil).ContainerByID), arg0) +} + +// ContainerMapByArn mocks base method +func (m *MockTaskEngineState) ContainerMapByArn(arg0 string) (map[string]*container.DockerContainer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerMapByArn", arg0) + ret0, _ := ret[0].(map[string]*container.DockerContainer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// ContainerMapByArn indicates an expected call of ContainerMapByArn +func (mr *MockTaskEngineStateMockRecorder) ContainerMapByArn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerMapByArn", reflect.TypeOf((*MockTaskEngineState)(nil).ContainerMapByArn), arg0) +} + 
+// DockerIDByV3EndpointID mocks base method +func (m *MockTaskEngineState) DockerIDByV3EndpointID(arg0 string) (string, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DockerIDByV3EndpointID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// DockerIDByV3EndpointID indicates an expected call of DockerIDByV3EndpointID +func (mr *MockTaskEngineStateMockRecorder) DockerIDByV3EndpointID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DockerIDByV3EndpointID", reflect.TypeOf((*MockTaskEngineState)(nil).DockerIDByV3EndpointID), arg0) +} + +// ENIByMac mocks base method +func (m *MockTaskEngineState) ENIByMac(arg0 string) (*eni.ENIAttachment, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ENIByMac", arg0) + ret0, _ := ret[0].(*eni.ENIAttachment) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// ENIByMac indicates an expected call of ENIByMac +func (mr *MockTaskEngineStateMockRecorder) ENIByMac(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ENIByMac", reflect.TypeOf((*MockTaskEngineState)(nil).ENIByMac), arg0) +} + +// GetAllContainerIDs mocks base method +func (m *MockTaskEngineState) GetAllContainerIDs() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllContainerIDs") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetAllContainerIDs indicates an expected call of GetAllContainerIDs +func (mr *MockTaskEngineStateMockRecorder) GetAllContainerIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllContainerIDs", reflect.TypeOf((*MockTaskEngineState)(nil).GetAllContainerIDs)) +} + +// GetIPAddressByTaskARN mocks base method +func (m *MockTaskEngineState) GetIPAddressByTaskARN(arg0 string) (string, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIPAddressByTaskARN", arg0) + ret0, _ := ret[0].(string) + 
ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetIPAddressByTaskARN indicates an expected call of GetIPAddressByTaskARN +func (mr *MockTaskEngineStateMockRecorder) GetIPAddressByTaskARN(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPAddressByTaskARN", reflect.TypeOf((*MockTaskEngineState)(nil).GetIPAddressByTaskARN), arg0) +} + +// GetTaskByIPAddress mocks base method +func (m *MockTaskEngineState) GetTaskByIPAddress(arg0 string) (string, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskByIPAddress", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetTaskByIPAddress indicates an expected call of GetTaskByIPAddress +func (mr *MockTaskEngineStateMockRecorder) GetTaskByIPAddress(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskByIPAddress", reflect.TypeOf((*MockTaskEngineState)(nil).GetTaskByIPAddress), arg0) +} + +// MarshalJSON mocks base method +func (m *MockTaskEngineState) MarshalJSON() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarshalJSON") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarshalJSON indicates an expected call of MarshalJSON +func (mr *MockTaskEngineStateMockRecorder) MarshalJSON() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarshalJSON", reflect.TypeOf((*MockTaskEngineState)(nil).MarshalJSON)) +} + +// PulledContainerMapByArn mocks base method +func (m *MockTaskEngineState) PulledContainerMapByArn(arg0 string) (map[string]*container.DockerContainer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PulledContainerMapByArn", arg0) + ret0, _ := ret[0].(map[string]*container.DockerContainer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// PulledContainerMapByArn indicates an expected call of PulledContainerMapByArn +func 
(mr *MockTaskEngineStateMockRecorder) PulledContainerMapByArn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PulledContainerMapByArn", reflect.TypeOf((*MockTaskEngineState)(nil).PulledContainerMapByArn), arg0) +} + +// RemoveENIAttachment mocks base method +func (m *MockTaskEngineState) RemoveENIAttachment(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveENIAttachment", arg0) +} + +// RemoveENIAttachment indicates an expected call of RemoveENIAttachment +func (mr *MockTaskEngineStateMockRecorder) RemoveENIAttachment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveENIAttachment", reflect.TypeOf((*MockTaskEngineState)(nil).RemoveENIAttachment), arg0) +} + +// RemoveImageState mocks base method +func (m *MockTaskEngineState) RemoveImageState(arg0 *image.ImageState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveImageState", arg0) +} + +// RemoveImageState indicates an expected call of RemoveImageState +func (mr *MockTaskEngineStateMockRecorder) RemoveImageState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImageState", reflect.TypeOf((*MockTaskEngineState)(nil).RemoveImageState), arg0) +} + +// RemoveTask mocks base method +func (m *MockTaskEngineState) RemoveTask(arg0 *task.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveTask", arg0) +} + +// RemoveTask indicates an expected call of RemoveTask +func (mr *MockTaskEngineStateMockRecorder) RemoveTask(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTask", reflect.TypeOf((*MockTaskEngineState)(nil).RemoveTask), arg0) +} + +// Reset mocks base method +func (m *MockTaskEngineState) Reset() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset") +} + +// Reset indicates an expected call of Reset +func (mr 
*MockTaskEngineStateMockRecorder) Reset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockTaskEngineState)(nil).Reset)) +} + +// TaskARNByV3EndpointID mocks base method +func (m *MockTaskEngineState) TaskARNByV3EndpointID(arg0 string) (string, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TaskARNByV3EndpointID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// TaskARNByV3EndpointID indicates an expected call of TaskARNByV3EndpointID +func (mr *MockTaskEngineStateMockRecorder) TaskARNByV3EndpointID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskARNByV3EndpointID", reflect.TypeOf((*MockTaskEngineState)(nil).TaskARNByV3EndpointID), arg0) +} + +// TaskByArn mocks base method +func (m *MockTaskEngineState) TaskByArn(arg0 string) (*task.Task, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TaskByArn", arg0) + ret0, _ := ret[0].(*task.Task) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// TaskByArn indicates an expected call of TaskByArn +func (mr *MockTaskEngineStateMockRecorder) TaskByArn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskByArn", reflect.TypeOf((*MockTaskEngineState)(nil).TaskByArn), arg0) +} + +// TaskByID mocks base method +func (m *MockTaskEngineState) TaskByID(arg0 string) (*task.Task, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TaskByID", arg0) + ret0, _ := ret[0].(*task.Task) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// TaskByID indicates an expected call of TaskByID +func (mr *MockTaskEngineStateMockRecorder) TaskByID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskByID", reflect.TypeOf((*MockTaskEngineState)(nil).TaskByID), arg0) +} + +// TaskByShortID mocks base method +func (m 
*MockTaskEngineState) TaskByShortID(arg0 string) ([]*task.Task, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TaskByShortID", arg0) + ret0, _ := ret[0].([]*task.Task) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// TaskByShortID indicates an expected call of TaskByShortID +func (mr *MockTaskEngineStateMockRecorder) TaskByShortID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskByShortID", reflect.TypeOf((*MockTaskEngineState)(nil).TaskByShortID), arg0) +} + +// UnmarshalJSON mocks base method +func (m *MockTaskEngineState) UnmarshalJSON(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnmarshalJSON", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnmarshalJSON indicates an expected call of UnmarshalJSON +func (mr *MockTaskEngineStateMockRecorder) UnmarshalJSON(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalJSON", reflect.TypeOf((*MockTaskEngineState)(nil).UnmarshalJSON), arg0) +} diff --git a/agent/engine/dockerstate/testutils/docker_state_equal.go b/agent/engine/dockerstate/testutils/docker_state_equal.go new file mode 100644 index 00000000000..737a81f9993 --- /dev/null +++ b/agent/engine/dockerstate/testutils/docker_state_equal.go @@ -0,0 +1,44 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// Package testutils contains files that are used in tests but not elsewhere and thus can +// be excluded from the final executable. Including them in a different package +// allows this to happen +package testutils + +import ( + api_testutils "github.com/aws/amazon-ecs-agent/agent/api/testutils" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" +) + +// DockerStatesEqual determines if the two given dockerstates are equal, for +// equal meaning they have the same tasks and their tasks are equal +func DockerStatesEqual(lhs, rhs dockerstate.TaskEngineState) bool { + // Simple equality check; just verify that all tasks are equal + lhsTasks := lhs.AllTasks() + rhsTasks := rhs.AllTasks() + if len(lhsTasks) != len(rhsTasks) { + return false + } + + for _, left := range lhsTasks { + right, ok := rhs.TaskByArn(left.Arn) + if !ok { + return false + } + if !api_testutils.TasksEqual(left, right) { + return false + } + } + return true +} diff --git a/agent/engine/dockerstate/testutils/json_test.go b/agent/engine/dockerstate/testutils/json_test.go new file mode 100644 index 00000000000..a9a61ca53d3 --- /dev/null +++ b/agent/engine/dockerstate/testutils/json_test.go @@ -0,0 +1,84 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package testutils + +import ( + "encoding/json" + "runtime/debug" + "strconv" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/stretchr/testify/assert" +) + +func createTestContainer(num int) *apicontainer.Container { + return &apicontainer.Container{ + Name: "busybox-" + strconv.Itoa(num), + Image: "busybox:1.32.0", + Essential: true, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + } +} + +func createTestTask(arn string, numContainers int) *apitask.Task { + task := &apitask.Task{ + Arn: arn, + Family: arn, + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{}, + } + + for i := 0; i < numContainers; i++ { + task.Containers = append(task.Containers, createTestContainer(i+1)) + } + return task +} + +func decodeEqual(t *testing.T, state dockerstate.TaskEngineState) dockerstate.TaskEngineState { + data, err := json.Marshal(&state) + assert.NoError(t, err, "marshal state") + + otherState := dockerstate.NewTaskEngineState() + err = json.Unmarshal(data, &otherState) + assert.NoError(t, err, "unmarshal state") + + if !DockerStatesEqual(state, otherState) { + debug.PrintStack() + t.Error("States were not equal") + } + return otherState +} + +func TestJsonEncoding(t *testing.T) { + state := dockerstate.NewTaskEngineState() + decodeEqual(t, state) + + testState := dockerstate.NewTaskEngineState() + testTask := createTestTask("test1", 1) + testState.AddTask(testTask) + for i, cont := range testTask.Containers { + testState.AddContainer(&apicontainer.DockerContainer{DockerID: "docker" + strconv.Itoa(i), DockerName: "someName", Container: cont}, testTask) + } + other := decodeEqual(t, 
testState) + _, ok := other.ContainerMapByArn("test1") + assert.True(t, ok, "could not retrieve expected task") +} diff --git a/agent/engine/engine_integ_test.go b/agent/engine/engine_integ_test.go new file mode 100644 index 00000000000..791164e60e2 --- /dev/null +++ b/agent/engine/engine_integ_test.go @@ -0,0 +1,655 @@ +// +build integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + sdkClient "github.com/docker/docker/client" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +const ( + testDockerStopTimeout = 5 * time.Second + credentialsIDIntegTest = "credsid" + serverContent = "ecs test container" + dialTimeout = 200 * time.Millisecond + localhost = "127.0.0.1" + waitForDockerDuration = 50 * time.Millisecond + removeVolumeTimeout = 5 * time.Second + + alwaysHealthyHealthCheckConfig = `{ + "HealthCheck":{ + "Test":["CMD-SHELL", "echo hello"], + "Interval":100000000, + "Timeout":2000000000, + "StartPeriod":100000000, + "Retries":3} + }` +) + +func init() { + // Set this very low for integ tests only + _stoppedSentWaitInterval = 1 * time.Second +} + +func setupWithDefaultConfig(t *testing.T) (TaskEngine, func(), credentials.Manager) { + return setup(defaultTestConfigIntegTest(), nil, t) +} + +func setupWithState(t *testing.T, state dockerstate.TaskEngineState) (TaskEngine, func(), credentials.Manager) { + return setup(defaultTestConfigIntegTest(), state, t) +} + +func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, + "Expected task to be RUNNING") +} + +func verifyTaskRunningStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, + "Expected task to be RUNNING") + assert.NotEqualf(t, "", event.(api.TaskStateChange).Task.Containers[0].RuntimeID, + "Expected task with runtimeID per container should not empty when Running") +} + +func verifyTaskStoppedStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, + "Expected task to be STOPPED") +} + +func 
verifyTaskStoppedStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, + "Expected task to be STOPPED") + assert.NotEqual(t, "", event.(api.TaskStateChange).Task.Containers[0].RuntimeID, + "Expected task with runtimeID per container should not be empty when stopped") +} + +func dialWithRetries(proto string, address string, tries int, timeout time.Duration) (net.Conn, error) { + var err error + var conn net.Conn + for i := 0; i < tries; i++ { + conn, err = net.DialTimeout(proto, address, timeout) + if err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + return conn, err +} + +func removeImage(t *testing.T, img string) { + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "create docker client failed") + client.ImageRemove(context.TODO(), img, types.ImageRemoveOptions{}) +} + +func cleanVolumes(testTask *apitask.Task, taskEngine TaskEngine) { + client := taskEngine.(*DockerTaskEngine).client + for _, aVolume := range testTask.Volumes { + client.RemoveVolume(context.TODO(), aVolume.Name, removeVolumeTimeout) + } +} + +// TestDockerStateToContainerState tests converting the container status from +// docker inspect to the status defined in agent +func TestDockerStateToContainerState(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + testTask := createTestTask("test_task") + container := testTask.Containers[0] + + // let the container keep running to prevent the edge case where it's already stopped when we check whether + // it's running + container.Command = getLongRunningCommand() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), 
sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + containerMetadata := taskEngine.(*DockerTaskEngine).pullContainer(testTask, container) + assert.NoError(t, containerMetadata.Error) + + containerMetadata = taskEngine.(*DockerTaskEngine).createContainer(testTask, container) + assert.NoError(t, containerMetadata.Error) + state, _ := client.ContainerInspect(ctx, containerMetadata.DockerID) + assert.Equal(t, apicontainerstatus.ContainerCreated, dockerapi.DockerStateToState(state.ContainerJSONBase.State)) + + containerMetadata = taskEngine.(*DockerTaskEngine).startContainer(testTask, container) + assert.NoError(t, containerMetadata.Error) + state, _ = client.ContainerInspect(ctx, containerMetadata.DockerID) + assert.Equal(t, apicontainerstatus.ContainerRunning, dockerapi.DockerStateToState(state.ContainerJSONBase.State)) + + containerMetadata = taskEngine.(*DockerTaskEngine).stopContainer(testTask, container) + assert.NoError(t, containerMetadata.Error) + state, _ = client.ContainerInspect(ctx, containerMetadata.DockerID) + assert.Equal(t, apicontainerstatus.ContainerStopped, dockerapi.DockerStateToState(state.ContainerJSONBase.State)) + + // clean up the container + err = taskEngine.(*DockerTaskEngine).removeContainer(testTask, container) + assert.NoError(t, err, "remove the created container failed") + + // Start the container failed + testTask = createTestTask("test_task2") + testTask.Containers[0].EntryPoint = &[]string{"non-existed"} + container = testTask.Containers[0] + containerMetadata = taskEngine.(*DockerTaskEngine).createContainer(testTask, container) + assert.NoError(t, containerMetadata.Error) + containerMetadata = taskEngine.(*DockerTaskEngine).startContainer(testTask, container) + assert.Error(t, containerMetadata.Error) + state, _ = client.ContainerInspect(ctx, containerMetadata.DockerID) + assert.Equal(t, apicontainerstatus.ContainerStopped, 
dockerapi.DockerStateToState(state.ContainerJSONBase.State)) + + // clean up the container + err = taskEngine.(*DockerTaskEngine).removeContainer(testTask, container) + assert.NoError(t, err, "remove the created container failed") +} + +func TestHostVolumeMount(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + tmpPath, _ := ioutil.TempDir("", "ecs_volume_test") + defer os.RemoveAll(tmpPath) + ioutil.WriteFile(filepath.Join(tmpPath, "test-file"), []byte("test-data"), 0644) + + testTask := createTestHostVolumeMountTask(tmpPath) + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found") + assert.Equal(t, 42, *testTask.Containers[0].GetKnownExitCode(), "Wrong exit code") + + data, err := ioutil.ReadFile(filepath.Join(tmpPath, "hello-from-container")) + assert.Nil(t, err, "Unexpected error") + assert.Equal(t, "hi", strings.TrimSpace(string(data)), "Incorrect file contents") + + cleanVolumes(testTask, taskEngine) +} + +func TestSweepContainer(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 1 * time.Minute + cfg.ContainerMetadataEnabled = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + taskArn := "arn:aws:ecs:us-east-1:123456789012:task/testSweepContainer" + testTask := createTestTask(taskArn) + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + tasks, _ := taskEngine.ListTasks() + assert.Equal(t, len(tasks), 1) + assert.Equal(t, tasks[0].GetKnownStatus(), apitaskstatus.TaskStopped) + + // 
Should be stopped, let's verify it's still listed... + task, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn) + assert.True(t, ok, "Expected task to be present still, but wasn't") + task.SetSentStatus(apitaskstatus.TaskStopped) // cleanupTask waits for TaskStopped to be sent before cleaning + time.Sleep(1 * time.Minute) + for i := 0; i < 60; i++ { + _, ok = taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn) + if !ok { + break + } + time.Sleep(1 * time.Second) + } + assert.False(t, ok, "Expected container to have been swept but was not") + + tasks, _ = taskEngine.ListTasks() + assert.Equal(t, len(tasks), 0) +} + +// TestStartStopWithCredentials starts and stops a task for which credentials id +// has been set +func TestStartStopWithCredentials(t *testing.T) { + taskEngine, done, credentialsManager := setupWithDefaultConfig(t) + defer done() + + testTask := createTestTask("testStartWithCredentials") + taskCredentials := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: credentials.IAMRoleCredentials{CredentialsID: credentialsIDIntegTest}, + } + credentialsManager.SetTaskCredentials(&taskCredentials) + testTask.SetCredentialsID(credentialsIDIntegTest) + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + // When task is stopped, credentials should have been removed for the + // credentials id set in the task + _, ok := credentialsManager.GetTaskCredentials(credentialsIDIntegTest) + assert.False(t, ok, "Credentials not removed from credentials manager for stopped task") +} + +// TestStartStopWithRuntimeID starts and stops a task for which runtimeID has been set. 
+func TestStartStopWithRuntimeID(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + testTask := createTestTask("testTaskWithContainerID") + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChangeWithRuntimeID(t, taskEngine) + verifyTaskRunningStateChangeWithRuntimeID(t, taskEngine) + verifyContainerStoppedStateChangeWithRuntimeID(t, taskEngine) + verifyTaskStoppedStateChangeWithRuntimeID(t, taskEngine) +} + +func TestTaskStopWhenPullImageFail(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.ImagePullBehavior = config.ImagePullAlwaysBehavior + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + testTask := createTestTask("testTaskStopWhenPullImageFail") + // Assign an invalid image to the task, and verify the task fails + // when the pull image behavior is "always". + testTask.Containers = []*apicontainer.Container{createTestContainerWithImageAndName("invalidImage", "invalidName")} + + go taskEngine.AddTask(testTask) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestContainerHealthCheck(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testTaskWithHealthCheck" + testTask := createTestHealthCheckTask(taskArn) + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + waitForContainerHealthStatus(t, testTask) + healthStatus := testTask.Containers[0].GetHealthStatus() + assert.Equal(t, "HEALTHY", healthStatus.Status.BackendStatus(), "container health status is not HEALTHY") + + taskUpdate := createTestTask(taskArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +// TestEngineSynchronize tests the 
agent synchronize the container status on restart +func TestEngineSynchronize(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + + taskArn := "arn:aws:ecs:us-east-1:123456789012:task/testEngineSynchronize" + testTask := createTestTask(taskArn) + testTask.Containers[0].Image = testVolumeImage + + // Start a task + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + // Record the container information + state := taskEngine.(*DockerTaskEngine).State() + containersMap, ok := state.ContainerMapByArn(taskArn) + require.True(t, ok, "no container found in the agent state") + require.Len(t, containersMap, 1) + containerIDs := state.GetAllContainerIDs() + require.Len(t, containerIDs, 1) + imageStates := state.AllImageStates() + assert.Len(t, imageStates, 1) + taskEngine.(*DockerTaskEngine).stopEngine() + + containerBeforeSync, ok := containersMap[testTask.Containers[0].Name] + assert.True(t, ok, "container not found in the containers map") + // Task and Container restored from state file + containerSaved := &apicontainer.Container{ + Name: containerBeforeSync.Container.Name, + SentStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + } + task := &apitask.Task{ + Arn: taskArn, + Containers: []*apicontainer.Container{ + containerSaved, + }, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + SentStatusUnsafe: apitaskstatus.TaskRunning, + } + + state = dockerstate.NewTaskEngineState() + state.AddTask(task) + state.AddContainer(&apicontainer.DockerContainer{ + DockerID: containerBeforeSync.DockerID, + Container: containerSaved, + }, task) + state.AddImageState(imageStates[0]) + + // Simulate the agent restart + taskEngine, done, _ = setupWithState(t, state) + defer done() + + taskEngine.MustInit(context.TODO()) + + // Check container status/metadata and image information are 
correctly synchronized + containerIDAfterSync := state.GetAllContainerIDs() + require.Len(t, containerIDAfterSync, 1) + containerAfterSync, ok := state.ContainerByID(containerIDAfterSync[0]) + assert.True(t, ok, "no container found in the agent state") + + assert.Equal(t, containerAfterSync.Container.GetKnownStatus(), containerBeforeSync.Container.GetKnownStatus()) + assert.Equal(t, containerAfterSync.Container.GetLabels(), containerBeforeSync.Container.GetLabels()) + assert.Equal(t, containerAfterSync.Container.GetStartedAt(), containerBeforeSync.Container.GetStartedAt()) + assert.Equal(t, containerAfterSync.Container.GetCreatedAt(), containerBeforeSync.Container.GetCreatedAt()) + + imageStateAfterSync := state.AllImageStates() + assert.Len(t, imageStateAfterSync, 1) + assert.Equal(t, *imageStateAfterSync[0], *imageStates[0]) + + testTask.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(testTask) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestSharedAutoprovisionVolume(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + stateChangeEvents := taskEngine.StateChangeEvents() + // Set the task clean up duration to speed up the test + taskEngine.(*DockerTaskEngine).cfg.TaskCleanupWaitDuration = 1 * time.Second + + testTask, tmpDirectory, err := createVolumeTask("shared", "TestSharedAutoprovisionVolume", "TestSharedAutoprovisionVolume", true) + defer os.Remove(tmpDirectory) + require.NoError(t, err, "creating test task failed") + + go taskEngine.AddTask(testTask) + + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyTaskIsStopped(stateChangeEvents, testTask) + assert.Equal(t, *testTask.Containers[0].GetKnownExitCode(), 0) + assert.Equal(t, testTask.ResourcesMapUnsafe["dockerVolume"][0].(*taskresourcevolume.VolumeResource).VolumeConfig.DockerVolumeName, "TestSharedAutoprovisionVolume", "task volume name is not the same as specified in task 
definition") + // Wait for task to be cleaned up + testTask.SetSentStatus(apitaskstatus.TaskStopped) + waitForTaskCleanup(t, taskEngine, testTask.Arn, 5) + client := taskEngine.(*DockerTaskEngine).client + response := client.InspectVolume(context.TODO(), "TestSharedAutoprovisionVolume", 1*time.Second) + assert.NoError(t, response.Error, "expect shared volume not removed") + + cleanVolumes(testTask, taskEngine) +} + +func TestSharedDoNotAutoprovisionVolume(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + stateChangeEvents := taskEngine.StateChangeEvents() + client := taskEngine.(*DockerTaskEngine).client + // Set the task clean up duration to speed up the test + taskEngine.(*DockerTaskEngine).cfg.TaskCleanupWaitDuration = 1 * time.Second + + testTask, tmpDirectory, err := createVolumeTask("shared", "TestSharedDoNotAutoprovisionVolume", "TestSharedDoNotAutoprovisionVolume", false) + defer os.Remove(tmpDirectory) + require.NoError(t, err, "creating test task failed") + + // creating volume to simulate previously provisioned volume + volumeConfig := testTask.Volumes[0].Volume.(*taskresourcevolume.DockerVolumeConfig) + volumeMetadata := client.CreateVolume(context.TODO(), "TestSharedDoNotAutoprovisionVolume", + volumeConfig.Driver, volumeConfig.DriverOpts, volumeConfig.Labels, 1*time.Minute) + require.NoError(t, volumeMetadata.Error) + + go taskEngine.AddTask(testTask) + + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyTaskIsStopped(stateChangeEvents, testTask) + assert.Equal(t, *testTask.Containers[0].GetKnownExitCode(), 0) + assert.Len(t, testTask.ResourcesMapUnsafe["dockerVolume"], 0, "volume that has been provisioned does not require the agent to create it again") + // Wait for task to be cleaned up + testTask.SetSentStatus(apitaskstatus.TaskStopped) + waitForTaskCleanup(t, taskEngine, testTask.Arn, 5) + response := client.InspectVolume(context.TODO(), "TestSharedDoNotAutoprovisionVolume", 1*time.Second) + 
assert.NoError(t, response.Error, "expect shared volume not removed") + + cleanVolumes(testTask, taskEngine) +} + +func TestLabels(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestLabels" + testTask := createTestTask(testArn) + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{Config: aws.String(`{ + "Labels": { + "com.foo.label2": "value", + "label1":"" + }}`)} + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + state, _ := client.ContainerInspect(ctx, cid) + assert.EqualValues(t, "value", state.Config.Labels["com.foo.label2"]) + assert.EqualValues(t, "", state.Config.Labels["label1"]) + + // Kill the existing container now + // Create instead of copying the testTask, to avoid race condition. + // AddTask idempotently handles update, filtering by Task ARN. 
+ taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestLogDriverOptions(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), + sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestLogdriverOptions" + testTask := createTestTask(testArn) + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{HostConfig: aws.String(`{ + "LogConfig": { + "Type": "json-file", + "Config": { + "max-file": "50", + "max-size": "50k" + } + }}`)} + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + state, _ := client.ContainerInspect(ctx, cid) + + containerExpected := container.LogConfig{ + Type: "json-file", + Config: map[string]string{"max-file": "50", "max-size": "50k"}, + } + + assert.EqualValues(t, containerExpected, state.HostConfig.LogConfig) + + // Kill the existing container now + // Create instead of copying the testTask, to avoid race condition. + // AddTask idempotently handles update, filtering by Task ARN. 
+ taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +// TestNetworkModeNone tests the container network can be configured +// as None mode in task definition +func TestNetworkModeNone(t *testing.T) { + testNetworkMode(t, "none") +} + +func testNetworkMode(t *testing.T, networkMode string) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), + sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestNetworkMode" + testTask := createTestTask(testArn) + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{ + HostConfig: aws.String(fmt.Sprintf(`{"NetworkMode":"%s"}`, networkMode))} + + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + + state, _ := client.ContainerInspect(ctx, cid) + assert.NotNil(t, state.NetworkSettings, "Couldn't find the container network setting info") + + var networks []string + for key := range state.NetworkSettings.Networks { + networks = append(networks, key) + } + assert.Equal(t, 1, len(networks), "found multiple networks in container config") + assert.Equal(t, networkMode, networks[0], "did not find the expected network mode") + + // Kill the existing container now + // Create instead of copying the testTask, to avoid race condition. + // AddTask idempotently handles update, filtering by Task ARN. 
+ taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestTaskCleanup(t *testing.T) { + os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "40s") + defer os.Unsetenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION") + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion( + sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestTaskCleanup" + testTask := createTestTask(testArn) + + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + _, err = client.ContainerInspect(ctx, cid) + assert.NoError(t, err, "Inspect should work") + + // Create instead of copying the testTask, to avoid race condition. 
+ taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + task, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(testArn) + assert.True(t, ok, "Expected task to be present still, but wasn't") + task.SetSentStatus(apitaskstatus.TaskStopped) // cleanupTask waits for TaskStopped to be sent before cleaning + waitForTaskCleanup(t, taskEngine, testArn, 120) // 120 seconds + + _, err = client.ContainerInspect(ctx, cid) + assert.Error(t, err, "Inspect should not work") +} diff --git a/agent/engine/engine_sudo_linux_integ_test.go b/agent/engine/engine_sudo_linux_integ_test.go new file mode 100644 index 00000000000..8a68c9601aa --- /dev/null +++ b/agent/engine/engine_sudo_linux_integ_test.go @@ -0,0 +1,757 @@ +// +build linux,sudo + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "github.com/cihub/seelog" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + sdkClient "github.com/docker/docker/client" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + 
"github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" +) + +var ( + endpoint = utils.DefaultIfBlank(os.Getenv(DockerEndpointEnvVariable), DockerDefaultEndpoint) +) + +const ( + testLogSenderImage = "public.ecr.aws/amazonlinux/amazonlinux:2.0.20210126.0" + testFluentbitImage = "public.ecr.aws/aws-observability/aws-for-fluent-bit:2.10.1" + testVolumeImage = "127.0.0.1:51670/amazon/amazon-ecs-volumes-test:latest" + testCluster = "testCluster" + validTaskArnPrefix = "arn:aws:ecs:region:account-id:task/" + testDataDir = "/var/lib/ecs/data/" + testDataDirOnHost = "/var/lib/ecs/" + testInstanceID = "testInstanceID" + testTaskDefFamily = "testFamily" + testTaskDefVersion = "1" + testECSRegion = "us-east-1" + testLogGroupName = "test-fluentbit" + testLogGroupPrefix = "firelens-fluentbit-" + testExecCommandAgentImage = "127.0.0.1:51670/amazon/amazon-ecs-exec-command-agent-test:latest" + testExecCommandAgentSleepBin = "/sleep" + testExecCommandAgentKillBin = "/kill" +) + +var ( + mockTaskMeatadataHandler = http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Write([]byte(`{"TaskARN": "arn:aws:ecs:region:account-id:task/task-id"}`)) + }) +) + +func TestStartStopWithCgroup(t *testing.T) { + cfg := defaultTestConfigIntegTest() + cfg.TaskCleanupWaitDuration = 1 * time.Second + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + cfg.CgroupPath = "/cgroup" + + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "arn:aws:ecs:us-east-1:123456789012:task/testCgroup" + testTask := createTestTask(taskArn) + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + for _, container := range testTask.Containers { + container.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + } + control := cgroup.New() + + commonResources := &taskresource.ResourceFieldsCommon{ + IOUtil: ioutilwrapper.NewIOUtil(), + } + + 
taskEngine.(*DockerTaskEngine).resourceFields = &taskresource.ResourceFields{ + Control: control, + ResourceFieldsCommon: commonResources, + } + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + + // Should be stopped, let's verify it's still listed... + task, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn) + assert.True(t, ok, "Expected task to be present still, but wasn't") + + cgroupRoot, err := testTask.BuildCgroupRoot() + assert.Nil(t, err) + assert.True(t, control.Exists(cgroupRoot)) + + task.SetSentStatus(apitaskstatus.TaskStopped) // cleanupTask waits for TaskStopped to be sent before cleaning + time.Sleep(cfg.TaskCleanupWaitDuration) + for i := 0; i < 60; i++ { + _, ok = taskEngine.(*DockerTaskEngine).State().TaskByArn(taskArn) + if !ok { + break + } + time.Sleep(1 * time.Second) + } + assert.False(t, ok, "Expected container to have been swept but was not") + assert.False(t, control.Exists(cgroupRoot)) +} + +func TestLocalHostVolumeMount(t *testing.T) { + cfg := defaultTestConfigIntegTest() + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + // creates a task with local volume + testTask := createTestLocalVolumeMountTask() + stateChangeEvents := taskEngine.StateChangeEvents() + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + + assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found") + assert.Equal(t, 0, *testTask.Containers[0].GetKnownExitCode(), "Wrong exit code") + data, err := ioutil.ReadFile(filepath.Join("/var/lib/docker/volumes/", testTask.Volumes[0].Volume.Source(), "/_data", "hello-from-container")) + assert.Nil(t, err, 
"Unexpected error") + assert.Equal(t, "empty-data-volume", strings.TrimSpace(string(data)), "Incorrect file contents") +} + +func createTestLocalVolumeMountTask() *apitask.Task { + testTask := createTestTask("testLocalHostVolumeMount") + testTask.Volumes = []apitask.TaskVolume{{Name: "test-tmp", Volume: &taskresourcevolume.LocalDockerVolume{}}} + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "/host/tmp", SourceVolume: "test-tmp"}} + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + testTask.Containers[0].TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + testTask.Containers[0].Command = []string{`echo -n "empty-data-volume" > /host/tmp/hello-from-container;`} + return testTask +} + +func TestFirelensFluentbit(t *testing.T) { + // Skipping the test for arm as they do not have official support for Arm images + if runtime.GOARCH == "arm64" { + t.Skip("Skipping test, unsupported image for arm64") + } + cfg := defaultTestConfigIntegTest() + cfg.DataDir = testDataDir + cfg.DataDirOnHost = testDataDirOnHost + cfg.TaskCleanupWaitDuration = 1 * time.Second + cfg.Cluster = testCluster + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + // Mock task metadata server as the firelens container needs to access it. + // Note that the listener has to be overwritten here, because the default one from httptest.NewServer + // only listens to localhost and isn't reachable from container running in bridge network mode. 
+ l, err := net.Listen("tcp", ":0") + require.NoError(t, err) + server := httptest.NewUnstartedServer(mockTaskMeatadataHandler) + server.Listener = l + server.Start() + defer server.Close() + + port := getPortFromAddr(t, server.URL) + serverURL := fmt.Sprintf("http://%s:%s", getHostPrivateIP(t, ec2.NewEC2MetadataClient(nil)), port) + defer setV3MetadataURLFormat(serverURL + "/v3/%s")() + + testTask := createFirelensTask(t) + taskEngine.(*DockerTaskEngine).resourceFields = &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + EC2InstanceID: testInstanceID, + }, + } + go taskEngine.AddTask(testTask) + testEvents := InitEventCollection(taskEngine) + + //Verify logsender container is running + err = VerifyContainerStatus(apicontainerstatus.ContainerRunning, testTask.Arn+":logsender", testEvents, t) + assert.NoError(t, err, "Verify logsender container is running") + + //Verify firelens container is running + err = VerifyContainerStatus(apicontainerstatus.ContainerRunning, testTask.Arn+":firelens", testEvents, t) + assert.NoError(t, err, "Verify firelens container is running") + + //Verify task is in running state + err = VerifyTaskStatus(apitaskstatus.TaskRunning, testTask.Arn, testEvents, t) + assert.NoError(t, err, "Not verified task running") + + //Verify logsender container is stopped + err = VerifyContainerStatus(apicontainerstatus.ContainerStopped, testTask.Arn+":logsender", testEvents, t) + assert.NoError(t, err) + + //Verify firelens container is stopped + err = VerifyContainerStatus(apicontainerstatus.ContainerStopped, testTask.Arn+":firelens", testEvents, t) + assert.NoError(t, err) + + //Verify the task itself has stopped + err = VerifyTaskStatus(apitaskstatus.TaskStopped, testTask.Arn, testEvents, t) + assert.NoError(t, err) + + taskID, err := testTask.GetID() + + //declare a cloudwatch client + cwlClient := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(testECSRegion)) + params := 
&cloudwatchlogs.GetLogEventsInput{ + LogGroupName: aws.String(testLogGroupName), + LogStreamName: aws.String(fmt.Sprintf("firelens-fluentbit-logsender-firelens-%s", taskID)), + } + + // wait for the cloud watch logs + resp, err := waitCloudwatchLogs(cwlClient, params) + require.NoError(t, err) + // there should only be one event as we are echoing only one thing that is part of the include-filter + assert.Equal(t, 1, len(resp.Events)) + + message := aws.StringValue(resp.Events[0].Message) + jsonBlob := make(map[string]string) + err = json.Unmarshal([]byte(message), &jsonBlob) + require.NoError(t, err) + assert.Equal(t, "stdout", jsonBlob["source"]) + assert.Equal(t, "include", jsonBlob["log"]) + assert.Contains(t, jsonBlob, "container_id") + assert.Contains(t, jsonBlob["container_name"], "logsender") + assert.Equal(t, testCluster, jsonBlob["ecs_cluster"]) + assert.Equal(t, testTask.Arn, jsonBlob["ecs_task_arn"]) + + testTask.SetSentStatus(apitaskstatus.TaskStopped) + time.Sleep(3 * cfg.TaskCleanupWaitDuration) + + for i := 0; i < 60; i++ { + _, ok := taskEngine.(*DockerTaskEngine).State().TaskByArn(testTask.Arn) + if !ok { + break + } + time.Sleep(1 * time.Second) + } + // Make sure all the resources are cleaned up + _, err = ioutil.ReadDir(filepath.Join(testDataDir, "firelens", testTask.Arn)) + assert.Error(t, err) +} + +// getPortFromAddr returns the port part of an address in format "http://<addr>:<port>". +func getPortFromAddr(t *testing.T, addr string) string { + u, err := url.Parse(addr) + require.NoErrorf(t, err, "unable to parse address: %s", addr) + _, port, err := net.SplitHostPort(u.Host) + require.NoErrorf(t, err, "unable to get port from address: %s", addr) + return port +} + +// getHostPrivateIP returns the host's private IP. 
+func getHostPrivateIP(t *testing.T, ec2MetadataClient ec2.EC2MetadataClient) string { + ip, err := ec2MetadataClient.PrivateIPv4Address() + require.NoError(t, err) + return ip +} + +// setV3MetadataURLFormat sets the container metadata URI format and returns a function to set it back. +func setV3MetadataURLFormat(fmt string) func() { + backup := apicontainer.MetadataURIFormat + apicontainer.MetadataURIFormat = fmt + return func() { + apicontainer.MetadataURIFormat = backup + } +} + +func createFirelensTask(t *testing.T) *apitask.Task { + testTask := createTestTask(validTaskArnPrefix + uuid.New()) + rawHostConfigInputForLogSender := dockercontainer.HostConfig{ + LogConfig: dockercontainer.LogConfig{ + Type: logDriverTypeFirelens, + Config: map[string]string{ + "Name": "cloudwatch", + "exclude-pattern": "exclude", + "include-pattern": "include", + "log_group_name": testLogGroupName, + "log_stream_prefix": testLogGroupPrefix, + "region": testECSRegion, + "auto_create_group": "true", + }, + }, + } + rawHostConfigForLogSender, err := json.Marshal(&rawHostConfigInputForLogSender) + require.NoError(t, err) + testTask.Containers = []*apicontainer.Container{ + { + Name: "logsender", + Image: testLogSenderImage, + Essential: true, + // TODO: the firelens router occasionally failed to send logs when it's shut down very quickly after started. + // Let the task run for a while with a sleep helps avoid that failure, but still needs to figure out the + // root cause. 
+ Command: []string{"sh", "-c", "echo exclude; echo include; sleep 10;"}, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: func() *string { + s := string(rawHostConfigForLogSender) + return &s + }(), + }, + DependsOnUnsafe: []apicontainer.DependsOn{ + { + ContainerName: "firelens", + Condition: "START", + }, + }, + }, + { + Name: "firelens", + Image: testFluentbitImage, + Essential: true, + FirelensConfig: &apicontainer.FirelensConfig{ + Type: firelens.FirelensConfigTypeFluentbit, + Options: map[string]string{ + "enable-ecs-log-metadata": "true", + }, + }, + Environment: map[string]string{ + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "FLB_LOG_LEVEL": "debug", + }, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + } + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + return testTask +} + +func waitCloudwatchLogs(client *cloudwatchlogs.CloudWatchLogs, params *cloudwatchlogs.GetLogEventsInput) (*cloudwatchlogs.GetLogEventsOutput, error) { + // The test could fail for timing issue, so retry for 30 seconds to make this test more stable + for i := 0; i < 30; i++ { + resp, err := client.GetLogEvents(params) + if err != nil { + awsError, ok := err.(awserr.Error) + if !ok || awsError.Code() != "ResourceNotFoundException" { + return nil, err + } + } else if len(resp.Events) > 0 { + return resp, nil + } + time.Sleep(time.Second) + } + return nil, fmt.Errorf("timeout waiting for the logs to be sent to cloud watch logs") +} + +// TestExecCommandAgent validates ExecCommandAgent start and monitor processes. The algorithm to test is as follows: +// 1. Pre-setup: the make file in ../../misc/exec-command-agent-test will create a special docker sleeper image +// based on a scratch image. This image simulates a customer image and contains pre-baked /sleep and /kill binaries. 
+// /sleep is the main process used to launch the test container; /kill is an application that kills a process running in +// the container given a PID. +// The make file will also create a fake amazon-ssm-agent which is a go program that only sleeps for a certain time specified. +// +// 2. Setup: Create a new docker task engine with a modified path pointing to our fake amazon-ssm-agent binary +// 3. Create and start our test task using our test image +// 4. Wait for the task to start and verify that the expected ExecCommandAgent bind mounts are present in the containers +// 5. Verify that our fake amazon-ssm-agent was started inside the container using docker top, and retrieve its PID +// 6. Kill the fake amazon-ssm-agent using the PID retrieved in previous step +// 7. Verify that the engine restarted our fake amazon-ssm-agent by doing docker top one more time (a new PID should popup) +func TestExecCommandAgent(t *testing.T) { + const ( + testTaskId = "exec-command-agent-test-task" + testContainerName = "exec-command-agent-test-container" + sleepFor = time.Minute * 2 + ) + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testExecCmdHostBinDir := "/managed-agents/execute-command/bin" + + taskEngine, done, _ := setupEngineForExecCommandAgent(t, testExecCmdHostBinDir) + stateChangeEvents := taskEngine.StateChangeEvents() + defer done() + + testTask := createTestExecCommandAgentTask(testTaskId, testContainerName, sleepFor) + execAgentLogPath := filepath.Join("/log/exec", testTaskId) + err = os.MkdirAll(execAgentLogPath, 0644) + require.NoError(t, err, "error creating execAgent log file") + _, err = os.Stat(execAgentLogPath) + require.NoError(t, err, "execAgent log dir doesn't exist") + err = os.MkdirAll(execcmd.ECSAgentExecConfigDir, 0644) + require.NoError(t, err, "error creating execAgent config dir") + + 
go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + + // session limit is 2 + testConfigFileName, _ := execcmd.GetExecAgentConfigFileName(2) + testLogConfigFileName, _ := execcmd.GetExecAgentLogConfigFile() + verifyExecCmdAgentExpectedMounts(t, ctx, client, testTaskId, cid, testContainerName, testExecCmdHostBinDir+"/1.0.0.0", testConfigFileName, testLogConfigFileName) + pidA := verifyMockExecCommandAgentIsRunning(t, client, cid) + seelog.Infof("Verified mock ExecCommandAgent is running (pidA=%s)", pidA) + killMockExecCommandAgent(t, client, cid, pidA) + seelog.Infof("kill signal sent to ExecCommandAgent (pidA=%s)", pidA) + verifyMockExecCommandAgentIsStopped(t, client, cid, pidA) + seelog.Infof("Verified mock ExecCommandAgent was killed (pidA=%s)", pidA) + pidB := verifyMockExecCommandAgentIsRunning(t, client, cid) + seelog.Infof("Verified mock ExecCommandAgent was restarted (pidB=%s)", pidB) + require.NotEqual(t, pidA, pidB, "ExecCommandAgent PID did not change after restart") + + taskUpdate := createTestExecCommandAgentTask(testTaskId, testContainerName, sleepFor) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*20) + go func() { + verifyTaskIsStopped(stateChangeEvents, testTask) + cancel() + }() + + <-ctx.Done() + require.NotEqual(t, context.DeadlineExceeded, ctx.Err(), "Timed out waiting for task (%s) to stop", testTaskId) + assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found") + // TODO: [ecs-exec] We should be able to wait for cleanup instead of calling deleteTask directly + 
taskEngine.(*DockerTaskEngine).deleteTask(testTask) + _, err = os.Stat(execAgentLogPath) + assert.True(t, os.IsNotExist(err), "execAgent log cleanup failed") + os.RemoveAll(execcmd.ECSAgentExecConfigDir) +} + +// TestManagedAgentEvent validates the emitted container events for a started and a stopped managed agent. +func TestManagedAgentEvent(t *testing.T) { + testcases := []struct { + Name string + ExpectedStatus apicontainerstatus.ManagedAgentStatus + ManagedAgentLifetime time.Duration + ShouldBeRunning bool + }{ + { + Name: "Confirmed emit RUNNING event", + ExpectedStatus: apicontainerstatus.ManagedAgentRunning, + ManagedAgentLifetime: 1, + ShouldBeRunning: true, + }, + { + Name: "Confirmed emit STOPPED event", + ExpectedStatus: apicontainerstatus.ManagedAgentStopped, + ManagedAgentLifetime: 0, + ShouldBeRunning: false, + }, + } + for _, tc := range testcases { + t.Run(tc.Name, func(t *testing.T) { + + const ( + testTaskId = "exec-command-agent-test-task" + testContainerName = "exec-command-agent-test-container" + ) + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testExecCmdHostBinDir := "/managed-agents/execute-command/bin" + + taskEngine, done, _ := setupEngineForExecCommandAgent(t, testExecCmdHostBinDir) + defer done() + + testTask := createTestExecCommandAgentTask(testTaskId, testContainerName, time.Minute*tc.ManagedAgentLifetime) + execAgentLogPath := filepath.Join("/log/exec", testTaskId) + err = os.MkdirAll(execAgentLogPath, 0644) + require.NoError(t, err, "error creating execAgent log file") + _, err = os.Stat(execAgentLogPath) + require.NoError(t, err, "execAgent log dir doesn't exist") + err = os.MkdirAll(execcmd.ECSAgentExecConfigDir, 0644) + require.NoError(t, err, "error creating execAgent config dir") + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, 
taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + if tc.ShouldBeRunning { + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + verifyMockExecCommandAgentIsRunning(t, client, cid) + } + waitDone := make(chan struct{}) + + go verifyExecAgentStateChange(t, taskEngine, tc.ExpectedStatus, waitDone) + + timeout := false + select { + case <-waitDone: + case <-time.After(20 * time.Second): + timeout = true + } + assert.False(t, timeout) + + taskEngine.(*DockerTaskEngine).deleteTask(testTask) + _, err = os.Stat(execAgentLogPath) + assert.True(t, os.IsNotExist(err), "execAgent log cleanup failed") + os.RemoveAll(execcmd.ECSAgentExecConfigDir) + }) + } +} + +func createTestExecCommandAgentTask(taskId, containerName string, sleepFor time.Duration) *apitask.Task { + testTask := createTestTask("arn:aws:ecs:us-west-2:1234567890:task/" + taskId) + testTask.PIDMode = ecs.PidModeHost + testTask.Containers[0].Name = containerName + testTask.Containers[0].Image = testExecCommandAgentImage + testTask.Containers[0].Command = []string{testExecCommandAgentSleepBin, "-time=" + sleepFor.String()} + enableExecCommandAgentForContainer(testTask.Containers[0], apicontainer.ManagedAgentState{}) + return testTask +} + +// setupEngineForExecCommandAgent creates a new TaskEngine with a custom execcmd.Manager that will attempt to read the +// host binaries from the directory passed as parameter (as opposed to the default directory). +// Additionally, it overrides the engine's monitorExecAgentsInterval to one second. 
+func setupEngineForExecCommandAgent(t *testing.T, hostBinDir string) (TaskEngine, func(), credentials.Manager) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + skipIntegTestIfApplicable(t) + + cfg := defaultTestConfigIntegTest() + sdkClientFactory := sdkclientfactory.NewFactory(ctx, dockerEndpoint) + dockerClient, err := dockerapi.NewDockerGoClient(sdkClientFactory, cfg, context.Background()) + if err != nil { + t.Fatalf("Error creating Docker client: %v", err) + } + credentialsManager := credentials.NewManager() + state := dockerstate.NewTaskEngineState() + imageManager := NewImageManager(cfg, dockerClient, state) + imageManager.SetDataClient(data.NewNoopClient()) + metadataManager := containermetadata.NewManager(dockerClient, cfg) + execCmdMgr := execcmd.NewManagerWithBinDir(hostBinDir) + + taskEngine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager, + eventstream.NewEventStream("ENGINEINTEGTEST", context.Background()), imageManager, state, metadataManager, + nil, execCmdMgr) + taskEngine.monitorExecAgentsInterval = time.Second + taskEngine.MustInit(context.TODO()) + return taskEngine, func() { + taskEngine.Shutdown() + }, credentialsManager +} + +const ( + uuidRegex = "[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}" // matches a UUID + containerDepsPrefixRegex = execcmd.ContainerDepsDirPrefix + uuidRegex +) + +func verifyExecCmdAgentExpectedMounts(t *testing.T, + ctx context.Context, + client *sdkClient.Client, + testTaskId, containerId, containerName, testExecCmdHostVersionedBinDir, testConfigFileName, testLogConfigFileName string) { + inspectState, _ := client.ContainerInspect(ctx, containerId) + + expectedMounts := []struct { + source string + destRegex string + readOnly bool + }{ + { + source: filepath.Join(testExecCmdHostVersionedBinDir, execcmd.SSMAgentBinName), + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.SSMAgentBinName), + readOnly: true, + }, + { + source: 
filepath.Join(testExecCmdHostVersionedBinDir, execcmd.SSMAgentWorkerBinName), + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.SSMAgentWorkerBinName), + readOnly: true, + }, + { + source: filepath.Join(testExecCmdHostVersionedBinDir, execcmd.SessionWorkerBinName), + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.SessionWorkerBinName), + readOnly: true, + }, + { + source: execcmd.HostCertFile, + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.ContainerCertFileSuffix), + readOnly: true, + }, + { + source: filepath.Join(execcmd.HostExecConfigDir, testConfigFileName), + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.ContainerConfigFileSuffix), + readOnly: true, + }, + { + source: filepath.Join(execcmd.HostExecConfigDir, testLogConfigFileName), + destRegex: filepath.Join(containerDepsPrefixRegex, execcmd.ContainerLogConfigFile), + readOnly: true, + }, + { + source: filepath.Join(execcmd.HostLogDir, testTaskId, containerName), + destRegex: execcmd.ContainerLogDir, + readOnly: false, + }, + } + + for _, em := range expectedMounts { + var found *types.MountPoint + for _, m := range inspectState.Mounts { + if m.Source == em.source { + found = &m + break + } + } + require.NotNil(t, found, "Expected mount point not found (%s)", em.source) + require.Regexp(t, em.destRegex, found.Destination, "Destination for mount point (%s) is invalid expected: %s, actual: %s", em.source, em.destRegex, found.Destination) + if em.readOnly { + require.Equal(t, "ro", found.Mode, "Destination for mount point (%s) should be read only", em.source) + } else { + require.True(t, found.RW, "Destination for mount point (%s) should be writable", em.source) + } + require.Equal(t, "bind", string(found.Type), "Destination for mount point (%s) is not of type bind", em.source) + } + + require.Equal(t, len(expectedMounts), len(inspectState.Mounts), "Wrong number of bind mounts detected in container (%s)", containerName) +} + +func 
verifyMockExecCommandAgentIsRunning(t *testing.T, client *sdkClient.Client, containerId string) string { + return verifyMockExecCommandAgentStatus(t, client, containerId, "", true) +} + +func verifyMockExecCommandAgentIsStopped(t *testing.T, client *sdkClient.Client, containerId, pid string) { + verifyMockExecCommandAgentStatus(t, client, containerId, pid, false) +} + +func verifyMockExecCommandAgentStatus(t *testing.T, client *sdkClient.Client, containerId, expectedPid string, checkIsRunning bool) string { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer cancel() + res := make(chan string, 1) + execCmdAgentProcessRegex := filepath.Join(containerDepsPrefixRegex, execcmd.SSMAgentBinName) + go func() { + for { + top, err := client.ContainerTop(ctx, containerId, nil) + if err != nil { + continue + } + cmdPos := -1 + pidPos := -1 + for i, t := range top.Titles { + if strings.ToUpper(t) == "CMD" { + cmdPos = i + } + if strings.ToUpper(t) == "PID" { + pidPos = i + } + + } + require.NotEqual(t, -1, cmdPos, "CMD title not found in the container top response") + require.NotEqual(t, -1, pidPos, "PID title not found in the container top response") + for _, proc := range top.Processes { + matched, _ := regexp.MatchString(execCmdAgentProcessRegex, proc[cmdPos]) + if matched { + res <- proc[pidPos] + return + } + } + seelog.Infof("Processes running in container: %s", top.Processes) + select { + case <-ctx.Done(): + return + case <-time.After(time.Second * 4): + } + } + }() + + var ( + isRunning bool + pid string + ) + select { + case <-ctx.Done(): + case r := <-res: + if r != "" { + pid = r + isRunning = true + if expectedPid != "" && pid != expectedPid { + isRunning = false + } + } + + } + require.Equal(t, checkIsRunning, isRunning, "SSM agent was not found in container's process list") + return pid +} + +func killMockExecCommandAgent(t *testing.T, client *sdkClient.Client, containerId, pid string) { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + create, err := client.ContainerExecCreate(ctx, containerId, types.ExecConfig{ + Detach: true, + Cmd: []string{testExecCommandAgentKillBin, "-pid=" + pid}, + }) + require.NoError(t, err) + + err = client.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{ + Detach: true, + }) + require.NoError(t, err) +} + +func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, + "Expected task to be RUNNING") +} diff --git a/agent/engine/engine_unix_integ_test.go b/agent/engine/engine_unix_integ_test.go new file mode 100644 index 00000000000..2d93cbe77e4 --- /dev/null +++ b/agent/engine/engine_unix_integ_test.go @@ -0,0 +1,1044 @@ +// +build !windows,integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/aws/aws-sdk-go/aws" + sdkClient "github.com/docker/docker/client" + "github.com/stretchr/testify/require" +) + +const ( + testRegistryHost = "127.0.0.1:51670" + testBusyboxImage = testRegistryHost + "/busybox:latest" + testVolumeImage = testRegistryHost + "/amazon/amazon-ecs-volumes-test:latest" + testFluentdImage = testRegistryHost + "/amazon/fluentd:latest" +) + +var ( + endpoint = utils.DefaultIfBlank(os.Getenv(DockerEndpointEnvVariable), DockerDefaultEndpoint) + TestGPUInstanceType = []string{"p2", "p3", "g3", "g4dn", "p4d"} +) + +func createTestHealthCheckTask(arn string) *apitask.Task { + testTask := &apitask.Task{ + Arn: arn, + Family: "family", + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{createTestContainer()}, + } + testTask.Containers[0].Image = testBusyboxImage + testTask.Containers[0].Name = "test-health-check" + testTask.Containers[0].HealthCheckType = "docker" + 
testTask.Containers[0].Command = []string{"sh", "-c", "sleep 300"} + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{ + Config: aws.String(alwaysHealthyHealthCheckConfig), + } + return testTask +} + +// All Namespace Sharing Tests will rely on 3 containers +// container0 will be the container that starts an executable or creates a resource +// container1 and container2 will attempt to see this process/resource +// and quit with exit 0 for success and 1 for failure +func createNamespaceSharingTask(arn, pidMode, ipcMode, testImage string, theCommand []string) *apitask.Task { + testTask := &apitask.Task{ + Arn: arn, + Family: "family", + Version: "1", + PIDMode: pidMode, + IPCMode: ipcMode, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{ + &apicontainer.Container{ + Name: "container0", + Image: testImage, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 100, + Memory: 80, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + &apicontainer.Container{ + Name: "container1", + Image: testBusyboxImage, + Command: theCommand, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 100, + Memory: 80, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + &apicontainer.Container{ + Name: "container2", + Image: testBusyboxImage, + Command: theCommand, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 100, + Memory: 80, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + }, + }, + } + + // Setting a container dependency so the executable can be started or resource can be created + // before read is attempted by other containers + testTask.Containers[1].BuildContainerDependency(testTask.Containers[0].Name, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated) + 
testTask.Containers[2].BuildContainerDependency(testTask.Containers[0].Name, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated) + return testTask +} + +func createVolumeTask(scope, arn, volume string, autoprovision bool) (*apitask.Task, string, error) { + tmpDirectory, err := ioutil.TempDir("", "ecs_test") + if err != nil { + return nil, "", err + } + err = ioutil.WriteFile(filepath.Join(tmpDirectory, "volume-data"), []byte("volume"), 0666) + if err != nil { + return nil, "", err + } + + testTask := createTestTask(arn) + + volumeConfig := &taskresourcevolume.DockerVolumeConfig{ + Scope: scope, + Driver: "local", + DriverOpts: map[string]string{ + "device": tmpDirectory, + "o": "bind", + "type": "tmpfs", + }, + } + if scope == "shared" { + volumeConfig.Autoprovision = autoprovision + } + + testTask.Volumes = []apitask.TaskVolume{ + { + Type: "docker", + Name: volume, + Volume: volumeConfig, + }, + } + + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{ + { + SourceVolume: volume, + ContainerPath: "/ecs", + }, + } + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + testTask.Containers[0].Command = []string{"sh", "-c", "if [[ $(cat /ecs/volume-data) != \"volume\" ]]; then cat /ecs/volume-data; exit 1; fi; exit 0"} + return testTask, tmpDirectory, nil +} + +// TestStartStopUnpulledImage ensures that an unpulled image is successfully +// pulled, run, and stopped via docker. 
+func TestStartStopUnpulledImage(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + // Ensure this image isn't pulled by deleting it + removeImage(t, testRegistryImage) + + testTask := createTestTask("testStartUnpulled") + + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +// TestStartStopUnpulledImageDigest ensures that an unpulled image with +// specified digest is successfully pulled, run, and stopped via docker. +func TestStartStopUnpulledImageDigest(t *testing.T) { + imageDigest := "public.ecr.aws/amazonlinux/amazonlinux@sha256:1b6599b4846a765106350130125e2480f6c1cb7791df0ce3e59410362f311259" + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + // Ensure this image isn't pulled by deleting it + removeImage(t, imageDigest) + + testTask := createTestTask("testStartUnpulledDigest") + testTask.Containers[0].Image = imageDigest + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +// TestPortForward runs a container serving data on the randomly chosen port +// 24751 and verifies that when you do forward the port you can access it and if +// you don't forward the port you can't +func TestPortForward(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testArn := "testPortForwardFail" + testTask := createTestTask(testArn) + port1 := getUnassignedPort() + testTask.Containers[0].Command = []string{fmt.Sprintf("-l=%d", port1), "-serve", serverContent} + + // Port not forwarded; verify we can't access it + go taskEngine.AddTask(testTask) + + err := 
verifyTaskIsRunning(stateChangeEvents, testTask) + require.NoError(t, err) + + time.Sleep(waitForDockerDuration) // wait for Docker + _, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", localhost, port1), dialTimeout) + + require.Error(t, err, "Did not expect to be able to dial %s:%d but didn't get error", localhost, port1) + + // Kill the existing container now to make the test run more quickly. + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + client, _ := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + err = client.ContainerKill(context.TODO(), cid, "SIGKILL") + require.NoError(t, err, "Could not kill container", err) + + verifyTaskIsStopped(stateChangeEvents, testTask) + + // Now forward it and make sure that works + testArn = "testPortForwardWorking" + testTask = createTestTask(testArn) + port2 := getUnassignedPort() + testTask.Containers[0].Command = []string{fmt.Sprintf("-l=%d", port2), "-serve", serverContent} + testTask.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: port2, HostPort: port2}} + + taskEngine.AddTask(testTask) + + err = verifyTaskIsRunning(stateChangeEvents, testTask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(waitForDockerDuration) // wait for Docker + conn, err := dialWithRetries("tcp", fmt.Sprintf("%s:%d", localhost, port2), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container " + err.Error()) + } + + var response []byte + for i := 0; i < 10; i++ { + response, err = ioutil.ReadAll(conn) + if err != nil { + t.Error("Error reading response", err) + } + if len(response) > 0 { + break + } + // Retry for a non-blank response. The container in docker 1.7+ sometimes + // isn't up quickly enough and we get a blank response. 
It's still unclear + // to me if this is a docker bug or netkitten bug + t.Log("Retrying getting response from container; got nothing") + time.Sleep(100 * time.Millisecond) + } + if string(response) != serverContent { + t.Error("Got response: " + string(response) + " instead of " + serverContent) + } + + // Stop the existing container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +// TestMultiplePortForwards tests that two links containers in the same task can +// both expose ports successfully +func TestMultiplePortForwards(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + // Forward it and make sure that works + testArn := "testMultiplePortForwards" + testTask := createTestTask(testArn) + port1 := getUnassignedPort() + port2 := getUnassignedPort() + testTask.Containers[0].Command = []string{fmt.Sprintf("-l=%d", port1), "-serve", serverContent + "1"} + testTask.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: port1, HostPort: port1}} + testTask.Containers[0].Essential = false + testTask.Containers = append(testTask.Containers, createTestContainer()) + testTask.Containers[1].Name = "nc2" + testTask.Containers[1].Command = []string{fmt.Sprintf("-l=%d", port1), "-serve", serverContent + "2"} + testTask.Containers[1].Ports = []apicontainer.PortBinding{{ContainerPort: port1, HostPort: port2}} + + go taskEngine.AddTask(testTask) + + err := verifyTaskIsRunning(stateChangeEvents, testTask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(waitForDockerDuration) // wait for Docker + conn, err := dialWithRetries("tcp", fmt.Sprintf("%s:%d", localhost, port1), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container 1 " + err.Error()) + } + t.Log("Dialed first container") + response, _ := 
ioutil.ReadAll(conn) + if string(response) != serverContent+"1" { + t.Error("Got response: " + string(response) + " instead of" + serverContent + "1") + } + t.Log("Read first container") + conn, err = dialWithRetries("tcp", fmt.Sprintf("%s:%d", localhost, port2), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container 2 " + err.Error()) + } + t.Log("Dialed second container") + response, _ = ioutil.ReadAll(conn) + if string(response) != serverContent+"2" { + t.Error("Got response: " + string(response) + " instead of" + serverContent + "2") + } + t.Log("Read second container") + + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +// TestDynamicPortForward runs a container serving data on a port chosen by the +// docker deamon and verifies that the port is reported in the state-change +func TestDynamicPortForward(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testArn := "testDynamicPortForward" + testTask := createTestTask(testArn) + port := getUnassignedPort() + testTask.Containers[0].Command = []string{fmt.Sprintf("-l=%d", port), "-serve", serverContent} + // No HostPort = docker should pick + testTask.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: port}} + + go taskEngine.AddTask(testTask) + + event := <-stateChangeEvents + require.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") + + portBindings := event.(api.ContainerStateChange).PortBindings + + verifyTaskRunningStateChange(t, taskEngine) + + if len(portBindings) != 1 { + t.Error("PortBindings was not set; should have been len 1", portBindings) + } + var bindingForcontainerPortOne uint16 + for _, binding := range portBindings { + if binding.ContainerPort == port { + 
bindingForcontainerPortOne = binding.HostPort + } + } + if bindingForcontainerPortOne == 0 { + t.Errorf("Could not find the port mapping for %d!", port) + } + + time.Sleep(waitForDockerDuration) // wait for Docker + conn, err := dialWithRetries("tcp", localhost+":"+strconv.Itoa(int(bindingForcontainerPortOne)), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container " + err.Error()) + } + + response, _ := ioutil.ReadAll(conn) + if string(response) != serverContent { + t.Error("Got response: " + string(response) + " instead of " + serverContent) + } + + // Kill the existing container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +func TestMultipleDynamicPortForward(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testArn := "testDynamicPortForward2" + testTask := createTestTask(testArn) + port := getUnassignedPort() + testTask.Containers[0].Command = []string{fmt.Sprintf("-l=%d", port), "-serve", serverContent, `-loop`} + // No HostPort or 0 hostport; docker should pick two ports for us + testTask.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: port}, {ContainerPort: port, HostPort: 0}} + + go taskEngine.AddTask(testTask) + + event := <-stateChangeEvents + require.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") + + portBindings := event.(api.ContainerStateChange).PortBindings + + verifyTaskRunningStateChange(t, taskEngine) + + if len(portBindings) != 2 { + t.Error("Could not bind to two ports from one container port", portBindings) + } + var bindingForcontainerPortOne_1 uint16 + var bindingForcontainerPortOne_2 uint16 + for _, binding := range portBindings { + if binding.ContainerPort == port { + if 
bindingForcontainerPortOne_1 == 0 { + bindingForcontainerPortOne_1 = binding.HostPort + } else { + bindingForcontainerPortOne_2 = binding.HostPort + } + } + } + if bindingForcontainerPortOne_1 == 0 { + t.Errorf("Could not find the port mapping for %d!", port) + } + if bindingForcontainerPortOne_2 == 0 { + t.Errorf("Could not find the port mapping for %d!", port) + } + + time.Sleep(waitForDockerDuration) // wait for Docker + conn, err := dialWithRetries("tcp", localhost+":"+strconv.Itoa(int(bindingForcontainerPortOne_1)), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container " + err.Error()) + } + + response, _ := ioutil.ReadAll(conn) + if string(response) != serverContent { + t.Error("Got response: " + string(response) + " instead of " + serverContent) + } + + conn, err = dialWithRetries("tcp", localhost+":"+strconv.Itoa(int(bindingForcontainerPortOne_2)), 10, dialTimeout) + if err != nil { + t.Fatal("Error dialing simple container " + err.Error()) + } + + response, _ = ioutil.ReadAll(conn) + if string(response) != serverContent { + t.Error("Got response: " + string(response) + " instead of " + serverContent) + } + + // Kill the existing container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +// TestLinking ensures that container linking does allow networking to go +// through to a linked container. 
this test specifically starts a server that +// prints "hello linker" and then links a container that proxies that data to +// a publicly exposed port, where the tests reads it +func TestLinking(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + testArn := "TestLinking" + testTask := createTestTask(testArn) + testTask.Containers = append(testTask.Containers, createTestContainer()) + testTask.Containers[0].Command = []string{"-l=80", "-serve", "hello linker"} + testTask.Containers[0].Name = "linkee" + port := getUnassignedPort() + testTask.Containers[1].Command = []string{fmt.Sprintf("-l=%d", port), "linkee_alias:80"} + testTask.Containers[1].Links = []string{"linkee:linkee_alias"} + testTask.Containers[1].Ports = []apicontainer.PortBinding{{ContainerPort: port, HostPort: port}} + + stateChangeEvents := taskEngine.StateChangeEvents() + + go taskEngine.AddTask(testTask) + + err := verifyTaskIsRunning(stateChangeEvents, testTask) + if err != nil { + t.Fatal(err) + } + + time.Sleep(waitForDockerDuration) + + var response []byte + for i := 0; i < 10; i++ { + conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", localhost, port), dialTimeout) + if err != nil { + t.Log("Error dialing simple container" + err.Error()) + } + response, err = ioutil.ReadAll(conn) + if err != nil { + t.Error("Error reading response", err) + } + if len(response) > 0 { + break + } + // Retry for a non-blank response. The container in docker 1.7+ sometimes + // isn't up quickly enough and we get a blank response. 
It's still unclear + // to me if this is a docker bug or netkitten bug + t.Log("Retrying getting response from container; got nothing") + time.Sleep(500 * time.Millisecond) + } + if string(response) != "hello linker" { + t.Error("Got response: " + string(response) + " instead of 'hello linker'") + } + + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyTaskIsStopped(stateChangeEvents, testTask) +} + +func TestVolumesFromRO(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask := createTestTask("testVolumeROContainer") + testTask.Containers[0].Image = testVolumeImage + for i := 0; i < 3; i++ { + cont := createTestContainer() + cont.Name = "test" + strconv.Itoa(i) + cont.Image = testVolumeImage + cont.Essential = i > 0 + testTask.Containers = append(testTask.Containers, cont) + } + testTask.Containers[1].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name, ReadOnly: true}} + testTask.Containers[1].Command = []string{"touch /data/readonly-fs || exit 42"} + // make all the three containers non-essential to make sure all of the + // container can be transitioned to running even one of them finished first + testTask.Containers[1].Essential = false + testTask.Containers[2].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name}} + testTask.Containers[2].Command = []string{"touch /data/notreadonly-fs-1 || exit 42"} + testTask.Containers[2].Essential = false + testTask.Containers[3].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name, ReadOnly: false}} + testTask.Containers[3].Command = []string{"touch /data/notreadonly-fs-2 || exit 42"} + testTask.Containers[3].Essential = false + + go taskEngine.AddTask(testTask) + + verifyTaskIsRunning(stateChangeEvents, testTask) + 
taskEngine.(*DockerTaskEngine).stopContainer(testTask, testTask.Containers[0]) + + verifyTaskIsStopped(stateChangeEvents, testTask) + + if testTask.Containers[1].GetKnownExitCode() == nil || *testTask.Containers[1].GetKnownExitCode() != 42 { + t.Error("Didn't exit due to failure to touch ro fs as expected: ", testTask.Containers[1].GetKnownExitCode()) + } + if testTask.Containers[2].GetKnownExitCode() == nil || *testTask.Containers[2].GetKnownExitCode() != 0 { + t.Error("Couldn't touch with default of rw") + } + if testTask.Containers[3].GetKnownExitCode() == nil || *testTask.Containers[3].GetKnownExitCode() != 0 { + t.Error("Couldn't touch with explicit rw") + } +} + +func createTestHostVolumeMountTask(tmpPath string) *apitask.Task { + testTask := createTestTask("testHostVolumeMount") + testTask.Volumes = []apitask.TaskVolume{{Name: "test-tmp", Volume: &taskresourcevolume.FSHostVolume{FSSourcePath: tmpPath}}} + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "/host/tmp", SourceVolume: "test-tmp"}} + testTask.Containers[0].Command = []string{`echo -n "hi" > /host/tmp/hello-from-container; if [[ "$(cat /host/tmp/test-file)" != "test-data" ]]; then exit 4; fi; exit 42`} + return testTask +} + +// This integ test is meant to validate the docker assumptions related to +// https://github.com/aws/amazon-ecs-agent/issues/261 +// Namely, this test verifies that Docker does emit a 'die' event after an OOM +// event if the init dies. +// Note: Your kernel must support swap limits in order for this test to run. +// See https://github.com/docker/docker/pull/4251 about enabling swap limit +// support, or set MY_KERNEL_DOES_NOT_SUPPORT_SWAP_LIMIT to non-empty to skip +// this test. 
+// (doc comment for this test is immediately above, at the end of the previous hunk)
+func TestInitOOMEvent(t *testing.T) {
+	if os.Getenv("MY_KERNEL_DOES_NOT_SUPPORT_SWAP_LIMIT") != "" {
+		t.Skip("Skipped because MY_KERNEL_DOES_NOT_SUPPORT_SWAP_LIMIT")
+	}
+	taskEngine, done, _ := setupWithDefaultConfig(t)
+	defer done()
+
+	stateChangeEvents := taskEngine.StateChangeEvents()
+
+	testTask := createTestTask("oomtest")
+	testTask.Containers[0].Memory = 20
+	testTask.Containers[0].Image = testBusyboxImage
+	testTask.Containers[0].Command = []string{"sh", "-c", `x="a"; while true; do x=$x$x$x; done`}
+	// should cause sh to get oomkilled as pid 1
+
+	go taskEngine.AddTask(testTask)
+
+	verifyContainerRunningStateChange(t, taskEngine)
+	verifyTaskRunningStateChange(t, taskEngine)
+
+	event := <-stateChangeEvents
+	// testify's require.Equal signature is (t, expected, actual); keep that
+	// order so failure messages label the two values correctly.
+	require.Equal(t, apicontainerstatus.ContainerStopped, event.(api.ContainerStateChange).Status, "Expected container to be STOPPED")
+
+	// hold on to the container stopped event, will need to check exit code
+	contEvent := event.(api.ContainerStateChange)
+
+	verifyTaskStoppedStateChange(t, taskEngine)
+
+	// 137 == 128 + SIGKILL(9): the kernel OOM killer SIGKILLs PID 1 (sh).
+	if contEvent.ExitCode == nil {
+		t.Error("Expected exitcode to be set")
+	} else if *contEvent.ExitCode != 137 {
+		t.Errorf("Expected exitcode to be 137, not %v", *contEvent.ExitCode)
+	}
+
+	dockerVersion, err := taskEngine.Version()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if strings.Contains(dockerVersion, " 1.9.") {
+		// Skip the final check for some versions of docker
+		t.Logf("Docker version is 1.9.x (%s); not checking OOM reason", dockerVersion)
+		return
+	}
+	if !strings.HasPrefix(contEvent.Reason, dockerapi.OutOfMemoryError{}.ErrorName()) {
+		t.Errorf("Expected reason to have OOM error, was: %v", contEvent.Reason)
+	}
+}
+
+// This integ test exercises the Docker "kill" facility, which exists to send
+// signals to PID 1 inside a container. Starting with Docker 1.7, a `kill`
+// event was emitted by the Docker daemon on any `kill` invocation.
+// Signals used in this test: +// SIGTERM - sent by Docker "stop" prior to SIGKILL (9) +// SIGUSR1 - used for the test as an arbitrary signal +func TestSignalEvent(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testArn := "signaltest" + testTask := createTestTask(testArn) + testTask.Containers[0].Image = testBusyboxImage + testTask.Containers[0].Command = []string{ + "sh", + "-c", + fmt.Sprintf(`trap "exit 42" %d; trap "echo signal!" %d; while true; do sleep 1; done`, int(syscall.SIGTERM), int(syscall.SIGUSR1)), + } + + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + // Signal the container now + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + client, _ := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + err := client.ContainerKill(context.TODO(), cid, "SIGUSR1") + require.NoError(t, err, "Could not signal container", err) + + // Verify the container has not stopped + time.Sleep(2 * time.Second) +check_events: + for { + select { + case event := <-stateChangeEvents: + if event.GetEventType() == statechange.ContainerEvent { + contEvent := event.(api.ContainerStateChange) + if contEvent.TaskArn != testTask.Arn { + continue + } + t.Fatalf("Expected no events; got " + contEvent.Status.String()) + } + default: + break check_events + } + } + + // Stop the container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + if testTask.Containers[0].GetKnownExitCode() == nil || *testTask.Containers[0].GetKnownExitCode() != 42 { + 
t.Error("Wrong exit code; file probably wasn't present") + } +} + +// TestDockerStopTimeout tests the container was killed after ECS_CONTAINER_STOP_TIMEOUT +func TestDockerStopTimeout(t *testing.T) { + os.Setenv("ECS_CONTAINER_STOP_TIMEOUT", testDockerStopTimeout.String()) + defer os.Unsetenv("ECS_CONTAINER_STOP_TIMEOUT") + cfg := defaultTestConfigIntegTest() + + taskEngine, _, _ := setup(cfg, nil, t) + + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + + if dockerTaskEngine.cfg.DockerStopTimeout != testDockerStopTimeout { + t.Errorf("Expect the docker stop timeout read from environment variable when ECS_CONTAINER_STOP_TIMEOUT is set, %v", dockerTaskEngine.cfg.DockerStopTimeout) + } + testTask := createTestTask("TestDockerStopTimeout") + testTask.Containers[0].Command = []string{"sh", "-c", "trap 'echo hello' SIGTERM; while true; do echo `date +%T`; sleep 1s; done;"} + testTask.Containers[0].Image = testBusyboxImage + testTask.Containers[0].Name = "test-docker-timeout" + + go dockerTaskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + startTime := ttime.Now() + dockerTaskEngine.stopContainer(testTask, testTask.Containers[0]) + + verifyContainerStoppedStateChange(t, taskEngine) + + if ttime.Since(startTime) < testDockerStopTimeout { + t.Errorf("Container stopped before the timeout: %v", ttime.Since(startTime)) + } + if ttime.Since(startTime) > testDockerStopTimeout+1*time.Second { + t.Errorf("Container should have stopped eariler, but stopped after %v", ttime.Since(startTime)) + } +} + +func TestStartStopWithSecurityOptionNoNewPrivileges(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + testArn := "testSecurityOptionNoNewPrivileges" + testTask := createTestTask(testArn) + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{HostConfig: aws.String(`{"SecurityOpt":["no-new-privileges"]}`)} + + go taskEngine.AddTask(testTask) + + 
verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + // Kill the existing container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestTaskLevelVolume(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask, tmpDirectory, err := createVolumeTask("task", "TestTaskLevelVolume", "TestTaskLevelVolume", true) + defer os.Remove(tmpDirectory) + require.NoError(t, err, "creating test task failed") + + go taskEngine.AddTask(testTask) + + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyTaskIsStopped(stateChangeEvents, testTask) + require.Equal(t, *testTask.Containers[0].GetKnownExitCode(), 0) + require.NotEqual(t, testTask.ResourcesMapUnsafe["dockerVolume"][0].(*taskresourcevolume.VolumeResource).VolumeConfig.Source(), "TestTaskLevelVolume", "task volume name is the same as specified in task definition") + + client := taskEngine.(*DockerTaskEngine).client + client.RemoveVolume(context.TODO(), "TestTaskLevelVolume", 5*time.Second) +} + +func TestSwapConfigurationTask(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestSwapMemory" + testTask := createTestTask(testArn) + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{HostConfig: aws.String(`{"MemorySwap":314572800, "MemorySwappiness":90}`)} + + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := 
context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + state, _ := client.ContainerInspect(ctx, cid) + require.EqualValues(t, 314572800, state.HostConfig.MemorySwap) + require.EqualValues(t, 90, *state.HostConfig.MemorySwappiness) + + // Kill the existing container now + taskUpdate := createTestTask(testArn) + taskUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(taskUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +func TestPerContainerStopTimeout(t *testing.T) { + // set the global stop timemout, but this should be ignored since the per container value is set + globalStopContainerTimeout := 1000 * time.Second + os.Setenv("ECS_CONTAINER_STOP_TIMEOUT", globalStopContainerTimeout.String()) + defer os.Unsetenv("ECS_CONTAINER_STOP_TIMEOUT") + cfg := defaultTestConfigIntegTest() + + taskEngine, _, _ := setup(cfg, nil, t) + + dockerTaskEngine := taskEngine.(*DockerTaskEngine) + + if dockerTaskEngine.cfg.DockerStopTimeout != globalStopContainerTimeout { + t.Errorf("Expect ECS_CONTAINER_STOP_TIMEOUT to be set to , %v", dockerTaskEngine.cfg.DockerStopTimeout) + } + + testTask := createTestTask("TestDockerStopTimeout") + testTask.Containers[0].Command = []string{"sh", "-c", "trap 'echo hello' SIGTERM; while true; do echo `date +%T`; sleep 1s; done;"} + testTask.Containers[0].Image = testBusyboxImage + testTask.Containers[0].Name = "test-docker-timeout" + testTask.Containers[0].StopTimeout = uint(testDockerStopTimeout.Seconds()) + + go dockerTaskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + startTime := ttime.Now() + dockerTaskEngine.stopContainer(testTask, testTask.Containers[0]) + + verifyContainerStoppedStateChange(t, taskEngine) + + if 
ttime.Since(startTime) < testDockerStopTimeout { + t.Errorf("Container stopped before the timeout: %v", ttime.Since(startTime)) + } + if ttime.Since(startTime) > testDockerStopTimeout+1*time.Second { + t.Errorf("Container should have stopped eariler, but stopped after %v", ttime.Since(startTime)) + } +} + +func TestMemoryOverCommit(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + memoryReservation := 50 + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + testArn := "TestMemoryOverCommit" + testTask := createTestTask(testArn) + + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{HostConfig: aws.String(`{ + "MemoryReservation": 52428800 }`)} + + go taskEngine.AddTask(testTask) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + state, _ := client.ContainerInspect(ctx, cid) + + require.EqualValues(t, memoryReservation*1024*1024, state.HostConfig.MemoryReservation) + + // Kill the existing container now + testUpdate := createTestTask(testArn) + testUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(testUpdate) + + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) +} + +// TestNetworkModeHost tests the container network can be configured +// as bridge mode in task definition +func TestNetworkModeHost(t *testing.T) { + testNetworkMode(t, "bridge") +} + +// TestNetworkModeBridge tests the container network can be configured +// as host mode in task definition +func TestNetworkModeBridge(t *testing.T) { + testNetworkMode(t, "host") +} 
+ +func TestFluentdTag(t *testing.T) { + // Skipping the test for arm as they do not have official support for Arm images + if runtime.GOARCH == "arm64" { + t.Skip("Skipping test, unsupported image for arm64") + } + + logdir := os.TempDir() + logdir = path.Join(logdir, "ftslog") + defer os.RemoveAll(logdir) + + os.Setenv("ECS_AVAILABLE_LOGGING_DRIVERS", `["fluentd"]`) + defer os.Unsetenv("ECS_AVAILABLE_LOGGING_DRIVERS") + + taskEngine, _, _ := setupWithDefaultConfig(t) + + client, err := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), + sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + require.NoError(t, err, "Creating go docker client failed") + + // start Fluentd driver task + testTaskFleuntdDriver := createTestTask("testFleuntdDriver") + testTaskFleuntdDriver.Volumes = []apitask.TaskVolume{{Name: "logs", Volume: &taskresourcevolume.FSHostVolume{FSSourcePath: "/tmp"}}} + testTaskFleuntdDriver.Containers[0].Image = testFluentdImage + testTaskFleuntdDriver.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "/fluentd/log", + SourceVolume: "logs"}} + testTaskFleuntdDriver.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: 24224, HostPort: 24224}} + go taskEngine.AddTask(testTaskFleuntdDriver) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + // Sleep before starting the test task so that fluentd driver is setup + time.Sleep(30 * time.Second) + + // start fluentd log task + testTaskFluentdLogTag := createTestTask("testFleuntdTag") + testTaskFluentdLogTag.Containers[0].Command = []string{"/bin/echo", "hello, this is fluentd integration test"} + testTaskFluentdLogTag.Containers[0].Image = testBusyboxImage + testTaskFluentdLogTag.Containers[0].DockerConfig = apicontainer.DockerConfig{ + HostConfig: aws.String(`{"LogConfig": { + "Type": "fluentd", + "Config": { + "fluentd-address":"0.0.0.0:24224", + "tag":"ecs.{{.Name}}.{{.FullID}}" + } + }}`)} + + go 
taskEngine.AddTask(testTaskFluentdLogTag) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskRunningStateChange(t, taskEngine) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTaskFluentdLogTag.Arn) + cid := containerMap[testTaskFluentdLogTag.Containers[0].Name].DockerID + state, _ := client.ContainerInspect(ctx, cid) + + // Kill the fluentd driver task + testUpdate := createTestTask("testFleuntdDriver") + testUpdate.SetDesiredStatus(apitaskstatus.TaskStopped) + go taskEngine.AddTask(testUpdate) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskStoppedStateChange(t, taskEngine) + + logTag := fmt.Sprintf("ecs.%v.%v", strings.Replace(state.Name, + "/", "", 1), cid) + + // Verify the log file existed and also the content contains the expected format + err = utils.SearchStrInDir(logdir, "ecsfts", "hello, this is fluentd integration test") + require.NoError(t, err, "failed to find the content in the fluent log file") + + err = utils.SearchStrInDir(logdir, "ecsfts", logTag) + require.NoError(t, err, "failed to find the log tag specified in the task definition") +} + +func TestDockerExecAPI(t *testing.T) { + testTimeout := 1 * time.Minute + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDockerExec" + testTask := createTestTask(taskArn) + + A := createTestContainerWithImageAndName(baseImageForOS, "A") + + A.EntryPoint = &entryPointForOS + A.Command = []string{"sleep 30"} + A.Essential = true + + testTask.Containers = []*apicontainer.Container{ + A, + } + execConfig := types.ExecConfig{ + User: "0", + Detach: true, + Cmd: []string{"ls"}, + } + go taskEngine.AddTask(testTask) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + finished := make(chan interface{}) + go func() { + // Both containers should start + 
verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + dockerID := containerMap[testTask.Containers[0].Name].DockerID + + //Create Exec object on the host + execContainerOut, err := taskEngine.(*DockerTaskEngine).client.CreateContainerExec(ctx, dockerID, execConfig, dockerclient.ContainerExecCreateTimeout) + require.NoError(t, err) + require.NotNil(t, execContainerOut) + + //Start the above Exec process on the host + err1 := taskEngine.(*DockerTaskEngine).client.StartContainerExec(ctx, execContainerOut.ID, dockerclient.ContainerExecStartTimeout) + require.NoError(t, err1) + + //Inspect the above Exec process on the host to check if the exit code is 0 which indicates successful run of the command + execContInspectOut, err := taskEngine.(*DockerTaskEngine).client.InspectContainerExec(ctx, execContainerOut.ID, dockerclient.ContainerExecInspectTimeout) + require.NoError(t, err) + require.Equal(t, dockerID, execContInspectOut.ContainerID) + require.Equal(t, 0, execContInspectOut.ExitCode) + + // Task should stop + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, testTimeout) +} diff --git a/agent/engine/engine_windows_integ_test.go b/agent/engine/engine_windows_integ_test.go new file mode 100644 index 00000000000..9091f308d46 --- /dev/null +++ b/agent/engine/engine_windows_integ_test.go @@ -0,0 +1,508 @@ +// +build windows,integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + "github.com/aws/amazon-ecs-agent/agent/ec2" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + s3factory "github.com/aws/amazon-ecs-agent/agent/s3/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/aws" + sdkClient "github.com/docker/docker/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + dockerEndpoint = "npipe:////./pipe/docker_engine" + testVolumeImage = "amazon/amazon-ecs-volumes-test:make" + testRegistryImage = "amazon/amazon-ecs-netkitten:make" + testBaseImage = "amazon-ecs-ftest-windows-base:make" + dockerVolumeDirectoryFormat = 
"c:\\ProgramData\\docker\\volumes\\%s\\_data" +) + +var endpoint = utils.DefaultIfBlank(os.Getenv(DockerEndpointEnvVariable), dockerEndpoint) + +// TODO implement this +func isDockerRunning() bool { return true } + +func createTestContainer() *apicontainer.Container { + return &apicontainer.Container{ + Name: "windows", + Image: testBaseImage, + Essential: true, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: 512, + Memory: 256, + } +} + +// getLongRunningCommand returns the command that keeps the container running for the container +// that uses the default integ test image (amazon-ecs-ftest-windows-base:make for windows) +func getLongRunningCommand() []string { + return []string{"ping", "-t", "localhost"} +} + +func createTestHostVolumeMountTask(tmpPath string) *apitask.Task { + testTask := createTestTask("testHostVolumeMount") + testTask.Volumes = []apitask.TaskVolume{{Name: "test-tmp", Volume: &taskresourcevolume.FSHostVolume{FSSourcePath: tmpPath}}} + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "C:/host/tmp", SourceVolume: "test-tmp"}} + testTask.Containers[0].Command = []string{ + `echo "hi" | Out-File -FilePath C:\host\tmp\hello-from-container -Encoding ascii ; $exists = Test-Path C:\host\tmp\test-file ; if (!$exists) { exit 2 } ;$contents = [IO.File]::ReadAllText("C:\host\tmp\test-file") ; if (!$contents -match "test-data") { $contents ; exit 4 } ; exit 42`, + } + return testTask +} + +func createTestLocalVolumeMountTask() *apitask.Task { + testTask := createTestTask("testLocalHostVolumeMount") + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].Command = []string{`Write-Output "empty-data-volume" | Out-File -FilePath C:\host\tmp\hello-from-container -Encoding ascii`} + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{{ContainerPath: "C:\\host\\tmp", SourceVolume: "test-tmp"}} + testTask.Volumes = 
[]apitask.TaskVolume{{Name: "test-tmp", Volume: &taskresourcevolume.LocalDockerVolume{}}} + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + testTask.Containers[0].TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + return testTask +} + +func createTestHealthCheckTask(arn string) *apitask.Task { + testTask := &apitask.Task{ + Arn: arn, + Family: "family", + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{createTestContainer()}, + } + testTask.Containers[0].Image = testBaseImage + testTask.Containers[0].Name = "test-health-check" + testTask.Containers[0].HealthCheckType = "docker" + testTask.Containers[0].Command = []string{"powershell", "-command", "Start-Sleep -s 300"} + testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{ + Config: aws.String(alwaysHealthyHealthCheckConfig), + } + return testTask +} + +func createVolumeTask(scope, arn, volume string, autoprovision bool) (*apitask.Task, string, error) { + testTask := createTestTask(arn) + + volumeConfig := &taskresourcevolume.DockerVolumeConfig{ + Scope: scope, + Driver: "local", + } + if scope == "shared" { + volumeConfig.Autoprovision = autoprovision + } + + testTask.Volumes = []apitask.TaskVolume{ + { + Type: "docker", + Name: volume, + Volume: volumeConfig, + }, + } + + // Construct the volume path, windows doesn't support create a volume from local directory + err := os.MkdirAll(fmt.Sprintf(dockerVolumeDirectoryFormat, volume), 0666) + if err != nil { + return nil, "", err + } + volumePath := filepath.Join(fmt.Sprintf(dockerVolumeDirectoryFormat, volume), "volumecontent") + err = ioutil.WriteFile(volumePath, []byte("volume"), 0666) + if err != nil { + return nil, "", err + } + + testTask.Containers[0].Image = testVolumeImage + testTask.Containers[0].TransitionDependenciesMap = 
make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + testTask.Containers[0].MountPoints = []apicontainer.MountPoint{ + { + SourceVolume: volume, + ContainerPath: "c:\\ecs", + }, + } + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + testTask.Containers[0].Command = []string{"$output = (cat c:\\ecs\\volumecontent); if ( $output -eq \"volume\" ) { Exit 0 } else { Exit 1 }"} + return testTask, volumePath, nil +} + +// TODO Modify the container ip to localhost after the AMI has the required feature +// https://github.com/docker/for-win/issues/204#issuecomment-352899657 + +func getContainerIP(client *sdkClient.Client, id string) (string, error) { + dockerContainer, err := client.ContainerInspect(context.TODO(), id) + if err != nil { + return "", err + } + + networks := dockerContainer.NetworkSettings.Networks + if len(networks) != 1 { + return "", fmt.Errorf("getContainerIP: inspect return multiple networks of container") + } + for _, v := range networks { + return v.IPAddress, nil + } + return "", nil +} + +func TestLocalHostVolumeMount(t *testing.T) { + cfg := defaultTestConfigIntegTest() + taskEngine, done, _ := setup(cfg, nil, t) + defer done() + + // creates a task with local volume + testTask := createTestLocalVolumeMountTask() + stateChangeEvents := taskEngine.StateChangeEvents() + go taskEngine.AddTask(testTask) + + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + + assert.NotNil(t, testTask.Containers[0].GetKnownExitCode(), "No exit code found") + assert.Equal(t, 0, *testTask.Containers[0].GetKnownExitCode(), "Wrong exit code") + + data, err := ioutil.ReadFile(filepath.Join("c:\\ProgramData\\docker\\volumes", testTask.Volumes[0].Volume.Source(), "_data", "hello-from-container")) + assert.Nil(t, err, "Unexpected error") + assert.Equal(t, 
"empty-data-volume", strings.TrimSpace(string(data)), "Incorrect file contents") +} + +// TestStartStopUnpulledImage ensures that an unpulled image is successfully +// pulled, run, and stopped via docker. +func TestStartStopUnpulledImage(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + // Ensure this image isn't pulled by deleting it + baseImg := os.Getenv("BASE_IMAGE_NAME") + removeImage(t, baseImg) + + testTask := createTestTask("testStartUnpulled") + testTask.Containers[0].Image = baseImg + + stateChangeEvents := taskEngine.StateChangeEvents() + + go taskEngine.AddTask(testTask) + + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, "Expected task to be RUNNING") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped, "Expected container to be STOPPED") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, "Expected task to be STOPPED") +} + +// TestStartStopUnpulledImageDigest ensures that an unpulled image with +// specified digest is successfully pulled, run, and stopped via docker. 
+func TestStartStopUnpulledImageDigest(t *testing.T) { + baseImgWithDigest := os.Getenv("BASE_IMAGE_NAME_WITH_DIGEST") + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + // Ensure this image isn't pulled by deleting it + removeImage(t, baseImgWithDigest) + + testTask := createTestTask("testStartUnpulledDigest") + testTask.Containers[0].Image = baseImgWithDigest + + stateChangeEvents := taskEngine.StateChangeEvents() + + go taskEngine.AddTask(testTask) + + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, "Expected task to be RUNNING") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerStopped, "Expected container to be STOPPED") + + event = <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskStopped, "Expected task to be STOPPED") +} + +func TestVolumesFrom(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask := createTestTask("testVolumeContainer") + testTask.Containers[0].Image = testVolumeImage + testTask.Containers = append(testTask.Containers, createTestContainer()) + testTask.Containers[1].Name = "test2" + testTask.Containers[1].Image = testVolumeImage + testTask.Containers[1].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name}} + testTask.Containers[1].Command = []string{"-c", "$output= (cat /data/test-file); if ($output -eq \"test\") { Exit 42 } else { Exit 1 }"} + + go taskEngine.AddTask(testTask) + + err := verifyTaskIsRunning(stateChangeEvents, testTask) + require.NoError(t, err) + verifyTaskIsStopped(stateChangeEvents, testTask) + assert.Equal(t, 
*testTask.Containers[1].GetKnownExitCode(), 42) +} + +func TestVolumesFromRO(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask := createTestTask("testVolumeROContainer") + testTask.Containers[0].Image = testVolumeImage + for i := 0; i < 3; i++ { + cont := createTestContainer() + cont.Name = "test" + strconv.Itoa(i) + cont.Image = testVolumeImage + cont.Essential = i > 0 + testTask.Containers = append(testTask.Containers, cont) + } + testTask.Containers[1].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name, ReadOnly: true}} + testTask.Containers[1].Command = []string{"New-Item c:/volume/readonly-fs; if ($?) { Exit 0 } else { Exit 42 }"} + testTask.Containers[1].Essential = false + testTask.Containers[2].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name}} + testTask.Containers[2].Command = []string{"New-Item c:/volume/readonly-fs-2; if ($?) { Exit 0 } else { Exit 42 }"} + testTask.Containers[2].Essential = false + testTask.Containers[3].VolumesFrom = []apicontainer.VolumeFrom{{SourceContainer: testTask.Containers[0].Name, ReadOnly: false}} + testTask.Containers[3].Command = []string{"New-Item c:/volume/readonly-fs-3; if ($?) 
{ Exit 0 } else { Exit 42 }"} + testTask.Containers[3].Essential = false + + go taskEngine.AddTask(testTask) + verifyTaskIsRunning(stateChangeEvents, testTask) + // Make sure all the three test container stopped first + verifyContainerStoppedStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + // Stop the task by stopping the essential container + taskEngine.(*DockerTaskEngine).stopContainer(testTask, testTask.Containers[0]) + verifyTaskIsStopped(stateChangeEvents, testTask) + + assert.NotEqual(t, *testTask.Containers[1].GetKnownExitCode(), 0, "didn't exit due to failure to touch ro fs as expected: ", *testTask.Containers[1].GetKnownExitCode()) + assert.Equal(t, *testTask.Containers[2].GetKnownExitCode(), 0, "couldn't touch with default of rw") + assert.Equal(t, *testTask.Containers[3].GetKnownExitCode(), 0, "couldn't touch with explicit rw") +} + +func TestTaskLevelVolume(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + stateChangeEvents := taskEngine.StateChangeEvents() + + testTask, tmpDirectory, err := createVolumeTask("task", "TestTaskLevelVolume", "TestTaskLevelVolume", true) + defer os.Remove(tmpDirectory) + require.NoError(t, err, "creating test task failed") + + // modify the command of the container so that the container will write to the volume + testTask.Containers[0].Command = []string{"Write-Output \"volume\" | Out-File -FilePath C:\\ecs\\volumecontent -Encoding ascii"} + + go taskEngine.AddTask(testTask) + + verifyTaskIsRunning(stateChangeEvents, testTask) + verifyTaskIsStopped(stateChangeEvents, testTask) + assert.NotEqual(t, testTask.ResourcesMapUnsafe["dockerVolume"][0].(*taskresourcevolume.VolumeResource).VolumeConfig.Source(), "TestTaskLevelVolume", "task volume name is the same as specified in task definition") + + // Find the volume mount path + var sourceVolume string + for _, vol := range testTask.Containers[0].MountPoints { + if 
vol.ContainerPath == "c:\\ecs" { + sourceVolume = vol.SourceVolume + } + } + assert.NotEmpty(t, sourceVolume) + + volumeFile := filepath.Join(fmt.Sprintf(dockerVolumeDirectoryFormat, sourceVolume), "volumecontent") + data, err := ioutil.ReadFile(volumeFile) + assert.NoError(t, err) + assert.Equal(t, string(data), "volume") +} + +func TestGMSATaskFile(t *testing.T) { + taskEngine, done, _ := setupGMSA(defaultTestConfigIntegTestGMSA(), nil, t) + defer done() + stateChangeEvents := taskEngine.StateChangeEvents() + + // Setup test gmsa file + envProgramData := "ProgramData" + dockerCredentialSpecDataDir := "docker/credentialspecs" + programDataDir := os.Getenv(envProgramData) + if programDataDir == "" { + programDataDir = "C:/ProgramData" + } + testFileName := "test-gmsa.json" + testCredSpecFilePath := filepath.Join(programDataDir, dockerCredentialSpecDataDir, testFileName) + testCredSpecData := []byte(`{ + "CmsPlugins": [ + "ActiveDirectory" + ], + "DomainJoinConfig": { + "Sid": "S-1-5-21-975084816-3050680612-2826754290", + "MachineAccountName": "gmsa-acct-test", + "Guid": "92a07e28-bd9f-4bf3-b1f7-0894815a5257", + "DnsTreeName": "gmsa.test.com", + "DnsName": "gmsa.test.com", + "NetBiosName": "gmsa" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "gmsa-acct-test", + "Scope": "gmsa.test.com" + } + ] + } +}`) + + err := ioutil.WriteFile(testCredSpecFilePath, testCredSpecData, 0755) + require.NoError(t, err) + + testContainer := createTestContainer() + testContainer.Name = "testGMSATaskFile" + + hostConfig := "{\"SecurityOpt\": [\"credentialspec:file://test-gmsa.json\"]}" + testContainer.DockerConfig.HostConfig = &hostConfig + + testTask := &apitask.Task{ + Arn: "testGMSAFileTaskARN", + Family: "family", + Version: "1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{testContainer}, + } + testTask.Containers[0].TransitionDependenciesMap = 
make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + testTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + testTask.Containers[0].Command = getLongRunningCommand() + + go taskEngine.AddTask(testTask) + + err = verifyTaskIsRunning(stateChangeEvents, testTask) + require.NoError(t, err) + + client, _ := sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + containerMap, _ := taskEngine.(*DockerTaskEngine).state.ContainerMapByArn(testTask.Arn) + cid := containerMap[testTask.Containers[0].Name].DockerID + + testCredentialSpecOption := "credentialspec=file://test-gmsa.json" + err = verifyContainerCredentialSpec(client, cid, testCredentialSpecOption) + assert.NoError(t, err) + + // Kill the existing container now + err = client.ContainerKill(context.TODO(), cid, "SIGKILL") + assert.NoError(t, err, "Could not kill container") + + verifyTaskIsStopped(stateChangeEvents, testTask) + + // Cleanup the test file + err = os.Remove(testCredSpecFilePath) + assert.NoError(t, err) +} + +func defaultTestConfigIntegTestGMSA() *config.Config { + cfg, _ := config.NewConfig(ec2.NewBlackholeEC2MetadataClient()) + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + cfg.ImagePullBehavior = config.ImagePullPreferCachedBehavior + + cfg.GMSACapable = true + cfg.AWSRegion = "us-west-2" + + return cfg +} + +func setupGMSA(cfg *config.Config, state dockerstate.TaskEngineState, t *testing.T) (TaskEngine, func(), credentials.Manager) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + if os.Getenv("ECS_SKIP_ENGINE_INTEG_TEST") != "" { + t.Skip("ECS_SKIP_ENGINE_INTEG_TEST") + } + if !isDockerRunning() { + t.Skip("Docker not running") + } + + sdkClientFactory := sdkclientfactory.NewFactory(ctx, dockerEndpoint) + dockerClient, err := dockerapi.NewDockerGoClient(sdkClientFactory, cfg, context.Background()) + if err != nil { + t.Fatalf("Error 
creating Docker client: %v", err) + } + credentialsManager := credentials.NewManager() + if state == nil { + state = dockerstate.NewTaskEngineState() + } + imageManager := NewImageManager(cfg, dockerClient, state) + imageManager.SetDataClient(data.NewNoopClient()) + metadataManager := containermetadata.NewManager(dockerClient, cfg) + + resourceFields := &taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmfactory.NewSSMClientCreator(), + }, + DockerClient: dockerClient, + S3ClientCreator: s3factory.NewS3ClientCreator(), + } + + taskEngine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager, + eventstream.NewEventStream("ENGINEINTEGTEST", context.Background()), imageManager, state, metadataManager, + resourceFields, execcmd.NewManager()) + taskEngine.MustInit(context.TODO()) + return taskEngine, func() { + taskEngine.Shutdown() + }, credentialsManager +} + +func verifyContainerCredentialSpec(client *sdkClient.Client, id, credentialspecOpt string) error { + dockerContainer, err := client.ContainerInspect(context.TODO(), id) + if err != nil { + return err + } + + for _, opt := range dockerContainer.HostConfig.SecurityOpt { + if strings.HasPrefix(opt, "credentialspec=") && opt == credentialspecOpt { + return nil + } + } + + return errors.New("unable to obtain credentialspec") +} diff --git a/agent/engine/errors.go b/agent/engine/errors.go new file mode 100644 index 00000000000..5c99715889a --- /dev/null +++ b/agent/engine/errors.go @@ -0,0 +1,99 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" +) + +type cannotStopContainerError interface { + apierrors.NamedError + IsRetriableError() bool +} + +// impossibleTransitionError is an error that occurs when an event causes a +// container to try and transition to a state that it cannot be moved to +type impossibleTransitionError struct { + state apicontainerstatus.ContainerStatus +} + +func (err *impossibleTransitionError) Error() string { + return "Cannot transition to " + err.state.String() +} +func (err *impossibleTransitionError) ErrorName() string { return "ImpossibleStateTransitionError" } + +// ContainerVanishedError is a type for describing a container that does not exist +type ContainerVanishedError struct{} + +func (err ContainerVanishedError) Error() string { return "No container matching saved ID found" } + +// ErrorName returns the name of the error +func (err ContainerVanishedError) ErrorName() string { return "ContainerVanishedError" } + +// TaskDependencyError is the error for task that dependencies can't +// be resolved +type TaskDependencyError struct { + taskArn string +} + +func (err TaskDependencyError) Error() string { + return "Task dependencies cannot be resolved, taskArn: " + err.taskArn +} + +// ErrorName is the name of the error +func (err TaskDependencyError) ErrorName() string { + return "TaskDependencyError" +} + +// TaskStoppedBeforePullBeginError is a type for task errors involving pull +type TaskStoppedBeforePullBeginError struct { + taskArn string +} + +func (err TaskStoppedBeforePullBeginError) Error() string { + return "Task stopped before image pull could begin for task: " + 
err.taskArn +} + +// ErrorName returns the name of the error +func (TaskStoppedBeforePullBeginError) ErrorName() string { + return "TaskStoppedBeforePullBeginError" +} + +// ContainerNetworkingError indicates any error when dealing with the network +// namespace of container +type ContainerNetworkingError struct { + fromError error +} + +func (err ContainerNetworkingError) Error() string { + return err.fromError.Error() +} + +func (err ContainerNetworkingError) ErrorName() string { + return "ContainerNetworkingError" +} + +// CannotGetDockerClientVersionError indicates error when trying to get docker +// client api version +type CannotGetDockerClientVersionError struct { + fromError error +} + +func (err CannotGetDockerClientVersionError) ErrorName() string { + return "CannotGetDockerClientVersionError" +} +func (err CannotGetDockerClientVersionError) Error() string { + return err.fromError.Error() +} diff --git a/agent/engine/execcmd/agent_version.go b/agent/engine/execcmd/agent_version.go new file mode 100644 index 00000000000..96ae6db5cbd --- /dev/null +++ b/agent/engine/execcmd/agent_version.go @@ -0,0 +1,122 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package execcmd + +import ( + "errors" + "io/ioutil" + "strconv" + "strings" +) + +const ( + versionDelimiter = "." + badFormatErrorStr = "invalid version format" +) + +type agentVersion []uint + +// compare compares this agentVersion against another one passed as parameter. 
+// returns -1 if lhs < rhs, 0 lhs == rhs, and 1 if lhs > rhs. +// example: +// [3, 0, 236, 0].compare([3, 0, 237, 0]) returns -1 +// [3, 0, 236, 0].compare([3, 0, 236, 0]) returns 0 +// [3, 0, 237, 0].compare([3, 0, 236, 0]) returns 1 +// [3].compare([2, 0, 236]) returns 1 +func (lhs agentVersion) compare(rhs agentVersion) int { + lhsLen := len(lhs) + rhsLen := len(rhs) + maxLen := lhsLen + if rhsLen > maxLen { + maxLen = rhsLen + } + for i := 0; i < maxLen; i++ { + v1 := uint(0) + v2 := uint(0) + if i < lhsLen { + v1 = lhs[i] + } + if i < rhsLen { + v2 = rhs[i] + } + if v1 < v2 { + return -1 + } else if v1 > v2 { + return 1 + } + } + return 0 +} + +// String the string representation of an agentVersion +// e.g. [3, 0, 236, 0] -> "3.0.236.0" +func (lhs agentVersion) String() string { + var avStr []string + for _, e := range lhs { + avStr = append(avStr, strconv.Itoa(int(e))) + } + return strings.Join(avStr, versionDelimiter) +} + +func parseAgentVersion(version string) (agentVersion, error) { + var parsedVersion []uint + vArr := strings.Split(version, versionDelimiter) + for _, part := range vArr { + pInt, err := strconv.ParseUint(part, 10, 32) + if err != nil { + return nil, errors.New(badFormatErrorStr) + } + parsedVersion = append(parsedVersion, uint(pInt)) + } + return parsedVersion, nil +} + +// This is a helper type to make []agentVersion sortable +type byAgentVersion []agentVersion + +// Len fulfills sort.Interface +func (bav byAgentVersion) Len() int { + return len(bav) +} + +// Less fulfills sort.Interface +func (bav byAgentVersion) Less(i, j int) bool { + return bav[i].compare(bav[j]) < 0 +} + +// Swap fulfills sort.Interface +func (bav byAgentVersion) Swap(i, j int) { + bav[i], bav[j] = bav[j], bav[i] +} + +var ioUtilReadDir = ioutil.ReadDir + +func retrieveAgentVersions(agentBinPath string) ([]agentVersion, error) { + files, err := ioUtilReadDir(agentBinPath) + if err != nil { + return nil, err + } + var versions []agentVersion + for _, f := range 
files { + if !f.IsDir() { + continue + } + v, err := parseAgentVersion(f.Name()) + if err != nil { // The bin directory could have random folders, so we omit anything that cannot be parsed to a version + continue + } + versions = append(versions, v) + } + return versions, nil +} diff --git a/agent/engine/execcmd/agent_version_test.go b/agent/engine/execcmd/agent_version_test.go new file mode 100644 index 00000000000..b65b6ee5fce --- /dev/null +++ b/agent/engine/execcmd/agent_version_test.go @@ -0,0 +1,222 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package execcmd + +import ( + "errors" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestAgentVersionCompare(t *testing.T) { + tt := []struct { + name string + lhs, rhs agentVersion + res int + }{ + { + "lhs < rhs", []uint{3, 0, 236, 0}, []uint{3, 0, 237, 0}, -1, + }, + { + "lhs == rhs, where both are empty", []uint{}, []uint{}, 0, + }, + { + "lhs == rhs", []uint{3, 0, 236, 0}, []uint{3, 0, 236, 0}, 0, + }, + { + "lhs > rhs", []uint{3, 0, 237, 0}, []uint{3, 0, 236, 0}, 1, + }, + { + "fewer elements lhs", []uint{3, 0, 236}, []uint{1, 1, 1, 1}, 1, + }, + { + "fewer elements rhs", []uint{1, 1, 1, 1}, []uint{3, 0, 236}, -1, + }, + { + "blank lhs", []uint{}, []uint{1, 1, 1, 1}, -1, + }, + { + "blank rhs", []uint{1, 1, 1, 1}, []uint{}, 1, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + res := tc.lhs.compare(tc.rhs) + assert.Equal(t, tc.res, res) + }) + } +} + +func TestAgentVersionString(t *testing.T) { + tt := []struct { + name string + av agentVersion + expectedString string + }{ + { + "nil returns empty string", + nil, + ""}, + { + "single element", + agentVersion{1}, + "1", + }, + { + "multi element", + agentVersion{1, 2}, + "1.2", + }, + { + "real world case", + agentVersion{3, 0, 236, 0}, + "3.0.236.0", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedString, tc.av.String()) + }) + } +} + +func TestByAgentVersion(t *testing.T) { + bav := byAgentVersion( + []agentVersion{ + {3, 0, 236, 0}, + {1, 1, 1, 1}, + {1, 1, 1, 1}, + {2, 0, 0, 0}, + }) + + assert.Equal(t, bav.Len(), 4) + assert.True(t, bav.Less(3, 0)) // {2, 0, 0, 0} < {3, 0, 236, 0} + assert.False(t, bav.Less(0, 3)) // {3, 0, 236, 0} > {2, 0, 0, 0} + assert.False(t, bav.Less(1, 2)) // {1, 1, 1, 1} == {1, 1, 1, 1} + + swappedBav := byAgentVersion( + []agentVersion{ + {2, 0, 0, 0}, + {1, 1, 1, 1}, + {1, 1, 1, 1}, + {3, 0, 236, 0}, + }) + bav.Swap(0, 3) + assert.Equal(t, 
swappedBav, bav) + + sortedBav := byAgentVersion( + []agentVersion{ + {1, 1, 1, 1}, + {1, 1, 1, 1}, + {2, 0, 0, 0}, + {3, 0, 236, 0}, + }) + sort.Sort(bav) + assert.Equal(t, sortedBav, bav) +} + +type mockFileInfo struct { + name string + isDir bool +} + +func (fi *mockFileInfo) Name() string { return fi.name } +func (fi *mockFileInfo) IsDir() bool { return fi.isDir } +func (fi *mockFileInfo) Size() int64 { return 0 } +func (fi *mockFileInfo) Mode() os.FileMode { return 0 } +func (fi *mockFileInfo) ModTime() time.Time { return time.Time{} } +func (fi *mockFileInfo) Sys() interface{} { return nil } + +func TestRetrieveAgentVersions(t *testing.T) { + defer func() { + ioUtilReadDir = ioutil.ReadDir + }() + tt := []struct { + name string + files []os.FileInfo + expectedVersions []agentVersion + expectedError error + }{ + { + name: "ioutil.ReadDir error", + expectedError: errors.New("mock error"), + }, + { + name: "no files", + files: nil, + }, + { + name: "non-version directories/files", + files: []os.FileInfo{ + &mockFileInfo{ + name: "0.0.0.0", + isDir: true, + }, + &mockFileInfo{ + name: "randomDir", + isDir: true, + }, + &mockFileInfo{ + name: "randomFile", + isDir: false, + }, + &mockFileInfo{ + name: "1.0.0.0", + isDir: true, + }, + }, + expectedVersions: []agentVersion{ + {0, 0, 0, 0}, + {1, 0, 0, 0}, + }, + }, + { + name: "happy path", + files: []os.FileInfo{ + &mockFileInfo{ + name: "3.0.236.0", + isDir: true, + }, + &mockFileInfo{ + name: "0.0.0.0", + isDir: true, + }, + &mockFileInfo{ + name: "1.0.0.0", + isDir: true, + }, + }, + expectedVersions: []agentVersion{ + {3, 0, 236, 0}, + {0, 0, 0, 0}, + {1, 0, 0, 0}, + }, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + ioUtilReadDir = func(dirname string) ([]os.FileInfo, error) { + return tc.files, tc.expectedError + } + versions, err := retrieveAgentVersions("/var/lib/ecs/deps/execute-command/bin") + assert.Equal(t, tc.expectedError, err) + assert.Equal(t, tc.expectedVersions, 
versions) + }) + } +} diff --git a/agent/engine/execcmd/generate_mocks.go b/agent/engine/execcmd/generate_mocks.go new file mode 100644 index 00000000000..2372e054fc9 --- /dev/null +++ b/agent/engine/execcmd/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package execcmd + +//go:generate mockgen -destination=mocks/execcmd_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/engine/execcmd Manager diff --git a/agent/engine/execcmd/manager.go b/agent/engine/execcmd/manager.go new file mode 100644 index 00000000000..af5822333a5 --- /dev/null +++ b/agent/engine/execcmd/manager.go @@ -0,0 +1,117 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package execcmd + +import ( + "context" + "strconv" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + hostExecDepsDir = "/var/lib/ecs/deps/execute-command" + HostBinDir = hostExecDepsDir + "/bin" + + ExecuteCommandAgentName = ecs.ManagedAgentNameExecuteCommandAgent + defaultStartRetryTimeout = time.Minute * 10 + defaultRetryMinDelay = time.Second * 1 + defaultRetryMaxDelay = time.Second * 30 + defaultInspectRetryTimeout = time.Minute * 2 + maxRetries = 5 + retryDelayMultiplier = 1.5 + retryJitterMultiplier = 0.2 + + Restarted RestartStatus = iota + NotRestarted + Unknown +) + +type RestartStatus int + +func (rs RestartStatus) String() string { + switch rs { + case Restarted: + return "Restarted" + case NotRestarted: + return "NotRestarted" + case Unknown: + return "Unknown" + default: + return strconv.Itoa(int(rs)) + } +} + +type StartError struct { + error + retryable bool +} + +func (e StartError) Retry() bool { + return e.retryable +} + +type Manager interface { + InitializeContainer(taskId string, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig) error + StartAgent(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) error + RestartAgentIfStopped(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (RestartStatus, error) +} + +type manager struct { + hostBinDir string + retryMaxDelay time.Duration + retryMinDelay time.Duration + startRetryTimeout time.Duration + inspectRetryTimeout time.Duration +} + +func NewManager() *manager { + return &manager{ + hostBinDir: HostBinDir, + retryMaxDelay: 
defaultRetryMaxDelay, + retryMinDelay: defaultRetryMinDelay, + startRetryTimeout: defaultStartRetryTimeout, + inspectRetryTimeout: defaultInspectRetryTimeout, + } +} + +func NewManagerWithBinDir(hostBinDir string) *manager { + m := NewManager() + m.hostBinDir = hostBinDir + return m +} + +func (m *manager) isAgentStarted(ma apicontainer.ManagedAgent) bool { + return !ma.LastStartedAt.IsZero() +} + +func IsExecEnabledTask(task *apitask.Task) bool { + for _, c := range task.Containers { + if IsExecEnabledContainer(c) { + return true + } + } + return false +} + +func IsExecEnabledContainer(container *apicontainer.Container) bool { + _, ok := container.GetManagedAgentByName(ExecuteCommandAgentName) + return ok +} diff --git a/agent/engine/execcmd/manager_init_task_linux.go b/agent/engine/execcmd/manager_init_task_linux.go new file mode 100644 index 00000000000..160e67368b9 --- /dev/null +++ b/agent/engine/execcmd/manager_init_task_linux.go @@ -0,0 +1,359 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package execcmd + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/pborman/uuid" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" +) + +const ( + namelessContainerPrefix = "nameless-container-" + + ecsAgentExecDepsDir = "/managed-agents/execute-command" + + // ecsAgentDepsBinDir is the directory where ECS Agent will read versions of SSM agent + ecsAgentDepsBinDir = ecsAgentExecDepsDir + "/bin" + ecsAgentDepsCertsDir = ecsAgentExecDepsDir + "/certs" + + ContainerDepsDirPrefix = "/ecs-execute-command-" + + // filePerm is the permission for the exec agent config file. + filePerm = 0644 + defaultSessionLimit = 2 + + SSMAgentBinName = "amazon-ssm-agent" + SSMAgentWorkerBinName = "ssm-agent-worker" + SessionWorkerBinName = "ssm-session-worker" + + HostLogDir = "/var/log/ecs/exec" + ContainerLogDir = "/var/log/amazon/ssm" + ECSAgentExecLogDir = "/log/exec" + + HostCertFile = "/var/lib/ecs/deps/execute-command/certs/tls-ca-bundle.pem" + ContainerCertFileSuffix = "certs/amazon-ssm-agent.crt" + + containerConfigFileName = "amazon-ssm-agent.json" + ContainerConfigDirName = "config" + ContainerConfigFileSuffix = "configuration/" + containerConfigFileName + + // ECSAgentExecConfigDir is the directory where ECS Agent will write the ExecAgent config files to + ECSAgentExecConfigDir = ecsAgentExecDepsDir + "/" + ContainerConfigDirName + // HostExecConfigDir is the dir where ExecAgents Config files will live + HostExecConfigDir = hostExecDepsDir + "/" + ContainerConfigDirName + ExecAgentLogConfigFileName = "seelog.xml" + ContainerLogConfigFile = "configuration/" + ExecAgentLogConfigFileName +) + +var ( + execAgentConfigTemplate = `{ + "Mgs": { + "Region": "", + "Endpoint": "", + "StopTimeoutMillis": 20000, + 
"SessionWorkersLimit": %d + }, + "Agent": { + "Region": "", + "OrchestrationRootDir": "", + "ContainerMode": true + } +}` + execAgentLogConfigTemplate = ` + + + + + + + + + + + + + + + +` + // TODO: [ecs-exec] seelog config needs to be implemented following a similar approach to ss, config + execAgentConfigFileNameTemplate = `amazon-ssm-agent-%s.json` + logConfigFileNameTemplate = `seelog-%s.xml` + errExecCommandManagedAgentNotFound = fmt.Errorf("managed agent not found (%s)", ExecuteCommandAgentName) +) + +// InitializeContainer adds the necessary bind mounts in order for the ExecCommandAgent to run properly in the container +// TODO: [ecs-exec] Should we validate the ssm agent binaries & certs are valid and fail here if they're not? (bind mount will succeed even if files don't exist in host) +func (m *manager) InitializeContainer(taskId string, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig) (rErr error) { + defer func() { + if rErr != nil { + container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{ + InitFailed: true, + Status: apicontainerstatus.ManagedAgentStopped, + Reason: rErr.Error(), + }) + } + }() + ma, ok := container.GetManagedAgentByName(ExecuteCommandAgentName) + if !ok { + return errExecCommandManagedAgentNotFound + } + configFile, rErr := GetExecAgentConfigFileName(getSessionWorkersLimit(ma)) + if rErr != nil { + rErr = fmt.Errorf("could not generate ExecAgent Config File: %v", rErr) + return rErr + } + logConfigFile, rErr := GetExecAgentLogConfigFile() + if rErr != nil { + rErr = fmt.Errorf("could not generate ExecAgent LogConfig file: %v", rErr) + return rErr + } + uuid := newUUID() + containerDepsFolder := ContainerDepsDirPrefix + uuid + + latestBinVersionDir, rErr := m.getLatestVersionedHostBinDir() + if rErr != nil { + return rErr + } + + if !certsExist() { + rErr = fmt.Errorf("could not find certs") + return rErr + } + + // Add ssm binary mounts + hostConfig.Binds = 
append(hostConfig.Binds, getReadOnlyBindMountMapping( + filepath.Join(latestBinVersionDir, SSMAgentBinName), + filepath.Join(containerDepsFolder, SSMAgentBinName))) + + hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping( + filepath.Join(latestBinVersionDir, SSMAgentWorkerBinName), + filepath.Join(containerDepsFolder, SSMAgentWorkerBinName))) + + hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping( + filepath.Join(latestBinVersionDir, SessionWorkerBinName), + filepath.Join(containerDepsFolder, SessionWorkerBinName))) + + // Add exec agent config file mount + hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping( + filepath.Join(HostExecConfigDir, configFile), + filepath.Join(containerDepsFolder, ContainerConfigFileSuffix))) + + // Add exec agent log config file mount + hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping( + filepath.Join(HostExecConfigDir, logConfigFile), + filepath.Join(containerDepsFolder, ContainerLogConfigFile))) + + // Append TLS cert mount + hostConfig.Binds = append(hostConfig.Binds, getReadOnlyBindMountMapping( + HostCertFile, + filepath.Join(containerDepsFolder, ContainerCertFileSuffix))) + + // Add ssm log bind mount + cn := fileSystemSafeContainerName(container) + hostConfig.Binds = append(hostConfig.Binds, getBindMountMapping( + filepath.Join(HostLogDir, taskId, cn), + ContainerLogDir)) + + container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{ + ID: uuid, + }) + + return nil +} + +func (m *manager) getLatestVersionedHostBinDir() (string, error) { + versions, err := retrieveAgentVersions(ecsAgentDepsBinDir) + if err != nil { + return "", err + } + sort.Sort(sort.Reverse(byAgentVersion(versions))) + + var latest string + for _, v := range versions { + vStr := v.String() + ecsAgentDepsVersionedBinDir := filepath.Join(ecsAgentDepsBinDir, vStr) + if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SSMAgentBinName)) { + 
continue // try falling back to the previous version + } + // TODO: [ecs-exec] This requirement will be removed for SSM agent V2 + if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SSMAgentWorkerBinName)) { + continue // try falling back to the previous version + } + if !fileExists(filepath.Join(ecsAgentDepsVersionedBinDir, SessionWorkerBinName)) { + continue // try falling back to the previous version + } + latest = filepath.Join(m.hostBinDir, vStr) + break + } + if latest == "" { + return "", fmt.Errorf("no valid versions were found in %s", m.hostBinDir) + } + return latest, nil +} + +func getReadOnlyBindMountMapping(hostDir, containerDir string) string { + return getBindMountMapping(hostDir, containerDir) + ":ro" +} + +func getBindMountMapping(hostDir, containerDir string) string { + return hostDir + ":" + containerDir +} + +var newUUID = uuid.New + +func fileSystemSafeContainerName(c *apicontainer.Container) string { + // Trim leading hyphens since they're not valid directory names + cn := strings.TrimLeft(c.Name, "-") + if cn == "" { + // Fallback name in the extreme case that we end up with an empty string after trimming all leading hyphens. 
+ return namelessContainerPrefix + newUUID() + } + return cn +} + +func getSessionWorkersLimit(ma apicontainer.ManagedAgent) int { + // TODO [ecs-exec] : verify that returning the default session limit (2) is ok in case of any errors, misconfiguration + limit := defaultSessionLimit + if ma.Properties == nil { // This means ACS didn't send the limit + return limit + } + limitStr, ok := ma.Properties["sessionLimit"] + if !ok { // This also means ACS didn't send the limit + return limit + } + limit, err := strconv.Atoi(limitStr) + if err != nil { // This means ACS send a limit that can't be converted to an int + return limit + } + if limit <= 0 { + limit = defaultSessionLimit + } + return limit +} + +var GetExecAgentLogConfigFile = getAgentLogConfigFile + +func getAgentLogConfigFile() (string, error) { + hash := getExecAgentConfigHash(execAgentLogConfigTemplate) + logConfigFileName := fmt.Sprintf(logConfigFileNameTemplate, hash) + // check if config file exists already + logConfigFilePath := filepath.Join(ECSAgentExecConfigDir, logConfigFileName) + if fileExists(logConfigFilePath) && validConfigExists(logConfigFilePath, hash) { + return logConfigFileName, nil + } + // check if config file is a dir; if true, remove it + if isDir(logConfigFilePath) { + if err := removeAll(logConfigFilePath); err != nil { + return "", err + } + } + // create new seelog config file + if err := createNewExecAgentConfigFile(execAgentLogConfigTemplate, logConfigFilePath); err != nil { + return "", err + } + return logConfigFileName, nil +} + +var removeAll = os.RemoveAll + +func validConfigExists(configFilePath, expectedHash string) bool { + config, err := getFileContent(configFilePath) + if err != nil { + return false + } + hash := getExecAgentConfigHash(string(config)) + return strings.Compare(expectedHash, hash) == 0 +} + +var getFileContent = readFileContent + +func readFileContent(filePath string) ([]byte, error) { + return ioutil.ReadFile(filePath) +} + +var GetExecAgentConfigFileName 
= getAgentConfigFileName + +func getAgentConfigFileName(sessionLimit int) (string, error) { + config := fmt.Sprintf(execAgentConfigTemplate, sessionLimit) + hash := getExecAgentConfigHash(config) + configFileName := fmt.Sprintf(execAgentConfigFileNameTemplate, hash) + // check if config file exists already + configFilePath := filepath.Join(ECSAgentExecConfigDir, configFileName) + if fileExists(configFilePath) && validConfigExists(configFilePath, hash) { + return configFileName, nil + } + // check if config file is a dir; if true, remove it + if isDir(configFilePath) { + if err := removeAll(configFilePath); err != nil { + return "", err + } + } + // config doesn't exist; create a new one + if err := createNewExecAgentConfigFile(config, configFilePath); err != nil { + return "", err + } + return configFileName, nil +} + +func getExecAgentConfigHash(config string) string { + hash := sha256.New() + hash.Write([]byte(config)) + return base64.URLEncoding.EncodeToString(hash.Sum(nil)) +} + +var osStat = os.Stat + +func fileExists(path string) bool { + if fi, err := osStat(path); err == nil { + return !fi.IsDir() + } + return false +} + +func isDir(path string) bool { + if fi, err := osStat(path); err == nil { + return fi.IsDir() + } + return false +} + +var createNewExecAgentConfigFile = createNewConfigFile + +func createNewConfigFile(config, configFilePath string) error { + return ioutil.WriteFile(configFilePath, []byte(config), filePerm) +} + +func certsExist() bool { + return fileExists(filepath.Join(ecsAgentDepsCertsDir, "tls-ca-bundle.pem")) +} diff --git a/agent/engine/execcmd/manager_init_task_linux_test.go b/agent/engine/execcmd/manager_init_task_linux_test.go new file mode 100644 index 00000000000..012a7d19e73 --- /dev/null +++ b/agent/engine/execcmd/manager_init_task_linux_test.go @@ -0,0 +1,487 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package execcmd + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strconv" + "testing" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" +) + +func TestInitializeContainer(t *testing.T) { + defer func() { + GetExecAgentConfigFileName = getAgentConfigFileName + newUUID = uuid.New + ioUtilReadDir = ioutil.ReadDir + osStat = os.Stat + GetExecAgentLogConfigFile = getAgentLogConfigFile + }() + + newUUID = func() string { + return "test-UUID" + } + + const ( + containerNameOnlyHyphens = "--" + latestVersion = "3.0.236.0" + previousVersion = "2.0.0.0" + ) + var tt = []struct { + name string + managedAgentName string + getExecAgentConfigFileNameError error + getExecAgentLogConfigError error + retrieveBinVersionsError error + simulateNoValidVersion bool + simulateEmptyVersionDir bool + simulateFallbackToPreviousVersion bool + expectedVersionUsed string + expectedError error + }{ + { + name: "no ExecuteCommandAgent in container", + managedAgentName: "randomAgent", + expectedError: errExecCommandManagedAgentNotFound, + }, + { + name: "simulate config file name generation error", + managedAgentName: ExecuteCommandAgentName, + getExecAgentConfigFileNameError: errors.New("mockError"), + expectedError: fmt.Errorf("could not generate ExecAgent Config File: %v", errors.New("mockError")), + }, + { + name: "simulate 
error when reading versions in bin dir", + managedAgentName: ExecuteCommandAgentName, + retrieveBinVersionsError: errors.New("mockError"), + expectedError: errors.New("mockError"), + }, + { + name: "simulate no valid versions exist in bin dir", + managedAgentName: ExecuteCommandAgentName, + simulateNoValidVersion: true, + expectedError: fmt.Errorf("no valid versions were found in %s", HostBinDir), + }, + { + name: "simulate valid version exists but dir is empty", + managedAgentName: ExecuteCommandAgentName, + simulateEmptyVersionDir: true, + expectedError: fmt.Errorf("no valid versions were found in %s", HostBinDir), + }, + { + name: "can fallback to previous version if latest is empty", + managedAgentName: ExecuteCommandAgentName, + simulateFallbackToPreviousVersion: true, + expectedVersionUsed: previousVersion, + }, + { + name: "happy path", + managedAgentName: ExecuteCommandAgentName, + expectedVersionUsed: latestVersion, + }, + { + name: "simulate log config file generation error", + managedAgentName: ExecuteCommandAgentName, + getExecAgentLogConfigError: errors.New("mockError"), + expectedError: fmt.Errorf("could not generate ExecAgent LogConfig file: %v", errors.New("mockError")), + }, + } + for _, test := range tt { + t.Run(test.name, func(t *testing.T) { + containers := []*apicontainer.Container{ + { + Name: "container-name", + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{{Name: test.managedAgentName}}, + }, + { + Name: "--container-name", + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{{Name: test.managedAgentName}}, + }, + { + Name: containerNameOnlyHyphens, // Container that will end up in an empty directory name (trailing hyphens are removed) + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{{Name: test.managedAgentName}}, + }, + } + + execCmdMgr := newTestManager() + + GetExecAgentConfigFileName = func(s int) (string, error) { + return "amazon-ssm-agent.json", test.getExecAgentConfigFileNameError + } + + GetExecAgentLogConfigFile = func() 
(string, error) { + return "seelog.xml", test.getExecAgentLogConfigError + } + + ioUtilReadDir = func(dirname string) ([]os.FileInfo, error) { // Needed to simulate retrieving agent versions + latestVersionIsDir := true + previousVersionIsDir := true + if test.simulateNoValidVersion { + // A valid version is only considered if it's a dir, so to simulate invalid version we just say it's + // NOT a dir. + latestVersionIsDir = false + previousVersionIsDir = false + } + + if test.simulateFallbackToPreviousVersion { + // making latest version invalid and previous one valid + latestVersionIsDir = false + previousVersionIsDir = true + } + + return []os.FileInfo{ + &mockFileInfo{name: latestVersion, isDir: latestVersionIsDir}, + &mockFileInfo{name: previousVersion, isDir: previousVersionIsDir}, + }, test.retrieveBinVersionsError + } + + osStat = func(name string) (os.FileInfo, error) { + var err error + if test.simulateEmptyVersionDir { + err = errors.New("mock error to simulate file doesn't exist") + } + return &mockFileInfo{name: "", isDir: false}, err + } + + for _, container := range containers { + hc := &dockercontainer.HostConfig{} + err := execCmdMgr.InitializeContainer("task-id", container, hc) + + assert.Equal(t, test.expectedError, err) + + if test.expectedError != nil { + assert.Empty(t, hc.Binds) + if ma, ok := container.GetManagedAgentByName(ExecuteCommandAgentName); ok { + assert.Equal(t, "", ma.ID) + assert.True(t, ma.InitFailed) + assert.Equal(t, apicontainerstatus.ManagedAgentStopped, ma.Status) + } + continue + } + + assert.Len(t, hc.Binds, 7) + + assert.Subset(t, hc.Binds, []string{ + "/var/lib/ecs/deps/execute-command/bin/" + test.expectedVersionUsed + "/amazon-ssm-agent:" + + "/ecs-execute-command-test-UUID/amazon-ssm-agent:ro"}) + + assert.Subset(t, hc.Binds, []string{ + "/var/lib/ecs/deps/execute-command/bin/" + test.expectedVersionUsed + "/ssm-agent-worker:" + + "/ecs-execute-command-test-UUID/ssm-agent-worker:ro"}) + + assert.Subset(t, hc.Binds, 
[]string{ + "/var/lib/ecs/deps/execute-command/bin/" + test.expectedVersionUsed + "/ssm-session-worker:" + + "/ecs-execute-command-test-UUID/ssm-session-worker:ro"}) + + assert.Subset(t, hc.Binds, []string{ + "/var/lib/ecs/deps/execute-command/config/amazon-ssm-agent.json:" + + "/ecs-execute-command-test-UUID/configuration/amazon-ssm-agent.json:ro"}) + + assert.Subset(t, hc.Binds, []string{ + "/var/lib/ecs/deps/execute-command/certs/tls-ca-bundle.pem:" + + "/ecs-execute-command-test-UUID/certs/amazon-ssm-agent.crt:ro"}) + + assert.Subset(t, hc.Binds, []string{ + "/var/lib/ecs/deps/execute-command/config/seelog.xml:" + + "/ecs-execute-command-test-UUID/configuration/seelog.xml:ro"}) + + expectedHostLogDir := "/var/log/ecs/exec/task-id/container-name:" + if container.Name == containerNameOnlyHyphens { + expectedHostLogDir = "/var/log/ecs/exec/task-id/nameless-container-test-UUID:" + } + assert.Subset(t, hc.Binds, []string{ + expectedHostLogDir + + "/var/log/amazon/ssm"}) + } + }) + } +} + +func TestGetExecAgentConfigFileName(t *testing.T) { + execAgentConfig := `{ + "Mgs": { + "Region": "", + "Endpoint": "", + "StopTimeoutMillis": 20000, + "SessionWorkersLimit": 2 + }, + "Agent": { + "Region": "", + "OrchestrationRootDir": "", + "ContainerMode": true + } +}` + sha := getExecAgentConfigHash(execAgentConfig) + configFileName := fmt.Sprintf("amazon-ssm-agent-%s.json", sha) + var tests = []struct { + fileExists bool + fileIsDir bool + removeDirErr error + createConfigFileErr error + expectedConfigFileName string + expectedError error + }{ + { + fileExists: true, + fileIsDir: false, + removeDirErr: nil, + createConfigFileErr: nil, + expectedConfigFileName: configFileName, + expectedError: nil, + }, + { + fileExists: false, + fileIsDir: false, + removeDirErr: nil, + createConfigFileErr: nil, + expectedConfigFileName: configFileName, + expectedError: nil, + }, + { + fileExists: false, + fileIsDir: false, + removeDirErr: nil, + createConfigFileErr: errors.New("cannot create 
config"), + expectedConfigFileName: "", + expectedError: errors.New("cannot create config"), + }, + { + fileExists: true, + fileIsDir: true, + removeDirErr: nil, + createConfigFileErr: nil, + expectedConfigFileName: configFileName, + expectedError: nil, + }, + { + fileExists: true, + fileIsDir: true, + removeDirErr: errors.New("remove dir error"), + createConfigFileErr: nil, + expectedConfigFileName: "", + expectedError: errors.New("remove dir error"), + }, + } + defer func() { + osStat = os.Stat + createNewExecAgentConfigFile = createNewConfigFile + removeAll = os.RemoveAll + }() + for _, tc := range tests { + osStat = func(name string) (os.FileInfo, error) { + return &mockFileInfo{ + name: "whatever", + isDir: tc.fileIsDir, + }, nil + } + + removeAll = func(name string) error { + return tc.removeDirErr + } + + createNewExecAgentConfigFile = func(c, f string) error { + return tc.createConfigFileErr + } + fileName, err := GetExecAgentConfigFileName(2) + assert.Equal(t, tc.expectedConfigFileName, fileName, "incorrect config file name") + assert.Equal(t, tc.expectedError, err) + } +} + +func TestGetSessionWorkersLimit(t *testing.T) { + var tests = []struct { + sessionLimit int + expectedLimit int + }{ + { + sessionLimit: -2, + expectedLimit: 2, + }, + { + sessionLimit: 0, + expectedLimit: 2, + }, + { + sessionLimit: 1, + expectedLimit: 1, + }, + { + sessionLimit: 2, + expectedLimit: 2, + }, + } + for _, tc := range tests { + ma := apicontainer.ManagedAgent{ + Properties: map[string]string{ + "sessionLimit": strconv.Itoa(tc.sessionLimit), + }, + } + limit := getSessionWorkersLimit(ma) + assert.Equal(t, tc.expectedLimit, limit) + } +} + +func TestGetExecAgentLogConfigFile(t *testing.T) { + hash := getExecAgentConfigHash(execAgentLogConfigTemplate) + var tests = []struct { + expectedFile string + expectedError error + execAgentConfigFileExist bool + fileIsDir bool + removeDirErr error + existingLogConfigReadErr error + existingLogConfig string + createNewConfigError 
error + }{ + { + expectedFile: fmt.Sprintf("seelog-%s.xml", hash), + expectedError: nil, + execAgentConfigFileExist: true, + fileIsDir: false, + removeDirErr: nil, + existingLogConfigReadErr: nil, + existingLogConfig: execAgentLogConfigTemplate, + createNewConfigError: nil, + }, + { + expectedFile: fmt.Sprintf("seelog-%s.xml", hash), + expectedError: nil, + execAgentConfigFileExist: false, + fileIsDir: false, + removeDirErr: nil, + existingLogConfigReadErr: nil, + existingLogConfig: execAgentLogConfigTemplate, + createNewConfigError: nil, + }, + { + expectedFile: fmt.Sprintf("seelog-%s.xml", hash), + expectedError: nil, + execAgentConfigFileExist: true, + fileIsDir: false, + removeDirErr: nil, + existingLogConfigReadErr: nil, + existingLogConfig: "junk", + createNewConfigError: nil, + }, + { + expectedFile: fmt.Sprintf("seelog-%s.xml", hash), + expectedError: nil, + execAgentConfigFileExist: true, + fileIsDir: false, + removeDirErr: nil, + existingLogConfigReadErr: errors.New("read file error"), + existingLogConfig: "", + createNewConfigError: nil, + }, + { + expectedFile: "", + expectedError: errors.New("create file error"), + execAgentConfigFileExist: false, + fileIsDir: false, + removeDirErr: nil, + existingLogConfigReadErr: nil, + existingLogConfig: "", + createNewConfigError: errors.New("create file error"), + }, + { + expectedFile: fmt.Sprintf("seelog-%s.xml", hash), + expectedError: nil, + execAgentConfigFileExist: true, + fileIsDir: true, + removeDirErr: nil, + existingLogConfigReadErr: nil, + existingLogConfig: execAgentLogConfigTemplate, + createNewConfigError: nil, + }, + { + expectedFile: "", + expectedError: errors.New("remove dir error"), + execAgentConfigFileExist: true, + fileIsDir: true, + removeDirErr: errors.New("remove dir error"), + existingLogConfigReadErr: nil, + existingLogConfig: execAgentLogConfigTemplate, + createNewConfigError: nil, + }, + } + defer func() { + osStat = os.Stat + getFileContent = readFileContent + 
createNewExecAgentConfigFile = createNewConfigFile + removeAll = os.RemoveAll + }() + for _, tc := range tests { + osStat = func(name string) (os.FileInfo, error) { + if tc.execAgentConfigFileExist { + return &mockFileInfo{name: "", isDir: tc.fileIsDir}, nil + } + return &mockFileInfo{}, errors.New("no such file") + } + removeAll = func(name string) error { + return tc.removeDirErr + } + getFileContent = func(path string) ([]byte, error) { + return []byte(tc.existingLogConfig), tc.existingLogConfigReadErr + } + createNewExecAgentConfigFile = func(c, f string) error { + return tc.createNewConfigError + } + configFile, err := GetExecAgentLogConfigFile() + assert.Equal(t, tc.expectedFile, configFile) + assert.Equal(t, tc.expectedError, err) + } +} + +func TestGetValidConfigExists(t *testing.T) { + var tests = []struct { + isValid bool + existingLogConfigReadErr error + existingLogConfig string + }{ + { + isValid: true, + existingLogConfigReadErr: nil, + existingLogConfig: execAgentLogConfigTemplate, + }, + { + isValid: false, + existingLogConfigReadErr: nil, + existingLogConfig: "junk", + }, + { + isValid: false, + existingLogConfigReadErr: errors.New("read file error"), + existingLogConfig: "", + }, + } + defer func() { + getFileContent = readFileContent + }() + for _, tc := range tests { + getFileContent = func(path string) ([]byte, error) { + return []byte(tc.existingLogConfig), tc.existingLogConfigReadErr + } + assert.Equal(t, tc.isValid, validConfigExists("configpath", getExecAgentConfigHash(execAgentLogConfigTemplate))) + } +} diff --git a/agent/engine/execcmd/manager_start_linux.go b/agent/engine/execcmd/manager_start_linux.go new file mode 100644 index 00000000000..b31ca798d8c --- /dev/null +++ b/agent/engine/execcmd/manager_start_linux.go @@ -0,0 +1,191 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package execcmd + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strconv" + "time" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" +) + +// RestartAgentIfStopped restarts the ExecCommandAgent in the container passed as parameter, only for ExecCommandAgent-enabled containers. +// The status of the ExecCommandAgent in the container is retrieved using a docker exec inspect call, using the dockerExecID +// stored in the AgentMetadata.DockerExecID. +// +// If the ExecCommandAgent is still running (or has never started), no action is taken. +// To actually restart the ExecCommandAgent, this function invokes this instance's StartAgent method. 
+func (m *manager) RestartAgentIfStopped(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (RestartStatus, error) { + if !IsExecEnabledContainer(container) { + seelog.Warnf("Task engine [%s]: an attempt to restart ExecCommandAgent for a non ExecCommandAgent-enabled container was made; container %s", task.Arn, containerId) + return NotRestarted, nil + } + ma, _ := container.GetManagedAgentByName(ExecuteCommandAgentName) + metadata := MapToAgentMetadata(ma.Metadata) + if !m.isAgentStarted(ma) { + return NotRestarted, nil + } + res, err := m.inspectExecAgentProcess(ctx, client, metadata) + if err != nil || res == nil { + // We don't want to report InspectContainerExec errors, just that we don't know what the status of the agent is + return Unknown, nil + } + if res.Running { // agent still running, nothing to do + return NotRestarted, nil + } + // Restart if not running + //TODO: [ecs-exec] retry only for certain exit codes? 
+ seelog.Warnf("Task engine [%s]: ExecCommandAgent Process stopped (exitCode=%d) for container %s, restarting...", task.Arn, res.ExitCode, containerId) + container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{ + ID: ma.ID, + }) + err = m.StartAgent(ctx, client, task, container, containerId) + if err != nil { + return NotRestarted, err + } + return Restarted, nil +} + +func (m *manager) inspectExecAgentProcess(ctx context.Context, client dockerapi.DockerClient, metadata AgentMetadata) (*types.ContainerExecInspect, error) { + backoff := retry.NewExponentialBackoff(m.retryMinDelay, m.retryMaxDelay, retryJitterMultiplier, retryDelayMultiplier) + ctx, cancel := context.WithTimeout(ctx, m.inspectRetryTimeout) + defer cancel() + var ( + inspectRes *types.ContainerExecInspect + inspectErr error + ) + retry.RetryNWithBackoffCtx(ctx, backoff, maxRetries, func() error { + inspectRes, inspectErr = client.InspectContainerExec(ctx, metadata.DockerExecID, dockerclient.ContainerExecInspectTimeout) + if inspectErr != nil { + retryable := true + if _, ok := inspectErr.(*dockerapi.DockerTimeoutError); ok { + retryable = false + } + return StartError{ + error: inspectErr, + retryable: retryable, + } + } + return nil + }) + return inspectRes, inspectErr +} + +// StartAgent idempotently starts the ExecCommandAgent in the container passed as parameter, only for ExecCommandAgent-enabled containers. +// If no error is returned, it can be assumed the ExecCommandAgent is started. 
+func (m *manager) StartAgent(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) error { + if !IsExecEnabledContainer(container) { + seelog.Warnf("Task engine [%s]: an attempt to start ExecCommandAgent for a non ExecCommandAgent-enabled container was made; container %s", task.Arn, containerId) + return nil + } + ma, _ := container.GetManagedAgentByName(ExecuteCommandAgentName) + existingMetadata := MapToAgentMetadata(ma.Metadata) + if ma.ID == "" { + return errors.New("container has not been initialized: missing UUID") + } + // Guarantee idempotency if the container already has been started + if m.isAgentStarted(ma) { + res, err := m.inspectExecAgentProcess(ctx, client, existingMetadata) + if err != nil { + seelog.Warnf("Task engine [%s]: could not verify if the ExecCommandAgent was already running for container %s: %v", task.Arn, containerId, err) + } else if res.Running { // agent is already running, nothing to do + seelog.Warnf("Task engine [%s]: an attempt was made to start the ExecCommandAgent but it was already running (%s)", task.Arn, containerId) + return nil + } + } + + backoff := retry.NewExponentialBackoff(m.retryMinDelay, m.retryMaxDelay, retryJitterMultiplier, retryDelayMultiplier) + ctx, cancel := context.WithTimeout(ctx, m.startRetryTimeout) + defer cancel() + var startErr error + + var execMD *AgentMetadata + retry.RetryNWithBackoffCtx(ctx, backoff, maxRetries, func() error { + execMD, startErr = m.doStartAgent(ctx, client, task, ma, containerId) + if startErr != nil { + seelog.Warnf("Task engine [%s]: exec command agent failed to start for container %s: %v. 
Retrying...", task.Arn, containerId, startErr) + } + return startErr + }) + if startErr != nil { + container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{ + ID: ma.ID, + Status: status.ManagedAgentStopped, + Reason: startErr.Error(), + }) + return startErr + } + container.UpdateManagedAgentByName(ExecuteCommandAgentName, apicontainer.ManagedAgentState{ + ID: ma.ID, + Status: status.ManagedAgentRunning, + LastStartedAt: time.Now(), + Metadata: execMD.ToMap(), + }) + return nil +} + +func (m *manager) doStartAgent(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, ma apicontainer.ManagedAgent, containerId string) (*AgentMetadata, error) { + execAgentCmdBinDir := ContainerDepsDirPrefix + ma.ID + execAgentCmd := filepath.Join(execAgentCmdBinDir, SSMAgentBinName) + execCfg := types.ExecConfig{ + User: "0", + Detach: true, + Cmd: []string{execAgentCmd}, + } + newMD := &AgentMetadata{} + execRes, err := client.CreateContainerExec(ctx, containerId, execCfg, dockerclient.ContainerExecCreateTimeout) + if err != nil { + return newMD, StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [create]: %v", err), retryable: true} + } + + seelog.Debugf("Task engine [%s]: created ExecCommandAgent for container: %s -> docker exec id: %s", task.Arn, containerId, execRes.ID) + + err = client.StartContainerExec(ctx, execRes.ID, dockerclient.ContainerExecStartTimeout) + if err != nil { + return newMD, StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [pre-start]: %v", err), retryable: true} + } + seelog.Debugf("Task engine [%s]: sent ExecCommandAgent start signal for container: %s -> docker exec id: %s", task.Arn, containerId, execRes.ID) + + inspect, err := client.InspectContainerExec(ctx, execRes.ID, dockerclient.ContainerExecInspectTimeout) + if err != nil { + return newMD, StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [inspect]: %v", err), retryable: true} + } + seelog.Debugf("Task 
engine [%s]: inspect ExecCommandAgent for container: %s -> pid: %d, exitCode: %d, running:%v, err:%v", + task.Arn, containerId, inspect.Pid, inspect.ExitCode, inspect.Running, err) + + if !inspect.Running { //TODO: [ecs-exec] retry only for certain exit codes? + return newMD, StartError{ + error: fmt.Errorf("ExecuteCommandAgent process exited with exit code: %d", inspect.ExitCode), + retryable: true, + } + } + seelog.Infof("Task engine [%s]: started ExecCommandAgent for container: %s -> docker exec id: %s", task.Arn, containerId, execRes.ID) + newMD.PID = strconv.Itoa(inspect.Pid) + newMD.DockerExecID = execRes.ID + newMD.CMD = execAgentCmd + return newMD, nil +} diff --git a/agent/engine/execcmd/manager_start_linux_test.go b/agent/engine/execcmd/manager_start_linux_test.go new file mode 100644 index 00000000000..64a4bd4528f --- /dev/null +++ b/agent/engine/execcmd/manager_start_linux_test.go @@ -0,0 +1,476 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package execcmd + +import ( + "context" + "errors" + "fmt" + "strconv" + "testing" + "time" + + "github.com/pborman/uuid" + + "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + errors2 "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func getAgentMetadata(container *container.Container) AgentMetadata { + ma, _ := container.GetManagedAgentByName(ExecuteCommandAgentName) + return MapToAgentMetadata(ma.Metadata) +} + +func TestStartAgent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + const ( + testPid1 = 9876 + testContainerRuntimeId = "123abc" + testDockerExecId = "mockDockerExecID" + ) + nowTime := time.Now() + zeroTime := time.Time{} + var ( + mockError = errors.New("mock error") + testContainers = []*apicontainer.Container{{ + RuntimeID: testContainerRuntimeId, + }} + ) + + tt := []struct { + name string + execEnabled bool + containers []*apicontainer.Container + expectCreateContainerExec bool + createContainerExecRes *types.IDResponse + createContainerExecErr error + expectStartContainerExec bool + startContainerExecErr error + expectInspectContainerExec bool + inspectContainerExecRes *types.ContainerExecInspect + inspectContainerExecErr error + expectedError error + expectedStatus apicontainerstatus.ManagedAgentStatus + expectedStartTime time.Time + }{ + { + name: "test exec disabled", + execEnabled: false, + containers: testContainers, + }, + { + 
name: "test with create container error", + execEnabled: true, + containers: testContainers, + expectCreateContainerExec: true, + expectedStatus: apicontainerstatus.ManagedAgentStopped, + createContainerExecErr: mockError, + expectedError: StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [create]: %v", mockError), retryable: true}, + expectedStartTime: zeroTime, + }, + { + name: "test with start container error", + execEnabled: true, + containers: testContainers, + expectCreateContainerExec: true, + createContainerExecRes: &types.IDResponse{ + ID: testDockerExecId, + }, + expectStartContainerExec: true, + expectedStatus: apicontainerstatus.ManagedAgentStopped, + startContainerExecErr: mockError, + expectedError: StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [pre-start]: %v", mockError), retryable: true}, + expectedStartTime: zeroTime, + }, + { + name: "test with inspect container error", + execEnabled: true, + containers: testContainers, + expectCreateContainerExec: true, + createContainerExecRes: &types.IDResponse{ + ID: testDockerExecId, + }, + expectStartContainerExec: true, + startContainerExecErr: nil, // Simulate StartContainerExec succeeds + expectInspectContainerExec: true, + expectedStatus: apicontainerstatus.ManagedAgentStopped, + inspectContainerExecErr: mockError, + expectedError: StartError{error: fmt.Errorf("unable to start ExecuteCommandAgent [inspect]: %v", mockError), retryable: true}, + expectedStartTime: zeroTime, + }, + { + name: "test happy path", + execEnabled: true, + containers: testContainers, + expectCreateContainerExec: true, + createContainerExecRes: &types.IDResponse{ + ID: testDockerExecId, + }, + expectStartContainerExec: true, + startContainerExecErr: nil, // Simulate StartContainerExec succeeds + expectInspectContainerExec: true, + expectedStatus: apicontainerstatus.ManagedAgentRunning, + expectedStartTime: nowTime, + inspectContainerExecRes: &types.ContainerExecInspect{ + ExecID: 
testDockerExecId, + Pid: testPid1, + Running: true, + }, + }, + } + testUUID := "test-uid" + defer func() { + newUUID = uuid.New + }() + newUUID = func() string { + return testUUID + } + for _, test := range tt { + t.Run(test.name, func(t *testing.T) { + + testTask := &apitask.Task{ + Arn: "taskArn:aws:ecs:region:account-id:task/test-task-taskArn", + Containers: test.containers, + } + if test.execEnabled { + for _, c := range testTask.Containers { + c.ManagedAgentsUnsafe = []apicontainer.ManagedAgent{ + { + Name: ExecuteCommandAgentName, + ManagedAgentState: apicontainer.ManagedAgentState{ + ID: testUUID, + }, + }, + } + } + } + + times := maxRetries + retryableErr, isRetryable := test.expectedError.(errors2.RetriableError) + if test.expectedError == nil || (isRetryable && !retryableErr.Retry()) { + times = 1 + } + if test.expectCreateContainerExec { + execCfg := types.ExecConfig{ + User: "0", + Detach: true, + Cmd: []string{"/ecs-execute-command-test-uid/amazon-ssm-agent"}, + } + client.EXPECT().CreateContainerExec(gomock.Any(), testTask.Containers[0].RuntimeID, execCfg, dockerclient.ContainerExecCreateTimeout). + Return(test.createContainerExecRes, test.createContainerExecErr). + Times(times) + } + + if test.expectStartContainerExec { + client.EXPECT().StartContainerExec(gomock.Any(), testDockerExecId, dockerclient.ContainerExecStartTimeout). + Return(test.startContainerExecErr). + Times(times) + } + + if test.expectInspectContainerExec { + client.EXPECT().InspectContainerExec(gomock.Any(), testDockerExecId, dockerclient.ContainerExecInspectTimeout). + Return(test.inspectContainerExecRes, test.inspectContainerExecErr). 
+ Times(times) + } + + mgr := newTestManager() + prevMetadata := getAgentMetadata(test.containers[0]) + err := mgr.StartAgent(context.TODO(), client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID) + if test.expectedError != nil { + assert.Equal(t, test.expectedError, err, "Wrong error returned") + // When there's an error, ExecCommandAgentMetadata should not be modified + newMetadata := getAgentMetadata(test.containers[0]) + assert.Equal(t, prevMetadata, newMetadata) + } else { // No error case + assert.NoError(t, err, "No error was expected") + if !test.execEnabled { + _, ok := test.containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + assert.False(t, ok) + } else { + ma, _ := test.containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + execMD := getAgentMetadata(test.containers[0]) + assert.Equal(t, strconv.Itoa(testPid1), execMD.PID, "PID not equal") + assert.Equal(t, testDockerExecId, execMD.DockerExecID, "DockerExecId not equal") + assert.Equal(t, "/ecs-execute-command-test-uid/amazon-ssm-agent", execMD.CMD) + assert.Equal(t, test.expectedStatus, ma.Status, "Exec status not equal") + assert.WithinDuration(t, test.expectedStartTime, ma.LastStartedAt, 5*time.Second, "StartedAt not equal") + } + } + }) + } +} + +func TestIdempotentStartAgent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + const ( + testDockerExecId = "abc" + testPid = 111 + ) + var ( + testPidStr = strconv.Itoa(testPid) + testCmd = "/ecs-execute-command-test-uid/amazon-ssm-agent" + ) + testUUID := "test-uid" + defer func() { + newUUID = uuid.New + }() + newUUID = func() string { + return testUUID + } + + testTask := &apitask.Task{ + Arn: "taskArn:aws:ecs:region:account-id:task/test-task-taskArn", + Containers: []*apicontainer.Container{{ + RuntimeID: "123", + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: ExecuteCommandAgentName, + ManagedAgentState: 
apicontainer.ManagedAgentState{ + ID: testUUID, + }, + }, + }, + }}, + } + + execCfg := types.ExecConfig{ + User: "0", + Detach: true, + Cmd: []string{"/ecs-execute-command-test-uid/amazon-ssm-agent"}, + } + client.EXPECT().CreateContainerExec(gomock.Any(), testTask.Containers[0].RuntimeID, execCfg, dockerclient.ContainerExecCreateTimeout). + Return(&types.IDResponse{ID: testDockerExecId}, nil). + Times(1) + + client.EXPECT().StartContainerExec(gomock.Any(), testDockerExecId, dockerclient.ContainerExecStartTimeout). + Return(nil). + Times(1) + + client.EXPECT().InspectContainerExec(gomock.Any(), testDockerExecId, dockerclient.ContainerExecInspectTimeout). + Return(&types.ContainerExecInspect{ + ExecID: testDockerExecId, + Pid: testPid, + Running: true, + }, nil). + Times(2) + + mgr := newTestManager() + err := mgr.StartAgent(context.TODO(), client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID) + assert.NoError(t, err) + + ma, _ := testTask.Containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + execMD := getAgentMetadata(testTask.Containers[0]) + firstStart := ma.LastStartedAt + assert.NotNil(t, ma.LastStartedAt) + assert.Equal(t, testPidStr, execMD.PID) + assert.Equal(t, testDockerExecId, execMD.DockerExecID) + assert.Equal(t, testCmd, execMD.CMD) + assert.Equal(t, apicontainerstatus.ManagedAgentRunning, ma.Status) + + // Second call to start. 
The mock's expected call times is 1 (except for inspect); the absence of "too many calls" + // along with unchanged metadata guarantees idempotency + err = mgr.StartAgent(context.TODO(), client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID) + assert.NoError(t, err) + + ma, _ = testTask.Containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + execMD = getAgentMetadata(testTask.Containers[0]) + // check StartedAt is not Zero and hasn't been updated + assert.NotNil(t, ma.LastStartedAt) + assert.False(t, ma.LastStartedAt.IsZero()) + assert.Equal(t, ma.LastStartedAt, firstStart) + assert.Equal(t, testPidStr, execMD.PID) + assert.Equal(t, testDockerExecId, execMD.DockerExecID) + assert.Equal(t, testCmd, execMD.CMD) + assert.Equal(t, apicontainerstatus.ManagedAgentRunning, ma.Status) +} + +func TestRestartAgentIfStopped(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_dockerapi.NewMockDockerClient(ctrl) + nowTime := time.Now() + + const ( + testContainerId = "123" + testNewDockerExecID = "newDockerExecId" + testNewPID = 111 + ) + testUUID := "test-uid" + defer func() { + newUUID = uuid.New + }() + newUUID = func() string { + return testUUID + } + var ( + mockError = errors.New("mock error") + dockerTimeoutErr = &dockerapi.DockerTimeoutError{} + testExecAgentState = apicontainer.ManagedAgentState{ + ID: testUUID, + LastStartedAt: nowTime, + Metadata: map[string]interface{}{ + "PID": "456", + "DockerExecID": "789", + "CMD": "amazon-ssm-agent", + }, + } + ) + tt := []struct { + name string + execEnabled bool + expectedRestartStatus RestartStatus + execAgentState apicontainer.ManagedAgentState + containerExecInspectRes *types.ContainerExecInspect + expectedInspectErr error + expectedRestartErr error + expectedExecAgentStatus apicontainerstatus.ManagedAgentStatus + }{ + { + name: "test with exec agent disabled", + execEnabled: false, + expectedRestartStatus: NotRestarted, + }, + { + name: "test with inspect 
timeout error", + execEnabled: true, + execAgentState: testExecAgentState, + expectedInspectErr: dockerTimeoutErr, + expectedRestartErr: nil, + expectedRestartStatus: Unknown, + expectedExecAgentStatus: apicontainerstatus.ManagedAgentStopped, + }, + { + name: "test with other inspect error", + execEnabled: true, + execAgentState: testExecAgentState, + expectedInspectErr: mockError, + expectedRestartErr: nil, + expectedRestartStatus: Unknown, + expectedExecAgentStatus: apicontainerstatus.ManagedAgentStopped, + }, + { + name: "test with exec command still running", + execEnabled: true, + execAgentState: testExecAgentState, + containerExecInspectRes: &types.ContainerExecInspect{ + Running: true, + }, + expectedRestartStatus: NotRestarted, + expectedExecAgentStatus: apicontainerstatus.ManagedAgentRunning, + }, + { + name: "test with exec command stopped", + execEnabled: true, + execAgentState: testExecAgentState, + containerExecInspectRes: &types.ContainerExecInspect{ + Running: false, + }, + expectedRestartStatus: Restarted, + expectedExecAgentStatus: apicontainerstatus.ManagedAgentRunning, + }, + } + + for _, test := range tt { + t.Run(test.name, func(t *testing.T) { + testTask := &apitask.Task{ + Arn: "taskArn:aws:ecs:region:account-id:task/test-task-taskArn", + Containers: []*apicontainer.Container{{ + RuntimeID: testContainerId, + }}, + } + + if test.execEnabled { + testTask.Containers[0].ManagedAgentsUnsafe = []apicontainer.ManagedAgent{ + { + Name: ExecuteCommandAgentName, + ManagedAgentState: test.execAgentState, + }, + } + times := 1 + if test.expectedInspectErr == mockError { + times = maxRetries + } + execMD := getAgentMetadata(testTask.Containers[0]) + client.EXPECT().InspectContainerExec(gomock.Any(), execMD.DockerExecID, dockerclient.ContainerExecInspectTimeout). 
+ Return(test.containerExecInspectRes, test.expectedInspectErr).Times(times) + } + + // Expect calls made by Start() + if test.containerExecInspectRes != nil && !test.containerExecInspectRes.Running { + client.EXPECT().CreateContainerExec(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(&types.IDResponse{ID: testNewDockerExecID}, nil). + Times(1) + + client.EXPECT().StartContainerExec(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil). + Times(1) + + client.EXPECT().InspectContainerExec(gomock.Any(), gomock.Any(), gomock.Any()). + Return(&types.ContainerExecInspect{ + ExecID: testNewDockerExecID, + Pid: testNewPID, + Running: true, + }, nil). + Times(1) + } + + mgr := newTestManager() + restarted, err := mgr.RestartAgentIfStopped(context.TODO(), client, testTask, testTask.Containers[0], testTask.Containers[0].RuntimeID) + assert.Equal(t, test.expectedRestartErr, err) + assert.Equal(t, test.expectedRestartStatus, restarted, "expected: %s, actual: %s", test.expectedRestartStatus, restarted) + + if test.expectedRestartStatus != Restarted { + ma, _ := testTask.Containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + actualState := ma.ManagedAgentState + assert.Equal(t, test.execAgentState, actualState, "ExecCommandAgentMetadata was incorrectly modified") + } else { + ma, _ := testTask.Containers[0].GetManagedAgentByName(ExecuteCommandAgentName) + actualState := ma.ManagedAgentState + execMD := getAgentMetadata(testTask.Containers[0]) + assert.Equal(t, strconv.Itoa(testNewPID), execMD.PID, + "ExecCommandAgentMetadata.PID is not the newest after restart") + assert.Equal(t, testNewDockerExecID, execMD.DockerExecID, + "ExecCommandAgentMetadata.ExecID is not the newest after restart") + assert.Equal(t, "/ecs-execute-command-test-uid/amazon-ssm-agent", execMD.CMD, + "ExecCommandAgentMetadata.CMD is not the newest after restart") + assert.Equal(t, test.expectedExecAgentStatus, actualState.Status, + "ExecCommandAgentStatus is not correct after 
restart") + } + }) + } +} + +func newTestManager() *manager { + m := NewManager() + m.retryMaxDelay = time.Millisecond * 30 + m.retryMinDelay = time.Millisecond * 1 + m.startRetryTimeout = time.Second * 2 + m.inspectRetryTimeout = time.Second + return m +} diff --git a/agent/engine/execcmd/manager_test.go b/agent/engine/execcmd/manager_test.go new file mode 100644 index 00000000000..3d6c81f2b38 --- /dev/null +++ b/agent/engine/execcmd/manager_test.go @@ -0,0 +1,105 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package execcmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" +) + +func TestNewManager(t *testing.T) { + m := NewManager() + assert.Equal(t, HostBinDir, m.hostBinDir) + assert.Equal(t, defaultInspectRetryTimeout, m.inspectRetryTimeout) + assert.Equal(t, defaultRetryMinDelay, m.retryMinDelay) + assert.Equal(t, defaultRetryMaxDelay, m.retryMaxDelay) + assert.Equal(t, defaultStartRetryTimeout, m.startRetryTimeout) +} + +func TestNewManagerWithBinDir(t *testing.T) { + const customHostBinDir = "/test" + m := NewManagerWithBinDir(customHostBinDir) + assert.Equal(t, customHostBinDir, m.hostBinDir) + assert.Equal(t, defaultInspectRetryTimeout, m.inspectRetryTimeout) + assert.Equal(t, defaultRetryMinDelay, m.retryMinDelay) + assert.Equal(t, defaultRetryMaxDelay, m.retryMaxDelay) + assert.Equal(t, defaultStartRetryTimeout, m.startRetryTimeout) +} + +func TestIsExecEnabledTask(t *testing.T) { + var tests = []struct { + name string + agentName string + expectedExecEnabled bool + }{ + { + name: "test task not exec enabled when no exec command managed agent is present", + agentName: "randomAgent", + }, + { + name: "test task not exec enabled when no exec command managed agent is present", + agentName: "ExecuteCommandAgent", + expectedExecEnabled: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := &apitask.Task{ + Containers: []*apicontainer.Container{{ + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: tc.agentName, + }}}, + }, + } + enabled := IsExecEnabledTask(task) + assert.Equal(t, tc.expectedExecEnabled, enabled) + }) + } +} + +func TestExecEnabledContainer(t *testing.T) { + + var tests = []struct { + name string + agentName string + expectedExecEnabled bool + }{ + { + name: "test container not exec enabled when no exec command managed agent is present", + agentName: 
"randomAgent", + }, + { + name: "test container not exec enabled when no exec command managed agent is present", + agentName: "ExecuteCommandAgent", + expectedExecEnabled: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + container := &apicontainer.Container{ + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: tc.agentName, + }}, + } + enabled := IsExecEnabledContainer(container) + assert.Equal(t, tc.expectedExecEnabled, enabled) + }) + } +} diff --git a/agent/engine/execcmd/manager_unsupported.go b/agent/engine/execcmd/manager_unsupported.go new file mode 100644 index 00000000000..095a514ead1 --- /dev/null +++ b/agent/engine/execcmd/manager_unsupported.go @@ -0,0 +1,46 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package execcmd + +import ( + "context" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + // ECSAgentExecLogDir here is used used while cleaning up exec logs when task exits. + // When this path is empty, nothing is cleaned up for unsupported platforms. + ECSAgentExecLogDir = "" +) + +// Note: exec cmd agent is a linux-only feature, thus implemented here as a no-op. 
+func (m *manager) RestartAgentIfStopped(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) (RestartStatus, error) { + return NotRestarted, nil +} + +// Note: exec cmd agent is a linux-only feature, thus implemented here as a no-op. +func (m *manager) StartAgent(ctx context.Context, client dockerapi.DockerClient, task *apitask.Task, container *apicontainer.Container, containerId string) error { + return nil +} + +// InitializeContainer adds the necessary bind mounts in order for the ExecCommandAgent to run properly in the container +// Note: exec cmd agent is a linux-only feature, thus implemented here as a no-op. +func (m *manager) InitializeContainer(taskId string, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig) error { + return nil +} diff --git a/agent/engine/execcmd/metadata.go b/agent/engine/execcmd/metadata.go new file mode 100644 index 00000000000..5addf99e39d --- /dev/null +++ b/agent/engine/execcmd/metadata.go @@ -0,0 +1,35 @@ +package execcmd + +import ( + "fmt" +) + +// AgentMetadata holds metadata about the exec agent running inside the container (i.e. SSM Agent). 
+type AgentMetadata struct { + PID string `json:"PID"` + DockerExecID string `json:"DockerExecID"` + CMD string `json:"CMD"` +} + +func (md *AgentMetadata) String() string { + return fmt.Sprintf("[PID: %s, DockerExecId: %s, CMD: %s]", md.PID, md.DockerExecID, md.CMD) +} + +func (md *AgentMetadata) ToMap() map[string]interface{} { + return map[string]interface{}{ + "PID": md.PID, + "DockerExecID": md.DockerExecID, + "CMD": md.CMD, + } +} + +func MapToAgentMetadata(md map[string]interface{}) AgentMetadata { + var execMD AgentMetadata + if md == nil { + return execMD + } + execMD.PID, _ = md["PID"].(string) + execMD.DockerExecID, _ = md["DockerExecID"].(string) + execMD.CMD, _ = md["CMD"].(string) + return execMD +} diff --git a/agent/engine/execcmd/mocks/execcmd_mocks.go b/agent/engine/execcmd/mocks/execcmd_mocks.go new file mode 100644 index 00000000000..68e32c01e56 --- /dev/null +++ b/agent/engine/execcmd/mocks/execcmd_mocks.go @@ -0,0 +1,97 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/engine/execcmd (interfaces: Manager) + +// Package mock_execcmd is a generated GoMock package. 
// Package mock_execcmd contains a gomock mock of the execcmd Manager interface.
// NOTE(review): the mock/recorder layout matches MockGen output — regenerate
// with mockgen rather than hand-editing if the Manager interface changes.
package mock_execcmd

import (
	context "context"
	reflect "reflect"

	container "github.com/aws/amazon-ecs-agent/agent/api/container"
	task "github.com/aws/amazon-ecs-agent/agent/api/task"
	dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
	execcmd "github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
	container0 "github.com/docker/docker/api/types/container"
	gomock "github.com/golang/mock/gomock"
)

// MockManager is a mock of Manager interface
type MockManager struct {
	ctrl     *gomock.Controller
	recorder *MockManagerMockRecorder
}

// MockManagerMockRecorder is the mock recorder for MockManager
type MockManagerMockRecorder struct {
	mock *MockManager
}

// NewMockManager creates a new mock instance
func NewMockManager(ctrl *gomock.Controller) *MockManager {
	mock := &MockManager{ctrl: ctrl}
	mock.recorder = &MockManagerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
	return m.recorder
}

// InitializeContainer mocks base method
func (m *MockManager) InitializeContainer(arg0 string, arg1 *container.Container, arg2 *container0.HostConfig) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "InitializeContainer", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// InitializeContainer indicates an expected call of InitializeContainer
func (mr *MockManagerMockRecorder) InitializeContainer(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeContainer", reflect.TypeOf((*MockManager)(nil).InitializeContainer), arg0, arg1, arg2)
}

// RestartAgentIfStopped mocks base method
func (m *MockManager) RestartAgentIfStopped(arg0 context.Context, arg1 dockerapi.DockerClient, arg2 *task.Task, arg3 *container.Container, arg4 string) (execcmd.RestartStatus, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RestartAgentIfStopped", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(execcmd.RestartStatus)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// RestartAgentIfStopped indicates an expected call of RestartAgentIfStopped
func (mr *MockManagerMockRecorder) RestartAgentIfStopped(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestartAgentIfStopped", reflect.TypeOf((*MockManager)(nil).RestartAgentIfStopped), arg0, arg1, arg2, arg3, arg4)
}

// StartAgent mocks base method
func (m *MockManager) StartAgent(arg0 context.Context, arg1 dockerapi.DockerClient, arg2 *task.Task, arg3 *container.Container, arg4 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StartAgent", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(error)
	return ret0
}

// StartAgent indicates an expected call of StartAgent
func (mr *MockManagerMockRecorder) StartAgent(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAgent", reflect.TypeOf((*MockManager)(nil).StartAgent), arg0, arg1, arg2, arg3, arg4)
}
diff --git a/agent/engine/generate_mocks.go b/agent/engine/generate_mocks.go new file mode 100644 index 00000000000..30da41d3bbe --- /dev/null +++ b/agent/engine/generate_mocks.go @@ -0,0 +1,16 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package engine

//go:generate mockgen -destination=mocks/engine_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/engine TaskEngine,ImageManager
diff --git a/agent/engine/image/types.go b/agent/engine/image/types.go new file mode 100644 index 00000000000..acdb92fc71b --- /dev/null +++ b/agent/engine/image/types.go @@ -0,0 +1,179 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package image

import (
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"time"

	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
	"github.com/cihub/seelog"
)

// Image represents a docker image: its ID, the names (tags) that refer
// to it, and its size.
type Image struct {
	ImageID string
	Names   []string
	Size    int64
}

// String renders the image ID and its comma-joined name list.
func (image *Image) String() string {
	return fmt.Sprintf("ImageID: %s; Names: %s", image.ImageID, strings.Join(image.Names, ", "))
}

// ImageState represents a docker image
// and its state information such as containers associated with it
type ImageState struct {
	// Image is the image corresponding to this ImageState.
	Image *Image
	// Containers are the containers that use this image.
	// Excluded from JSON: the anonymous struct in MarshalJSON also omits it.
	Containers []*apicontainer.Container `json:"-"`
	// PulledAt is the time when this image was pulled.
	PulledAt time.Time
	// LastUsedAt is the time when this image was used last time.
	LastUsedAt time.Time
	// PullSucceeded defines whether this image has been pulled successfully before,
	// this should be set to true when one of the pull image call succeeds.
	PullSucceeded bool
	// lock guards the mutable fields above. NOTE(review): not every reader
	// below acquires it — see HasNoAssociatedContainers, HasImageName, String.
	lock sync.RWMutex
}

// UpdateContainerReference appends a container reference to this image state
// under the write lock.
func (imageState *ImageState) UpdateContainerReference(container *apicontainer.Container) {
	imageState.lock.Lock()
	defer imageState.lock.Unlock()
	seelog.Infof("Updating container reference %v in Image State - %v", container.Name, imageState.Image.ImageID)
	imageState.Containers = append(imageState.Containers, container)
}

// AddImageName adds imageName to the image's name list if not already present.
// HasImageName is intentionally called while the write lock is held; it does
// not lock itself, so there is no re-entrancy problem.
func (imageState *ImageState) AddImageName(imageName string) {
	imageState.lock.Lock()
	defer imageState.lock.Unlock()
	if !imageState.HasImageName(imageName) {
		seelog.Infof("Adding image name- %v to Image state- %v", imageName, imageState.Image.ImageID)
		imageState.Image.Names = append(imageState.Image.Names, imageName)
	}
}

// GetImageID returns the docker image ID under the read lock.
func (imageState *ImageState) GetImageID() string {
	imageState.lock.RLock()
	defer imageState.lock.RUnlock()
	return imageState.Image.ImageID
}

// GetImageNamesCount returns the number of names/tags under the read lock.
func (imageState *ImageState) GetImageNamesCount() int {
	imageState.lock.RLock()
	defer imageState.lock.RUnlock()
	return len(imageState.Image.Names)
}

// HasNoAssociatedContainers returns true if image has no associated containers.
// NOTE(review): reads Containers without taking the lock, unlike the getters
// above — callers are presumably expected to synchronize; confirm call sites.
func (imageState *ImageState) HasNoAssociatedContainers() bool {
	return len(imageState.Containers) == 0
}

// UpdateImageState records both the container's image name and the container
// reference. NOTE(review): acquires the lock twice (once per call), so the
// pair of updates is not atomic with respect to concurrent readers.
func (imageState *ImageState) UpdateImageState(container *apicontainer.Container) {
	imageState.AddImageName(container.Image)
	imageState.UpdateContainerReference(container)
}

// RemoveImageName removes the first occurrence of containerImageName from the
// name list under the write lock; returns whether a name was removed.
func (imageState *ImageState) RemoveImageName(containerImageName string) bool {
	imageState.lock.Lock()
	defer imageState.lock.Unlock()
	for i, imageName := range imageState.Image.Names {
		if imageName == containerImageName {
			imageState.Image.Names = append(imageState.Image.Names[:i], imageState.Image.Names[i+1:]...)
			return true
		}
	}
	return false
}

// HasImageName returns true if image state contains the containerImageName.
// Does not lock: AddImageName calls it with the write lock already held;
// external callers must synchronize themselves.
func (imageState *ImageState) HasImageName(containerImageName string) bool {
	for _, imageName := range imageState.Image.Names {
		if imageName == containerImageName {
			return true
		}
	}
	return false
}

// RemoveContainerReference removes the reference matching container.Name
// (matched by name, not pointer) and refreshes LastUsedAt; errors if absent.
func (imageState *ImageState) RemoveContainerReference(container *apicontainer.Container) error {
	// Get the image state write lock for updating container reference
	imageState.lock.Lock()
	defer imageState.lock.Unlock()
	for i := range imageState.Containers {
		if imageState.Containers[i].Name == container.Name {
			// Container reference found; hence remove it
			seelog.Infof("Removing Container Reference: %v from Image State- %v", container.Name, imageState.Image.ImageID)
			imageState.Containers = append(imageState.Containers[:i], imageState.Containers[i+1:]...)
			// Update the last used time for the image
			imageState.LastUsedAt = time.Now()
			return nil
		}
	}
	return fmt.Errorf("Container reference is not found in the image state container: %s", container.String())
}

// SetPullSucceeded sets the PullSucceeded of the imageState
func (imageState *ImageState) SetPullSucceeded(pullSucceeded bool) {
	imageState.lock.Lock()
	defer imageState.lock.Unlock()

	imageState.PullSucceeded = pullSucceeded
}

// GetPullSucceeded safely returns the PullSucceeded of the imageState
func (imageState *ImageState) GetPullSucceeded() bool {
	imageState.lock.RLock()
	defer imageState.lock.RUnlock()

	return imageState.PullSucceeded
}

// MarshalJSON marshals the image state, omitting Containers.
// NOTE(review): takes the write lock although it only reads; an RLock would
// suffice — left unchanged here.
func (imageState *ImageState) MarshalJSON() ([]byte, error) {
	imageState.lock.Lock()
	defer imageState.lock.Unlock()

	return json.Marshal(&struct {
		Image         *Image
		PulledAt      time.Time
		LastUsedAt    time.Time
		PullSucceeded bool
	}{
		Image:         imageState.Image,
		PulledAt:      imageState.PulledAt,
		LastUsedAt:    imageState.LastUsedAt,
		PullSucceeded: imageState.PullSucceeded,
	})
}

// String renders a human-readable summary. NOTE(review): reads fields without
// the lock; intended for logging where a torn read is tolerable — confirm.
func (imageState *ImageState) String() string {
	image := ""
	if imageState.Image != nil {
		image = imageState.Image.String()
	}
	return fmt.Sprintf("Image: [%s] referenced by %d containers; PulledAt: %s; LastUsedAt: %s; PullSucceeded: %t",
		image, len(imageState.Containers), imageState.PulledAt.String(), imageState.LastUsedAt.String(), imageState.PullSucceeded)
}
diff --git a/agent/engine/interface.go b/agent/engine/interface.go new file mode 100644 index 00000000000..5377392ff36 --- /dev/null +++ b/agent/engine/interface.go @@ -0,0 +1,61 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package engine

import (
	"encoding/json"

	"context"

	apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
	"github.com/aws/amazon-ecs-agent/agent/data"
	"github.com/aws/amazon-ecs-agent/agent/statechange"
)

// TaskEngine is an interface for the DockerTaskEngine
type TaskEngine interface {
	Init(context.Context) error
	MustInit(context.Context)
	// Disable *must* only be called when this engine will no longer be used
	// (e.g. right before shutting down the process). It will irreversibly stop
	// this task engine from processing new tasks
	Disable()

	// StateChangeEvents will provide information about tasks that have been previously
	// executed. Specifically, it will provide information when they reach
	// running or stopped, as well as providing portbinding and other metadata
	StateChangeEvents() chan statechange.Event
	// SetDataClient sets the data client that is used by the task engine.
	SetDataClient(data.Client)

	// AddTask adds a new task to the task engine and manages its container's
	// lifecycle. (Note: no error return — failures surface via state change
	// events, not a return value.)
	AddTask(*apitask.Task)

	// ListTasks lists all the tasks being managed by the TaskEngine.
	ListTasks() ([]*apitask.Task, error)

	// GetTaskByArn gets a managed task, given a task arn.
	GetTaskByArn(string) (*apitask.Task, bool)

	Version() (string, error)

	// LoadState loads the task engine state with data in db.
	LoadState() error
	// SaveState saves all the data in task engine state to db.
+ SaveState() error + + json.Marshaler + json.Unmarshaler +} diff --git a/agent/engine/mocks/engine_mocks.go b/agent/engine/mocks/engine_mocks.go new file mode 100644 index 00000000000..2de73cc6299 --- /dev/null +++ b/agent/engine/mocks/engine_mocks.go @@ -0,0 +1,334 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/engine (interfaces: TaskEngine,ImageManager) + +// Package mock_engine is a generated GoMock package. 
+package mock_engine + +import ( + context "context" + reflect "reflect" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + task "github.com/aws/amazon-ecs-agent/agent/api/task" + data "github.com/aws/amazon-ecs-agent/agent/data" + image "github.com/aws/amazon-ecs-agent/agent/engine/image" + statechange "github.com/aws/amazon-ecs-agent/agent/statechange" + gomock "github.com/golang/mock/gomock" +) + +// MockTaskEngine is a mock of TaskEngine interface +type MockTaskEngine struct { + ctrl *gomock.Controller + recorder *MockTaskEngineMockRecorder +} + +// MockTaskEngineMockRecorder is the mock recorder for MockTaskEngine +type MockTaskEngineMockRecorder struct { + mock *MockTaskEngine +} + +// NewMockTaskEngine creates a new mock instance +func NewMockTaskEngine(ctrl *gomock.Controller) *MockTaskEngine { + mock := &MockTaskEngine{ctrl: ctrl} + mock.recorder = &MockTaskEngineMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTaskEngine) EXPECT() *MockTaskEngineMockRecorder { + return m.recorder +} + +// AddTask mocks base method +func (m *MockTaskEngine) AddTask(arg0 *task.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTask", arg0) +} + +// AddTask indicates an expected call of AddTask +func (mr *MockTaskEngineMockRecorder) AddTask(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTask", reflect.TypeOf((*MockTaskEngine)(nil).AddTask), arg0) +} + +// Disable mocks base method +func (m *MockTaskEngine) Disable() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Disable") +} + +// Disable indicates an expected call of Disable +func (mr *MockTaskEngineMockRecorder) Disable() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disable", reflect.TypeOf((*MockTaskEngine)(nil).Disable)) +} + +// GetTaskByArn mocks base method +func (m *MockTaskEngine) GetTaskByArn(arg0 
string) (*task.Task, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskByArn", arg0) + ret0, _ := ret[0].(*task.Task) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetTaskByArn indicates an expected call of GetTaskByArn +func (mr *MockTaskEngineMockRecorder) GetTaskByArn(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskByArn", reflect.TypeOf((*MockTaskEngine)(nil).GetTaskByArn), arg0) +} + +// Init mocks base method +func (m *MockTaskEngine) Init(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init +func (mr *MockTaskEngineMockRecorder) Init(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockTaskEngine)(nil).Init), arg0) +} + +// ListTasks mocks base method +func (m *MockTaskEngine) ListTasks() ([]*task.Task, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTasks") + ret0, _ := ret[0].([]*task.Task) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTasks indicates an expected call of ListTasks +func (mr *MockTaskEngineMockRecorder) ListTasks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTasks", reflect.TypeOf((*MockTaskEngine)(nil).ListTasks)) +} + +// LoadState mocks base method +func (m *MockTaskEngine) LoadState() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadState") + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadState indicates an expected call of LoadState +func (mr *MockTaskEngineMockRecorder) LoadState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadState", reflect.TypeOf((*MockTaskEngine)(nil).LoadState)) +} + +// MarshalJSON mocks base method +func (m *MockTaskEngine) MarshalJSON() ([]byte, error) { 
+ m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarshalJSON") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarshalJSON indicates an expected call of MarshalJSON +func (mr *MockTaskEngineMockRecorder) MarshalJSON() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarshalJSON", reflect.TypeOf((*MockTaskEngine)(nil).MarshalJSON)) +} + +// MustInit mocks base method +func (m *MockTaskEngine) MustInit(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "MustInit", arg0) +} + +// MustInit indicates an expected call of MustInit +func (mr *MockTaskEngineMockRecorder) MustInit(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MustInit", reflect.TypeOf((*MockTaskEngine)(nil).MustInit), arg0) +} + +// SaveState mocks base method +func (m *MockTaskEngine) SaveState() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveState") + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveState indicates an expected call of SaveState +func (mr *MockTaskEngineMockRecorder) SaveState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveState", reflect.TypeOf((*MockTaskEngine)(nil).SaveState)) +} + +// SetDataClient mocks base method +func (m *MockTaskEngine) SetDataClient(arg0 data.Client) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDataClient", arg0) +} + +// SetDataClient indicates an expected call of SetDataClient +func (mr *MockTaskEngineMockRecorder) SetDataClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDataClient", reflect.TypeOf((*MockTaskEngine)(nil).SetDataClient), arg0) +} + +// StateChangeEvents mocks base method +func (m *MockTaskEngine) StateChangeEvents() chan statechange.Event { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateChangeEvents") + ret0, _ := ret[0].(chan 
statechange.Event) + return ret0 +} + +// StateChangeEvents indicates an expected call of StateChangeEvents +func (mr *MockTaskEngineMockRecorder) StateChangeEvents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangeEvents", reflect.TypeOf((*MockTaskEngine)(nil).StateChangeEvents)) +} + +// UnmarshalJSON mocks base method +func (m *MockTaskEngine) UnmarshalJSON(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnmarshalJSON", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnmarshalJSON indicates an expected call of UnmarshalJSON +func (mr *MockTaskEngineMockRecorder) UnmarshalJSON(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalJSON", reflect.TypeOf((*MockTaskEngine)(nil).UnmarshalJSON), arg0) +} + +// Version mocks base method +func (m *MockTaskEngine) Version() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version +func (mr *MockTaskEngineMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockTaskEngine)(nil).Version)) +} + +// MockImageManager is a mock of ImageManager interface +type MockImageManager struct { + ctrl *gomock.Controller + recorder *MockImageManagerMockRecorder +} + +// MockImageManagerMockRecorder is the mock recorder for MockImageManager +type MockImageManagerMockRecorder struct { + mock *MockImageManager +} + +// NewMockImageManager creates a new mock instance +func NewMockImageManager(ctrl *gomock.Controller) *MockImageManager { + mock := &MockImageManager{ctrl: ctrl} + mock.recorder = &MockImageManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m 
*MockImageManager) EXPECT() *MockImageManagerMockRecorder { + return m.recorder +} + +// AddAllImageStates mocks base method +func (m *MockImageManager) AddAllImageStates(arg0 []*image.ImageState) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddAllImageStates", arg0) +} + +// AddAllImageStates indicates an expected call of AddAllImageStates +func (mr *MockImageManagerMockRecorder) AddAllImageStates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAllImageStates", reflect.TypeOf((*MockImageManager)(nil).AddAllImageStates), arg0) +} + +// GetImageStateFromImageName mocks base method +func (m *MockImageManager) GetImageStateFromImageName(arg0 string) (*image.ImageState, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImageStateFromImageName", arg0) + ret0, _ := ret[0].(*image.ImageState) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetImageStateFromImageName indicates an expected call of GetImageStateFromImageName +func (mr *MockImageManagerMockRecorder) GetImageStateFromImageName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageStateFromImageName", reflect.TypeOf((*MockImageManager)(nil).GetImageStateFromImageName), arg0) +} + +// RecordContainerReference mocks base method +func (m *MockImageManager) RecordContainerReference(arg0 *container.Container) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecordContainerReference", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecordContainerReference indicates an expected call of RecordContainerReference +func (mr *MockImageManagerMockRecorder) RecordContainerReference(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordContainerReference", reflect.TypeOf((*MockImageManager)(nil).RecordContainerReference), arg0) +} + +// RemoveContainerReferenceFromImageState mocks base method +func 
(m *MockImageManager) RemoveContainerReferenceFromImageState(arg0 *container.Container) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveContainerReferenceFromImageState", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveContainerReferenceFromImageState indicates an expected call of RemoveContainerReferenceFromImageState +func (mr *MockImageManagerMockRecorder) RemoveContainerReferenceFromImageState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveContainerReferenceFromImageState", reflect.TypeOf((*MockImageManager)(nil).RemoveContainerReferenceFromImageState), arg0) +} + +// SetDataClient mocks base method +func (m *MockImageManager) SetDataClient(arg0 data.Client) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDataClient", arg0) +} + +// SetDataClient indicates an expected call of SetDataClient +func (mr *MockImageManagerMockRecorder) SetDataClient(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDataClient", reflect.TypeOf((*MockImageManager)(nil).SetDataClient), arg0) +} + +// StartImageCleanupProcess mocks base method +func (m *MockImageManager) StartImageCleanupProcess(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StartImageCleanupProcess", arg0) +} + +// StartImageCleanupProcess indicates an expected call of StartImageCleanupProcess +func (mr *MockImageManagerMockRecorder) StartImageCleanupProcess(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartImageCleanupProcess", reflect.TypeOf((*MockImageManager)(nil).StartImageCleanupProcess), arg0) +} diff --git a/agent/engine/ordering_integ_test.go b/agent/engine/ordering_integ_test.go new file mode 100644 index 00000000000..1e6cae6a43c --- /dev/null +++ b/agent/engine/ordering_integ_test.go @@ -0,0 +1,512 @@ +// +build integration + +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +const orderingTimeout = 90 * time.Second + +// TestDependencyHealthCheck is a happy-case integration test that considers a workflow with a HEALTHY dependency +// condition. We ensure that the task can be both started and stopped. 
+func TestDependencyHealthCheck(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencyHealth" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"exit 0"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "HEALTHY", + }, + } + parent.Essential = true + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 90"} + dependency.HealthCheckType = apicontainer.DockerHealthCheckType + dependency.DockerConfig.Config = aws.String(alwaysHealthyHealthCheckConfig) + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // Both containers should start + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + // Task should stop all at once + verifyContainerStoppedStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestDependencyComplete validates that the COMPLETE dependency condition will resolve when the child container exits +// with exit code 1. It ensures that the child is started and stopped before the parent starts. 
+func TestDependencyComplete(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencyComplete" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"sleep 5"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "COMPLETE", + }, + } + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 10 && exit 1"} + dependency.Essential = false + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // First container should run to completion and then exit + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + // Second container starts after the first stops, task becomes running + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + // Last container stops and then the task stops + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestDependencySuccess validates that the SUCCESS dependency condition will resolve when the child container exits +// with exit code 0. It ensures that the child is started and stopped before the parent starts. 
+func TestDependencySuccess(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencySuccess" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"exit 0"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "SUCCESS", + }, + } + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 10"} + dependency.Essential = false + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // First container should run to completion + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + // Second container starts after the first stops, task becomes running + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + // Last container stops and then the task stops + verifyContainerStoppedStateChange(t, taskEngine) + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestDependencySuccess validates that the SUCCESS dependency condition will fail when the child exits 1. This is a +// contrast to how COMPLETE behaves. Instead of starting the parent, the task should simply exit. 
+func TestDependencySuccessErrored(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencySuccessErrored" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"exit 0"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "SUCCESS", + }, + } + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 10 && exit 1"} + dependency.Essential = false + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // First container should run to completion + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + // task should transition to stopped + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestDependencySuccessTimeout +func TestDependencySuccessTimeout(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencySuccessTimeout" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"exit 0"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "SUCCESS", + }, + } + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 15"} + dependency.Essential = false 
+ + // set the timeout to be shorter than the amount of time it takes to stop + dependency.StartTimeout = 8 + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // First container should run to completion + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + // task should transition to stopped + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestDependencyHealthyTimeout +func TestDependencyHealthyTimeout(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testDependencyHealthyTimeout" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency := createTestContainerWithImageAndName(baseImageForOS, "dependency") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"exit 0"} + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency", + Condition: "HEALTHY", + }, + } + + dependency.EntryPoint = &entryPointForOS + dependency.Command = []string{"sleep 30"} + dependency.HealthCheckType = apicontainer.DockerHealthCheckType + + // enter a healthcheck that will fail + dependency.DockerConfig.Config = aws.String(`{ + "HealthCheck":{ + "Test":["CMD-SHELL", "exit 1"] + } + }`) + + // set the timeout. Duration doesn't matter since healthcheck will always be unhealthy. 
+ dependency.StartTimeout = 8 + + testTask.Containers = []*apicontainer.Container{ + parent, + dependency, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // First container should run to completion + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + // task should transition to stopped + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +// TestShutdownOrder +func TestShutdownOrder(t *testing.T) { + shutdownOrderingTimeout := 120 * time.Second + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testShutdownOrder" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + A := createTestContainerWithImageAndName(baseImageForOS, "A") + B := createTestContainerWithImageAndName(baseImageForOS, "B") + C := createTestContainerWithImageAndName(baseImageForOS, "C") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"echo hi"} + parent.Essential = true + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "A", + Condition: "START", + }, + } + + A.EntryPoint = &entryPointForOS + A.Command = []string{"sleep 100"} + A.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "B", + Condition: "START", + }, + } + + B.EntryPoint = &entryPointForOS + B.Command = []string{"sleep 100"} + B.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "C", + Condition: "START", + }, + } + + C.EntryPoint = &entryPointForOS + C.Command = []string{"sleep 100"} + + testTask.Containers = []*apicontainer.Container{ + parent, + A, + B, + C, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // Everything should first progress to running + verifyContainerRunningStateChange(t, 
taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + verifyTaskIsRunning(stateChangeEvents, testTask) + + // The shutdown order will now proceed. Parent will exit first since it has an explicit exit command. + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, status.ContainerStopped) + assert.Equal(t, event.(api.ContainerStateChange).ContainerName, "parent") + + // The dependency chain is A -> B -> C. We expect the inverse order to be followed for shutdown: + // A shuts down, then B, then C + expectedC := <-stateChangeEvents + assert.Equal(t, expectedC.(api.ContainerStateChange).Status, status.ContainerStopped) + assert.Equal(t, expectedC.(api.ContainerStateChange).ContainerName, "A") + + expectedB := <-stateChangeEvents + assert.Equal(t, expectedB.(api.ContainerStateChange).Status, status.ContainerStopped) + assert.Equal(t, expectedB.(api.ContainerStateChange).ContainerName, "B") + + expectedA := <-stateChangeEvents + assert.Equal(t, expectedA.(api.ContainerStateChange).Status, status.ContainerStopped) + assert.Equal(t, expectedA.(api.ContainerStateChange).ContainerName, "C") + + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, shutdownOrderingTimeout) +} + +func TestMultipleContainerDependency(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "testMultipleContainerDependency" + testTask := createTestTask(taskArn) + + exit := createTestContainerWithImageAndName(baseImageForOS, "exit") + A := createTestContainerWithImageAndName(baseImageForOS, "A") + B := createTestContainerWithImageAndName(baseImageForOS, "B") + + exit.EntryPoint = &entryPointForOS + exit.Command = []string{"exit 1"} + exit.Essential = false + + A.EntryPoint = &entryPointForOS + A.Command = 
[]string{"sleep 10"} + A.Essential = true + A.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "exit", + Condition: "SUCCESS", + }, + } + + B.EntryPoint = &entryPointForOS + B.Command = []string{"sleep 10"} + B.Essential = true + B.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "A", + Condition: "START", + }, + { + ContainerName: "exit", + Condition: "SUCCESS", + }, + } + + testTask.Containers = []*apicontainer.Container{ + exit, + A, + B, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + // Only exit should first progress to running + verifyContainerRunningStateChange(t, taskEngine) + + // Exit container should stop with exit code 1 + event := <-stateChangeEvents + assert.Equal(t, event.(api.ContainerStateChange).Status, status.ContainerStopped) + assert.Equal(t, event.(api.ContainerStateChange).ContainerName, "exit") + + // The task should be now stopped as dependencies of A and B are not resolved + verifyTaskIsStopped(stateChangeEvents, testTask) + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} + +func waitFinished(t *testing.T, finished <-chan interface{}, duration time.Duration) { + select { + case <-finished: + t.Log("Finished successfully.") + return + case <-time.After(duration): + t.Error("timed out after: ", duration) + t.FailNow() + } +} diff --git a/agent/engine/ordering_integ_unix_test.go b/agent/engine/ordering_integ_unix_test.go new file mode 100644 index 00000000000..c92b94673bf --- /dev/null +++ b/agent/engine/ordering_integ_unix_test.go @@ -0,0 +1,98 @@ +// +build integration,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/stretchr/testify/assert" +) + +const ( + baseImageForOS = testRegistryHost + "/" + "busybox" +) + +var ( + entryPointForOS = []string{"sh", "-c"} +) + +func TestGranularStopTimeout(t *testing.T) { + taskEngine, done, _ := setupWithDefaultConfig(t) + defer done() + + stateChangeEvents := taskEngine.StateChangeEvents() + + taskArn := "TestGranularStopTimeout" + testTask := createTestTask(taskArn) + + parent := createTestContainerWithImageAndName(baseImageForOS, "parent") + dependency1 := createTestContainerWithImageAndName(baseImageForOS, "dependency1") + dependency2 := createTestContainerWithImageAndName(baseImageForOS, "dependency2") + + parent.EntryPoint = &entryPointForOS + parent.Command = []string{"sleep 30"} + parent.Essential = true + parent.DependsOnUnsafe = []apicontainer.DependsOn{ + { + ContainerName: "dependency1", + Condition: "START", + }, + { + ContainerName: "dependency2", + Condition: "START", + }, + } + + dependency1.EntryPoint = &entryPointForOS + dependency1.Command = []string{"trap 'echo caught' SIGTERM; sleep 60"} + dependency1.StopTimeout = 5 + + dependency2.EntryPoint = &entryPointForOS + dependency2.Command = []string{"trap 'echo caught' SIGTERM; sleep 60"} + dependency2.StopTimeout = 50 + + testTask.Containers = []*apicontainer.Container{ + dependency1, + dependency2, + parent, + } + + go taskEngine.AddTask(testTask) + + finished := make(chan interface{}) + go func() { + + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) + + verifyTaskIsRunning(stateChangeEvents, testTask) + + 
verifyContainerStoppedStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + verifyContainerStoppedStateChange(t, taskEngine) + + verifyTaskIsStopped(stateChangeEvents, testTask) + + assert.Equal(t, 137, *testTask.Containers[0].GetKnownExitCode(), "Dependency1 should exit with code 137") + assert.Equal(t, 0, *testTask.Containers[1].GetKnownExitCode(), "Dependency2 should exit with code 0") + + close(finished) + }() + + waitFinished(t, finished, orderingTimeout) +} diff --git a/agent/engine/ordering_integ_windows_test.go b/agent/engine/ordering_integ_windows_test.go new file mode 100644 index 00000000000..ee23a07dbdf --- /dev/null +++ b/agent/engine/ordering_integ_windows_test.go @@ -0,0 +1,24 @@ +// +build windows,integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +const ( + baseImageForOS = "amazon-ecs-ftest-windows-base:make" +) + +var ( + entryPointForOS = []string{"powershell"} +) diff --git a/agent/engine/task_manager.go b/agent/engine/task_manager.go new file mode 100644 index 00000000000..1aa92481f83 --- /dev/null +++ b/agent/engine/task_manager.go @@ -0,0 +1,1531 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/logger" + "github.com/aws/amazon-ecs-agent/agent/logger/field" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" +) + +const ( + // waitForPullCredentialsTimeout is the timeout agent trying to wait for pull + // credentials from acs, after the timeout it will check the credentials manager + // and start processing the task or start another round of waiting + waitForPullCredentialsTimeout = 1 * 
time.Minute + defaultTaskSteadyStatePollInterval = 5 * time.Minute + defaultTaskSteadyStatePollIntervalJitter = 30 * time.Second + transitionPollTime = 5 * time.Second + stoppedSentWaitInterval = 30 * time.Second + maxStoppedWaitTimes = 72 * time.Hour / stoppedSentWaitInterval + taskUnableToTransitionToStoppedReason = "TaskStateError: Agent could not progress task's state to stopped" +) + +var ( + _stoppedSentWaitInterval = stoppedSentWaitInterval + _maxStoppedWaitTimes = int(maxStoppedWaitTimes) +) + +type dockerContainerChange struct { + container *apicontainer.Container + event dockerapi.DockerContainerChangeEvent +} + +// resourceStateChange represents the required status change after resource transition +type resourceStateChange struct { + resource taskresource.TaskResource + nextState resourcestatus.ResourceStatus + err error +} + +type acsTransition struct { + seqnum int64 + desiredStatus apitaskstatus.TaskStatus +} + +// containerTransition defines the struct for a container to transition +type containerTransition struct { + nextState apicontainerstatus.ContainerStatus + actionRequired bool + blockedOn *apicontainer.DependsOn + reason error +} + +// resourceTransition defines the struct for a resource to transition. +type resourceTransition struct { + // nextState represents the next known status that the resource can move to + nextState resourcestatus.ResourceStatus + // status is the string value of nextState + status string + // actionRequired indicates if the transition function needs to be called for + // the transition to be complete + actionRequired bool + // reason represents the error blocks transition + reason error +} + +// managedTask is a type that is meant to manage the lifecycle of a task. +// There should be only one managed task construct for a given task arn and the +// managed task should be the only thing to modify the task's known or desired statuses. 
+//
+// The managedTask should run serially in a single goroutine in which it reads
+// messages from the two given channels and acts upon them.
+// This design is chosen to allow a safe level of isolation and avoid any race
+// conditions around the state of a task.
+// The data sources (e.g. docker, acs) that write to the task's channels may
+// block and it is expected that the managedTask listen to those channels
+// almost constantly.
+// The general operation should be:
+// 1) Listen to the channels
+// 2) On an event, update the status of the task and containers (known/desired)
+// 3) Figure out if any action needs to be done. If so, do it
+// 4) GOTO 1
+// Item '3' obviously might lead to some duration where you are not listening
+// to the channels. However, this can be solved by kicking off '3' as a
+// goroutine and then only communicating the result back via the channels
+// (obviously once you kick off a goroutine you give up the right to write the
+// task's statuses yourself)
+type managedTask struct {
+	*apitask.Task
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	engine             *DockerTaskEngine
+	cfg                *config.Config
+	credentialsManager credentials.Manager
+	cniClient          ecscni.CNIClient
+	taskStopWG         *utilsync.SequentialWaitGroup
+
+	acsMessages                chan acsTransition
+	dockerMessages             chan dockerContainerChange
+	resourceStateChangeEvent   chan resourceStateChange
+	stateChangeEvents          chan statechange.Event
+	containerChangeEventStream *eventstream.EventStream
+
+	// unexpectedStart is a once that controls stopping a container that
+	// unexpectedly started one time.
+	// This exists because a 'start' after a container is meant to be stopped is
+	// possible under some circumstances (e.g. a timeout). However, if it
+	// continues to 'start' when we aren't asking it to, let it go through in
+	// case it's a user trying to debug it or in case we're fighting with another
+	// thing managing the container.
+ unexpectedStart sync.Once + + _time ttime.Time + _timeOnce sync.Once + + // steadyStatePollInterval is the duration that a managed task waits + // once the task gets into steady state before polling the state of all of + // the task's containers to re-evaluate if the task is still in steady state + // This is set to defaultTaskSteadyStatePollInterval in production code. + // This can be used by tests that are looking to ensure that the steady state + // verification logic gets executed to set it to a low interval + steadyStatePollInterval time.Duration + steadyStatePollIntervalJitter time.Duration +} + +// newManagedTask is a method on DockerTaskEngine to create a new managedTask. +// This method must only be called when the engine.processTasks write lock is +// already held. +func (engine *DockerTaskEngine) newManagedTask(task *apitask.Task) *managedTask { + ctx, cancel := context.WithCancel(engine.ctx) + t := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: task, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + engine: engine, + cfg: engine.cfg, + stateChangeEvents: engine.stateChangeEvents, + containerChangeEventStream: engine.containerChangeEventStream, + credentialsManager: engine.credentialsManager, + cniClient: engine.cniClient, + taskStopWG: engine.taskStopGroup, + steadyStatePollInterval: engine.taskSteadyStatePollInterval, + steadyStatePollIntervalJitter: engine.taskSteadyStatePollIntervalJitter, + } + engine.managedTasks[task.Arn] = t + return t +} + +// overseeTask is the main goroutine of the managedTask. It runs an infinite +// loop of receiving messages and attempting to take action based on those +// messages. 
+func (mtask *managedTask) overseeTask() { + // Do a single updatestatus at the beginning to create the container + // `desiredstatus`es which are a construct of the engine used only here, + // not present on the backend + mtask.UpdateStatus() + // If this was a 'state restore', send all unsent statuses + mtask.emitCurrentStatus() + + // Wait for host resources required by this task to become available + mtask.waitForHostResources() + + // Main infinite loop. This is where we receive messages and dispatch work. + for { + if mtask.shouldExit() { + return + } + + // If it's steadyState, just spin until we need to do work + for mtask.steadyState() { + mtask.waitSteady() + } + + if mtask.shouldExit() { + return + } + + if !mtask.GetKnownStatus().Terminal() { + // If we aren't terminal and we aren't steady state, we should be + // able to move some containers along. + logger.Info("Task not steady state or terminal; progressing it", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + mtask.progressTask() + } + + if mtask.GetKnownStatus().Terminal() { + break + } + } + // We only break out of the above if this task is known to be stopped. Do + // onetime cleanup here, including removing the task after a timeout + logger.Info("Managed task has reached stopped; waiting for container cleanup", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + mtask.engine.checkTearDownPauseContainer(mtask.Task) + mtask.cleanupCredentials() + if mtask.StopSequenceNumber != 0 { + logger.Debug("Marking done for this sequence", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Sequence: mtask.StopSequenceNumber, + }) + mtask.taskStopWG.Done(mtask.StopSequenceNumber) + } + // TODO: make this idempotent on agent restart + go mtask.releaseIPInIPAM() + mtask.cleanupTask(mtask.cfg.TaskCleanupWaitDuration) +} + +// shouldExit checks if the task manager should exit, as the agent is exiting. 
+func (mtask *managedTask) shouldExit() bool { + select { + case <-mtask.ctx.Done(): + return true + default: + return false + } +} + +// emitCurrentStatus emits a container event for every container and a task +// event for the task +func (mtask *managedTask) emitCurrentStatus() { + for _, container := range mtask.Containers { + mtask.emitContainerEvent(mtask.Task, container, "") + } + mtask.emitTaskEvent(mtask.Task, "") +} + +// waitForHostResources waits for host resources to become available to start +// the task. This involves waiting for previous stops to complete so the +// resources become free. +func (mtask *managedTask) waitForHostResources() { + if mtask.StartSequenceNumber == 0 { + // This is the first transition on this host. No need to wait + return + } + if mtask.GetDesiredStatus().Terminal() { + // Task's desired status is STOPPED. No need to wait in this case either + return + } + + logger.Info("Waiting for any previous stops to complete", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Sequence: mtask.StartSequenceNumber, + }) + + othersStoppedCtx, cancel := context.WithCancel(mtask.ctx) + defer cancel() + + go func() { + mtask.taskStopWG.Wait(mtask.StartSequenceNumber) + cancel() + }() + + for !mtask.waitEvent(othersStoppedCtx.Done()) { + if mtask.GetDesiredStatus().Terminal() { + // If we end up here, that means we received a start then stop for this + // task before a task that was expected to stop before it could + // actually stop + break + } + } + logger.Info("Wait over; ready to move towards desired status", logger.Fields{ + field.TaskARN: mtask.Arn, + field.DesiredStatus: mtask.GetDesiredStatus().String(), + }) +} + +// waitSteady waits for a task to leave steady-state by waiting for a new +// event, or a timeout. 
+func (mtask *managedTask) waitSteady() { + logger.Info("Managed task at steady state", logger.Fields{ + field.TaskARN: mtask.Arn, + field.KnownStatus: mtask.GetKnownStatus().String(), + }) + + timeoutCtx, cancel := context.WithTimeout(mtask.ctx, retry.AddJitter(mtask.steadyStatePollInterval, mtask.steadyStatePollIntervalJitter)) + defer cancel() + timedOut := mtask.waitEvent(timeoutCtx.Done()) + if mtask.shouldExit() { + return + } + + if timedOut { + logger.Info("Checking to verify it's still at steady state", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + go mtask.engine.checkTaskState(mtask.Task) + } +} + +// steadyState returns if the task is in a steady state. Steady state is when task's desired +// and known status are both RUNNING +func (mtask *managedTask) steadyState() bool { + select { + case <-mtask.ctx.Done(): + logger.Info("Task manager exiting", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + return false + default: + taskKnownStatus := mtask.GetKnownStatus() + return taskKnownStatus == apitaskstatus.TaskRunning && taskKnownStatus >= mtask.GetDesiredStatus() + } +} + +// cleanupCredentials removes credentials for a stopped task +func (mtask *managedTask) cleanupCredentials() { + taskCredentialsID := mtask.GetCredentialsID() + if taskCredentialsID != "" { + mtask.credentialsManager.RemoveCredentials(taskCredentialsID) + } +} + +// waitEvent waits for any event to occur. If an event occurs, the appropriate +// handler is called. Generally the stopWaiting arg is the context's Done +// channel. When the Done channel is signalled by the context, waitEvent will +// return true. 
+func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool { + logger.Info("Waiting for task event", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + select { + case acsTransition := <-mtask.acsMessages: + logger.Info("Managed task got acs event", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + mtask.handleDesiredStatusChange(acsTransition.desiredStatus, acsTransition.seqnum) + return false + case dockerChange := <-mtask.dockerMessages: + mtask.handleContainerChange(dockerChange) + return false + case resChange := <-mtask.resourceStateChangeEvent: + res := resChange.resource + logger.Info("Managed task got resource", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: res.GetName(), + field.Status: res.StatusString(resChange.nextState), + }) + mtask.handleResourceStateChange(resChange) + return false + case <-stopWaiting: + return true + } +} + +// handleDesiredStatusChange updates the desired status on the task. Updates +// only occur if the new desired status is "compatible" (farther along than the +// current desired state); "redundant" (less-than or equal desired states) are +// ignored and dropped. 
+func (mtask *managedTask) handleDesiredStatusChange(desiredStatus apitaskstatus.TaskStatus, seqnum int64) { + // Handle acs message changes this task's desired status to whatever + // acs says it should be if it is compatible + logger.Info("New acs transition", logger.Fields{ + field.TaskARN: mtask.Arn, + field.DesiredStatus: desiredStatus.String(), + field.Sequence: seqnum, + "StopNumber": mtask.StopSequenceNumber, + }) + if desiredStatus <= mtask.GetDesiredStatus() { + logger.Info("Redundant task transition; ignoring", logger.Fields{ + field.TaskARN: mtask.Arn, + field.DesiredStatus: desiredStatus.String(), + field.Sequence: seqnum, + "StopNumber": mtask.StopSequenceNumber, + }) + return + } + if desiredStatus == apitaskstatus.TaskStopped && seqnum != 0 && mtask.GetStopSequenceNumber() == 0 { + logger.Info("Managed task moving to stopped, adding to stopgroup with sequence number", + logger.Fields{ + field.TaskARN: mtask.Arn, + field.Sequence: seqnum, + }) + mtask.SetStopSequenceNumber(seqnum) + mtask.taskStopWG.Add(seqnum, 1) + } + mtask.SetDesiredStatus(desiredStatus) + mtask.UpdateDesiredStatus() + mtask.engine.saveTaskData(mtask.Task) +} + +// handleContainerChange updates a container's known status. If the message +// contains any interesting information (like exit codes or ports), they are +// propagated. 
+func (mtask *managedTask) handleContainerChange(containerChange dockerContainerChange) { + // locate the container + container := containerChange.container + runtimeID := container.GetRuntimeID() + event := containerChange.event + logger.Info("Handling container change event", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.Status: event.Status.String(), + }) + found := mtask.isContainerFound(container) + if !found { + logger.Critical("State error; invoked with another task's container!", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.Status: event.Status.String(), + }) + return + } + + // If this is a backwards transition stopped->running, the first time set it + // to be known running so it will be stopped. Subsequently ignore these backward transitions + containerKnownStatus := container.GetKnownStatus() + mtask.handleStoppedToRunningContainerTransition(event.Status, container) + if event.Status <= containerKnownStatus { + logger.Info("Container change is redundant", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.Status: event.Status.String(), + field.KnownStatus: containerKnownStatus.String(), + }) + + // Only update container metadata when status stays RUNNING + if event.Status == containerKnownStatus && event.Status == apicontainerstatus.ContainerRunning { + updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task) + } + return + } + + // Container has progressed its status if we reach here. Make sure to save it to database. 
+ defer mtask.engine.saveContainerData(container) + + // Update the container to be known + currentKnownStatus := containerKnownStatus + container.SetKnownStatus(event.Status) + updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task) + + if event.Error != nil { + proceedAnyway := mtask.handleEventError(containerChange, currentKnownStatus) + if !proceedAnyway { + return + } + } + + if execcmd.IsExecEnabledContainer(container) && container.GetKnownStatus() == apicontainerstatus.ContainerStopped { + // if this is an execute-command-enabled container STOPPED event, we should emit a corresponding managedAgent event + mtask.handleManagedAgentStoppedTransition(container, execcmd.ExecuteCommandAgentName) + } + + mtask.RecordExecutionStoppedAt(container) + logger.Debug("Sending container change event to tcs", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.Status: event.Status.String(), + }) + err := mtask.containerChangeEventStream.WriteToEventStream(event) + if err != nil { + logger.Warn("Failed to write container change event to tcs event stream", + logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.Error: err, + }) + } + + mtask.emitContainerEvent(mtask.Task, container, "") + if mtask.UpdateStatus() { + logger.Info("Container change also resulted in task change", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: runtimeID, + field.DesiredStatus: mtask.GetDesiredStatus().String(), + }) + // If knownStatus changed, let it be known + var taskStateChangeReason string + if mtask.GetKnownStatus().Terminal() { + taskStateChangeReason = mtask.Task.GetTerminalReason() + } + mtask.emitTaskEvent(mtask.Task, taskStateChangeReason) + // Save the new task status to database. 
+ mtask.engine.saveTaskData(mtask.Task) + } +} + +// handleResourceStateChange attempts to update resource's known status depending on +// the current status and errors during transition +func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChange) { + // locate the resource + res := resChange.resource + if !mtask.isResourceFound(res) { + logger.Critical("State error; invoked with another task's resource", + logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: res.GetName(), + }) + return + } + + status := resChange.nextState + err := resChange.err + currentKnownStatus := res.GetKnownStatus() + + if status <= currentKnownStatus { + logger.Info("Redundant resource state change", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: res.GetName(), + field.Status: res.StatusString(status), + field.KnownStatus: res.StatusString(currentKnownStatus), + }) + return + } + + // There is a resource state change. Resource is stored as part of the task, so make sure to save the task + // at the end. + defer mtask.engine.saveTaskData(mtask.Task) + + // Set known status regardless of error so the applied status can be cleared. If there is error, + // the known status might be set again below (but that won't affect the applied status anymore). + // This follows how container state change is handled. + res.SetKnownStatus(status) + if err == nil { + return + } + + if status == res.SteadyState() { // Failed to create resource. + logger.Error("Managed task [%s]: failed to create task resource [%s]: %v", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: res.GetName(), + field.Error: err, + }) + res.SetKnownStatus(currentKnownStatus) // Set status back to None. 
+ + logger.Info("Marking task desired status to STOPPED", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + mtask.SetDesiredStatus(apitaskstatus.TaskStopped) + mtask.Task.SetTerminalReason(res.GetTerminalReason()) + } +} + +func (mtask *managedTask) emitResourceChange(change resourceStateChange) { + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to emit resource state change due to exit", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + case mtask.resourceStateChangeEvent <- change: + } +} + +func getContainerEventLogFields(c api.ContainerStateChange) logger.Fields { + f := logger.Fields{ + "ContainerName": c.ContainerName, + } + if c.ExitCode != nil { + f["Exit"] = strconv.Itoa(*c.ExitCode) + } + if c.Reason != "" { + f["Reason"] = c.Reason + } + if len(c.PortBindings) != 0 { + f["Ports"] = fmt.Sprintf("%v", c.PortBindings) + } + if c.Container != nil { + f["KnownSent"] = c.Container.GetSentStatus().String() + } + return f +} + +func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) { + event, err := api.NewTaskStateChangeEvent(task, reason) + if err != nil { + logger.Debug("Managed task [%s]: skipping emitting event for task [%s]: %v", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Reason: reason, + field.Error: err, + }) + return + } + logger.Info("Sending task change event", logger.Fields{ + field.TaskARN: task.Arn, + field.Status: event.Status.String(), + field.SentStatus: task.GetSentStatus().String(), + field.Event: event.String(), + }) + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to send task change event due to exit", logger.Fields{ + field.TaskARN: task.Arn, + field.Status: event.Status.String(), + field.SentStatus: task.GetSentStatus().String(), + field.Event: event.String(), + }) + case mtask.stateChangeEvents <- event: + } + logger.Info("Sent task change event", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Event: event.String(), + }) +} + +// emitManagedAgentEvent passes a special task event up 
through the taskEvents channel if there are managed +// agent changes in the container passed as parameter. +// It will omit events the backend would not process +func (mtask *managedTask) emitManagedAgentEvent(task *apitask.Task, cont *apicontainer.Container, managedAgentName string, reason string) { + event, err := api.NewManagedAgentChangeEvent(task, cont, managedAgentName, reason) + if err != nil { + logger.Error("Skipping emitting ManagedAgent event for task", logger.Fields{ + field.TaskARN: task.Arn, + field.Reason: reason, + field.Error: err, + }) + return + } + logger.Info("Sending ManagedAgent event", logger.Fields{ + field.TaskARN: task.Arn, + field.Event: event.String(), + }) + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to send managed agent event due to exit", logger.Fields{ + field.TaskARN: task.Arn, + field.Event: event.String(), + }) + case mtask.stateChangeEvents <- event: + } + logger.Info("Sent managed agent event [%s]", logger.Fields{ + field.TaskARN: task.Arn, + field.Event: event.String(), + }) +} + +// emitContainerEvent passes a given event up through the containerEvents channel if necessary. +// It will omit events the backend would not process and will perform best-effort deduplication of events. 
+func (mtask *managedTask) emitContainerEvent(task *apitask.Task, cont *apicontainer.Container, reason string) { + event, err := api.NewContainerStateChangeEvent(task, cont, reason) + if err != nil { + logger.Debug("Skipping emitting event for container", logger.Fields{ + field.TaskARN: task.Arn, + field.Container: cont.Name, + field.Error: err, + }) + return + } + mtask.doEmitContainerEvent(event) +} + +func (mtask *managedTask) doEmitContainerEvent(event api.ContainerStateChange) { + logger.Info("Sending container change event", getContainerEventLogFields(event), logger.Fields{ + field.TaskARN: mtask.Arn, + }) + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to send container change event due to exit", getContainerEventLogFields(event), logger.Fields{ + field.TaskARN: mtask.Arn, + }) + case mtask.stateChangeEvents <- event: + } + logger.Info("Sent container change event", getContainerEventLogFields(event), logger.Fields{ + field.TaskARN: mtask.Arn, + }) +} + +func (mtask *managedTask) emitDockerContainerChange(change dockerContainerChange) { + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to emit docker container change due to exit", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + case mtask.dockerMessages <- change: + } +} + +func (mtask *managedTask) emitACSTransition(transition acsTransition) { + select { + case <-mtask.ctx.Done(): + logger.Info("Unable to emit docker container change due to exit", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + case mtask.acsMessages <- transition: + } +} + +func (mtask *managedTask) isContainerFound(container *apicontainer.Container) bool { + found := false + for _, c := range mtask.Containers { + if container == c { + found = true + break + } + } + return found +} + +func (mtask *managedTask) isResourceFound(res taskresource.TaskResource) bool { + for _, r := range mtask.GetResources() { + if res.GetName() == r.GetName() { + return true + } + } + return false +} + +// releaseIPInIPAM releases the 
IP address used by the task in awsvpc mode. +func (mtask *managedTask) releaseIPInIPAM() { + if !mtask.IsNetworkModeAWSVPC() { + return + } + logger.Info("IPAM releasing ip for task eni", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + cfg, err := mtask.BuildCNIConfig(true, &ecscni.Config{ + MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, + }) + if err != nil { + logger.Error("Failed to release ip; unable to build cni configuration", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Error: err, + }) + return + } + err = mtask.cniClient.ReleaseIPResource(mtask.ctx, cfg, ipamCleanupTmeout) + if err != nil { + logger.Error("Failed to release ip; IPAM error", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Error: err, + }) + return + } +} + +// handleStoppedToRunningContainerTransition detects a "backwards" container +// transition where a known-stopped container is found to be running again and +// handles it. +func (mtask *managedTask) handleStoppedToRunningContainerTransition(status apicontainerstatus.ContainerStatus, container *apicontainer.Container) { + containerKnownStatus := container.GetKnownStatus() + if status > containerKnownStatus { + // Event status is greater than container's known status. + // This is not a backward transition, return + return + } + if containerKnownStatus != apicontainerstatus.ContainerStopped { + // Container's known status is not STOPPED. Nothing to do here. + return + } + if !status.IsRunning() { + // Container's 'to' transition was not either of RUNNING or RESOURCES_PROVISIONED + // states. 
Nothing to do in this case as well + return + } + // If the container becomes running after we've stopped it (possibly + // because we got an error running it and it ran anyways), the first time + // update it to 'known running' so that it will be driven back to stopped + mtask.unexpectedStart.Do(func() { + logger.Warn("Stopped container came back; re-stopping it once", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + }) + go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped) + // This will not proceed afterwards because status <= knownstatus below + }) +} + +// handleManagedAgentStoppedTransition handles a container change event which has a managed agent status +// we should emit ManagedAgent events for certain container events. +func (mtask *managedTask) handleManagedAgentStoppedTransition(container *apicontainer.Container, managedAgentName string) { + //for now we only have the ExecuteCommandAgent + switch managedAgentName { + case execcmd.ExecuteCommandAgentName: + if !container.UpdateManagedAgentStatus(managedAgentName, apicontainerstatus.ManagedAgentStopped) { + logger.Warn("Cannot find ManagedAgent for container", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.ManagedAgent: managedAgentName, + }) + + } + mtask.emitManagedAgentEvent(mtask.Task, container, managedAgentName, "Received Container Stopped event") + default: + logger.Warn("Unexpected ManagedAgent in container; unable to process ManagedAgent transition event", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.ManagedAgent: managedAgentName, + }) + } +} + +// handleEventError handles a container change event error and decides whether +// we should proceed to transition the container +func (mtask *managedTask) handleEventError(containerChange dockerContainerChange, currentKnownStatus apicontainerstatus.ContainerStatus) bool { + container := 
containerChange.container + event := containerChange.event + if container.ApplyingError == nil { + container.ApplyingError = apierrors.NewNamedError(event.Error) + } + switch event.Status { + // event.Status is the desired container transition from container's known status + // (* -> event.Status) + case apicontainerstatus.ContainerPulled: + // If the agent pull behavior is always or once, we receive the error because + // the image pull fails, the task should fail. If we don't fail task here, + // then the cached image will probably be used for creating container, and we + // don't want to use cached image for both cases. + if mtask.cfg.ImagePullBehavior == config.ImagePullAlwaysBehavior || + mtask.cfg.ImagePullBehavior == config.ImagePullOnceBehavior { + logger.Error("Error while pulling image; moving task to STOPPED", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Image: container.Image, + field.Container: container.Name, + field.Error: event.Error, + }) + // The task should be stopped regardless of whether this container is + // essential or non-essential. + mtask.SetDesiredStatus(apitaskstatus.TaskStopped) + return false + } + // If the agent pull behavior is prefer-cached, we receive the error because + // the image pull fails and there is no cached image in local, we don't make + // the task fail here, will let create container handle it instead. + // If the agent pull behavior is default, use local image cache directly, + // assuming it exists. 
+ logger.Error("Error while pulling image; will try to run anyway", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Image: container.Image, + field.Container: container.Name, + field.Error: event.Error, + }) + // proceed anyway + return true + case apicontainerstatus.ContainerStopped: + // Container's desired transition was to 'STOPPED' + return mtask.handleContainerStoppedTransitionError(event, container, currentKnownStatus) + case apicontainerstatus.ContainerStatusNone: + fallthrough + case apicontainerstatus.ContainerCreated: + // No need to explicitly stop containers if this is a * -> NONE/CREATED transition + logger.Warn("Error creating container; marking its desired status as STOPPED", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.Error: event.Error, + }) + container.SetKnownStatus(currentKnownStatus) + container.SetDesiredStatus(apicontainerstatus.ContainerStopped) + return false + default: + // If this is a * -> RUNNING / RESOURCES_PROVISIONED transition, we need to stop + // the container. + logger.Warn("Error starting/provisioning container[%s (Runtime ID: %s)];", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + field.Error: event.Error, + }) + container.SetKnownStatus(currentKnownStatus) + container.SetDesiredStatus(apicontainerstatus.ContainerStopped) + errorName := event.Error.ErrorName() + errorStr := event.Error.Error() + shouldForceStop := false + if errorName == dockerapi.DockerTimeoutErrorName || errorName == dockerapi.CannotInspectContainerErrorName { + // If there's an error with inspecting the container or in case of timeout error, + // we'll assume that the container has transitioned to RUNNING and issue + // a stop. 
See #1043 for details + shouldForceStop = true + } else if errorName == dockerapi.CannotStartContainerErrorName && strings.HasSuffix(errorStr, io.EOF.Error()) { + // If we get an EOF error from Docker when starting the container, we don't really know whether the + // container is started anyway. So issuing a stop here as well. See #1708. + shouldForceStop = true + } + + if shouldForceStop { + logger.Warn("Forcing container to stop", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + }) + go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped) + } + // Container known status not changed, no need for further processing + return false + } +} + +// handleContainerStoppedTransitionError handles an error when transitioning a container to +// STOPPED. It returns a boolean indicating whether the task can continue with updating its +// state +func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi.DockerContainerChangeEvent, + container *apicontainer.Container, + currentKnownStatus apicontainerstatus.ContainerStatus) bool { + // If we were trying to transition to stopped and had a timeout error + // from docker, reset the known status to the current status and return + // This ensures that we don't emit a containerstopped event; a + // terminal container event from docker event stream will instead be + // responsible for the transition.
Alternatively, the steadyState check + // could also trigger the progress and have another go at stopping the + // container + if event.Error.ErrorName() == dockerapi.DockerTimeoutErrorName { + logger.Info("Error stopping container; ignoring state change", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + "ErrorName": event.Error.ErrorName(), + field.Error: event.Error.Error(), + }) + container.SetKnownStatus(currentKnownStatus) + return false + } + // If docker returned a transient error while trying to stop a container, + // reset the known status to the current status and return + cannotStopContainerError, ok := event.Error.(cannotStopContainerError) + if ok && cannotStopContainerError.IsRetriableError() { + logger.Info("Error stopping the container; ignoring state change", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + "ErrorName": event.Error.ErrorName(), + field.Error: cannotStopContainerError.Error(), + }) + container.SetKnownStatus(currentKnownStatus) + return false + } + + // If we were trying to transition to stopped and had an error, we + // clearly can't just continue trying to transition it to stopped + // again and again. In this case, assume it's stopped (or close + // enough) and get on with it + // This can happen in cases where the container we tried to stop + // was already stopped or did not exist at all. 
+ logger.Warn("Error stopping the container; ignoring state change", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + "ErrorName": event.Error.ErrorName(), + field.Error: event.Error.Error(), + }) + container.SetKnownStatus(apicontainerstatus.ContainerStopped) + container.SetDesiredStatus(apicontainerstatus.ContainerStopped) + return true +} + +// progressTask tries to step forwards all containers and resources that are able to be +// transitioned in the task's current state. +// It will continue listening to events from all channels while it does so, but +// none of those changes will be acted upon until this set of requests to +// docker completes. +// Container changes may also prompt the task status to change as well. +func (mtask *managedTask) progressTask() { + logger.Debug("Progressing containers and resources in task", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + // max number of transitions length to ensure writes will never block on + // these and if we exit early transitions can exit the goroutine and it'll + // get GC'd eventually + resources := mtask.GetResources() + transitionChange := make(chan struct{}, len(mtask.Containers)+len(resources)) + transitionChangeEntity := make(chan string, len(mtask.Containers)+len(resources)) + + // startResourceTransitions should always be called before startContainerTransitions, + // else it might result in a state where none of the containers can transition and + // task may be moved to stopped. 
+ // anyResourceTransition is set to true when transition function needs to be called or + // known status can be changed + anyResourceTransition, resTransitions := mtask.startResourceTransitions( + func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) { + mtask.transitionResource(resource, nextStatus) + transitionChange <- struct{}{} + transitionChangeEntity <- resource.GetName() + }) + + anyContainerTransition, blockedDependencies, contTransitions, reasons := mtask.startContainerTransitions( + func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) { + mtask.engine.transitionContainer(mtask.Task, container, nextStatus) + transitionChange <- struct{}{} + transitionChangeEntity <- container.Name + }) + + atLeastOneTransitionStarted := anyResourceTransition || anyContainerTransition + + blockedByOrderingDependencies := len(blockedDependencies) > 0 + + // If no transitions happened and we aren't blocked by ordering dependencies, then we are possibly in a state where + // its impossible for containers to move forward. We will do an additional check to see if we are waiting for ACS + // execution credentials. If not, then we will abort the task progression. + if !atLeastOneTransitionStarted && !blockedByOrderingDependencies { + if !mtask.isWaitingForACSExecutionCredentials(reasons) { + mtask.handleContainersUnableToTransitionState() + } + return + } + + // If no containers are starting and we are blocked on ordering dependencies, we should watch for the task to change + // over time. This will update the containers if they become healthy or stop, which makes it possible for the + // conditions "HEALTHY" and "SUCCESS" to succeed. 
+ if !atLeastOneTransitionStarted && blockedByOrderingDependencies { + go mtask.engine.checkTaskState(mtask.Task) + ctx, cancel := context.WithTimeout(context.Background(), transitionPollTime) + defer cancel() + for timeout := mtask.waitEvent(ctx.Done()); !timeout; { + timeout = mtask.waitEvent(ctx.Done()) + } + return + } + + // combine the resource and container transitions + transitions := make(map[string]string) + for k, v := range resTransitions { + transitions[k] = v + } + for k, v := range contTransitions { + transitions[k] = v.String() + } + + // We've kicked off one or more transitions, wait for them to + // complete, but keep reading events as we do. in fact, we have to for + // transitions to complete + mtask.waitForTransition(transitions, transitionChange, transitionChangeEntity) + // update the task status + if mtask.UpdateStatus() { + logger.Info("Container or resource change also resulted in task change", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + // If knownStatus changed, let it be known + var taskStateChangeReason string + if mtask.GetKnownStatus().Terminal() { + taskStateChangeReason = mtask.Task.GetTerminalReason() + } + mtask.emitTaskEvent(mtask.Task, taskStateChangeReason) + } +} + +// isWaitingForACSExecutionCredentials checks if the container that can't be transitioned +// was caused by waiting for credentials and start waiting +func (mtask *managedTask) isWaitingForACSExecutionCredentials(reasons []error) bool { + for _, reason := range reasons { + if reason == dependencygraph.CredentialsNotResolvedErr { + logger.Info("Waiting for credentials to pull from ECR", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + timeoutCtx, timeoutCancel := context.WithTimeout(mtask.ctx, waitForPullCredentialsTimeout) + defer timeoutCancel() + + timedOut := mtask.waitEvent(timeoutCtx.Done()) + if timedOut { + logger.Info("Timed out waiting for acs credentials message", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + } + return true + } + } + 
return false +} + +// startContainerTransitions steps through each container in the task and calls +// the passed transition function when a transition should occur. +func (mtask *managedTask) startContainerTransitions(transitionFunc containerTransitionFunc) (bool, map[string]apicontainer.DependsOn, map[string]apicontainerstatus.ContainerStatus, []error) { + anyCanTransition := false + var reasons []error + blocked := make(map[string]apicontainer.DependsOn) + transitions := make(map[string]apicontainerstatus.ContainerStatus) + for _, cont := range mtask.Containers { + transition := mtask.containerNextState(cont) + if transition.reason != nil { + // container can't be transitioned + reasons = append(reasons, transition.reason) + if transition.blockedOn != nil { + blocked[cont.Name] = *transition.blockedOn + } + continue + } + + // If the container is already in a transition, skip + if transition.actionRequired && !cont.SetAppliedStatus(transition.nextState) { + // At least one container is able to be moved forwards, so we're not deadlocked + anyCanTransition = true + continue + } + + // At least one container is able to be moved forwards, so we're not deadlocked + anyCanTransition = true + + if !transition.actionRequired { + // Updating the container status without calling any docker API, send in + // a goroutine so that it won't block here before the waitForContainerTransition + // was called after this function. And all the events sent to mtask.dockerMessages + // will be handled by handleContainerChange. 
+ go func(cont *apicontainer.Container, status apicontainerstatus.ContainerStatus) { + mtask.dockerMessages <- dockerContainerChange{ + container: cont, + event: dockerapi.DockerContainerChangeEvent{ + Status: status, + }, + } + }(cont, transition.nextState) + continue + } + transitions[cont.Name] = transition.nextState + go transitionFunc(cont, transition.nextState) + } + + return anyCanTransition, blocked, transitions, reasons +} + +// startResourceTransitions steps through each resource in the task and calls +// the passed transition function when a transition should occur +func (mtask *managedTask) startResourceTransitions(transitionFunc resourceTransitionFunc) (bool, map[string]string) { + anyCanTransition := false + transitions := make(map[string]string) + for _, res := range mtask.GetResources() { + knownStatus := res.GetKnownStatus() + desiredStatus := res.GetDesiredStatus() + if knownStatus >= desiredStatus { + logger.Debug("Resource has already transitioned to or beyond the desired status", + logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: res.GetName(), + field.KnownStatus: res.StatusString(knownStatus), + field.DesiredStatus: res.StatusString(desiredStatus), + }) + continue + } + anyCanTransition = true + transition := mtask.resourceNextState(res) + // If the resource is already in a transition, skip + if transition.actionRequired && !res.SetAppliedStatus(transition.nextState) { + // At least one resource is able to be moved forwards, so we're not deadlocked + continue + } + if !transition.actionRequired { + // no action is required for the transition, just set the known status without + // calling any transition function + go mtask.emitResourceChange(resourceStateChange{ + resource: res, + nextState: transition.nextState, + err: nil, + }) + continue + } + // At least one resource is able to be move forwards, so we're not deadlocked + transitions[res.GetName()] = transition.status + go transitionFunc(res, transition.nextState) + } + return 
anyCanTransition, transitions +} + +// transitionResource calls applyResourceState, and then notifies the managed +// task of the change. transitionResource is called by progressTask +func (mtask *managedTask) transitionResource(resource taskresource.TaskResource, + to resourcestatus.ResourceStatus) { + err := mtask.applyResourceState(resource, to) + + if mtask.engine.isTaskManaged(mtask.Arn) { + mtask.emitResourceChange(resourceStateChange{ + resource: resource, + nextState: to, + err: err, + }) + } +} + +// applyResourceState moves the resource to the given state by calling the +// function defined in the transitionFunctionMap for the state +func (mtask *managedTask) applyResourceState(resource taskresource.TaskResource, + nextState resourcestatus.ResourceStatus) error { + resName := resource.GetName() + resStatus := resource.StatusString(nextState) + err := resource.ApplyTransition(nextState) + if err != nil { + logger.Info("Error transitioning resource", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: resName, + field.FailedStatus: resStatus, + field.Error: err, + }) + + return err + } + logger.Info("Transitioned resource", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: resName, + field.FailedStatus: resStatus, + }) + return nil +} + +type containerTransitionFunc func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) + +type resourceTransitionFunc func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) + +// containerNextState determines the next state a container should go to. 
+// It returns a transition struct including the information: +// * container state it should transition to, +// * a bool indicating whether any action is required +// * an error indicating why this transition can't happen +// +// 'Stopped, false, ""' -> "You can move it to known stopped, but you don't have to call a transition function" +// 'Running, true, ""' -> "You can move it to running and you need to call the transition function" +// 'None, false, containerDependencyNotResolved' -> "This should not be moved; it has unresolved dependencies;" +// +// Next status is determined by whether the known and desired statuses are +// equal, the next numerically greater (iota-wise) status, and whether +// dependencies are fully resolved. +func (mtask *managedTask) containerNextState(container *apicontainer.Container) *containerTransition { + containerKnownStatus := container.GetKnownStatus() + containerDesiredStatus := container.GetDesiredStatus() + + if containerKnownStatus == containerDesiredStatus { + logger.Debug("Container at desired status", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + field.DesiredStatus: containerDesiredStatus.String(), + }) + return &containerTransition{ + nextState: apicontainerstatus.ContainerStatusNone, + actionRequired: false, + reason: dependencygraph.ContainerPastDesiredStatusErr, + } + } + + if containerKnownStatus > containerDesiredStatus { + logger.Debug("Managed task [%s]: container [%s (Runtime ID: %s)] has already transitioned beyond desired status(%s): %s", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + field.KnownStatus: containerKnownStatus.String(), + field.DesiredStatus: containerDesiredStatus.String(), + }) + return &containerTransition{ + nextState: apicontainerstatus.ContainerStatusNone, + actionRequired: false, + reason: dependencygraph.ContainerPastDesiredStatusErr, + 
} + } + if blocked, err := dependencygraph.DependenciesAreResolved(container, mtask.Containers, + mtask.Task.GetExecutionCredentialsID(), mtask.credentialsManager, mtask.GetResources(), mtask.cfg); err != nil { + logger.Debug("Can't apply state to container yet due to unresolved dependencies", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Container: container.Name, + field.RuntimeID: container.GetRuntimeID(), + field.Error: err, + }) + return &containerTransition{ + nextState: apicontainerstatus.ContainerStatusNone, + actionRequired: false, + reason: err, + blockedOn: blocked, + } + } + + var nextState apicontainerstatus.ContainerStatus + if container.DesiredTerminal() { + nextState = apicontainerstatus.ContainerStopped + // It's not enough to just check if container is in steady state here + // we should really check if >= RUNNING <= STOPPED + if !container.IsRunning() { + // If the container's AppliedStatus is running, it means the StartContainer + // api call has already been scheduled, we should not mark it as stopped + // directly, because when the stopped container comes back, we will end up + // with either: + // 1. The task is not cleaned up, the handleStoppedToRunningContainerTransition + // function will handle this case, but only once. If there are some + // other stopped containers come back, they will not be stopped by + // Agent. + // 2. The task has already been cleaned up, in this case any stopped container + // will not be stopped by Agent when they come back. + if container.GetAppliedStatus() == apicontainerstatus.ContainerRunning { + nextState = apicontainerstatus.ContainerStatusNone + } + + return &containerTransition{ + nextState: nextState, + actionRequired: false, + } + } + } else { + nextState = container.GetNextKnownStateProgression() + } + return &containerTransition{ + nextState: nextState, + actionRequired: true, + } +} + +// resourceNextState determines the next state a resource should go to. 
+// It returns a transition struct including the information: +// * resource state it should transition to, +// * string presentation of the resource state +// * a bool indicating whether any action is required +// * an error indicating why this transition can't happen +// +// Next status is determined by whether the known and desired statuses are +// equal, the next numerically greater (iota-wise) status, and whether +// dependencies are fully resolved. +func (mtask *managedTask) resourceNextState(resource taskresource.TaskResource) *resourceTransition { + resKnownStatus := resource.GetKnownStatus() + resDesiredStatus := resource.GetDesiredStatus() + + if resKnownStatus >= resDesiredStatus { + logger.Debug("Managed task [%s]: task resource [%s] has already transitioned to or beyond desired status(%s): %s", logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: resource.GetName(), + field.KnownStatus: resource.StatusString(resKnownStatus), + field.DesiredStatus: resource.StatusString(resDesiredStatus), + }) + return &resourceTransition{ + nextState: resourcestatus.ResourceStatusNone, + status: resource.StatusString(resourcestatus.ResourceStatusNone), + actionRequired: false, + reason: dependencygraph.ResourcePastDesiredStatusErr, + } + } + if err := dependencygraph.TaskResourceDependenciesAreResolved(resource, mtask.Containers); err != nil { + logger.Debug("Can't apply state to resource yet due to unresolved dependencies", + logger.Fields{ + field.TaskARN: mtask.Arn, + field.Resource: resource.GetName(), + field.Error: err, + }) + return &resourceTransition{ + nextState: resourcestatus.ResourceStatusNone, + status: resource.StatusString(resourcestatus.ResourceStatusNone), + actionRequired: false, + reason: err, + } + } + + var nextState resourcestatus.ResourceStatus + if resource.DesiredTerminal() { + nextState := resource.TerminalStatus() + return &resourceTransition{ + nextState: nextState, + status: resource.StatusString(nextState), + actionRequired: 
false, // Resource cleanup is done while cleaning up task, so not doing anything here. + } + } + nextState = resource.NextKnownState() + return &resourceTransition{ + nextState: nextState, + status: resource.StatusString(nextState), + actionRequired: true, + } +} + +func (mtask *managedTask) handleContainersUnableToTransitionState() { + logger.Critical("Task in a bad state; it's not steady state but no containers want to transition", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + if mtask.GetDesiredStatus().Terminal() { + // Ack, really bad. We want it to stop but the containers don't think + // that's possible. let's just break out and hope for the best! + logger.Critical("The state is so bad that we're just giving up on it", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + mtask.SetKnownStatus(apitaskstatus.TaskStopped) + mtask.emitTaskEvent(mtask.Task, taskUnableToTransitionToStoppedReason) + // TODO we should probably panic here + } else { + logger.Critical("Moving task to stopped due to bad state", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + mtask.handleDesiredStatusChange(apitaskstatus.TaskStopped, 0) + } +} + +func (mtask *managedTask) waitForTransition(transitions map[string]string, + transition <-chan struct{}, + transitionChangeEntity <-chan string) { + // There could be multiple transitions, but we just need to wait for one of them + // to ensure that there is at least one container or resource can be processed in the next + // progressTask call. This is done by waiting for one transition/acs/docker message. 
+ if !mtask.waitEvent(transition) { + logger.Debug("Received non-transition events", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + return + } + transitionedEntity := <-transitionChangeEntity + logger.Debug("Managed task [%s]: transition for [%s] finished", logger.Fields{ + field.TaskARN: mtask.Arn, + "TransitionedEntity": transitionedEntity, + }) + delete(transitions, transitionedEntity) + logger.Debug("Task still waiting for: %v", logger.Fields{ + field.TaskARN: mtask.Arn, + "TransitionedEntity": fmt.Sprintf("%v", transitions), + }) +} + +func (mtask *managedTask) time() ttime.Time { + mtask._timeOnce.Do(func() { + if mtask._time == nil { + mtask._time = &ttime.DefaultTime{} + } + }) + return mtask._time +} + +func (mtask *managedTask) cleanupTask(taskStoppedDuration time.Duration) { + cleanupTimeDuration := mtask.GetKnownStatusTime().Add(taskStoppedDuration).Sub(ttime.Now()) + cleanupTime := make(<-chan time.Time) + if cleanupTimeDuration < 0 { + logger.Info("Cleanup Duration has been exceeded; starting cleanup now", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + cleanupTime = mtask.time().After(time.Nanosecond) + } else { + cleanupTime = mtask.time().After(cleanupTimeDuration) + } + cleanupTimeBool := make(chan struct{}) + go func() { + <-cleanupTime + close(cleanupTimeBool) + }() + // wait for the cleanup time to elapse, signalled by cleanupTimeBool + for !mtask.waitEvent(cleanupTimeBool) { + } + + // wait for apitaskstatus.TaskStopped to be sent + ok := mtask.waitForStopReported() + if !ok { + logger.Error("Aborting cleanup for task as it is not reported as stopped", logger.Fields{ + field.TaskARN: mtask.Arn, + field.SentStatus: mtask.GetSentStatus().String(), + }) + return + } + + logger.Info("Cleaning up task's containers and data", logger.Fields{ + field.TaskARN: mtask.Arn, + }) + + // For the duration of this, simply discard any task events; this ensures the + // speedy processing of other events for other tasks + // discard events while the task is 
being removed from engine state + go mtask.discardEvents() + mtask.engine.sweepTask(mtask.Task) + mtask.engine.deleteTask(mtask.Task) + + // The last thing to do here is to cancel the context, which should cancel + // all outstanding go routines associated with this managed task. + mtask.cancel() +} + +func (mtask *managedTask) discardEvents() { + for { + select { + case <-mtask.dockerMessages: + case <-mtask.acsMessages: + case <-mtask.resourceStateChangeEvent: + case <-mtask.ctx.Done(): + return + } + } +} + +// waitForStopReported will wait for the task to be reported stopped and return true, or will time-out and return false. +// Messages on the mtask.dockerMessages and mtask.acsMessages channels will be handled while this function is waiting. +func (mtask *managedTask) waitForStopReported() bool { + stoppedSentBool := make(chan struct{}) + taskStopped := false + go func() { + for i := 0; i < _maxStoppedWaitTimes; i++ { + // ensure that we block until apitaskstatus.TaskStopped is actually sent + sentStatus := mtask.GetSentStatus() + if sentStatus >= apitaskstatus.TaskStopped { + taskStopped = true + break + } + logger.Warn("Blocking cleanup until the task has been reported stopped", logger.Fields{ + field.TaskARN: mtask.Arn, + field.SentStatus: sentStatus.String(), + "Attempt": i + 1, + "MaxAttempts": _maxStoppedWaitTimes, + }) + mtask._time.Sleep(_stoppedSentWaitInterval) + } + stoppedSentBool <- struct{}{} + close(stoppedSentBool) + }() + // wait for apitaskstatus.TaskStopped to be sent + for !mtask.waitEvent(stoppedSentBool) { + } + return taskStopped +} diff --git a/agent/engine/task_manager_data_test.go b/agent/engine/task_manager_data_test.go new file mode 100644 index 00000000000..a5807c56073 --- /dev/null +++ b/agent/engine/task_manager_data_test.go @@ -0,0 +1,306 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "context" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +/* +This file contains unit tests that specifically check the data persisting behavior of the task manager. 
+*/ + +const ( + dataTestVolumeName = "test-vol" + dataTestContainerName1 = "test-name-1" + dataTestContainerName2 = "test-name-2" +) + +var ( + testErr = errors.New("test error") +) + +func TestHandleDesiredStatusChangeSaveData(t *testing.T) { + testCases := []struct { + name string + desiredStatus apitaskstatus.TaskStatus + targetDesiredStatus apitaskstatus.TaskStatus + shouldSave bool + }{ + { + name: "non-redundant desired status change is saved", + desiredStatus: apitaskstatus.TaskRunning, + targetDesiredStatus: apitaskstatus.TaskStopped, + shouldSave: true, + }, + { + name: "redundant desired status change is saved", + desiredStatus: apitaskstatus.TaskStopped, + targetDesiredStatus: apitaskstatus.TaskStopped, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + mtask := managedTask{ + Task: &apitask.Task{ + Arn: testTaskARN, + DesiredStatusUnsafe: tc.desiredStatus, + }, + engine: &DockerTaskEngine{ + dataClient: dataClient, + }, + taskStopWG: utilsync.NewSequentialWaitGroup(), + } + mtask.handleDesiredStatusChange(tc.targetDesiredStatus, int64(1)) + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + if tc.shouldSave { + assert.Len(t, tasks, 1) + } else { + assert.Len(t, tasks, 0) + } + }) + } +} + +func TestHandleContainerStateChangeSaveData(t *testing.T) { + testCases := []struct { + name string + knownState apicontainerstatus.ContainerStatus + nextState apicontainerstatus.ContainerStatus + taskKnownState apitaskstatus.TaskStatus + taskDesiredState apitaskstatus.TaskStatus + err error + shouldSaveContainer bool + }{ + { + name: "non-redundant container state change is saved", + knownState: apicontainerstatus.ContainerCreated, + nextState: apicontainerstatus.ContainerRunning, + taskKnownState: apitaskstatus.TaskCreated, + taskDesiredState: apitaskstatus.TaskRunning, + shouldSaveContainer: true, + }, + { + name: "non-redundant container state change 
with error is saved", + knownState: apicontainerstatus.ContainerPulled, + nextState: apicontainerstatus.ContainerCreated, + taskKnownState: apitaskstatus.TaskPulled, + taskDesiredState: apitaskstatus.TaskCreated, + err: testErr, + shouldSaveContainer: true, + }, + { + name: "redundant container state change is not saved", + knownState: apicontainerstatus.ContainerCreated, + nextState: apicontainerstatus.ContainerCreated, + taskKnownState: apitaskstatus.TaskCreated, + taskDesiredState: apitaskstatus.TaskCreated, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + eventStreamName := "TestHandleContainerStateChangeSaveData" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + containerChangeEventStream := eventstream.NewEventStream(eventStreamName, ctx) + containerChangeEventStream.StartListening() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + mTask := managedTask{ + Task: &apitask.Task{ + Arn: testTaskARN, + DesiredStatusUnsafe: tc.taskDesiredState, + KnownStatusUnsafe: tc.taskKnownState, + Containers: []*apicontainer.Container{ + newTestContainer(dataTestContainerName1, tc.knownState, tc.nextState), + newTestContainer(dataTestContainerName2, tc.knownState, tc.nextState), + }, + }, + engine: &DockerTaskEngine{ + dataClient: dataClient, + }, + ctx: context.TODO(), + containerChangeEventStream: containerChangeEventStream, + stateChangeEvents: make(chan statechange.Event), + } + defer discardEvents(mTask.stateChangeEvents)() + + containerChange := dockerContainerChange{ + container: mTask.Containers[0], + event: dockerapi.DockerContainerChangeEvent{ + Status: tc.nextState, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{}, + }, + } + mTask.handleContainerChange(containerChange) + containers, err := dataClient.GetContainers() + require.NoError(t, err) + if tc.shouldSaveContainer { + assert.Len(t, containers, 1) + } else { + assert.Len(t, containers, 0) + } + + // All test cases 
in this test are made such that the container state change is not + // causing task state change. Check that the task is not saved in these case. + tasks, err := dataClient.GetTasks() + assert.Len(t, tasks, 0) + }) + } +} + +func TestHandleContainerChangeWithTaskStateChangeSaveData(t *testing.T) { + eventStreamName := "TestHandleContainerChangeWithTaskStateChangeSaveData" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + containerChangeEventStream := eventstream.NewEventStream(eventStreamName, ctx) + containerChangeEventStream.StartListening() + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + mTask := managedTask{ + Task: &apitask.Task{ + Arn: testTaskARN, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskCreated, + Containers: []*apicontainer.Container{ + newTestContainer(dataTestContainerName1, apicontainerstatus.ContainerCreated, + apicontainerstatus.ContainerRunning), + }, + }, + engine: &DockerTaskEngine{ + dataClient: dataClient, + }, + ctx: context.TODO(), + containerChangeEventStream: containerChangeEventStream, + stateChangeEvents: make(chan statechange.Event), + } + defer discardEvents(mTask.stateChangeEvents)() + + containerChange := dockerContainerChange{ + container: mTask.Containers[0], + event: dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{}, + }, + } + mTask.handleContainerChange(containerChange) + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + + tasks, err := dataClient.GetTasks() + assert.Len(t, tasks, 1) +} + +func TestHandleResourceStateChangeSaveData(t *testing.T) { + testCases := []struct { + name string + knownState resourcestatus.ResourceStatus + nextState resourcestatus.ResourceStatus + err error + shouldSave bool + }{ + { + name: "non-redundant resource state change is saved", + knownState: 
resourcestatus.ResourceStatus(volume.VolumeStatusNone), + nextState: resourcestatus.ResourceStatus(volume.VolumeCreated), + shouldSave: true, + }, + { + name: "redundant resource state change is not saved", + knownState: resourcestatus.ResourceStatus(volume.VolumeCreated), + nextState: resourcestatus.ResourceStatus(volume.VolumeCreated), + }, + { + name: "non-redundant resource state change with error is saved", + shouldSave: true, + knownState: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + nextState: resourcestatus.ResourceStatus(volume.VolumeCreated), + err: testErr, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + res := &volume.VolumeResource{Name: dataTestVolumeName} + res.SetKnownStatus(tc.knownState) + mTask := managedTask{ + Task: &apitask.Task{ + Arn: testTaskARN, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{ + dataClient: dataClient, + }, + } + mTask.AddResource(resourcetype.DockerVolumeKey, res) + mTask.handleResourceStateChange(resourceStateChange{ + res, tc.nextState, tc.err, + }) + + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + if tc.shouldSave { + assert.Len(t, tasks, 1) + } else { + assert.Len(t, tasks, 0) + } + }) + } +} + +func newTestContainer(name string, knownStatus, desiredStatus apicontainerstatus.ContainerStatus) *apicontainer.Container { + return &apicontainer.Container{ + Name: name, + TaskARNUnsafe: testTaskARN, + KnownStatusUnsafe: knownStatus, + DesiredStatusUnsafe: desiredStatus, + } +} diff --git a/agent/engine/task_manager_test.go b/agent/engine/task_manager_test.go new file mode 100644 index 00000000000..a1d0b3a2c73 --- /dev/null +++ b/agent/engine/task_manager_test.go @@ -0,0 +1,2020 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package engine + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + mock_taskresource "github.com/aws/amazon-ecs-agent/agent/taskresource/mocks" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/data" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/testdata" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + 
"github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + "github.com/stretchr/testify/assert" + + "github.com/golang/mock/gomock" +) + +func TestHandleEventError(t *testing.T) { + testCases := []struct { + Name string + EventStatus apicontainerstatus.ContainerStatus + CurrentContainerKnownStatus apicontainerstatus.ContainerStatus + ImagePullBehavior config.ImagePullBehaviorType + Error apierrors.NamedError + ExpectedContainerKnownStatusSet bool + ExpectedContainerKnownStatus apicontainerstatus.ContainerStatus + ExpectedContainerDesiredStatusStopped bool + ExpectedTaskDesiredStatusStopped bool + ExpectedOK bool + }{ + { + Name: "Stop timed out", + EventStatus: apicontainerstatus.ContainerStopped, + CurrentContainerKnownStatus: apicontainerstatus.ContainerRunning, + Error: &dockerapi.DockerTimeoutError{}, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerRunning, + ExpectedOK: false, + }, + { + Name: "Retriable error with stop", + EventStatus: apicontainerstatus.ContainerStopped, + CurrentContainerKnownStatus: apicontainerstatus.ContainerRunning, + Error: &dockerapi.CannotStopContainerError{ + FromError: errors.New(""), + }, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerRunning, + ExpectedOK: false, + }, + { + Name: "Unretriable error with Stop", + EventStatus: apicontainerstatus.ContainerStopped, + CurrentContainerKnownStatus: apicontainerstatus.ContainerRunning, + Error: &dockerapi.CannotStopContainerError{ + FromError: dockerapi.NoSuchContainerError{}, + }, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerStopped, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: true, + }, + { + Name: "Pull failed", + 
Error: &dockerapi.DockerTimeoutError{}, + ExpectedContainerKnownStatusSet: true, + EventStatus: apicontainerstatus.ContainerPulled, + ExpectedOK: true, + }, + { + Name: "Container vanished betweeen pull and running", + EventStatus: apicontainerstatus.ContainerRunning, + CurrentContainerKnownStatus: apicontainerstatus.ContainerPulled, + Error: &ContainerVanishedError{}, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerPulled, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Inspect failed during start", + EventStatus: apicontainerstatus.ContainerRunning, + CurrentContainerKnownStatus: apicontainerstatus.ContainerCreated, + Error: &dockerapi.CannotInspectContainerError{ + FromError: errors.New("error"), + }, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerCreated, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Start timed out", + EventStatus: apicontainerstatus.ContainerRunning, + CurrentContainerKnownStatus: apicontainerstatus.ContainerCreated, + Error: &dockerapi.DockerTimeoutError{}, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerCreated, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Start failed with EOF error", + EventStatus: apicontainerstatus.ContainerRunning, + CurrentContainerKnownStatus: apicontainerstatus.ContainerCreated, + Error: &dockerapi.CannotStartContainerError{ + FromError: errors.New("error during connect: Post http://%2Fvar%2Frun%2Fdocker.sock/v1.19/containers/containerid/start: EOF"), + }, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerCreated, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Inspect failed during create", + EventStatus: apicontainerstatus.ContainerCreated, + 
CurrentContainerKnownStatus: apicontainerstatus.ContainerPulled, + Error: &dockerapi.CannotInspectContainerError{ + FromError: errors.New("error"), + }, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerPulled, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Create timed out", + EventStatus: apicontainerstatus.ContainerCreated, + CurrentContainerKnownStatus: apicontainerstatus.ContainerPulled, + Error: &dockerapi.DockerTimeoutError{}, + ExpectedContainerKnownStatusSet: true, + ExpectedContainerKnownStatus: apicontainerstatus.ContainerPulled, + ExpectedContainerDesiredStatusStopped: true, + ExpectedOK: false, + }, + { + Name: "Pull image fails and task fails", + EventStatus: apicontainerstatus.ContainerPulled, + Error: &dockerapi.CannotPullContainerError{ + FromError: errors.New("error"), + }, + ImagePullBehavior: config.ImagePullAlwaysBehavior, + ExpectedContainerKnownStatusSet: false, + ExpectedTaskDesiredStatusStopped: true, + ExpectedOK: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + container := &apicontainer.Container{ + KnownStatusUnsafe: tc.CurrentContainerKnownStatus, + } + containerChange := dockerContainerChange{ + container: container, + event: dockerapi.DockerContainerChangeEvent{ + Status: tc.EventStatus, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + Error: tc.Error, + }, + }, + } + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + }, + engine: &DockerTaskEngine{}, + cfg: &config.Config{ImagePullBehavior: tc.ImagePullBehavior}, + } + ok := mtask.handleEventError(containerChange, tc.CurrentContainerKnownStatus) + assert.Equal(t, tc.ExpectedOK, ok, "to proceed") + if tc.ExpectedContainerKnownStatusSet { + containerKnownStatus := containerChange.container.GetKnownStatus() + assert.Equal(t, tc.ExpectedContainerKnownStatus, containerKnownStatus, + "expected container known status %s != %s", 
tc.ExpectedContainerKnownStatus.String(), containerKnownStatus.String()) + } + if tc.ExpectedContainerDesiredStatusStopped { + containerDesiredStatus := containerChange.container.GetDesiredStatus() + assert.Equal(t, apicontainerstatus.ContainerStopped, containerDesiredStatus, + "desired status %s != %s", apicontainerstatus.ContainerStopped.String(), containerDesiredStatus.String()) + } + assert.Equal(t, tc.Error.ErrorName(), containerChange.container.ApplyingError.ErrorName()) + }) + } +} + +func TestContainerNextState(t *testing.T) { + testCases := []struct { + containerCurrentStatus apicontainerstatus.ContainerStatus + containerDesiredStatus apicontainerstatus.ContainerStatus + expectedContainerStatus apicontainerstatus.ContainerStatus + expectedTransitionActionable bool + reason error + }{ + // NONE -> RUNNING transition is allowed and actionable, when desired is Running + // The expected next status is Pulled + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled, true, nil}, + // NONE -> RESOURCES_PROVISIONED transition is allowed and actionable, when desired + // is Running. 
The expected next status is Pulled
+ {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled, true, nil},
+ // NONE -> NONE transition is not allowed and is not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // NONE -> STOPPED transition will result in STOPPED and is allowed, but not
+ // actionable, when desired is STOPPED
+ {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, false, nil},
+ // PULLED -> RUNNING transition is allowed and actionable, when desired is Running
+ // The expected next status is Created
+ {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated, true, nil},
+ // PULLED -> RESOURCES_PROVISIONED transition is allowed and actionable, when desired
+ // is Running. 
The expected next status is Created
+ {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerCreated, true, nil},
+ // PULLED -> PULLED transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // PULLED -> NONE transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // PULLED -> STOPPED transition will result in STOPPED and is allowed, but not
+ // actionable, when desired is STOPPED
+ {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, false, nil},
+ // CREATED -> RUNNING transition is allowed and actionable, when desired is Running
+ // The expected next status is Running
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning, true, nil},
+ // CREATED -> RESOURCES_PROVISIONED transition is allowed and actionable, when desired
+ // is Running. 
The expected next status is Running
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerRunning, true, nil},
+ // CREATED -> CREATED transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // CREATED -> NONE transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // CREATED -> PULLED transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // CREATED -> STOPPED transition will result in STOPPED and is allowed, but not
+ // actionable, when desired is STOPPED
+ {apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, false, nil},
+ // RUNNING -> STOPPED transition is allowed and actionable, when desired is Running
+ // The expected next status is STOPPED
+ {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, true, nil},
+ // RUNNING -> RUNNING transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // RUNNING -> RESOURCES_PROVISIONED is allowed when steady state status is
+ // RESOURCES_PROVISIONED and desired is RESOURCES_PROVISIONED
+ {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerResourcesProvisioned, 
apicontainerstatus.ContainerResourcesProvisioned, true, nil}, + // RUNNING -> NONE transition is not allowed and not actionable, + // when desired is Running + {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr}, + // RUNNING -> PULLED transition is not allowed and not actionable, + // when desired is Running + {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr}, + // RUNNING -> CREATED transition is not allowed and not actionable, + // when desired is Running + {apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr}, + + // RESOURCES_PROVISIONED -> RESOURCES_PROVISIONED transition is not allowed and not actionable, + // when desired is Running + {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr}, + // RESOURCES_PROVISIONED -> RUNNING transition is not allowed and not actionable, + // when desired is Running + {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr}, + // RESOURCES_PROVISIONED -> STOPPED transition is allowed and actionable, when desired + // is Running. 
The expected next status is STOPPED
+ {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, true, nil},
+ // RESOURCES_PROVISIONED -> NONE transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // RESOURCES_PROVISIONED -> PULLED transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ // RESOURCES_PROVISIONED -> CREATED transition is not allowed and not actionable,
+ // when desired is Running
+ {apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStatusNone, false, dependencygraph.ContainerPastDesiredStatusErr},
+ }
+
+ steadyStates := []apicontainerstatus.ContainerStatus{apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerResourcesProvisioned}
+
+ for _, tc := range testCases {
+ for _, steadyState := range steadyStates {
+ t.Run(fmt.Sprintf("%s to %s Transition with Steady State %s",
+ tc.containerCurrentStatus.String(), tc.containerDesiredStatus.String(), steadyState.String()), func(t *testing.T) {
+ if tc.containerDesiredStatus == apicontainerstatus.ContainerResourcesProvisioned &&
+ steadyState < tc.containerDesiredStatus {
+ t.Skipf("Skipping because of unassumable steady state [%s] and desired state [%s]",
+ steadyState.String(), tc.containerDesiredStatus.String())
+ }
+ container := apicontainer.NewContainerWithSteadyState(steadyState)
+ container.DesiredStatusUnsafe = tc.containerDesiredStatus
+ container.KnownStatusUnsafe = tc.containerCurrentStatus
+ task := &managedTask{
+ Task: &apitask.Task{
+ 
Containers: []*apicontainer.Container{ + container, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{}, + } + transition := task.containerNextState(container) + t.Logf("%s %v %v", transition.nextState, transition.actionRequired, transition.reason) + assert.Equal(t, tc.expectedContainerStatus, transition.nextState, "Mismatch for expected next state") + assert.Equal(t, tc.expectedTransitionActionable, transition.actionRequired, "Mismatch for expected actionable flag") + assert.Equal(t, tc.reason, transition.reason, "Mismatch for expected reason") + }) + } + } +} + +func TestContainerNextStateWithTransitionDependencies(t *testing.T) { + testCases := []struct { + name string + containerCurrentStatus apicontainerstatus.ContainerStatus + containerDesiredStatus apicontainerstatus.ContainerStatus + containerDependentStatus apicontainerstatus.ContainerStatus + dependencyCurrentStatus apicontainerstatus.ContainerStatus + dependencySatisfiedStatus apicontainerstatus.ContainerStatus + expectedContainerStatus apicontainerstatus.ContainerStatus + expectedTransitionActionable bool + reason error + }{ + // NONE -> RUNNING transition is not allowed and not actionable, when pull depends on create and dependency is None + { + name: "pull depends on created, dependency is none", + containerCurrentStatus: apicontainerstatus.ContainerStatusNone, + containerDesiredStatus: apicontainerstatus.ContainerRunning, + containerDependentStatus: apicontainerstatus.ContainerPulled, + dependencyCurrentStatus: apicontainerstatus.ContainerStatusNone, + dependencySatisfiedStatus: apicontainerstatus.ContainerCreated, + expectedContainerStatus: apicontainerstatus.ContainerStatusNone, + expectedTransitionActionable: false, + reason: dependencygraph.ErrContainerDependencyNotResolved, + }, + // NONE -> RUNNING transition is not allowed and not actionable, when desired is Running and dependency is Created + { + name: "pull depends on running, dependency is created", + 
containerCurrentStatus: apicontainerstatus.ContainerStatusNone, + containerDesiredStatus: apicontainerstatus.ContainerRunning, + containerDependentStatus: apicontainerstatus.ContainerPulled, + dependencyCurrentStatus: apicontainerstatus.ContainerCreated, + dependencySatisfiedStatus: apicontainerstatus.ContainerRunning, + expectedContainerStatus: apicontainerstatus.ContainerStatusNone, + expectedTransitionActionable: false, + reason: dependencygraph.ErrContainerDependencyNotResolved, + }, + // NONE -> RUNNING transition is allowed and actionable, when desired is Running and dependency is Running + // The expected next status is Pulled + { + name: "pull depends on running, dependency is running, next status is pulled", + containerCurrentStatus: apicontainerstatus.ContainerStatusNone, + containerDesiredStatus: apicontainerstatus.ContainerRunning, + containerDependentStatus: apicontainerstatus.ContainerPulled, + dependencyCurrentStatus: apicontainerstatus.ContainerRunning, + dependencySatisfiedStatus: apicontainerstatus.ContainerRunning, + expectedContainerStatus: apicontainerstatus.ContainerPulled, + expectedTransitionActionable: true, + }, + // NONE -> RUNNING transition is allowed and actionable, when desired is Running and dependency is Stopped + // The expected next status is Pulled + { + name: "pull depends on running, dependency is stopped, next status is pulled", + containerCurrentStatus: apicontainerstatus.ContainerStatusNone, + containerDesiredStatus: apicontainerstatus.ContainerRunning, + containerDependentStatus: apicontainerstatus.ContainerPulled, + dependencyCurrentStatus: apicontainerstatus.ContainerStopped, + dependencySatisfiedStatus: apicontainerstatus.ContainerRunning, + expectedContainerStatus: apicontainerstatus.ContainerPulled, + expectedTransitionActionable: true, + }, + // NONE -> RUNNING transition is allowed and actionable, when desired is Running and dependency is None and + // dependent status is Running + { + name: "create depends on 
running, dependency is none, next status is pulled", + containerCurrentStatus: apicontainerstatus.ContainerStatusNone, + containerDesiredStatus: apicontainerstatus.ContainerRunning, + containerDependentStatus: apicontainerstatus.ContainerCreated, + dependencyCurrentStatus: apicontainerstatus.ContainerStatusNone, + dependencySatisfiedStatus: apicontainerstatus.ContainerRunning, + expectedContainerStatus: apicontainerstatus.ContainerPulled, + expectedTransitionActionable: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dependencyName := "dependency" + container := &apicontainer.Container{ + DesiredStatusUnsafe: tc.containerDesiredStatus, + KnownStatusUnsafe: tc.containerCurrentStatus, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + container.BuildContainerDependency(dependencyName, tc.dependencySatisfiedStatus, tc.containerDependentStatus) + dependency := &apicontainer.Container{ + Name: dependencyName, + KnownStatusUnsafe: tc.dependencyCurrentStatus, + } + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + container, + dependency, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{}, + } + transition := task.containerNextState(container) + assert.Equal(t, tc.expectedContainerStatus, transition.nextState, + "Expected next state [%s] != Retrieved next state [%s]", + tc.expectedContainerStatus.String(), transition.nextState.String()) + assert.Equal(t, tc.expectedTransitionActionable, transition.actionRequired, "transition actionable") + assert.Equal(t, tc.reason, transition.reason, "transition possible") + }) + } +} + +func TestContainerNextStateWithDependencies(t *testing.T) { + testCases := []struct { + containerCurrentStatus apicontainerstatus.ContainerStatus + containerDesiredStatus apicontainerstatus.ContainerStatus + dependencyCurrentStatus apicontainerstatus.ContainerStatus + 
expectedContainerStatus apicontainerstatus.ContainerStatus + expectedTransitionActionable bool + reason error + }{ + // NONE -> RUNNING transition is not allowed and not actionable, when desired is Running and dependency is None + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStatusNone, false, dependencygraph.DependentContainerNotResolvedErr}, + // NONE -> RUNNING transition is not allowed and not actionable, when desired is Running and dependency is Created + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated, apicontainerstatus.ContainerStatusNone, false, dependencygraph.DependentContainerNotResolvedErr}, + // NONE -> RUNNING transition is allowed and actionable, when desired is Running and dependency is Running + // The expected next status is Pulled + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled, true, nil}, + // NONE -> RUNNING transition is allowed and actionable, when desired is Running and dependency is Stopped + // The expected next status is Pulled + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerPulled, true, nil}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s to %s Transition", + tc.containerCurrentStatus.String(), tc.containerDesiredStatus.String()), func(t *testing.T) { + dependencyName := "dependency" + container := &apicontainer.Container{ + DesiredStatusUnsafe: tc.containerDesiredStatus, + KnownStatusUnsafe: tc.containerCurrentStatus, + SteadyStateDependencies: []string{dependencyName}, + } + dependency := &apicontainer.Container{ + Name: dependencyName, + KnownStatusUnsafe: tc.dependencyCurrentStatus, + } + task := &managedTask{ + Task: &apitask.Task{ + Containers: 
[]*apicontainer.Container{ + container, + dependency, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{}, + } + transition := task.containerNextState(container) + assert.Equal(t, tc.expectedContainerStatus, transition.nextState, + "Expected next state [%s] != Retrieved next state [%s]", + tc.expectedContainerStatus.String(), transition.nextState.String()) + assert.Equal(t, tc.expectedTransitionActionable, transition.actionRequired, "transition actionable") + assert.Equal(t, tc.reason, transition.reason, "transition possible") + }) + } +} + +func TestContainerNextStateWithPullCredentials(t *testing.T) { + testCases := []struct { + containerCurrentStatus apicontainerstatus.ContainerStatus + containerDesiredStatus apicontainerstatus.ContainerStatus + expectedContainerStatus apicontainerstatus.ContainerStatus + credentialsID string + useExecutionRole bool + expectedTransitionActionable bool + expectedTransitionReason error + }{ + // NONE -> RUNNING transition is not allowed when container is waiting for credentials + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerStatusNone, "not_existed", true, false, dependencygraph.CredentialsNotResolvedErr}, + // NONE -> RUNNING transition is allowed when the required execution credentials existed + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled, "existed", true, true, nil}, + // PULLED -> RUNNING transition is allowed even the credentials is required + {apicontainerstatus.ContainerPulled, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerCreated, "not_existed", true, true, nil}, + // NONE -> STOPPED transition is allowed even the credentials is required + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped, "not_existed", true, false, nil}, + // NONE -> RUNNING transition is allowed when the 
container doesn't use execution credentials + {apicontainerstatus.ContainerStatusNone, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled, "not_existed", false, true, nil}, + } + + taskEngine := &DockerTaskEngine{ + credentialsManager: credentials.NewManager(), + } + + err := taskEngine.credentialsManager.SetTaskCredentials(&credentials.TaskIAMRoleCredentials{ + ARN: "taskarn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + CredentialsID: "existed", + SessionToken: "token", + AccessKeyID: "id", + SecretAccessKey: "accesskey", + }, + }) + assert.NoError(t, err, "setting task credentials failed") + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s to %s transition with useExecutionRole %v and credentials %s", + tc.containerCurrentStatus.String(), + tc.containerDesiredStatus.String(), + tc.useExecutionRole, + tc.credentialsID), func(t *testing.T) { + container := &apicontainer.Container{ + DesiredStatusUnsafe: tc.containerDesiredStatus, + KnownStatusUnsafe: tc.containerCurrentStatus, + RegistryAuthentication: &apicontainer.RegistryAuthenticationData{ + Type: "ecr", + ECRAuthData: &apicontainer.ECRAuthData{ + UseExecutionRole: tc.useExecutionRole, + }, + }, + } + + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + container, + }, + ExecutionCredentialsID: tc.credentialsID, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: taskEngine, + credentialsManager: taskEngine.credentialsManager, + } + + transition := task.containerNextState(container) + assert.Equal(t, tc.expectedContainerStatus, transition.nextState, "Mismatch container status") + assert.Equal(t, tc.expectedTransitionReason, transition.reason, "Mismatch transition possible") + assert.Equal(t, tc.expectedTransitionActionable, transition.actionRequired, "Mismatch transition actionable") + }) + } +} + +func TestContainerNextStateWithAvoidingDanglingContainers(t *testing.T) { + container := &apicontainer.Container{ + 
DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + AppliedStatus: apicontainerstatus.ContainerRunning, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + } + + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + container, + }, + DesiredStatusUnsafe: apitaskstatus.TaskStopped, + }, + engine: &DockerTaskEngine{}, + } + transition := task.containerNextState(container) + assert.Equal(t, apicontainerstatus.ContainerStatusNone, transition.nextState, + "Expected next state [%s] != Retrieved next state [%s]", + apicontainerstatus.ContainerStatusNone.String(), transition.nextState.String()) + assert.Equal(t, false, transition.actionRequired, "Mismatch transition actionable") + assert.Equal(t, nil, transition.reason, "Mismatch transition possible") +} + +func TestStartContainerTransitionsWhenForwardTransitionPossible(t *testing.T) { + steadyStates := []apicontainerstatus.ContainerStatus{apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerResourcesProvisioned} + for _, steadyState := range steadyStates { + t.Run(fmt.Sprintf("Steady State is %s", steadyState.String()), func(t *testing.T) { + firstContainerName := "container1" + firstContainer := apicontainer.NewContainerWithSteadyState(steadyState) + firstContainer.KnownStatusUnsafe = apicontainerstatus.ContainerCreated + firstContainer.DesiredStatusUnsafe = apicontainerstatus.ContainerRunning + firstContainer.Name = firstContainerName + + secondContainerName := "container2" + secondContainer := apicontainer.NewContainerWithSteadyState(steadyState) + secondContainer.KnownStatusUnsafe = apicontainerstatus.ContainerPulled + secondContainer.DesiredStatusUnsafe = apicontainerstatus.ContainerRunning + secondContainer.Name = secondContainerName + + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + firstContainer, + 
secondContainer, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{}, + } + + pauseContainerName := "pause" + waitForAssertions := sync.WaitGroup{} + if steadyState == apicontainerstatus.ContainerResourcesProvisioned { + pauseContainer := apicontainer.NewContainerWithSteadyState(steadyState) + pauseContainer.KnownStatusUnsafe = apicontainerstatus.ContainerRunning + pauseContainer.DesiredStatusUnsafe = apicontainerstatus.ContainerResourcesProvisioned + pauseContainer.Name = pauseContainerName + task.Containers = append(task.Containers, pauseContainer) + waitForAssertions.Add(1) + } + + waitForAssertions.Add(2) + canTransition, _, transitions, _ := task.startContainerTransitions( + func(cont *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) { + if cont.Name == firstContainerName { + assert.Equal(t, nextStatus, apicontainerstatus.ContainerRunning, "Mismatch for first container next status") + } else if cont.Name == secondContainerName { + assert.Equal(t, nextStatus, apicontainerstatus.ContainerCreated, "Mismatch for second container next status") + } else if cont.Name == pauseContainerName { + assert.Equal(t, nextStatus, apicontainerstatus.ContainerResourcesProvisioned, "Mismatch for pause container next status") + } + waitForAssertions.Done() + }) + waitForAssertions.Wait() + assert.True(t, canTransition, "Mismatch for canTransition") + assert.NotEmpty(t, transitions) + if steadyState == apicontainerstatus.ContainerResourcesProvisioned { + assert.Len(t, transitions, 3) + pauseContainerTransition, ok := transitions[pauseContainerName] + assert.True(t, ok, "Expected pause container transition to be in the transitions map") + assert.Equal(t, pauseContainerTransition, apicontainerstatus.ContainerResourcesProvisioned, "Mismatch for pause container transition state") + } else { + assert.Len(t, transitions, 2) + } + firstContainerTransition, ok := transitions[firstContainerName] + assert.True(t, ok, "Expected first 
container transition to be in the transitions map") + assert.Equal(t, firstContainerTransition, apicontainerstatus.ContainerRunning, "Mismatch for first container transition state") + secondContainerTransition, ok := transitions[secondContainerName] + assert.True(t, ok, "Expected second container transition to be in the transitions map") + assert.Equal(t, secondContainerTransition, apicontainerstatus.ContainerCreated, "Mismatch for second container transition state") + }) + } +} + +func TestStartContainerTransitionsWhenForwardTransitionIsNotPossible(t *testing.T) { + firstContainerName := "container1" + firstContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + Name: firstContainerName, + } + secondContainerName := "container2" + secondContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + Name: secondContainerName, + } + pauseContainerName := "pause" + pauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerResourcesProvisioned) + pauseContainer.KnownStatusUnsafe = apicontainerstatus.ContainerResourcesProvisioned + pauseContainer.DesiredStatusUnsafe = apicontainerstatus.ContainerResourcesProvisioned + pauseContainer.Name = pauseContainerName + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + firstContainer, + secondContainer, + pauseContainer, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{}, + } + + canTransition, _, transitions, _ := task.startContainerTransitions( + func(cont *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) { + t.Error("Transition function should not be called when no transitions are possible") + }) + assert.False(t, canTransition) + assert.Empty(t, transitions) +} + +func 
TestStartContainerTransitionsInvokesHandleContainerChange(t *testing.T) { + eventStreamName := "TESTTASKENGINE" + + // Create a container with the intent to do + // CREATED -> STOPPED transition. This triggers + // `managedTask.handleContainerChange()` and generates the following + // events: + // 1. container state change event for Submit* API + // 2. task state change event for Submit* API + // 3. container state change event for the internal event stream + firstContainerName := "container1" + firstContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + Name: firstContainerName, + } + + containerChangeEventStream := eventstream.NewEventStream(eventStreamName, context.Background()) + containerChangeEventStream.StartListening() + + stateChangeEvents := make(chan statechange.Event) + + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + firstContainer, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{ + containerChangeEventStream: containerChangeEventStream, + stateChangeEvents: stateChangeEvents, + dataClient: data.NewNoopClient(), + }, + stateChangeEvents: stateChangeEvents, + containerChangeEventStream: containerChangeEventStream, + dockerMessages: make(chan dockerContainerChange), + ctx: context.TODO(), + } + + eventsGenerated := sync.WaitGroup{} + eventsGenerated.Add(2) + containerChangeEventStream.Subscribe(eventStreamName, func(events ...interface{}) error { + assert.NotNil(t, events) + assert.Len(t, events, 1) + event := events[0] + containerChangeEvent, ok := event.(dockerapi.DockerContainerChangeEvent) + assert.True(t, ok) + assert.Equal(t, containerChangeEvent.Status, apicontainerstatus.ContainerStopped) + eventsGenerated.Done() + return nil + }) + defer containerChangeEventStream.Unsubscribe(eventStreamName) + + // account for container and task state change events for Submit* 
API + go func() { + <-stateChangeEvents + <-stateChangeEvents + eventsGenerated.Done() + }() + + go task.waitEvent(nil) + canTransition, _, transitions, _ := task.startContainerTransitions( + func(cont *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) { + t.Error("Invalid code path. The transition function should not be invoked when transitioning container from CREATED -> STOPPED") + }) + assert.True(t, canTransition) + assert.Empty(t, transitions) + eventsGenerated.Wait() +} + +func TestWaitForContainerTransitionsForNonTerminalTask(t *testing.T) { + acsMessages := make(chan acsTransition) + dockerMessages := make(chan dockerContainerChange) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + task := &managedTask{ + ctx: ctx, + acsMessages: acsMessages, + dockerMessages: dockerMessages, + Task: &apitask.Task{ + Containers: []*apicontainer.Container{}, + }, + } + + transitionChange := make(chan struct{}, 2) + transitionChangeContainer := make(chan string, 2) + + firstContainerName := "container1" + secondContainerName := "container2" + + // populate the transitions map with transitions for two + // containers. We expect two sets of events to be consumed + // by `waitForContainerTransition` + transitions := make(map[string]string) + transitions[firstContainerName] = apicontainerstatus.ContainerRunning.String() + transitions[secondContainerName] = apicontainerstatus.ContainerRunning.String() + + go func() { + // Send "transitions completed" messages. These are being + // sent out of order for no particular reason. We should be + // resilient to the ordering of these messages anyway. 
+ transitionChange <- struct{}{} + transitionChangeContainer <- secondContainerName + transitionChange <- struct{}{} + transitionChangeContainer <- firstContainerName + }() + + // waitForTransition will block until it receives events + // sent by the go routine defined above + task.waitForTransition(transitions, transitionChange, transitionChangeContainer) +} + +// TestWaitForContainerTransitionsForTerminalTask verifies that the +// `waitForContainerTransition` method doesn't wait for any container +// transitions when the task's desired status is STOPPED and if all +// containers in the task are in PULLED state +func TestWaitForContainerTransitionsForTerminalTask(t *testing.T) { + acsMessages := make(chan acsTransition) + dockerMessages := make(chan dockerContainerChange) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + task := &managedTask{ + acsMessages: acsMessages, + dockerMessages: dockerMessages, + Task: &apitask.Task{ + Containers: []*apicontainer.Container{}, + KnownStatusUnsafe: apitaskstatus.TaskStopped, + }, + ctx: ctx, + } + + transitionChange := make(chan struct{}, 2) + transitionChangeContainer := make(chan string, 2) + + firstContainerName := "container1" + secondContainerName := "container2" + transitions := make(map[string]string) + transitions[firstContainerName] = apicontainerstatus.ContainerPulled.String() + transitions[secondContainerName] = apicontainerstatus.ContainerPulled.String() + + // Even though there are two keys in the transitions map, send + // only one event. 
This tests that `waitForContainerTransition` doesn't + // block to receive two events and will still progress + go func() { + transitionChange <- struct{}{} + transitionChangeContainer <- secondContainerName + }() + task.waitForTransition(transitions, transitionChange, transitionChangeContainer) +} + +func TestOnContainersUnableToTransitionStateForDesiredStoppedTask(t *testing.T) { + stateChangeEvents := make(chan statechange.Event) + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{}, + DesiredStatusUnsafe: apitaskstatus.TaskStopped, + }, + engine: &DockerTaskEngine{ + stateChangeEvents: stateChangeEvents, + }, + stateChangeEvents: stateChangeEvents, + ctx: context.TODO(), + } + eventsGenerated := sync.WaitGroup{} + eventsGenerated.Add(1) + + go func() { + event := <-stateChangeEvents + assert.Equal(t, event.(api.TaskStateChange).Reason, taskUnableToTransitionToStoppedReason) + eventsGenerated.Done() + }() + + task.handleContainersUnableToTransitionState() + eventsGenerated.Wait() + + assert.Equal(t, task.GetDesiredStatus(), apitaskstatus.TaskStopped) +} + +func TestOnContainersUnableToTransitionStateForDesiredRunningTask(t *testing.T) { + firstContainerName := "container1" + firstContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerCreated, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + Name: firstContainerName, + } + task := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + firstContainer, + }, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{ + dataClient: data.NewNoopClient(), + }, + ctx: context.TODO(), + } + + task.handleContainersUnableToTransitionState() + assert.Equal(t, task.GetDesiredStatus(), apitaskstatus.TaskStopped) + assert.Equal(t, task.Containers[0].GetDesiredStatus(), apicontainerstatus.ContainerStopped) +} + +// TODO: Test progressContainers workflow + +func 
TestHandleStoppedToSteadyStateTransition(t *testing.T) { + taskEngine := &DockerTaskEngine{} + firstContainerName := "container1" + firstContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + Name: firstContainerName, + } + secondContainerName := "container2" + secondContainer := &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + Name: secondContainerName, + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + mTask := &managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + firstContainer, + secondContainer, + }, + Arn: "task1", + }, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + ctx: ctx, + } + taskEngine.managedTasks = make(map[string]*managedTask) + taskEngine.managedTasks["task1"] = mTask + + var waitForTransitionFunctionInvocation sync.WaitGroup + waitForTransitionFunctionInvocation.Add(1) + transitionFunction := func(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { + assert.Equal(t, firstContainerName, container.Name, + "Mismatch in container reference in transition function") + waitForTransitionFunctionInvocation.Done() + return dockerapi.DockerContainerMetadata{} + } + + taskEngine.containerStatusToTransitionFunction = map[apicontainerstatus.ContainerStatus]transitionApplyFunc{ + apicontainerstatus.ContainerStopped: transitionFunction, + } + + // Received RUNNING event, known status is not STOPPED, expect this to + // be a noop. 
Assertions in transitionFunction asserts that as well + mTask.handleStoppedToRunningContainerTransition( + apicontainerstatus.ContainerRunning, secondContainer) + + // Start building preconditions and assertions for STOPPED -> RUNNING + // transition that will be triggered by next invocation of + // handleStoppedToRunningContainerTransition + + // This wait group ensures that a docker message is generated as a + // result of the transition function + var waitForDockerMessageAssertions sync.WaitGroup + waitForDockerMessageAssertions.Add(1) + go func() { + dockerMessage := <-mTask.dockerMessages + assert.Equal(t, apicontainerstatus.ContainerStopped, dockerMessage.event.Status, + "Mismatch in event status") + assert.Equal(t, firstContainerName, dockerMessage.container.Name, + "Mismatch in container reference in event") + waitForDockerMessageAssertions.Done() + }() + // Received RUNNING, known status is STOPPED, expect this to invoke + // transition function once + mTask.handleStoppedToRunningContainerTransition( + apicontainerstatus.ContainerRunning, firstContainer) + + // Wait for wait groups to be done + waitForTransitionFunctionInvocation.Wait() + waitForDockerMessageAssertions.Wait() + + // We now have an empty transition function map. Any further transitions + // should be noops + delete(taskEngine.containerStatusToTransitionFunction, apicontainerstatus.ContainerStopped) + // Simulate getting RUNNING event for a STOPPED container 10 times. + // All of these should be noops. 10 is chosen arbitrarily. 
Any number > 0 + // should be fine here + for i := 0; i < 10; i++ { + mTask.handleStoppedToRunningContainerTransition( + apicontainerstatus.ContainerRunning, firstContainer) + } +} + +func TestCleanupTask(t *testing.T) { + cfg := getTestConfig() + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + mTask.Task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + mTask.AddResource("mockResource", mockResource) + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskStopped) + container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: 
dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mockResource.EXPECT().Cleanup() + mockResource.EXPECT().GetName() + mTask.cleanupTask(taskStoppedDuration) +} + +func TestCleanupTaskWaitsForStoppedSent(t *testing.T) { + cfg := getTestConfig() + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskRunning) + container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + timesCalled := 0 + callsExpected := 3 + mockTime.EXPECT().Sleep(gomock.Any()).AnyTimes().Do(func(_ interface{}) { + timesCalled++ + if timesCalled 
== callsExpected { + mTask.SetSentStatus(apitaskstatus.TaskStopped) + } else if timesCalled > callsExpected { + t.Errorf("Sleep called too many times, called %d but expected %d", timesCalled, callsExpected) + } + }) + assert.Equal(t, apitaskstatus.TaskRunning, mTask.GetSentStatus()) + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return( + map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mTask.cleanupTask(taskStoppedDuration) + assert.Equal(t, apitaskstatus.TaskStopped, mTask.GetSentStatus()) +} + +func TestCleanupTaskGivesUpIfWaitingTooLong(t *testing.T) { + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + cfg := getTestConfig() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + cfg: taskEngine.cfg, + } + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskRunning) + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := 
make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + _maxStoppedWaitTimes = 10 + defer func() { + // reset + _maxStoppedWaitTimes = int(maxStoppedWaitTimes) + }() + mockTime.EXPECT().Sleep(gomock.Any()).Times(_maxStoppedWaitTimes) + assert.Equal(t, apitaskstatus.TaskRunning, mTask.GetSentStatus()) + + // No cleanup expected + mTask.cleanupTask(taskStoppedDuration) + assert.Equal(t, apitaskstatus.TaskRunning, mTask.GetSentStatus()) +} + +func TestCleanupTaskENIs(t *testing.T) { + cfg := getTestConfig() + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + mTask.AddTaskENI(&apieni.ENI{ + ID: "TestCleanupTaskENIs", + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Primary: true, + Address: ipv4, + }, + }, + MacAddress: mac, + IPV6Addresses: []*apieni.ENIIPV6Address{ + { + Address: ipv6, + }, + }, + }) + + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskStopped) + container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + 
taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mockState.EXPECT().RemoveENIAttachment(mac) + mockState.EXPECT().ENIByMac(mac) + mTask.cleanupTask(taskStoppedDuration) +} + +func TestTaskWaitForExecutionCredentials(t *testing.T) { + tcs := []struct { + errs []error + result bool + msg string + }{ + { + errs: []error{ + dependencygraph.CredentialsNotResolvedErr, + dependencygraph.ContainerPastDesiredStatusErr, + fmt.Errorf("other error"), + }, + result: true, + msg: "managed task should wait for credentials if the credentials dependency is not resolved", + }, + { + result: false, + msg: "managed task does not need to wait for credentials if there is no error", + }, + { + errs: []error{ + dependencygraph.ContainerPastDesiredStatusErr, + dependencygraph.DependentContainerNotResolvedErr, + fmt.Errorf("other errors"), + }, + result: false, + msg: "managed task does not need to wait for credentials if there is no credentials dependency error", + }, + } + + for _, tc := range tcs { + t.Run(fmt.Sprintf("%v", tc.errs), func(t *testing.T) { + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockTimer := mock_ttime.NewMockTimer(ctrl) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + task := &managedTask{ + ctx: ctx, + Task: &apitask.Task{ + KnownStatusUnsafe: apitaskstatus.TaskRunning, + 
DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + _time: mockTime, + acsMessages: make(chan acsTransition), + } + if tc.result { + mockTime.EXPECT().AfterFunc(gomock.Any(), gomock.Any()).Return(mockTimer) + mockTimer.EXPECT().Stop() + go func() { task.acsMessages <- acsTransition{desiredStatus: apitaskstatus.TaskRunning} }() + } + + assert.Equal(t, tc.result, task.isWaitingForACSExecutionCredentials(tc.errs), tc.msg) + }) + } +} + +func TestCleanupTaskWithInvalidInterval(t *testing.T) { + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + cfg := getTestConfig() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskStopped) + container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := -1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + 
mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mTask.cleanupTask(taskStoppedDuration) +} + +func TestCleanupTaskWithResourceHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + cfg := getTestConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5TaskCgroup"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + mTask.Task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + mTask.AddResource("mockResource", mockResource) + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskStopped) + container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + 
cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mockResource.EXPECT().GetName() + mockResource.EXPECT().Cleanup().Return(nil) + mTask.cleanupTask(taskStoppedDuration) +} + +func TestCleanupTaskWithResourceErrorPath(t *testing.T) { + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + cfg := getTestConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyEnabled + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5TaskCgroup"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + mTask.Task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource) + mTask.AddResource("mockResource", mockResource) + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + mTask.SetSentStatus(apitaskstatus.TaskStopped) + 
container := mTask.Containers[0] + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mockResource.EXPECT().GetName() + mockResource.EXPECT().Cleanup().Return(errors.New("cleanup error")) + mTask.cleanupTask(taskStoppedDuration) +} + +func TestHandleContainerChangeUpdateContainerHealth(t *testing.T) { + eventStreamName := "TestHandleContainerChangeUpdateContainerHealth" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + containerChangeEventStream := eventstream.NewEventStream(eventStreamName, ctx) + containerChangeEventStream.StartListening() + + mTask := &managedTask{ + Task: testdata.LoadTask("sleep5TaskCgroup"), + containerChangeEventStream: containerChangeEventStream, + stateChangeEvents: make(chan statechange.Event), + ctx: context.TODO(), + engine: &DockerTaskEngine{ + dataClient: data.NewNoopClient(), + }, + } + // Discard all the statechange events + defer discardEvents(mTask.stateChangeEvents)() + + mTask.SetKnownStatus(apitaskstatus.TaskRunning) + mTask.SetSentStatus(apitaskstatus.TaskRunning) + container := mTask.Containers[0] + container.HealthCheckType = "docker" + + containerChange := dockerContainerChange{ + container: container, + event: 
dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: "dockerID", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Output: "health check succeed", + }, + }, + }, + } + + mTask.handleContainerChange(containerChange) + + containerHealth := container.GetHealthStatus() + assert.Equal(t, apicontainerstatus.ContainerHealthy, containerHealth.Status) + assert.Equal(t, "health check succeed", containerHealth.Output) +} + +func TestHandleContainerChangeUpdateMetadataRedundant(t *testing.T) { + eventStreamName := "TestHandleContainerChangeUpdateContainerHealth" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + containerChangeEventStream := eventstream.NewEventStream(eventStreamName, ctx) + containerChangeEventStream.StartListening() + + mTask := &managedTask{ + Task: testdata.LoadTask("sleep5TaskCgroup"), + containerChangeEventStream: containerChangeEventStream, + stateChangeEvents: make(chan statechange.Event), + ctx: context.TODO(), + engine: &DockerTaskEngine{ + dataClient: data.NewNoopClient(), + }, + } + // Discard all the statechange events + defer discardEvents(mTask.stateChangeEvents)() + + mTask.SetKnownStatus(apitaskstatus.TaskRunning) + mTask.SetSentStatus(apitaskstatus.TaskRunning) + container := mTask.Containers[0] + container.HealthCheckType = "docker" + // Container already in RUNNING status + container.SetKnownStatus(apicontainerstatus.ContainerRunning) + + timeNow := time.Now() + exitCode := exitcodes.ExitError + containerChange := dockerContainerChange{ + container: container, + event: dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: "dockerID", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Output: "health check succeed", + }, + ExitCode: 
&exitCode, + CreatedAt: timeNow, + }, + }, + } + + mTask.handleContainerChange(containerChange) + + containerHealth := container.GetHealthStatus() + assert.Equal(t, apicontainerstatus.ContainerHealthy, containerHealth.Status) + assert.Equal(t, "health check succeed", containerHealth.Output) + containerExitCode := container.GetKnownExitCode() + assert.Equal(t, exitCode, *containerExitCode) + containerCreateTime := container.GetCreatedAt() + assert.Equal(t, timeNow, containerCreateTime) +} + +func TestWaitForHostResources(t *testing.T) { + taskStopWG := utilsync.NewSequentialWaitGroup() + taskStopWG.Add(1, 1) + ctx, cancel := context.WithCancel(context.Background()) + + mtask := &managedTask{ + ctx: ctx, + cancel: cancel, + taskStopWG: taskStopWG, + Task: &apitask.Task{ + StartSequenceNumber: 1, + }, + } + + var waitForHostResourcesWG sync.WaitGroup + waitForHostResourcesWG.Add(1) + go func() { + mtask.waitForHostResources() + waitForHostResourcesWG.Done() + }() + + taskStopWG.Done(1) + waitForHostResourcesWG.Wait() +} + +func TestWaitForResourceTransition(t *testing.T) { + task := &managedTask{ + Task: &apitask.Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + }, + ctx: context.TODO(), + } + transition := make(chan struct{}, 1) + transitionChangeResource := make(chan string, 1) + resName := "cgroup" + // populate the transitions map with transition for the + // resource. 
We expect the event to be consumed + // by `waitForTransition` + transitions := make(map[string]string) + transitions[resName] = "ResourceCreated" + + go func() { + // Send "transition complete" message + transition <- struct{}{} + transitionChangeResource <- resName + }() + + // waitForTransition will block until it receives the event + // sent by the go routine defined above + task.waitForTransition(transitions, transition, transitionChangeResource) +} + +func TestApplyResourceStateHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + task := &managedTask{ + Task: &apitask.Task{ + Arn: "arn", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + }, + ctx: context.TODO(), + } + gomock.InOrder( + mockResource.EXPECT().GetName(), + mockResource.EXPECT().ApplyTransition(resourcestatus.ResourceCreated).Return(nil), + mockResource.EXPECT().GetName().AnyTimes(), + mockResource.EXPECT().StatusString(resourcestatus.ResourceCreated).AnyTimes(), + ) + assert.NoError(t, task.applyResourceState(mockResource, resourcestatus.ResourceCreated)) +} + +func TestApplyResourceStateFailures(t *testing.T) { + testCases := []struct { + Name string + ResStatus resourcestatus.ResourceStatus + Error error + }{ + { + Name: "no valid state transition", + ResStatus: resourcestatus.ResourceRemoved, + Error: errors.New("transition impossible"), + }, + { + Name: "transition error", + ResStatus: resourcestatus.ResourceCreated, + Error: errors.New("transition failed"), + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockResource := mock_taskresource.NewMockTaskResource(ctrl) + task := &managedTask{ + Task: &apitask.Task{ + Arn: "arn", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + }, + ctx: context.TODO(), + } + gomock.InOrder( + mockResource.EXPECT().GetName(), + 
mockResource.EXPECT().ApplyTransition(tc.ResStatus).Return(tc.Error), + mockResource.EXPECT().GetName().AnyTimes(), + mockResource.EXPECT().StatusString(tc.ResStatus).AnyTimes(), + ) + assert.Error(t, task.applyResourceState(mockResource, tc.ResStatus)) + }) + } +} + +func TestHandleVolumeResourceStateChangeAndSave(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredKnownStatus resourcestatus.ResourceStatus + Err error + ChangedKnownStatus resourcestatus.ResourceStatus + TaskDesiredStatus apitaskstatus.TaskStatus + }{ + { + Name: "error while steady state transition", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + DesiredKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + Err: errors.New("transition error"), + ChangedKnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + TaskDesiredStatus: apitaskstatus.TaskStopped, + }, + { + Name: "steady state transition", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + DesiredKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + Err: nil, + ChangedKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + } + volumeName := "vol" + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + res := &volume.VolumeResource{Name: volumeName} + res.SetKnownStatus(tc.KnownStatus) + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{ + dataClient: data.NewNoopClient(), + }, + } + mtask.AddResource(volumeName, res) + mtask.handleResourceStateChange(resourceStateChange{ + res, tc.DesiredKnownStatus, tc.Err, + }) + assert.Equal(t, tc.ChangedKnownStatus, res.GetKnownStatus()) + assert.Equal(t, 
tc.TaskDesiredStatus, mtask.GetDesiredStatus()) + }) + } +} + +func TestHandleVolumeResourceStateChangeNoSave(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredKnownStatus resourcestatus.ResourceStatus + Err error + ChangedKnownStatus resourcestatus.ResourceStatus + TaskDesiredStatus apitaskstatus.TaskStatus + }{ + { + Name: "steady state transition already done", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + DesiredKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + Err: nil, + ChangedKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + { + Name: "transition state less than known status", + DesiredKnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + Err: nil, + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + ChangedKnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + } + volumeName := "vol" + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := &volume.VolumeResource{Name: volumeName} + res.SetKnownStatus(tc.KnownStatus) + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + } + mtask.AddResource(volumeName, res) + mtask.handleResourceStateChange(resourceStateChange{ + res, tc.DesiredKnownStatus, tc.Err, + }) + assert.Equal(t, tc.ChangedKnownStatus, res.GetKnownStatus()) + assert.Equal(t, tc.TaskDesiredStatus, mtask.GetDesiredStatus()) + }) + } +} + +func TestVolumeResourceNextState(t *testing.T) { + testCases := []struct { + Name string + ResKnownStatus resourcestatus.ResourceStatus + ResDesiredStatus resourcestatus.ResourceStatus + NextState resourcestatus.ResourceStatus + ActionRequired bool + }{ + { + Name: "next state happy path", + 
ResKnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + ResDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + NextState: resourcestatus.ResourceStatus(volume.VolumeCreated), + ActionRequired: true, + }, + { + Name: "desired terminal", + ResKnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + ResDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + NextState: resourcestatus.ResourceStatus(volume.VolumeRemoved), + ActionRequired: false, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := volume.VolumeResource{} + res.SetKnownStatus(tc.ResKnownStatus) + res.SetDesiredStatus(tc.ResDesiredStatus) + mtask := managedTask{ + Task: &apitask.Task{}, + } + transition := mtask.resourceNextState(&res) + assert.Equal(t, tc.NextState, transition.nextState) + assert.Equal(t, tc.ActionRequired, transition.actionRequired) + }) + } +} + +func TestStartVolumeResourceTransitionsHappyPath(t *testing.T) { + testCases := []struct { + Name string + ResKnownStatus resourcestatus.ResourceStatus + ResDesiredStatus resourcestatus.ResourceStatus + TransitionStatus resourcestatus.ResourceStatus + StatusString string + CanTransition bool + TransitionsLen int + }{ + { + Name: "none to created", + ResKnownStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + ResDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + TransitionStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + StatusString: "CREATED", + CanTransition: true, + TransitionsLen: 1, + }, + } + volumeName := "vol" + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := &volume.VolumeResource{Name: volumeName} + res.SetKnownStatus(tc.ResKnownStatus) + res.SetDesiredStatus(tc.ResDesiredStatus) + + task := &managedTask{ + Task: &apitask.Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + } + 
task.AddResource(volumeName, res) + wg := sync.WaitGroup{} + wg.Add(1) + canTransition, transitions := task.startResourceTransitions( + func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) { + assert.Equal(t, nextStatus, tc.TransitionStatus) + wg.Done() + }) + wg.Wait() + assert.Equal(t, tc.CanTransition, canTransition) + assert.Len(t, transitions, tc.TransitionsLen) + resTransition, ok := transitions[volumeName] + assert.True(t, ok) + assert.Equal(t, resTransition, tc.StatusString) + }) + } +} + +func TestStartVolumeResourceTransitionsEmpty(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredStatus resourcestatus.ResourceStatus + CanTransition bool + }{ + { + Name: "known < desired", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + DesiredStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + CanTransition: true, + }, + { + Name: "known equals desired", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + DesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + CanTransition: false, + }, + { + Name: "known > desired", + KnownStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + DesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + CanTransition: false, + }, + } + volumeName := "vol" + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + res := &volume.VolumeResource{Name: volumeName} + res.SetKnownStatus(tc.KnownStatus) + res.SetDesiredStatus(tc.DesiredStatus) + + mtask := &managedTask{ + Task: &apitask.Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + ctx: ctx, + resourceStateChangeEvent: make(chan resourceStateChange), + } + mtask.Task.AddResource(volumeName, res) + canTransition, transitions := mtask.startResourceTransitions( + 
func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) { + t.Error("Transition function should not be called when no transitions are possible") + }) + assert.Equal(t, tc.CanTransition, canTransition) + assert.Empty(t, transitions) + }) + } +} + +func getTestConfig() config.Config { + cfg := config.DefaultConfig() + cfg.TaskCPUMemLimit.Value = config.ExplicitlyDisabled + return cfg +} + +// TestContainerNextStateDependsStoppedContainer tests the container that has +// dependency on other containers' stopped status should wait for other container +// to be stopped before it can be stopped +func TestContainerNextStateDependsStoppedContainer(t *testing.T) { + testCases := []struct { + // Known status of the dependent container + knownStatus apicontainerstatus.ContainerStatus + actionRequired bool + err error + }{ + { + knownStatus: apicontainerstatus.ContainerRunning, + actionRequired: false, + err: dependencygraph.ErrContainerDependencyNotResolved, + }, + { + knownStatus: apicontainerstatus.ContainerStopped, + actionRequired: true, + err: nil, + }, + } + + containerRunning := &apicontainer.Container{ + Name: "containerrunning", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + } + containerToBeStopped := &apicontainer.Container{ + Name: "containertostop", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + DesiredStatusUnsafe: apicontainerstatus.ContainerStopped, + } + + // Add a dependency of the container's stopped status + containerToBeStopped.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) + containerToBeStopped.BuildContainerDependency(containerRunning.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped) + + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + Containers: []*apicontainer.Container{containerRunning, containerToBeStopped}, + }, + engine: &DockerTaskEngine{}, + } + + for _, tc := range testCases { + 
t.Run(fmt.Sprintf("Transition container to stopped but has dependent container container in %s", tc.knownStatus.String()), func(t *testing.T) { + containerRunning.SetKnownStatus(tc.knownStatus) + transition := mtask.containerNextState(containerToBeStopped) + assert.Equal(t, tc.actionRequired, transition.actionRequired) + assert.Equal(t, tc.err, transition.reason) + }) + } +} diff --git a/agent/engine/task_manager_unix_test.go b/agent/engine/task_manager_unix_test.go new file mode 100644 index 00000000000..094ac191de0 --- /dev/null +++ b/agent/engine/task_manager_unix_test.go @@ -0,0 +1,455 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package engine + +import ( + "context" + "errors" + "fmt" + "os" + "sync" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/data" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/engine/testdata" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + "github.com/golang/mock/gomock" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/stretchr/testify/assert" +) + +// These tests use cgroup resource, which is linux specific. +// generic resource's(eg.volume) tests should be added to common test file. 
+func TestHandleResourceStateChangeAndSave(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredKnownStatus resourcestatus.ResourceStatus + Err error + ChangedKnownStatus resourcestatus.ResourceStatus + TaskDesiredStatus apitaskstatus.TaskStatus + }{ + { + Name: "error while steady state transition", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + DesiredKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + Err: errors.New("transition error"), + ChangedKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + TaskDesiredStatus: apitaskstatus.TaskStopped, + }, + { + Name: "steady state transition", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + DesiredKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + Err: nil, + ChangedKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + res := &cgroup.CgroupResource{} + res.SetKnownStatus(tc.KnownStatus) + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + engine: &DockerTaskEngine{ + dataClient: data.NewNoopClient(), + }, + } + mtask.AddResource("cgroup", res) + mtask.handleResourceStateChange(resourceStateChange{ + res, tc.DesiredKnownStatus, tc.Err, + }) + assert.Equal(t, tc.ChangedKnownStatus, res.GetKnownStatus()) + assert.Equal(t, tc.TaskDesiredStatus, mtask.GetDesiredStatus()) + }) + } +} + +func TestHandleResourceStateChangeNoSave(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredKnownStatus resourcestatus.ResourceStatus + Err error + ChangedKnownStatus resourcestatus.ResourceStatus + 
TaskDesiredStatus apitaskstatus.TaskStatus + }{ + { + Name: "steady state transition already done", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + DesiredKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + Err: nil, + ChangedKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + { + Name: "transition state less than known status", + DesiredKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + Err: nil, + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + ChangedKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + TaskDesiredStatus: apitaskstatus.TaskRunning, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := &cgroup.CgroupResource{} + res.SetKnownStatus(tc.KnownStatus) + mtask := managedTask{ + Task: &apitask.Task{ + Arn: "task1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + } + mtask.AddResource("cgroup", res) + mtask.handleResourceStateChange(resourceStateChange{ + res, tc.DesiredKnownStatus, tc.Err, + }) + assert.Equal(t, tc.ChangedKnownStatus, res.GetKnownStatus()) + assert.Equal(t, tc.TaskDesiredStatus, mtask.GetDesiredStatus()) + }) + } +} + +func TestResourceNextState(t *testing.T) { + testCases := []struct { + Name string + ResKnownStatus resourcestatus.ResourceStatus + ResDesiredStatus resourcestatus.ResourceStatus + NextState resourcestatus.ResourceStatus + ActionRequired bool + }{ + { + Name: "next state happy path", + ResKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + ResDesiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + NextState: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + ActionRequired: true, + }, + { + Name: "desired terminal", + ResKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + ResDesiredStatus: 
resourcestatus.ResourceStatus(cgroup.CgroupRemoved), + NextState: resourcestatus.ResourceStatus(cgroup.CgroupRemoved), + ActionRequired: false, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := cgroup.CgroupResource{} + res.SetKnownStatus(tc.ResKnownStatus) + res.SetDesiredStatus(tc.ResDesiredStatus) + mtask := managedTask{ + Task: &apitask.Task{}, + } + transition := mtask.resourceNextState(&res) + assert.Equal(t, tc.NextState, transition.nextState) + assert.Equal(t, tc.ActionRequired, transition.actionRequired) + }) + } +} + +func TestStartResourceTransitionsHappyPath(t *testing.T) { + testCases := []struct { + Name string + ResKnownStatus resourcestatus.ResourceStatus + ResDesiredStatus resourcestatus.ResourceStatus + TransitionStatus resourcestatus.ResourceStatus + StatusString string + CanTransition bool + TransitionsLen int + }{ + { + Name: "none to created", + ResKnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupStatusNone), + ResDesiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + TransitionStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + StatusString: "CREATED", + CanTransition: true, + TransitionsLen: 1, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + res := &cgroup.CgroupResource{} + res.SetKnownStatus(tc.ResKnownStatus) + res.SetDesiredStatus(tc.ResDesiredStatus) + + task := &managedTask{ + Task: &apitask.Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + } + task.AddResource("cgroup", res) + wg := sync.WaitGroup{} + wg.Add(1) + canTransition, transitions := task.startResourceTransitions( + func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) { + assert.Equal(t, nextStatus, tc.TransitionStatus) + wg.Done() + }) + wg.Wait() + assert.Equal(t, tc.CanTransition, canTransition) + assert.Len(t, transitions, tc.TransitionsLen) + resTransition, 
ok := transitions["cgroup"] + assert.True(t, ok) + assert.Equal(t, resTransition, tc.StatusString) + }) + } +} + +func TestStartResourceTransitionsEmpty(t *testing.T) { + testCases := []struct { + Name string + KnownStatus resourcestatus.ResourceStatus + DesiredStatus resourcestatus.ResourceStatus + CanTransition bool + }{ + { + Name: "known < desired", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + DesiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupRemoved), + CanTransition: true, + }, + { + Name: "known equals desired", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + DesiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + CanTransition: false, + }, + { + Name: "known > desired", + KnownStatus: resourcestatus.ResourceStatus(cgroup.CgroupRemoved), + DesiredStatus: resourcestatus.ResourceStatus(cgroup.CgroupCreated), + CanTransition: false, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + res := &cgroup.CgroupResource{} + res.SetKnownStatus(tc.KnownStatus) + res.SetDesiredStatus(tc.DesiredStatus) + + mtask := &managedTask{ + Task: &apitask.Task{ + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + }, + ctx: ctx, + resourceStateChangeEvent: make(chan resourceStateChange), + } + mtask.Task.AddResource("cgroup", res) + canTransition, transitions := mtask.startResourceTransitions( + func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) { + t.Error("Transition function should not be called when no transitions are possible") + }) + assert.Equal(t, tc.CanTransition, canTransition) + assert.Empty(t, transitions) + }) + } +} + +//TestEFSNextStateWithTransitionDependencies verifies the dependencies are resolved correctly for task resource +func TestEFSVolumeNextStateWithTransitionDependencies(t *testing.T) { + 
testCases := []struct { + name string + resCurrentStatus resourcestatus.ResourceStatus + resDesiredStatus resourcestatus.ResourceStatus + resDependentStatus resourcestatus.ResourceStatus + dependencyCurrentStatus apicontainerstatus.ContainerStatus + dependencySatisfiedStatus apicontainerstatus.ContainerStatus + expectedResourceStatus resourcestatus.ResourceStatus + expectedTransitionActionable bool + reason error + }{ + // NONE -> CREATED transition is not allowed and not actionable + { + name: "created depends on resourceProvisioned, dependency is none", + resCurrentStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + resDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + resDependentStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + dependencyCurrentStatus: apicontainerstatus.ContainerStatusNone, + dependencySatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + expectedResourceStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + expectedTransitionActionable: false, + reason: dependencygraph.ErrContainerDependencyNotResolvedForResource, + }, + // NONE -> CREATED transition is allowed and actionable + { + name: "created depends on resourceProvisioned, dependency is resourceProvisioned", + resCurrentStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + resDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + resDependentStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + dependencyCurrentStatus: apicontainerstatus.ContainerResourcesProvisioned, + dependencySatisfiedStatus: apicontainerstatus.ContainerResourcesProvisioned, + expectedResourceStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + expectedTransitionActionable: true, + }, + // CREATED -> REMOVED transition is allowed and actionable + { + name: "removed depends on stopped, dependency is stopped", + resCurrentStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + resDesiredStatus: 
resourcestatus.ResourceStatus(volume.VolumeRemoved), + resDependentStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + dependencyCurrentStatus: apicontainerstatus.ContainerStopped, + dependencySatisfiedStatus: apicontainerstatus.ContainerStopped, + expectedResourceStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + expectedTransitionActionable: false, + }, + // NONE -> REMOVED transition is allowed and not actionable + { + name: "created depends on created, desired is stopped, dependency is created", + resCurrentStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + resDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + resDependentStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + dependencyCurrentStatus: apicontainerstatus.ContainerCreated, + dependencySatisfiedStatus: apicontainerstatus.ContainerCreated, + expectedResourceStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + expectedTransitionActionable: false, + }, + // NONE -> REMOVED transition is not allowed and not actionable + { + name: "created depends on created, desired is stopped, dependency is none", + resCurrentStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + resDesiredStatus: resourcestatus.ResourceStatus(volume.VolumeRemoved), + resDependentStatus: resourcestatus.ResourceStatus(volume.VolumeCreated), + dependencyCurrentStatus: apicontainerstatus.ContainerStatusNone, + dependencySatisfiedStatus: apicontainerstatus.ContainerCreated, + expectedResourceStatus: resourcestatus.ResourceStatus(volume.VolumeStatusNone), + expectedTransitionActionable: false, + reason: dependencygraph.ErrContainerDependencyNotResolvedForResource, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + res, _ := volume.NewVolumeResource(ctx, "task1", apitask.EFSVolumeType, "volume1", "", false, "", nil, nil, nil) + dependencyName := 
"dependency" + dependency := &apicontainer.Container{ + Name: dependencyName, + KnownStatusUnsafe: tc.dependencyCurrentStatus, + } + res.BuildContainerDependency(dependencyName, tc.dependencySatisfiedStatus, tc.resDependentStatus) + + res.SetKnownStatus(tc.resCurrentStatus) + res.SetDesiredStatus(tc.resDesiredStatus) + mtask := managedTask{ + Task: &apitask.Task{ + Containers: []*apicontainer.Container{ + dependency, + }, + }, + } + transition := mtask.resourceNextState(res) + assert.Equal(t, tc.expectedResourceStatus, transition.nextState, + "Expected next state [%s] != Retrieved next state [%s]", + res.StatusString(tc.expectedResourceStatus), res.StatusString(transition.nextState)) + assert.Equal(t, tc.expectedTransitionActionable, transition.actionRequired, "transition actionable") + assert.Equal(t, tc.reason, transition.reason, "transition possible") + }) + } +} + +func TestCleanupExecEnabledTask(t *testing.T) { + cfg := getTestConfig() + ctrl := gomock.NewController(t) + mockTime := mock_ttime.NewMockTime(ctrl) + mockState := mock_dockerstate.NewMockTaskEngineState(ctrl) + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockImageManager := mock_engine.NewMockImageManager(ctrl) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + taskEngine := &DockerTaskEngine{ + ctx: ctx, + cfg: &cfg, + dataClient: data.NewNoopClient(), + state: mockState, + client: mockClient, + imageManager: mockImageManager, + } + mTask := &managedTask{ + ctx: ctx, + cancel: cancel, + Task: testdata.LoadTask("sleep5"), + _time: mockTime, + engine: taskEngine, + acsMessages: make(chan acsTransition), + dockerMessages: make(chan dockerContainerChange), + resourceStateChangeEvent: make(chan resourceStateChange), + cfg: taskEngine.cfg, + } + container := mTask.Containers[0] + enableExecCommandAgentForContainer(container, apicontainer.ManagedAgentState{}) + mTask.SetKnownStatus(apitaskstatus.TaskStopped) + 
mTask.SetSentStatus(apitaskstatus.TaskStopped) + + dockerContainer := &apicontainer.DockerContainer{ + DockerName: "dockerContainer", + } + tID, _ := mTask.Task.GetID() + removeAll = func(path string) error { + assert.Equal(t, fmt.Sprintf("/log/exec/%s", tID), path) + return nil + } + defer func() { + removeAll = os.RemoveAll + }() + // Expectations for triggering cleanup + now := mTask.GetKnownStatusTime() + taskStoppedDuration := 1 * time.Minute + mockTime.EXPECT().Now().Return(now).AnyTimes() + cleanupTimeTrigger := make(chan time.Time) + mockTime.EXPECT().After(gomock.Any()).Return(cleanupTimeTrigger) + go func() { + cleanupTimeTrigger <- now + }() + + // Expectations to verify that the task gets removed + mockState.EXPECT().ContainerMapByArn(mTask.Arn).Return(map[string]*apicontainer.DockerContainer{container.Name: dockerContainer}, true) + mockClient.EXPECT().RemoveContainer(gomock.Any(), dockerContainer.DockerName, gomock.Any()).Return(nil) + mockImageManager.EXPECT().RemoveContainerReferenceFromImageState(container).Return(nil) + mockState.EXPECT().RemoveTask(mTask.Task) + mTask.cleanupTask(taskStoppedDuration) +} diff --git a/agent/engine/testdata/load.go b/agent/engine/testdata/load.go new file mode 100644 index 00000000000..2be16326070 --- /dev/null +++ b/agent/engine/testdata/load.go @@ -0,0 +1,23 @@ +package testdata + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "runtime" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" +) + +func LoadTask(name string) *apitask.Task { + _, filename, _, _ := runtime.Caller(0) + filedata, err := ioutil.ReadFile(filepath.Join(filepath.Dir(filename), "test_tasks", name+".json")) + if err != nil { + panic(err) + } + t := &apitask.Task{} + if err := json.Unmarshal(filedata, t); err != nil { + panic(err) + } + return t +} diff --git a/agent/engine/testdata/test_tasks/circular_dependency.json b/agent/engine/testdata/test_tasks/circular_dependency.json new file mode 100644 index 
00000000000..5cb8e312fd8 --- /dev/null +++ b/agent/engine/testdata/test_tasks/circular_dependency.json @@ -0,0 +1,25 @@ +{ + "Arn":"arn:aws:ecs:us-west-2:123456789012:task/12345678-90ab-cdef-1234-56780abcdef1-circular-dependency", + "Family":"circular-dependency", + "Version":"2", + "Containers": + [{ + "Name": "web", + "Image": "busybox", + "Command": ["sleep", "1000"], + "Links":["web-db:web-db"] + }, + { + "Name": "web-db", + "Image": "busybox", + "Command": ["sleep", "1000"], + "volumesFrom": [{ + "sourceContainer": "web" + }] + }], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "NONE", + "KnownTime": "0001-01-01T00:00:00Z", + "SentStatus": "NONE" +} diff --git a/agent/engine/testdata/test_tasks/sleep5.json b/agent/engine/testdata/test_tasks/sleep5.json new file mode 100644 index 00000000000..8e0043b194a --- /dev/null +++ b/agent/engine/testdata/test_tasks/sleep5.json @@ -0,0 +1,38 @@ +{ + "Arn":"arn:aws:ecs:us-west-2:123456789012:task/12345678-90ab-cdef-1234-56780abcdef1", + "Family":"sleep5", + "Version":"2", + "Containers": + [ + { + "Name":"sleep5", + "Image":"busybox", + "Command":["sleep","5"], + "Cpu":10, + "Memory":10, + "Links":null, + "volumesFrom":[], + "mountPoints":[], + "portMappings":[], + "Essential":true, + "EntryPoint":null, + "environment":{}, + "overrides":{"command":null}, + "desiredStatus":"NONE", + "KnownStatus":"NONE", + "RunDependencies":null, + "IsInternal":false, + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"NONE", + "KnownExitCode":null, + "KnownPortBindingsUnsafe":null, + "StatusLock":{} + } + ], + "volumes":[], + "DesiredStatus":"RUNNING", + "KnownStatus":"NONE", + "KnownTime":"0001-01-01T00:00:00Z", + "SentStatus":"NONE" +} diff --git a/agent/engine/testdata/test_tasks/sleep5TaskCgroup.json b/agent/engine/testdata/test_tasks/sleep5TaskCgroup.json new file mode 100644 index 00000000000..d7745e53c3a --- /dev/null +++ b/agent/engine/testdata/test_tasks/sleep5TaskCgroup.json @@ -0,0 +1,40 @@ +{ + 
"Arn":"arn:aws:ecs:us-west-2:123456789012:task/12345678-90ab-cdef-1234-56780abcdef1", + "Family":"sleep5", + "Version":"2", + "Containers": + [ + { + "Name":"sleep5", + "Image":"busybox", + "Command":["sleep","5"], + "Cpu":10, + "Memory":10, + "Links":null, + "volumesFrom":[], + "mountPoints":[], + "portMappings":[], + "Essential":true, + "EntryPoint":null, + "environment":{}, + "overrides":{"command":null}, + "desiredStatus":"NONE", + "KnownStatus":"NONE", + "RunDependencies":null, + "IsInternal":false, + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"NONE", + "KnownExitCode":null, + "KnownPortBindingsUnsafe":null, + "StatusLock":{} + } + ], + "volumes":[], + "CPULimit": 2.0, + "Memory": 512, + "DesiredStatus":"RUNNING", + "KnownStatus":"NONE", + "KnownTime":"0001-01-01T00:00:00Z", + "SentStatus":"NONE" +} diff --git a/agent/engine/testdata/test_tasks/sleep5TwoContainers.json b/agent/engine/testdata/test_tasks/sleep5TwoContainers.json new file mode 100644 index 00000000000..38a84886406 --- /dev/null +++ b/agent/engine/testdata/test_tasks/sleep5TwoContainers.json @@ -0,0 +1,63 @@ +{ + "Arn":"arn:aws:ecs:us-west-2:123456789012:task/12345678-90ab-cdef-1234-56780abcdef1", + "Family":"sleep5", + "Version":"2", + "Containers": + [ + { + "Name":"sleep5", + "Image":"busybox", + "Command":["sleep","5"], + "Cpu":10, + "Memory":10, + "Links":null, + "volumesFrom":[], + "mountPoints":[], + "portMappings":[], + "Essential":true, + "EntryPoint":null, + "environment":{}, + "overrides":{"command":null}, + "desiredStatus":"NONE", + "KnownStatus":"NONE", + "RunDependencies":null, + "IsInternal":false, + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"NONE", + "KnownExitCode":null, + "KnownPortBindingsUnsafe":null, + "StatusLock":{} + }, + { + "Name":"sleep5-2", + "Image":"busybox", + "Command":["sleep","5"], + "Cpu":10, + "Memory":10, + "Links":null, + "volumesFrom":[], + "mountPoints":[], + "portMappings":[], + "Essential":true, + 
"EntryPoint":null, + "environment":{}, + "overrides":{"command":null}, + "desiredStatus":"NONE", + "KnownStatus":"NONE", + "RunDependencies":null, + "IsInternal":false, + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"NONE", + "KnownExitCode":null, + "KnownPortBindingsUnsafe":null, + "StatusLock":{} + } + ], + "volumes":[], + "DesiredStatus":"RUNNING", + "KnownStatus":"NONE", + "KnownTime":"0001-01-01T00:00:00Z", + "SentStatus":"NONE" +} diff --git a/agent/engine/testutils/docker_task_engine_equal.go b/agent/engine/testutils/docker_task_engine_equal.go new file mode 100644 index 00000000000..c5c48ba53e3 --- /dev/null +++ b/agent/engine/testutils/docker_task_engine_equal.go @@ -0,0 +1,28 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package testutils contains files that are used in tests but not elsewhere and thus can +// be excluded from the final executable. 
Including them in a different package +// allows this to happen +package testutils + +import ( + "github.com/aws/amazon-ecs-agent/agent/engine" + state_testutil "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/testutils" +) + +// DockerTaskEnginesEqual determines if the lhs and rhs task engines given are +// equal (as defined by their state being equal) +func DockerTaskEnginesEqual(lhs, rhs *engine.DockerTaskEngine) bool { + return state_testutil.DockerStatesEqual(lhs.State(), rhs.State()) +} diff --git a/agent/eni/netlinkwrapper/generate_mocks_linux.go b/agent/eni/netlinkwrapper/generate_mocks_linux.go new file mode 100644 index 00000000000..0ba99048ea9 --- /dev/null +++ b/agent/eni/netlinkwrapper/generate_mocks_linux.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package netlinkwrapper + +//go:generate mockgen -destination=mocks/mock_netlinkwrapper_linux.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper NetLink diff --git a/agent/eni/netlinkwrapper/mocks/mock_netlinkwrapper_linux.go b/agent/eni/netlinkwrapper/mocks/mock_netlinkwrapper_linux.go new file mode 100644 index 00000000000..8d0b91e08f7 --- /dev/null +++ b/agent/eni/netlinkwrapper/mocks/mock_netlinkwrapper_linux.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper (interfaces: NetLink) + +// Package mock_netlinkwrapper is a generated GoMock package. +package mock_netlinkwrapper + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + netlink "github.com/vishvananda/netlink" +) + +// MockNetLink is a mock of NetLink interface +type MockNetLink struct { + ctrl *gomock.Controller + recorder *MockNetLinkMockRecorder +} + +// MockNetLinkMockRecorder is the mock recorder for MockNetLink +type MockNetLinkMockRecorder struct { + mock *MockNetLink +} + +// NewMockNetLink creates a new mock instance +func NewMockNetLink(ctrl *gomock.Controller) *MockNetLink { + mock := &MockNetLink{ctrl: ctrl} + mock.recorder = &MockNetLinkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNetLink) EXPECT() *MockNetLinkMockRecorder { + return m.recorder +} + +// LinkByName mocks base method +func (m *MockNetLink) LinkByName(arg0 string) (netlink.Link, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LinkByName", arg0) + ret0, _ := ret[0].(netlink.Link) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LinkByName indicates an expected call of LinkByName +func (mr *MockNetLinkMockRecorder) LinkByName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkByName", reflect.TypeOf((*MockNetLink)(nil).LinkByName), arg0) +} + +// LinkList 
mocks base method +func (m *MockNetLink) LinkList() ([]netlink.Link, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LinkList") + ret0, _ := ret[0].([]netlink.Link) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LinkList indicates an expected call of LinkList +func (mr *MockNetLinkMockRecorder) LinkList() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkList", reflect.TypeOf((*MockNetLink)(nil).LinkList)) +} diff --git a/agent/eni/netlinkwrapper/netlink_linux.go b/agent/eni/netlinkwrapper/netlink_linux.go new file mode 100644 index 00000000000..8652c987f97 --- /dev/null +++ b/agent/eni/netlinkwrapper/netlink_linux.go @@ -0,0 +1,42 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package netlinkwrapper + +import "github.com/vishvananda/netlink" + +// NetLink Wrapper methods used from the vishvananda/netlink package +type NetLink interface { + LinkByName(name string) (netlink.Link, error) + LinkList() ([]netlink.Link, error) +} + +// NetLinkClient helps invoke the actual netlink methods +type NetLinkClient struct{} + +// New creates a new NetLink object +func New() NetLink { + return NetLinkClient{} +} + +// LinkByName finds a link by name and returns a pointer to the object +func (NetLinkClient) LinkByName(name string) (netlink.Link, error) { + return netlink.LinkByName(name) +} + +// LinkList gets a list of link devices. 
Equivalent to: `ip link show` +func (NetLinkClient) LinkList() ([]netlink.Link, error) { + return netlink.LinkList() +} diff --git a/agent/eni/networkutils/consts.go b/agent/eni/networkutils/consts.go new file mode 100644 index 00000000000..50fbbdb0222 --- /dev/null +++ b/agent/eni/networkutils/consts.go @@ -0,0 +1,20 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package networkutils + +const ( + pciDevicePrefix = "pci" + vifDevicePrefix = "vif" + virtualDevicePrefix = "virtual" +) diff --git a/agent/eni/networkutils/utils_linux.go b/agent/eni/networkutils/utils_linux.go new file mode 100644 index 00000000000..ccf37d51184 --- /dev/null +++ b/agent/eni/networkutils/utils_linux.go @@ -0,0 +1,157 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// Package networkutils is a collection of helpers for eni/watcher +package networkutils + +import ( + "context" + "path/filepath" + "strings" + "time" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/pkg/errors" + + "github.com/cihub/seelog" +) + +const ( + // macAddressBackoffMin specifies the minimum duration for the backoff + // when looking for an ENI's mac address on the host + macAddressBackoffMin = 2 * time.Millisecond + + // macAddressBackoffMax specifies the maximum duration for the backoff + // when looking for an ENI's mac address on the host + macAddressBackoffMax = 200 * time.Millisecond + + // macAddressBackoffJitter specifies the jitter multiple percentage when + // looking for an ENI's mac address on the host + macAddressBackoffJitter = 0.2 + + // macAddressBackoffMultiple specifies the backoff duration multiplier + // when looking for an ENI's mac address on the host + macAddressBackoffMultiple = 1.5 +) + +// macAddressRetriever is used to retrieve the mac address of a device. It collects +// all the information necessary to start this operation and stores the result in +// the 'macAddress' attribute +type macAddressRetriever struct { + dev string + netlinkClient netlinkwrapper.NetLink + macAddress string + // timeout specifies the timeout duration before giving up when + // looking for an ENI's mac address on the host + timeout time.Duration + ctx context.Context +} + +// GetMACAddress retrieves the MAC address of a device using netlink +func GetMACAddress(ctx context.Context, + timeout time.Duration, + dev string, + netlinkClient netlinkwrapper.NetLink) (string, error) { + retriever := &macAddressRetriever{ + dev: dev, + netlinkClient: netlinkClient, + ctx: ctx, + timeout: timeout, + } + return retriever.retrieve() +} + +// retrieve retrives the mac address of a network device. 
If the retrieved mac +// address is empty, it retries the operation with a timeout specified by the +// caller +func (retriever *macAddressRetriever) retrieve() (string, error) { + backoff := retry.NewExponentialBackoff(macAddressBackoffMin, macAddressBackoffMax, + macAddressBackoffJitter, macAddressBackoffMultiple) + ctx, cancel := context.WithTimeout(retriever.ctx, retriever.timeout) + defer cancel() + + err := retry.RetryWithBackoffCtx(ctx, backoff, func() error { + retErr := retriever.retrieveOnce() + if retErr != nil { + seelog.Warnf("Unable to retrieve mac address for device '%s': %v", + retriever.dev, retErr) + return retErr + } + + if retriever.macAddress == "" { + seelog.Debugf("Empty mac address for device '%s'", retriever.dev) + // Return a retriable error when mac address is empty. If the error + // is not wrapped with the RetriableError interface, RetryWithBackoffCtx + // treats them as retriable by default + return errors.Errorf("eni mac address: retrieved empty address for device %s", + retriever.dev) + } + + return nil + }) + if err != nil { + return "", err + } + // RetryWithBackoffCtx returns nil when the context is cancelled. Check if there was + // a timeout here. 
TODO: Fix RetryWithBackoffCtx to return ctx.Err() on context Done() + if err = ctx.Err(); err != nil { + return "", errors.Wrapf(err, "eni mac address: timed out waiting for eni device '%s'", + retriever.dev) + } + + return retriever.macAddress, nil +} + +// retrieveOnce retrieves the MAC address of a device using netlink.LinkByName +func (retriever *macAddressRetriever) retrieveOnce() error { + dev := filepath.Base(retriever.dev) + link, err := retriever.netlinkClient.LinkByName(dev) + if err != nil { + return apierrors.NewRetriableError(apierrors.NewRetriable(false), err) + } + retriever.macAddress = link.Attrs().HardwareAddr.String() + return nil +} + +// IsValidNetworkDevice is used to differentiate virtual and physical devices +// Returns true only for pci or vif interfaces +func IsValidNetworkDevice(devicePath string) bool { + /* + * DevicePath Samples: + * eth1 -> /devices/pci0000:00/0000:00:05.0/net/eth1 + * eth0 -> ../../devices/pci0000:00/0000:00:03.0/net/eth0 + * lo -> ../../devices/virtual/net/lo + */ + splitDevLink := strings.SplitN(devicePath, "devices/", 2) + if len(splitDevLink) != 2 { + seelog.Warnf("Cannot determine device validity: %s", devicePath) + return false + } + /* + * CoreOS typically employs the vif style for physical net interfaces + * Amazon Linux, Ubuntu, RHEL, Fedora, Suse use the traditional pci convention + */ + if strings.HasPrefix(splitDevLink[1], pciDevicePrefix) || strings.HasPrefix(splitDevLink[1], vifDevicePrefix) { + return true + } + if strings.HasPrefix(splitDevLink[1], virtualDevicePrefix) { + return false + } + // NOTE: Should never reach here + seelog.Criticalf("Failed to validate device path: %s", devicePath) + return false +} diff --git a/agent/eni/networkutils/utils_linux_test.go b/agent/eni/networkutils/utils_linux_test.go new file mode 100644 index 00000000000..e612582cd04 --- /dev/null +++ b/agent/eni/networkutils/utils_linux_test.go @@ -0,0 +1,137 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package networkutils + +import ( + "context" + "errors" + "net" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" + + mock_netlinkwrapper "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper/mocks" +) + +const ( + randomDevice = "eth1" + validMAC = "00:0a:95:9d:68:16" + pciDevPath = " ../../devices/pci0000:00/0000:00:03.0/net/eth1" + virtualDevPath = "../../devices/virtual/net/lo" + invalidDevPath = "../../virtual/net/lo" + incorrectDevPath = "../../devices/totally/wrong/net/path" +) + +// TestGetMACAddress checks obtaining MACAddress by using netlinkClient +func TestGetMACAddress(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + pm, _ := net.ParseMAC(validMAC) + mockNetlink.EXPECT().LinkByName(randomDevice).Return( + &netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: pm, + Name: randomDevice, + }, + }, nil) + ctx := context.TODO() + mac, err := GetMACAddress(ctx, time.Millisecond, randomDevice, mockNetlink) + assert.Nil(t, err) + assert.Equal(t, validMAC, mac) +} + +// TestGetMACAddressWithNetlinkError attempts to test the netlinkClient +// error code path +func TestGetMACAddressWithNetlinkError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockNetlink := 
mock_netlinkwrapper.NewMockNetLink(mockCtrl) + // LinkByName returns an error. This will ensure that a non-retriable + // error is generated and halts the backoff-rety loop + mockNetlink.EXPECT().LinkByName(randomDevice).Return( + &netlink.Device{}, + errors.New("Dummy Netlink Error")) + ctx := context.TODO() + mac, err := GetMACAddress(ctx, 2*macAddressBackoffMax, randomDevice, mockNetlink) + assert.Error(t, err) + assert.Empty(t, mac) +} + +// TestGetMACAddressNotFoundRetry tests if netlink.LinkByName gets invoked +// multiple times if the mac address for a device is returned as empty string +func TestGetMACAddressNotFoundRetry(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + pm, _ := net.ParseMAC(validMAC) + gomock.InOrder( + // Return empty mac address on first invocation + mockNetlink.EXPECT().LinkByName(randomDevice).Return(&netlink.Device{}, nil), + // Return a valid mac address on first invocation. Even though the first + // invocation did not result in an error, it did return an empty mac address. 
+ // Hence, we expect it to be retried + mockNetlink.EXPECT().LinkByName(randomDevice).Return(&netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: pm, + Name: randomDevice, + }, + }, nil), + ) + ctx := context.TODO() + // Set max retry duration to twice that of the max backoff to ensure that there's + // enough time to retry + mac, err := GetMACAddress(ctx, 2*macAddressBackoffMax, randomDevice, mockNetlink) + assert.NoError(t, err) + assert.Equal(t, validMAC, mac) +} + +// TestGetMACAddressNotFoundRetryExpires tests if the backoff-retry logic for +// retrieving mac address returns error on timeout +func TestGetMACAddressNotFoundRetryExpires(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + // Return empty mac address everytime + mockNetlink.EXPECT().LinkByName(randomDevice).Return(&netlink.Device{}, nil).MinTimes(1) + ctx := context.TODO() + // Set max retry duration to twice that of the min backoff to ensure that there's + // enough time to retry + mac, err := GetMACAddress(ctx, 2*macAddressBackoffMin, randomDevice, mockNetlink) + assert.Error(t, err) + assert.Empty(t, mac) +} + +// TestIsValidDevicePathTableTest does a table test for device path validity +func TestIsValidDevicePathTableTest(t *testing.T) { + var table = []struct { + input string + output bool + }{ + {pciDevPath, true}, + {virtualDevPath, false}, + {invalidDevPath, false}, + {incorrectDevPath, false}, + } + + for _, entry := range table { + status := IsValidNetworkDevice(entry.input) + assert.Equal(t, entry.output, status) + } +} diff --git a/agent/eni/pause/error.go b/agent/eni/pause/error.go new file mode 100644 index 00000000000..454ad0a29a7 --- /dev/null +++ b/agent/eni/pause/error.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package pause + +// https://golang.org/src/syscall/zerrors_linux_386.go#L1382 +const noSuchFile = "no such file or directory" + +// UnsupportedPlatformError indicates an error when loading pause container +// image on an unsupported OS platform +type UnsupportedPlatformError struct { + error +} + +// UnsupportedPlatform returns true if the error is of UnsupportedPlatformError +// type +func UnsupportedPlatform(err error) bool { + _, ok := err.(UnsupportedPlatformError) + return ok +} + +// NewUnsupportedPlatformError creates a new UnsupportedPlatformError object +func NewUnsupportedPlatformError(err error) UnsupportedPlatformError { + return UnsupportedPlatformError{err} +} + +// NoSuchFileError wraps the error from the os package with the message +// "no such file error" +type NoSuchFileError struct { + error +} + +// NewNoSuchFileError creates a new NoSuchFileError object +func NewNoSuchFileError(err error) NoSuchFileError { + return NoSuchFileError{err} +} + +// IsNoSuchFileError returns true if the error is of NoSuchFileError type +func IsNoSuchFileError(err error) bool { + _, ok := err.(NoSuchFileError) + return ok +} diff --git a/agent/eni/pause/error_test.go b/agent/eni/pause/error_test.go new file mode 100644 index 00000000000..a63760634cc --- /dev/null +++ b/agent/eni/pause/error_test.go @@ -0,0 +1,51 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package pause + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnsupportedPlatform(t *testing.T) { + testCases := map[error]bool{ + errors.New("error"): false, + NewUnsupportedPlatformError(errors.New("error")): true, + } + + for err, expected := range testCases { + t.Run(fmt.Sprintf("returns %t for type %s", expected, reflect.TypeOf(err)), func(t *testing.T) { + assert.Equal(t, expected, UnsupportedPlatform(err)) + }) + } +} + +func TestIsNoSuchFileError(t *testing.T) { + testCases := map[error]bool{ + errors.New("error"): false, + NewNoSuchFileError(errors.New("No such file")): true, + } + + for err, expected := range testCases { + t.Run(fmt.Sprintf("return %t for type %s", expected, reflect.TypeOf(err)), func(t *testing.T) { + assert.Equal(t, expected, IsNoSuchFileError(err)) + }) + } +} diff --git a/agent/eni/pause/generate_mocks.go b/agent/eni/pause/generate_mocks.go new file mode 100644 index 00000000000..3cb9d064488 --- /dev/null +++ b/agent/eni/pause/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package pause + +//go:generate mockgen -destination=mocks/load_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/eni/pause Loader diff --git a/agent/eni/pause/load.go b/agent/eni/pause/load.go new file mode 100644 index 00000000000..ec5a2f76810 --- /dev/null +++ b/agent/eni/pause/load.go @@ -0,0 +1,36 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package pause + +import ( + "context" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/docker/docker/api/types" +) + +// Loader defines an interface for loading the pause container image. This is mostly +// to facilitate mocking and testing of the LoadImage method +type Loader interface { + LoadImage(ctx context.Context, cfg *config.Config, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) + IsLoaded(dockerClient dockerapi.DockerClient) (bool, error) +} + +type loader struct{} + +// New creates a new pause image loader +func New() Loader { + return &loader{} +} diff --git a/agent/eni/pause/mocks/load_mocks.go b/agent/eni/pause/mocks/load_mocks.go new file mode 100644 index 00000000000..2e2c768bc9c --- /dev/null +++ b/agent/eni/pause/mocks/load_mocks.go @@ -0,0 +1,82 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/eni/pause (interfaces: Loader) + +// Package mock_pause is a generated GoMock package. +package mock_pause + +import ( + context "context" + reflect "reflect" + + config "github.com/aws/amazon-ecs-agent/agent/config" + dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + types "github.com/docker/docker/api/types" + gomock "github.com/golang/mock/gomock" +) + +// MockLoader is a mock of Loader interface +type MockLoader struct { + ctrl *gomock.Controller + recorder *MockLoaderMockRecorder +} + +// MockLoaderMockRecorder is the mock recorder for MockLoader +type MockLoaderMockRecorder struct { + mock *MockLoader +} + +// NewMockLoader creates a new mock instance +func NewMockLoader(ctrl *gomock.Controller) *MockLoader { + mock := &MockLoader{ctrl: ctrl} + mock.recorder = &MockLoaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockLoader) EXPECT() *MockLoaderMockRecorder { + return m.recorder +} + +// IsLoaded mocks base method +func (m *MockLoader) IsLoaded(arg0 dockerapi.DockerClient) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsLoaded", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsLoaded indicates an expected call of IsLoaded +func (mr *MockLoaderMockRecorder) IsLoaded(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLoaded", reflect.TypeOf((*MockLoader)(nil).IsLoaded), arg0) +} + +// LoadImage mocks base method +func (m *MockLoader) LoadImage(arg0 context.Context, arg1 *config.Config, arg2 dockerapi.DockerClient) (*types.ImageInspect, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadImage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ImageInspect) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadImage indicates an expected call of LoadImage +func (mr *MockLoaderMockRecorder) LoadImage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadImage", reflect.TypeOf((*MockLoader)(nil).LoadImage), arg0, arg1, arg2) +} diff --git a/agent/eni/pause/pause_linux.go b/agent/eni/pause/pause_linux.go new file mode 100644 index 00000000000..a89c2f7b023 --- /dev/null +++ b/agent/eni/pause/pause_linux.go @@ -0,0 +1,91 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package pause + +import ( + "context" + "fmt" + "os" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + + log "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// LoadImage helps load the pause container image for the agent +func (*loader) LoadImage(ctx context.Context, cfg *config.Config, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) { + log.Debugf("Loading pause container tarball: %s", cfg.PauseContainerTarballPath) + if err := loadFromFile(ctx, cfg.PauseContainerTarballPath, dockerClient); err != nil { + return nil, err + } + + return getPauseContainerImage( + config.DefaultPauseContainerImageName, config.DefaultPauseContainerTag, dockerClient) +} + +func (*loader) IsLoaded(dockerClient dockerapi.DockerClient) (bool, error) { + image, err := getPauseContainerImage( + config.DefaultPauseContainerImageName, config.DefaultPauseContainerTag, dockerClient) + + if err != nil { + return false, errors.Wrapf(err, + "pause container inspect: failed to inspect image: %s", config.DefaultPauseContainerImageName) + } + + if image == nil || image.ID == "" { + return false, nil + } + + return true, nil +} + +var open = os.Open + +func loadFromFile(ctx context.Context, path string, dockerClient dockerapi.DockerClient) error { + pauseContainerReader, err := open(path) + if err != nil { + if err.Error() == noSuchFile { + return NewNoSuchFileError(errors.Wrapf(err, + "pause container load: failed to read pause container image: %s", path)) + } + return errors.Wrapf(err, + "pause container load: failed to read pause container image: %s", path) + } + if err := dockerClient.LoadImage(ctx, pauseContainerReader, dockerclient.LoadImageTimeout); err != nil { + return errors.Wrapf(err, + "pause container load: failed to load pause container image: %s", path) + } + + return nil + +} + +func 
getPauseContainerImage(name string, tag string, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) { + imageName := fmt.Sprintf("%s:%s", name, tag) + log.Debugf("Inspecting pause container image: %s", imageName) + + image, err := dockerClient.InspectImage(imageName) + if err != nil { + return nil, errors.Wrapf(err, + "pause container load: failed to inspect image: %s", imageName) + } + + return image, nil +} diff --git a/agent/eni/pause/pause_linux_test.go b/agent/eni/pause/pause_linux_test.go new file mode 100644 index 00000000000..48894db4ab1 --- /dev/null +++ b/agent/eni/pause/pause_linux_test.go @@ -0,0 +1,239 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package pause + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_sdkclient "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient/mocks" + mock_sdkclientfactory "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory/mocks" + + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + pauseTarballPath = "/path/to/pause.tar" + pauseName = "pause" + pauseTag = "tag" +) + +var defaultConfig = config.DefaultConfig() + +func mockOpen() func() { + open = func(name string) (*os.File, error) { + return nil, nil + } + return func() { + open = os.Open + } +} + +// TestLoadFromFileWithReaderError tests loadFromFile with reader error +func TestLoadFromFileWithReaderError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + + open = func(name string) (*os.File, error) { + return nil, errors.New("Dummy Reader Error") + } + defer func() { + open = os.Open + }() + + err = loadFromFile(ctx, pauseTarballPath, client) + assert.Error(t, err) +} + +// TestLoadFromFileHappyPath tests loadFromFile against happy path +func TestLoadFromFileHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := 
mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageLoad(gomock.Any(), gomock.Any(), false).Return(types.ImageLoadResponse{}, nil) + defer mockOpen()() + + err = loadFromFile(ctx, pauseTarballPath, client) + assert.NoError(t, err) +} + +// TestLoadFromFileDockerLoadImageError tests loadFromFile against error +// from Docker clients LoadImage +func TestLoadFromFileDockerLoadImageError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageLoad(gomock.Any(), gomock.Any(), false).Return(types.ImageLoadResponse{}, + errors.New("Dummy Load Image Error")) + + defer mockOpen()() + + err = loadFromFile(ctx, pauseTarballPath, client) + assert.Error(t, err) +} + +func TestGetPauseContainerImageInspectImageError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, 
ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageInspectWithRaw(gomock.Any(), pauseName+":"+pauseTag).Return( + types.ImageInspect{}, nil, errors.New("error")) + + _, err = getPauseContainerImage(pauseName, pauseTag, client) + assert.Error(t, err) +} + +func TestGetPauseContainerHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageInspectWithRaw(gomock.Any(), pauseName+":"+pauseTag).Return(types.ImageInspect{}, nil, nil) + + _, err = getPauseContainerImage(pauseName, pauseTag, client) + assert.NoError(t, err) +} + +func TestIsLoadedHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + pauseLoader := New() + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageInspectWithRaw(gomock.Any(), gomock.Any()).Return(types.ImageInspect{ID: "test123"}, nil, nil) + + isLoaded, err := pauseLoader.IsLoaded(client) + assert.NoError(t, err) + assert.True(t, isLoaded) +} + +func TestIsLoadedNotLoaded(t *testing.T) { + ctrl := gomock.NewController(t) + pauseLoader := New() + defer ctrl.Finish() + + // 
Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageInspectWithRaw(gomock.Any(), gomock.Any()).Return(types.ImageInspect{}, nil, nil) + + isLoaded, err := pauseLoader.IsLoaded(client) + assert.NoError(t, err) + assert.False(t, isLoaded) +} + +func TestIsLoadedError(t *testing.T) { + ctrl := gomock.NewController(t) + pauseLoader := New() + defer ctrl.Finish() + + // Docker SDK tests + mockDockerSDK := mock_sdkclient.NewMockClient(ctrl) + mockDockerSDK.EXPECT().Ping(gomock.Any()).Return(types.Ping{}, nil) + sdkFactory := mock_sdkclientfactory.NewMockFactory(ctrl) + sdkFactory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDockerSDK, nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + client, err := dockerapi.NewDockerGoClient(sdkFactory, &defaultConfig, ctx) + assert.NoError(t, err) + mockDockerSDK.EXPECT().ImageInspectWithRaw(gomock.Any(), gomock.Any()).Return( + types.ImageInspect{}, nil, errors.New("error")) + + isLoaded, err := pauseLoader.IsLoaded(client) + assert.Error(t, err) + assert.False(t, isLoaded) +} diff --git a/agent/eni/pause/pause_unsupported.go b/agent/eni/pause/pause_unsupported.go new file mode 100644 index 00000000000..80727a56a86 --- /dev/null +++ b/agent/eni/pause/pause_unsupported.go @@ -0,0 +1,39 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package pause + +import ( + "context" + "runtime" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// LoadImage returns UnsupportedPlatformError on the unsupported platform +func (*loader) LoadImage(ctx context.Context, cfg *config.Config, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) { + return nil, NewUnsupportedPlatformError(errors.Errorf( + "pause container load: unsupported platform: %s/%s", + runtime.GOOS, runtime.GOARCH)) +} + +func (*loader) IsLoaded(dockerClient dockerapi.DockerClient) (bool, error) { + return false, NewUnsupportedPlatformError(errors.Errorf( + "pause container isloaded: unsupported platform: %s/%s", + runtime.GOOS, runtime.GOARCH)) +} diff --git a/agent/eni/udevwrapper/generate_mocks_linux.go b/agent/eni/udevwrapper/generate_mocks_linux.go new file mode 100644 index 00000000000..37400e80196 --- /dev/null +++ b/agent/eni/udevwrapper/generate_mocks_linux.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package udevwrapper + +//go:generate mockgen -destination=mocks/mock_udevwrapper_linux.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper Udev diff --git a/agent/eni/udevwrapper/mocks/mock_udevwrapper_linux.go b/agent/eni/udevwrapper/mocks/mock_udevwrapper_linux.go new file mode 100644 index 00000000000..f86d93a9d19 --- /dev/null +++ b/agent/eni/udevwrapper/mocks/mock_udevwrapper_linux.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper (interfaces: Udev) + +// Package mock_udevwrapper is a generated GoMock package. 
+package mock_udevwrapper + +import ( + reflect "reflect" + + udev "github.com/deniswernert/udev" + gomock "github.com/golang/mock/gomock" +) + +// MockUdev is a mock of Udev interface +type MockUdev struct { + ctrl *gomock.Controller + recorder *MockUdevMockRecorder +} + +// MockUdevMockRecorder is the mock recorder for MockUdev +type MockUdevMockRecorder struct { + mock *MockUdev +} + +// NewMockUdev creates a new mock instance +func NewMockUdev(ctrl *gomock.Controller) *MockUdev { + mock := &MockUdev{ctrl: ctrl} + mock.recorder = &MockUdevMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockUdev) EXPECT() *MockUdevMockRecorder { + return m.recorder +} + +// Close mocks base method +func (m *MockUdev) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockUdevMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockUdev)(nil).Close)) +} + +// Monitor mocks base method +func (m *MockUdev) Monitor(arg0 chan *udev.UEvent) chan bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Monitor", arg0) + ret0, _ := ret[0].(chan bool) + return ret0 +} + +// Monitor indicates an expected call of Monitor +func (mr *MockUdevMockRecorder) Monitor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Monitor", reflect.TypeOf((*MockUdev)(nil).Monitor), arg0) +} diff --git a/agent/eni/udevwrapper/udev_linux.go b/agent/eni/udevwrapper/udev_linux.go new file mode 100644 index 00000000000..167f73fe929 --- /dev/null +++ b/agent/eni/udevwrapper/udev_linux.go @@ -0,0 +1,29 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package udevwrapper + +import "github.com/deniswernert/udev" + +// Udev Wrapper methods used from the deniswernert/udev package +type Udev interface { + Monitor(notify chan *udev.UEvent) (shutdown chan bool) + Close() error +} + +// New returns an UDev Monitor +func New() (*udev.UDevMonitor, error) { + return udev.NewMonitor() +} diff --git a/agent/eni/udevwrapper/udev_unsupported.go b/agent/eni/udevwrapper/udev_unsupported.go new file mode 100644 index 00000000000..56110b726a4 --- /dev/null +++ b/agent/eni/udevwrapper/udev_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package udevwrapper + +import ( + "runtime" + + "github.com/pkg/errors" +) + +type Udev interface { +} + +// New returns an UDev Monitor +func New() (interface{}, error) { + return nil, errors.Errorf("udev monitor creation: unsupported platform: %s/%s", + runtime.GOOS, runtime.GOARCH) +} diff --git a/agent/eni/watcher/consts.go b/agent/eni/watcher/consts.go new file mode 100644 index 00000000000..1032afca257 --- /dev/null +++ b/agent/eni/watcher/consts.go @@ -0,0 +1,27 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package watcher + +import "time" + +const ( + udevSubsystem = "SUBSYSTEM" + udevNetSubsystem = "net" + udevPCISubsystem = "pci" + udevEventAction = "ACTION" + udevAddEvent = "add" + udevDevPath = "DEVPATH" + udevInterface = "INTERFACE" + defaultReconciliationInterval = time.Second * 30 +) diff --git a/agent/eni/watcher/watcher_linux.go b/agent/eni/watcher/watcher_linux.go new file mode 100644 index 00000000000..ce016c37753 --- /dev/null +++ b/agent/eni/watcher/watcher_linux.go @@ -0,0 +1,373 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package watcher + +import ( + "context" + "fmt" + "strings" + "time" + + log "github.com/cihub/seelog" + "github.com/deniswernert/udev" + "github.com/pkg/errors" + "github.com/vishvananda/netlink" + + "github.com/aws/amazon-ecs-agent/agent/api" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper" + "github.com/aws/amazon-ecs-agent/agent/eni/networkutils" + "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" +) + +const ( + // linkTypeDevice defines the string that's expected to be the output of + // netlink.Link.Type() method for netlink.Device type + linkTypeDevice = "device" + + // encapTypeLoopback defines the string that's set for the link.Attrs.EncapType + // field for localhost devices. The EncapType field defines the link + // encapsulation method. For localhost, it's set to "loopback" + encapTypeLoopback = "loopback" + + // sendENIStateChangeRetryTimeout specifies the timeout before giving up + // when looking for ENI in agent's state. 
If for whatever reason, the message + // from ACS is received after the ENI has been attached to the instance, this + // timeout duration will be used to wait for ENI message to be sent from ACS + sendENIStateChangeRetryTimeout = 6 * time.Second + + // sendENIStateChangeBackoffMin specifies minimum value for backoff when + // waiting for attachment message from ACS + sendENIStateChangeBackoffMin = 100 * time.Millisecond + + // sendENIStateChangeBackoffMax specifies maximum value for backoff when + // waiting for attachment message from ACS + sendENIStateChangeBackoffMax = 250 * time.Millisecond + + // sendENIStateChangeBackoffJitter specifies the jitter multiple percentage + // when waiting for attachment message from ACS + sendENIStateChangeBackoffJitter = 0.2 + + // sendENIStateChangeBackoffMultiple specifies the backoff duration multipler + // when waiting for the attachment message from ACS + sendENIStateChangeBackoffMultiple = 1.5 + + // macAddressRetryTimeout specifies the timeout before giving up when + // looking for an ENI's mac address on the host. It takes a few milliseconds + // for the host to learn about an ENIs mac address from netlink.LinkList(). + // We are capping off this duration to 1s assuming worst-case behavior + macAddressRetryTimeout = 2 * time.Second + + // eniStatusSentMsg is the error message to use when trying to send an eni status that's + // already been sent + eniStatusSentMsg = "eni status already sent" +) + +// UdevWatcher maintains the state of attached ENIs +// to the instance. 
It also has supporting elements to +// maintain consistency and update intervals +type UdevWatcher struct { + ctx context.Context + cancel context.CancelFunc + updateIntervalTicker *time.Ticker + netlinkClient netlinkwrapper.NetLink + udevMonitor udevwrapper.Udev + events chan *udev.UEvent + agentState dockerstate.TaskEngineState + eniChangeEvent chan<- statechange.Event + primaryMAC string +} + +// unmanagedENIError is used to indicate that the agent found an ENI, but the agent isn't +// aware if this ENI is being managed by ECS +type unmanagedENIError struct { + mac string +} + +// Error returns the error string for the unmanagedENIError type +func (err *unmanagedENIError) Error() string { + return fmt.Sprintf("udev watcher send ENI state change: eni not managed by ecs: %s", err.mac) +} + +// New is used to return an instance of the UdevWatcher struct +func New(ctx context.Context, primaryMAC string, udevwrap udevwrapper.Udev, + state dockerstate.TaskEngineState, stateChangeEvents chan<- statechange.Event) *UdevWatcher { + return newWatcher(ctx, primaryMAC, netlinkwrapper.New(), udevwrap, state, stateChangeEvents) +} + +// newWatcher is used to nest the return of the UdevWatcher struct +func newWatcher(ctx context.Context, + primaryMAC string, + nlWrap netlinkwrapper.NetLink, + udevWrap udevwrapper.Udev, + state dockerstate.TaskEngineState, + stateChangeEvents chan<- statechange.Event) *UdevWatcher { + + derivedContext, cancel := context.WithCancel(ctx) + return &UdevWatcher{ + ctx: derivedContext, + cancel: cancel, + netlinkClient: nlWrap, + udevMonitor: udevWrap, + events: make(chan *udev.UEvent), + agentState: state, + eniChangeEvent: stateChangeEvents, + primaryMAC: primaryMAC, + } +} + +// Init initializes a new ENI Watcher +func (udevWatcher *UdevWatcher) Init() error { + // Retry in the first reconciliation, in case the ENI is attached before we connect to ACS. 
+ return udevWatcher.reconcileOnce(true) +} + +// Start periodically updates the state of ENIs connected to the system +func (udevWatcher *UdevWatcher) Start() { + // Udev Event Handler + go udevWatcher.eventHandler() + udevWatcher.performPeriodicReconciliation(defaultReconciliationInterval) +} + +// Stop is used to invoke the cancellation routine +func (udevWatcher *UdevWatcher) Stop() { + udevWatcher.cancel() +} + +// performPeriodicReconciliation is used to periodically invoke the +// reconciliation process based on a ticker +func (udevWatcher *UdevWatcher) performPeriodicReconciliation(updateInterval time.Duration) { + udevWatcher.updateIntervalTicker = time.NewTicker(updateInterval) + for { + select { + case <-udevWatcher.updateIntervalTicker.C: + if err := udevWatcher.reconcileOnce(false); err != nil { + log.Warnf("Udev watcher reconciliation failed: %v", err) + } + case <-udevWatcher.ctx.Done(): + udevWatcher.updateIntervalTicker.Stop() + return + } + } +} + +// reconcileOnce is used to reconcile the state of ENIs attached to the instance +func (udevWatcher *UdevWatcher) reconcileOnce(withRetry bool) error { + links, err := udevWatcher.netlinkClient.LinkList() + if err != nil { + return errors.Wrapf(err, "udev watcher: unable to retrieve network interfaces") + } + + // Return on empty list + if len(links) == 0 { + log.Info("Udev watcher reconciliation: no network interfaces discovered for reconciliation") + return nil + } + + currentState := udevWatcher.buildState(links) + + // NOTE: For correct semantics, this entire function needs to be locked. + // As we postulate the netlinkClient.LinkList() call to be expensive, we allow + // the race here. The state would be corrected during the next reconciliation loop. 
+ + // Add new interfaces next + for mac := range currentState { + if withRetry { + go func(ctx context.Context, macAddress string, timeout time.Duration) { + if err := udevWatcher.sendENIStateChangeWithRetries(ctx, macAddress, timeout); err != nil { + log.Infof("Udev watcher event-handler: unable to send state change: %v", err) + } + }(udevWatcher.ctx, mac, sendENIStateChangeRetryTimeout) + continue + } + if err := udevWatcher.sendENIStateChange(mac); err != nil { + // skip logging status sent error as it's redundant and doesn't really indicate a problem + if strings.Contains(err.Error(), eniStatusSentMsg) { + continue + } else if _, ok := err.(*unmanagedENIError); ok { + log.Debugf("Udev watcher reconciliation: unable to send state change: %v", err) + } else { + log.Warnf("Udev watcher reconciliation: unable to send state change: %v", err) + } + } + } + return nil +} + +// sendENIStateChange handles the eni event from udev or reconcile phase +func (udevWatcher *UdevWatcher) sendENIStateChange(mac string) error { + if mac == "" { + return errors.New("udev watcher send ENI state change: empty mac address") + } + // check if this is an eni required by a task + eni, ok := udevWatcher.agentState.ENIByMac(mac) + if !ok { + return &unmanagedENIError{mac} + } + if eni.IsSent() { + return errors.Errorf("udev watcher send ENI state change: %s: %s", eniStatusSentMsg, eni.String()) + } + if eni.HasExpired() { + // Agent is aware of the ENI, but we decide not to ack it + // as it's ack timeout has expired + udevWatcher.agentState.RemoveENIAttachment(eni.MACAddress) + return errors.Errorf( + "udev watcher send ENI state change: eni status expired, no longer tracking it: %s", + eni.String()) + } + + // We found an ENI, which has the expiration time set in future and + // needs to be acknowledged as having been 'attached' to the Instance + if eni.AttachmentType == apieni.ENIAttachmentTypeInstanceENI { + go udevWatcher.emitInstanceENIAttachedEvent(eni) + } else { + go 
udevWatcher.emitTaskENIAttachedEvent(eni) + } + return nil +} + +// emitTaskENIChangeEvent sends a state change event for a task ENI attachment to the event channel with eni status as +// attached +func (udevWatcher *UdevWatcher) emitTaskENIAttachedEvent(eni *apieni.ENIAttachment) { + eni.Status = apieni.ENIAttached + log.Infof("Emitting task ENI attached event for: %s", eni.String()) + udevWatcher.eniChangeEvent <- api.TaskStateChange{ + TaskARN: eni.TaskARN, + Attachment: eni, + } +} + +// emitInstanceENIChangeEvent sends a state change event for an instance ENI attachment to the event channel with eni +// status as attached +func (udevWatcher *UdevWatcher) emitInstanceENIAttachedEvent(eni *apieni.ENIAttachment) { + eni.Status = apieni.ENIAttached + log.Infof("Emitting instance ENI attached event for: %s", eni.String()) + udevWatcher.eniChangeEvent <- api.NewAttachmentStateChangeEvent(eni) +} + +// buildState is used to build a state of the system for reconciliation +func (udevWatcher *UdevWatcher) buildState(links []netlink.Link) map[string]string { + state := make(map[string]string) + for _, link := range links { + if link.Type() != linkTypeDevice { + // We only care about netlink.Device types. These are created + // by udev like 'lo' and 'eth0'. 
Ignore other link types + continue + } + if link.Attrs().EncapType == encapTypeLoopback { + // Ignore localhost + continue + } + macAddress := link.Attrs().HardwareAddr.String() + if macAddress != "" && macAddress != udevWatcher.primaryMAC { + state[macAddress] = link.Attrs().Name + } + } + return state +} + +// eventHandler is used to manage udev net subsystem events to add/remove interfaces +func (udevWatcher *UdevWatcher) eventHandler() { + // The shutdown channel will be used to terminate the watch for udev events + shutdown := udevWatcher.udevMonitor.Monitor(udevWatcher.events) + for { + select { + case event := <-udevWatcher.events: + subsystem, ok := event.Env[udevSubsystem] + if !ok || subsystem != udevNetSubsystem { + continue + } + if event.Env[udevEventAction] != udevAddEvent { + continue + } + if !networkutils.IsValidNetworkDevice(event.Env[udevDevPath]) { + log.Debugf("Udev watcher event handler: ignoring event for invalid network device: %s", event.String()) + continue + } + netInterface := event.Env[udevInterface] + // GetMACAddres and sendENIStateChangeWithRetries can block the execution + // of this method for a few seconds in the worst-case scenario. 
+ // Execute these within a go-routine + go func(ctx context.Context, dev string, timeout time.Duration) { + log.Debugf("Udev watcher event-handler: add interface: %s", dev) + macAddress, err := networkutils.GetMACAddress(udevWatcher.ctx, macAddressRetryTimeout, + dev, udevWatcher.netlinkClient) + if err != nil { + log.Warnf("Udev watcher event-handler: error obtaining MACAddress for interface %s", dev) + return + } + + if err := udevWatcher.sendENIStateChangeWithRetries(ctx, macAddress, timeout); err != nil { + log.Warnf("Udev watcher event-handler: unable to send state change: %v", err) + } + }(udevWatcher.ctx, netInterface, sendENIStateChangeRetryTimeout) + case <-udevWatcher.ctx.Done(): + log.Info("Stopping udev event handler") + // Send the shutdown signal and close the connection + shutdown <- true + if err := udevWatcher.udevMonitor.Close(); err != nil { + log.Warnf("Unable to close the udev monitoring socket: %v", err) + } + return + } + } +} + +// sendENIStateChangeWithRetries invokes the sendENIStateChange method, with backoff and +// retries. Retries are only effective if sendENIStateChange returns an unmanagedENIError. +// We're effectively waiting for the ENI attachment message from ACS for a network device +// at this point of time. +func (udevWatcher *UdevWatcher) sendENIStateChangeWithRetries(parentCtx context.Context, + macAddress string, + timeout time.Duration) error { + backoff := retry.NewExponentialBackoff(sendENIStateChangeBackoffMin, sendENIStateChangeBackoffMax, + sendENIStateChangeBackoffJitter, sendENIStateChangeBackoffMultiple) + ctx, cancel := context.WithTimeout(parentCtx, timeout) + defer cancel() + + err := retry.RetryWithBackoffCtx(ctx, backoff, func() error { + sendErr := udevWatcher.sendENIStateChange(macAddress) + if sendErr != nil { + if _, ok := sendErr.(*unmanagedENIError); ok { + // This can happen in two scenarios: (1) the ENI is indeed not managed by ECS (i.e. 
attached manually + // by customer); (2) this is an ENI attached by ECS but we have not yet received its information from + // ACS. + log.Debugf("Not sending state change because we don't know about the ENI: %v", sendErr) + return sendErr + } + // Not unmanagedENIError. Stop retrying when this happens + return apierrors.NewRetriableError(apierrors.NewRetriable(false), sendErr) + } + + return nil + }) + + if err != nil { + return err + } + // RetryWithBackoffCtx returns nil when the context is cancelled. Check if there was + // a timeout here. TODO: Fix RetryWithBackoffCtx to return ctx.Err() on context Done() + if err = ctx.Err(); err != nil { + return errors.Wrapf(err, + "udev watcher send ENI state change: timed out waiting for eni '%s' in state", macAddress) + } + + return nil +} diff --git a/agent/eni/watcher/watcher_linux_test.go b/agent/eni/watcher/watcher_linux_test.go new file mode 100644 index 00000000000..329aedd04bb --- /dev/null +++ b/agent/eni/watcher/watcher_linux_test.go @@ -0,0 +1,659 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package watcher + +import ( + "context" + "errors" + "net" + "sync" + "testing" + "time" + + "github.com/deniswernert/udev" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vishvananda/netlink" + + "github.com/aws/amazon-ecs-agent/agent/api" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/statechange" + + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_netlinkwrapper "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper/mocks" + mock_udevwrapper "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper/mocks" +) + +const ( + randomDevice = "eth1" + primaryMAC = "00:0a:95:9d:68:61" + randomMAC = "00:0a:95:9d:68:16" + randomDevPath = " ../../devices/pci0000:00/0000:00:03.0/net/eth1" + incorrectDevPath = "../../devices/totally/wrong/net/path" +) + +// TestWatcherInit checks the sanity of watcher initialization +func TestWatcherInit(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + parsedMAC, err := net.ParseMAC(randomMAC) + assert.NoError(t, err) + + primaryMACAddr, err := net.ParseMAC("00:0a:95:9d:68:61") + assert.NoError(t, err) + + taskEngineState := dockerstate.NewTaskEngineState() + taskEngineState.AddENIAttachment(&apieni.ENIAttachment{ + MACAddress: randomMAC, + AttachStatusSent: false, + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + }) + eventChannel := make(chan statechange.Event) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + + // Init() uses netlink.LinkList() to build initial state + mockNetlink.EXPECT().LinkList().Return([]netlink.Link{ + &netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: parsedMAC, + Name: 
randomDevice, + }, + }, + &netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: primaryMACAddr, + Name: "lo", + EncapType: "loopback", + }, + }, + }, nil) + + waitForEvents := sync.WaitGroup{} + waitForEvents.Add(1) + var event statechange.Event + go func() { + event = <-eventChannel + assert.NotNil(t, event.(api.TaskStateChange).Attachment) + assert.Equal(t, randomMAC, event.(api.TaskStateChange).Attachment.MACAddress) + waitForEvents.Done() + }() + watcher.Init() + + waitForEvents.Wait() + + select { + case <-eventChannel: + t.Errorf("Expect no more state change event") + default: + } +} + +// TestInitWithNetlinkError checks the netlink linklist error path +func TestInitWithNetlinkError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + mockNetlink.EXPECT().LinkList().Return([]netlink.Link{}, + errors.New("Dummy Netlink LinkList error")) + + taskEngineState := dockerstate.NewTaskEngineState() + eventChannel := make(chan statechange.Event) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + err := watcher.Init() + assert.Error(t, err) +} + +// TestWatcherInitWithEmptyList checks sanity of watcher upon empty list +func TestWatcherInitWithEmptyList(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + taskEngineState := dockerstate.NewTaskEngineState() + eventChannel := make(chan statechange.Event) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + + // Init() uses netlink.LinkList() to build initial state + mockNetlink.EXPECT().LinkList().Return([]netlink.Link{}, nil) + + err := watcher.Init() + assert.NoError(t, err) +} + +// TestReconcileENIs tests the reconciliation code path +func 
TestReconcileENIs(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + taskEngineState := dockerstate.NewTaskEngineState() + eventChannel := make(chan statechange.Event) + + taskEngineState.AddENIAttachment(getMockAttachment()) + + mockNetlink.EXPECT().LinkList().Return(getMockNetLinkResponse(t), nil) + + var event statechange.Event + done := make(chan struct{}) + go func() { + event = <-eventChannel + done <- struct{}{} + }() + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + require.NoError(t, watcher.reconcileOnce(false)) + + <-done + assert.NotNil(t, event.(api.TaskStateChange).Attachment) + assert.Equal(t, randomMAC, event.(api.TaskStateChange).Attachment.MACAddress) + + select { + case <-eventChannel: + t.Errorf("Expect no more state change event") + default: + } +} + +func TestReconcileENIsWithRetry(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + mockState := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + gomock.InOrder( + mockNetlink.EXPECT().LinkList().Return(getMockNetLinkResponse(t), nil), + // Let the first one fail to check that we retry on failure. 
+ mockState.EXPECT().ENIByMac(gomock.Any()).Return(nil, false), + mockState.EXPECT().ENIByMac(gomock.Any()).Return(getMockAttachment(), true), + ) + + var event statechange.Event + done := make(chan struct{}) + go func() { + event = <-eventChannel + done <- struct{}{} + }() + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, mockState, eventChannel) + require.NoError(t, watcher.reconcileOnce(true)) + + <-done + require.NotNil(t, event.(api.TaskStateChange).Attachment) + assert.Equal(t, randomMAC, event.(api.TaskStateChange).Attachment.MACAddress) + + select { + case <-eventChannel: + t.Errorf("Expect no more state change event") + default: + } +} + +func getMockAttachment() *apieni.ENIAttachment { + return &apieni.ENIAttachment{ + MACAddress: randomMAC, + AttachStatusSent: false, + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + } +} + +func getMockNetLinkResponse(t *testing.T) []netlink.Link { + parsedMAC, err := net.ParseMAC(randomMAC) + require.NoError(t, err) + + primaryMACAddr, err := net.ParseMAC("00:0a:95:9d:68:61") + require.NoError(t, err) + + return []netlink.Link{ + &netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: parsedMAC, + Name: randomDevice, + }, + }, + &netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: primaryMACAddr, + Name: "lo", + EncapType: "loopback", + }, + }, + } +} + +// TestReconcileENIsWithNetlinkErr tests reconciliation with netlink error +func TestReconcileENIsWithNetlinkErr(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + mockNetlink.EXPECT().LinkList().Return([]netlink.Link{}, + errors.New("Dummy Netlink LinkList error")) + + taskEngineState := dockerstate.NewTaskEngineState() + eventChannel := make(chan statechange.Event) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + assert.Error(t, 
watcher.reconcileOnce(false)) + + select { + case <-eventChannel: + t.Errorf("Expect no more state change event") + default: + } +} + +// TestReconcileENIsWithEmptyList checks sanity on empty list from Netlink +func TestReconcileENIsWithEmptyList(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.Background() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + + taskEngineState := dockerstate.NewTaskEngineState() + eventChannel := make(chan statechange.Event) + + mockNetlink.EXPECT().LinkList().Return([]netlink.Link{}, nil) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, nil, taskEngineState, eventChannel) + assert.NoError(t, watcher.reconcileOnce(false)) + watcher.Stop() + + select { + case <-eventChannel: + t.Errorf("Expect no more state change event") + default: + } +} + +// getUdevEventDummy builds a dummy udev.UEvent object +func getUdevEventDummy(action, subsystem, devpath string) udev.UEvent { + m := make(map[string]string, 5) + m["INTERFACE"] = "eth1" + m["IFINDEX"] = "1" + m["ACTION"] = action + m["SUBSYSTEM"] = subsystem + m["DEVPATH"] = devpath + event := udev.UEvent{ + Env: m, + } + return event +} + +// TestUdevAddEvent tests adding a device from an udev event +func TestUdevAddEvent(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.TODO() + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + mockUdev := mock_udevwrapper.NewMockUdev(mockCtrl) + parsedMAC, _ := net.ParseMAC(randomMAC) + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, mockNetlink, mockUdev, mockStateManager, eventChannel) + + shutdown := make(chan bool) + gomock.InOrder( + mockUdev.EXPECT().Monitor(watcher.events).Return(shutdown), + mockNetlink.EXPECT().LinkByName(randomDevice).Return( + &netlink.Device{ + 
LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: parsedMAC, + Name: randomDevice, + }, + }, nil), + mockStateManager.EXPECT().ENIByMac(randomMAC).Return( + &apieni.ENIAttachment{ExpiresAt: time.Unix(time.Now().Unix()+10, 0)}, true), + ) + + // Spin off event handler + go watcher.eventHandler() + // Send event to channel + event := getUdevEventDummy(udevAddEvent, udevNetSubsystem, randomDevPath) + watcher.events <- &event + + eniChangeEvent := <-eventChannel + taskStateChange, ok := eniChangeEvent.(api.TaskStateChange) + require.True(t, ok) + assert.Equal(t, apieni.ENIAttached, taskStateChange.Attachment.Status) + + var waitForClose sync.WaitGroup + waitForClose.Add(2) + mockUdev.EXPECT().Close().Do(func() { + waitForClose.Done() + }).Return(nil) + go func() { + <-shutdown + waitForClose.Done() + }() + + go watcher.Stop() + waitForClose.Wait() +} + +// TestUdevSubsystemFilter checks the subsystem filter in the event handler +func TestUdevSubsystemFilter(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.TODO() + // Setup Mock Udev + mockUdev := mock_udevwrapper.NewMockUdev(mockCtrl) + + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, nil, mockUdev, nil, nil) + + shutdown := make(chan bool) + mockUdev.EXPECT().Monitor(watcher.events).Return(shutdown) + + // Spin off event handler + go watcher.eventHandler() + // Send event to channel + // This event shouldn't trigger the statemanager to handle HandleENIEvent + event := getUdevEventDummy(udevAddEvent, udevPCISubsystem, randomDevPath) + watcher.events <- &event + + var waitForClose sync.WaitGroup + waitForClose.Add(2) + mockUdev.EXPECT().Close().Do(func() { + waitForClose.Done() + }).Return(nil) + go func() { + <-shutdown + waitForClose.Done() + }() + + go watcher.Stop() + waitForClose.Wait() +} + +// TestUdevAddEventWithInvalidInterface attempts to add a device without +// a well defined interface +func TestUdevAddEventWithInvalidInterface(t *testing.T) { + 
mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.TODO() + + // Setup Mock Udev + mockUdev := mock_udevwrapper.NewMockUdev(mockCtrl) + // Create Watcher + watcher := newWatcher(ctx, primaryMAC, nil, mockUdev, nil, nil) + + shutdown := make(chan bool) + mockUdev.EXPECT().Monitor(watcher.events).Return(shutdown) + + // Spin off event handler + go watcher.eventHandler() + + // Send event to channel + event := getUdevEventDummy(udevAddEvent, udevNetSubsystem, incorrectDevPath) + watcher.events <- &event + + var waitForClose sync.WaitGroup + waitForClose.Add(2) + mockUdev.EXPECT().Close().Do(func() { + waitForClose.Done() + }).Return(nil) + go func() { + <-shutdown + waitForClose.Done() + }() + + go watcher.Stop() + waitForClose.Wait() +} + +// TestUdevAddEventWithoutMACAdress attempts to add a device without +// a MACAddress based on an udev event +func TestUdevAddEventWithoutMACAdress(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + ctx := context.TODO() + // Setup Mock Netlink + mockNetlink := mock_netlinkwrapper.NewMockNetLink(mockCtrl) + // Setup Mock Udev + mockUdev := mock_udevwrapper.NewMockUdev(mockCtrl) + + watcher := newWatcher(ctx, primaryMAC, mockNetlink, mockUdev, nil, nil) + + var invoked sync.WaitGroup + invoked.Add(1) + + shutdown := make(chan bool) + gomock.InOrder( + mockUdev.EXPECT().Monitor(watcher.events).Return(shutdown), + mockNetlink.EXPECT().LinkByName(randomDevice).Do(func(device string) { + invoked.Done() + }).Return( + &netlink.Device{}, + errors.New("Dummy Netlink LinkByName error")), + ) + + // Spin off event handler + go watcher.eventHandler() + + // Send event to channel + event := getUdevEventDummy(udevAddEvent, udevNetSubsystem, randomDevPath) + watcher.events <- &event + invoked.Wait() + + var waitForClose sync.WaitGroup + waitForClose.Add(2) + mockUdev.EXPECT().Close().Do(func() { + waitForClose.Done() + }).Return(nil) + go func() { + <-shutdown + waitForClose.Done() + 
}() + + go watcher.Stop() + waitForClose.Wait() +} + +func TestSendENIStateChange(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, eventChannel) + + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + }, true) + + go watcher.sendENIStateChange(randomMAC) + + eniChangeEvent := <-eventChannel + taskStateChange, ok := eniChangeEvent.(api.TaskStateChange) + require.True(t, ok) + assert.Equal(t, apieni.ENIAttached, taskStateChange.Attachment.Status) +} + +func TestSendENIStateChangeUnmanaged(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, nil) + + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(nil, false) + assert.Error(t, watcher.sendENIStateChange(randomMAC)) +} + +func TestSendENIStateChangeAlreadySent(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, nil) + + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ + AttachStatusSent: true, + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + MACAddress: randomMAC, + }, true) + + assert.Error(t, watcher.sendENIStateChange(randomMAC)) +} + +func TestSendENIStateChangeExpired(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, nil) + + gomock.InOrder( 
+ mockStateManager.EXPECT().ENIByMac(randomMAC).Return( + &apieni.ENIAttachment{ + AttachStatusSent: false, + ExpiresAt: time.Unix(time.Now().Unix()-10, 0), + MACAddress: randomMAC, + }, true), + mockStateManager.EXPECT().RemoveENIAttachment(randomMAC), + ) + + assert.Error(t, watcher.sendENIStateChange(randomMAC)) +} + +func TestSendENIStateChangeWithRetries(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, eventChannel) + + gomock.InOrder( + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(nil, false), + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + }, true), + ) + + ctx := context.TODO() + go watcher.sendENIStateChangeWithRetries(ctx, randomMAC, sendENIStateChangeRetryTimeout) + + eniChangeEvent := <-eventChannel + taskStateChange, ok := eniChangeEvent.(api.TaskStateChange) + require.True(t, ok) + assert.Equal(t, apieni.ENIAttached, taskStateChange.Attachment.Status) +} + +func TestSendENIStateChangeWithRetriesDoesNotRetryExpiredENI(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, nil) + + gomock.InOrder( + // ENIByMAC returns an error for exipred ENI attachment, which should + // mean that it doesn't get retried. 
+ mockStateManager.EXPECT().ENIByMac(randomMAC).Return( + &apieni.ENIAttachment{ + AttachStatusSent: false, + ExpiresAt: time.Unix(time.Now().Unix()-10, 0), + MACAddress: randomMAC, + }, true), + mockStateManager.EXPECT().RemoveENIAttachment(randomMAC), + ) + + ctx := context.TODO() + assert.Error(t, watcher.sendENIStateChangeWithRetries( + ctx, randomMAC, sendENIStateChangeRetryTimeout)) +} + +// TestSendENIStateChangeWithAttachmentTypeInstanceENI tests that we send the attachment state change +// of an instance level eni as an attachment state change +func TestSendENIStateChangeWithAttachmentTypeInstanceENI(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, eventChannel) + + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ + AttachmentType: apieni.ENIAttachmentTypeInstanceENI, + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + }, true) + + go watcher.sendENIStateChange(randomMAC) + + eniChangeEvent := <-eventChannel + attachmentStateChange, ok := eniChangeEvent.(api.AttachmentStateChange) + require.True(t, ok) + assert.Equal(t, apieni.ENIAttached, attachmentStateChange.Attachment.Status) +} + +// TestSendENIStateChangeWithAttachmentTypeTaskENI tests that we send the attachment state change +// of a regular eni as a task state change +func TestSendENIStateChangeWithAttachmentTypeTaskENI(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockStateManager := mock_dockerstate.NewMockTaskEngineState(mockCtrl) + eventChannel := make(chan statechange.Event) + + watcher := newWatcher(context.TODO(), primaryMAC, nil, nil, mockStateManager, eventChannel) + + mockStateManager.EXPECT().ENIByMac(randomMAC).Return(&apieni.ENIAttachment{ + AttachmentType: apieni.ENIAttachmentTypeTaskENI, + 
ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + }, true) + + go watcher.sendENIStateChange(randomMAC) + + eniChangeEvent := <-eventChannel + taskStateChange, ok := eniChangeEvent.(api.TaskStateChange) + require.True(t, ok) + assert.Equal(t, apieni.ENIAttached, taskStateChange.Attachment.Status) +} diff --git a/agent/eventhandler/attachment_handler.go b/agent/eventhandler/attachment_handler.go new file mode 100644 index 00000000000..e4e8222babc --- /dev/null +++ b/agent/eventhandler/attachment_handler.go @@ -0,0 +1,156 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eventhandler + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/cihub/seelog" +) + +// AttachmentEventHandler is a handler that is responsible for submitting attachment state change events +// to backend +type AttachmentEventHandler struct { + // backoff is the backoff object used in submitting attachment state change + backoff retry.Backoff + + // dataClient is used to save any changes to an attachment's SentStatus + dataClient data.Client + + // attachmentARNToHandler is a map from attachment ARN to the attachmentHandler that is + // responsible for handling the attachment + attachmentARNToHandler map[string]*attachmentHandler + + // lock is used to safely access the attachmentARNToHandler map + lock sync.Mutex + + client api.ECSClient + ctx context.Context +} + +// attachmentHandler is responsible for handling a certain attachment +type attachmentHandler struct { + // attachmentARN is the arn of the attachment that the attachmentHandler is handling + attachmentARN string + + // dataClient is used to save any changes to an attachment's SentStatus + dataClient data.Client + + // backoff is the backoff object used in submitting attachment state change + backoff retry.Backoff + + // lock is used to ensure that the attached status of an attachment won't be sent multiple times + lock sync.Mutex + + client api.ECSClient + ctx context.Context +} + +// NewAttachmentEventHandler returns a new AttachmentEventHandler object +func NewAttachmentEventHandler(ctx context.Context, + dataClient data.Client, + client api.ECSClient) *AttachmentEventHandler { + return &AttachmentEventHandler{ + ctx: ctx, + client: client, + dataClient: dataClient, + attachmentARNToHandler: make(map[string]*attachmentHandler), + backoff: 
retry.NewExponentialBackoff(submitStateBackoffMin, submitStateBackoffMax, + submitStateBackoffJitterMultiple, submitStateBackoffMultiple), + } +} + +// AddStateChangeEvent adds a state change event to AttachmentEventHandler for it to handle +func (eventHandler *AttachmentEventHandler) AddStateChangeEvent(change statechange.Event) error { + if change.GetEventType() != statechange.AttachmentEvent { + return fmt.Errorf("eventhandler: attachment handler received unrecognized event type: %d", change.GetEventType()) + } + event, ok := change.(api.AttachmentStateChange) + if !ok { + return errors.New("eventhandler: unable to get attachment event from state change event") + } + + if event.Attachment == nil { + return fmt.Errorf("eventhandler: received malformed attachment state change event: %v", event) + } + + attachmentARN := event.Attachment.AttachmentARN + eventHandler.lock.Lock() + if _, ok := eventHandler.attachmentARNToHandler[attachmentARN]; !ok { + eventHandler.attachmentARNToHandler[attachmentARN] = &attachmentHandler{ + attachmentARN: attachmentARN, + dataClient: eventHandler.dataClient, + client: eventHandler.client, + ctx: eventHandler.ctx, + backoff: eventHandler.backoff, + } + } + eventHandler.lock.Unlock() + + attachmentHandler := eventHandler.attachmentARNToHandler[attachmentARN] + go attachmentHandler.submitAttachmentEvent(&event) + + return nil +} + +// submitAttachmentEvent submits an attachment event to backend +func (handler *attachmentHandler) submitAttachmentEvent(attachmentChange *api.AttachmentStateChange) { + // we need to lock the attachment handler to avoid sending an attachment state change for an attachment + // multiple times (this can happen when udev watcher sends multiple attached events for a certain attachment, + // for example one from udev event and one from reconciliation loop) + seelog.Debugf("AttachmentHandler: acquiring attachment lock before sending attachment state change for attachment %s", handler.attachmentARN) + 
handler.lock.Lock() + seelog.Debugf("AttachmentHandler: acquired attachment lock for attachment %s", handler.attachmentARN) + defer handler.lock.Unlock() + + retry.RetryWithBackoffCtx(handler.ctx, handler.backoff, func() error { + return handler.submitAttachmentEventOnce(attachmentChange) + }) +} + +func (handler *attachmentHandler) submitAttachmentEventOnce(attachmentChange *api.AttachmentStateChange) error { + if !attachmentChangeShouldBeSent(attachmentChange) { + seelog.Debugf("AttachmentHandler: not sending attachment state change [%s] as it should not be sent", attachmentChange.String()) + // if the attachment state change should not be sent, we don't need to retry anymore so return nil here + return nil + } + + seelog.Infof("AttachmentHandler: sending attachment state change: %s", attachmentChange.String()) + if err := handler.client.SubmitAttachmentStateChange(*attachmentChange); err != nil { + seelog.Errorf("AttachmentHandler: error submitting attachment state change [%s]: %v", attachmentChange.String(), err) + return err + } + seelog.Debugf("AttachmentHandler: submitted attachment state change: %s", attachmentChange.String()) + + attachmentChange.Attachment.SetSentStatus() + attachmentChange.Attachment.StopAckTimer() + + err := handler.dataClient.SaveENIAttachment(attachmentChange.Attachment) + if err != nil { + seelog.Errorf("AttachmentHandler: error saving state after submitted attachment state change [%s]: %v", attachmentChange.String(), err) + } + return nil +} + +// attachmentChangeShouldBeSent checks whether an attachment state change should be sent to backend +func attachmentChangeShouldBeSent(attachmentChange *api.AttachmentStateChange) bool { + return !attachmentChange.Attachment.HasExpired() && !attachmentChange.Attachment.IsSent() +} diff --git a/agent/eventhandler/attachment_handler_test.go b/agent/eventhandler/attachment_handler_test.go new file mode 100644 index 00000000000..98a83579a53 --- /dev/null +++ 
b/agent/eventhandler/attachment_handler_test.go @@ -0,0 +1,274 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eventhandler + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + // small backoff value used by unit test + xSubmitStateBackoffMin = 10 * time.Millisecond + xSubmitStateBackoffMax = 30 * time.Millisecond + xSubmitStateBackoffJitterMultiple = 0.20 + xSubmitStateBackoffMultiple = 1.3 + + attachmentARN = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" +) + +func TestSendAttachmentEvent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + attachmentEvent := attachmentEvent(attachmentARN) + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent.Attachment.StartTimer(timeoutFunc)) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewAttachmentEventHandler(ctx, data.NewNoopClient(), 
client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Return(nil).Do(func(change api.AttachmentStateChange) { + assert.NotNil(t, change.Attachment) + assert.Equal(t, attachmentARN, change.Attachment.AttachmentARN) + wg.Done() + }) + + require.NoError(t, handler.AddStateChangeEvent(attachmentEvent)) + + wg.Wait() +} + +func TestSendAttachmentEventRetries(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + attachmentEvent := attachmentEvent(attachmentARN) + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent.Attachment.StartTimer(timeoutFunc)) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + ctx, cancel := context.WithCancel(context.Background()) + handler := NewAttachmentEventHandler(ctx, dataClient, client) + // use smaller backoff value for unit test + handler.backoff = retry.NewExponentialBackoff(xSubmitStateBackoffMin, xSubmitStateBackoffMax, + xSubmitStateBackoffJitterMultiple, xSubmitStateBackoffMultiple) + defer cancel() + + var wg sync.WaitGroup + wg.Add(2) + + retriable := apierrors.NewRetriableError(apierrors.NewRetriable(true), errors.New("test")) + + gomock.InOrder( + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Return(retriable).Do(func(interface{}) { wg.Done() }), + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Return(nil).Do(func(change api.AttachmentStateChange) { + assert.NotNil(t, change.Attachment) + assert.Equal(t, attachmentARN, change.Attachment.AttachmentARN) + wg.Done() + }), + ) + + require.NoError(t, handler.AddStateChangeEvent(attachmentEvent)) + + wg.Wait() +} + +func TestSendMultipleAttachmentEventsDifferentAttachments(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + attachmentEvent1 := attachmentEvent("attachmentARN1") + 
attachmentEvent2 := attachmentEvent("attachmentARN2") + attachmentEvent3 := attachmentEvent("attachmentARN3") + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent1.Attachment.StartTimer(timeoutFunc)) + assert.NoError(t, attachmentEvent2.Attachment.StartTimer(timeoutFunc)) + assert.NoError(t, attachmentEvent3.Attachment.StartTimer(timeoutFunc)) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewAttachmentEventHandler(ctx, data.NewNoopClient(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(3) + + submittedAttachments := make(map[string]bool) // note down submitted attachments + mapLock := sync.Mutex{} // lock to protect the above map + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Times(3).Return(nil).Do(func(change api.AttachmentStateChange) { + mapLock.Lock() + defer mapLock.Unlock() + + submittedAttachments[change.Attachment.AttachmentARN] = true + wg.Done() + }) + + require.NoError(t, handler.AddStateChangeEvent(attachmentEvent1)) + require.NoError(t, handler.AddStateChangeEvent(attachmentEvent2)) + require.NoError(t, handler.AddStateChangeEvent(attachmentEvent3)) + + wg.Wait() + assert.Equal(t, 3, len(submittedAttachments)) + assert.Contains(t, submittedAttachments, "attachmentARN1") + assert.Contains(t, submittedAttachments, "attachmentARN2") + assert.Contains(t, submittedAttachments, "attachmentARN3") +} + +func TestSubmitAttachmentEventSucceeds(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + attachmentEvent := attachmentEvent(attachmentARN) + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent.Attachment.StartTimer(timeoutFunc)) + + ctx, cancel := context.WithCancel(context.Background()) + handler := &attachmentHandler{ + client: client, + dataClient: 
dataClient, + ctx: ctx, + } + defer cancel() + + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Return(nil).Do(func(change api.AttachmentStateChange) { + assert.NotNil(t, change.Attachment) + assert.Equal(t, attachmentARN, change.Attachment.AttachmentARN) + }) + + handler.submitAttachmentEvent(&attachmentEvent) + + assert.True(t, attachmentEvent.Attachment.AttachStatusSent) + res, err := dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, res, 1) +} + +func TestSubmitAttachmentEventAttachmentExpired(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + attachmentEvent := attachmentEvent(attachmentARN) + attachmentEvent.Attachment.ExpiresAt = time.Now().Add(100 * time.Millisecond) + + // wait until eni attachment expires + time.Sleep(200 * time.Millisecond) + + ctx, cancel := context.WithCancel(context.Background()) + handler := &attachmentHandler{ + client: client, + ctx: ctx, + } + defer cancel() + + handler.submitAttachmentEvent(&attachmentEvent) + + // no SubmitAttachmentStateChange should happen and attach status should not be sent + assert.False(t, attachmentEvent.Attachment.AttachStatusSent) +} + +func TestSubmitAttachmentEventAttachmentIsSent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + attachmentEvent := attachmentEvent(attachmentARN) + attachmentEvent.Attachment.SetSentStatus() + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent.Attachment.StartTimer(timeoutFunc)) + + ctx, cancel := context.WithCancel(context.Background()) + handler := &attachmentHandler{ + client: client, + ctx: ctx, + } + defer cancel() + + handler.submitAttachmentEvent(&attachmentEvent) + + // no SubmitAttachmentStateChange should happen + attachmentEvent.Attachment.StopAckTimer() +} + +func TestAttachmentChangeShouldBeSent(t *testing.T) { + 
attachmentEvent := attachmentEvent(attachmentARN) + assert.True(t, attachmentChangeShouldBeSent(&attachmentEvent)) +} + +func TestAttachmentChangeShouldBeSentAttachmentExpired(t *testing.T) { + attachmentEvent := attachmentEvent(attachmentARN) + attachmentEvent.Attachment.ExpiresAt = time.Now() + time.Sleep(10 * time.Millisecond) + + assert.False(t, attachmentChangeShouldBeSent(&attachmentEvent)) +} + +func TestAttachmentChangeShouldBeSentAttachmentIsSent(t *testing.T) { + attachmentEvent := attachmentEvent(attachmentARN) + attachmentEvent.Attachment.SetSentStatus() + assert.False(t, attachmentChangeShouldBeSent(&attachmentEvent)) +} + +func attachmentEvent(attachmentARN string) api.AttachmentStateChange { + return api.AttachmentStateChange{ + Attachment: &apieni.ENIAttachment{ + AttachmentType: apieni.ENIAttachmentTypeInstanceENI, + AttachmentARN: attachmentARN, + AttachStatusSent: false, + ExpiresAt: time.Now().Add(time.Second), + }, + } +} diff --git a/agent/eventhandler/handler.go b/agent/eventhandler/handler.go new file mode 100644 index 00000000000..80451d2072c --- /dev/null +++ b/agent/eventhandler/handler.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eventhandler + +import ( + "context" + "fmt" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/cihub/seelog" +) + +// HandleEngineEvents handles state change events from the state change event channel by sending them to the +// responsible event handler +func HandleEngineEvents( + ctx context.Context, + taskEngine engine.TaskEngine, + client api.ECSClient, + taskHandler *TaskHandler, + attachmentEventHandler *AttachmentEventHandler) { + for { + stateChangeEvents := taskEngine.StateChangeEvents() + + for stateChangeEvents != nil { + select { + case <-ctx.Done(): + seelog.Infof("Exiting the engine event handler.") + return + case event, ok := <-stateChangeEvents: + if !ok { + stateChangeEvents = nil + seelog.Error("Unable to handle state change event. The events channel is closed") + break + } + err := handleEngineEvent(event, client, taskHandler, attachmentEventHandler) + if err != nil { + seelog.Errorf("Handler unable to add state change event %v: %v", event, err) + } + } + } + } +} + +func handleEngineEvent(event statechange.Event, client api.ECSClient, taskHandler *TaskHandler, + attachmentEventHandler *AttachmentEventHandler) error { + switch event.GetEventType() { + case statechange.TaskEvent, statechange.ContainerEvent, statechange.ManagedAgentEvent: + return taskHandler.AddStateChangeEvent(event, client) + case statechange.AttachmentEvent: + return attachmentEventHandler.AddStateChangeEvent(event) + default: + return fmt.Errorf("unrecognized event type: %d", event.GetEventType()) + } +} diff --git a/agent/eventhandler/handler_test.go b/agent/eventhandler/handler_test.go new file mode 100644 index 00000000000..647bd719dc6 --- /dev/null +++ b/agent/eventhandler/handler_test.go @@ -0,0 +1,74 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eventhandler + +import ( + "context" + "sync" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/api" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestHandleEngineEvent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + taskHandler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + attachmentHandler := NewAttachmentEventHandler(ctx, data.NewNoopClient(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(2) + + contEvent1 := containerEvent(taskARN) + contEvent2 := containerEvent(taskARN) + taskEvent := taskEvent(taskARN) + attachmentEvent := attachmentEvent("attachmentARN") + + timeoutFunc := func() { + t.Error("Timeout sending ENI attach status") + } + assert.NoError(t, attachmentEvent.Attachment.StartTimer(timeoutFunc)) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, 2, len(change.Containers)) + assert.Equal(t, taskARN, change.Containers[0].TaskArn) + assert.Equal(t, taskARN, change.Containers[1].TaskArn) + wg.Done() + }) + + client.EXPECT().SubmitAttachmentStateChange(gomock.Any()).Do(func(change 
api.AttachmentStateChange) { + assert.NotNil(t, change.Attachment) + assert.Equal(t, "attachmentARN", change.Attachment.AttachmentARN) + wg.Done() + }) + + handleEngineEvent(contEvent1, client, taskHandler, attachmentHandler) + handleEngineEvent(contEvent2, client, taskHandler, attachmentHandler) + handleEngineEvent(taskEvent, client, taskHandler, attachmentHandler) + handleEngineEvent(attachmentEvent, client, taskHandler, attachmentHandler) + + wg.Wait() +} diff --git a/agent/eventhandler/task_handler.go b/agent/eventhandler/task_handler.go new file mode 100644 index 00000000000..9301ffbb12d --- /dev/null +++ b/agent/eventhandler/task_handler.go @@ -0,0 +1,451 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eventhandler + +import ( + "container/list" + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/metrics" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/cihub/seelog" +) + +const ( + // concurrentEventCalls is the maximum number of tasks that may be handled at + // once by the TaskHandler + concurrentEventCalls = 10 + + // drainEventsFrequency is the frequency at which unsent events batched + // by the task handler are sent to the backend + minDrainEventsFrequency = 10 * time.Second + maxDrainEventsFrequency = 30 * time.Second + + submitStateBackoffMin = time.Second + submitStateBackoffMax = 30 * time.Second + submitStateBackoffJitterMultiple = 0.20 + submitStateBackoffMultiple = 1.3 +) + +// TaskHandler encapsulates the map of a task arn to task and container events +// associated with said task +type TaskHandler struct { + // submitSemaphore for the number of tasks that may be handled at once + submitSemaphore utils.Semaphore + // taskToEvents is arn:*eventList map so events may be serialized per task + tasksToEvents map[string]*taskSendableEvents + // tasksToContainerStates is used to collect container events + // between task transitions + tasksToContainerStates map[string][]api.ContainerStateChange + // tasksToManagedAgentStates is used to collect managed agent events + tasksToManagedAgentStates map[string][]api.ManagedAgentStateChange + // taskHandlerLock is used to safely access the following maps: + // * taskToEvents + // * tasksToContainerStates + lock sync.RWMutex + + // dataClient is used to 
save changes to database, mainly to save + // changes of a task or container's SentStatus. + dataClient data.Client + + // min and max drain events frequency refer to the range of + // time over which a call to SubmitTaskStateChange is made. + // The actual duration is randomly distributed between these + // two + minDrainEventsFrequency time.Duration + maxDrainEventsFrequency time.Duration + + state dockerstate.TaskEngineState + client api.ECSClient + ctx context.Context +} + +// taskSendableEvents is used to group all events for a task +type taskSendableEvents struct { + // events is a list of *sendableEvents. We treat this as queue, where + // new events are added to the back of the queue and old events are + // drained from the front. `sendChange` pushes an event to the back of + // the queue. An event is removed from the queue in `submitFirstEvent` + events *list.List + // sending will check whether the list is already being handled + sending bool + //eventsListLock locks both the list and sending bool + lock sync.Mutex + // createdAt is a timestamp for when the event list was created + createdAt time.Time + // taskARN is the task arn that the event list is associated with + taskARN string +} + +// NewTaskHandler returns a pointer to TaskHandler +func NewTaskHandler(ctx context.Context, + dataClient data.Client, + state dockerstate.TaskEngineState, + client api.ECSClient) *TaskHandler { + // Create a handler and start the periodic event drain loop + taskHandler := &TaskHandler{ + ctx: ctx, + tasksToEvents: make(map[string]*taskSendableEvents), + submitSemaphore: utils.NewSemaphore(concurrentEventCalls), + tasksToContainerStates: make(map[string][]api.ContainerStateChange), + tasksToManagedAgentStates: make(map[string][]api.ManagedAgentStateChange), + dataClient: dataClient, + state: state, + client: client, + minDrainEventsFrequency: minDrainEventsFrequency, + maxDrainEventsFrequency: maxDrainEventsFrequency, + } + go taskHandler.startDrainEventsTicker() + + 
return taskHandler +} + +// AddStateChangeEvent queues up the state change event to be sent to ECS. +// If the event is for a container state change, it just gets added to the +// handler.tasksToContainerStates map. +// If the event is for a managed agent state change, it just gets added to the +// handler.tasksToManagedAgentStates map. +// If the event is for task state change, it triggers the non-blocking +// handler.submitTaskEvents method to submit the batched container state +// changes and the task state change to ECS +func (handler *TaskHandler) AddStateChangeEvent(change statechange.Event, client api.ECSClient) error { + handler.lock.Lock() + defer handler.lock.Unlock() + switch change.GetEventType() { + case statechange.TaskEvent: + event, ok := change.(api.TaskStateChange) + if !ok { + return errors.New("eventhandler: unable to get task event from state change event") + } + // Task event: gather all the container and managed agent events and send them + // to ECS by invoking the async submitTaskEvents method from + // the sendable event list object + handler.flushBatchUnsafe(&event, client) + return nil + + case statechange.ContainerEvent: + event, ok := change.(api.ContainerStateChange) + if !ok { + return errors.New("eventhandler: unable to get container event from state change event") + } + handler.batchContainerEventUnsafe(event) + return nil + + case statechange.ManagedAgentEvent: + event, ok := change.(api.ManagedAgentStateChange) + if !ok { + return errors.New("eventhandler: unable to get managed agent event from state change event") + } + + handler.batchManagedAgentEventUnsafe(event) + return nil + + default: + return errors.New("eventhandler: unable to determine event type from state change event") + } +} + +// startDrainEventsTicker starts a ticker that periodically drains the events queue +// by submitting state change events to the ECS backend +func (handler *TaskHandler) startDrainEventsTicker() { + derivedCtx, cancel := 
context.WithCancel(handler.ctx) + defer cancel() + ticker := utils.NewJitteredTicker(derivedCtx, handler.minDrainEventsFrequency, handler.maxDrainEventsFrequency) + for { + select { + case <-handler.ctx.Done(): + seelog.Infof("TaskHandler: Stopping periodic container/managed agent state change submission ticker") + return + case <-ticker: + // Gather a list of task state changes to send. This list is constructed from + // the tasksToContainerStates and tasksToManagedAgentStates maps based on the + // task arns of containers and managed agents that haven't been sent to ECS yet. + for _, taskEvent := range handler.taskStateChangesToSend() { + seelog.Infof( + "TaskHandler: Adding a state change event to send batched container/managed agent events: %s", + taskEvent.String()) + // Force start the task state change submission + // workflow by calling AddStateChangeEvent method. + handler.AddStateChangeEvent(taskEvent, handler.client) + } + } + } +} + +// taskStateChangesToSend gets a list of task state changes for container events that +// have been batched and not sent beyond the drainEventsFrequency threshold +func (handler *TaskHandler) taskStateChangesToSend() []api.TaskStateChange { + handler.lock.RLock() + defer handler.lock.RUnlock() + + events := make(map[string]api.TaskStateChange) + for taskARN := range handler.tasksToContainerStates { + // An entry for the task in tasksToContainerStates means that there + // is at least 1 container event for that task that hasn't been sent + // to ECS (has been batched). + // Make sure that the engine's task state knows about this task (as a + // safety mechanism) and add it to the list of task state changes + // that need to be sent to ECS + if task, ok := handler.state.TaskByArn(taskARN); ok { + // We do not allow the ticker to submit container state updates for + // tasks that are STOPPED. 
This prevents the ticker's asynchronous + // updates from clobbering container states when the task + // transitions to STOPPED, since ECS does not allow updates to + // container states once the task has moved to STOPPED. + knownStatus := task.GetKnownStatus() + if knownStatus >= apitaskstatus.TaskStopped { + continue + } + event := api.TaskStateChange{ + TaskARN: taskARN, + Status: task.GetKnownStatus(), + Task: task, + } + event.SetTaskTimestamps() + events[taskARN] = event + } + } + + for taskARN := range handler.tasksToManagedAgentStates { + if _, ok := events[taskARN]; ok { + continue + } + if task, ok := handler.state.TaskByArn(taskARN); ok { + // We do not allow the ticker to submit managed agent state updates for + // tasks that are STOPPED. This prevents the ticker's asynchronous + // updates from clobbering managed agent states when the task + // transitions to STOPPED, since ECS does not allow updates to + // managed agent states once the task has moved to STOPPED. + knownStatus := task.GetKnownStatus() + if knownStatus >= apitaskstatus.TaskStopped { + continue + } + event := api.TaskStateChange{ + TaskARN: taskARN, + Status: task.GetKnownStatus(), + Task: task, + } + event.SetTaskTimestamps() + + events[taskARN] = event + } + } + var taskEvents []api.TaskStateChange + for _, tEvent := range events { + taskEvents = append(taskEvents, tEvent) + } + return taskEvents +} + +// batchContainerEventUnsafe collects container state change events for a given task arn +func (handler *TaskHandler) batchContainerEventUnsafe(event api.ContainerStateChange) { + seelog.Infof("TaskHandler: batching container event: %s", event.String()) + handler.tasksToContainerStates[event.TaskArn] = append(handler.tasksToContainerStates[event.TaskArn], event) +} + +// batchManagedAgentEventUnsafe collects managed agent state change events for a given task arn +func (handler *TaskHandler) batchManagedAgentEventUnsafe(event api.ManagedAgentStateChange) { + seelog.Infof("TaskHandler: 
batching managed agent event: %s", event.String()) + handler.tasksToManagedAgentStates[event.TaskArn] = append(handler.tasksToManagedAgentStates[event.TaskArn], event) +} + +// flushBatchUnsafe attaches the task arn's container events to TaskStateChange event +// by creating the sendable event list. It then submits this event to ECS asynchronously +func (handler *TaskHandler) flushBatchUnsafe(taskStateChange *api.TaskStateChange, client api.ECSClient) { + taskStateChange.Containers = append(taskStateChange.Containers, + handler.tasksToContainerStates[taskStateChange.TaskARN]...) + // All container events for the task have now been copied to the + // task state change object. Remove them from the map + delete(handler.tasksToContainerStates, taskStateChange.TaskARN) + taskStateChange.ManagedAgents = append(taskStateChange.ManagedAgents, + handler.tasksToManagedAgentStates[taskStateChange.TaskARN]...) + // All managed agent events for the task have now been copied to the + // task state change object. Remove them from the map + delete(handler.tasksToManagedAgentStates, taskStateChange.TaskARN) + // Prepare a given event to be sent by adding it to the handler's + // eventList + event := newSendableTaskEvent(*taskStateChange) + taskEvents := handler.getTaskEventsUnsafe(event) + + // Add the event to the sendable events queue for the task and + // start sending it asynchronously if possible + taskEvents.sendChange(event, client, handler) +} + +// getTaskEventsUnsafe gets the event list for the task arn in the sendableEvent +// from taskToEvent map +func (handler *TaskHandler) getTaskEventsUnsafe(event *sendableEvent) *taskSendableEvents { + taskARN := event.taskArn() + taskEvents, ok := handler.tasksToEvents[taskARN] + + if !ok { + // We are not tracking this task arn in the tasksToEvents map. 
Create + // a new entry + taskEvents = &taskSendableEvents{ + events: list.New(), + sending: false, + createdAt: time.Now(), + taskARN: taskARN, + } + handler.tasksToEvents[taskARN] = taskEvents + seelog.Debugf("TaskHandler: collecting events for new task; event: %s; events: %s ", + event.toString(), taskEvents.toStringUnsafe()) + } + + return taskEvents +} + +// Continuously retries sending an event until it succeeds, sleeping between each +// attempt +func (handler *TaskHandler) submitTaskEvents(taskEvents *taskSendableEvents, client api.ECSClient, taskARN string) { + defer metrics.MetricsEngineGlobal.RecordECSClientMetric("SUBMIT_TASK_EVENTS")() + defer handler.removeTaskEvents(taskARN) + + backoff := retry.NewExponentialBackoff(submitStateBackoffMin, submitStateBackoffMax, + submitStateBackoffJitterMultiple, submitStateBackoffMultiple) + + // Mirror events.sending, but without the need to lock since this is local + // to our goroutine + done := false + // TODO: wire in the context here. Else, we have go routine leaks in tests + for !done { + // If we looped back up here, we successfully submitted an event, but + // we haven't emptied the list so we should keep submitting + backoff.Reset() + retry.RetryWithBackoff(backoff, func() error { + // Lock and unlock within this function, allowing the list to be added + // to while we're not actively sending an event + seelog.Debug("TaskHandler: Waiting on semaphore to send events...") + handler.submitSemaphore.Wait() + defer handler.submitSemaphore.Post() + + var err error + done, err = taskEvents.submitFirstEvent(handler, backoff) + return err + }) + } +} + +func (handler *TaskHandler) removeTaskEvents(taskARN string) { + handler.lock.Lock() + defer handler.lock.Unlock() + + delete(handler.tasksToEvents, taskARN) +} + +// sendChange adds the change to the sendable events queue. 
It triggers +// the handler's submitTaskEvents async method to submit this change if +// there's no go routines already sending changes for this event list +func (taskEvents *taskSendableEvents) sendChange(change *sendableEvent, + client api.ECSClient, + handler *TaskHandler) { + + taskEvents.lock.Lock() + defer taskEvents.lock.Unlock() + + // Add event to the queue + seelog.Infof("TaskHandler: Adding event: %s", change.toString()) + taskEvents.events.PushBack(change) + + if !taskEvents.sending { + // If a send event is not already in progress, trigger the + // submitTaskEvents to start sending changes to ECS + taskEvents.sending = true + go handler.submitTaskEvents(taskEvents, client, change.taskArn()) + } else { + seelog.Debugf( + "TaskHandler: Not submitting change as the task is already being sent: %s", + change.toString()) + } +} + +// submitFirstEvent submits the first event for the task from the event list. It +// returns true if the list became empty after submitting the event. Else, it returns +// false. An error is returned if there was an error with submitting the state change +// to ECS. 
The error is used by the backoff handler to backoff before retrying the +// state change submission for the first event +func (taskEvents *taskSendableEvents) submitFirstEvent(handler *TaskHandler, backoff retry.Backoff) (bool, error) { + seelog.Debug("TaskHandler: Acquiring lock for sending event...") + taskEvents.lock.Lock() + defer taskEvents.lock.Unlock() + + seelog.Debugf("TaskHandler: Acquired lock, processing event list: : %s", taskEvents.toStringUnsafe()) + + if taskEvents.events.Len() == 0 { + seelog.Debug("TaskHandler: No events left; not retrying more") + taskEvents.sending = false + return true, nil + } + + eventToSubmit := taskEvents.events.Front() + // Extract the wrapped event from the list element + event := eventToSubmit.Value.(*sendableEvent) + + if event.containerShouldBeSent() { + if err := event.send(sendContainerStatusToECS, setContainerChangeSent, "container", + handler.client, eventToSubmit, handler.dataClient, backoff, taskEvents); err != nil { + return false, err + } + } else if event.taskShouldBeSent() { + if err := event.send(sendTaskStatusToECS, setTaskChangeSent, "task", + handler.client, eventToSubmit, handler.dataClient, backoff, taskEvents); err != nil { + handleInvalidParamException(err, taskEvents.events, eventToSubmit) + return false, err + } + } else if event.taskAttachmentShouldBeSent() { + if err := event.send(sendTaskStatusToECS, setTaskAttachmentSent, "task attachment", + handler.client, eventToSubmit, handler.dataClient, backoff, taskEvents); err != nil { + handleInvalidParamException(err, taskEvents.events, eventToSubmit) + return false, err + } + } else { + // Shouldn't be sent as either a task or container change event; must have been already sent + seelog.Infof("TaskHandler: Not submitting redundant event; just removing: %s", event.toString()) + taskEvents.events.Remove(eventToSubmit) + } + + if taskEvents.events.Len() == 0 { + seelog.Debug("TaskHandler: Removed the last element, no longer sending") + taskEvents.sending 
= false + return true, nil + } + + return false, nil +} + +func (taskEvents *taskSendableEvents) toStringUnsafe() string { + return fmt.Sprintf("Task event list [taskARN: %s, sending: %t, createdAt: %s]", + taskEvents.taskARN, taskEvents.sending, taskEvents.createdAt.String()) +} + +// handleInvalidParamException removes the event from event queue when its parameters are +// invalid to reduce redundant API call +func handleInvalidParamException(err error, events *list.List, eventToSubmit *list.Element) { + if utils.IsAWSErrorCodeEqual(err, ecs.ErrCodeInvalidParameterException) { + event := eventToSubmit.Value.(*sendableEvent) + seelog.Warnf("TaskHandler: Event is sent with invalid parameters; just removing: %s", event.toString()) + events.Remove(eventToSubmit) + } +} diff --git a/agent/eventhandler/task_handler_test.go b/agent/eventhandler/task_handler_test.go new file mode 100644 index 00000000000..fc877452b19 --- /dev/null +++ b/agent/eventhandler/task_handler_test.go @@ -0,0 +1,707 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eventhandler + +import ( + "container/list" + "context" + "strconv" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/utils" + mock_retry "github.com/aws/amazon-ecs-agent/agent/utils/retry/mock" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +const taskARN = "taskarn" + +func TestSendsEventsOneContainer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + + // Trivial: one container, no errors + contEvent1 := containerEvent(taskARN) + contEvent2 := containerEvent(taskARN) + taskEvent2 := taskEvent(taskARN) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, 2, len(change.Containers)) + assert.Equal(t, taskARN, change.Containers[0].TaskArn) + assert.Equal(t, taskARN, change.Containers[1].TaskArn) + 
wg.Done() + }) + + handler.AddStateChangeEvent(contEvent1, client) + handler.AddStateChangeEvent(contEvent2, client) + handler.AddStateChangeEvent(taskEvent2, client) + + wg.Wait() +} + +func TestSendsEventsOneEventRetries(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(2) + + retriable := apierrors.NewRetriableError(apierrors.NewRetriable(true), errors.New("test")) + taskEvent := taskEvent(taskARN) + + gomock.InOrder( + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Return(retriable).Do(func(interface{}) { wg.Done() }), + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Return(nil).Do(func(interface{}) { wg.Done() }), + ) + + handler.AddStateChangeEvent(taskEvent, client) + + wg.Wait() +} + +func TestSendsEventsInvalidParametersEventsRemoved(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + + taskEvent := taskEvent(taskARN) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(interface{}) { + assert.Equal(t, 1, handler.tasksToEvents[taskARN].events.Len()) + wg.Done() + }).Return(awserr.New(ecs.ErrCodeInvalidParameterException, "", nil)) + + handler.AddStateChangeEvent(taskEvent, client) + + wg.Wait() + // Require the lock to wait for submitFirstEvent to be finished + handler.tasksToEvents[taskARN].lock.Lock() + assert.Equal(t, 0, handler.tasksToEvents[taskARN].events.Len()) + handler.tasksToEvents[taskARN].lock.Unlock() +} + +func TestSendsEventsConcurrentLimit(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + completeStateChange := make(chan bool, concurrentEventCalls+1) + var wg sync.WaitGroup + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Times(concurrentEventCalls + 1).Do(func(interface{}) { + wg.Done() + <-completeStateChange + }) + + // Test concurrency; ensure it doesn't attempt to send more than + // concurrentEventCalls at once + wg.Add(concurrentEventCalls) + + // Put on N+1 events + for i := 0; i < concurrentEventCalls+1; i++ { + handler.AddStateChangeEvent(taskEvent("concurrent_"+strconv.Itoa(i)), client) + } + wg.Wait() + + // accept a single change event + wg.Add(1) + completeStateChange <- true + wg.Wait() + + // ensure the remaining requests are completed + for i := 0; i < concurrentEventCalls; i++ { + completeStateChange <- true + } +} + +func TestSendsEventsContainerDifferences(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + + // Test container event replacement doesn't happen + contEvent1 := containerEvent(taskARN) + contEvent2 := containerEventStopped(taskARN) + taskEvent := taskEvent(taskARN) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, 2, len(change.Containers)) + assert.Equal(t, taskARN, change.Containers[0].TaskArn) + assert.Equal(t, apicontainerstatus.ContainerRunning, change.Containers[0].Status) + assert.Equal(t, taskARN, change.Containers[1].TaskArn) + assert.Equal(t, apicontainerstatus.ContainerStopped, 
change.Containers[1].Status) + wg.Done() + }) + + handler.AddStateChangeEvent(contEvent1, client) + handler.AddStateChangeEvent(contEvent2, client) + handler.AddStateChangeEvent(taskEvent, client) + + wg.Wait() +} + +func TestSendsEventsTaskDifferences(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + dataClient := data.NewNoopClient() + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, dataClient, dockerstate.NewTaskEngineState(), client) + defer cancel() + + taskARNA := "taskarnA" + taskARNB := "taskarnB" + + var wg sync.WaitGroup + wg.Add(2) + + var wgAddEvent sync.WaitGroup + wgAddEvent.Add(1) + + // Test task event replacement doesn't happen + taskEventA := taskEvent(taskARNA) + contEventA1 := containerEvent(taskARNA) + + contEventB1 := containerEvent(taskARNB) + contEventB2 := containerEventStopped(taskARNB) + taskEventB := taskEventStopped(taskARNB) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, taskARNA, change.TaskARN) + wgAddEvent.Done() + wg.Done() + }) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, taskARNB, change.TaskARN) + wg.Done() + }) + + handler.AddStateChangeEvent(contEventB1, client) + handler.AddStateChangeEvent(contEventA1, client) + handler.AddStateChangeEvent(contEventB2, client) + + handler.AddStateChangeEvent(taskEventA, client) + wgAddEvent.Wait() + + handler.AddStateChangeEvent(taskEventB, client) + + wg.Wait() +} + +func TestSendsEventsDedupe(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + taskARNA := "taskarnA" + taskARNB := "taskarnB" + + var wg 
sync.WaitGroup + wg.Add(1) + + // Verify that a task doesn't get sent if we already have 'sent' it + task1 := taskEvent(taskARNA) + task1.(api.TaskStateChange).Task.SetSentStatus(apitaskstatus.TaskRunning) + cont1 := containerEvent(taskARNA) + cont1.(api.ContainerStateChange).Container.SetSentStatus(apicontainerstatus.ContainerRunning) + + handler.AddStateChangeEvent(cont1, client) + handler.AddStateChangeEvent(task1, client) + + task2 := taskEvent(taskARNB) + task2.(api.TaskStateChange).Task.SetSentStatus(apitaskstatus.TaskStatusNone) + cont2 := containerEvent(taskARNB) + cont2.(api.ContainerStateChange).Container.SetSentStatus(apicontainerstatus.ContainerRunning) + + // Expect to send a task status but not a container status + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, 1, len(change.Containers)) + assert.Equal(t, taskARNB, change.Containers[0].TaskArn) + assert.Equal(t, taskARNB, change.TaskARN) + wg.Done() + }) + + handler.AddStateChangeEvent(cont2, client) + handler.AddStateChangeEvent(task2, client) + + wg.Wait() +} + +// TestCleanupTaskEventAfterSubmit tests the map of task event is removed after +// calling submittaskstatechange +func TestCleanupTaskEventAfterSubmit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mock_api.NewMockECSClient(ctrl) + + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + + taskARN2 := "taskarn2" + + var wg sync.WaitGroup + wg.Add(3) + + taskEvent1 := taskEvent(taskARN) + taskEvent2 := taskEvent(taskARN) + taskEvent3 := taskEvent(taskARN2) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do( + func(change api.TaskStateChange) { + wg.Done() + }).Times(3) + + handler.AddStateChangeEvent(taskEvent1, client) + handler.AddStateChangeEvent(taskEvent2, client) + handler.AddStateChangeEvent(taskEvent3, client) + 
+ wg.Wait() + + // Wait for task events to be removed from the tasksToEvents map + for { + if getTasksToEventsLen(handler) == 0 { + break + } + time.Sleep(time.Millisecond) + } +} + +// getTasksToEventsLen returns the length of the tasksToEvents map. It is +// used only in the test code to ascertain that map has been cleaned up +func getTasksToEventsLen(handler *TaskHandler) int { + handler.lock.RLock() + defer handler.lock.RUnlock() + + return len(handler.tasksToEvents) +} + +func containerEvent(arn string) statechange.Event { + return api.ContainerStateChange{TaskArn: arn, ContainerName: "containerName", Status: apicontainerstatus.ContainerRunning, Container: &apicontainer.Container{}} +} + +func managedAgentEvent(arn string) statechange.Event { + return api.ManagedAgentStateChange{TaskArn: arn, Container: &apicontainer.Container{}, Name: "ExecAgent", Status: apicontainerstatus.ManagedAgentRunning} +} + +func containerEventStopped(arn string) statechange.Event { + return api.ContainerStateChange{TaskArn: arn, ContainerName: "containerName", Status: apicontainerstatus.ContainerStopped, Container: &apicontainer.Container{}} +} + +func taskEvent(arn string) statechange.Event { + return api.TaskStateChange{TaskARN: arn, Status: apitaskstatus.TaskRunning, Task: &apitask.Task{}} +} + +func taskEventStopped(arn string) statechange.Event { + return api.TaskStateChange{TaskARN: arn, Status: apitaskstatus.TaskStopped, Task: &apitask.Task{}} +} + +func TestENISentStatusChange(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mock_api.NewMockECSClient(ctrl) + + task := &apitask.Task{ + Arn: taskARN, + } + + eniAttachment := &apieni.ENIAttachment{ + TaskARN: taskARN, + AttachStatusSent: false, + ExpiresAt: time.Now().Add(time.Second), + } + timeoutFunc := func() { + eniAttachment.AttachStatusSent = true + } + assert.NoError(t, eniAttachment.StartTimer(timeoutFunc)) + + sendableTaskEvent := newSendableTaskEvent(api.TaskStateChange{ + 
Attachment: eniAttachment, + TaskARN: taskARN, + Status: apitaskstatus.TaskStatusNone, + Task: task, + }) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Return(nil) + + events := list.New() + events.PushBack(sendableTaskEvent) + ctx, cancel := context.WithCancel(context.Background()) + handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client) + defer cancel() + handler.submitTaskEvents(&taskSendableEvents{ + events: events, + }, client, taskARN) + + assert.True(t, eniAttachment.AttachStatusSent) +} + +func TestGetBatchedContainerEvents(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + handler := &TaskHandler{ + tasksToContainerStates: map[string][]api.ContainerStateChange{ + "t1": {}, + "t2": {}, + }, + state: state, + } + + state.EXPECT().TaskByArn("t1").Return(&apitask.Task{Arn: "t1", KnownStatusUnsafe: apitaskstatus.TaskRunning}, true) + state.EXPECT().TaskByArn("t2").Return(nil, false) + + events := handler.taskStateChangesToSend() + assert.Len(t, events, 1) + assert.Equal(t, "t1", events[0].TaskARN) +} + +func TestGetBatchedContainerEventsStoppedTask(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + handler := &TaskHandler{ + tasksToContainerStates: map[string][]api.ContainerStateChange{ + "t1": {}, + }, + state: state, + } + + state.EXPECT().TaskByArn("t1").Return(&apitask.Task{Arn: "t1", KnownStatusUnsafe: apitaskstatus.TaskStopped}, true) + + events := handler.taskStateChangesToSend() + assert.Len(t, events, 0) +} + +func TestSubmitTaskEventsWhenSubmittingTaskRunningAfterStopped(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + client := mock_api.NewMockECSClient(ctrl) + + handler := &TaskHandler{ + state: state, + submitSemaphore: 
utils.NewSemaphore(concurrentEventCalls), + tasksToEvents: make(map[string]*taskSendableEvents), + tasksToContainerStates: make(map[string][]api.ContainerStateChange), + client: client, + dataClient: data.NewNoopClient(), + } + + taskEvents := &taskSendableEvents{events: list.New(), + sending: false, + createdAt: time.Now(), + taskARN: taskARN, + } + + backoff := mock_retry.NewMockBackoff(ctrl) + ok, err := taskEvents.submitFirstEvent(handler, backoff) + assert.True(t, ok) + assert.NoError(t, err) + + task := &apitask.Task{} + taskEvents.events.PushBack(newSendableTaskEvent(api.TaskStateChange{ + TaskARN: taskARN, + Status: apitaskstatus.TaskStopped, + Task: task, + })) + taskEvents.events.PushBack(newSendableTaskEvent(api.TaskStateChange{ + TaskARN: taskARN, + Status: apitaskstatus.TaskRunning, + Task: task, + })) + handler.tasksToEvents[taskARN] = taskEvents + + var wg sync.WaitGroup + wg.Add(1) + gomock.InOrder( + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, apitaskstatus.TaskStopped, change.Status) + }), + backoff.EXPECT().Reset().Do(func() { + wg.Done() + }), + ) + state.EXPECT().TaskByArn(gomock.Any()).AnyTimes().Return(task, true) + ok, err = taskEvents.submitFirstEvent(handler, backoff) + // We have an unsent event for the TaskRunning transition. Hence, send() returns false + assert.False(t, ok) + assert.NoError(t, err) + wg.Wait() + + // The unsent transition is deleted from the task list. 
send() returns true as it + // does not have any more events to process + ok, err = taskEvents.submitFirstEvent(handler, backoff) + assert.NoError(t, err) + assert.True(t, ok) +} + +func TestSubmitTaskEventsWhenSubmittingTaskStoppedAfterRunning(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + client := mock_api.NewMockECSClient(ctrl) + + handler := &TaskHandler{ + state: state, + submitSemaphore: utils.NewSemaphore(concurrentEventCalls), + tasksToEvents: make(map[string]*taskSendableEvents), + tasksToContainerStates: make(map[string][]api.ContainerStateChange), + client: client, + dataClient: data.NewNoopClient(), + } + + taskEvents := &taskSendableEvents{events: list.New(), + sending: false, + createdAt: time.Now(), + taskARN: taskARN, + } + + backoff := mock_retry.NewMockBackoff(ctrl) + ok, err := taskEvents.submitFirstEvent(handler, backoff) + assert.True(t, ok) + assert.NoError(t, err) + + task := &apitask.Task{} + taskEvents.events.PushBack(newSendableTaskEvent(api.TaskStateChange{ + TaskARN: taskARN, + Status: apitaskstatus.TaskRunning, + Task: task, + })) + taskEvents.events.PushBack(newSendableTaskEvent(api.TaskStateChange{ + TaskARN: taskARN, + Status: apitaskstatus.TaskStopped, + Task: task, + })) + handler.tasksToEvents[taskARN] = taskEvents + + var wg sync.WaitGroup + wg.Add(1) + gomock.InOrder( + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, apitaskstatus.TaskRunning, change.Status) + }), + backoff.EXPECT().Reset().Do(func() { + wg.Done() + }), + ) + state.EXPECT().TaskByArn(gomock.Any()).AnyTimes().Return(task, true) + // We have an unsent event for the TaskStopped transition. 
Hence, send() returns false
+	ok, err = taskEvents.submitFirstEvent(handler, backoff)
+	assert.False(t, ok)
+	assert.NoError(t, err)
+	wg.Wait()
+
+	wg.Add(1)
+	gomock.InOrder(
+		client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) {
+			assert.Equal(t, apitaskstatus.TaskStopped, change.Status)
+		}),
+		backoff.EXPECT().Reset().Do(func() {
+			wg.Done()
+		}),
+	)
+	// The unsent transition is sent and deleted from the task list. send() returns true as it
+	// does not have any more events to process
+	ok, err = taskEvents.submitFirstEvent(handler, backoff)
+	assert.True(t, ok)
+	assert.NoError(t, err)
+	wg.Wait()
+}
+
+func TestSendContainerAndManagedAgentEvents(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	client := mock_api.NewMockECSClient(ctrl)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	handler := NewTaskHandler(ctx, data.NewNoopClient(), dockerstate.NewTaskEngineState(), client)
+	defer cancel()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	maEvent1 := managedAgentEvent(taskARN)
+	cEevent1 := containerEvent(taskARN)
+	taskEvent1 := taskEvent(taskARN)
+
+	client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) {
+		assert.Equal(t, 1, len(change.ManagedAgents))
+		assert.Equal(t, 1, len(change.Containers))
+		assert.Equal(t, taskARN, change.ManagedAgents[0].TaskArn)
+		assert.Equal(t, taskARN, change.Containers[0].TaskArn)
+		wg.Done()
+	})
+
+	handler.AddStateChangeEvent(maEvent1, client)
+	handler.AddStateChangeEvent(cEevent1, client)
+	handler.AddStateChangeEvent(taskEvent1, client)
+
+	wg.Wait()
+	assert.Len(t, handler.tasksToManagedAgentStates, 0)
+	assert.Len(t, handler.tasksToContainerStates, 0)
+}
+
+func TestSendManagedAgentEventsTaskDifferences(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	client := mock_api.NewMockECSClient(ctrl)
+	dataClient := data.NewNoopClient()
+
+	ctx, cancel := context.WithCancel(context.Background())
+ handler := NewTaskHandler(ctx, dataClient, dockerstate.NewTaskEngineState(), client) + defer cancel() + + taskARNA := "taskarnA" + taskARNB := "taskarnB" + + var wg sync.WaitGroup + wg.Add(2) + + var wgAddEvent sync.WaitGroup + wgAddEvent.Add(1) + + // Test task event replacement doesn't happen + taskEventA := taskEvent(taskARNA) + maEventA1 := managedAgentEvent(taskARNA) + + maEventB1 := managedAgentEvent(taskARNB) + taskEventB := taskEventStopped(taskARNB) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, taskARNA, change.TaskARN) + wgAddEvent.Done() + wg.Done() + }) + + client.EXPECT().SubmitTaskStateChange(gomock.Any()).Do(func(change api.TaskStateChange) { + assert.Equal(t, taskARNB, change.TaskARN) + wg.Done() + }) + + handler.AddStateChangeEvent(maEventB1, client) + handler.AddStateChangeEvent(maEventA1, client) + + handler.AddStateChangeEvent(taskEventA, client) + wgAddEvent.Wait() + + handler.AddStateChangeEvent(taskEventB, client) + + wg.Wait() +} + +func TestGetBatchedManagedAgentEvents(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + handler := &TaskHandler{ + tasksToManagedAgentStates: map[string][]api.ManagedAgentStateChange{ + "t1": {}, + "t2": {}, + }, + state: state, + } + + state.EXPECT().TaskByArn("t1").Return(&apitask.Task{Arn: "t1", KnownStatusUnsafe: apitaskstatus.TaskRunning}, true) + state.EXPECT().TaskByArn("t2").Return(nil, false) + + events := handler.taskStateChangesToSend() + assert.Len(t, events, 1) + assert.Equal(t, "t1", events[0].TaskARN) +} + +func TestGetBatchedManagedAgentEventsStoppedTask(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + handler := &TaskHandler{ + tasksToManagedAgentStates: map[string][]api.ManagedAgentStateChange{ + "t1": {}, + }, + state: state, + } + + 
state.EXPECT().TaskByArn("t1").Return(&apitask.Task{Arn: "t1", KnownStatusUnsafe: apitaskstatus.TaskStopped}, true)
+
+	events := handler.taskStateChangesToSend()
+	assert.Len(t, events, 0)
+}
diff --git a/agent/eventhandler/task_handler_types.go b/agent/eventhandler/task_handler_types.go
new file mode 100644
index 00000000000..f7559c2c032
--- /dev/null
+++ b/agent/eventhandler/task_handler_types.go
@@ -0,0 +1,260 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+//	http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package eventhandler
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+
+	"github.com/aws/amazon-ecs-agent/agent/api"
+	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
+	apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
+	apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
+	apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
+	"github.com/aws/amazon-ecs-agent/agent/data"
+	"github.com/aws/amazon-ecs-agent/agent/utils/retry"
+	"github.com/cihub/seelog"
+)
+
+// a state change that may have a container and, optionally, a task event to
+// send
+type sendableEvent struct {
+	// Either is a container event or a task event
+	isContainerEvent bool
+
+	containerSent   bool
+	containerChange api.ContainerStateChange
+
+	taskSent   bool
+	taskChange api.TaskStateChange
+
+	lock sync.RWMutex
+}
+
+func newSendableTaskEvent(event api.TaskStateChange) *sendableEvent {
+	return &sendableEvent{
+		isContainerEvent: false,
taskSent: false, + taskChange: event, + } +} + +func (event *sendableEvent) taskArn() string { + if event.isContainerEvent { + return event.containerChange.TaskArn + } + return event.taskChange.TaskARN +} + +// taskShouldBeSent checks whether the event should be sent, this includes +// both task state change and container/managed agent state change events +func (event *sendableEvent) taskShouldBeSent() bool { + event.lock.RLock() + defer event.lock.RUnlock() + + if event.isContainerEvent { + return false + } + tevent := event.taskChange + if event.taskSent { + return false // redundant event + } + + // task and container change event should have task != nil + if tevent.Task == nil { + return false + } + + // Task event should be sent + if tevent.Task.GetSentStatus() < tevent.Status { + return true + } + + // Container event should be sent + for _, containerStateChange := range tevent.Containers { + container := containerStateChange.Container + if container.GetSentStatus() < container.GetKnownStatus() { + return true + } + } + + // Managed agent event should be sent + for _, managedAgentStateChange := range tevent.ManagedAgents { + managedAgentName := managedAgentStateChange.Name + container := managedAgentStateChange.Container + if container.GetManagedAgentSentStatus(managedAgentName) != container.GetManagedAgentStatus(managedAgentName) { + return true + } + } + return false +} + +func (event *sendableEvent) taskAttachmentShouldBeSent() bool { + event.lock.RLock() + defer event.lock.RUnlock() + if event.isContainerEvent { + return false + } + tevent := event.taskChange + return tevent.Status == apitaskstatus.TaskStatusNone && // Task Status is not set for attachments as task record has yet to be streamed down + tevent.Attachment != nil && // Task has attachment records + !tevent.Attachment.HasExpired() && // ENI attachment ack timestamp hasn't expired + !tevent.Attachment.IsSent() // Task status hasn't already been sent +} + +func (event *sendableEvent) 
containerShouldBeSent() bool { + event.lock.RLock() + defer event.lock.RUnlock() + if !event.isContainerEvent { + return false + } + cevent := event.containerChange + if event.containerSent || (cevent.Container != nil && cevent.Container.GetSentStatus() >= cevent.Status) { + return false + } + return true +} + +func (event *sendableEvent) setSent() { + event.lock.Lock() + defer event.lock.Unlock() + if event.isContainerEvent { + event.containerSent = true + } else { + event.taskSent = true + } +} + +// send tries to send an event, specified by 'eventToSubmit', of type +// 'eventType' to ECS +func (event *sendableEvent) send( + sendStatusToECS sendStatusChangeToECS, + setChangeSent setStatusSent, + eventType string, + client api.ECSClient, + eventToSubmit *list.Element, + dataClient data.Client, + backoff retry.Backoff, + taskEvents *taskSendableEvents) error { + + seelog.Infof("TaskHandler: Sending %s change: %s", eventType, event.toString()) + // Try submitting the change to ECS + if err := sendStatusToECS(client, event); err != nil { + seelog.Errorf("TaskHandler: Unretriable error submitting %s state change [%s]: %v", + eventType, event.toString(), err) + return err + } + // submitted; ensure we don't retry it + event.setSent() + // Mark event as sent + setChangeSent(event, dataClient) + seelog.Debugf("TaskHandler: Submitted task state change: %s", event.toString()) + taskEvents.events.Remove(eventToSubmit) + backoff.Reset() + return nil +} + +// sendStatusChangeToECS defines a function type for invoking the appropriate ECS state change API +type sendStatusChangeToECS func(client api.ECSClient, event *sendableEvent) error + +// sendContainerStatusToECS invokes the SubmitContainerStateChange API to send a +// container status change to ECS +func sendContainerStatusToECS(client api.ECSClient, event *sendableEvent) error { + return client.SubmitContainerStateChange(event.containerChange) +} + +// sendTaskStatusToECS invokes the SubmitTaskStateChange API to send a 
task +// status change to ECS +func sendTaskStatusToECS(client api.ECSClient, event *sendableEvent) error { + return client.SubmitTaskStateChange(event.taskChange) +} + +// setStatusSent defines a function type to mark the event as sent +type setStatusSent func(event *sendableEvent, dataClient data.Client) + +// setContainerChangeSent sets the event's container change object as sent +func setContainerChangeSent(event *sendableEvent, dataClient data.Client) { + containerChangeStatus := event.containerChange.Status + container := event.containerChange.Container + if container != nil && container.GetSentStatus() < containerChangeStatus { + updateContainerSentStatus(container, containerChangeStatus, dataClient) + } +} + +// setTaskChangeSent sets the event's task change object as sent +func setTaskChangeSent(event *sendableEvent, dataClient data.Client) { + taskChangeStatus := event.taskChange.Status + task := event.taskChange.Task + if task != nil && task.GetSentStatus() < taskChangeStatus { + updataTaskSentStatus(task, taskChangeStatus, dataClient) + } + for _, containerStateChange := range event.taskChange.Containers { + updateContainerSentStatus(containerStateChange.Container, containerStateChange.Status, dataClient) + } + for _, managedAgentStateChange := range event.taskChange.ManagedAgents { + updateManagedAgentSentStatus(managedAgentStateChange.Container, managedAgentStateChange.Name, managedAgentStateChange.Status, dataClient) + } + +} + +// setTaskAttachmentSent sets the event's task attachment object as sent +func setTaskAttachmentSent(event *sendableEvent, dataClient data.Client) { + if event.taskChange.Attachment != nil { + attachment := event.taskChange.Attachment + attachment.SetSentStatus() + attachment.StopAckTimer() + err := dataClient.SaveENIAttachment(attachment) + if err != nil { + seelog.Errorf("Failed to update attachment sent status in database for attachment %s: %v", attachment.AttachmentARN, err) + } + } +} + +func (event *sendableEvent) 
toString() string { + event.lock.RLock() + defer event.lock.RUnlock() + + if event.isContainerEvent { + return "ContainerChange: [" + event.containerChange.String() + fmt.Sprintf("] sent: %t", event.containerSent) + } else { + return "TaskChange: [" + event.taskChange.String() + fmt.Sprintf("] sent: %t", event.taskSent) + } +} + +func updataTaskSentStatus(task *apitask.Task, status apitaskstatus.TaskStatus, dataClient data.Client) { + task.SetSentStatus(status) + err := dataClient.SaveTask(task) + if err != nil { + seelog.Errorf("Failed to update task sent status in database for task %s: %v", task.Arn, err) + } +} + +func updateContainerSentStatus(container *apicontainer.Container, status apicontainerstatus.ContainerStatus, dataClient data.Client) { + if container.GetSentStatus() < status { + container.SetSentStatus(status) + if err := dataClient.SaveContainer(container); err != nil { + seelog.Errorf("Failed to update container sent status in database for container %s: %v", container.Name, err) + } + } +} + +func updateManagedAgentSentStatus(container *apicontainer.Container, managedAgentName string, status apicontainerstatus.ManagedAgentStatus, dataClient data.Client) { + if container.GetManagedAgentSentStatus(managedAgentName) != status { + container.UpdateManagedAgentSentStatus(managedAgentName, status) + if err := dataClient.SaveContainer(container); err != nil { + seelog.Errorf("Failed to update %s managed agent sent status in database for container %s: %v", managedAgentName, container.Name, err) + } + } +} diff --git a/agent/eventhandler/task_handler_types_test.go b/agent/eventhandler/task_handler_types_test.go new file mode 100644 index 00000000000..dd3b3766bde --- /dev/null +++ b/agent/eventhandler/task_handler_types_test.go @@ -0,0 +1,487 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package eventhandler + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/data" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testConainerName = "test-name" + testTaskARN = "arn:aws:ecs:us-west-2:1234567890:task/test-cluster/abc" + testAttachmentARN = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" +) + +func newSendableContainerEvent(event api.ContainerStateChange) *sendableEvent { + return &sendableEvent{ + isContainerEvent: true, + containerSent: false, + containerChange: event, + } +} + +func TestShouldContainerEventBeSent(t *testing.T) { + event := newSendableContainerEvent(api.ContainerStateChange{ + Status: apicontainerstatus.ContainerStopped, + }) + assert.Equal(t, true, event.containerShouldBeSent()) + assert.Equal(t, false, event.taskShouldBeSent()) +} + +func TestShouldTaskEventBeSent(t *testing.T) { + for _, tc := range []struct { + event *sendableEvent + shouldBeSent bool + }{ + { + // We don't send a task event to backend if task status == NONE + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + 
Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskStatusNone, + }, + }), + shouldBeSent: false, + }, + { + // task status == RUNNING should be sent to backend + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskRunning, + Task: &apitask.Task{}, + }), + shouldBeSent: true, + }, + { + // task event will not be sent if sent status >= task status + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskRunning, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskRunning, + }, + }), + shouldBeSent: false, + }, + { + // this is a valid event as task status >= sent status + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStopped, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskRunning, + }, + }), + shouldBeSent: true, + }, + { + // Even though the task has been sent, there's a container + // state change that needs to be sent + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskRunning, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskRunning, + }, + Containers: []api.ContainerStateChange{ + { + Container: &apicontainer.Container{ + SentStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + { + Container: &apicontainer.Container{ + SentStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + }, + }, + }), + shouldBeSent: true, + }, + { + // Container state change should be sent regardless of task + // status. 
+ event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskStatusNone, + }, + Containers: []api.ContainerStateChange{ + { + Container: &apicontainer.Container{ + SentStatusUnsafe: apicontainerstatus.ContainerStatusNone, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + }, + }), + shouldBeSent: true, + }, + { + // ManagedAgent state change should be sent if SentStatus is != Status + // regardless of task status + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskStatusNone, + }, + ManagedAgents: []api.ManagedAgentStateChange{ + { + TaskArn: "test_task_arn", + Name: "test_agent", + Container: &apicontainer.Container{ + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: "test_agent", + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + SentStatus: apicontainerstatus.ManagedAgentStatusNone, + }, + }, + }, + }, + Status: apicontainerstatus.ManagedAgentStatusNone, + Reason: "test_reason", + }, + }, + }), + shouldBeSent: true, + }, + { + // ManagedAgent state change should be sent if SentStatus is != Status + // regardless of task status + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskStatusNone, + }, + ManagedAgents: []api.ManagedAgentStateChange{ + { + TaskArn: "test_task_arn", + Name: "test_agent", + Container: &apicontainer.Container{ + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: "test_agent", + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + SentStatus: apicontainerstatus.ManagedAgentStopped, + }, + }, + }, + }, + Status: apicontainerstatus.ManagedAgentStatusNone, + Reason: "test_reason", + }, + }, + }), + shouldBeSent: 
true, + }, + { + // ManagedAgent state change should not be sent if SentStatus is == Status + // regardless of task status + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskStatusNone, + }, + ManagedAgents: []api.ManagedAgentStateChange{ + { + TaskArn: "test_task_arn", + Name: "test_agent", + Container: &apicontainer.Container{ + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{ + { + Name: "test_agent", + ManagedAgentState: apicontainer.ManagedAgentState{ + Status: apicontainerstatus.ManagedAgentRunning, + SentStatus: apicontainerstatus.ManagedAgentRunning, + }, + }, + }, + }, + Status: apicontainerstatus.ManagedAgentStatusNone, + Reason: "test_reason", + }, + }, + }), + shouldBeSent: false, + }, + { + // All states sent, nothing to send + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskRunning, + Task: &apitask.Task{ + SentStatusUnsafe: apitaskstatus.TaskRunning, + }, + Containers: []api.ContainerStateChange{ + { + Container: &apicontainer.Container{ + SentStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + { + Container: &apicontainer.Container{ + SentStatusUnsafe: apicontainerstatus.ContainerStopped, + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + }, + }, + }), + shouldBeSent: false, + }, + } { + t.Run(fmt.Sprintf("Event[%s] should be sent[%t]", tc.event.toString(), tc.shouldBeSent), func(t *testing.T) { + assert.Equal(t, tc.shouldBeSent, tc.event.taskShouldBeSent()) + assert.Equal(t, false, tc.event.containerShouldBeSent()) + assert.Equal(t, false, tc.event.taskAttachmentShouldBeSent()) + }) + } +} + +func TestShouldTaskAttachmentEventBeSent(t *testing.T) { + for _, tc := range []struct { + event *sendableEvent + attachmentShouldBeSent bool + taskShouldBeSent bool + }{ + { + // ENI Attachment is only sent if task status == NONE + event: 
newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStopped, + Task: &apitask.Task{}, + }), + attachmentShouldBeSent: false, + taskShouldBeSent: true, + }, + { + // ENI Attachment is only sent if task status == NONE and if + // the event has a non nil attachment object + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + }), + attachmentShouldBeSent: false, + taskShouldBeSent: false, + }, + { + // ENI Attachment is only sent if task status == NONE and if + // the event has a non nil attachment object and if expiration + // ack timeout is set for future + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Attachment: &apieni.ENIAttachment{ + ExpiresAt: time.Unix(time.Now().Unix()-1, 0), + AttachStatusSent: false, + }, + }), + attachmentShouldBeSent: false, + taskShouldBeSent: false, + }, + { + // ENI Attachment is only sent if task status == NONE and if + // the event has a non nil attachment object and if expiration + // ack timeout is set for future and if attachment status hasn't + // already been sent + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Attachment: &apieni.ENIAttachment{ + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + AttachStatusSent: true, + }, + }), + attachmentShouldBeSent: false, + taskShouldBeSent: false, + }, + { + // Valid attachment event, ensure that its sent + event: newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Attachment: &apieni.ENIAttachment{ + ExpiresAt: time.Unix(time.Now().Unix()+10, 0), + AttachStatusSent: false, + }, + }), + attachmentShouldBeSent: true, + taskShouldBeSent: false, + }, + } { + t.Run(fmt.Sprintf("Event[%s] should be sent[attachment=%t;task=%t]", + tc.event.toString(), tc.attachmentShouldBeSent, tc.taskShouldBeSent), func(t *testing.T) { + assert.Equal(t, tc.attachmentShouldBeSent, tc.event.taskAttachmentShouldBeSent()) + 
assert.Equal(t, tc.taskShouldBeSent, tc.event.taskShouldBeSent()) + assert.Equal(t, false, tc.event.containerShouldBeSent()) + }) + } +} + +func TestSetTaskSentStatus(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + testManagedAgent := apicontainer.ManagedAgent{ + ManagedAgentState: apicontainer.ManagedAgentState{}, + Name: "dummyAgent", + } + testContainer := &apicontainer.Container{ + Name: testConainerName, + TaskARNUnsafe: testTaskARN, + ManagedAgentsUnsafe: []apicontainer.ManagedAgent{testManagedAgent}, + } + testTask := &apitask.Task{ + Arn: testTaskARN, + Containers: []*apicontainer.Container{testContainer}, + } + + taskRunningStateChange := newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskRunning, + Task: testTask, + Containers: []api.ContainerStateChange{ + { + Status: apicontainerstatus.ContainerRunning, + Container: testContainer, + }, + }, + ManagedAgents: []api.ManagedAgentStateChange{ + { + TaskArn: testTaskARN, + Name: "dummyAgent", + Container: testContainer, + Status: apicontainerstatus.ManagedAgentRunning, + }, + }, + }) + taskStoppedStateChange := newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStopped, + Task: testTask, + Containers: []api.ContainerStateChange{ + { + Status: apicontainerstatus.ContainerStopped, + Container: testContainer, + }, + }, + ManagedAgents: []api.ManagedAgentStateChange{ + { + TaskArn: testTaskARN, + Name: "dummyAgent", + Container: testContainer, + Status: apicontainerstatus.ManagedAgentStopped, + }, + }, + }) + + setTaskChangeSent(taskStoppedStateChange, dataClient) + assert.Equal(t, testTask.GetSentStatus(), apitaskstatus.TaskStopped) + assert.Equal(t, testContainer.GetSentStatus(), apicontainerstatus.ContainerStopped) + + updatedManagedAgent, _ := testContainer.GetManagedAgentByName("dummyAgent") + assert.Equal(t, apicontainerstatus.ManagedAgentStopped, updatedManagedAgent.SentStatus) + + setTaskChangeSent(taskRunningStateChange, 
dataClient) + assert.Equal(t, testTask.GetSentStatus(), apitaskstatus.TaskStopped) + assert.Equal(t, testContainer.GetSentStatus(), apicontainerstatus.ContainerStopped) + + updatedManagedAgent, _ = testContainer.GetManagedAgentByName("dummyAgent") + assert.Equal(t, apicontainerstatus.ManagedAgentRunning, updatedManagedAgent.SentStatus) + + tasks, err := dataClient.GetTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + assert.Equal(t, apitaskstatus.TaskStopped, tasks[0].GetSentStatus()) + + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + assert.Equal(t, apicontainerstatus.ContainerStopped, containers[0].Container.GetSentStatus()) + updatedManagedAgent, _ = containers[0].Container.GetManagedAgentByName("dummyAgent") + assert.Equal(t, apicontainerstatus.ManagedAgentRunning, updatedManagedAgent.SentStatus) +} + +func TestSetContainerSentStatus(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + testContainer := &apicontainer.Container{ + Name: testConainerName, + TaskARNUnsafe: testTaskARN, + } + + containerRunningStateChange := newSendableContainerEvent(api.ContainerStateChange{ + Status: apicontainerstatus.ContainerRunning, + Container: testContainer, + }) + containerStoppedStateChange := newSendableContainerEvent(api.ContainerStateChange{ + Status: apicontainerstatus.ContainerStopped, + Container: testContainer, + }) + + setContainerChangeSent(containerStoppedStateChange, dataClient) + assert.Equal(t, testContainer.GetSentStatus(), apicontainerstatus.ContainerStopped) + setContainerChangeSent(containerRunningStateChange, dataClient) + assert.Equal(t, testContainer.GetSentStatus(), apicontainerstatus.ContainerStopped) + + containers, err := dataClient.GetContainers() + require.NoError(t, err) + assert.Len(t, containers, 1) + assert.Equal(t, apicontainerstatus.ContainerStopped, containers[0].Container.GetSentStatus()) +} + +func TestSetAttachmentSentStatus(t *testing.T) 
{ + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + testAttachment := &apieni.ENIAttachment{ + AttachStatusSent: true, + ExpiresAt: time.Unix(time.Now().Unix()+100, 0), + AttachmentARN: testAttachmentARN, + } + require.NoError(t, testAttachment.StartTimer(func() {})) + event := newSendableTaskEvent(api.TaskStateChange{ + Status: apitaskstatus.TaskStatusNone, + Attachment: testAttachment, + }) + setTaskAttachmentSent(event, dataClient) + atts, err := dataClient.GetENIAttachments() + require.NoError(t, err) + assert.Len(t, atts, 1) + assert.True(t, atts[0].IsSent()) +} + +func newTestDataClient(t *testing.T) (data.Client, func()) { + testDir, err := ioutil.TempDir("", "agent_eventhandler_unit_test") + require.NoError(t, err) + + testClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} diff --git a/agent/eventstream/eventstream.go b/agent/eventstream/eventstream.go new file mode 100644 index 00000000000..8dac2a665e4 --- /dev/null +++ b/agent/eventstream/eventstream.go @@ -0,0 +1,132 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package eventstream implements a simple publish/subscribe event stream that +// broadcasts written events to all subscribed handler functions. 
+package eventstream + +import ( + "context" + "fmt" + "sync" + + "github.com/cihub/seelog" +) + +type eventHandler func(...interface{}) error + +// EventStream waiting for events and notifying the listeners by invoking +// the handler that listeners registered +type EventStream struct { + name string + open bool + event chan interface{} + handlers map[string]eventHandler + ctx context.Context + handlersLock sync.RWMutex + statusLock sync.RWMutex +} + +func NewEventStream(name string, ctx context.Context) *EventStream { + return &EventStream{ + name: name, + event: make(chan interface{}, 1), + ctx: ctx, + open: false, + handlers: make(map[string]eventHandler), + } +} + +// Subscribe adds the handler to be called into EventStream +func (eventStream *EventStream) Subscribe(name string, handler eventHandler) error { + eventStream.handlersLock.Lock() + defer eventStream.handlersLock.Unlock() + + if _, ok := eventStream.handlers[name]; ok { + return fmt.Errorf("handler %s already exists", name) + } + + eventStream.handlers[name] = handler + return nil +} + +// broadcast calls all handler's handler function +func (eventStream *EventStream) broadcast(event interface{}) { + eventStream.handlersLock.RLock() + defer eventStream.handlersLock.RUnlock() + + seelog.Debugf("Event stream %s received events, broadcasting to listeners...", eventStream.name) + + for _, handlerFunc := range eventStream.handlers { + go handlerFunc(event) + } +} + +// Unsubscribe deletes the handler from the EventStream +func (eventStream *EventStream) Unsubscribe(name string) { + eventStream.handlersLock.Lock() + defer eventStream.handlersLock.Unlock() + + for handler := range eventStream.handlers { + if handler == name { + seelog.Debugf("Unsubscribing event handler %s from event stream %s", handler, eventStream.name) + delete(eventStream.handlers, handler) + return + } + } +} + +// WriteToEventStream writes event to the event stream +func (eventStream *EventStream) WriteToEventStream(event 
interface{}) error { + eventStream.statusLock.RLock() + defer eventStream.statusLock.RUnlock() + + if !eventStream.open { + return fmt.Errorf("Event stream is closed") + } + eventStream.event <- event + return nil +} + +// Context returns the context of event stream +func (eventStream *EventStream) Context() context.Context { + return eventStream.ctx +} + +// listen listens to the event channel +func (eventStream *EventStream) listen() { + seelog.Infof("Event stream %s start listening...", eventStream.name) + for { + select { + case event := <-eventStream.event: + eventStream.broadcast(event) + case <-eventStream.ctx.Done(): + seelog.Infof("Event stream %s stopped listening...", eventStream.name) + + eventStream.statusLock.Lock() + eventStream.open = false + close(eventStream.event) + eventStream.statusLock.Unlock() + return + } + } +} + +// StartListening mark the event stream as open and start listening +func (eventStream *EventStream) StartListening() { + eventStream.statusLock.Lock() + defer eventStream.statusLock.Unlock() + eventStream.open = true + + go eventStream.listen() +} diff --git a/agent/eventstream/eventstream_test.go b/agent/eventstream/eventstream_test.go new file mode 100644 index 00000000000..20fcce177b9 --- /dev/null +++ b/agent/eventstream/eventstream_test.go @@ -0,0 +1,102 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package eventstream + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// TestSubscribe tests the listener subscribed to the +// event stream will be notified +func TestSubscribe(t *testing.T) { + waiter, listener := setupWaitGroupAndListener() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + waiter.Add(1) + eventStream := NewEventStream("TestSubscribe", ctx) + eventStream.Subscribe("listener", listener) + eventStream.StartListening() + + err := eventStream.WriteToEventStream(struct{}{}) + assert.NoError(t, err) + waiter.Wait() +} + +// TestUnsubscribe tests the listener unsubscribed from the +// event stream will not be notified +func TestUnsubscribe(t *testing.T) { + waiter1, listener1 := setupWaitGroupAndListener() + waiter2, listener2 := setupWaitGroupAndListener() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventStream := NewEventStream("TestUnsubscribe", ctx) + eventStream.Subscribe("listener1", listener1) + eventStream.Subscribe("listener2", listener2) + eventStream.StartListening() + + waiter1.Add(1) + waiter2.Add(1) + err := eventStream.WriteToEventStream(struct{}{}) + assert.NoError(t, err) + waiter1.Wait() + waiter2.Wait() + + eventStream.Unsubscribe("listener1") + + waiter2.Add(1) + err = eventStream.WriteToEventStream(struct{}{}) + assert.NoError(t, err) + + waiter1.Wait() + waiter2.Wait() +} + +// TestCancelEventStream tests the event stream can +// be closed by context +func TestCancelEventStream(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + eventStream := NewEventStream("TestCancelEventStream", ctx) + _, listener := setupWaitGroupAndListener() + + eventStream.Subscribe("listener", listener) + eventStream.StartListening() + cancel() + + time.Sleep(1 * time.Second) + + err := eventStream.WriteToEventStream(struct{}{}) + assert.Error(t, err) +} + +// setupWaitGroupAndListener creates a waitgroup and a 
function +// that decrements a WaitGroup when called +func setupWaitGroupAndListener() (*sync.WaitGroup, func(...interface{}) error) { + waiter := &sync.WaitGroup{} + listener := func(...interface{}) error { + waiter.Done() + return nil + } + return waiter, listener +} diff --git a/agent/fsx/factory/factory.go b/agent/fsx/factory/factory.go new file mode 100644 index 00000000000..242e72ebfe8 --- /dev/null +++ b/agent/fsx/factory/factory.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + fsxclient "github.com/aws/amazon-ecs-agent/agent/fsx" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + "github.com/aws/aws-sdk-go/aws" + awscreds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/fsx" +) + +const ( + roundtripTimeout = 5 * time.Second +) + +type FSxClientCreator interface { + NewFSxClient(region string, creds credentials.IAMRoleCredentials) fsxclient.FSxClient +} + +func NewFSxClientCreator() FSxClientCreator { + return &fsxClientCreator{} +} + +type fsxClientCreator struct{} + +func (*fsxClientCreator) NewFSxClient(region string, + creds credentials.IAMRoleCredentials) fsxclient.FSxClient { + cfg := aws.NewConfig(). + WithHTTPClient(httpclient.New(roundtripTimeout, false)). + WithRegion(region). 
+ WithCredentials( + awscreds.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, + creds.SessionToken)) + sess := session.Must(session.NewSession(cfg)) + return fsx.New(sess) +} diff --git a/agent/fsx/factory/generate_mocks.go b/agent/fsx/factory/generate_mocks.go new file mode 100644 index 00000000000..8d0b457a7c5 --- /dev/null +++ b/agent/fsx/factory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +//go:generate mockgen -destination=mocks/factory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/fsx/factory FSxClientCreator diff --git a/agent/fsx/factory/mocks/factory_mocks.go b/agent/fsx/factory/mocks/factory_mocks.go new file mode 100644 index 00000000000..952a815dc63 --- /dev/null +++ b/agent/fsx/factory/mocks/factory_mocks.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/fsx/factory (interfaces: FSxClientCreator) + +// Package mock_factory is a generated GoMock package. +package mock_factory + +import ( + reflect "reflect" + + credentials "github.com/aws/amazon-ecs-agent/agent/credentials" + fsx "github.com/aws/amazon-ecs-agent/agent/fsx" + gomock "github.com/golang/mock/gomock" +) + +// MockFSxClientCreator is a mock of FSxClientCreator interface +type MockFSxClientCreator struct { + ctrl *gomock.Controller + recorder *MockFSxClientCreatorMockRecorder +} + +// MockFSxClientCreatorMockRecorder is the mock recorder for MockFSxClientCreator +type MockFSxClientCreatorMockRecorder struct { + mock *MockFSxClientCreator +} + +// NewMockFSxClientCreator creates a new mock instance +func NewMockFSxClientCreator(ctrl *gomock.Controller) *MockFSxClientCreator { + mock := &MockFSxClientCreator{ctrl: ctrl} + mock.recorder = &MockFSxClientCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFSxClientCreator) EXPECT() *MockFSxClientCreatorMockRecorder { + return m.recorder +} + +// NewFSxClient mocks base method +func (m *MockFSxClientCreator) NewFSxClient(arg0 string, arg1 credentials.IAMRoleCredentials) fsx.FSxClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewFSxClient", arg0, arg1) + ret0, _ := ret[0].(fsx.FSxClient) + return ret0 +} + +// NewFSxClient indicates an expected call of NewFSxClient +func (mr *MockFSxClientCreatorMockRecorder) NewFSxClient(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFSxClient", reflect.TypeOf((*MockFSxClientCreator)(nil).NewFSxClient), arg0, arg1) +} diff --git a/agent/fsx/fsx.go b/agent/fsx/fsx.go new file mode 100644 index 00000000000..8a667886b6e --- /dev/null +++ b/agent/fsx/fsx.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package fsx + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/pkg/errors" +) + +// GetFileSystemDNSNames returns a map of filesystem ids and corresponding dns names +// Example: key := fs-12345678, value := amznfsxujfqr2nj.test.com +func GetFileSystemDNSNames(fileSystemIds []string, client FSxClient) (map[string]string, error) { + out, err := describeFileSystems(fileSystemIds, client) + if err != nil { + return nil, err + } + + fileSystemDNSMap := make(map[string]string) + for _, filesystem := range out.FileSystems { + fileSystemDNSMap[aws.StringValue(filesystem.FileSystemId)] = aws.StringValue(filesystem.DNSName) + } + + return fileSystemDNSMap, nil +} + +// describeFileSystems makes the api call to the AWS FSx service to retrieve filesystems info +func describeFileSystems(fileSystemIds []string, client FSxClient) (*fsx.DescribeFileSystemsOutput, error) { + var IDs []*string + for _, id := range fileSystemIds { + IDs = append(IDs, aws.String(id)) + } + + in := &fsx.DescribeFileSystemsInput{ + FileSystemIds: IDs, + } + + out, err := client.DescribeFileSystems(in) + if err != nil { + return nil, errors.Wrapf(err, "fsx describing filesystem(s) from the service for %v", fileSystemIds) + } + + return out, nil +} diff --git a/agent/fsx/fsx_test.go b/agent/fsx/fsx_test.go new file mode 100644 index 00000000000..669b060b3bf --- /dev/null +++ b/agent/fsx/fsx_test.go @@ -0,0 +1,64 
@@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package fsx + +import ( + "errors" + "testing" + + mock_fsx "github.com/aws/amazon-ecs-agent/agent/fsx/mocks" + "github.com/golang/mock/gomock" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + fileSystemId = "fs-12345678" + dnsName = "test" +) + +func TestGetFileSystemDNSNames(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockFSxClient := mock_fsx.NewMockFSxClient(ctrl) + mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(&fsx.DescribeFileSystemsOutput{ + FileSystems: []*fsx.FileSystem{ + { + FileSystemId: aws.String(fileSystemId), + DNSName: aws.String(dnsName), + }, + }, + }, nil) + + out, err := GetFileSystemDNSNames([]string{fileSystemId}, mockFSxClient) + require.NoError(t, err) + assert.Equal(t, "test", out[fileSystemId]) +} + +func TestGetFileSystemDNSNamesError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockFSxClient := mock_fsx.NewMockFSxClient(ctrl) + mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(nil, errors.New("test error")) + + _, err := GetFileSystemDNSNames([]string{fileSystemId}, mockFSxClient) + assert.Error(t, err) +} diff --git a/agent/fsx/generate_mocks.go b/agent/fsx/generate_mocks.go new file mode 100644 index 00000000000..376de7e3f5c --- 
// FSxClient is the subset of the AWS FSx service API used by the agent.
// It is declared as an interface so the real SDK client can be swapped for
// a generated mock in unit tests (see mocks/fsx_mocks.go).
type FSxClient interface {
	// DescribeFileSystems returns metadata (including DNS names) for the
	// filesystem ids listed in the input.
	DescribeFileSystems(*fsx.DescribeFileSystemsInput) (*fsx.DescribeFileSystemsOutput, error)
}
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/fsx (interfaces: FSxClient) + +// Package mock_fsx is a generated GoMock package. +package mock_fsx + +import ( + reflect "reflect" + + fsx "github.com/aws/aws-sdk-go/service/fsx" + gomock "github.com/golang/mock/gomock" +) + +// MockFSxClient is a mock of FSxClient interface +type MockFSxClient struct { + ctrl *gomock.Controller + recorder *MockFSxClientMockRecorder +} + +// MockFSxClientMockRecorder is the mock recorder for MockFSxClient +type MockFSxClientMockRecorder struct { + mock *MockFSxClient +} + +// NewMockFSxClient creates a new mock instance +func NewMockFSxClient(ctrl *gomock.Controller) *MockFSxClient { + mock := &MockFSxClient{ctrl: ctrl} + mock.recorder = &MockFSxClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockFSxClient) EXPECT() *MockFSxClientMockRecorder { + return m.recorder +} + +// DescribeFileSystems mocks base method +func (m *MockFSxClient) DescribeFileSystems(arg0 *fsx.DescribeFileSystemsInput) (*fsx.DescribeFileSystemsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeFileSystems", arg0) + ret0, _ := ret[0].(*fsx.DescribeFileSystemsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeFileSystems indicates an expected call of DescribeFileSystems +func (mr *MockFSxClientMockRecorder) 
DescribeFileSystems(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeFileSystems", reflect.TypeOf((*MockFSxClient)(nil).DescribeFileSystems), arg0) +} diff --git a/agent/gogenerate/awssdk.go b/agent/gogenerate/awssdk.go new file mode 100644 index 00000000000..6e64d73a6be --- /dev/null +++ b/agent/gogenerate/awssdk.go @@ -0,0 +1,212 @@ +// +build codegen + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Note: This package uses the 'codegen' directive for compatibility with +// the AWS SDK's code generation scripts. 
+package main + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/model/api" + "github.com/aws/aws-sdk-go/private/util" + "golang.org/x/tools/imports" +) + +func main() { + os.Exit(_main()) +} + +func ShapeListWithErrors(a *api.API) []*api.Shape { + list := make([]*api.Shape, 0, len(a.Shapes)) + for _, n := range a.ShapeNames() { + list = append(list, a.Shapes[n]) + } + return list +} + +var typesOnlyTplAPI = template.Must(template.New("api").Funcs(template.FuncMap{ + "ShapeListWithErrors": ShapeListWithErrors, +}).Parse(` +{{ $shapeList := ShapeListWithErrors $ }} +{{ range $_, $s := $shapeList }} +{{ if eq $s.Type "structure"}}{{ $s.GoCode }}{{ end }} + +{{ end }} +`)) + +var ( + typesOnly bool + copyrightHeader string +) + +func init() { + types := flag.Bool("typesOnly", false, "only generate types") + copyrightFile := flag.String("copyright_file", "", "Copyright file used to add copyright header") + flag.Parse() + + if *copyrightFile == "" { + fmt.Println("copyright_file must be set") + os.Exit(1) + } + + copyrightText, err := getCopyrightFile(*copyrightFile) + if err != nil { + fmt.Println("error reading copyright_file: ", err) + os.Exit(1) + } + + copyrightText += "\nCode generated by [agent/gogenerate/awssdk.go] DO NOT EDIT." 
+ + b := &strings.Builder{} + for i, line := range strings.Split(copyrightText, "\n") { + if i != 0 { + b.WriteString("\n") + } + b.WriteString("//") + if line != "" { + b.WriteString(" ") + b.WriteString(line) + } + } + b.WriteString("\n") + + copyrightHeader = b.String() + typesOnly = *types +} + +// return-based exit code so the 'defer' works +func _main() int { + apiFile := "./api/api-2.json" + var err error + if typesOnly { + err = genTypesOnlyAPI(apiFile) + } else { + err = genFull(apiFile) + } + if err != nil { + fmt.Println(err) + return 1 + } + return 0 +} + +func genTypesOnlyAPI(file string) error { + apiGen := &api.API{ + NoRemoveUnusedShapes: true, + NoRenameToplevelShapes: true, + NoGenStructFieldAccessors: true, + } + apiGen.Attach(file) + apiGen.Setup() + // to reset imports so that timestamp has an entry in the map. + apiGen.APIGoCode() + + var buf bytes.Buffer + err := typesOnlyTplAPI.Execute(&buf, apiGen) + if err != nil { + panic(err) + } + code := strings.TrimSpace(buf.String()) + code = util.GoFmt(code) + + // Ignore dir error, filepath will catch it for an invalid path. + os.Mkdir(apiGen.PackageName(), 0755) + // Fix imports. 
+ codeWithImports, err := imports.Process("", []byte(fmt.Sprintf("package %s\n\n%s", apiGen.PackageName(), code)), nil) + if err != nil { + fmt.Println(err) + return err + } + outFile := filepath.Join(apiGen.PackageName(), "api.go") + err = ioutil.WriteFile(outFile, []byte(fmt.Sprintf("%s\n%s", copyrightHeader, codeWithImports)), 0644) + if err != nil { + return err + } + return nil +} + +func genFull(file string) error { + // Heavily inspired by https://github.com/aws/aws-sdk-go/blob/45ba2f817fbf625fe48cf9b51fae13e5dfba6171/internal/model/cli/gen-api/main.go#L36 + // That code is copyright Amazon + api := &api.API{} + api.Attach(file) + paginatorsFile := strings.Replace(file, "api-2.json", "paginators-1.json", -1) + if _, err := os.Stat(paginatorsFile); err == nil { + api.AttachPaginators(paginatorsFile) + } + + docsFile := strings.Replace(file, "api-2.json", "docs-2.json", -1) + if _, err := os.Stat(docsFile); err == nil { + api.AttachDocs(docsFile) + } + api.Setup() + + if err := genFile(api, api.APIGoCode(), "api.go"); err != nil { + return err + } + + if err := genFile(api, api.ServiceGoCode(), "service.go"); err != nil { + return err + } + + if err := genFile(api, api.APIErrorsGoCode(), "errors.go"); err != nil { + return err + } + + return nil +} + +func genFile(api *api.API, code string, fileName string) error { + processedCode, err := imports.Process("", + []byte(fmt.Sprintf("package %s\n\n%s", api.PackageName(), code)), + nil) + if err != nil { + fmt.Println("Error processing file imports: ", err) + return err + } + // Ignore dir error, filepath will catch it for an invalid path. 
+ os.Mkdir(api.PackageName(), 0755) + outFile, err := os.Create(filepath.Join(api.PackageName(), fileName)) + if err != nil { + fmt.Println("Error creating file: ", err) + return err + } + defer outFile.Close() + withHeader := fmt.Sprintf("%s\n%s", copyrightHeader, processedCode) + goimports := exec.Command("goimports") + goimports.Stdin = bytes.NewBufferString(withHeader) + goimports.Stdout = outFile + err = goimports.Run() + if err != nil { + fmt.Println("Error running goimports: ", err) + return err + } + + return nil +} + +func getCopyrightFile(filename string) (string, error) { + contents, err := ioutil.ReadFile(filename) + return string(contents), err +} diff --git a/agent/gogenerate/inflections.csv b/agent/gogenerate/inflections.csv new file mode 100644 index 00000000000..80d5a24bb81 --- /dev/null +++ b/agent/gogenerate/inflections.csv @@ -0,0 +1,12 @@ + +Transport: +Ulimit: +Driver: +Networking: +Filesystem: +Soft: +Ulimits: +Domainname: +Privileged: +Readonly: + diff --git a/agent/gpu/generate_mocks.go b/agent/gpu/generate_mocks.go new file mode 100644 index 00000000000..8430c29c660 --- /dev/null +++ b/agent/gpu/generate_mocks.go @@ -0,0 +1,18 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package gpu + +//go:generate mockgen -destination=mocks/gpu_manager_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/gpu GPUManager diff --git a/agent/gpu/mocks/gpu_manager_mocks.go b/agent/gpu/mocks/gpu_manager_mocks.go new file mode 100644 index 00000000000..5e91d5426c1 --- /dev/null +++ b/agent/gpu/mocks/gpu_manager_mocks.go @@ -0,0 +1,141 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/gpu (interfaces: GPUManager) + +// Package mock_gpu is a generated GoMock package. 
+package mock_gpu + +import ( + reflect "reflect" + + ecs "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + gomock "github.com/golang/mock/gomock" +) + +// MockGPUManager is a mock of GPUManager interface +type MockGPUManager struct { + ctrl *gomock.Controller + recorder *MockGPUManagerMockRecorder +} + +// MockGPUManagerMockRecorder is the mock recorder for MockGPUManager +type MockGPUManagerMockRecorder struct { + mock *MockGPUManager +} + +// NewMockGPUManager creates a new mock instance +func NewMockGPUManager(ctrl *gomock.Controller) *MockGPUManager { + mock := &MockGPUManager{ctrl: ctrl} + mock.recorder = &MockGPUManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockGPUManager) EXPECT() *MockGPUManagerMockRecorder { + return m.recorder +} + +// GetDevices mocks base method +func (m *MockGPUManager) GetDevices() []*ecs.PlatformDevice { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevices") + ret0, _ := ret[0].([]*ecs.PlatformDevice) + return ret0 +} + +// GetDevices indicates an expected call of GetDevices +func (mr *MockGPUManagerMockRecorder) GetDevices() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevices", reflect.TypeOf((*MockGPUManager)(nil).GetDevices)) +} + +// GetDriverVersion mocks base method +func (m *MockGPUManager) GetDriverVersion() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDriverVersion") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetDriverVersion indicates an expected call of GetDriverVersion +func (mr *MockGPUManagerMockRecorder) GetDriverVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDriverVersion", reflect.TypeOf((*MockGPUManager)(nil).GetDriverVersion)) +} + +// GetGPUIDsUnsafe mocks base method +func (m *MockGPUManager) GetGPUIDsUnsafe() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"GetGPUIDsUnsafe") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetGPUIDsUnsafe indicates an expected call of GetGPUIDsUnsafe +func (mr *MockGPUManagerMockRecorder) GetGPUIDsUnsafe() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGPUIDsUnsafe", reflect.TypeOf((*MockGPUManager)(nil).GetGPUIDsUnsafe)) +} + +// Initialize mocks base method +func (m *MockGPUManager) Initialize() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Initialize") + ret0, _ := ret[0].(error) + return ret0 +} + +// Initialize indicates an expected call of Initialize +func (mr *MockGPUManagerMockRecorder) Initialize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockGPUManager)(nil).Initialize)) +} + +// SetDevices mocks base method +func (m *MockGPUManager) SetDevices() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDevices") +} + +// SetDevices indicates an expected call of SetDevices +func (mr *MockGPUManagerMockRecorder) SetDevices() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDevices", reflect.TypeOf((*MockGPUManager)(nil).SetDevices)) +} + +// SetDriverVersion mocks base method +func (m *MockGPUManager) SetDriverVersion(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDriverVersion", arg0) +} + +// SetDriverVersion indicates an expected call of SetDriverVersion +func (mr *MockGPUManagerMockRecorder) SetDriverVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDriverVersion", reflect.TypeOf((*MockGPUManager)(nil).SetDriverVersion), arg0) +} + +// SetGPUIDs mocks base method +func (m *MockGPUManager) SetGPUIDs(arg0 []string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetGPUIDs", arg0) +} + +// SetGPUIDs indicates an expected call of SetGPUIDs +func (mr *MockGPUManagerMockRecorder) SetGPUIDs(arg0 interface{}) 
// GPUManager encompasses methods to get information on GPUs and their driver
type GPUManager interface {
	// Initialize loads GPU and driver info from the on-disk info file, if present.
	Initialize() error
	// SetGPUIDs stores the GPU IDs (write-locked).
	SetGPUIDs([]string)
	// GetGPUIDsUnsafe returns the GPU IDs without locking; callers must
	// hold the lock or have exclusive ownership of the instance.
	GetGPUIDsUnsafe() []string
	// SetDevices rebuilds the cached ECS PlatformDevice list from the GPU IDs.
	SetDevices()
	// GetDevices returns the cached GPU devices as ECS PlatformDevices (read-locked).
	GetDevices() []*ecs.PlatformDevice
	// SetDriverVersion stores the NVIDIA driver version (write-locked).
	SetDriverVersion(string)
	// GetDriverVersion returns the NVIDIA driver version (read-locked).
	GetDriverVersion() string
}

// NvidiaGPUManager is used as a wrapper for NVML APIs and implements GPUManager
// interface
type NvidiaGPUManager struct {
	DriverVersion string                `json:"DriverVersion"` // NVIDIA driver version, unmarshaled from the GPU info file
	GPUIDs        []string              `json:"GPUIDs"`        // GPU device IDs, unmarshaled from the GPU info file
	GPUDevices    []*ecs.PlatformDevice `json:"-"`             // derived from GPUIDs by SetDevices; excluded from JSON
	lock          sync.RWMutex          // guards all fields above
}
NvidiaGPUInfoFilePath = GPUInfoDirPath + "/nvidia-gpu-info.json" +) + +// NewNvidiaGPUManager is used to obtain NvidiaGPUManager handle +func NewNvidiaGPUManager() GPUManager { + return &NvidiaGPUManager{} +} + +// Initialize sets the fields of Nvidia GPU Manager struct +func (n *NvidiaGPUManager) Initialize() error { + if GPUInfoFileExists() { + // GPU info file found + gpuJSON, err := GetGPUInfoJSON() + if err != nil { + return errors.Wrapf(err, "could not read GPU file content") + } + var nvidiaGPUInfo NvidiaGPUManager + err = json.Unmarshal(gpuJSON, &nvidiaGPUInfo) + if err != nil { + return errors.Wrapf(err, "could not unmarshal GPU file content") + } + n.SetDriverVersion(nvidiaGPUInfo.GetDriverVersion()) + nvidiaGPUInfo.lock.RLock() + gpuIDs := nvidiaGPUInfo.GetGPUIDsUnsafe() + nvidiaGPUInfo.lock.RUnlock() + n.SetGPUIDs(gpuIDs) + n.SetDevices() + } else { + seelog.Error("Config for GPU support is enabled, but GPU information is not found; continuing without it") + } + return nil +} + +var GPUInfoFileExists = CheckForGPUInfoFile + +func CheckForGPUInfoFile() bool { + _, err := os.Stat(NvidiaGPUInfoFilePath) + return !os.IsNotExist(err) +} + +var GetGPUInfoJSON = GetGPUInfo + +func GetGPUInfo() ([]byte, error) { + gpuInfo, err := os.Open(NvidiaGPUInfoFilePath) + if err != nil { + return nil, err + } + defer gpuInfo.Close() + + gpuJSON, err := ioutil.ReadAll(gpuInfo) + if err != nil { + return nil, err + } + return gpuJSON, nil +} + +// SetGPUIDs sets the GPUIDs +func (n *NvidiaGPUManager) SetGPUIDs(gpuIDs []string) { + n.lock.Lock() + defer n.lock.Unlock() + n.GPUIDs = gpuIDs +} + +// GetGPUIDs returns the GPUIDs +func (n *NvidiaGPUManager) GetGPUIDsUnsafe() []string { + return n.GPUIDs +} + +// SetDriverVersion is a setter for nvidia driver version +func (n *NvidiaGPUManager) SetDriverVersion(version string) { + n.lock.Lock() + defer n.lock.Unlock() + n.DriverVersion = version +} + +// GetDriverVersion is a getter for nvidia driver version +func (n 
// SetDevices converts the stored GPU IDs into ECS PlatformDevices and caches
// them on the manager. The write lock is held for the whole rebuild; calling
// GetGPUIDsUnsafe under it is safe because that getter takes no lock itself.
func (n *NvidiaGPUManager) SetDevices() {
	n.lock.Lock()
	defer n.lock.Unlock()
	gpuIDs := n.GetGPUIDsUnsafe()
	devices := make([]*ecs.PlatformDevice, 0)
	for _, gpuID := range gpuIDs {
		devices = append(devices, &ecs.PlatformDevice{
			Id:   aws.String(gpuID),
			Type: aws.String(ecs.PlatformDeviceTypeGpu),
		})
	}
	n.GPUDevices = devices
}

// GetDevices returns the GPU devices as PlatformDevices
func (n *NvidiaGPUManager) GetDevices() []*ecs.PlatformDevice {
	n.lock.RLock()
	defer n.lock.RUnlock()
	return n.GPUDevices
}
+ +package gpu + +import ( + "errors" + "reflect" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +var devices = []*ecs.PlatformDevice{ + { + Id: aws.String("id1"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id2"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, + { + Id: aws.String("id3"), + Type: aws.String(ecs.PlatformDeviceTypeGpu), + }, +} + +func TestNvidiaGPUManagerInitialize(t *testing.T) { + nvidiaGPUManager := NewNvidiaGPUManager() + GPUInfoFileExists = func() bool { + return true + } + GetGPUInfoJSON = func() ([]byte, error) { + return []byte(`{"DriverVersion":"396.44","GPUIDs":["id1","id2","id3"]}`), nil + } + defer func() { + GPUInfoFileExists = CheckForGPUInfoFile + GetGPUInfoJSON = GetGPUInfo + }() + err := nvidiaGPUManager.Initialize() + assert.NoError(t, err) + assert.Equal(t, []string{"id1", "id2", "id3"}, nvidiaGPUManager.GetGPUIDsUnsafe()) + assert.Equal(t, "396.44", nvidiaGPUManager.GetDriverVersion()) + assert.True(t, reflect.DeepEqual(devices, nvidiaGPUManager.GetDevices())) +} + +func TestNvidiaGPUManagerError(t *testing.T) { + nvidiaGPUManager := NewNvidiaGPUManager() + GPUInfoFileExists = func() bool { + return true + } + GetGPUInfoJSON = func() ([]byte, error) { + return nil, errors.New("corrupted content") + } + defer func() { + GPUInfoFileExists = CheckForGPUInfoFile + GetGPUInfoJSON = GetGPUInfo + }() + err := nvidiaGPUManager.Initialize() + assert.Error(t, err) + assert.Nil(t, nvidiaGPUManager.GetGPUIDsUnsafe()) + assert.Empty(t, nvidiaGPUManager.GetDriverVersion()) +} + +func TestSetGPUDevices(t *testing.T) { + nvidiaGPUManager := NewNvidiaGPUManager() + nvidiaGPUManager.SetGPUIDs([]string{"id1", "id2", "id3"}) + nvidiaGPUManager.SetDevices() + assert.True(t, reflect.DeepEqual(devices, nvidiaGPUManager.GetDevices())) +} diff --git a/agent/handlers/generate_mocks.go 
b/agent/handlers/generate_mocks.go new file mode 100644 index 00000000000..88098210446 --- /dev/null +++ b/agent/handlers/generate_mocks.go @@ -0,0 +1,17 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package handlers + +//go:generate mockgen -destination=mocks/http/handlers_mocks.go -copyright_file=../../scripts/copyright_file net/http ResponseWriter +//go:generate mockgen -destination=mocks/handlers_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/handlers/utils DockerStateResolver diff --git a/agent/handlers/introspection_server_setup.go b/agent/handlers/introspection_server_setup.go new file mode 100644 index 00000000000..07f138a2237 --- /dev/null +++ b/agent/handlers/introspection_server_setup.go @@ -0,0 +1,106 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package handlers deals with the agent introspection api. 
+package handlers + +import ( + "context" + "encoding/json" + "net/http" + "strconv" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + handlersutils "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/cihub/seelog" +) + +type rootResponse struct { + AvailableCommands []string +} + +func introspectionServerSetup(containerInstanceArn *string, taskEngine handlersutils.DockerStateResolver, cfg *config.Config) *http.Server { + paths := []string{v1.AgentMetadataPath, v1.TaskContainerMetadataPath, v1.LicensePath} + availableCommands := &rootResponse{paths} + // Autogenerated list of the above serverFunctions paths + availableCommandResponse, err := json.Marshal(&availableCommands) + if err != nil { + seelog.Errorf("Error marshaling JSON in introspection server setup: %s", err) + } + + defaultHandler := func(w http.ResponseWriter, r *http.Request) { + w.Write(availableCommandResponse) + } + + serverMux := http.NewServeMux() + serverMux.HandleFunc("/", defaultHandler) + + v1HandlersSetup(serverMux, containerInstanceArn, taskEngine, cfg) + + // Log all requests and then pass through to serverMux + loggingServeMux := http.NewServeMux() + loggingServeMux.Handle("/", LoggingHandler{serverMux}) + + server := &http.Server{ + Addr: ":" + strconv.Itoa(config.AgentIntrospectionPort), + Handler: loggingServeMux, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + + return server +} + +// v1HandlersSetup adds all handlers except CredentialsHandler in v1 package to the server mux. 
+func v1HandlersSetup(serverMux *http.ServeMux, + containerInstanceArn *string, + taskEngine handlersutils.DockerStateResolver, + cfg *config.Config) { + serverMux.HandleFunc(v1.AgentMetadataPath, v1.AgentMetadataHandler(containerInstanceArn, cfg)) + serverMux.HandleFunc(v1.TaskContainerMetadataPath, v1.TaskContainerMetadataHandler(taskEngine)) + serverMux.HandleFunc(v1.LicensePath, v1.LicenseHandler) +} + +// ServeIntrospectionHTTPEndpoint serves information about this agent/containerInstance and tasks +// running on it. "V1" here indicates the hostname version of this server instead +// of the handler versions, i.e. "V1" server can include "V1" and "V2" handlers. +func ServeIntrospectionHTTPEndpoint(ctx context.Context, containerInstanceArn *string, taskEngine engine.TaskEngine, cfg *config.Config) { + // Is this the right level to type assert, assuming we'd abstract multiple taskengines here? + // Revisit if we ever add another type.. + dockerTaskEngine := taskEngine.(*engine.DockerTaskEngine) + + server := introspectionServerSetup(containerInstanceArn, dockerTaskEngine, cfg) + + go func() { + <-ctx.Done() + if err := server.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + seelog.Infof("HTTP server Shutdown: %v", err) + } + }() + + for { + retry.RetryWithBackoff(retry.NewExponentialBackoff(time.Second, time.Minute, 0.2, 2), func() error { + if err := server.ListenAndServe(); err != http.ErrServerClosed { + seelog.Errorf("Error running introspection endpoint: %v", err) + return err + } + // server was cleanly closed via context + return nil + }) + } +} diff --git a/agent/handlers/introspection_server_setup_test.go b/agent/handlers/introspection_server_setup_test.go new file mode 100644 index 00000000000..fefdf0d4d3c --- /dev/null +++ b/agent/handlers/introspection_server_setup_test.go @@ -0,0 +1,420 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package handlers + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + mock_utils "github.com/aws/amazon-ecs-agent/agent/handlers/mocks" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testContainerInstanceArn = "test_container_instance_arn" + testClusterArn = "test_cluster_arn" + eniIPV4Address = "10.0.0.2" +) + +func TestMetadataHandler(t *testing.T) { + metadataHandler := v1.AgentMetadataHandler(utils.Strptr(testContainerInstanceArn), &config.Config{Cluster: testClusterArn}) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://localhost:"+strconv.Itoa(config.AgentIntrospectionPort), nil) + metadataHandler(w, req) + + var resp v1.MetadataResponse + json.Unmarshal(w.Body.Bytes(), &resp) + + if resp.Cluster != testClusterArn { + t.Error("Metadata returned the wrong cluster arn") + } + if *resp.ContainerInstanceArn != testContainerInstanceArn { + 
t.Error("Metadata returned the wrong cluster arn") + } +} + +func TestListMultipleTasks(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks") + + var tasksResponse v1.TasksResponse + err := json.Unmarshal(recorder.Body.Bytes(), &tasksResponse) + if err != nil { + t.Fatal(err) + } + + taskDiffHelper(t, testTasks, tasksResponse) +} + +func TestGetTaskByDockerID(t *testing.T) { + // stateSetupHelper uses the convention of dockerid-$arn-$containerName; the + // second task has a container named foo + recorder := performMockRequest(t, "/v1/tasks?dockerid=dockerid-task2-foo") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + if err != nil { + t.Fatal(err) + } + + taskDiffHelper(t, []*apitask.Task{testTasks[1]}, v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}}) +} + +func TestGetTaskByShortDockerIDMultiple(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?dockerid=dockerid-tas") + + assert.Equal(t, http.StatusBadRequest, recorder.Code, "Expected http 400 for dockerid with multiple matches") +} + +func TestGetTaskShortByDockerID404(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?dockerid=notfound") + + assert.Equal(t, http.StatusNotFound, recorder.Code, "API did not return 404 for bad dockerid") +} + +func TestGetTaskByShortDockerID(t *testing.T) { + // stateSetupHelper uses the convention of dockerid-$arn-$containerName; the + // first task has a container name prefix of dockerid-tas + recorder := performMockRequest(t, "/v1/tasks?dockerid=dockerid-by") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + require.NoError(t, err, "unmarshal failed for get task by short docker id") + + taskDiffHelper(t, []*apitask.Task{testTasks[2]}, v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}}) +} + +func TestGetTaskByDockerID404(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?dockerid=does-not-exist") + + if 
recorder.Code != 404 { + t.Error("API did not return 404 for bad dockerid") + } +} + +func TestGetTaskByTaskArn(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=task1") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + if err != nil { + t.Fatal(err) + } + + taskDiffHelper(t, []*apitask.Task{testTasks[0]}, v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}}) +} + +func TestGetAWSVPCTaskByTaskArn(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=awsvpcTask") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + if err != nil { + t.Fatal(err) + } + + resp := v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}} + + assert.Equal(t, eniIPV4Address, resp.Tasks[0].Containers[0].Networks[0].IPv4Addresses[0]) + taskDiffHelper(t, []*apitask.Task{testTasks[3]}, resp) +} + +func TestGetHostNeworkingTaskByTaskArn(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=hostModeNetworkingTask") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + if err != nil { + t.Fatal(err) + } + + resp := v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}} + + assert.Equal(t, uint16(80), resp.Tasks[0].Containers[0].Ports[0].ContainerPort) + assert.Equal(t, "tcp", resp.Tasks[0].Containers[0].Ports[0].Protocol) + + taskDiffHelper(t, []*apitask.Task{testTasks[4]}, resp) +} + +func TestGetBridgeNeworkingTaskByTaskArn(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=bridgeModeNetworkingTask") + + var taskResponse v1.TaskResponse + err := json.Unmarshal(recorder.Body.Bytes(), &taskResponse) + if err != nil { + t.Fatal(err) + } + + resp := v1.TasksResponse{Tasks: []*v1.TaskResponse{&taskResponse}} + + assert.Equal(t, uint16(80), resp.Tasks[0].Containers[0].Ports[0].ContainerPort) + assert.Equal(t, "tcp", resp.Tasks[0].Containers[0].Ports[0].Protocol) + + 
taskDiffHelper(t, []*apitask.Task{testTasks[5]}, resp) +} + +func TestGetTaskByTaskArnNotFound(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=doesnotexist") + + if recorder.Code != http.StatusNotFound { + t.Errorf("Expected %d for bad taskarn, but was %d", http.StatusNotFound, recorder.Code) + } +} + +func TestGetTaskByTaskArnAndDockerIDBadRequest(t *testing.T) { + recorder := performMockRequest(t, "/v1/tasks?taskarn=task2&dockerid=foo") + + if recorder.Code != http.StatusBadRequest { + t.Errorf("Expected %d for both arn and dockerid, but was %d", http.StatusBadRequest, recorder.Code) + } +} + +func TestBackendMismatchMapping(t *testing.T) { + // Test that a KnownStatus past a DesiredStatus suppresses the DesiredStatus output + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStateResolver := mock_utils.NewMockDockerStateResolver(ctrl) + + containers := []*apicontainer.Container{ + { + Name: "c1", + }, + } + testTask := &apitask.Task{ + Arn: "task1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskStopped, + Family: "test", + Version: "1", + Containers: containers, + } + + state := dockerstate.NewTaskEngineState() + stateSetupHelper(state, []*apitask.Task{testTask}) + + mockStateResolver.EXPECT().State().Return(state) + requestHandler := v1.TaskContainerMetadataHandler(mockStateResolver) + + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/v1/tasks", nil) + requestHandler(recorder, req) + + var tasksResponse v1.TasksResponse + err := json.Unmarshal(recorder.Body.Bytes(), &tasksResponse) + if err != nil { + t.Fatal(err) + } + if tasksResponse.Tasks[0].DesiredStatus != "" { + t.Error("Expected '', was ", tasksResponse.Tasks[0].DesiredStatus) + } + if tasksResponse.Tasks[0].KnownStatus != "STOPPED" { + t.Error("Expected STOPPED, was ", tasksResponse.Tasks[0].KnownStatus) + } +} + +func taskDiffHelper(t *testing.T, expected []*apitask.Task, actual v1.TasksResponse) { + 
if len(expected) != len(actual.Tasks) { + t.Errorf("Expected %v tasks, had %v tasks", len(expected), len(actual.Tasks)) + } + + for _, task := range expected { + // Find related actual task + var respTask *v1.TaskResponse + for _, actualTask := range actual.Tasks { + if actualTask.Arn == task.Arn { + respTask = actualTask + } + } + + if respTask == nil { + t.Errorf("Could not find matching task for arn: %v", task.Arn) + continue + } + + if respTask.DesiredStatus != task.GetDesiredStatus().String() { + t.Errorf("DesiredStatus mismatch: %v != %v", respTask.DesiredStatus, task.GetDesiredStatus()) + } + taskKnownStatus := task.GetKnownStatus().String() + if respTask.KnownStatus != taskKnownStatus { + t.Errorf("KnownStatus mismatch: %v != %v", respTask.KnownStatus, taskKnownStatus) + } + + if respTask.Family != task.Family || respTask.Version != task.Version { + t.Errorf("Family mismatch: %v:%v != %v:%v", respTask.Family, respTask.Version, task.Family, task.Version) + } + + if len(respTask.Containers) != len(task.Containers) { + t.Errorf("Expected %v containers in %v, was %v", len(task.Containers), task.Arn, len(respTask.Containers)) + continue + } + for _, respCont := range respTask.Containers { + _, ok := task.ContainerByName(respCont.Name) + if !ok { + t.Errorf("Could not find container %v", respCont.Name) + } + if respCont.DockerID == "" { + t.Error("blank dockerid") + } + } + } +} + +var testTasks = []*apitask.Task{ + { + Arn: "task1", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "one", + }, + { + Name: "two", + }, + }, + }, + { + Arn: "task2", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "2", + Containers: []*apicontainer.Container{ + { + Name: "foo", + }, + }, + }, + { + Arn: "byShortId", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + 
KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "2", + Containers: []*apicontainer.Container{ + { + Name: "shortId", + }, + }, + }, + { + Arn: "awsvpcTask", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "awsvpc", + }, + }, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPV4Address, + }, + }, + }, + }, + }, + { + Arn: "hostModeNetworkingTask", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "awsvpc", + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + HostPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + }, + }, + }, + { + Arn: "bridgeModeNetworkingTask", + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "test", + Version: "1", + Containers: []*apicontainer.Container{ + { + Name: "awsvpc", + KnownPortBindingsUnsafe: []apicontainer.PortBinding{ + { + ContainerPort: 80, + HostPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + }, + }, + }, +} + +func stateSetupHelper(state dockerstate.TaskEngineState, tasks []*apitask.Task) { + for _, task := range tasks { + state.AddTask(task) + for _, container := range task.Containers { + state.AddContainer(&apicontainer.DockerContainer{ + Container: container, + DockerID: "dockerid-" + task.Arn + "-" + container.Name, + DockerName: "dockername-" + task.Arn + "-" + container.Name, + }, task) + } + } +} + +func performMockRequest(t *testing.T, path string) *httptest.ResponseRecorder { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockStateResolver := mock_utils.NewMockDockerStateResolver(ctrl) + + state := dockerstate.NewTaskEngineState() + stateSetupHelper(state, testTasks) + 
+	mockStateResolver.EXPECT().State().Return(state)
+	requestHandler := introspectionServerSetup(utils.Strptr(testContainerInstanceArn), mockStateResolver, &config.Config{Cluster: testClusterArn})
+
+	recorder := httptest.NewRecorder()
+	req, _ := http.NewRequest("GET", path, nil)
+	requestHandler.Handler.ServeHTTP(recorder, req)
+
+	return recorder
+}
diff --git a/agent/handlers/logging_handler.go b/agent/handlers/logging_handler.go
new file mode 100644
index 00000000000..6db8697b444
--- /dev/null
+++ b/agent/handlers/logging_handler.go
@@ -0,0 +1,38 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package handlers
+
+import (
+	"net/http"
+
+	"github.com/cihub/seelog"
+)
+
+// LoggingHandler is used to log all requests for an endpoint.
+type LoggingHandler struct{ h http.Handler }
+
+// NewLoggingHandler creates a new LoggingHandler object.
+func NewLoggingHandler(handler http.Handler) LoggingHandler {
+	return LoggingHandler{h: handler}
+}
+
+// ServeHTTP logs the method and remote address of the request, then
+// delegates to the wrapped handler.
+func (lh LoggingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// seelog.Debug formats its arguments fmt.Sprint-style, so the previous
+	// key/value style argument list was emitted as one concatenated string;
+	// use Debugf with an explicit format instead.
+	seelog.Debugf("Handling http request, method: %s, from: %s", r.Method, r.RemoteAddr)
+	lh.h.ServeHTTP(w, r)
+}
diff --git a/agent/handlers/mocks/handlers_mocks.go b/agent/handlers/mocks/handlers_mocks.go
new file mode 100644
index 00000000000..94dd2e790dc
--- /dev/null
+++ b/agent/handlers/mocks/handlers_mocks.go
@@ -0,0 +1,63 @@
+// Copyright Amazon.com Inc.
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/handlers/utils (interfaces: DockerStateResolver) + +// Package mock_utils is a generated GoMock package. +package mock_utils + +import ( + reflect "reflect" + + dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + gomock "github.com/golang/mock/gomock" +) + +// MockDockerStateResolver is a mock of DockerStateResolver interface +type MockDockerStateResolver struct { + ctrl *gomock.Controller + recorder *MockDockerStateResolverMockRecorder +} + +// MockDockerStateResolverMockRecorder is the mock recorder for MockDockerStateResolver +type MockDockerStateResolverMockRecorder struct { + mock *MockDockerStateResolver +} + +// NewMockDockerStateResolver creates a new mock instance +func NewMockDockerStateResolver(ctrl *gomock.Controller) *MockDockerStateResolver { + mock := &MockDockerStateResolver{ctrl: ctrl} + mock.recorder = &MockDockerStateResolverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDockerStateResolver) EXPECT() *MockDockerStateResolverMockRecorder { + return m.recorder +} + +// State mocks base method +func (m *MockDockerStateResolver) State() dockerstate.TaskEngineState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "State") + ret0, _ := ret[0].(dockerstate.TaskEngineState) + return ret0 +} + +// State indicates an 
expected call of State +func (mr *MockDockerStateResolverMockRecorder) State() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockDockerStateResolver)(nil).State)) +} diff --git a/agent/handlers/mocks/http/handlers_mocks.go b/agent/handlers/mocks/http/handlers_mocks.go new file mode 100644 index 00000000000..0cebb20a170 --- /dev/null +++ b/agent/handlers/mocks/http/handlers_mocks.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: net/http (interfaces: ResponseWriter) + +// Package mock_http is a generated GoMock package. 
+package mock_http + +import ( + http "net/http" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockResponseWriter is a mock of ResponseWriter interface +type MockResponseWriter struct { + ctrl *gomock.Controller + recorder *MockResponseWriterMockRecorder +} + +// MockResponseWriterMockRecorder is the mock recorder for MockResponseWriter +type MockResponseWriterMockRecorder struct { + mock *MockResponseWriter +} + +// NewMockResponseWriter creates a new mock instance +func NewMockResponseWriter(ctrl *gomock.Controller) *MockResponseWriter { + mock := &MockResponseWriter{ctrl: ctrl} + mock.recorder = &MockResponseWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockResponseWriter) EXPECT() *MockResponseWriterMockRecorder { + return m.recorder +} + +// Header mocks base method +func (m *MockResponseWriter) Header() http.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(http.Header) + return ret0 +} + +// Header indicates an expected call of Header +func (mr *MockResponseWriterMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockResponseWriter)(nil).Header)) +} + +// Write mocks base method +func (m *MockResponseWriter) Write(arg0 []byte) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", arg0) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Write indicates an expected call of Write +func (mr *MockResponseWriterMockRecorder) Write(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockResponseWriter)(nil).Write), arg0) +} + +// WriteHeader mocks base method +func (m *MockResponseWriter) WriteHeader(arg0 int) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "WriteHeader", arg0) +} + +// WriteHeader 
indicates an expected call of WriteHeader +func (mr *MockResponseWriterMockRecorder) WriteHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteHeader", reflect.TypeOf((*MockResponseWriter)(nil).WriteHeader), arg0) +} diff --git a/agent/handlers/task_server_setup.go b/agent/handlers/task_server_setup.go new file mode 100644 index 00000000000..f4bba9953c0 --- /dev/null +++ b/agent/handlers/task_server_setup.go @@ -0,0 +1,197 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package handlers + +import ( + "context" + "net/http" + "strconv" + "time" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + handlersutils "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + v4 "github.com/aws/amazon-ecs-agent/agent/handlers/v4" + "github.com/aws/amazon-ecs-agent/agent/logger/audit" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/cihub/seelog" + "github.com/didip/tollbooth" + "github.com/gorilla/mux" +) + +const ( + // readTimeout specifies the maximum duration before timing out read of the request. + // The value is set to 5 seconds as per AWS SDK defaults. + readTimeout = 5 * time.Second + + // writeTimeout specifies the maximum duration before timing out write of the response. + // The value is set to 5 seconds as per AWS SDK defaults. 
+ writeTimeout = 5 * time.Second +) + +func taskServerSetup(credentialsManager credentials.Manager, + auditLogger audit.AuditLogger, + state dockerstate.TaskEngineState, + ecsClient api.ECSClient, + cluster string, + statsEngine stats.Engine, + steadyStateRate int, + burstRate int, + availabilityZone string, + containerInstanceArn string) *http.Server { + muxRouter := mux.NewRouter() + + // Set this to false so that for request like "//v3//metadata/task" + // to permanently redirect(301) to "/v3/metadata/task" handler + muxRouter.SkipClean(false) + + muxRouter.HandleFunc(v1.CredentialsPath, + v1.CredentialsHandler(credentialsManager, auditLogger)) + + v2HandlersSetup(muxRouter, state, ecsClient, statsEngine, cluster, credentialsManager, auditLogger, availabilityZone, containerInstanceArn) + + v3HandlersSetup(muxRouter, state, ecsClient, statsEngine, cluster, availabilityZone, containerInstanceArn) + + v4HandlersSetup(muxRouter, state, ecsClient, statsEngine, cluster, availabilityZone, containerInstanceArn) + + limiter := tollbooth.NewLimiter(int64(steadyStateRate), nil) + limiter.SetOnLimitReached(handlersutils.LimitReachedHandler(auditLogger)) + limiter.SetBurst(burstRate) + + // Log all requests and then pass through to muxRouter. + loggingMuxRouter := mux.NewRouter() + + // rootPath is a path for any traffic to this endpoint, "root" mux name will not be used. + rootPath := "/" + handlersutils.ConstructMuxVar("root", handlersutils.AnythingRegEx) + loggingMuxRouter.Handle(rootPath, tollbooth.LimitHandler( + limiter, NewLoggingHandler(muxRouter))) + + loggingMuxRouter.SkipClean(false) + + server := http.Server{ + Addr: "127.0.0.1:" + strconv.Itoa(config.AgentCredentialsPort), + Handler: loggingMuxRouter, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + + return &server +} + +// v2HandlersSetup adds all handlers in v2 package to the mux router. 
+func v2HandlersSetup(muxRouter *mux.Router, + state dockerstate.TaskEngineState, + ecsClient api.ECSClient, + statsEngine stats.Engine, + cluster string, + credentialsManager credentials.Manager, + auditLogger audit.AuditLogger, + availabilityZone string, + containerInstanceArn string) { + muxRouter.HandleFunc(v2.CredentialsPath, v2.CredentialsHandler(credentialsManager, auditLogger)) + muxRouter.HandleFunc(v2.ContainerMetadataPath, v2.TaskContainerMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, false)) + muxRouter.HandleFunc(v2.TaskMetadataPath, v2.TaskContainerMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, false)) + muxRouter.HandleFunc(v2.TaskWithTagsMetadataPath, v2.TaskContainerMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, true)) + muxRouter.HandleFunc(v2.TaskMetadataPathWithSlash, v2.TaskContainerMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, false)) + muxRouter.HandleFunc(v2.TaskWithTagsMetadataPathWithSlash, v2.TaskContainerMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, true)) + muxRouter.HandleFunc(v2.ContainerStatsPath, v2.TaskContainerStatsHandler(state, statsEngine)) + muxRouter.HandleFunc(v2.TaskStatsPath, v2.TaskContainerStatsHandler(state, statsEngine)) + muxRouter.HandleFunc(v2.TaskStatsPathWithSlash, v2.TaskContainerStatsHandler(state, statsEngine)) +} + +// v3HandlersSetup adds all handlers in v3 package to the mux router. 
+func v3HandlersSetup(muxRouter *mux.Router,
+	state dockerstate.TaskEngineState,
+	ecsClient api.ECSClient,
+	statsEngine stats.Engine,
+	cluster string,
+	availabilityZone string,
+	containerInstanceArn string) {
+	muxRouter.HandleFunc(v3.ContainerMetadataPath, v3.ContainerMetadataHandler(state))
+	muxRouter.HandleFunc(v3.TaskMetadataPath, v3.TaskMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, false))
+	muxRouter.HandleFunc(v3.TaskWithTagsMetadataPath, v3.TaskMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, true))
+	muxRouter.HandleFunc(v3.ContainerStatsPath, v3.ContainerStatsHandler(state, statsEngine))
+	muxRouter.HandleFunc(v3.TaskStatsPath, v3.TaskStatsHandler(state, statsEngine))
+	muxRouter.HandleFunc(v3.ContainerAssociationsPath, v3.ContainerAssociationsHandler(state))
+	muxRouter.HandleFunc(v3.ContainerAssociationPathWithSlash, v3.ContainerAssociationHandler(state))
+	muxRouter.HandleFunc(v3.ContainerAssociationPath, v3.ContainerAssociationHandler(state))
+}
+
+// v4HandlersSetup adds all handlers in v4 package to the mux router.
+func v4HandlersSetup(muxRouter *mux.Router,
+	state dockerstate.TaskEngineState,
+	ecsClient api.ECSClient,
+	statsEngine stats.Engine,
+	cluster string,
+	availabilityZone string,
+	containerInstanceArn string) {
+	muxRouter.HandleFunc(v4.ContainerMetadataPath, v4.ContainerMetadataHandler(state))
+	muxRouter.HandleFunc(v4.TaskMetadataPath, v4.TaskMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, false))
+	muxRouter.HandleFunc(v4.TaskWithTagsMetadataPath, v4.TaskMetadataHandler(state, ecsClient, cluster, availabilityZone, containerInstanceArn, true))
+	muxRouter.HandleFunc(v4.ContainerStatsPath, v4.ContainerStatsHandler(state, statsEngine))
+	muxRouter.HandleFunc(v4.TaskStatsPath, v4.TaskStatsHandler(state, statsEngine))
+	muxRouter.HandleFunc(v4.ContainerAssociationsPath, v4.ContainerAssociationsHandler(state))
+	muxRouter.HandleFunc(v4.ContainerAssociationPathWithSlash, v4.ContainerAssociationHandler(state))
+	muxRouter.HandleFunc(v4.ContainerAssociationPath, v4.ContainerAssociationHandler(state))
+}
+
+// ServeTaskHTTPEndpoint serves task/container metadata, task/container stats, and IAM Role Credentials
+// for tasks being managed by the agent.
+func ServeTaskHTTPEndpoint(
+	ctx context.Context,
+	credentialsManager credentials.Manager,
+	state dockerstate.TaskEngineState,
+	ecsClient api.ECSClient,
+	containerInstanceArn string,
+	cfg *config.Config,
+	statsEngine stats.Engine,
+	availabilityZone string) {
+	// Create and initialize the audit log
+	logger, err := seelog.LoggerFromConfigAsString(audit.AuditLoggerConfig(cfg))
+	if err != nil {
+		seelog.Errorf("Error initializing the audit log: %v", err)
+		// If the logger cannot be initialized, use the provided dummy seelog.LoggerInterface, seelog.Disabled.
+		logger = seelog.Disabled
+	}
+
+	auditLogger := audit.NewAuditLog(containerInstanceArn, cfg, logger)
+
+	server := taskServerSetup(credentialsManager, auditLogger, state, ecsClient, cfg.Cluster, statsEngine,
+		cfg.TaskMetadataSteadyStateRate, cfg.TaskMetadataBurstRate, availabilityZone, containerInstanceArn)
+
+	go func() {
+		<-ctx.Done()
+		if err := server.Shutdown(context.Background()); err != nil {
+			// Error from closing listeners, or context timeout:
+			seelog.Infof("HTTP server Shutdown: %v", err)
+		}
+	}()
+
+	for {
+		retry.RetryWithBackoff(retry.NewExponentialBackoff(time.Second, time.Minute, 0.2, 2), func() error {
+			if err := server.ListenAndServe(); err != http.ErrServerClosed {
+				seelog.Errorf("Error running task api: %v", err)
+				return err
+			}
+			// server was cleanly closed via context
+			return nil
+		})
+		// After Shutdown (triggered by context cancellation above),
+		// ListenAndServe returns http.ErrServerClosed immediately and the
+		// retry closure returns nil, so without this check the outer loop
+		// would spin forever on the closed listener. Exit once cancelled.
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+	}
+}
diff --git a/agent/handlers/task_server_setup_test.go b/agent/handlers/task_server_setup_test.go
new file mode 100644
index 00000000000..0821a05b551
--- /dev/null
+++ b/agent/handlers/task_server_setup_test.go
@@ -0,0 +1,1833 @@
+// +build unit
+
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package handlers + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + v4 "github.com/aws/amazon-ecs-agent/agent/handlers/v4" + mock_audit "github.com/aws/amazon-ecs-agent/agent/logger/audit/mocks" + "github.com/aws/amazon-ecs-agent/agent/stats" + mock_stats "github.com/aws/amazon-ecs-agent/agent/stats/mock" + 
"github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + clusterName = "default" + remoteIP = "169.254.170.3" + remotePort = "32146" + taskARN = "t1" + family = "sleep" + version = "1" + containerID = "cid" + containerName = "sleepy" + pulledContainerName = "pulled" + imageName = "busybox" + imageID = "bUsYbOx" + cpu = 1024 + memory = 512 + statusRunning = "RUNNING" + statusPulled = "PULLED" + statusNone = "NONE" + containerType = "NORMAL" + containerPort = 80 + containerPortProtocol = "tcp" + eniIPv4Address = "10.0.0.2" + roleArn = "r1" + accessKeyID = "ak" + secretAccessKey = "sk" + credentialsID = "credentialsId" + v2BaseStatsPath = "/v2/stats" + v2BaseMetadataPath = "/v2/metadata" + v2BaseMetadataWithTagsPath = "/v2/metadataWithTags" + v3BasePath = "/v3/" + v4BasePath = "/v4/" + v3EndpointID = "v3eid" + availabilityzone = "us-west-2b" + containerInstanceArn = "containerInstanceArn-test" + associationType = "elastic-inference" + associationName = "dev1" + associationEncoding = "base64" + associationValue = "val" + bridgeMode = "bridge" + bridgeIPAddr = "17.0.0.3" + attachmentIndex = 0 + iPv4SubnetCIDRBlock = "172.31.32.0/20" + macAddress = "06:96:9a:ce:a6:ce" + privateDNSName = "ip-172-31-47-69.us-west-2.compute.internal" + subnetGatewayIpv4Address = "172.31.32.1/20" +) + +var ( + now = time.Now() + association = apitask.Association{ + Containers: []string{containerName}, + Content: apitask.EncodedString{ + Encoding: associationEncoding, + Value: associationValue, + }, + Name: associationName, + Type: associationType, + } + pulledAssociation = apitask.Association{ + Containers: []string{containerName, pulledContainerName}, + Content: apitask.EncodedString{ + Encoding: associationEncoding, + Value: associationValue, + }, + Name: associationName, + Type: associationType, + } + task = &apitask.Task{ + Arn: taskARN, + 
Associations: []apitask.Association{association}, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + MacAddress: macAddress, + PrivateDNSName: privateDNSName, + SubnetGatewayIPV4Address: subnetGatewayIpv4Address, + }, + }, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + LaunchType: "EC2", + } + pulledTask = &apitask.Task{ + Arn: taskARN, + Associations: []apitask.Association{pulledAssociation}, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskStatusNone, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + MacAddress: macAddress, + PrivateDNSName: privateDNSName, + SubnetGatewayIPV4Address: subnetGatewayIpv4Address, + }, + }, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + LaunchType: "EC2", + } + container = &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + ContainerArn: "arn:aws:ecs:ap-northnorth-1:NNN:container/NNNNNNNN-aaaa-4444-bbbb-00000000000", + KnownPortBindingsUnsafe: []apicontainer.PortBinding{ + { + ContainerPort: containerPort, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + } + pulledContainer = &apicontainer.Container{ + Name: pulledContainerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerPulled, + CPU: cpu, + Memory: memory, + Type: 
apicontainer.ContainerNormal, + ContainerArn: "arn:aws:ecs:ap-northnorth-1:NNN:container/NNNNNNNN-aaaa-4444-bbbb-00000000000", + } + dockerContainer = &apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: containerName, + Container: container, + } + pulledDockerContainer = &apicontainer.DockerContainer{ + Container: pulledContainer, + } + containerNameToDockerContainer = map[string]*apicontainer.DockerContainer{ + taskARN: dockerContainer, + } + pulledContainerNameToDockerContainer = map[string]*apicontainer.DockerContainer{ + taskARN: pulledDockerContainer, + } + labels = map[string]string{ + "foo": "bar", + } + expectedContainerResponse = v2.ContainerResponse{ + ID: containerID, + Name: containerName, + DockerName: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Limits: v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + Type: containerType, + Labels: labels, + Ports: []v1.PortResponse{ + { + ContainerPort: containerPort, + Protocol: containerPortProtocol, + HostPort: containerPort, + }, + }, + Networks: []containermetadata.Network{ + { + NetworkMode: utils.NetworkModeAWSVPC, + IPv4Addresses: []string{eniIPv4Address}, + }, + }, + } + expectedPulledContainerResponse = v2.ContainerResponse{ + Name: pulledContainerName, + Image: imageName, + ImageID: imageID, + DesiredStatus: statusRunning, + KnownStatus: statusPulled, + Limits: v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + Type: containerType, + } + expectedTaskResponse = v2.TaskResponse{ + Cluster: clusterName, + TaskARN: taskARN, + Family: family, + Revision: version, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Containers: []v2.ContainerResponse{expectedContainerResponse}, + Limits: &v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + PullStartedAt: aws.Time(now.UTC()), + PullStoppedAt: aws.Time(now.UTC()), + 
ExecutionStoppedAt: aws.Time(now.UTC()), + AvailabilityZone: availabilityzone, + } + expectedAssociationsResponse = v3.AssociationsResponse{ + Associations: []string{associationName}, + } + expectedAssociationResponse = associationValue + + bridgeTask = &apitask.Task{ + Arn: taskARN, + Associations: []apitask.Association{association}, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + LaunchType: "EC2", + } + container1 = &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + KnownPortBindingsUnsafe: []apicontainer.PortBinding{ + { + ContainerPort: containerPort, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + NetworkModeUnsafe: bridgeMode, + NetworkSettingsUnsafe: &types.NetworkSettings{ + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: bridgeIPAddr, + }, + }, + } + bridgeContainer = &apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: containerName, + Container: container1, + } + containerNameToBridgeContainer = map[string]*apicontainer.DockerContainer{ + taskARN: bridgeContainer, + } + expectedBridgeContainerResponse = v2.ContainerResponse{ + ID: containerID, + Name: containerName, + DockerName: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Limits: v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + Type: containerType, + Labels: labels, + Ports: []v1.PortResponse{ + { + ContainerPort: containerPort, + Protocol: containerPortProtocol, + }, + }, + Networks: []containermetadata.Network{ + { + 
NetworkMode: bridgeMode, + IPv4Addresses: []string{bridgeIPAddr}, + }, + }, + } + expectedBridgeTaskResponse = v2.TaskResponse{ + Cluster: clusterName, + TaskARN: taskARN, + Family: family, + Revision: version, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Containers: []v2.ContainerResponse{expectedBridgeContainerResponse}, + Limits: &v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + PullStartedAt: aws.Time(now.UTC()), + PullStoppedAt: aws.Time(now.UTC()), + ExecutionStoppedAt: aws.Time(now.UTC()), + AvailabilityZone: availabilityzone, + } + attachmentIndexVar = attachmentIndex + expectedV4ContainerResponse = v4.ContainerResponse{ + ContainerResponse: &v2.ContainerResponse{ + ID: containerID, + Name: containerName, + DockerName: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + ContainerARN: "arn:aws:ecs:ap-northnorth-1:NNN:container/NNNNNNNN-aaaa-4444-bbbb-00000000000", + Limits: v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + Type: containerType, + Labels: labels, + Ports: []v1.PortResponse{ + { + ContainerPort: containerPort, + Protocol: containerPortProtocol, + HostPort: containerPort, + }, + }, + Networks: []containermetadata.Network{ + { + NetworkMode: utils.NetworkModeAWSVPC, + IPv4Addresses: []string{eniIPv4Address}, + }, + }, + }, + Networks: []v4.Network{{ + Network: containermetadata.Network{ + NetworkMode: utils.NetworkModeAWSVPC, + IPv4Addresses: []string{eniIPv4Address}, + }, + NetworkInterfaceProperties: v4.NetworkInterfaceProperties{ + AttachmentIndex: &attachmentIndexVar, + IPV4SubnetCIDRBlock: iPv4SubnetCIDRBlock, + MACAddress: macAddress, + PrivateDNSName: privateDNSName, + SubnetGatewayIPV4Address: subnetGatewayIpv4Address, + }}, + }, + } + expectedV4PulledContainerResponse = v4.ContainerResponse{ + ContainerResponse: &v2.ContainerResponse{ + Name: pulledContainerName, + Image: imageName, + ImageID: 
imageID, + DesiredStatus: statusRunning, + KnownStatus: statusPulled, + ContainerARN: "arn:aws:ecs:ap-northnorth-1:NNN:container/NNNNNNNN-aaaa-4444-bbbb-00000000000", + Limits: v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + Type: containerType, + }, + } + expectedV4TaskResponse = v4.TaskResponse{ + TaskResponse: &v2.TaskResponse{ + Cluster: clusterName, + TaskARN: taskARN, + Family: family, + Revision: version, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Containers: []v2.ContainerResponse{expectedContainerResponse}, + Limits: &v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + PullStartedAt: aws.Time(now.UTC()), + PullStoppedAt: aws.Time(now.UTC()), + ExecutionStoppedAt: aws.Time(now.UTC()), + AvailabilityZone: availabilityzone, + LaunchType: "EC2", + }, + Containers: []v4.ContainerResponse{expectedV4ContainerResponse}, + } + expectedV4PulledTaskResponse = v4.TaskResponse{ + TaskResponse: &v2.TaskResponse{ + Cluster: clusterName, + TaskARN: taskARN, + Family: family, + Revision: version, + DesiredStatus: statusRunning, + KnownStatus: statusNone, + Containers: []v2.ContainerResponse{expectedContainerResponse, expectedPulledContainerResponse}, + Limits: &v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + PullStartedAt: aws.Time(now.UTC()), + PullStoppedAt: aws.Time(now.UTC()), + ExecutionStoppedAt: aws.Time(now.UTC()), + AvailabilityZone: availabilityzone, + LaunchType: "EC2", + }, + Containers: []v4.ContainerResponse{expectedV4ContainerResponse, expectedV4PulledContainerResponse}, + } + expectedV4BridgeContainerResponse = v4.ContainerResponse{ + ContainerResponse: &expectedBridgeContainerResponse, + Networks: []v4.Network{{ + Network: containermetadata.Network{ + NetworkMode: bridgeMode, + IPv4Addresses: []string{bridgeIPAddr}, + }, + NetworkInterfaceProperties: v4.NetworkInterfaceProperties{ + AttachmentIndex: nil, + IPV4SubnetCIDRBlock: "", + MACAddress: 
"", + PrivateDNSName: "", + SubnetGatewayIPV4Address: "", + }}, + }, + } + expectedV4BridgeTaskResponse = v4.TaskResponse{ + TaskResponse: &v2.TaskResponse{ + Cluster: clusterName, + TaskARN: taskARN, + Family: family, + Revision: version, + DesiredStatus: statusRunning, + KnownStatus: statusRunning, + Containers: []v2.ContainerResponse{expectedBridgeContainerResponse}, + Limits: &v2.LimitsResponse{ + CPU: aws.Float64(cpu), + Memory: aws.Int64(memory), + }, + PullStartedAt: aws.Time(now.UTC()), + PullStoppedAt: aws.Time(now.UTC()), + ExecutionStoppedAt: aws.Time(now.UTC()), + AvailabilityZone: availabilityzone, + LaunchType: "EC2", + }, + Containers: []v4.ContainerResponse{expectedV4BridgeContainerResponse}, + } +) + +func init() { + container.SetLabels(labels) + container1.SetLabels(labels) +} + +// TestInvalidPath tests if HTTP status code 404 is returned when invalid path is queried. +func TestInvalidPath(t *testing.T) { + testErrorResponsesFromServer(t, "/", nil) +} + +// TestCredentialsV1RequestWithNoArguments tests if HTTP status code 400 is returned when +// query parameters are not specified for the credentials endpoint. +func TestCredentialsV1RequestWithNoArguments(t *testing.T) { + msg := &utils.ErrorMessage{ + Code: v1.ErrNoIDInRequest, + Message: "CredentialsV1Request: No ID in the request", + HTTPErrorCode: http.StatusBadRequest, + } + testErrorResponsesFromServer(t, credentials.V1CredentialsPath, msg) +} + +// TestCredentialsV2RequestWithNoArguments tests if HTTP status code 400 is returned when +// query parameters are not specified for the credentials endpoint. 
+func TestCredentialsV2RequestWithNoArguments(t *testing.T) { + msg := &utils.ErrorMessage{ + Code: v1.ErrNoIDInRequest, + Message: "CredentialsV2Request: No ID in the request", + HTTPErrorCode: http.StatusBadRequest, + } + testErrorResponsesFromServer(t, credentials.V2CredentialsPath+"/", msg) +} + +// TestCredentialsV1RequestWhenCredentialsIdNotFound tests if HTTP status code 400 is returned when +// the credentials manager does not contain the credentials id specified in the query. +func TestCredentialsV1RequestWhenCredentialsIdNotFound(t *testing.T) { + expectedErrorMessage := &utils.ErrorMessage{ + Code: v1.ErrInvalidIDInRequest, + Message: fmt.Sprintf("CredentialsV1Request: Credentials not found"), + HTTPErrorCode: http.StatusBadRequest, + } + path := credentials.V1CredentialsPath + "?id=" + credentialsID + _, err := getResponseForCredentialsRequest(t, expectedErrorMessage.HTTPErrorCode, + expectedErrorMessage, path, func() (credentials.TaskIAMRoleCredentials, bool) { return credentials.TaskIAMRoleCredentials{}, false }) + assert.NoError(t, err, "Error getting response body") +} + +// TestCredentialsV2RequestWhenCredentialsIdNotFound tests if HTTP status code 400 is returned when +// the credentials manager does not contain the credentials id specified in the query. 
+func TestCredentialsV2RequestWhenCredentialsIdNotFound(t *testing.T) { + expectedErrorMessage := &utils.ErrorMessage{ + Code: v1.ErrInvalidIDInRequest, + Message: fmt.Sprintf("CredentialsV2Request: Credentials not found"), + HTTPErrorCode: http.StatusBadRequest, + } + path := credentials.V2CredentialsPath + "/" + credentialsID + _, err := getResponseForCredentialsRequest(t, expectedErrorMessage.HTTPErrorCode, + expectedErrorMessage, path, func() (credentials.TaskIAMRoleCredentials, bool) { return credentials.TaskIAMRoleCredentials{}, false }) + assert.NoError(t, err, "Error getting response body") +} + +// TestCredentialsV1RequestWhenCredentialsUninitialized tests if HTTP status code 500 is returned when +// the credentials manager returns empty credentials. +func TestCredentialsV1RequestWhenCredentialsUninitialized(t *testing.T) { + expectedErrorMessage := &utils.ErrorMessage{ + Code: v1.ErrCredentialsUninitialized, + Message: fmt.Sprintf("CredentialsV1Request: Credentials uninitialized for ID"), + HTTPErrorCode: http.StatusServiceUnavailable, + } + path := credentials.V1CredentialsPath + "?id=" + credentialsID + _, err := getResponseForCredentialsRequest(t, expectedErrorMessage.HTTPErrorCode, + expectedErrorMessage, path, func() (credentials.TaskIAMRoleCredentials, bool) { return credentials.TaskIAMRoleCredentials{}, true }) + assert.NoError(t, err, "Error getting response body") +} + +// TestCredentialsV2RequestWhenCredentialsUninitialized tests if HTTP status code 500 is returned when +// the credentials manager returns empty credentials. 
+func TestCredentialsV2RequestWhenCredentialsUninitialized(t *testing.T) { + expectedErrorMessage := &utils.ErrorMessage{ + Code: v1.ErrCredentialsUninitialized, + Message: fmt.Sprintf("CredentialsV2Request: Credentials uninitialized for ID"), + HTTPErrorCode: http.StatusServiceUnavailable, + } + path := credentials.V2CredentialsPath + "/" + credentialsID + _, err := getResponseForCredentialsRequest(t, expectedErrorMessage.HTTPErrorCode, + expectedErrorMessage, path, func() (credentials.TaskIAMRoleCredentials, bool) { return credentials.TaskIAMRoleCredentials{}, true }) + assert.NoError(t, err, "Error getting response body") +} + +// TestCredentialsV1RequestWhenCredentialsFound tests if HTTP status code 200 is returned when +// the credentials manager contains the credentials id specified in the query. +func TestCredentialsV1RequestWhenCredentialsFound(t *testing.T) { + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + RoleArn: roleArn, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + }, + } + path := credentials.V1CredentialsPath + "?id=" + credentialsID + body, err := getResponseForCredentialsRequest(t, http.StatusOK, nil, path, func() (credentials.TaskIAMRoleCredentials, bool) { return creds, true }) + assert.NoError(t, err) + + credentials, err := parseResponseBody(body) + assert.NoError(t, err, "Error retrieving credentials") + + assert.Equal(t, roleArn, credentials.RoleArn, "Incorrect credentials received: role ARN") + assert.Equal(t, accessKeyID, credentials.AccessKeyID, "Incorrect credentials received: access key ID") + assert.Equal(t, secretAccessKey, credentials.SecretAccessKey, "Incorrect credentials received: secret access key") +} + +// TestCredentialsV2RequestWhenCredentialsFound tests if HTTP status code 200 is returned when +// the credentials manager contains the credentials id specified in the query. 
+func TestCredentialsV2RequestWhenCredentialsFound(t *testing.T) { + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + RoleArn: roleArn, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + }, + } + path := credentials.V2CredentialsPath + "/" + credentialsID + body, err := getResponseForCredentialsRequest(t, http.StatusOK, nil, path, func() (credentials.TaskIAMRoleCredentials, bool) { return creds, true }) + if err != nil { + t.Fatalf("Error retrieving credentials response: %v", err) + } + + credentials, err := parseResponseBody(body) + assert.NoError(t, err, "Error retrieving credentials") + + assert.Equal(t, roleArn, credentials.RoleArn, "Incorrect credentials received: role ARN") + assert.Equal(t, accessKeyID, credentials.AccessKeyID, "Incorrect credentials received: access key ID") + assert.Equal(t, secretAccessKey, credentials.SecretAccessKey, "Incorrect credentials received: secret access key") +} + +func testErrorResponsesFromServer(t *testing.T, path string, expectedErrorMessage *utils.ErrorMessage) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + server := taskServerSetup(credentialsManager, auditLog, nil, ecsClient, "", nil, config.DefaultTaskMetadataSteadyStateRate, + config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", path, nil) + if path == credentials.V1CredentialsPath || path == credentials.V2CredentialsPath+"/" { + auditLog.EXPECT().Log(gomock.Any(), gomock.Any(), gomock.Any()) + } + + server.Handler.ServeHTTP(recorder, req) + HTTPErrorCode := http.StatusNotFound + if expectedErrorMessage != nil { + HTTPErrorCode = expectedErrorMessage.HTTPErrorCode + } + assert.Equal(t, HTTPErrorCode, recorder.Code, "Incorrect return 
code") + + // Only paths that are equal to /v1/credentials will return valid error responses. + if path == credentials.CredentialsPath { + errorMessage := &utils.ErrorMessage{} + json.Unmarshal(recorder.Body.Bytes(), errorMessage) + assert.Equal(t, expectedErrorMessage.Code, errorMessage.Code, "Incorrect error code") + assert.Equal(t, expectedErrorMessage.Message, errorMessage.Message, "Incorrect error message") + } +} + +// getResponseForCredentialsRequestWithParameters queries credentials for the +// given id. The getCredentials function is used to simulate getting the +// credentials object from the CredentialsManager +func getResponseForCredentialsRequest(t *testing.T, expectedStatus int, + expectedErrorMessage *utils.ErrorMessage, path string, getCredentials func() (credentials.TaskIAMRoleCredentials, bool)) (*bytes.Buffer, error) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + credentialsManager := mock_credentials.NewMockManager(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + server := taskServerSetup(credentialsManager, auditLog, nil, ecsClient, "", nil, config.DefaultTaskMetadataSteadyStateRate, + config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + + creds, ok := getCredentials() + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, ok) + auditLog.EXPECT().Log(gomock.Any(), gomock.Any(), gomock.Any()) + + params := make(url.Values) + params[credentials.CredentialsIDQueryParameterName] = []string{credentialsID} + + req, _ := http.NewRequest("GET", path, nil) + server.Handler.ServeHTTP(recorder, req) + + assert.Equal(t, expectedStatus, recorder.Code, "Incorrect return code") + + if recorder.Code != http.StatusOK { + errorMessage := &utils.ErrorMessage{} + json.Unmarshal(recorder.Body.Bytes(), errorMessage) + + assert.Equal(t, expectedErrorMessage.Code, errorMessage.Code, "Incorrect error code") + assert.Equal(t, 
expectedErrorMessage.Message, errorMessage.Message, "Incorrect error message") + } + + return recorder.Body, nil +} + +// parseResponseBody parses the HTTP response body into an IAMRoleCredentials object. +func parseResponseBody(body *bytes.Buffer) (*credentials.IAMRoleCredentials, error) { + response, err := ioutil.ReadAll(body) + if err != nil { + return nil, fmt.Errorf("Error reading response body: %v", err) + } + var creds credentials.IAMRoleCredentials + err = json.Unmarshal(response, &creds) + if err != nil { + return nil, fmt.Errorf("Error unmarshaling: %v", err) + } + return &creds, nil +} + +func TestV2TaskMetadata(t *testing.T) { + testCases := []struct { + path string + }{ + { + v2BaseMetadataPath, + }, + { + v2BaseMetadataPath + "/", + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Testing path: %s", tc.path), func(t *testing.T) { + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().GetTaskByIPAddress(remoteIP).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", tc.path, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v2.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + 
assert.Equal(t, expectedTaskResponse, taskResponse) + }) + } +} + +func TestV2TaskWithTagsMetadata(t *testing.T) { + testCases := []struct { + path string + }{ + { + v2BaseMetadataWithTagsPath, + }, + { + v2BaseMetadataWithTagsPath + "/", + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Testing path: %s", tc.path), func(t *testing.T) { + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + expectedTaskResponseWithTags := expectedTaskResponse + expectedContainerInstanceTags := map[string]string{ + "ContainerInstanceTag1": "firstTag", + "ContainerInstanceTag2": "secondTag", + } + expectedTaskResponseWithTags.ContainerInstanceTags = expectedContainerInstanceTags + expectedTaskTags := map[string]string{ + "TaskTag1": "firstTag", + "TaskTag2": "secondTag", + } + expectedTaskResponseWithTags.TaskTags = expectedTaskTags + + contInstTag1Key := "ContainerInstanceTag1" + contInstTag1Val := "firstTag" + contInstTag2Key := "ContainerInstanceTag2" + contInstTag2Val := "secondTag" + taskTag1Key := "TaskTag1" + taskTag1Val := "firstTag" + taskTag2Key := "TaskTag2" + taskTag2Val := "secondTag" + + gomock.InOrder( + state.EXPECT().GetTaskByIPAddress(remoteIP).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ecsClient.EXPECT().GetResourceTags(containerInstanceArn).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &contInstTag1Key, + Value: &contInstTag1Val, + }, + &ecs.Tag{ + Key: &contInstTag2Key, + Value: &contInstTag2Val, + }, + }, nil), + ecsClient.EXPECT().GetResourceTags(taskARN).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &taskTag1Key, + Value: &taskTag1Val, + }, + &ecs.Tag{ + Key: &taskTag2Key, + Value: &taskTag2Val, + }, + }, nil), + ) + server := 
taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v2BaseMetadataWithTagsPath, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v2.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + assert.Equal(t, expectedTaskResponseWithTags, taskResponse) + }) + } +} + +func TestV2ContainerMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().GetTaskByIPAddress(remoteIP).Return(taskARN, true), + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByID(containerID).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v2BaseMetadataPath+"/"+containerID, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var containerResponse v2.ContainerResponse + err = json.Unmarshal(res, &containerResponse) + assert.NoError(t, err) + assert.Equal(t, expectedContainerResponse, containerResponse) +} + +func TestV2ContainerStats(t 
*testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + gomock.InOrder( + state.EXPECT().GetTaskByIPAddress(remoteIP).Return(taskARN, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v2BaseStatsPath+"/"+containerID, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var statsFromResult *types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + assert.Equal(t, dockerStats.NumProcs, statsFromResult.NumProcs) +} + +func TestV2TaskStats(t *testing.T) { + testCases := []struct { + path string + }{ + { + v2BaseStatsPath, + }, + { + v2BaseStatsPath + "/", + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Testing path: %s", tc.path), func(t *testing.T) { + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + containerMap := map[string]*apicontainer.DockerContainer{ + containerName: { + DockerID: containerID, + }, + } + gomock.InOrder( + 
state.EXPECT().GetTaskByIPAddress(remoteIP).Return(taskARN, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerMap, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", tc.path, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var statsFromResult map[string]*types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + containerStats, ok := statsFromResult[containerID] + assert.True(t, ok) + assert.Equal(t, dockerStats.NumProcs, containerStats.NumProcs) + }) + } +} + +func TestV3TaskMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/task", nil) + 
server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v2.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + assert.Equal(t, expectedTaskResponse, taskResponse) +} + +func TestV3BridgeTaskMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToBridgeContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/task", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v2.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + assert.Equal(t, expectedBridgeTaskResponse, taskResponse) +} + +func TestV3BridgeContainerMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + 
gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + state.EXPECT().TaskByID(containerID).Return(bridgeTask, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var containerResponse v2.ContainerResponse + err = json.Unmarshal(res, &containerResponse) + assert.NoError(t, err) + assert.Equal(t, expectedBridgeContainerResponse, containerResponse) +} + +// Test API calls for propagating Tags to Task Metadata +func TestV3TaskMetadataWithTags(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + expectedTaskResponseWithTags := expectedTaskResponse + expectedContainerInstanceTags := map[string]string{ + "ContainerInstanceTag1": "firstTag", + "ContainerInstanceTag2": "secondTag", + } + expectedTaskResponseWithTags.ContainerInstanceTags = expectedContainerInstanceTags + expectedTaskTags := map[string]string{ + "TaskTag1": "firstTag", + "TaskTag2": "secondTag", + } + expectedTaskResponseWithTags.TaskTags = expectedTaskTags + + contInstTag1Key := "ContainerInstanceTag1" + contInstTag1Val := "firstTag" + contInstTag2Key := "ContainerInstanceTag2" + contInstTag2Val := "secondTag" + taskTag1Key := "TaskTag1" + taskTag1Val := 
"firstTag" + taskTag2Key := "TaskTag2" + taskTag2Val := "secondTag" + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ecsClient.EXPECT().GetResourceTags(containerInstanceArn).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &contInstTag1Key, + Value: &contInstTag1Val, + }, + &ecs.Tag{ + Key: &contInstTag2Key, + Value: &contInstTag2Val, + }, + }, nil), + ecsClient.EXPECT().GetResourceTags(taskARN).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &taskTag1Key, + Value: &taskTag1Val, + }, + &ecs.Tag{ + Key: &taskTag2Key, + Value: &taskTag2Val, + }, + }, nil), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/taskWithTags", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v2.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + assert.Equal(t, expectedTaskResponseWithTags, taskResponse) +} + +func TestV3ContainerMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + 
state.EXPECT().TaskByID(containerID).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var containerResponse v2.ContainerResponse + err = json.Unmarshal(res, &containerResponse) + assert.NoError(t, err) + assert.Equal(t, expectedContainerResponse, containerResponse) +} + +func TestV3TaskStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + + containerMap := map[string]*apicontainer.DockerContainer{ + containerName: { + DockerID: containerID, + }, + } + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerMap, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/task/stats", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var 
statsFromResult map[string]*types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + containerStats, ok := statsFromResult[containerID] + assert.True(t, ok) + assert.Equal(t, dockerStats.NumProcs, containerStats.NumProcs) +} + +func TestV3ContainerStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/stats", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var statsFromResult *types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + assert.Equal(t, dockerStats.NumProcs, statsFromResult.NumProcs) +} + +func TestV3ContainerAssociations(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, 
true), + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/associations/"+associationType, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + + var associationsResponse v3.AssociationsResponse + err = json.Unmarshal(res, &associationsResponse) + assert.NoError(t, err) + assert.Equal(t, expectedAssociationsResponse, associationsResponse) +} + +func TestV3ContainerAssociation(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/associations/"+associationType+"/"+associationName, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + + assert.Equal(t, expectedAssociationResponse, string(res)) +} + +func 
TestV4TaskMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true).AnyTimes(), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true).AnyTimes(), + state.EXPECT().PulledContainerMapByArn(taskARN).Return(nil, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/task", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v4.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + + expectedV4TaskResponse.TaskResponse.Containers = nil + expectedV4ContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedV4TaskResponse, taskResponse) +} + +func TestV4TaskMetadataWithPulledContainers(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(pulledTask, true).AnyTimes(), + 
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(pulledTask, true).AnyTimes(), + state.EXPECT().PulledContainerMapByArn(taskARN).Return(pulledContainerNameToDockerContainer, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/task", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v4.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + expectedV4PulledTaskResponse.TaskResponse.Containers = nil + expectedV4ContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedV4PulledTaskResponse, taskResponse) +} + +func TestV4ContainerMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByID(containerID).Return(task, true).Times(2), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "us-west-2b", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := 
ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + + var containerResponse v4.ContainerResponse + err = json.Unmarshal(res, &containerResponse) + assert.NoError(t, err) + + // v4.ContainerMetadata overrides Networks properties defined in v2.ContainerResponse + // during json.Unmarshal(), values for the Networks property will be written to v4.ContainerMetadata.Networks + // instead of v4.ContainerMetadata.(v2.ContainerMetadata).Networks + // v2.ContainerMetadata.Networks should be nil + expectedV4ContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedV4ContainerResponse, containerResponse) +} + +// Test API calls for propagating Tags to v4 Task Metadata +func TestV4TaskMetadataWithTags(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + expectedv4TaskResponseWithTags := expectedV4TaskResponse + expectedContainerInstanceTags := map[string]string{ + "ContainerInstanceTag1": "firstTag", + "ContainerInstanceTag2": "secondTag", + } + expectedv4TaskResponseWithTags.ContainerInstanceTags = expectedContainerInstanceTags + expectedTaskTags := map[string]string{ + "TaskTag1": "firstTag", + "TaskTag2": "secondTag", + } + expectedv4TaskResponseWithTags.TaskTags = expectedTaskTags + + contInstTag1Key := "ContainerInstanceTag1" + contInstTag1Val := "firstTag" + contInstTag2Key := "ContainerInstanceTag2" + contInstTag2Val := "secondTag" + taskTag1Key := "TaskTag1" + taskTag1Val := "firstTag" + taskTag2Key := "TaskTag2" + taskTag2Val := "secondTag" + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true).AnyTimes(), + 
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ecsClient.EXPECT().GetResourceTags(containerInstanceArn).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &contInstTag1Key, + Value: &contInstTag1Val, + }, + &ecs.Tag{ + Key: &contInstTag2Key, + Value: &contInstTag2Val, + }, + }, nil), + ecsClient.EXPECT().GetResourceTags(taskARN).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &taskTag1Key, + Value: &taskTag1Val, + }, + &ecs.Tag{ + Key: &taskTag2Key, + Value: &taskTag2Val, + }, + }, nil), + state.EXPECT().TaskByArn(taskARN).Return(task, true).AnyTimes(), + state.EXPECT().PulledContainerMapByArn(taskARN).Return(nil, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/taskWithTags", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v4.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + + expectedv4TaskResponseWithTags.TaskResponse.Containers = nil + expectedV4ContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedv4TaskResponseWithTags, taskResponse) +} + +func TestV4BridgeTaskMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + 
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToBridgeContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + state.EXPECT().PulledContainerMapByArn(taskARN).Return(nil, true), + ) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/task", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v4.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) + + expectedV4BridgeTaskResponse.TaskResponse.Containers = nil + expectedV4BridgeContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedV4BridgeTaskResponse, taskResponse) +} + +func TestV4BridgeTaskMetadataAllowMissingContainerNetwork(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToBridgeContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(bridgeTask, true), + state.EXPECT().ContainerByID(containerID).Return(nil, false), + state.EXPECT().PulledContainerMapByArn(taskARN).Return(nil, true), + ) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + 
config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone, containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/task", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var taskResponse v4.TaskResponse + err = json.Unmarshal(res, &taskResponse) + assert.NoError(t, err) +} + +func TestV4BridgeContainerMetadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + state.EXPECT().TaskByID(containerID).Return(bridgeTask, true), + state.EXPECT().ContainerByID(containerID).Return(bridgeContainer, true), + ) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var containerResponse v4.ContainerResponse + err = json.Unmarshal(res, &containerResponse) + assert.NoError(t, err) + + expectedV4BridgeContainerResponse.ContainerResponse.Networks = nil + assert.Equal(t, expectedV4BridgeContainerResponse, containerResponse) +} + +func TestV4TaskStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := 
mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + + containerMap := map[string]*apicontainer.DockerContainer{ + containerName: { + DockerID: containerID, + }, + } + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerMap, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/task/stats", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var statsFromResult map[string]*types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + containerStats, ok := statsFromResult[containerID] + assert.True(t, ok) + assert.Equal(t, dockerStats.NumProcs, containerStats.NumProcs) +} + +func TestV4ContainerStats(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + 
statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/stats", nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + var statsFromResult *types.StatsJSON + err = json.Unmarshal(res, &statsFromResult) + assert.NoError(t, err) + assert.Equal(t, dockerStats.NumProcs, statsFromResult.NumProcs) +} + +func TestV4ContainerAssociations(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().DockerIDByV3EndpointID(v3EndpointID).Return(containerID, true), + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/associations/"+associationType, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + + var associationsResponse v3.AssociationsResponse + err = 
json.Unmarshal(res, &associationsResponse) + assert.NoError(t, err) + assert.Equal(t, expectedAssociationsResponse, associationsResponse) +} + +func TestV4ContainerAssociation(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + gomock.InOrder( + state.EXPECT().TaskARNByV3EndpointID(v3EndpointID).Return(taskARN, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", v4BasePath+v3EndpointID+"/associations/"+associationType+"/"+associationName, nil) + server.Handler.ServeHTTP(recorder, req) + res, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + assert.Equal(t, expectedAssociationResponse, string(res)) +} + +func TestTaskHTTPEndpoint301Redirect(t *testing.T) { + testPathsMap := map[string]string{ + "http://127.0.0.1/v3///task/": "http://127.0.0.1/v3/task/", + "http://127.0.0.1//v2/credentials/test": "http://127.0.0.1/v2/credentials/test", + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + + for testPath, expectedPath := range testPathsMap { + 
t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) { + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", testPath, nil) + server.Handler.ServeHTTP(recorder, req) + assert.Equal(t, http.StatusMovedPermanently, recorder.Code) + assert.Equal(t, expectedPath, recorder.Header().Get("Location")) + }) + } +} + +func TestTaskHTTPEndpointErrorCode404(t *testing.T) { + testPaths := []string{ + "/", + "/v2/meta", + "/v2/statss", + "/v3", + "/v3/v3-endpoint-id/", + "/v3/v3-endpoint-id/wrong-path", + "/v3/v3-endpoint-id/task/", + "/v3/v3-endpoint-id/task/stats/", + "/v3/v3-endpoint-id/task/stats/wrong-path", + "/v3/v3-endpoint-id/associtions-with-typo/elastic-inference/dev1", + "/v4/v3-endpoint-id/", + "/v4/v3-endpoint-id/wrong-path", + "/v4/v3-endpoint-id/task/", + "/v4/v3-endpoint-id/task/stats/", + "/v4/v3-endpoint-id/task/stats/wrong-path", + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + + for _, testPath := range testPaths { + t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) { + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", testPath, nil) + server.Handler.ServeHTTP(recorder, req) + assert.Equal(t, http.StatusNotFound, recorder.Code) + }) + } +} + +func TestTaskHTTPEndpointErrorCode400(t *testing.T) { + testPaths := []string{ + "/v2/metadata", + "/v2/metadata/", + "/v2/metadata/wrong-container-id", + "/v2/metadata/container-id/other-path", + "/v2/stats", + "/v2/stats/", + "/v2/stats/wrong-container-id", + "/v2/stats/container-id/other-path", + 
"/v3/wrong-v3-endpoint-id/stats", + "/v3/wrong-v3-endpoint-id/task/stats", + "/v3/task/stats", + "/v4/wrong-v3-endpoint-id/stats", + "/v4/wrong-v3-endpoint-id/task/stats", + "/v3/wrong-v3-endpoint-id/associations/elastic-inference", + "/v3/wrong-v3-endpoint-id/associations/elastic-inference/dev1", + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + + for _, testPath := range testPaths { + t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) { + // Make every possible call to state fail + state.EXPECT().TaskARNByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().DockerIDByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().TaskARNByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().GetTaskByIPAddress(gomock.Any()).Return("", false).AnyTimes() + + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", testPath, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + assert.Equal(t, http.StatusBadRequest, recorder.Code) + }) + } +} + +func TestTaskHTTPEndpointErrorCode500(t *testing.T) { + testPaths := []string{ + "/v3/wrong-v3-endpoint-id", + "/v3/", + "/v3/stats", + "/v3/wrong-v3-endpoint-id/task", + "/v3/task", + "/v4/wrong-v3-endpoint-id", + "/v4/", + "/v4/stats", + "/v4/wrong-v3-endpoint-id/task", + "/v4/task", + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + auditLog := mock_audit.NewMockAuditLogger(ctrl) + statsEngine := 
mock_stats.NewMockEngine(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + server := taskServerSetup(credentials.NewManager(), auditLog, state, ecsClient, clusterName, statsEngine, + config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "", containerInstanceArn) + + for _, testPath := range testPaths { + t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) { + // Make every possible call to state fail + state.EXPECT().TaskARNByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().DockerIDByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().TaskARNByV3EndpointID(gomock.Any()).Return("", false).AnyTimes() + state.EXPECT().GetTaskByIPAddress(gomock.Any()).Return("", false).AnyTimes() + + recorder := httptest.NewRecorder() + req, _ := http.NewRequest("GET", testPath, nil) + req.RemoteAddr = remoteIP + ":" + remotePort + server.Handler.ServeHTTP(recorder, req) + assert.Equal(t, http.StatusInternalServerError, recorder.Code) + }) + } +} diff --git a/agent/handlers/utils/helpers.go b/agent/handlers/utils/helpers.go new file mode 100644 index 00000000000..66a92449107 --- /dev/null +++ b/agent/handlers/utils/helpers.go @@ -0,0 +1,135 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/logger/audit" + "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" + "github.com/cihub/seelog" + "github.com/gorilla/mux" +) + +const ( + // NetworkModeAWSVPC specifies the AWS VPC network mode. + NetworkModeAWSVPC = "awsvpc" + + // RequestTypeCreds specifies the request type of CredentialsHandler. + RequestTypeCreds = "credentials" + + // RequestTypeTaskMetadata specifies the task metadata request type of TaskContainerMetadataHandler. + RequestTypeTaskMetadata = "task metadata" + + // RequestTypeContainerMetadata specifies the container metadata request type of TaskContainerMetadataHandler. + RequestTypeContainerMetadata = "container metadata" + + // RequestTypeTaskStats specifies the task stats request type of StatsHandler. + RequestTypeTaskStats = "task stats" + + // RequestTypeContainerStats specifies the container stats request type of StatsHandler. + RequestTypeContainerStats = "container stats" + + // RequestTypeAgentMetadata specifies the Agent metadata request type of AgentMetadataHandler. + RequestTypeAgentMetadata = "agent metadata" + + // RequestTypeContainerAssociations specifies the container associations request type of ContainerAssociationsHandler. + RequestTypeContainerAssociations = "container associations" + + // RequestTypeContainerAssociation specifies the container association request type of ContainerAssociationHandler. + RequestTypeContainerAssociation = "container association" + + // AnythingButSlashRegEx is a regex pattern that matches any string without slash. + AnythingButSlashRegEx = "[^/]*" + + // AnythingRegEx is a regex pattern that matches anything. + AnythingRegEx = ".*" + + // AnythingButEmptyRegEx is a regex pattern that matches anything but an empty string. + AnythingButEmptyRegEx = ".+" +) + +// ErrorMessage is used to store the human-readable error Code and a descriptive Message +// that describes the error. 
This struct is marshalled and returned in the HTTP response. +type ErrorMessage struct { + Code string `json:"code"` + Message string `json:"message"` + HTTPErrorCode int +} + +// WriteJSONToResponse writes the header, JSON response to a ResponseWriter, and +// log the error if necessary. +func WriteJSONToResponse(w http.ResponseWriter, httpStatusCode int, responseJSON []byte, requestType string) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(httpStatusCode) + _, err := w.Write(responseJSON) + if err != nil { + seelog.Errorf( + "Unable to write %s json response message to ResponseWriter", + requestType) + } + + if httpStatusCode >= 400 && httpStatusCode <= 599 { + seelog.Errorf("HTTP response status code is '%d', request type is: %s, and response in JSON is %s", httpStatusCode, requestType, string(responseJSON)) + } +} + +// WriteResponseIfMarshalError checks the 'err' response of the json.Marshal function. +// if this function returns an error, then it has already written a response to the +// http writer, and the calling function should return. +func WriteResponseIfMarshalError(w http.ResponseWriter, err error) error { + if err != nil { + WriteJSONToResponse(w, http.StatusInternalServerError, []byte(`{}`), RequestTypeAgentMetadata) + seelog.Errorf("Error marshaling json: %s", err) + return fmt.Errorf("json marshal error") + } + return nil +} + +// ValueFromRequest returns the value of a field in the http request. The boolean value is +// set to true if the field exists in the query. 
+func ValueFromRequest(r *http.Request, field string) (string, bool) { + values := r.URL.Query() + _, exists := values[field] + return values.Get(field), exists +} + +// GetMuxValueFromRequest extracts the mux value from the request using a gorilla +// mux name +func GetMuxValueFromRequest(r *http.Request, gorillaMuxName string) (string, bool) { + vars := mux.Vars(r) + val, ok := vars[gorillaMuxName] + return val, ok +} + +// ConstructMuxVar constructs the mux var that is used in the gorilla/mux styled +// path, example: {id}, {id:[0-9]+}. +func ConstructMuxVar(name string, pattern string) string { + if pattern == "" { + return "{" + name + "}" + } + + return "{" + name + ":" + pattern + "}" +} + +// LimitReachedHandler logs the throttled request in the credentials audit log +func LimitReachedHandler(auditLogger audit.AuditLogger) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + logRequest := request.LogRequest{ + Request: r, + } + auditLogger.Log(logRequest, http.StatusTooManyRequests, "") + } +} diff --git a/agent/handlers/utils/helpers_test.go b/agent/handlers/utils/helpers_test.go new file mode 100644 index 00000000000..6beb7ec5f94 --- /dev/null +++ b/agent/handlers/utils/helpers_test.go @@ -0,0 +1,87 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConstructMuxVar(t *testing.T) { + testCases := []struct { + testName string + name string + pattern string + expectedVar string + }{ + { + testName: "With anything but slash pattern", + name: "muxname", + pattern: AnythingButSlashRegEx, + expectedVar: "{muxname:[^/]*}", + }, + { + testName: "With anything pattern", + name: "muxname", + pattern: AnythingRegEx, + expectedVar: "{muxname:.*}", + }, + { + testName: "With anything but empty pattern", + name: "muxname", + pattern: AnythingButEmptyRegEx, + expectedVar: "{muxname:.+}", + }, + { + testName: "Without pattern", + name: "muxname", + pattern: "", + expectedVar: "{muxname}", + }, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + assert.Equal(t, tc.expectedVar, ConstructMuxVar(tc.name, tc.pattern)) + }) + } +} + +func TestWriteJSONToResponse(t *testing.T) { + recorder := httptest.NewRecorder() + responseJSON, _ := json.Marshal("Unable to get task arn from request") + WriteJSONToResponse(recorder, http.StatusOK, responseJSON, RequestTypeTaskMetadata) + + bodyBytes, err := ioutil.ReadAll(recorder.Body) + assert.NoError(t, err) + bodyString := string(bodyBytes) + + assert.Equal(t, http.StatusOK, recorder.Code) + assert.Equal(t, `"Unable to get task arn from request"`, bodyString) +} + +func TestValueFromRequest(t *testing.T) { + r, _ := http.NewRequest("GET", "/v1/credentials?id=credid", nil) + val, ok := ValueFromRequest(r, "id") + + assert.True(t, ok) + assert.Equal(t, "credid", val) +} diff --git a/agent/handlers/utils/types.go b/agent/handlers/utils/types.go new file mode 100644 index 00000000000..cf196bc147c --- /dev/null +++ b/agent/handlers/utils/types.go @@ -0,0 +1,22 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + +// DockerStateResolver is a sub-interface for the engine.TaskEngine interface +// to make it easy to test code in this package +type DockerStateResolver interface { + State() dockerstate.TaskEngineState +} diff --git a/agent/handlers/v1/agent_metadata_handler.go b/agent/handlers/v1/agent_metadata_handler.go new file mode 100644 index 00000000000..8a026281e17 --- /dev/null +++ b/agent/handlers/v1/agent_metadata_handler.go @@ -0,0 +1,42 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v1 + +import ( + "encoding/json" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + agentversion "github.com/aws/amazon-ecs-agent/agent/version" +) + +// AgentMetadataPath is the Agent metadata path for v1 handler. +const AgentMetadataPath = "/v1/metadata" + +// AgentMetadataHandler creates response for 'v1/metadata' API. 
+func AgentMetadataHandler(containerInstanceArn *string, cfg *config.Config) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + resp := &MetadataResponse{ + Cluster: cfg.Cluster, + ContainerInstanceArn: containerInstanceArn, + Version: agentversion.String(), + } + responseJSON, err := json.Marshal(resp) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeAgentMetadata) + } +} diff --git a/agent/handlers/v1/credentials_handler.go b/agent/handlers/v1/credentials_handler.go new file mode 100644 index 00000000000..1fba3c0155a --- /dev/null +++ b/agent/handlers/v1/credentials_handler.go @@ -0,0 +1,154 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v1 + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + handlersutils "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/aws/amazon-ecs-agent/agent/logger/audit" + "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/cihub/seelog" +) + +const ( + // Error Types + + // ErrNoIDInRequest is the error code indicating that no ID was specified + ErrNoIDInRequest = "NoIdInRequest" + + // ErrInvalidIDInRequest is the error code indicating that the ID was invalid + ErrInvalidIDInRequest = "InvalidIdInRequest" + + // ErrNoCredentialsAssociated is the error code indicating no credentials are + // associated with the specified ID + ErrNoCredentialsAssociated = "NoCredentialsAssociated" + + // ErrCredentialsUninitialized is the error code indicating that credentials were + // not properly initialized. This may happen immediately after the agent is + // started, before it has completed state reconciliation. + ErrCredentialsUninitialized = "CredentialsUninitialized" + + // ErrInternalServer is the error indicating something generic went wrong + ErrInternalServer = "InternalServerError" + + // Credentials API version. + apiVersion = 1 + + // CredentialsPath specifies the relative URI path for serving task IAM credentials + CredentialsPath = credentials.V1CredentialsPath +) + +// CredentialsHandler creates response for the 'v1/credentials' API. It returns a JSON response +// containing credentials when found. The HTTP status code of 400 is returned otherwise. 
+func CredentialsHandler(credentialsManager credentials.Manager, auditLogger audit.AuditLogger) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + credentialsID := getCredentialsID(r) + errPrefix := fmt.Sprintf("CredentialsV%dRequest: ", apiVersion) + CredentialsHandlerImpl(w, r, auditLogger, credentialsManager, credentialsID, errPrefix) + } +} + +// CredentialsHandlerImpl is the major logic in CredentialsHandler, abstract this out +// because v2.CredentialsHandler also uses the same logic. +func CredentialsHandlerImpl(w http.ResponseWriter, r *http.Request, auditLogger audit.AuditLogger, credentialsManager credentials.Manager, credentialsID string, errPrefix string) { + responseJSON, arn, roleType, errorMessage, err := processCredentialsRequest(credentialsManager, r, credentialsID, errPrefix) + if err != nil { + errResponseJSON, err := json.Marshal(errorMessage) + if e := handlersutils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + writeCredentialsRequestResponse(w, r, errorMessage.HTTPErrorCode, audit.GetCredentialsEventType(roleType), arn, auditLogger, errResponseJSON) + return + } + + writeCredentialsRequestResponse(w, r, http.StatusOK, audit.GetCredentialsEventType(roleType), arn, auditLogger, responseJSON) +} + +// processCredentialsRequest returns the response json containing credentials for the credentials id in the request +func processCredentialsRequest(credentialsManager credentials.Manager, r *http.Request, credentialsID string, errPrefix string) ([]byte, string, string, *handlersutils.ErrorMessage, error) { + if credentialsID == "" { + errText := errPrefix + "No Credential ID in the request" + seelog.Errorf("Error processing credential request: %s", errText) + msg := &handlersutils.ErrorMessage{ + Code: ErrNoIDInRequest, + Message: errText, + HTTPErrorCode: http.StatusBadRequest, + } + return nil, "", "", msg, errors.New(errText) + } + + credentials, ok := 
credentialsManager.GetTaskCredentials(credentialsID) + if !ok { + errText := errPrefix + "Credentials not found" + seelog.Errorf("Error processing credential request: %s", errText) + msg := &handlersutils.ErrorMessage{ + Code: ErrInvalidIDInRequest, + Message: errText, + HTTPErrorCode: http.StatusBadRequest, + } + return nil, "", "", msg, errors.New(errText) + } + + seelog.Infof("Processing credential request, credentialType=%s taskARN=%s", + credentials.IAMRoleCredentials.RoleType, credentials.ARN) + + if utils.ZeroOrNil(credentials.ARN) && utils.ZeroOrNil(credentials.IAMRoleCredentials) { + // This can happen when the agent is restarted and is reconciling its state. + errText := errPrefix + "Credentials uninitialized for ID" + seelog.Errorf("Error processing credential request credentialType=%s taskARN=%s: %s", + credentials.IAMRoleCredentials.RoleType, credentials.ARN, errText) + msg := &handlersutils.ErrorMessage{ + Code: ErrCredentialsUninitialized, + Message: errText, + HTTPErrorCode: http.StatusServiceUnavailable, + } + return nil, "", "", msg, errors.New(errText) + } + + credentialsJSON, err := json.Marshal(credentials.IAMRoleCredentials) + if err != nil { + errText := errPrefix + "Error marshaling credentials" + seelog.Errorf("Error processing credential request credentialType=%s taskARN=%s: %s", + credentials.IAMRoleCredentials.RoleType, credentials.ARN, errText) + msg := &handlersutils.ErrorMessage{ + Code: ErrInternalServer, + Message: "Internal server error", + HTTPErrorCode: http.StatusInternalServerError, + } + return nil, "", "", msg, errors.New(errText) + } + + // Success + return credentialsJSON, credentials.ARN, credentials.IAMRoleCredentials.RoleType, nil, nil +} + +func writeCredentialsRequestResponse(w http.ResponseWriter, r *http.Request, httpStatusCode int, eventType string, arn string, auditLogger audit.AuditLogger, message []byte) { + auditLogger.Log(request.LogRequest{Request: r, ARN: arn}, httpStatusCode, eventType) + + 
handlersutils.WriteJSONToResponse(w, httpStatusCode, message, handlersutils.RequestTypeCreds) +} + +func getCredentialsID(r *http.Request) string { + credentialsID, ok := handlersutils.ValueFromRequest(r, credentials.CredentialsIDQueryParameterName) + if ok { + return credentialsID + } + return "" +} diff --git a/agent/handlers/v1/license_handler.go b/agent/handlers/v1/license_handler.go new file mode 100644 index 00000000000..1554f363b8a --- /dev/null +++ b/agent/handlers/v1/license_handler.go @@ -0,0 +1,34 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v1 + +import ( + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/utils" +) + +const LicensePath = "/license" + +var licenseProvider = utils.NewLicenseProvider() + +// LicenseHandler creates response for '/license' API. +func LicenseHandler(w http.ResponseWriter, h *http.Request) { + text, err := licenseProvider.GetText() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + } else { + w.Write([]byte(text)) + } +} diff --git a/agent/handlers/v1/license_handler_test.go b/agent/handlers/v1/license_handler_test.go new file mode 100644 index 00000000000..3837e8c27a0 --- /dev/null +++ b/agent/handlers/v1/license_handler_test.go @@ -0,0 +1,57 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v1 + +import ( + "net/http" + "testing" + + mock_http "github.com/aws/amazon-ecs-agent/agent/handlers/mocks/http" + mock_utils "github.com/aws/amazon-ecs-agent/agent/utils/mocks" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" +) + +func TestLicenseHandler(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockResponseWriter := mock_http.NewMockResponseWriter(mockCtrl) + mockLicenseProvider := mock_utils.NewMockLicenseProvider(mockCtrl) + + licenseProvider = mockLicenseProvider + + text := "text here" + mockLicenseProvider.EXPECT().GetText().Return(text, nil) + mockResponseWriter.EXPECT().Write([]byte(text)) + + LicenseHandler(mockResponseWriter, nil) +} + +func TestLicenseHandlerError(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + mockResponseWriter := mock_http.NewMockResponseWriter(mockCtrl) + mockLicenseProvider := mock_utils.NewMockLicenseProvider(mockCtrl) + + licenseProvider = mockLicenseProvider + + mockLicenseProvider.EXPECT().GetText().Return("", errors.New("test error")) + mockResponseWriter.EXPECT().WriteHeader(http.StatusInternalServerError) + + LicenseHandler(mockResponseWriter, nil) +} diff --git a/agent/handlers/v1/response.go b/agent/handlers/v1/response.go new file mode 100644 index 00000000000..bba57fcc1d9 --- /dev/null +++ b/agent/handlers/v1/response.go @@ -0,0 +1,185 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v1 + +import ( + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" +) + +// MetadataResponse is the schema for the metadata response JSON object +type MetadataResponse struct { + Cluster string `json:"Cluster"` + ContainerInstanceArn *string `json:"ContainerInstanceArn"` + Version string `json:"Version"` +} + +// TaskResponse is the schema for the task response JSON object +type TaskResponse struct { + Arn string `json:"Arn"` + DesiredStatus string `json:"DesiredStatus,omitempty"` + KnownStatus string `json:"KnownStatus"` + Family string `json:"Family"` + Version string `json:"Version"` + Containers []ContainerResponse `json:"Containers"` +} + +// TasksResponse is the schema for the tasks response JSON object +type TasksResponse struct { + Tasks []*TaskResponse `json:"Tasks"` +} + +// ContainerResponse is the schema for the container response JSON object +type ContainerResponse struct { + DockerID string `json:"DockerId"` + DockerName string `json:"DockerName"` + Name string `json:"Name"` + Ports []PortResponse `json:"Ports,omitempty"` + Networks []containermetadata.Network `json:"Networks,omitempty"` + Volumes []VolumeResponse `json:"Volumes,omitempty"` +} + +// VolumeResponse is the schema for the volume response JSON 
object +type VolumeResponse struct { + DockerName string `json:"DockerName,omitempty"` + Source string `json:"Source,omitempty"` + Destination string `json:"Destination,omitempty"` +} + +// PortResponse defines the schema for portmapping response JSON +// object. +type PortResponse struct { + ContainerPort uint16 `json:"ContainerPort,omitempty"` + Protocol string `json:"Protocol,omitempty"` + HostPort uint16 `json:"HostPort,omitempty"` +} + +// NewTaskResponse creates a TaskResponse for a task. +func NewTaskResponse(task *apitask.Task, containerMap map[string]*apicontainer.DockerContainer) *TaskResponse { + containers := []ContainerResponse{} + for _, container := range containerMap { + if container.Container.IsInternal() { + continue + } + containerResponse := NewContainerResponse(container, task.GetPrimaryENI()) + containers = append(containers, containerResponse) + } + + knownStatus := task.GetKnownStatus() + knownBackendStatus := knownStatus.BackendStatus() + desiredStatusInAgent := task.GetDesiredStatus() + desiredStatus := desiredStatusInAgent.BackendStatus() + + if (knownBackendStatus == "STOPPED" && desiredStatus != "STOPPED") || (knownBackendStatus == "RUNNING" && desiredStatus == "PENDING") { + desiredStatus = "" + } + + return &TaskResponse{ + Arn: task.Arn, + DesiredStatus: desiredStatus, + KnownStatus: knownBackendStatus, + Family: task.Family, + Version: task.Version, + Containers: containers, + } +} + +// NewContainerResponse creates ContainerResponse for a container. 
+func NewContainerResponse(dockerContainer *apicontainer.DockerContainer, eni *apieni.ENI) ContainerResponse { + container := dockerContainer.Container + resp := ContainerResponse{ + Name: container.Name, + DockerID: dockerContainer.DockerID, + DockerName: dockerContainer.DockerName, + } + + resp.Ports = NewPortBindingsResponse(dockerContainer, eni) + resp.Volumes = NewVolumesResponse(dockerContainer) + + if eni != nil { + resp.Networks = []containermetadata.Network{ + { + NetworkMode: utils.NetworkModeAWSVPC, + IPv4Addresses: eni.GetIPV4Addresses(), + IPv6Addresses: eni.GetIPV6Addresses(), + }, + } + } + return resp +} + +// NewPortBindingsResponse creates PortResponse for a container. +func NewPortBindingsResponse(dockerContainer *apicontainer.DockerContainer, eni *apieni.ENI) []PortResponse { + container := dockerContainer.Container + resp := []PortResponse{} + + bindings := container.GetKnownPortBindings() + + // if KnownPortBindings list is empty, then we use the port mapping + // information that was passed down from ACS. + if len(bindings) == 0 { + bindings = container.Ports + } + + for _, binding := range bindings { + port := PortResponse{ + ContainerPort: binding.ContainerPort, + Protocol: binding.Protocol.String(), + } + + if eni == nil { + port.HostPort = binding.HostPort + } else { + port.HostPort = port.ContainerPort + } + + resp = append(resp, port) + } + return resp +} + +// NewVolumesResponse creates VolumeResponse for a container +func NewVolumesResponse(dockerContainer *apicontainer.DockerContainer) []VolumeResponse { + container := dockerContainer.Container + var resp []VolumeResponse + + volumes := container.GetVolumes() + + for _, volume := range volumes { + volResp := VolumeResponse{ + DockerName: volume.Name, + Source: volume.Source, + Destination: volume.Destination, + } + + resp = append(resp, volResp) + } + return resp +} + +// NewTasksResponse creates TasksResponse for all the tasks. 
+func NewTasksResponse(state dockerstate.TaskEngineState) *TasksResponse { + allTasks := state.AllTasks() + taskResponses := make([]*TaskResponse, len(allTasks)) + for ndx, task := range allTasks { + containerMap, _ := state.ContainerMapByArn(task.Arn) + taskResponses[ndx] = NewTaskResponse(task, containerMap) + } + + return &TasksResponse{Tasks: taskResponses} +} diff --git a/agent/handlers/v1/response_test.go b/agent/handlers/v1/response_test.go new file mode 100644 index 00000000000..974258cdacb --- /dev/null +++ b/agent/handlers/v1/response_test.go @@ -0,0 +1,249 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v1 + +import ( + "encoding/json" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +const ( + taskARN = "t1" + family = "sleep" + version = "1" + containerID = "cid" + containerName = "sleepy" + eniIPv4Address = "10.0.0.2" + volName = "volume1" + volSource = "/var/lib/volume1" + volDestination = "/volume" +) + +func TestTaskResponse(t *testing.T) { + expectedTaskResponseMap := map[string]interface{}{ + "Arn": "t1", + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Family": "sleep", + "Version": "1", + "Containers": []interface{}{ + map[string]interface{}{ + "DockerId": "cid", + "DockerName": "sleepy", + "Name": "sleepy", + "Ports": []interface{}{ + map[string]interface{}{ + // The number should be float here, because when we unmarshal + // something and we don't specify the number type, it will be + // set to float. 
+ "ContainerPort": float64(80), + "Protocol": "tcp", + "HostPort": float64(80), + }, + }, + "Networks": []interface{}{ + map[string]interface{}{ + "NetworkMode": "awsvpc", + "IPv4Addresses": []interface{}{"10.0.0.2"}, + }, + }, + "Volumes": []interface{}{ + map[string]interface{}{ + "DockerName": volName, + "Source": volSource, + "Destination": volDestination, + }, + }, + }, + }, + } + + task := &apitask.Task{ + Arn: taskARN, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + } + + container := &apicontainer.Container{ + Name: containerName, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + } + + containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{ + taskARN: { + DockerID: containerID, + DockerName: containerName, + Container: container, + }, + } + + taskResponse := NewTaskResponse(task, containerNameToDockerContainer) + + taskResponseJSON, err := json.Marshal(taskResponse) + assert.NoError(t, err) + + taskResponseMap := make(map[string]interface{}) + + json.Unmarshal(taskResponseJSON, &taskResponseMap) + + assert.Equal(t, expectedTaskResponseMap, taskResponseMap) +} + +func TestContainerResponse(t *testing.T) { + expectedContainerResponseMap := map[string]interface{}{ + "DockerId": "cid", + "DockerName": "sleepy", + "Name": "sleepy", + "Ports": []interface{}{ + map[string]interface{}{ + "ContainerPort": float64(80), + "Protocol": "tcp", + "HostPort": float64(80), + }, + }, + "Networks": []interface{}{ + map[string]interface{}{ + "NetworkMode": "awsvpc", + "IPv4Addresses": []interface{}{"10.0.0.2"}, + }, + }, + "Volumes": []interface{}{ + 
map[string]interface{}{ + "DockerName": volName, + "Source": volSource, + "Destination": volDestination, + }, + }, + } + + container := &apicontainer.Container{ + Name: containerName, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + } + + dockerContainer := &apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: containerName, + Container: container, + } + + eni := &apieni.ENI{ + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + } + + containerResponse := NewContainerResponse(dockerContainer, eni) + + containerResponseJSON, err := json.Marshal(containerResponse) + assert.NoError(t, err) + + containerResponseMap := make(map[string]interface{}) + + json.Unmarshal(containerResponseJSON, &containerResponseMap) + + assert.Equal(t, expectedContainerResponseMap, containerResponseMap) +} + +func TestPortBindingsResponse(t *testing.T) { + container := &apicontainer.Container{ + Name: containerName, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + HostPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + } + + dockerContainer := &apicontainer.DockerContainer{ + Container: container, + } + + PortBindingsResponse := NewPortBindingsResponse(dockerContainer, nil) + + assert.Equal(t, uint16(80), PortBindingsResponse[0].ContainerPort) + assert.Equal(t, uint16(80), PortBindingsResponse[0].HostPort) + assert.Equal(t, "tcp", PortBindingsResponse[0].Protocol) +} + +func TestVolumesResponse(t *testing.T) { + container := &apicontainer.Container{ + Name: containerName, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + } + + dockerContainer := &apicontainer.DockerContainer{ + Container: container, + } + + VolumesResponse := 
NewVolumesResponse(dockerContainer) + + assert.Equal(t, volName, VolumesResponse[0].DockerName) + assert.Equal(t, volSource, VolumesResponse[0].Source) + assert.Equal(t, volDestination, VolumesResponse[0].Destination) +} diff --git a/agent/handlers/v1/task_container_metadata_handler.go b/agent/handlers/v1/task_container_metadata_handler.go new file mode 100644 index 00000000000..830c68df0c3 --- /dev/null +++ b/agent/handlers/v1/task_container_metadata_handler.go @@ -0,0 +1,111 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v1 + +import ( + "encoding/json" + "net/http" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/cihub/seelog" +) + +const ( + // TaskContainerMetadataPath is the task/container metadata path for v1 handler. + TaskContainerMetadataPath = "/v1/tasks" + dockerIDQueryField = "dockerid" + taskARNQueryField = "taskarn" + dockerShortIDLen = 12 +) + +// createTaskResponse creates JSON response and sets the http status code for the task queried. 
+func createTaskResponse(task *apitask.Task, found bool, resourceID string, state dockerstate.TaskEngineState) ([]byte, int) { + var err error + var responseJSON []byte + status := http.StatusOK + if found { + containerMap, _ := state.ContainerMapByArn(task.Arn) + responseJSON, err = json.Marshal(NewTaskResponse(task, containerMap)) + if err != nil { + return []byte("{}"), http.StatusInternalServerError + } + } else { + seelog.Warn("Could not find requested resource: " + resourceID) + responseJSON, err = json.Marshal(&TaskResponse{}) + if err != nil { + return []byte("{}"), http.StatusInternalServerError + } + status = http.StatusNotFound + } + return responseJSON, status +} + +// TaskContainerMetadataHandler creates response for the 'v1/tasks' API. Lists all tasks if the request +// doesn't contain any fields. Returns a Task if either of 'dockerid' or +// 'taskarn' are specified in the request. +func TaskContainerMetadataHandler(taskEngine utils.DockerStateResolver) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + var err error + var responseJSON []byte + dockerTaskEngineState := taskEngine.State() + dockerID, dockerIDExists := utils.ValueFromRequest(r, dockerIDQueryField) + taskArn, taskARNExists := utils.ValueFromRequest(r, taskARNQueryField) + var status int + if dockerIDExists && taskARNExists { + seelog.Info("Request contains both ", dockerIDQueryField, " and ", taskARNQueryField, ". Expect at most one of these.") + w.WriteHeader(http.StatusBadRequest) + w.Write(responseJSON) + return + } + if dockerIDExists { + // Create TaskResponse for the docker id in the query. 
+ var task *apitask.Task + var found bool + if len(dockerID) > dockerShortIDLen { + task, found = dockerTaskEngineState.TaskByID(dockerID) + } else { + tasks, _ := dockerTaskEngineState.TaskByShortID(dockerID) + if len(tasks) == 0 { + task = nil + found = false + } else if len(tasks) == 1 { + task = tasks[0] + found = true + } else { + seelog.Info("Multiple tasks found for requested dockerId: " + dockerID) + w.WriteHeader(http.StatusBadRequest) + w.Write(responseJSON) + return + } + } + responseJSON, status = createTaskResponse(task, found, dockerID, dockerTaskEngineState) + w.WriteHeader(status) + } else if taskARNExists { + // Create TaskResponse for the task arn in the query. + task, found := dockerTaskEngineState.TaskByArn(taskArn) + responseJSON, status = createTaskResponse(task, found, taskArn, dockerTaskEngineState) + w.WriteHeader(status) + } else { + // List all tasks. + responseJSON, err = json.Marshal(NewTasksResponse(dockerTaskEngineState)) + if err != nil { + responseJSON = []byte("") + w.WriteHeader(http.StatusInternalServerError) + } + } + w.Write(responseJSON) + } +} diff --git a/agent/handlers/v2/credentials_handler.go b/agent/handlers/v2/credentials_handler.go new file mode 100644 index 00000000000..1b80e01ce31 --- /dev/null +++ b/agent/handlers/v2/credentials_handler.go @@ -0,0 +1,54 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v2 + +import ( + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + "github.com/aws/amazon-ecs-agent/agent/logger/audit" + "github.com/gorilla/mux" +) + +const ( + // Credentials API version. + apiVersion = 2 + + // credentialsIDMuxName is the key that's used in gorilla/mux to get the credentials ID. + credentialsIDMuxName = "credentialsIDMuxName" +) + +// CredentialsPath specifies the relative URI path for serving task IAM credentials. +// Use "AnythingRegEx" regex to handle the case where the "credentialsIDMuxName" is +// empty, this is because the name that's used to extract dynamic value in gorilla cannot +// be empty by default. If we don't do this, we will get 404 error when we access "/v2/credentials/", +// but it should be 400 error. +var CredentialsPath = credentials.V2CredentialsPath + "/" + utils.ConstructMuxVar(credentialsIDMuxName, utils.AnythingRegEx) + +// CredentialsHandler creates response for the 'v2/credentials' API. +func CredentialsHandler(credentialsManager credentials.Manager, auditLogger audit.AuditLogger) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + credentialsID := getCredentialsID(r) + errPrefix := fmt.Sprintf("CredentialsV%dRequest: ", apiVersion) + v1.CredentialsHandlerImpl(w, r, auditLogger, credentialsManager, credentialsID, errPrefix) + } +} + +func getCredentialsID(r *http.Request) string { + vars := mux.Vars(r) + return vars[credentialsIDMuxName] +} diff --git a/agent/handlers/v2/helper.go b/agent/handlers/v2/helper.go new file mode 100644 index 00000000000..aac9a761b63 --- /dev/null +++ b/agent/handlers/v2/helper.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v2 + +import ( + "net" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/pkg/errors" +) + +func getTaskARNByRequest(r *http.Request, state dockerstate.TaskEngineState) (string, error) { + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return "", errors.Errorf("unable to parse request's ip address: %v", err) + } + + // Get task arn for the request by looking up the ip address + taskARN, ok := state.GetTaskByIPAddress(ip) + if !ok { + return "", errors.Errorf("unable to associate '%s' with task", ip) + } + + return taskARN, nil +} diff --git a/agent/handlers/v2/response.go b/agent/handlers/v2/response.go new file mode 100644 index 00000000000..812031cd0d2 --- /dev/null +++ b/agent/handlers/v2/response.go @@ -0,0 +1,330 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" + "github.com/aws/aws-sdk-go/aws" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// TaskResponse defines the schema for the task response JSON object +type TaskResponse struct { + Cluster string `json:"Cluster"` + TaskARN string `json:"TaskARN"` + Family string `json:"Family"` + Revision string `json:"Revision"` + DesiredStatus string `json:"DesiredStatus,omitempty"` + KnownStatus string `json:"KnownStatus"` + Containers []ContainerResponse `json:"Containers,omitempty"` + Limits *LimitsResponse `json:"Limits,omitempty"` + PullStartedAt *time.Time `json:"PullStartedAt,omitempty"` + PullStoppedAt *time.Time `json:"PullStoppedAt,omitempty"` + ExecutionStoppedAt *time.Time `json:"ExecutionStoppedAt,omitempty"` + AvailabilityZone string `json:"AvailabilityZone,omitempty"` + TaskTags map[string]string `json:"TaskTags,omitempty"` + ContainerInstanceTags map[string]string `json:"ContainerInstanceTags,omitempty"` + LaunchType string `json:"LaunchType,omitempty"` + Errors []ErrorResponse `json:"Errors,omitempty"` +} + +// ContainerResponse defines the schema for the container response +// JSON object +type ContainerResponse struct { + ID string `json:"DockerId"` + Name string `json:"Name"` + DockerName string `json:"DockerName"` + Image string `json:"Image"` + ImageID string `json:"ImageID"` + Ports []v1.PortResponse `json:"Ports,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` + DesiredStatus string `json:"DesiredStatus"` + KnownStatus string `json:"KnownStatus"` + 
ExitCode *int `json:"ExitCode,omitempty"` + Limits LimitsResponse `json:"Limits"` + CreatedAt *time.Time `json:"CreatedAt,omitempty"` + StartedAt *time.Time `json:"StartedAt,omitempty"` + FinishedAt *time.Time `json:"FinishedAt,omitempty"` + Type string `json:"Type"` + Networks []containermetadata.Network `json:"Networks,omitempty"` + Health *apicontainer.HealthStatus `json:"Health,omitempty"` + Volumes []v1.VolumeResponse `json:"Volumes,omitempty"` + LogDriver string `json:"LogDriver,omitempty"` + LogOptions map[string]string `json:"LogOptions,omitempty"` + ContainerARN string `json:"ContainerARN,omitempty"` +} + +// LimitsResponse defines the schema for task/cpu limits response +// JSON object +type LimitsResponse struct { + CPU *float64 `json:"CPU,omitempty"` + Memory *int64 `json:"Memory,omitempty"` +} + +// ErrorResponse defined the schema for error response +// JSON object +type ErrorResponse struct { + ErrorField string `json:"ErrorField,omitempty"` + ErrorCode string `json:"ErrorCode,omitempty"` + ErrorMessage string `json:"ErrorMessage,omitempty"` + StatusCode int `json:"StatusCode,omitempty"` + RequestId string `json:"RequestId,omitempty"` + ResourceARN string `json:"ResourceARN,omitempty"` +} + +// Agent versions >= 1.2.0: Null, zero, and CPU values of 1 +// are passed to Docker as two CPU shares +const minimumCPUUnit = 2 + +// NewTaskResponse creates a new response object for the task +func NewTaskResponse( + taskARN string, + state dockerstate.TaskEngineState, + ecsClient api.ECSClient, + cluster string, + az string, + containerInstanceArn string, + propagateTags bool, + includeV4Metadata bool, +) (*TaskResponse, error) { + task, ok := state.TaskByArn(taskARN) + if !ok { + return nil, errors.Errorf("v2 task response: unable to find task '%s'", taskARN) + } + + resp := &TaskResponse{ + Cluster: cluster, + TaskARN: task.Arn, + Family: task.Family, + Revision: task.Version, + DesiredStatus: task.GetDesiredStatus().String(), + KnownStatus: 
task.GetKnownStatus().String(), + AvailabilityZone: az, + } + if includeV4Metadata { + resp.LaunchType = task.LaunchType + } + + taskCPU := task.CPU + taskMemory := task.Memory + if taskCPU != 0 || taskMemory != 0 { + taskLimits := &LimitsResponse{} + if taskCPU != 0 { + taskLimits.CPU = &taskCPU + } + if taskMemory != 0 { + taskLimits.Memory = &taskMemory + } + resp.Limits = taskLimits + } + + if timestamp := task.GetPullStartedAt(); !timestamp.IsZero() { + resp.PullStartedAt = aws.Time(timestamp.UTC()) + } + if timestamp := task.GetPullStoppedAt(); !timestamp.IsZero() { + resp.PullStoppedAt = aws.Time(timestamp.UTC()) + } + if timestamp := task.GetExecutionStoppedAt(); !timestamp.IsZero() { + resp.ExecutionStoppedAt = aws.Time(timestamp.UTC()) + } + containerNameToDockerContainer, ok := state.ContainerMapByArn(task.Arn) + if !ok { + seelog.Warnf("V2 task response: unable to get container name mapping for task '%s'", + task.Arn) + return resp, nil + } + + for _, dockerContainer := range containerNameToDockerContainer { + containerResponse := NewContainerResponse(dockerContainer, task.GetPrimaryENI(), includeV4Metadata) + resp.Containers = append(resp.Containers, containerResponse) + } + + if propagateTags { + propagateTagsToMetadata(ecsClient, containerInstanceArn, taskARN, resp, includeV4Metadata) + } + + return resp, nil +} + +// propagateTagsToMetadata retrieves container instance and task tags from ECS +func propagateTagsToMetadata(ecsClient api.ECSClient, containerInstanceARN, taskARN string, resp *TaskResponse, includeV4Metadata bool) { + containerInstanceTags, err := ecsClient.GetResourceTags(containerInstanceARN) + + if err == nil { + resp.ContainerInstanceTags = make(map[string]string) + for _, tag := range containerInstanceTags { + resp.ContainerInstanceTags[*tag.Key] = *tag.Value + } + } else { + metadataErrorHandling(resp, err, "ContainerInstanceTags", containerInstanceARN, includeV4Metadata) + } + + taskTags, err := ecsClient.GetResourceTags(taskARN) 
+ if err == nil { + resp.TaskTags = make(map[string]string) + for _, tag := range taskTags { + resp.TaskTags[*tag.Key] = *tag.Value + } + } else { + metadataErrorHandling(resp, err, "TaskTags", taskARN, includeV4Metadata) + } +} + +// NewContainerResponseFromState creates a new container response based on container id +// TODO: remove includeV4Metadata from NewContainerResponseFromState/NewContainerResponse +func NewContainerResponseFromState( + containerID string, + state dockerstate.TaskEngineState, + includeV4Metadata bool, +) (*ContainerResponse, error) { + dockerContainer, ok := state.ContainerByID(containerID) + if !ok { + return nil, errors.Errorf( + "v2 container response: unable to find container '%s'", containerID) + } + task, ok := state.TaskByID(containerID) + if !ok { + return nil, errors.Errorf( + "v2 container response: unable to find task for container '%s'", containerID) + } + + resp := NewContainerResponse(dockerContainer, task.GetPrimaryENI(), includeV4Metadata) + return &resp, nil +} + +// NewContainerResponse creates a new container response +// TODO: remove includeV4Metadata from NewContainerResponse +func NewContainerResponse( + dockerContainer *apicontainer.DockerContainer, + eni *apieni.ENI, + includeV4Metadata bool, +) ContainerResponse { + container := dockerContainer.Container + resp := ContainerResponse{ + ID: dockerContainer.DockerID, + Name: container.Name, + DockerName: dockerContainer.DockerName, + Image: container.Image, + ImageID: container.ImageID, + DesiredStatus: container.GetDesiredStatus().String(), + KnownStatus: container.GetKnownStatus().String(), + Limits: LimitsResponse{ + CPU: aws.Float64(float64(container.CPU)), + Memory: aws.Int64(int64(container.Memory)), + }, + Type: container.Type.String(), + ExitCode: container.GetKnownExitCode(), + Labels: container.GetLabels(), + } + + if container.CPU < minimumCPUUnit { + defaultCPU := func(val float64) *float64 { return &val }(minimumCPUUnit) + resp.Limits.CPU = defaultCPU + } 
+ + // V4 metadata endpoint calls this function for consistency across versions, + // but needs additional metadata only available at this scope. + if includeV4Metadata { + resp.LogDriver = container.GetLogDriver() + resp.LogOptions = container.GetLogOptions() + resp.ContainerARN = container.ContainerArn + } + + // Write the container health status inside the container + if dockerContainer.Container.HealthStatusShouldBeReported() { + health := dockerContainer.Container.GetHealthStatus() + resp.Health = &health + } + + if createdAt := container.GetCreatedAt(); !createdAt.IsZero() { + createdAt = createdAt.UTC() + resp.CreatedAt = &createdAt + } + if startedAt := container.GetStartedAt(); !startedAt.IsZero() { + startedAt = startedAt.UTC() + resp.StartedAt = &startedAt + } + if finishedAt := container.GetFinishedAt(); !finishedAt.IsZero() { + finishedAt = finishedAt.UTC() + resp.FinishedAt = &finishedAt + } + + for _, binding := range container.GetKnownPortBindings() { + port := v1.PortResponse{ + ContainerPort: binding.ContainerPort, + Protocol: binding.Protocol.String(), + } + if eni == nil { + port.HostPort = binding.HostPort + } else { + port.HostPort = port.ContainerPort + } + + resp.Ports = append(resp.Ports, port) + } + + if eni != nil { + resp.Networks = []containermetadata.Network{ + { + NetworkMode: utils.NetworkModeAWSVPC, + IPv4Addresses: eni.GetIPV4Addresses(), + IPv6Addresses: eni.GetIPV6Addresses(), + }, + } + } + + resp.Volumes = v1.NewVolumesResponse(dockerContainer) + return resp +} + +// metadataErrorHandling writes an error to the logger, and append an error response +// to V4 metadata endpoint task response +func metadataErrorHandling(resp *TaskResponse, err error, field, resourceARN string, includeV4Metadata bool) { + seelog.Errorf("Task Metadata error: unable to get '%s' for '%s': %s", field, resourceARN, err.Error()) + if includeV4Metadata { + errResp := newErrorResponse(err, field, resourceARN) + resp.Errors = append(resp.Errors, *errResp) + 
} +} + +// newErrorResponse creates a new error response +func newErrorResponse(err error, field, resourceARN string) *ErrorResponse { + errResp := &ErrorResponse{ + ErrorField: field, + ErrorMessage: err.Error(), + ResourceARN: resourceARN, + } + + if awsErr, ok := err.(awserr.Error); ok { + errResp.ErrorCode = awsErr.Code() + errResp.ErrorMessage = awsErr.Message() + if reqErr, ok := err.(awserr.RequestFailure); ok { + errResp.StatusCode = reqErr.StatusCode() + errResp.RequestId = reqErr.RequestID() + } + } + + return errResp +} diff --git a/agent/handlers/v2/response_test.go b/agent/handlers/v2/response_test.go new file mode 100644 index 00000000000..eb1baf1db29 --- /dev/null +++ b/agent/handlers/v2/response_test.go @@ -0,0 +1,667 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v2 + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +const ( + taskARN = "t1" + cluster = "default" + family = "sleep" + version = "1" + containerID = "cid" + containerName = "sleepy" + imageName = "busybox" + imageID = "bUsYbOx" + cpu = 1024 + memory = 512 + eniIPv4Address = "10.0.0.2" + volName = "volume1" + volSource = "/var/lib/volume1" + volDestination = "/volume" + availabilityZone = "us-west-2b" + containerInstanceArn = "containerInstance-test" +) + +func TestTaskResponse(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + now := time.Now() + task := &apitask.Task{ + Arn: taskARN, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + } + container := &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + 
DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + } + created := time.Now() + container.SetCreatedAt(created) + labels := map[string]string{ + "foo": "bar", + } + container.SetLabels(labels) + containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{ + taskARN: { + DockerID: containerID, + DockerName: containerName, + Container: container, + }, + } + gomock.InOrder( + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ) + + taskResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, false, false) + assert.NoError(t, err) + _, err = json.Marshal(taskResponse) + assert.NoError(t, err) + assert.Equal(t, created.UTC().String(), taskResponse.Containers[0].CreatedAt.String()) + // LaunchType should not be populated + assert.Equal(t, "", taskResponse.LaunchType) + // Log driver and Log options should not be populated + assert.Equal(t, "", taskResponse.Containers[0].LogDriver) + assert.Len(t, taskResponse.Containers[0].LogOptions, 0) + + gomock.InOrder( + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ) + // verify that 'v4' response without log driver or options returns blank fields as well + taskResponse, err = NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, false, true) + assert.NoError(t, err) + _, err = json.Marshal(taskResponse) + assert.NoError(t, err) + // LaunchType should not be populated + 
assert.Equal(t, "", taskResponse.LaunchType) + // Log driver and Log options should not be populated + assert.Equal(t, "", taskResponse.Containers[0].LogDriver) + assert.Len(t, taskResponse.Containers[0].LogOptions, 0) +} + +func TestTaskResponseWithV4Metadata(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + now := time.Now() + task := &apitask.Task{ + Arn: taskARN, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + } + container := &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + DockerConfig: apicontainer.DockerConfig{ + HostConfig: aws.String(`{"LogConfig":{"Type":"awslogs","Config":{"awslogs-group":"myLogGroup"}}}`), + }, + } + created := time.Now() + container.SetCreatedAt(created) + labels := map[string]string{ + "foo": "bar", + } + container.SetLabels(labels) + containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{ + taskARN: { + DockerID: containerID, + DockerName: containerName, + Container: container, + }, + } + gomock.InOrder( + state.EXPECT().TaskByArn(taskARN).Return(task, true), + 
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ) + + taskResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, false, true) + assert.NoError(t, err) + _, err = json.Marshal(taskResponse) + assert.NoError(t, err) + assert.Equal(t, created.UTC().String(), taskResponse.Containers[0].CreatedAt.String()) + // LaunchType is populated by the v4 handler + assert.Equal(t, "", taskResponse.LaunchType) + // Log driver and config should be populated + assert.Equal(t, "awslogs", taskResponse.Containers[0].LogDriver) + assert.Equal(t, map[string]string{"awslogs-group": "myLogGroup"}, taskResponse.Containers[0].LogOptions) +} + +func TestContainerResponse(t *testing.T) { + testCases := []struct { + healthCheckType string + result bool + }{ + { + healthCheckType: "docker", + result: false, + }, + { + healthCheckType: "", + result: true, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("docker health check type: %v", tc.healthCheckType), func(t *testing.T) { + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + container := &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + HealthCheckType: tc.healthCheckType, + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Since: aws.Time(time.Now()), + }, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + } + created := time.Now() + container.SetCreatedAt(created) + labels := map[string]string{ + "foo": "bar", + } + 
container.SetLabels(labels) + dockerContainer := &apicontainer.DockerContainer{ + DockerID: containerID, + DockerName: containerName, + Container: container, + } + task := &apitask.Task{ + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + } + gomock.InOrder( + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByID(containerID).Return(task, true), + ) + + containerResponse, err := NewContainerResponseFromState(containerID, state, false) + assert.NoError(t, err) + assert.Equal(t, containerResponse.Health == nil, tc.result) + _, err = json.Marshal(containerResponse) + assert.NoError(t, err) + }) + } +} + +func TestTaskResponseMarshal(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + expectedTaskResponseMap := map[string]interface{}{ + "Cluster": cluster, + "TaskARN": taskARN, + "Family": family, + "Revision": version, + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "AvailabilityZone": availabilityZone, + "Containers": []interface{}{ + map[string]interface{}{ + "DockerId": containerID, + "Name": containerName, + "DockerName": containerName, + "Image": imageName, + "ImageID": imageID, + "Ports": []interface{}{ + map[string]interface{}{ + "HostPort": float64(80), + "ContainerPort": float64(80), + "Protocol": "tcp", + }, + }, + "DesiredStatus": "NONE", + "KnownStatus": "NONE", + "Limits": map[string]interface{}{ + "CPU": float64(2), + "Memory": float64(0), + }, + "Type": "NORMAL", + "Networks": []interface{}{ + map[string]interface{}{ + "IPv4Addresses": []interface{}{ + eniIPv4Address, + }, + "NetworkMode": "awsvpc", + }, + }, + }, + }, + "ContainerInstanceTags": map[string]interface{}{ + "ContainerInstanceTag1": "firstTag", + "ContainerInstanceTag2": "secondTag", + }, + "TaskTags": map[string]interface{}{ + "TaskTag1": "firstTag", + "TaskTag2": "secondTag", + }, + } + + state := 
mock_dockerstate.NewMockTaskEngineState(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + + task := &apitask.Task{ + Arn: taskARN, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + } + + container := &apicontainer.Container{ + Name: containerName, + V3EndpointID: "", + Image: imageName, + ImageID: imageID, + KnownPortBindingsUnsafe: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + } + + containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{ + taskARN: { + DockerID: containerID, + DockerName: containerName, + Container: container, + }, + } + + contInstTag1Key := "ContainerInstanceTag1" + contInstTag1Val := "firstTag" + contInstTag2Key := "ContainerInstanceTag2" + contInstTag2Val := "secondTag" + taskTag1Key := "TaskTag1" + taskTag1Val := "firstTag" + taskTag2Key := "TaskTag2" + taskTag2Val := "secondTag" + + gomock.InOrder( + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ecsClient.EXPECT().GetResourceTags(containerInstanceArn).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &contInstTag1Key, + Value: &contInstTag1Val, + }, + &ecs.Tag{ + Key: &contInstTag2Key, + Value: &contInstTag2Val, + }, + }, nil), + ecsClient.EXPECT().GetResourceTags(taskARN).Return([]*ecs.Tag{ + &ecs.Tag{ + Key: &taskTag1Key, + Value: &taskTag1Val, + }, + &ecs.Tag{ + Key: &taskTag2Key, + Value: &taskTag2Val, + }, + }, nil), + ) + + taskResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, true, false) + assert.NoError(t, err) + + taskResponseJSON, err := json.Marshal(taskResponse) + assert.NoError(t, err) + + taskResponseMap := make(map[string]interface{}) + 
json.Unmarshal(taskResponseJSON, &taskResponseMap) + assert.Equal(t, expectedTaskResponseMap, taskResponseMap) +} + +func TestContainerResponseMarshal(t *testing.T) { + timeRFC3339, _ := time.Parse(time.RFC3339, "2014-11-12T11:45:26Z") + + expectedContainerResponseMap := map[string]interface{}{ + "DockerId": containerID, + "DockerName": containerName, + "Name": containerName, + "Image": imageName, + "ImageID": imageID, + "Ports": []interface{}{ + map[string]interface{}{ + "ContainerPort": float64(80), + "Protocol": "tcp", + "HostPort": float64(80), + }, + }, + "Labels": map[string]interface{}{ + "foo": "bar", + }, + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Limits": map[string]interface{}{ + "CPU": float64(cpu), + "Memory": float64(memory), + }, + "CreatedAt": timeRFC3339.Format(time.RFC3339), + "Type": "NORMAL", + "Networks": []interface{}{ + map[string]interface{}{ + "NetworkMode": "awsvpc", + "IPv4Addresses": []interface{}{ + eniIPv4Address, + }, + }, + }, + "Health": map[string]interface{}{ + "statusSince": timeRFC3339.Format(time.RFC3339), + "status": "HEALTHY", + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + container := &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + HealthCheckType: "docker", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Since: aws.Time(timeRFC3339), + }, + KnownPortBindingsUnsafe: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + } + + container.SetCreatedAt(timeRFC3339) + labels := map[string]string{ + "foo": "bar", + } + container.SetLabels(labels) + dockerContainer := &apicontainer.DockerContainer{ + DockerID: 
containerID, + DockerName: containerName, + Container: container, + } + task := &apitask.Task{ + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + } + gomock.InOrder( + state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true), + state.EXPECT().TaskByID(containerID).Return(task, true), + ) + + containerResponse, err := NewContainerResponseFromState(containerID, state, false) + assert.NoError(t, err) + + containerResponseJSON, err := json.Marshal(containerResponse) + assert.NoError(t, err) + + containerResponseMap := make(map[string]interface{}) + json.Unmarshal(containerResponseJSON, &containerResponseMap) + assert.Equal(t, expectedContainerResponseMap, containerResponseMap) +} + +func TestTaskResponseWithV4TagsError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + ecsClient := mock_api.NewMockECSClient(ctrl) + now := time.Now() + task := &apitask.Task{ + Arn: taskARN, + Family: family, + Version: version, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + ENIs: []*apieni.ENI{ + { + IPV4Addresses: []*apieni.ENIIPV4Address{ + { + Address: eniIPv4Address, + }, + }, + }, + }, + CPU: cpu, + Memory: memory, + PullStartedAtUnsafe: now, + PullStoppedAtUnsafe: now, + ExecutionStoppedAtUnsafe: now, + } + container := &apicontainer.Container{ + Name: containerName, + Image: imageName, + ImageID: imageID, + DesiredStatusUnsafe: apicontainerstatus.ContainerRunning, + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + CPU: cpu, + Memory: memory, + Type: apicontainer.ContainerNormal, + Ports: []apicontainer.PortBinding{ + { + ContainerPort: 80, + Protocol: apicontainer.TransportProtocolTCP, + }, + }, + VolumesUnsafe: []types.MountPoint{ + { + Name: volName, + Source: volSource, + Destination: volDestination, + }, + }, + DockerConfig: 
apicontainer.DockerConfig{ + HostConfig: aws.String(`{"LogConfig":{"Type":"awslogs","Config":{"awslogs-group":"myLogGroup"}}}`), + }, + } + created := time.Now() + container.SetCreatedAt(created) + labels := map[string]string{ + "foo": "bar", + } + container.SetLabels(labels) + containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{ + taskARN: { + DockerID: containerID, + DockerName: containerName, + Container: container, + }, + } + + errCode := "ThrottlingException" + errMessage := "Rate exceeded" + errStatusCode := 400 + containerTagsRequestId := "cef9da77-aee7-431d-84d5-f92b2d342c51" + taskTagsRequestId := "45dbbc67-0c60-4248-855e-14fdf4c11870" + containerTagsErr := awserr.NewRequestFailure(awserr.Error(awserr.New(errCode, errMessage, errors.New(""))), errStatusCode, containerTagsRequestId) + taskTagsError := awserr.NewRequestFailure(awserr.Error(awserr.New(errCode, errMessage, errors.New(""))), errStatusCode, taskTagsRequestId) + + gomock.InOrder( + state.EXPECT().TaskByArn(taskARN).Return(task, true), + state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true), + ecsClient.EXPECT().GetResourceTags(containerInstanceArn).Return(nil, containerTagsErr), + ecsClient.EXPECT().GetResourceTags(taskARN).Return(nil, taskTagsError), + ) + + taskWithTagsResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, true, true) + assert.NoError(t, err) + _, err = json.Marshal(taskWithTagsResponse) + assert.NoError(t, err) + assert.Equal(t, taskWithTagsResponse.Errors[0].ErrorField, "ContainerInstanceTags") + assert.Equal(t, taskWithTagsResponse.Errors[0].ErrorCode, errCode) + assert.Equal(t, taskWithTagsResponse.Errors[0].ErrorMessage, errMessage) + assert.Equal(t, taskWithTagsResponse.Errors[0].StatusCode, errStatusCode) + assert.Equal(t, taskWithTagsResponse.Errors[0].RequestId, containerTagsRequestId) + assert.Equal(t, taskWithTagsResponse.Errors[0].ResourceARN, 
containerInstanceArn) + assert.Equal(t, taskWithTagsResponse.Errors[1].ErrorField, "TaskTags") + assert.Equal(t, taskWithTagsResponse.Errors[1].ErrorCode, errCode) + assert.Equal(t, taskWithTagsResponse.Errors[1].ErrorMessage, errMessage) + assert.Equal(t, taskWithTagsResponse.Errors[1].StatusCode, errStatusCode) + assert.Equal(t, taskWithTagsResponse.Errors[1].RequestId, taskTagsRequestId) + assert.Equal(t, taskWithTagsResponse.Errors[1].ResourceARN, taskARN) +} diff --git a/agent/handlers/v2/stats_response.go b/agent/handlers/v2/stats_response.go new file mode 100644 index 00000000000..9009519c82b --- /dev/null +++ b/agent/handlers/v2/stats_response.go @@ -0,0 +1,51 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v2 + +import ( + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// NewTaskStatsResponse returns a new task stats response object +func NewTaskStatsResponse(taskARN string, + state dockerstate.TaskEngineState, + statsEngine stats.Engine) (map[string]*types.StatsJSON, error) { + + containerMap, ok := state.ContainerMapByArn(taskARN) + if !ok { + return nil, errors.Errorf( + "v2 task stats response: unable to lookup containers for task %s", + taskARN) + } + + resp := make(map[string]*types.StatsJSON) + for _, dockerContainer := range containerMap { + containerID := dockerContainer.DockerID + dockerStats, _, err := statsEngine.ContainerDockerStats(taskARN, containerID) + if err != nil { + seelog.Warnf("V2 task stats response: Unable to get stats for container '%s' for task '%s': %v", + containerID, taskARN, err) + resp[containerID] = nil + continue + } + + resp[containerID] = dockerStats + } + + return resp, nil +} diff --git a/agent/handlers/v2/stats_response_test.go b/agent/handlers/v2/stats_response_test.go new file mode 100644 index 00000000000..83b7ef0e07c --- /dev/null +++ b/agent/handlers/v2/stats_response_test.go @@ -0,0 +1,67 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/stats" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + mock_stats "github.com/aws/amazon-ecs-agent/agent/stats/mock" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestTaskStatsResponseSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + + dockerStats := &types.StatsJSON{} + dockerStats.NumProcs = 2 + containerMap := map[string]*apicontainer.DockerContainer{ + containerName: { + DockerID: containerID, + }, + } + gomock.InOrder( + state.EXPECT().ContainerMapByArn(taskARN).Return(containerMap, true), + statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, &stats.NetworkStatsPerSec{}, nil), + ) + + resp, err := NewTaskStatsResponse(taskARN, state, statsEngine) + assert.NoError(t, err) + containerStats, ok := resp[containerID] + assert.True(t, ok) + assert.Equal(t, dockerStats.NumProcs, containerStats.NumProcs) +} + +func TestTaskStatsResponseError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + statsEngine := mock_stats.NewMockEngine(ctrl) + + state.EXPECT().ContainerMapByArn(taskARN).Return(nil, false) + _, err := NewTaskStatsResponse(taskARN, state, statsEngine) + assert.Error(t, err) +} diff --git a/agent/handlers/v2/task_container_metadata_handler.go b/agent/handlers/v2/task_container_metadata_handler.go new file mode 100644 index 00000000000..90e38d9d2f1 --- /dev/null +++ b/agent/handlers/v2/task_container_metadata_handler.go @@ -0,0 +1,109 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v2 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/cihub/seelog" +) + +const ( + // metadataContainerIDMuxName is the key that's used in gorilla/mux to get the container ID + // for container metadata. + metadataContainerIDMuxName = "metadataContainerIDMuxName" + + // TaskMetadataPath specifies the relative URI path for serving task metadata. + TaskMetadataPath = "/v2/metadata" + + // TaskWithTagsMetadataPath specifies the relative URI path for serving task metadata with Container Instance and Task Tags. + TaskWithTagsMetadataPath = "/v2/metadataWithTags" + + // TaskMetadataPathWithSlash specifies the relative URI path for serving task metadata. + TaskMetadataPathWithSlash = TaskMetadataPath + "/" + + // TaskWithTagsMetadataPathWithSlash specifies the relative URI path for serving task metadata with Container Instance and Task Tags. + TaskWithTagsMetadataPathWithSlash = TaskWithTagsMetadataPath + "/" +) + +// ContainerMetadataPath specifies the relative URI path for serving container metadata. +var ContainerMetadataPath = TaskMetadataPathWithSlash + utils.ConstructMuxVar(metadataContainerIDMuxName, utils.AnythingButEmptyRegEx) + +// TaskContainerMetadataHandler returns the handler method for handling task and container metadata requests. 
+func TaskContainerMetadataHandler(state dockerstate.TaskEngineState, ecsClient api.ECSClient, cluster, az, containerInstanceArn string, propagateTags bool) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := getTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("Unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeTaskMetadata) + return + } + if containerID, ok := utils.GetMuxValueFromRequest(r, metadataContainerIDMuxName); ok { + seelog.Infof("V2 task/container metadata handler: writing response for container '%s'", containerID) + WriteContainerMetadataResponse(w, containerID, state) + return + } + + seelog.Infof("V2 task/container metadata handler: writing response for task '%s'", taskARN) + WriteTaskMetadataResponse(w, taskARN, cluster, state, ecsClient, az, containerInstanceArn, propagateTags) + } +} + +// WriteContainerMetadataResponse writes the container metadata to response writer. 
+func WriteContainerMetadataResponse(w http.ResponseWriter, containerID string, state dockerstate.TaskEngineState) { + containerResponse, err := NewContainerResponseFromState(containerID, state, false) + if err != nil { + errResponseJSON, err := json.Marshal("Unable to generate metadata for container '" + containerID + "'") + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerMetadata) + return + } + + responseJSON, err := json.Marshal(containerResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeContainerMetadata) +} + +// WriteTaskMetadataResponse writes the task metadata to response writer. +func WriteTaskMetadataResponse(w http.ResponseWriter, taskARN string, cluster string, state dockerstate.TaskEngineState, ecsClient api.ECSClient, az, containerInstanceArn string, propagateTags bool) { + // Generate a response for the task + taskResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, az, containerInstanceArn, propagateTags, false) + if err != nil { + errResponseJSON, err := json.Marshal("Unable to generate metadata for task: '" + taskARN + "'") + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskMetadata) + return + } + + responseJSON, err := json.Marshal(taskResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskMetadata) +} diff --git a/agent/handlers/v2/task_container_stats_handler.go b/agent/handlers/v2/task_container_stats_handler.go new file mode 100644 index 00000000000..65f21da9e3d --- /dev/null +++ b/agent/handlers/v2/task_container_stats_handler.go @@ -0,0 +1,110 @@ +// Copyright 
Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v2 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" +) + +const ( + // statsContainerIDMuxName is the key that's used in mux to get the container ID + // for container stats. + statsContainerIDMuxName = "statsContainerIDMuxName" + + // TaskStatsPath specifies the relative URI path for serving task stats. + TaskStatsPath = "/v2/stats" + + // TaskStatsPathWithSlash specifies the relative URI path for serving task stats. + TaskStatsPathWithSlash = TaskStatsPath + "/" +) + +// ContainerStatsPath specifies the relative URI path for serving container stats. +var ContainerStatsPath = TaskStatsPathWithSlash + utils.ConstructMuxVar(statsContainerIDMuxName, utils.AnythingButEmptyRegEx) + +// TaskContainerStatsHandler returns the handler method for handling task and container stats requests. 
+func TaskContainerStatsHandler(state dockerstate.TaskEngineState, statsEngine stats.Engine) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := getTaskARNByRequest(r, state) + if err != nil { + errResponseJSON, err := json.Marshal( + fmt.Sprintf("Unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + if containerID, ok := utils.GetMuxValueFromRequest(r, statsContainerIDMuxName); ok { + seelog.Infof("V2 task/container stats handler: writing response for container '%s'", containerID) + WriteContainerStatsResponse(w, taskARN, containerID, statsEngine) + return + } + + seelog.Infof("V2 task/container stats handler: writing response for task '%s'", taskARN) + WriteTaskStatsResponse(w, taskARN, state, statsEngine) + } +} + +// WriteTaskStatsResponse writes the task stats to response writer. +func WriteTaskStatsResponse(w http.ResponseWriter, + taskARN string, + state dockerstate.TaskEngineState, + statsEngine stats.Engine) { + + taskStatsResponse, err := NewTaskStatsResponse(taskARN, state, statsEngine) + if err != nil { + seelog.Warnf("Unable to get task stats for task '%s': %v", taskARN, err) + errResponseJSON, err := json.Marshal("Unable to get task stats for: " + taskARN) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + + responseJSON, err := json.Marshal(taskStatsResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskStats) +} + +// WriteContainerStatsResponse writes the container stats to response writer. 
+func WriteContainerStatsResponse(w http.ResponseWriter, + taskARN string, + containerID string, + statsEngine stats.Engine) { + dockerStats, _, err := statsEngine.ContainerDockerStats(taskARN, containerID) + if err != nil { + errResponseJSON, err := json.Marshal("Unable to get container stats for: " + containerID) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerStats) + return + } + + responseJSON, err := json.Marshal(dockerStats) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeContainerStats) +} diff --git a/agent/handlers/v3/container_association_handler.go b/agent/handlers/v3/container_association_handler.go new file mode 100644 index 00000000000..fbaa2d5e6bc --- /dev/null +++ b/agent/handlers/v3/container_association_handler.go @@ -0,0 +1,162 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/cihub/seelog" +) + +const ( + // associationTypeMuxName is the key that's used in gorilla/mux to get the association type. 
+ associationTypeMuxName = "associationTypeMuxName" + // associationNameMuxName is the key that's used in gorilla/mux to get the association name. + associationNameMuxName = "associationNameMuxName" +) + +var ( + // Container associations endpoint: /v3/<v3 endpoint id>/associations/<association type> + ContainerAssociationsPath = fmt.Sprintf("/v3/%s/associations/%s", + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationTypeMuxName, utils.AnythingButSlashRegEx)) + // Container association endpoint: /v3/<v3 endpoint id>/associations/<association type>/<association name> + ContainerAssociationPath = fmt.Sprintf("/v3/%s/associations/%s/%s", + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationTypeMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationNameMuxName, utils.AnythingButEmptyRegEx)) + // Treat "/v3/<v3 endpoint id>/associations/<association type>/" as a container association endpoint with empty association name (therefore invalid), to be consistent with similar situation in credentials endpoint and v3 metadata endpoint + ContainerAssociationPathWithSlash = ContainerAssociationsPath + "/" +) + +// ContainerAssociationsHandler returns the handler method for handling container associations requests. 
+func ContainerAssociationsHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + containerID, err := GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: unable to get container id from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + taskARN, err := GetTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + associationType, err := GetAssociationTypeByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + seelog.Infof("V3 container associations handler: writing response for container '%s' for association type %s", containerID, associationType) + + writeContainerAssociationsResponse(w, containerID, taskARN, associationType, state) + } +} + +// ContainerAssociationHandler returns the handler method for handling container association requests. 
+func ContainerAssociationHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := GetTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + associationType, err := GetAssociationTypeByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + associationName, err := GetAssociationNameByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + seelog.Infof("V3 container association handler: writing response for association '%s' of type %s", associationName, associationType) + + writeContainerAssociationResponse(w, taskARN, associationType, associationName, state) + } +} + +func writeContainerAssociationsResponse(w http.ResponseWriter, containerID, taskARN, associationType string, state dockerstate.TaskEngineState) { + associationsResponse, err := NewAssociationsResponse(containerID, taskARN, associationType, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("Unable to write container associations response: %s", err.Error())) + if e := 
utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerAssociations) + return + } + + responseJSON, err := json.Marshal(associationsResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeContainerAssociations) +} + +func writeContainerAssociationResponse(w http.ResponseWriter, taskARN, associationType, associationName string, state dockerstate.TaskEngineState) { + associationResponse, err := NewAssociationResponse(taskARN, associationType, associationName, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("Unable to write container association response: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerAssociation) + return + } + + // associationResponse is assumed to be a valid json string; see comments on newAssociationResponse method for details + utils.WriteJSONToResponse(w, http.StatusOK, []byte(associationResponse), utils.RequestTypeContainerAssociation) +} diff --git a/agent/handlers/v3/container_metadata_handler.go b/agent/handlers/v3/container_metadata_handler.go new file mode 100644 index 00000000000..f247f2c9a22 --- /dev/null +++ b/agent/handlers/v3/container_metadata_handler.go @@ -0,0 +1,115 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// ContainerMetadataPath specifies the relative URI path for serving container metadata. +var ContainerMetadataPath = "/v3/" + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + +// ContainerMetadataHandler returns the handler method for handling container metadata requests. +func ContainerMetadataHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + containerID, err := GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container metadata handler: unable to get container ID from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, responseJSON, utils.RequestTypeContainerMetadata) + return + } + containerResponse, err := GetContainerResponse(containerID, state) + if err != nil { + errResponseJSON, err := json.Marshal(err.Error()) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeContainerMetadata) + return + } + seelog.Infof("V3 container metadata handler: writing response for container '%s'", containerID) + + responseJSON, err := json.Marshal(containerResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, 
utils.RequestTypeContainerMetadata) + } +} + +// GetContainerResponse gets container response for v3 metadata +func GetContainerResponse(containerID string, state dockerstate.TaskEngineState) (*v2.ContainerResponse, error) { + containerResponse, err := v2.NewContainerResponseFromState(containerID, state, false) + if err != nil { + seelog.Errorf("Unable to get container metadata for container '%s'", containerID) + return nil, errors.Errorf("Unable to generate metadata for container '%s'", containerID) + } + // fill in network details if not set + if containerResponse.Networks == nil { + if containerResponse.Networks, err = GetContainerNetworkMetadata(containerID, state); err != nil { + return nil, err + } + } + return containerResponse, nil +} + +// GetContainerNetworkMetadata returns the network metadata for the container +func GetContainerNetworkMetadata(containerID string, state dockerstate.TaskEngineState) ([]containermetadata.Network, error) { + dockerContainer, ok := state.ContainerByID(containerID) + if !ok { + return nil, errors.Errorf("Unable to find container '%s'", containerID) + } + // the logic here has been reused from + // https://github.com/aws/amazon-ecs-agent/blob/0c8913ba33965cf6ffdd6253fad422458d9346bd/agent/containermetadata/parse_metadata.go#L123 + settings := dockerContainer.Container.GetNetworkSettings() + if settings == nil { + seelog.Errorf("unable to get container network response for container '%s'", containerID) + return nil, errors.Errorf("Unable to generate network response for container '%s'", containerID) + } + // This metadata is the information provided in older versions of the API + // We get the NetworkMode (Network interface name) from the HostConfig because + // this is the network with which the container is created + ipv4AddressFromSettings := settings.IPAddress + networkModeFromHostConfig := dockerContainer.Container.GetNetworkMode() + + // Extensive Network information is not available for Docker API versions 1.17-1.20 
+ // Instead we only get the details of the first network + networks := make([]containermetadata.Network, 0) + if len(settings.Networks) > 0 { + for modeFromSettings, containerNetwork := range settings.Networks { + networkMode := modeFromSettings + ipv4Addresses := []string{containerNetwork.IPAddress} + network := containermetadata.Network{NetworkMode: networkMode, IPv4Addresses: ipv4Addresses} + networks = append(networks, network) + } + } else { + ipv4Addresses := []string{ipv4AddressFromSettings} + network := containermetadata.Network{NetworkMode: networkModeFromHostConfig, IPv4Addresses: ipv4Addresses} + networks = append(networks, network) + } + return networks, nil +} diff --git a/agent/handlers/v3/container_stats_handler.go b/agent/handlers/v3/container_stats_handler.go new file mode 100644 index 00000000000..3492cbe46f1 --- /dev/null +++ b/agent/handlers/v3/container_stats_handler.go @@ -0,0 +1,61 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" +) + +// ContainerStatsPath specifies the relative URI path for serving container stats. 
+var ContainerStatsPath = "/v3/" + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/stats" + +// ContainerStatsHandler returns the handler method for handling container stats requests. +func ContainerStatsHandler(state dockerstate.TaskEngineState, statsEngine stats.Engine) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := GetTaskARNByRequest(r, state) + if err != nil { + errResponseJSON, err := json.Marshal( + fmt.Sprintf("V3 container stats handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + + containerID, err := GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 container stats handler: unable to get container ID from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerStats) + return + } + + seelog.Infof("V3 container stats handler: writing response for container '%s'", containerID) + + // v3 handler shares the same container stats response format with v2 handler. + v2.WriteContainerStatsResponse(w, taskARN, containerID, statsEngine) + } +} diff --git a/agent/handlers/v3/helper.go b/agent/handlers/v3/helper.go new file mode 100644 index 00000000000..1825fe7f570 --- /dev/null +++ b/agent/handlers/v3/helper.go @@ -0,0 +1,70 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/pkg/errors" +) + +func GetTaskARNByRequest(r *http.Request, state dockerstate.TaskEngineState) (string, error) { + v3EndpointID, ok := utils.GetMuxValueFromRequest(r, V3EndpointIDMuxName) + if !ok { + return "", errors.New("unable to get v3 endpoint ID from request") + } + + // Get task Arn from the v3 endpoint ID. + taskARN, ok := state.TaskARNByV3EndpointID(v3EndpointID) + if !ok { + return "", errors.Errorf("unable to get task Arn from v3 endpoint ID: %s", v3EndpointID) + } + + return taskARN, nil +} + +func GetContainerIDByRequest(r *http.Request, state dockerstate.TaskEngineState) (string, error) { + v3EndpointID, ok := utils.GetMuxValueFromRequest(r, V3EndpointIDMuxName) + if !ok { + return "", errors.New("unable to get v3 endpoint ID from request") + } + + // Get docker ID from the v3 endpoint ID. 
+ dockerID, ok := state.DockerIDByV3EndpointID(v3EndpointID) + if !ok { + return "", errors.Errorf("unable to get docker ID from v3 endpoint ID: %s", v3EndpointID) + } + + return dockerID, nil +} + +func GetAssociationTypeByRequest(r *http.Request) (string, error) { + associationType, ok := utils.GetMuxValueFromRequest(r, associationTypeMuxName) + if !ok { + return "", errors.New("unable to get association type from request") + } + + return associationType, nil +} + +func GetAssociationNameByRequest(r *http.Request) (string, error) { + associationName, ok := utils.GetMuxValueFromRequest(r, associationNameMuxName) + if !ok { + return "", errors.New("unable to get association name from request") + } + + return associationName, nil +} diff --git a/agent/handlers/v3/response.go b/agent/handlers/v3/response.go new file mode 100644 index 00000000000..e5a70133ec7 --- /dev/null +++ b/agent/handlers/v3/response.go @@ -0,0 +1,63 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License.
+ +package v3 + +import ( + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/pkg/errors" +) + +// AssociationsResponse defines the schema for the associations response JSON object +type AssociationsResponse struct { + Associations []string `json:"Associations"` +} + +func NewAssociationsResponse(containerID, taskARN, associationType string, state dockerstate.TaskEngineState) (*AssociationsResponse, error) { + dockerContainer, ok := state.ContainerByID(containerID) + if !ok { + return nil, errors.Errorf("unable to get container name from docker id: %s", containerID) + } + containerName := dockerContainer.Container.Name + + task, ok := state.TaskByArn(taskARN) + if !ok { + return nil, errors.Errorf("unable to get task from task arn: %s", taskARN) + } + + associationNames := task.AssociationsByTypeAndContainer(associationType, containerName) + + return &AssociationsResponse{ + Associations: associationNames, + }, nil +} + +// Association response is a string that's assumed to be in valid JSON format, which will be exactly the same as +// the value of Association.Content.Value (cp is responsible to validate it and deal with it if it's not valid). We +// don't do any decoding base on the encoding, because the only encoding that cp currently sends us is 'identity'; +// we don't explicitly model the value field as a struct because we don't want to let agent's implementation depends +// on the payload format of the association (i.e. 
eia device for now) +func NewAssociationResponse(taskARN, associationType, associationName string, state dockerstate.TaskEngineState) (string, error) { + task, ok := state.TaskByArn(taskARN) + if !ok { + return "", errors.Errorf("unable to get task from task arn: %s", taskARN) + } + + association, ok := task.AssociationByTypeAndName(associationType, associationName) + + if !ok { + return "", errors.Errorf("unable to get association from association type %s and association name %s", associationType, associationName) + } + + return association.Content.Value, nil +} diff --git a/agent/handlers/v3/response_test.go b/agent/handlers/v3/response_test.go new file mode 100644 index 00000000000..6ac0b493ab6 --- /dev/null +++ b/agent/handlers/v3/response_test.go @@ -0,0 +1,105 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v3 + +import ( + "encoding/json" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + taskARN = "taskARN" + dockerID = "dockerID" + dockerName = "dockerName" + containerName = "containerName" + associationType = "elastic-inference" + associationName = "dev1" + associationEncoding = "base64" + associationValue = "val" +) + +var ( + container = &apicontainer.Container{ + Name: containerName, + } + + dockerContainer = &apicontainer.DockerContainer{ + DockerID: dockerID, + DockerName: dockerName, + Container: container, + } + + association = apitask.Association{ + Containers: []string{containerName}, + Content: apitask.EncodedString{ + Encoding: associationEncoding, + Value: associationValue, + }, + Name: associationName, + Type: associationType, + } + + task = &apitask.Task{ + Arn: taskARN, + Containers: []*apicontainer.Container{container}, + Associations: []apitask.Association{association}, + } +) + +func TestContainerAssociationsResponse(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + expectedAssociationsResponseMap := map[string]interface{}{ + "Associations": []interface{}{associationName}, + } + + gomock.InOrder( + state.EXPECT().ContainerByID(dockerID).Return(dockerContainer, true), + state.EXPECT().TaskByArn(taskARN).Return(task, true), + ) + + associationsResponse, err := NewAssociationsResponse(dockerID, taskARN, associationType, state) + assert.NoError(t, err) + + associationsResponseJSON, err := json.Marshal(associationsResponse) + assert.NoError(t, err) + + associationsResponseMap := make(map[string]interface{}) + json.Unmarshal(associationsResponseJSON, &associationsResponseMap) + assert.Equal(t, 
expectedAssociationsResponseMap, associationsResponseMap) +} + +func TestContainerAssociationResponse(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + state := mock_dockerstate.NewMockTaskEngineState(ctrl) + + state.EXPECT().TaskByArn(taskARN).Return(task, true) + + associationResponse, err := NewAssociationResponse(taskARN, associationType, associationName, state) + assert.NoError(t, err) + + // the response is expected to be the same as the association value + assert.Equal(t, associationResponse, associationValue) +} diff --git a/agent/handlers/v3/task_metadata_handler.go b/agent/handlers/v3/task_metadata_handler.go new file mode 100644 index 00000000000..5a7c007529a --- /dev/null +++ b/agent/handlers/v3/task_metadata_handler.go @@ -0,0 +1,89 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + "github.com/cihub/seelog" +) + +// v3EndpointIDMuxName is the key that's used in gorilla/mux to get the v3 endpoint ID. +const V3EndpointIDMuxName = "v3EndpointIDMuxName" + +// TaskMetadataPath specifies the relative URI path for serving task metadata. 
+var TaskMetadataPath = "/v3/" + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task" + +// TaskWithTagsMetadataPath specifies the relative URI path for serving task metadata +// with Container Instance and Task Tags retrieved through the ECS API +var TaskWithTagsMetadataPath = "/v3/" + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/taskWithTags" + +// TaskMetadataHandler returns the handler method for handling task metadata requests. +func TaskMetadataHandler(state dockerstate.TaskEngineState, ecsClient api.ECSClient, cluster, az, containerInstanceArn string, propagateTags bool) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := GetTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V3 task metadata handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, responseJSON, utils.RequestTypeTaskMetadata) + return + } + + seelog.Infof("V3 task metadata handler: writing response for task '%s'", taskARN) + + taskResponse, err := v2.NewTaskResponse(taskARN, state, ecsClient, cluster, az, containerInstanceArn, propagateTags, false) + if err != nil { + errResponseJSON, err := json.Marshal("Unable to generate metadata for task: '" + taskARN + "'") + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeTaskMetadata) + return + } + + task, ok := state.TaskByArn(taskARN) + if ok && !task.IsNetworkModeAWSVPC() { + // fill in non-awsvpc network details for container responses here + responses := make([]v2.ContainerResponse, 0) + for _, containerResponse := range taskResponse.Containers { + networks, err := GetContainerNetworkMetadata(containerResponse.ID,
state) + if err != nil { + errResponseJSON, err := json.Marshal(err.Error()) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeContainerMetadata) + return + } + containerResponse.Networks = networks + responses = append(responses, containerResponse) + } + taskResponse.Containers = responses + } + responseJSON, err := json.Marshal(taskResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskMetadata) + } +} diff --git a/agent/handlers/v3/task_stats_handler.go b/agent/handlers/v3/task_stats_handler.go new file mode 100644 index 00000000000..763855c69f1 --- /dev/null +++ b/agent/handlers/v3/task_stats_handler.go @@ -0,0 +1,50 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v3 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" +) + +// TaskStatsPath specifies the relative URI path for serving task stats. 
+var TaskStatsPath = "/v3/" + utils.ConstructMuxVar(V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task/stats" + +// TaskStatsHandler returns the handler method for handling task stats requests. +func TaskStatsHandler(state dockerstate.TaskEngineState, statsEngine stats.Engine) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := GetTaskARNByRequest(r, state) + if err != nil { + errResponseJSON, err := json.Marshal( + fmt.Sprintf("V3 task stats handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + + seelog.Infof("V3 task stats handler: writing response for task '%s'", taskARN) + + // v3 handler shares the same task stats response format with v2 handler. + v2.WriteTaskStatsResponse(w, taskARN, state, statsEngine) + } +} diff --git a/agent/handlers/v4/container_association_handler.go b/agent/handlers/v4/container_association_handler.go new file mode 100644 index 00000000000..848b54a006a --- /dev/null +++ b/agent/handlers/v4/container_association_handler.go @@ -0,0 +1,164 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v4 + +import ( + "encoding/json" + "fmt" + "net/http" + + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + "github.com/cihub/seelog" +) + +const ( + // associationTypeMuxName is the key that's used in gorilla/mux to get the association type. + associationTypeMuxName = "associationTypeMuxName" + // associationNameMuxName is the key that's used in gorilla/mux to get the association name. + associationNameMuxName = "associationNameMuxName" +) + +var ( + // Container associations endpoint: /v4/<v3 endpoint id>/associations/<association type> + ContainerAssociationsPath = fmt.Sprintf("/v4/%s/associations/%s", + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationTypeMuxName, utils.AnythingButSlashRegEx)) + // Container association endpoint: /v4/<v3 endpoint id>/associations/<association type>/<association name> + ContainerAssociationPath = fmt.Sprintf("/v4/%s/associations/%s/%s", + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationTypeMuxName, utils.AnythingButSlashRegEx), + utils.ConstructMuxVar(associationNameMuxName, utils.AnythingButEmptyRegEx)) + // Treat "/v4/<v3 endpoint id>/associations/<association type>/" as a container association endpoint with empty association name (therefore invalid), to be consistent with similar situation in credentials endpoint and v3 metadata endpoint + ContainerAssociationPathWithSlash = ContainerAssociationsPath + "/" +) + +// ContainerAssociationsHandler returns the handler method for handling container associations requests.
+func ContainerAssociationsHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + containerID, err := v3.GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: unable to get container id from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + taskARN, err := v3.GetTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + associationType, err := v3.GetAssociationTypeByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociations) + return + } + + seelog.Infof("V4 container associations handler: writing response for container '%s' for association type %s", containerID, associationType) + + writeContainerAssociationsResponse(w, containerID, taskARN, associationType, state) + } +} + +// ContainerAssociationHandler returns the handler method for handling container association requests. 
+func ContainerAssociationHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskARN, err := v3.GetTaskARNByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + associationType, err := v3.GetAssociationTypeByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + associationName, err := v3.GetAssociationNameByRequest(r) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container associations handler: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerAssociation) + return + } + + seelog.Infof("V4 container association handler: writing response for association '%s' of type %s", associationName, associationType) + + writeContainerAssociationResponse(w, taskARN, associationType, associationName, state) + } +} + +func writeContainerAssociationsResponse(w http.ResponseWriter, containerID, taskARN, associationType string, state dockerstate.TaskEngineState) { + associationsResponse, err := v3.NewAssociationsResponse(containerID, taskARN, associationType, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("Unable to write container associations response: %s", err.Error())) + if e := 
utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerAssociations) + return + } + + responseJSON, err := json.Marshal(associationsResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeContainerAssociations) +} + +func writeContainerAssociationResponse(w http.ResponseWriter, taskARN, associationType, associationName string, state dockerstate.TaskEngineState) { + associationResponse, err := v3.NewAssociationResponse(taskARN, associationType, associationName, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("Unable to write container association response: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerAssociation) + return + } + + // associationResponse is assumed to be a valid json string; see comments on newAssociationResponse method for details + utils.WriteJSONToResponse(w, http.StatusOK, []byte(associationResponse), utils.RequestTypeContainerAssociation) +} diff --git a/agent/handlers/v4/container_metadata_handler.go b/agent/handlers/v4/container_metadata_handler.go new file mode 100644 index 00000000000..9f35fb240a0 --- /dev/null +++ b/agent/handlers/v4/container_metadata_handler.go @@ -0,0 +1,115 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package v4 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// ContainerMetadataPath specifies the relative URI path for serving container metadata. +var ContainerMetadataPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + +// ContainerMetadataHandler returns the handler method for handling container metadata requests. +func ContainerMetadataHandler(state dockerstate.TaskEngineState) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + containerID, err := v3.GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal( + fmt.Sprintf("V4 container metadata handler: unable to get container ID from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, responseJSON, utils.RequestTypeContainerMetadata) + return + } + containerResponse, err := GetContainerResponse(containerID, state) + if err != nil { + errResponseJSON, err := json.Marshal(err.Error()) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, errResponseJSON, utils.RequestTypeContainerMetadata) + return + } + seelog.Infof("V4 container metadata handler: writing response for container '%s'", containerID) + + responseJSON, err := json.Marshal(containerResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, 
utils.RequestTypeContainerMetadata) + } +} + +// GetContainerResponse gets container response for v4 metadata +func GetContainerResponse(containerID string, state dockerstate.TaskEngineState) (*ContainerResponse, error) { + containerResponse, err := NewContainerResponse(containerID, state) + if err != nil { + seelog.Errorf("Unable to get container metadata for container '%s'", containerID) + return nil, errors.Errorf("unable to generate metadata for container '%s'", containerID) + } + + // fill in network details if not set for NON AWSVPC Task + if containerResponse.Networks == nil { + if containerResponse.Networks, err = GetContainerNetworkMetadata(containerID, state); err != nil { + return nil, err + } + } + return containerResponse, nil +} + +// GetContainerNetworkMetadata returns the network metadata for the container +func GetContainerNetworkMetadata(containerID string, state dockerstate.TaskEngineState) ([]Network, error) { + dockerContainer, ok := state.ContainerByID(containerID) + if !ok { + return nil, errors.Errorf("unable to find container '%s'", containerID) + } + // the logic here has been reused from + // https://github.com/aws/amazon-ecs-agent/blob/0c8913ba33965cf6ffdd6253fad422458d9346bd/agent/containermetadata/parse_metadata.go#L123 + settings := dockerContainer.Container.GetNetworkSettings() + if settings == nil { + return nil, errors.Errorf("unable to generate network response for container '%s'", containerID) + } + // This metadata is the information provided in older versions of the API + // We get the NetworkMode (Network interface name) from the HostConfig because this + // this is the network with which the container is created + ipv4AddressFromSettings := settings.IPAddress + networkModeFromHostConfig := dockerContainer.Container.GetNetworkMode() + + // Extensive Network information is not available for Docker API versions 1.17-1.20 + // Instead we only get the details of the first network + networks := make([]Network, 0) + if 
len(settings.Networks) > 0 { + for modeFromSettings, containerNetwork := range settings.Networks { + networkMode := modeFromSettings + ipv4Addresses := []string{containerNetwork.IPAddress} + network := Network{Network: containermetadata.Network{NetworkMode: networkMode, IPv4Addresses: ipv4Addresses}} + networks = append(networks, network) + } + } else { + ipv4Addresses := []string{ipv4AddressFromSettings} + network := Network{Network: containermetadata.Network{NetworkMode: networkModeFromHostConfig, IPv4Addresses: ipv4Addresses}} + networks = append(networks, network) + } + return networks, nil +} diff --git a/agent/handlers/v4/container_stats_handler.go b/agent/handlers/v4/container_stats_handler.go new file mode 100644 index 00000000000..6da7478356b --- /dev/null +++ b/agent/handlers/v4/container_stats_handler.go @@ -0,0 +1,85 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v4 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" +) + +// ContainerStatsPath specifies the relative URI path for serving container stats. 
+var ContainerStatsPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/stats" + +// ContainerStatsHandler returns the handler method for handling container stats requests. +func ContainerStatsHandler(state dockerstate.TaskEngineState, statsEngine stats.Engine) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskArn, err := v3.GetTaskARNByRequest(r, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("V4 container stats handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + + containerID, err := v3.GetContainerIDByRequest(r, state) + if err != nil { + responseJSON, err := json.Marshal(fmt.Sprintf("V4 container stats handler: unable to get container ID from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, responseJSON, utils.RequestTypeContainerStats) + return + } + + seelog.Infof("V4 container stats handler: writing response for container '%s'", containerID) + // v4 handler shares the same container stats response format with v2 handler. + WriteV4ContainerStatsResponse(w, taskArn, containerID, statsEngine) + } +} + +// WriteV4ContainerStatsResponse writes the container stats to response writer.
+func WriteV4ContainerStatsResponse(w http.ResponseWriter, + taskARN string, + containerID string, + statsEngine stats.Engine) { + dockerStats, network_rate_stats, err := statsEngine.ContainerDockerStats(taskARN, containerID) + if err != nil { + errResponseJSON, err := json.Marshal("Unable to get container stats for: " + containerID) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeContainerStats) + return + } + + containerStatsResponse := StatsResponse{ + StatsJSON: dockerStats, + Network_rate_stats: network_rate_stats, + } + + responseJSON, err := json.Marshal(containerStatsResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeContainerStats) +} diff --git a/agent/handlers/v4/response.go b/agent/handlers/v4/response.go new file mode 100644 index 00000000000..88a261a5f21 --- /dev/null +++ b/agent/handlers/v4/response.go @@ -0,0 +1,203 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v4 + +import ( + "github.com/aws/amazon-ecs-agent/agent/api" + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/containermetadata" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v2 "github.com/aws/amazon-ecs-agent/agent/handlers/v2" + "github.com/pkg/errors" +) + +// TaskResponse is the v4 Task response. It augments the v4 Container response +// with the v2 task response object. +type TaskResponse struct { + *v2.TaskResponse + Containers []ContainerResponse `json:"Containers,omitempty"` +} + +// ContainerResponse is the v4 Container response. It augments the v4 Network response +// with the v2 container response object. +type ContainerResponse struct { + *v2.ContainerResponse + Networks []Network `json:"Networks,omitempty"` +} + +// Network is the v4 Network response. It adds a bunch of information about network +// interface(s) on top of what is supported by v4. +// See `NetworkInterfaceProperties` for more details. +type Network struct { + containermetadata.Network + // NetworkInterfaceProperties specifies additional properties of the network + // of the network interface that are exposed via the metadata server. + // We currently populate this only for the `awsvpc` networking mode. + NetworkInterfaceProperties +} + +// NetworkInterfaceProperties represents additional properties we may want to expose via +// task metadata about the network interface that's attached to the container/task. We +// mostly use this to populate data about ENIs for tasks launched with `awsvpc` mode. +type NetworkInterfaceProperties struct { + // AttachmentIndex reflects the `index` specified by the customer (if any) + // while creating the task with `awsvpc` mode. 
+ AttachmentIndex *int `json:"AttachmentIndex,omitempty"` + // MACAddress is the MAC address of the network interface. + MACAddress string `json:"MACAddress,omitempty"` + // IPv4SubnetCIDRBlock is the IPv4 CIDR address block associated with the interface's subnet. + IPV4SubnetCIDRBlock string `json:"IPv4SubnetCIDRBlock,omitempty"` + // IPv6SubnetCIDRBlock is the IPv6 CIDR address block associated with the interface's subnet. + IPv6SubnetCIDRBlock string `json:"IPv6SubnetCIDRBlock,omitempty"` + // DomainNameServers specifies the nameserver IP addresses for the network interface. + DomainNameServers []string `json:"DomainNameServers,omitempty"` + // DomainNameSearchList specifies the search list for the domain name lookup for + // the network interface. + DomainNameSearchList []string `json:"DomainNameSearchList,omitempty"` + // PrivateDNSName is the dns name assigned to this network interface. + PrivateDNSName string `json:"PrivateDNSName,omitempty"` + // SubnetGatewayIPV4Address is the IPv4 gateway address for the network interface. + SubnetGatewayIPV4Address string `json:"SubnetGatewayIpv4Address,omitempty"` +} + +// NewTaskResponse creates a new v4 response object for the task. It augments v2 task response +// with additional network interface fields. +func NewTaskResponse( + taskARN string, + state dockerstate.TaskEngineState, + ecsClient api.ECSClient, + cluster string, + az string, + containerInstanceARN string, + propagateTags bool, +) (*TaskResponse, error) { + // Construct the v2 response first. + v2Resp, err := v2.NewTaskResponse(taskARN, state, ecsClient, cluster, az, + containerInstanceARN, propagateTags, true) + if err != nil { + return nil, err + } + var containers []ContainerResponse + // Convert each container response into v4 container response. 
+ for i, container := range v2Resp.Containers { + networks, err := toV4NetworkResponse(container.Networks, func() (*apitask.Task, bool) { + return state.TaskByArn(taskARN) + }) + if err != nil { + return nil, err + } + containers = append(containers, ContainerResponse{ + ContainerResponse: &v2Resp.Containers[i], + Networks: networks, + }) + } + + return &TaskResponse{ + TaskResponse: v2Resp, + Containers: containers, + }, nil +} + +// NewContainerResponse creates a new v4 container response based on container id. It augments +// v4 container response with additional network interface fields. +func NewContainerResponse( + containerID string, + state dockerstate.TaskEngineState, +) (*ContainerResponse, error) { + // Construct the v2 response first. + container, err := v2.NewContainerResponseFromState(containerID, state, true) + if err != nil { + return nil, err + } + // Convert v2 network responses into v4 network responses. + networks, err := toV4NetworkResponse(container.Networks, func() (*apitask.Task, bool) { + return state.TaskByID(containerID) + }) + if err != nil { + return nil, err + } + return &ContainerResponse{ + ContainerResponse: container, + Networks: networks, + }, nil +} + +// toV4NetworkResponse converts v2 network response to v4. Additional fields are only +// added if the networking mode is 'awsvpc'. The `lookup` function pointer is used to +// look up the task information in the local state based on the id, which could be +// either task arn or contianer id. 
+func toV4NetworkResponse( + networks []containermetadata.Network, + lookup func() (*apitask.Task, bool), +) ([]Network, error) { + var resp []Network + for _, network := range networks { + respNetwork := Network{Network: network} + if network.NetworkMode == utils.NetworkModeAWSVPC { + task, ok := lookup() + if !ok { + return nil, errors.New("v4 task response: unable to find task") + } + props, err := newNetworkInterfaceProperties(task) + if err != nil { + return nil, err + } + respNetwork.NetworkInterfaceProperties = props + } + resp = append(resp, respNetwork) + } + + return resp, nil +} + +// newNetworkInterfaceProperties creates the NetworkInterfaceProperties object for a given +// task. +func newNetworkInterfaceProperties(task *apitask.Task) (NetworkInterfaceProperties, error) { + eni := task.GetPrimaryENI() + + var attachmentIndexPtr *int + if task.IsNetworkModeAWSVPC() { + var vpcIndex = 0 + attachmentIndexPtr = &vpcIndex + } + + return NetworkInterfaceProperties{ + // TODO this is hard-coded to `0` for now. Once backend starts populating + // `Index` field for an ENI, we should set it as per that. Since we + // only support 1 ENI per task anyway, setting it to `0` is acceptable + AttachmentIndex: attachmentIndexPtr, + IPV4SubnetCIDRBlock: eni.GetIPv4SubnetCIDRBlock(), + IPv6SubnetCIDRBlock: eni.GetIPv6SubnetCIDRBlock(), + MACAddress: eni.MacAddress, + DomainNameServers: eni.DomainNameServers, + DomainNameSearchList: eni.DomainNameSearchList, + PrivateDNSName: eni.PrivateDNSName, + SubnetGatewayIPV4Address: eni.SubnetGatewayIPV4Address, + }, nil +} + +// NewPulledContainerResponse creates a new v4 container response for a pulled container. +// It augments v4 container response with an additional empty network interface field. 
+func NewPulledContainerResponse( + dockerContainer *apicontainer.DockerContainer, + eni *apieni.ENI, +) ContainerResponse { + resp := v2.NewContainerResponse(dockerContainer, eni, true) + return ContainerResponse{ + ContainerResponse: &resp, + } +} diff --git a/agent/handlers/v4/response_test.go b/agent/handlers/v4/response_test.go new file mode 100644 index 00000000000..c32b2e441ec --- /dev/null +++ b/agent/handlers/v4/response_test.go @@ -0,0 +1,157 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 

package v4

import (
	"encoding/json"
	"testing"
	"time"

	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
	apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
	apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
	mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks"
	apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
	apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
	mock_dockerstate "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks"
	"github.com/docker/docker/api/types"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Shared fixture values for the v4 response tests.
const (
	taskARN                  = "t1"
	cluster                  = "default"
	family                   = "sleep"
	version                  = "1"
	containerID              = "cid"
	containerName            = "sleepy"
	imageName                = "busybox"
	imageID                  = "bUsYbOx"
	cpu                      = 1024
	memory                   = 512
	eniIPv4Address           = "192.168.0.5"
	ipv4SubnetCIDRBlock      = "192.168.0.0/24"
	eniIPv6Address           = "2600:1f18:619e:f900:8467:78b2:81c4:207d"
	ipv6SubnetCIDRBlock      = "2600:1f18:619e:f900::/64"
	subnetGatewayIPV4Address = "192.168.0.1/24"
	volName                  = "volume1"
	volSource                = "/var/lib/volume1"
	volDestination           = "/volume"
	availabilityZone         = "us-west-2b"
	containerInstanceArn     = "containerInstance-test"
)

// TestNewTaskContainerResponses exercises both NewTaskResponse and
// NewContainerResponse against a mocked engine state holding one running
// awsvpc task with a single container, and verifies the ENI-derived v4
// network fields survive JSON marshalling.
func TestNewTaskContainerResponses(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	state := mock_dockerstate.NewMockTaskEngineState(ctrl)
	ecsClient := mock_api.NewMockECSClient(ctrl)
	now := time.Now()
	// Task fixture with one ENI carrying both IPv4 and IPv6 addresses.
	task := &apitask.Task{
		Arn:                 taskARN,
		Family:              family,
		Version:             version,
		DesiredStatusUnsafe: apitaskstatus.TaskRunning,
		KnownStatusUnsafe:   apitaskstatus.TaskRunning,
		ENIs: []*apieni.ENI{
			{
				IPV4Addresses: []*apieni.ENIIPV4Address{
					{
						Address: eniIPv4Address,
					},
				},
				IPV6Addresses: []*apieni.ENIIPV6Address{
					{
						Address: eniIPv6Address,
					},
				},
				SubnetGatewayIPV4Address: subnetGatewayIPV4Address,
			},
		},
		CPU:                      cpu,
		Memory:                   memory,
		PullStartedAtUnsafe:      now,
		PullStoppedAtUnsafe:      now,
		ExecutionStoppedAtUnsafe: now,
	}
	container := &apicontainer.Container{
		Name:                containerName,
		Image:               imageName,
		ImageID:             imageID,
		DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
		KnownStatusUnsafe:   apicontainerstatus.ContainerRunning,
		CPU:                 cpu,
		Memory:              memory,
		Type:                apicontainer.ContainerNormal,
		Ports: []apicontainer.PortBinding{
			{
				ContainerPort: 80,
				Protocol:      apicontainer.TransportProtocolTCP,
			},
		},
		VolumesUnsafe: []types.MountPoint{
			{
				Name:        volName,
				Source:      volSource,
				Destination: volDestination,
			},
		},
	}
	created := time.Now()
	container.SetCreatedAt(created)
	labels := map[string]string{
		"foo": "bar",
	}
	container.SetLabels(labels)
	dockerContainer := &apicontainer.DockerContainer{
		DockerID:   containerID,
		DockerName: containerName,
		Container:  container,
	}
	containerNameToDockerContainer := map[string]*apicontainer.DockerContainer{
		taskARN: dockerContainer,
	}
	// Expected state lookups: NewTaskResponse resolves the task, then the
	// container map (via the v2 response), then the task again for the
	// awsvpc network upgrade.
	gomock.InOrder(
		state.EXPECT().TaskByArn(taskARN).Return(task, true),
		state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true),
		state.EXPECT().TaskByArn(taskARN).Return(task, true),
	)

	taskResponse, err := NewTaskResponse(taskARN, state, ecsClient, cluster, availabilityZone, containerInstanceArn, false)
	require.NoError(t, err)
	_, err = json.Marshal(taskResponse)
	require.NoError(t, err)
	assert.Equal(t, created.UTC().String(), taskResponse.Containers[0].CreatedAt.String())
	assert.Equal(t, ipv4SubnetCIDRBlock, taskResponse.Containers[0].Networks[0].IPV4SubnetCIDRBlock)
	assert.Equal(t, eniIPv6Address, taskResponse.Containers[0].Networks[0].IPv6Addresses[0])
	assert.Equal(t, ipv6SubnetCIDRBlock, taskResponse.Containers[0].Networks[0].IPv6SubnetCIDRBlock)
	assert.Equal(t, subnetGatewayIPV4Address, taskResponse.Containers[0].Networks[0].SubnetGatewayIPV4Address)

	// NewContainerResponse resolves the task by container id twice: once via
	// the v2 response and once for the awsvpc network upgrade.
	gomock.InOrder(
		state.EXPECT().ContainerByID(containerID).Return(dockerContainer, true),
		state.EXPECT().TaskByID(containerID).Return(task, true).Times(2),
	)
	containerResponse, err := NewContainerResponse(containerID, state)
	require.NoError(t, err)
	_, err = json.Marshal(containerResponse)
	require.NoError(t, err)
	assert.Equal(t, created.UTC().String(), containerResponse.CreatedAt.String())
	assert.Equal(t, "192.168.0.0/24", containerResponse.Networks[0].IPV4SubnetCIDRBlock)
	assert.Equal(t, subnetGatewayIPV4Address, containerResponse.Networks[0].SubnetGatewayIPV4Address)
}

diff --git a/agent/handlers/v4/stats_response.go b/agent/handlers/v4/stats_response.go
new file mode 100644
index 00000000000..ebaa914ca09
--- /dev/null
+++ b/agent/handlers/v4/stats_response.go
@@ -0,0 +1,63 @@
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
//     http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package v4

import (
	"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
	"github.com/aws/amazon-ecs-agent/agent/stats"
	"github.com/cihub/seelog"
	"github.com/docker/docker/api/types"
	"github.com/pkg/errors"
)

// StatsResponse is the v4 Stats response. It augments docker's container
// stats with the task's computed network rate stats.
+type StatsResponse struct { + *types.StatsJSON + Network_rate_stats *stats.NetworkStatsPerSec `json:"network_rate_stats,omitempty"` +} + +// NewV4TaskStatsResponse returns a new v4 task stats response object +func NewV4TaskStatsResponse(taskARN string, + state dockerstate.TaskEngineState, + statsEngine stats.Engine) (map[string]StatsResponse, error) { + + containerMap, ok := state.ContainerMapByArn(taskARN) + if !ok { + return nil, errors.Errorf( + "v4 task stats response: unable to lookup containers for task %s", + taskARN) + } + + resp := make(map[string]StatsResponse) + for _, dockerContainer := range containerMap { + containerID := dockerContainer.DockerID + dockerStats, network_rate_stats, err := statsEngine.ContainerDockerStats(taskARN, containerID) + if err != nil { + seelog.Warnf("V4 task stats response: Unable to get stats for container '%s' for task '%s': %v", + containerID, taskARN, err) + resp[containerID] = StatsResponse{} + continue + } + + statsResponse := StatsResponse{ + StatsJSON: dockerStats, + Network_rate_stats: network_rate_stats, + } + + resp[containerID] = statsResponse + } + + return resp, nil +} diff --git a/agent/handlers/v4/task_metadata_handler.go b/agent/handlers/v4/task_metadata_handler.go new file mode 100644 index 00000000000..a315d8ac361 --- /dev/null +++ b/agent/handlers/v4/task_metadata_handler.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package v4 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + "github.com/cihub/seelog" +) + +// TaskMetadataPath specifies the relative URI path for serving task metadata. +var TaskMetadataPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task" + +// TaskWithTagsMetadataPath specifies the relative URI path for serving task metdata +// with Container Instance and Task Tags retrieved through the ECS API +var TaskWithTagsMetadataPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/taskWithTags" + +// TaskMetadataHandler returns the handler method for handling task metadata requests. +func TaskMetadataHandler(state dockerstate.TaskEngineState, ecsClient api.ECSClient, cluster, az, containerInstanceArn string, propagateTags bool) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + var taskArn, err = v3.GetTaskARNByRequest(r, state) + if err != nil { + ResponseJSON, err := json.Marshal(fmt.Sprintf("V4 task metadata handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusInternalServerError, ResponseJSON, utils.RequestTypeTaskMetadata) + return + } + + seelog.Infof("V4 taskMetadata handler: Writing response for task '%s'", taskArn) + + taskResponse, err := NewTaskResponse(taskArn, state, ecsClient, cluster, az, containerInstanceArn, propagateTags) + if err != nil { + errResponseJson, err := json.Marshal("Unable to generate metadata for v4 task: '" + taskArn + "'") + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, 
http.StatusInternalServerError, errResponseJson, utils.RequestTypeTaskMetadata) + return + } + + task, _ := state.TaskByArn(taskArn) + // for non-awsvpc task mode + if !task.IsNetworkModeAWSVPC() { + // fill in non-awsvpc network details for container responses here + responses := make([]ContainerResponse, 0) + for _, containerResponse := range taskResponse.Containers { + networks, err := GetContainerNetworkMetadata(containerResponse.ID, state) + if err != nil { + seelog.Warnf("Error retrieving network metadata for container %s - %s", containerResponse.ID, err) + } + containerResponse.Networks = networks + responses = append(responses, containerResponse) + } + taskResponse.Containers = responses + } + + pulledContainers, _ := state.PulledContainerMapByArn(task.Arn) + // Convert each pulled container into v4 container response + // and append pulled containers to taskResponse.Containers + for _, dockerContainer := range pulledContainers { + taskResponse.Containers = append(taskResponse.Containers, + NewPulledContainerResponse(dockerContainer, task.GetPrimaryENI())) + } + + responseJSON, err := json.Marshal(taskResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskMetadata) + } +} diff --git a/agent/handlers/v4/task_stats_handler.go b/agent/handlers/v4/task_stats_handler.go new file mode 100644 index 00000000000..6536bf9e5db --- /dev/null +++ b/agent/handlers/v4/task_stats_handler.go @@ -0,0 +1,69 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package v4 + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/handlers/utils" + v3 "github.com/aws/amazon-ecs-agent/agent/handlers/v3" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/cihub/seelog" +) + +var TaskStatsPath = "/v4/" + utils.ConstructMuxVar(v3.V3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task/stats" + +func TaskStatsHandler(state dockerstate.TaskEngineState, statsEngine stats.Engine) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + taskArn, err := v3.GetTaskARNByRequest(r, state) + if err != nil { + errResponseJSON, err := json.Marshal(fmt.Sprintf("V4 task stats handler: unable to get task arn from request: %s", err.Error())) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + seelog.Infof("V4 tasks stats handler: writing response for task '%s'", taskArn) + WriteV4TaskStatsResponse(w, taskArn, state, statsEngine) + } +} + +// WriteV4TaskStatsResponse writes the task stats to response writer. 
+func WriteV4TaskStatsResponse(w http.ResponseWriter, + taskARN string, + state dockerstate.TaskEngineState, + statsEngine stats.Engine) { + + taskStatsResponse, err := NewV4TaskStatsResponse(taskARN, state, statsEngine) + if err != nil { + seelog.Warnf("Unable to get task stats for task '%s': %v", taskARN, err) + errResponseJSON, err := json.Marshal("Unable to get task stats for: " + taskARN) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskStats) + return + } + + responseJSON, err := json.Marshal(taskStatsResponse) + if e := utils.WriteResponseIfMarshalError(w, err); e != nil { + return + } + seelog.Infof("V4 Stats response json is %v", responseJSON) + utils.WriteJSONToResponse(w, http.StatusOK, responseJSON, utils.RequestTypeTaskStats) +} diff --git a/agent/httpclient/httpclient.go b/agent/httpclient/httpclient.go new file mode 100644 index 00000000000..0f69e363def --- /dev/null +++ b/agent/httpclient/httpclient.go @@ -0,0 +1,92 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package httpclient provides a thin, but testable, wrapper around http.Client. 
+// It adds an ECS header to requests and provides an interface +package httpclient + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/utils/cipher" + "github.com/aws/amazon-ecs-agent/agent/version" +) + +// Taken from the default http.Client behavior +const defaultDialTimeout = 30 * time.Second +const defaultDialKeepalive = 30 * time.Second + +//go:generate mockgen -destination=mock/$GOFILE -copyright_file=../../scripts/copyright_file net/http RoundTripper + +type ecsRoundTripper struct { + insecureSkipVerify bool + transport http.RoundTripper +} + +func userAgent() string { + return fmt.Sprintf("%s (%s) (+http://aws.amazon.com/ecs/)", version.String(), config.OSType) +} + +func (client *ecsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", userAgent()) + return client.transport.RoundTrip(req) +} + +func (client *ecsRoundTripper) CancelRequest(req *http.Request) { + if def, ok := client.transport.(*http.Transport); ok { + def.CancelRequest(req) + } +} + +// New returns an ECS httpClient with a roundtrip timeout of the given duration +func New(timeout time.Duration, insecureSkipVerify bool) *http.Client { + // Transport is the transport requests will be made over + // Note, these defaults are taken from the golang http library. We do not + // explicitly do not use theirs to avoid changing their behavior. 
+ transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: defaultDialTimeout, + KeepAlive: defaultDialKeepalive, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } + + transport.TLSClientConfig = &tls.Config{} + cipher.WithSupportedCipherSuites(transport.TLSClientConfig) + transport.TLSClientConfig.InsecureSkipVerify = insecureSkipVerify + + client := &http.Client{ + Transport: &ecsRoundTripper{insecureSkipVerify, transport}, + Timeout: timeout, + } + + return client +} + +// OverridableTransport is a transport that provides an override for testing purposes. +type OverridableTransport interface { + SetTransport(http.RoundTripper) +} + +// SetTransport allows you to set the transport. It is exposed so tests may +// override it. It fulfills an interface to allow calling this function via +// assertion to the exported interface +func (client *ecsRoundTripper) SetTransport(transport http.RoundTripper) { + client.transport = transport +} diff --git a/agent/httpclient/httpclient_test.go b/agent/httpclient/httpclient_test.go new file mode 100644 index 00000000000..d94582d8596 --- /dev/null +++ b/agent/httpclient/httpclient_test.go @@ -0,0 +1,48 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package httpclient + +import ( + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/utils/cipher" + "github.com/stretchr/testify/assert" +) + +func TestNewHttpClient(t *testing.T) { + expectedResult := New(time.Duration(10), true) + transport := expectedResult.Transport.(*ecsRoundTripper) + assert.Equal(t, cipher.SupportedCipherSuites, transport.transport.(*http.Transport).TLSClientConfig.CipherSuites) + assert.Equal(t, true, transport.transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) +} + +func TestNewHttpClientProxy(t *testing.T) { + proxy_url := "127.0.0.1:1234" + // Set Proxy + os.Setenv("HTTP_PROXY", proxy_url) + defer os.Unsetenv("HTTP_PROXY") + + client := New(time.Duration(10*time.Second), true) + _, err := client.Get("http://www.amazon.com") + // Client won't be able to connect because we have given a arbitrary proxy + assert.Error(t, err) + // Error message should contain the proxy url which shows that client tried to use the proxy url to connect + assert.True(t, strings.Contains(err.Error(), proxy_url), "proxy url not found in: %s", err.Error()) +} diff --git a/agent/httpclient/mock/gomock_helpers.go b/agent/httpclient/mock/gomock_helpers.go new file mode 100644 index 00000000000..f0dd46fbf44 --- /dev/null +++ b/agent/httpclient/mock/gomock_helpers.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package mock_http + +import ( + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/golang/mock/gomock" +) + +// Helpers for testing request equality + +type httpMatcher struct { + method string + url string +} + +func NewHTTPSimpleMatcher(method, url string) gomock.Matcher { + return &httpMatcher{method, url} +} + +func (m *httpMatcher) Matches(x interface{}) bool { + req, ok := x.(*http.Request) + if !ok { + return false + } + if req.Method != m.method { + return false + } + u, err := url.Parse(m.url) + if err != nil { + return false + } + if req.URL.String() != u.String() { + return false + } + return true +} + +func SuccessResponse(body string) *http.Response { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMinor: 1, + ProtoMajor: 1, + Body: ioutil.NopCloser(strings.NewReader(body)), + ContentLength: int64(len(body)), + } +} + +func (m *httpMatcher) String() string { + return "HTTP Matcher: " + m.method + " " + m.url +} + +type httpOpMatcher struct { + operation string +} + +func (m *httpOpMatcher) Matches(x interface{}) bool { + req, ok := x.(*http.Request) + if !ok { + return false + } + return req.Header.Get("x-amz-target") == m.operation +} + +func (m *httpOpMatcher) String() string { + return "HTTP Operation matcher: " + m.operation +} + +func NewHTTPOperationMatcher(op string) gomock.Matcher { + return &httpOpMatcher{ + operation: op, + } +} diff --git a/agent/httpclient/mock/httpclient.go b/agent/httpclient/mock/httpclient.go new file mode 100644 index 00000000000..d45bf9ddec3 --- /dev/null +++ b/agent/httpclient/mock/httpclient.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
// This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
//

// Code generated by MockGen. DO NOT EDIT.
// Source: net/http (interfaces: RoundTripper)
// NOTE(review): regenerate via the `go:generate mockgen` directive in
// agent/httpclient/httpclient.go instead of editing this file by hand.

// Package mock_http is a generated GoMock package.
package mock_http

import (
	http "net/http"
	reflect "reflect"

	gomock "github.com/golang/mock/gomock"
)

// MockRoundTripper is a mock of RoundTripper interface
type MockRoundTripper struct {
	ctrl     *gomock.Controller
	recorder *MockRoundTripperMockRecorder
}

// MockRoundTripperMockRecorder is the mock recorder for MockRoundTripper
type MockRoundTripperMockRecorder struct {
	mock *MockRoundTripper
}

// NewMockRoundTripper creates a new mock instance
func NewMockRoundTripper(ctrl *gomock.Controller) *MockRoundTripper {
	mock := &MockRoundTripper{ctrl: ctrl}
	mock.recorder = &MockRoundTripperMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockRoundTripper) EXPECT() *MockRoundTripperMockRecorder {
	return m.recorder
}

// RoundTrip mocks base method
func (m *MockRoundTripper) RoundTrip(arg0 *http.Request) (*http.Response, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RoundTrip", arg0)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// RoundTrip indicates an expected call of RoundTrip
func (mr *MockRoundTripperMockRecorder) RoundTrip(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoundTrip", reflect.TypeOf((*MockRoundTripper)(nil).RoundTrip), arg0)
}

diff --git a/agent/logger/audit/audit_log.go b/agent/logger/audit/audit_log.go
new file mode 100644
index 00000000000..2b3ae434c70
--- /dev/null
+++ b/agent/logger/audit/audit_log.go
@@ -0,0 +1,102 @@
// Copyright
Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package audit + +import ( + "fmt" + "strconv" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/logger" + "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" +) + +type AuditLogger interface { + Log(r request.LogRequest, httpResponseCode int, eventType string) + GetContainerInstanceArn() string + GetCluster() string +} + +type InfoLogger interface { + Info(i ...interface{}) +} + +type auditLog struct { + containerInstanceArn string + cluster string + logger InfoLogger + cfg *config.Config +} + +func NewAuditLog(containerInstanceArn string, cfg *config.Config, logger InfoLogger) AuditLogger { + return &auditLog{ + cluster: cfg.Cluster, + containerInstanceArn: containerInstanceArn, + logger: logger, + cfg: cfg, + } +} + +// Log will construct an audit log entry log and log that entry to the audit log +// using the underlying logger (which implements the audit.InfoLogger interface). 
+func (a *auditLog) Log(r request.LogRequest, httpResponseCode int, eventType string) { + if !a.cfg.CredentialsAuditLogDisabled { + auditLogEntry := constructAuditLogEntry(r, httpResponseCode, eventType, a.GetCluster(), + a.GetContainerInstanceArn()) + + a.logger.Info(auditLogEntry) + } +} + +func constructAuditLogEntry(r request.LogRequest, httpResponseCode int, eventType string, + cluster string, containerInstanceArn string) string { + commonAuditLogFields := constructCommonAuditLogEntryFields(r, httpResponseCode) + auditLogTypeFields := constructAuditLogEntryByType(eventType, cluster, containerInstanceArn) + + return fmt.Sprintf("%s %s", commonAuditLogFields, auditLogTypeFields) +} + +func (a *auditLog) GetCluster() string { + return a.cluster +} + +func (a *auditLog) GetContainerInstanceArn() string { + return a.containerInstanceArn +} + +func AuditLoggerConfig(cfg *config.Config) string { + config := ` + + + ` + if cfg.CredentialsAuditLogFile != "" { + if logger.Config.RolloverType == "size" { + config += ` + ` + } else { + config += ` + ` + } + } + config += ` + + + + + +` + return config +} diff --git a/agent/logger/audit/audit_log_test.go b/agent/logger/audit/audit_log_test.go new file mode 100644 index 00000000000..7a7f79df69c --- /dev/null +++ b/agent/logger/audit/audit_log_test.go @@ -0,0 +1,222 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package audit + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_infologger "github.com/aws/amazon-ecs-agent/agent/logger/audit/mocks" + "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + dummyContainerInstanceArn = "containerInstanceArn" + dummyCluster = "cluster" + dummyEventType = "someEvent" + dummyRemoteAddress = "rAddr" + dummyURL = "http://foo.com" + dummyURLPath + "?id=foo" + dummyURLPath = "/urlPath" + dummyURLV2 = "http://foo.com" + credentials.V2CredentialsPath + "/" + taskARN + dummyUserAgent = "userAgent" + dummyResponseCode = 400 + dummyRoleType = "TaskExecution" + taskARN = "task-arn-1" + + commonAuditLogEntryFieldCount = 6 + getCredentialsEntryFieldCount = 4 +) + +func TestWritingToAuditLog(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockInfoLogger := mock_infologger.NewMockInfoLogger(ctrl) + + req, _ := http.NewRequest("GET", "foo", nil) + req.RemoteAddr = dummyRemoteAddress + parsedURL, err := url.Parse(dummyURL) + if err != nil { + t.Fatal("error parsing dummyUrl") + } + req.URL = parsedURL + req.Header.Set("User-Agent", dummyUserAgent) + + cfg := &config.Config{ + Cluster: dummyCluster, + CredentialsAuditLogFile: "foo.txt", + } + + auditLogger := NewAuditLog(dummyContainerInstanceArn, cfg, mockInfoLogger) + assert.Equal(t, dummyCluster, auditLogger.GetCluster(), "Cluster is not initialized properly") + assert.Equal(t, dummyContainerInstanceArn, auditLogger.GetContainerInstanceArn(), "ContainerInstanceArn is not initialized properly") + + mockInfoLogger.EXPECT().Info(gomock.Any()).Do(func(logLine string) { + verifyAuditLogEntryResult(logLine, taskARN, dummyURLPath, t) + }) + + auditLogger.Log(request.LogRequest{Request: req, ARN: taskARN}, dummyResponseCode, 
GetCredentialsEventType(dummyRoleType)) +} + +func TestWritingToAuditLogV2(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockInfoLogger := mock_infologger.NewMockInfoLogger(ctrl) + + req, _ := http.NewRequest("GET", "foo", nil) + req.RemoteAddr = dummyRemoteAddress + parsedURL, err := url.Parse(dummyURLV2) + if err != nil { + t.Fatal("error parsing dummyUrl") + } + req.URL = parsedURL + req.Header.Set("User-Agent", dummyUserAgent) + + cfg := &config.Config{ + Cluster: dummyCluster, + CredentialsAuditLogFile: "foo.txt", + } + + auditLogger := NewAuditLog(dummyContainerInstanceArn, cfg, mockInfoLogger) + assert.Equal(t, dummyCluster, auditLogger.GetCluster(), "Cluster is not initialized properly") + assert.Equal(t, dummyContainerInstanceArn, auditLogger.GetContainerInstanceArn(), "ContainerInstanceArn is not initialized properly") + + mockInfoLogger.EXPECT().Info(gomock.Any()).Do(func(logLine string) { + verifyAuditLogEntryResult(logLine, taskARN, credentials.V2CredentialsPath, t) + }) + + auditLogger.Log(request.LogRequest{Request: req, ARN: taskARN}, dummyResponseCode, GetCredentialsEventType(dummyRoleType)) +} + +func TestWritingErrorsToAuditLog(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockInfoLogger := mock_infologger.NewMockInfoLogger(ctrl) + + req, _ := http.NewRequest("GET", "foo", nil) + req.RemoteAddr = dummyRemoteAddress + parsedURL, err := url.Parse(dummyURL) + if err != nil { + t.Fatal("error parsing dummyUrl") + } + req.URL = parsedURL + req.Header.Set("User-Agent", dummyUserAgent) + + cfg := &config.Config{ + Cluster: dummyCluster, + CredentialsAuditLogFile: "foo.txt", + } + + auditLogger := NewAuditLog(dummyContainerInstanceArn, cfg, mockInfoLogger) + assert.Equal(t, dummyCluster, auditLogger.GetCluster(), "Cluster is not initialized properly") + assert.Equal(t, dummyContainerInstanceArn, auditLogger.GetContainerInstanceArn(), "ContainerInstanceArn is not initialized properly") + + 
mockInfoLogger.EXPECT().Info(gomock.Any()).Do(func(logLine string) { + verifyAuditLogEntryResult(logLine, "-", dummyURLPath, t) + }) + + auditLogger.Log(request.LogRequest{Request: req, ARN: ""}, dummyResponseCode, GetCredentialsEventType(dummyRoleType)) +} + +func TestWritingToAuditLogWhenDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockInfoLogger := mock_infologger.NewMockInfoLogger(ctrl) + + req, _ := http.NewRequest("GET", "foo", nil) + + cfg := &config.Config{ + Cluster: dummyCluster, + CredentialsAuditLogFile: "foo.txt", + CredentialsAuditLogDisabled: true, + } + + auditLogger := NewAuditLog(dummyContainerInstanceArn, cfg, mockInfoLogger) + assert.Equal(t, dummyCluster, auditLogger.GetCluster(), "Cluster is not initialized properly") + assert.Equal(t, dummyContainerInstanceArn, auditLogger.GetContainerInstanceArn(), "ContainerInstanceArn is not initialized properly") + + mockInfoLogger.EXPECT().Info(gomock.Any()).Times(0) + + auditLogger.Log(request.LogRequest{Request: req, ARN: taskARN}, dummyResponseCode, GetCredentialsEventType(dummyRoleType)) +} + +func TestConstructCommonAuditLogEntryFields(t *testing.T) { + req, _ := http.NewRequest("GET", "foo", nil) + req.RemoteAddr = dummyRemoteAddress + parsedURL, err := url.Parse(dummyURL) + if err != nil { + t.Fatal("error parsing dummyUrl") + } + req.URL = parsedURL + req.Header.Set("User-Agent", dummyUserAgent) + + result := constructCommonAuditLogEntryFields(request.LogRequest{Request: req, ARN: taskARN}, dummyResponseCode) + + verifyCommonAuditLogEntryFieldResult(result, taskARN, dummyURLPath, t) +} + +func TestConstructAuditLogEntryByTypeGetCredentials(t *testing.T) { + result := constructAuditLogEntryByType(GetCredentialsEventType(dummyRoleType), dummyCluster, + dummyContainerInstanceArn) + verifyConstructAuditLogEntryGetCredentialsResult(result, t) +} + +func verifyAuditLogEntryResult(logLine string, expectedTaskArn string, expectedURLPath string, t *testing.T) { + 
tokens := strings.Split(logLine, " ") + assert.Equal(t, commonAuditLogEntryFieldCount+getCredentialsEntryFieldCount, len(tokens), "Incorrect number of tokens in audit log entry") + verifyCommonAuditLogEntryFieldResult(strings.Join(tokens[:commonAuditLogEntryFieldCount], " "), expectedTaskArn, expectedURLPath, t) + verifyConstructAuditLogEntryGetCredentialsResult(strings.Join(tokens[commonAuditLogEntryFieldCount:], " "), t) +} + +func verifyCommonAuditLogEntryFieldResult(result string, expectedTaskArn string, expectedURLPath string, t *testing.T) { + tokens := strings.Split(result, " ") + assert.Equal(t, commonAuditLogEntryFieldCount, len(tokens), "Incorrect number of tokens in common audit log entry") + + respCode, _ := strconv.Atoi(tokens[1]) + assert.Equal(t, dummyResponseCode, respCode, "response code does not match") + assert.Equal(t, dummyRemoteAddress, tokens[2], "remoted address does not match") + assert.Equal(t, fmt.Sprintf(`"%s"`, expectedURLPath), tokens[3], "URL path does not match") + assert.Equal(t, fmt.Sprintf(`"%s"`, dummyUserAgent), tokens[4], "User Agent does not match") + assert.Equal(t, expectedTaskArn, tokens[5], "ARN for credentials does not match") +} + +func verifyConstructAuditLogEntryGetCredentialsResult(result string, t *testing.T) { + tokens := strings.Split(result, " ") + + assert.Equal(t, getCredentialsEntryFieldCount, len(tokens), "Incorrect number of tokens in GetCredentials audit log entry") + assert.Equal(t, GetCredentialsEventType(dummyRoleType), tokens[0], "event type does not match") + + auditLogVersion, _ := strconv.Atoi(tokens[1]) + assert.Equal(t, getCredentialsAuditLogVersion, auditLogVersion, "version does not match") + assert.Equal(t, dummyCluster, tokens[2], "cluster does not match") + assert.Equal(t, dummyContainerInstanceArn, tokens[3], "containerInstanceArn does not match") +} + +func TestConstructAuditLogEntryByTypeUnknownType(t *testing.T) { + result := constructAuditLogEntryByType("unknownEvent", dummyCluster, 
dummyContainerInstanceArn) + assert.Equal(t, "", result, "unknown event type should not return an entry") +} diff --git a/agent/logger/audit/entry_types.go b/agent/logger/audit/entry_types.go new file mode 100644 index 00000000000..4ea7d81ed2e --- /dev/null +++ b/agent/logger/audit/entry_types.go @@ -0,0 +1,133 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package audit + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" + log "github.com/cihub/seelog" +) + +const ( + getCredentialsEventType = "GetCredentials" + getCredentialsTaskExecutionEventType = "GetCredentialsExecutionRole" + getCredentialsInvalidRoleTypeEventType = "GetCredentialsInvalidRoleType" + + // getCredentialsAuditLogVersion is the version of the audit log + // Version '1', the fields are: + // 1. event time + // 2. response code + // 3. source ip address + // 4. url + // 5. user agent + // 6. arn for the entity associated with credentials + // 7. event type ('GetCredentials') + // 8. version + // 9. cluster + // 10. container instance arn + + // Version '2', following fields were modified + // 7. 
event type ('GetCredentials, GetCredentialsExecutionRole') + + getCredentialsAuditLogVersion = 2 +) + +type commonAuditLogEntryFields struct { + eventTime string + responseCode int + srcAddr string + theURL string + userAgent string + arn string +} + +// GetCredentialsEventType is the type for a GetCredentials request +func GetCredentialsEventType(roleType string) string { + switch roleType { + case credentials.ApplicationRoleType: + return getCredentialsEventType + case credentials.ExecutionRoleType: + return getCredentialsTaskExecutionEventType + default: + return getCredentialsInvalidRoleTypeEventType + } +} + +func (c *commonAuditLogEntryFields) string() string { + return fmt.Sprintf("%s %d %s %s %s %s", c.eventTime, c.responseCode, c.srcAddr, c.theURL, c.userAgent, c.arn) +} + +type getCredentialsAuditLogEntryFields struct { + eventType string + version int + cluster string + containerInstanceArn string +} + +func (g *getCredentialsAuditLogEntryFields) string() string { + return fmt.Sprintf("%s %d %s %s", g.eventType, g.version, g.cluster, g.containerInstanceArn) +} + +func constructCommonAuditLogEntryFields(r request.LogRequest, httpResponseCode int) string { + httpRequest := r.Request + url := httpRequest.URL.Path + // V2CredentialsPath contains the credentials ID, which should not be logged + if strings.HasPrefix(url, credentials.V2CredentialsPath+"/") { + url = credentials.V2CredentialsPath + } + fields := &commonAuditLogEntryFields{ + eventTime: time.Now().UTC().Format(time.RFC3339), + responseCode: httpResponseCode, + srcAddr: populateField(httpRequest.RemoteAddr), + theURL: populateField(fmt.Sprintf(`"%s"`, url)), + userAgent: populateField(fmt.Sprintf(`"%s"`, httpRequest.UserAgent())), + arn: populateField(r.ARN), + } + return fields.string() +} + +func constructAuditLogEntryByType(eventType string, cluster string, containerInstanceArn string) string { + switch eventType { + case getCredentialsEventType: + fields := &getCredentialsAuditLogEntryFields{ 
+ eventType: eventType, + version: getCredentialsAuditLogVersion, + cluster: populateField(cluster), + containerInstanceArn: populateField(containerInstanceArn), + } + return fields.string() + case getCredentialsTaskExecutionEventType: + fields := &getCredentialsAuditLogEntryFields{ + eventType: eventType, + version: getCredentialsAuditLogVersion, + cluster: populateField(cluster), + containerInstanceArn: populateField(containerInstanceArn), + } + return fields.string() + default: + log.Warn(fmt.Sprintf("Unknown eventType: %s", eventType)) + return "" + } +} + +func populateField(logField string) string { + if logField == "" { + logField = "-" + } + return logField +} diff --git a/agent/logger/audit/generate_mocks.go b/agent/logger/audit/generate_mocks.go new file mode 100644 index 00000000000..fb39c5b4329 --- /dev/null +++ b/agent/logger/audit/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package audit + +//go:generate mockgen -destination=mocks/audit_log_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/logger/audit AuditLogger,InfoLogger diff --git a/agent/logger/audit/mocks/audit_log_mocks.go b/agent/logger/audit/mocks/audit_log_mocks.go new file mode 100644 index 00000000000..cf20f1e46d4 --- /dev/null +++ b/agent/logger/audit/mocks/audit_log_mocks.go @@ -0,0 +1,128 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/logger/audit (interfaces: AuditLogger,InfoLogger) + +// Package mock_audit is a generated GoMock package. +package mock_audit + +import ( + reflect "reflect" + + request "github.com/aws/amazon-ecs-agent/agent/logger/audit/request" + gomock "github.com/golang/mock/gomock" +) + +// MockAuditLogger is a mock of AuditLogger interface +type MockAuditLogger struct { + ctrl *gomock.Controller + recorder *MockAuditLoggerMockRecorder +} + +// MockAuditLoggerMockRecorder is the mock recorder for MockAuditLogger +type MockAuditLoggerMockRecorder struct { + mock *MockAuditLogger +} + +// NewMockAuditLogger creates a new mock instance +func NewMockAuditLogger(ctrl *gomock.Controller) *MockAuditLogger { + mock := &MockAuditLogger{ctrl: ctrl} + mock.recorder = &MockAuditLoggerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAuditLogger) EXPECT() *MockAuditLoggerMockRecorder { + return m.recorder +} + +// GetCluster mocks base method +func (m *MockAuditLogger) GetCluster() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCluster") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetCluster indicates an expected call of GetCluster +func (mr *MockAuditLoggerMockRecorder) GetCluster() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", 
reflect.TypeOf((*MockAuditLogger)(nil).GetCluster)) +} + +// GetContainerInstanceArn mocks base method +func (m *MockAuditLogger) GetContainerInstanceArn() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetContainerInstanceArn") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetContainerInstanceArn indicates an expected call of GetContainerInstanceArn +func (mr *MockAuditLoggerMockRecorder) GetContainerInstanceArn() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInstanceArn", reflect.TypeOf((*MockAuditLogger)(nil).GetContainerInstanceArn)) +} + +// Log mocks base method +func (m *MockAuditLogger) Log(arg0 request.LogRequest, arg1 int, arg2 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Log", arg0, arg1, arg2) +} + +// Log indicates an expected call of Log +func (mr *MockAuditLoggerMockRecorder) Log(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockAuditLogger)(nil).Log), arg0, arg1, arg2) +} + +// MockInfoLogger is a mock of InfoLogger interface +type MockInfoLogger struct { + ctrl *gomock.Controller + recorder *MockInfoLoggerMockRecorder +} + +// MockInfoLoggerMockRecorder is the mock recorder for MockInfoLogger +type MockInfoLoggerMockRecorder struct { + mock *MockInfoLogger +} + +// NewMockInfoLogger creates a new mock instance +func NewMockInfoLogger(ctrl *gomock.Controller) *MockInfoLogger { + mock := &MockInfoLogger{ctrl: ctrl} + mock.recorder = &MockInfoLoggerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockInfoLogger) EXPECT() *MockInfoLoggerMockRecorder { + return m.recorder +} + +// Info mocks base method +func (m *MockInfoLogger) Info(arg0 ...interface{}) { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Info", 
varargs...) +} + +// Info indicates an expected call of Info +func (mr *MockInfoLoggerMockRecorder) Info(arg0 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockInfoLogger)(nil).Info), arg0...) +} diff --git a/agent/logger/audit/request/request.go b/agent/logger/audit/request/request.go new file mode 100644 index 00000000000..ea9493c90db --- /dev/null +++ b/agent/logger/audit/request/request.go @@ -0,0 +1,21 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package request + +import "net/http" + +type LogRequest struct { + Request *http.Request + ARN string +} diff --git a/agent/logger/buffer_pool.go b/agent/logger/buffer_pool.go new file mode 100644 index 00000000000..633c635c0a2 --- /dev/null +++ b/agent/logger/buffer_pool.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "bytes" + "sync" +) + +var ( + bufferPool = &logBufferPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +) + +type logBufferPool struct { + pool *sync.Pool +} + +func (p *logBufferPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *logBufferPool) Get() *bytes.Buffer { + buf := p.pool.Get().(*bytes.Buffer) + buf.Reset() + return buf +} diff --git a/agent/logger/buffer_pool_test.go b/agent/logger/buffer_pool_test.go new file mode 100644 index 00000000000..367fc06c871 --- /dev/null +++ b/agent/logger/buffer_pool_test.go @@ -0,0 +1,32 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLogBufferPool(t *testing.T) { + buf1 := bufferPool.Get() + buf1.WriteString("test") + assert.NotNil(t, buf1) + bufferPool.Put(buf1) + buf2 := bufferPool.Get() + assert.NotNil(t, buf2) + assert.Empty(t, buf2.Bytes()) +} diff --git a/agent/logger/eventlog_windows.go b/agent/logger/eventlog_windows.go new file mode 100644 index 00000000000..9996f2207ac --- /dev/null +++ b/agent/logger/eventlog_windows.go @@ -0,0 +1,85 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import ( + "github.com/cihub/seelog" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc/eventlog" +) + +/* +TODO: Make this whole thing better + +What's here right now is a stub just so that agent logs can appear in the Event Log. Longer term, we should do a few +things to make this better: + +1) Don't use init() and a package-global variable +2) Conform to the MSDN guidelines about event log data. + +References to MSDN guidelines can be found here: +* https://msdn.microsoft.com/en-us/library/windows/desktop/aa363632(v=vs.85).aspx +* https://msdn.microsoft.com/en-us/library/windows/desktop/aa363648(v=vs.85).aspx +* https://msdn.microsoft.com/en-us/library/windows/desktop/aa363661(v=vs.85).aspx +* https://msdn.microsoft.com/en-us/library/windows/desktop/aa363669(v=vs.85).aspx +*/ + +const ( + eventLogName = "AmazonECSAgent" + eventLogID = 999 +) + +// eventLogReceiver fulfills the seelog.CustomReceiver interface +type eventLogReceiver struct{} + +var eventLog *eventlog.Log + +func init() { + eventlog.InstallAsEventCreate(eventLogName, windows.EVENTLOG_INFORMATION_TYPE|windows.EVENTLOG_WARNING_TYPE|windows.EVENTLOG_ERROR_TYPE) + var err error + eventLog, err = eventlog.Open(eventLogName) + if err != nil { + panic(err) + } +} + +// registerPlatformLogger registers the eventLogReceiver +func registerPlatformLogger() { + seelog.RegisterReceiver("wineventlog", &eventLogReceiver{}) +} + +// platformLogConfig exposes log configuration for the event log receiver +func platformLogConfig() string { + return ` + ` +} + +// ReceiveMessage receives a log line from seelog and emits it to 
the Windows event log +func (r *eventLogReceiver) ReceiveMessage(message string, level seelog.LogLevel, context seelog.LogContextInterface) error { + switch level { + case seelog.DebugLvl, seelog.InfoLvl: + return eventLog.Info(eventLogID, message) + case seelog.WarnLvl: + return eventLog.Warning(eventLogID, message) + case seelog.ErrorLvl, seelog.CriticalLvl: + return eventLog.Error(eventLogID, message) + } + return nil +} + +func (r *eventLogReceiver) AfterParse(initArgs seelog.CustomReceiverInitArgs) error { return nil } +func (r *eventLogReceiver) Flush() {} +func (r *eventLogReceiver) Close() error { return nil } diff --git a/agent/logger/field/constants.go b/agent/logger/field/constants.go new file mode 100644 index 00000000000..a056689c8da --- /dev/null +++ b/agent/logger/field/constants.go @@ -0,0 +1,32 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package field + +const ( + TaskARN = "taskARN" + Container = "container" + ManagedAgent = "managedAgent" + KnownStatus = "knownStatus" + DesiredStatus = "desiredStatus" + SentStatus = "sentStatus" + FailedStatus = "failedStatus" + Sequence = "sequence" + Reason = "reason" + Status = "status" + RuntimeID = "runtimeID" + Resource = "resource" + Error = "error" + Event = "event" + Image = "image" +) diff --git a/agent/logger/format.go b/agent/logger/format.go new file mode 100644 index 00000000000..777578b4b97 --- /dev/null +++ b/agent/logger/format.go @@ -0,0 +1,109 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/cihub/seelog" +) + +// This will be used as a trick for seelog formatter to identify messages formatted by our seelogMessageFormatter +const ( + structuredTxtFormatPrefix = "logger=structured " + structuredJsonFormatPrefix = `"logger":"structured",` +) + +type seelogMessageFormatter interface { + Format(message string, fields ...Fields) string +} + +type messageJsonFormatter struct { +} + +func (f *messageJsonFormatter) Format(message string, fields ...Fields) string { + var fieldsBuf *bytes.Buffer + if len(fields) > 0 { + fieldsBuf = bufferPool.Get() + defer bufferPool.Put(fieldsBuf) + enc := json.NewEncoder(fieldsBuf) + fieldsMap := make(map[string]interface{}) + for _, fi := range fields { + for k, v := range fi { + if vErr, ok := v.(error); ok { + fieldsMap[k] = vErr.Error() + } else { + fieldsMap[k] = v + } + } + } + if err := enc.Encode(fieldsMap); err != nil { + // fallback to default message in the event of any error + seelog.Debug(fmt.Errorf("failed to marshal message fields to JSON, %v", err)) + } else { + // Removing the trailing line break (https://golang.org/pkg/encoding/json/#Encoder.Encode) and the closing bracket + fieldsBuf.Truncate(fieldsBuf.Len() - 2) + } + } + + msgBuf := bufferPool.Get() + defer bufferPool.Put(msgBuf) + msgBuf.WriteString(structuredJsonFormatPrefix) + msgBuf.WriteString(`"msg":`) + msgBuf.WriteString(fmt.Sprintf("%q", message)) + if fieldsBuf != nil && fieldsBuf.Len() > 0 { + msgBuf.WriteByte(',') + msgBuf.Write(fieldsBuf.Bytes()[1:]) + } + return msgBuf.String() +} + +type messageTextFormatter struct { +} + +func (f *messageTextFormatter) Format(message string, fields ...Fields) string { + buf := bufferPool.Get() + defer bufferPool.Put(buf) + buf.WriteString(structuredTxtFormatPrefix) + buf.WriteString("msg=") + buf.WriteString(fmt.Sprintf("%q", message)) + for _, fi := range fields { + for k, v := range fi { + f.appendKeyValue(buf, k, v) + } + } 
+ return buf.String() +} + +func (f *messageTextFormatter) appendKeyValue(buf *bytes.Buffer, key string, value interface{}) { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + buf.WriteString(key) + buf.WriteByte('=') + + var strVal string + switch t := value.(type) { + case string: + strVal = fmt.Sprintf("%q", t) + case error: + strVal = fmt.Sprintf("%q", t.Error()) + default: + strVal = fmt.Sprintf("%+v", t) + } + + buf.WriteString(strVal) +} diff --git a/agent/logger/format_test.go b/agent/logger/format_test.go new file mode 100644 index 00000000000..11347118f41 --- /dev/null +++ b/agent/logger/format_test.go @@ -0,0 +1,67 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMessageTextFormatter_Format(t *testing.T) { + f := messageTextFormatter{} + result := f.Format("this is my message", Fields{ + "k1": "v1", + "k2": 2, + "k3": errors.New("the error"), + "k4": struct { + subK4_1 string + subK4_2 int + }{ + "subK4_1_Value", + 3, + }, + }) + assert.True(t, strings.HasPrefix(result, `logger=structured `), `expected result to begin with: logger=structured `) + assert.True(t, strings.Contains(result, `msg="this is my message" `), `expected result to start with: msg="this is my message"`) + assert.True(t, strings.Contains(result, `k1="v1"`), `expected result to contain: k1="v1"`) + assert.True(t, strings.Contains(result, `k2=2`), `expected result to contain: k2=2`) + assert.True(t, strings.Contains(result, `k3="the error"`), `expected result to contain: k3="the error"`) + assert.True(t, strings.Contains(result, `k4={subK4_1:subK4_1_Value subK4_2:3}`), `expected result to contain: k4={subK4_1:subK4_1_Value subK4_2:3}`) +} + +func TestMessageJsonFormatter_Format(t *testing.T) { + f := messageJsonFormatter{} + result := f.Format("this is my message", Fields{ + "k1": "v1", + "k2": 2, + "k3": errors.New("the error"), + "k4": struct { + SubK4_1 string `json:"subK4_1"` + SubK4_2 int `json:"subK4_2"` + }{ + "subK4_1_Value", + 3, + }, + }) + assert.True(t, strings.HasPrefix(result, `"logger":"structured",`), `expected result to begin with: "logger":"structured"`) + require.JSONEq(t, + `{"logger":"structured","k1":"v1","k2":2,"k3":"the error","k4":{"subK4_1":"subK4_1_Value","subK4_2":3},"msg":"this is my message"}`, + "{"+result+"}") +} diff --git a/agent/logger/generate_mocks.go b/agent/logger/generate_mocks.go new file mode 100644 index 00000000000..e3180852ca5 --- /dev/null +++ b/agent/logger/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +//go:generate mockgen -destination=mocks/logger_mocks.go -copyright_file=../../scripts/copyright_file github.com/cihub/seelog CustomReceiver diff --git a/agent/logger/global.go b/agent/logger/global.go new file mode 100644 index 00000000000..c88f2295872 --- /dev/null +++ b/agent/logger/global.go @@ -0,0 +1,74 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "sync" + + "github.com/cihub/seelog" +) + +var ( + loggerMux = &sync.RWMutex{} + globalStructuredLogger *structuredLogger +) + +func init() { + loggerMux.Lock() + defer loggerMux.Unlock() + globalStructuredLogger = newStructuredLogger(DEFAULT_OUTPUT_FORMAT) +} + +func setGlobalLogger(logger seelog.LoggerInterface, logFormat string) { + loggerMux.Lock() + defer loggerMux.Unlock() + seelog.ReplaceLogger(logger) + globalStructuredLogger = newStructuredLogger(logFormat) +} + +func getGlobalStructuredLogger() *structuredLogger { + loggerMux.RLock() + defer loggerMux.RUnlock() + return globalStructuredLogger +} + +func Trace(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Trace(message, fields...) +} + +func Debug(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Debug(message, fields...) +} + +func Info(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Info(message, fields...) +} + +func Warn(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Warn(message, fields...) +} + +func Error(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Error(message, fields...) +} + +func Critical(message string, fields ...Fields) { + l := getGlobalStructuredLogger() + l.Critical(message, fields...) +} diff --git a/agent/logger/global_test.go b/agent/logger/global_test.go new file mode 100644 index 00000000000..02d71d3b640 --- /dev/null +++ b/agent/logger/global_test.go @@ -0,0 +1,54 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import ( + "testing" + + "github.com/cihub/seelog" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + mock_seelog "github.com/aws/amazon-ecs-agent/agent/logger/mocks" +) + +func TestGlobal_Init(t *testing.T) { + gl := getGlobalStructuredLogger() + assert.NotNil(t, gl) + assert.Equal(t, defaultStructuredTextFormatter, gl.formatter) +} + +func TestSetGlobalLogger(t *testing.T) { + defer globalLoggerBackup()() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockReceiver := mock_seelog.NewMockCustomReceiver(ctrl) + mockReceiver.EXPECT().Flush().AnyTimes() + mockReceiver.EXPECT().Close().AnyTimes() + seeLog, err := seelog.LoggerFromCustomReceiver(mockReceiver) + require.NoError(t, err) + + prevGlobalStructuredLogger := getGlobalStructuredLogger() + setGlobalLogger(seeLog, jsonFmt) + + loggerMux.RLock() + defer loggerMux.RUnlock() + assert.Equal(t, seeLog, seelog.Current) + assert.NotNil(t, globalStructuredLogger) + assert.NotEqual(t, prevGlobalStructuredLogger, globalStructuredLogger) +} diff --git a/agent/logger/log.go b/agent/logger/log.go new file mode 100644 index 00000000000..76da69db1a3 --- /dev/null +++ b/agent/logger/log.go @@ -0,0 +1,287 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/cihub/seelog" +) + +const ( + LOGLEVEL_ENV_VAR = "ECS_LOGLEVEL" + LOGLEVEL_ON_INSTANCE_ENV_VAR = "ECS_LOGLEVEL_ON_INSTANCE" + LOGFILE_ENV_VAR = "ECS_LOGFILE" + LOG_DRIVER_ENV_VAR = "ECS_LOG_DRIVER" + LOG_ROLLOVER_TYPE_ENV_VAR = "ECS_LOG_ROLLOVER_TYPE" + LOG_OUTPUT_FORMAT_ENV_VAR = "ECS_LOG_OUTPUT_FORMAT" + LOG_MAX_FILE_SIZE_ENV_VAR = "ECS_LOG_MAX_FILE_SIZE_MB" + LOG_MAX_ROLL_COUNT_ENV_VAR = "ECS_LOG_MAX_ROLL_COUNT" + + logFmt = "logfmt" + jsonFmt = "json" + + DEFAULT_LOGLEVEL = "info" + DEFAULT_LOGLEVEL_WHEN_DRIVER_SET = "off" + DEFAULT_ROLLOVER_TYPE = "date" + DEFAULT_OUTPUT_FORMAT = logFmt + DEFAULT_MAX_FILE_SIZE float64 = 10 + DEFAULT_MAX_ROLL_COUNT int = 24 +) + +type logConfig struct { + RolloverType string + MaxRollCount int + MaxFileSizeMB float64 + logfile string + driverLevel string + instanceLevel string + outputFormat string + lock sync.Mutex +} + +var Config *logConfig + +func ecsMsgFormatter(params string) seelog.FormatterFunc { + return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} { + buf := bufferPool.Get() + defer bufferPool.Put(buf) + // temporary measure to make this change backwards compatible as we update to structured logs + if strings.HasPrefix(message, structuredTxtFormatPrefix) { + message = strings.TrimPrefix(message, structuredTxtFormatPrefix) + buf.WriteString(message) + } else if strings.HasPrefix(message, structuredJsonFormatPrefix) { + message = strings.TrimPrefix(message, structuredJsonFormatPrefix) + message = strings.TrimRight(message, ",") + buf.WriteByte('{') + buf.WriteString(message) + buf.WriteByte('}') + } else { + buf.WriteString(message) + } + return buf.String() + } +} + +func logfmtFormatter(params string) seelog.FormatterFunc { + return func(message string, level 
seelog.LogLevel, context seelog.LogContextInterface) interface{} { + buf := bufferPool.Get() + defer bufferPool.Put(buf) + buf.WriteString("level=") + buf.WriteString(level.String()) + buf.WriteByte(' ') + buf.WriteString("time=") + buf.WriteString(context.CallTime().UTC().Format(time.RFC3339)) + buf.WriteByte(' ') + // temporary measure to make this change backwards compatible as we update to structured logs + if strings.HasPrefix(message, structuredTxtFormatPrefix) { + message = strings.TrimPrefix(message, structuredTxtFormatPrefix) + buf.WriteString(message) + } else { + buf.WriteString("msg=") + buf.WriteString(fmt.Sprintf("%q", message)) + buf.WriteByte(' ') + buf.WriteString("module=") + buf.WriteString(context.FileName()) + } + buf.WriteByte('\n') + return buf.String() + } +} + +func jsonFormatter(params string) seelog.FormatterFunc { + return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} { + buf := bufferPool.Get() + defer bufferPool.Put(buf) + buf.WriteString(`{"level":"`) + buf.WriteString(level.String()) + buf.WriteString(`","time":"`) + buf.WriteString(context.CallTime().UTC().Format(time.RFC3339)) + buf.WriteString(`",`) + // temporary measure to make this change backwards compatible as we update to structured logs + if strings.HasPrefix(message, structuredJsonFormatPrefix) { + message = strings.TrimPrefix(message, structuredJsonFormatPrefix) + message = strings.TrimRight(message, ",") + buf.WriteString(message) + buf.WriteByte('}') + } else { + buf.WriteString(`"msg":`) + buf.WriteString(fmt.Sprintf("%q", message)) + buf.WriteString(`,"module":"`) + buf.WriteString(context.FileName()) + buf.WriteString(`"}`) + } + buf.WriteByte('\n') + return buf.String() + } +} + +func reloadConfig() { + logger, err := seelog.LoggerFromConfigAsString(seelogConfig()) + if err != nil { + seelog.Error(err) + return + } + setGlobalLogger(logger, Config.outputFormat) +} + +func seelogConfig() string { + c := ` + + + + ` + c += 
platformLogConfig() + c += ` + ` + if Config.logfile != "" { + c += ` + ` + if Config.RolloverType == "size" { + c += ` + ` + } else { + c += ` + ` + } + c += ` + ` + } + c += ` + + + + + + +` + + return c +} + +func getLevelList(fileLevel string) string { + levelLists := map[string]string{ + "debug": "debug,info,warn,error,critical", + "info": "info,warn,error,critical", + "warn": "warn,error,critical", + "error": "error,critical", + "critical": "critical", + "off": "off", + } + return levelLists[fileLevel] +} + +// SetLevel sets the log levels for logging +func SetLevel(driverLogLevel, instanceLogLevel string) { + levels := map[string]string{ + "debug": "debug", + "info": "info", + "warn": "warn", + "error": "error", + "crit": "critical", + "none": "off", + } + + parsedDriverLevel, driverOk := levels[strings.ToLower(driverLogLevel)] + parsedInstanceLevel, instanceOk := levels[strings.ToLower(instanceLogLevel)] + + if instanceOk || driverOk { + Config.lock.Lock() + defer Config.lock.Unlock() + if instanceOk { + Config.instanceLevel = parsedInstanceLevel + } + if driverOk { + Config.driverLevel = parsedDriverLevel + } + reloadConfig() + } +} + +// GetLevel gets the log level +func GetLevel() string { + Config.lock.Lock() + defer Config.lock.Unlock() + + return Config.driverLevel +} + +func setInstanceLevelDefault() string { + if logDriver := os.Getenv(LOG_DRIVER_ENV_VAR); logDriver != "" { + return DEFAULT_LOGLEVEL_WHEN_DRIVER_SET + } + if loglevel := os.Getenv(LOGLEVEL_ENV_VAR); loglevel != "" { + return loglevel + } + return DEFAULT_LOGLEVEL +} + +func init() { + Config = &logConfig{ + logfile: os.Getenv(LOGFILE_ENV_VAR), + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: setInstanceLevelDefault(), + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } +} + +func InitSeelog() { + if err := seelog.RegisterCustomFormatter("EcsAgentLogfmt", logfmtFormatter); err 
!= nil { + seelog.Error(err) + } + if err := seelog.RegisterCustomFormatter("EcsAgentJson", jsonFormatter); err != nil { + seelog.Error(err) + } + if err := seelog.RegisterCustomFormatter("EcsMsg", ecsMsgFormatter); err != nil { + seelog.Error(err) + } + + SetLevel(os.Getenv(LOGLEVEL_ENV_VAR), os.Getenv(LOGLEVEL_ON_INSTANCE_ENV_VAR)) + + if RolloverType := os.Getenv(LOG_ROLLOVER_TYPE_ENV_VAR); RolloverType != "" { + Config.RolloverType = RolloverType + } + if outputFormat := os.Getenv(LOG_OUTPUT_FORMAT_ENV_VAR); outputFormat != "" { + Config.outputFormat = outputFormat + } + if MaxRollCount := os.Getenv(LOG_MAX_ROLL_COUNT_ENV_VAR); MaxRollCount != "" { + i, err := strconv.Atoi(MaxRollCount) + if err == nil { + Config.MaxRollCount = i + } else { + seelog.Error("Invalid value for "+LOG_MAX_ROLL_COUNT_ENV_VAR, err) + } + } + if MaxFileSizeMB := os.Getenv(LOG_MAX_FILE_SIZE_ENV_VAR); MaxFileSizeMB != "" { + f, err := strconv.ParseFloat(MaxFileSizeMB, 64) + if err == nil { + Config.MaxFileSizeMB = f + } else { + seelog.Error("Invalid value for "+LOG_MAX_FILE_SIZE_ENV_VAR, err) + } + } + + registerPlatformLogger() + reloadConfig() +} diff --git a/agent/logger/log_init_test.go b/agent/logger/log_init_test.go new file mode 100644 index 00000000000..dc5e6187100 --- /dev/null +++ b/agent/logger/log_init_test.go @@ -0,0 +1,23 @@ +// +build unit integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +func init() { + // We don't want to call InitSeelog automatically (e.g. calling it from init()) + // because some internal clients use ECS Agent as a library. + // However we want to call InitSeelog automatically from all tests. + InitSeelog() +} diff --git a/agent/logger/log_test.go b/agent/logger/log_test.go new file mode 100644 index 00000000000..25a57c3b59c --- /dev/null +++ b/agent/logger/log_test.go @@ -0,0 +1,269 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "os" + "testing" + "time" + + "github.com/cihub/seelog" + "github.com/stretchr/testify/require" +) + +func TestEcsMsgFormat(t *testing.T) { + logfmt := ecsMsgFormatter("") + out := logfmt("This is my log message", seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, "This is my log message", s) +} + +func TestEcsMsgFormat_Structured(t *testing.T) { + logfmt := ecsMsgFormatter("") + fm := defaultStructuredTextFormatter.Format("This is my log message") + out := logfmt(fm, seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, `msg="This is my log message"`, s) + + fm = defaultStructuredJsonFormatter.Format("This is my log message") + out = logfmt(fm, seelog.InfoLvl, &LogContextMock{}) + s, ok = out.(string) + require.True(t, ok) + require.JSONEq(t, `{"msg":"This is my log message"}`, s) +} + +func TestLogfmtFormat(t *testing.T) { + logfmt := logfmtFormatter("") + out := logfmt("This is my log message", seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, `level=info time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go +`, s) +} + +func TestLogfmtFormat_Structured(t *testing.T) { + logfmt := logfmtFormatter("") + fm := defaultStructuredTextFormatter.Format("This is my log message") + out := logfmt(fm, seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, "level=info time=2018-10-01T01:02:03Z msg=\"This is my log message\"\n", s) +} + +func TestJSONFormat(t *testing.T) { + jsonF := jsonFormatter("") + out := jsonF("This is my log message", seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.JSONEq(t, `{"level": "info", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s) +} + +func TestJSONFormat_Structured(t *testing.T) { + jsonF := 
jsonFormatter("") + fm := defaultStructuredJsonFormatter.Format(`This is my log message "escaped"`) + out := jsonF(fm, seelog.InfoLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.JSONEq(t, `{"level": "info", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message \"escaped\""}`, s) +} + +func TestLogfmtFormat_debug(t *testing.T) { + logfmt := logfmtFormatter("") + out := logfmt("This is my log message", seelog.DebugLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, `level=debug time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go +`, s) +} + +func TestLogfmtFormat_Structured_debug(t *testing.T) { + logfmt := logfmtFormatter("") + fm := defaultStructuredTextFormatter.Format("This is my log message") + out := logfmt(fm, seelog.DebugLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.Equal(t, `level=debug time=2018-10-01T01:02:03Z msg="This is my log message" +`, s) +} + +func TestJSONFormat_debug(t *testing.T) { + jsonF := jsonFormatter("") + out := jsonF("This is my log message", seelog.DebugLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.JSONEq(t, `{"level": "debug", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s) +} + +func TestJSONFormat_Structured_debug(t *testing.T) { + jsonF := jsonFormatter("") + fm := defaultStructuredJsonFormatter.Format("This is my log message") + out := jsonF(fm, seelog.DebugLvl, &LogContextMock{}) + s, ok := out.(string) + require.True(t, ok) + require.JSONEq(t, `{"level": "debug", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message"}`, s) +} + +func TestSetLevel(t *testing.T) { + resetEnv := func() { + os.Unsetenv(LOGLEVEL_ENV_VAR) + os.Unsetenv(LOGLEVEL_ON_INSTANCE_ENV_VAR) + os.Unsetenv(LOG_DRIVER_ENV_VAR) + } + resetEnv() + + testcases := []struct { + name string + logDriver string + loglevel string + 
loglevelInstance string + expectedLoglevel string + expectedLoglevelInstance string + }{ + { + name: "nothing set", + logDriver: "", + loglevel: "", + loglevelInstance: "", + expectedLoglevel: "info", + expectedLoglevelInstance: "info", + }, + { + name: "only loglevel", + logDriver: "", + loglevel: "debug", + loglevelInstance: "", + expectedLoglevel: "debug", + expectedLoglevelInstance: "debug", + }, + { + name: "only on instance", + logDriver: "", + loglevel: "", + loglevelInstance: "debug", + expectedLoglevel: "info", + expectedLoglevelInstance: "debug", + }, + { + name: "both levels no driver", + logDriver: "", + loglevel: "warn", + loglevelInstance: "crit", + expectedLoglevel: "warn", + expectedLoglevelInstance: "critical", + }, + { + name: "loglevel and driver", + logDriver: "journald", + loglevel: "debug", + loglevelInstance: "", + expectedLoglevel: "debug", + expectedLoglevelInstance: "off", + }, + { + name: "loglevel on instance and driver", + logDriver: "journald", + loglevel: "", + loglevelInstance: "debug", + expectedLoglevel: "info", + expectedLoglevelInstance: "debug", + }, + { + name: "both levels and driver", + logDriver: "journald", + loglevel: "warn", + loglevelInstance: "debug", + expectedLoglevel: "warn", + expectedLoglevelInstance: "debug", + }, + { + name: "only driver", + logDriver: "journald", + loglevel: "", + loglevelInstance: "", + expectedLoglevel: "info", + expectedLoglevelInstance: "off", + }, + } + + for _, test := range testcases { + t.Run(test.name, func(t *testing.T) { + defer resetEnv() + + os.Setenv(LOGLEVEL_ENV_VAR, test.loglevel) + os.Setenv(LOGLEVEL_ON_INSTANCE_ENV_VAR, test.loglevelInstance) + os.Setenv(LOG_DRIVER_ENV_VAR, test.logDriver) + + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: setInstanceLevelDefault(), + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + 
SetLevel(os.Getenv(LOGLEVEL_ENV_VAR), os.Getenv(LOGLEVEL_ON_INSTANCE_ENV_VAR)) + require.Equal(t, test.expectedLoglevel, Config.driverLevel) + require.Equal(t, test.expectedLoglevelInstance, Config.instanceLevel) + }) + } +} + +type LogContextMock struct{} + +// Caller's function name. +func (l *LogContextMock) Func() string { + return "" +} + +// Caller's line number. +func (l *LogContextMock) Line() int { + return 0 +} + +// Caller's file short path (in slashed form). +func (l *LogContextMock) ShortPath() string { + return "" +} + +// Caller's file full path (in slashed form). +func (l *LogContextMock) FullPath() string { + return "" +} + +// Caller's file name (without path). +func (l *LogContextMock) FileName() string { + return "mytestmodule.go" +} + +// True if the context is correct and may be used. +// If false, then an error in context evaluation occurred and +// all its other data may be corrupted. +func (l *LogContextMock) IsValid() bool { + return true +} + +// Time when log function was called. +func (l *LogContextMock) CallTime() time.Time { + return time.Date(2018, time.October, 1, 1, 2, 3, 0, time.UTC) +} + +// Custom context that can be set by calling logger.SetContext +func (l *LogContextMock) CustomContext() interface{} { + return map[string]string{} +} diff --git a/agent/logger/log_unix_test.go b/agent/logger/log_unix_test.go new file mode 100644 index 00000000000..75bcd15cca3 --- /dev/null +++ b/agent/logger/log_unix_test.go @@ -0,0 +1,321 @@ +// +build !windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSeelogConfig_Default(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_WithoutLogFile(t *testing.T) { + Config = &logConfig{ + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_DebugLevel(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: "debug", + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_SizeRollover(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_SizeRolloverFileSizeChange(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + 
outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: 15, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_SizeRolloverRollCountChange(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: 15, + MaxRollCount: 10, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_JSONOutput(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: "json", + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: 10, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_NoOnInstanceLog(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL_WHEN_DRIVER_SET, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_DifferentLevels(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: "warn", + instanceLevel: "critical", + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfig_FileLevelDefault(t *testing.T) { + os.Setenv(LOG_DRIVER_ENV_VAR, "awslogs") + defer os.Unsetenv(LOG_DRIVER_ENV_VAR) + + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: 
setInstanceLevelDefault(), + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + +`, c) +} diff --git a/agent/logger/log_windows_test.go b/agent/logger/log_windows_test.go new file mode 100644 index 00000000000..acc871a722f --- /dev/null +++ b/agent/logger/log_windows_test.go @@ -0,0 +1,331 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSeelogConfigWindows_Default(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_WithoutLogFile(t *testing.T) { + Config = &logConfig{ + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_DebugLevel(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: "debug", + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_SizeRollover(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_SizeRolloverFileSizeChange(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: 15, + 
MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_SizeRolloverRollCountChange(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: "size", + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: 15, + MaxRollCount: 10, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_JSONOutput(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: "json", + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: 10, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_NoOnInstanceLog(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: DEFAULT_LOGLEVEL_WHEN_DRIVER_SET, + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_DifferentLevels(t *testing.T) { + Config = &logConfig{ + logfile: "foo.log", + driverLevel: "warn", + instanceLevel: "critical", + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} + +func TestSeelogConfigWindows_FileLevelDefault(t *testing.T) { + os.Setenv(LOG_DRIVER_ENV_VAR, "awslogs") + defer os.Unsetenv(LOG_DRIVER_ENV_VAR) + + Config = &logConfig{ + logfile: "foo.log", + driverLevel: DEFAULT_LOGLEVEL, + instanceLevel: 
setInstanceLevelDefault(), + RolloverType: DEFAULT_ROLLOVER_TYPE, + outputFormat: DEFAULT_OUTPUT_FORMAT, + MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE, + MaxRollCount: DEFAULT_MAX_ROLL_COUNT, + } + c := seelogConfig() + require.Equal(t, ` + + + + + + + + + + + + + + + +`, c) +} diff --git a/agent/logger/mocks/logger_mocks.go b/agent/logger/mocks/logger_mocks.go new file mode 100644 index 00000000000..1970b6fe491 --- /dev/null +++ b/agent/logger/mocks/logger_mocks.go @@ -0,0 +1,103 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/cihub/seelog (interfaces: CustomReceiver) + +// Package mock_seelog is a generated GoMock package. 
+package mock_seelog + +import ( + reflect "reflect" + + seelog "github.com/cihub/seelog" + gomock "github.com/golang/mock/gomock" +) + +// MockCustomReceiver is a mock of CustomReceiver interface +type MockCustomReceiver struct { + ctrl *gomock.Controller + recorder *MockCustomReceiverMockRecorder +} + +// MockCustomReceiverMockRecorder is the mock recorder for MockCustomReceiver +type MockCustomReceiverMockRecorder struct { + mock *MockCustomReceiver +} + +// NewMockCustomReceiver creates a new mock instance +func NewMockCustomReceiver(ctrl *gomock.Controller) *MockCustomReceiver { + mock := &MockCustomReceiver{ctrl: ctrl} + mock.recorder = &MockCustomReceiverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCustomReceiver) EXPECT() *MockCustomReceiverMockRecorder { + return m.recorder +} + +// AfterParse mocks base method +func (m *MockCustomReceiver) AfterParse(arg0 seelog.CustomReceiverInitArgs) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AfterParse", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// AfterParse indicates an expected call of AfterParse +func (mr *MockCustomReceiverMockRecorder) AfterParse(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterParse", reflect.TypeOf((*MockCustomReceiver)(nil).AfterParse), arg0) +} + +// Close mocks base method +func (m *MockCustomReceiver) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close +func (mr *MockCustomReceiverMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCustomReceiver)(nil).Close)) +} + +// Flush mocks base method +func (m *MockCustomReceiver) Flush() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Flush") +} + +// Flush indicates an expected 
call of Flush +func (mr *MockCustomReceiverMockRecorder) Flush() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockCustomReceiver)(nil).Flush)) +} + +// ReceiveMessage mocks base method +func (m *MockCustomReceiver) ReceiveMessage(arg0 string, arg1 seelog.LogLevel, arg2 seelog.LogContextInterface) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReceiveMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReceiveMessage indicates an expected call of ReceiveMessage +func (mr *MockCustomReceiverMockRecorder) ReceiveMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReceiveMessage", reflect.TypeOf((*MockCustomReceiver)(nil).ReceiveMessage), arg0, arg1, arg2) +} diff --git a/agent/logger/platform_unix.go b/agent/logger/platform_unix.go new file mode 100644 index 00000000000..8dabff5d776 --- /dev/null +++ b/agent/logger/platform_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +// registerPlatformLogger does nothing on Linux +func registerPlatformLogger() {} + +// platformLogConfig does nothing on Linux +func platformLogConfig() string { return "" } diff --git a/agent/logger/structured_logger.go b/agent/logger/structured_logger.go new file mode 100644 index 00000000000..05126f52a0f --- /dev/null +++ b/agent/logger/structured_logger.go @@ -0,0 +1,76 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package logger + +import "github.com/cihub/seelog" + +var ( + defaultStructuredJsonFormatter = &messageJsonFormatter{} + defaultStructuredTextFormatter = &messageTextFormatter{} +) + +type Fields map[string]interface{} + +func (f Fields) Merge(f2 Fields) { + for k, v := range f2 { + f[k] = v + } +} + +type StructuredLogger interface { + Trace(message string, fields ...Fields) + Debug(message string, fields ...Fields) + Info(message string, fields ...Fields) + Warn(message string, fields ...Fields) + Error(message string, fields ...Fields) + Critical(message string, fields ...Fields) +} + +func newStructuredLogger(logFormat string) *structuredLogger { + l := &structuredLogger{} + if logFormat == jsonFmt { + l.formatter = defaultStructuredJsonFormatter + } else { + l.formatter = defaultStructuredTextFormatter + } + return l +} + +type structuredLogger struct { + formatter seelogMessageFormatter +} + +func (l *structuredLogger) Trace(message string, fields ...Fields) { + 
seelog.Trace(l.formatter.Format(message, fields...)) +} + +func (l *structuredLogger) Debug(message string, fields ...Fields) { + seelog.Debug(l.formatter.Format(message, fields...)) +} + +func (l *structuredLogger) Info(message string, fields ...Fields) { + seelog.Info(l.formatter.Format(message, fields...)) +} + +func (l *structuredLogger) Warn(message string, fields ...Fields) { + seelog.Warn(l.formatter.Format(message, fields...)) +} + +func (l *structuredLogger) Error(message string, fields ...Fields) { + seelog.Error(l.formatter.Format(message, fields...)) +} + +func (l *structuredLogger) Critical(message string, fields ...Fields) { + seelog.Critical(l.formatter.Format(message, fields...)) +} diff --git a/agent/logger/structured_logger_test.go b/agent/logger/structured_logger_test.go new file mode 100644 index 00000000000..c255a42a31a --- /dev/null +++ b/agent/logger/structured_logger_test.go @@ -0,0 +1,151 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package logger + +import ( + "os" + "testing" + + "github.com/cihub/seelog" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + mock_seelog "github.com/aws/amazon-ecs-agent/agent/logger/mocks" +) + +func init() { + format := os.Getenv(LOG_OUTPUT_FORMAT_ENV_VAR) + if format == "" { + os.Setenv(LOG_OUTPUT_FORMAT_ENV_VAR, logFmt) + } + reloadConfig() +} + +func globalLoggerBackup() func() { + loggerMux.RLock() + seeLoggerBkp := seelog.Current + structuredLoggerBkp := globalStructuredLogger + loggerMux.RUnlock() + + return func() { + loggerMux.Lock() + defer loggerMux.Unlock() + seelog.ReplaceLogger(seeLoggerBkp) + globalStructuredLogger = structuredLoggerBkp + } +} + +func TestNewStructuredLogger(t *testing.T) { + sl := newStructuredLogger(logFmt) + assert.Equal(t, defaultStructuredTextFormatter, sl.formatter) + + sl = newStructuredLogger(jsonFmt) + assert.Equal(t, defaultStructuredJsonFormatter, sl.formatter) +} + +// note: this also tests the global structured logger functions +func TestStructuredLogger(t *testing.T) { + defer globalLoggerBackup()() + sl := newStructuredLogger(logFmt) + for _, tc := range []struct { + expectedLevel seelog.LogLevel + funcToTest []func(string, ...Fields) + }{ + { + seelog.LogLevel(seelog.TraceLvl), + []func(string, ...Fields){ + sl.Trace, + Trace, + }, + }, + { + seelog.LogLevel(seelog.DebugLvl), + []func(string, ...Fields){ + sl.Debug, + Debug, + }, + }, + {seelog.LogLevel(seelog.InfoLvl), + []func(string, ...Fields){ + sl.Info, + Info, + }, + }, + {seelog.LogLevel(seelog.WarnLvl), + []func(string, ...Fields){ + sl.Warn, + Warn, + }, + }, + {seelog.LogLevel(seelog.ErrorLvl), + []func(string, ...Fields){ + sl.Error, + Error, + }, + }, + {seelog.LogLevel(seelog.CriticalLvl), + []func(string, ...Fields){ + sl.Critical, + Critical, + }, + }, + } { + t.Run(tc.expectedLevel.String(), func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + 
mockReceiver := mock_seelog.NewMockCustomReceiver(ctrl) + + seeLog, err := seelog.LoggerFromCustomReceiver(mockReceiver) + require.NoError(t, err) + setGlobalLogger(seeLog, logFmt) + + mockReceiver.EXPECT().ReceiveMessage( + `logger=structured msg="Live long and prosper 🖖🏼" f1="field value with \"quotation\" marks"`, + tc.expectedLevel, + gomock.Any(), + ).Times(2) + mockReceiver.EXPECT().Flush().AnyTimes() + mockReceiver.EXPECT().Close().AnyTimes() + + for _, logfunc := range tc.funcToTest { + logfunc("Live long and prosper 🖖🏼", Fields{ + "f1": `field value with "quotation" marks`, + }) + } + }) + } +} + +func BenchmarkStructuredLogger_Info(b *testing.B) { + b.ResetTimer() + for n := 0; n < b.N; n++ { + Info("Live long and prosper 🖖🏼", Fields{ + "field1": "field value 1", + "field2": "field value 2", + "field3": "field value 3", + }) + } +} + +func BenchmarkSeelog_Info(b *testing.B) { + for n := 0; n < b.N; n++ { + seelog.Infof("Live long and prosper 🖖🏼 %s=%s %s=%s %s=%s", + "field1", "field value 1", + "field2", "field value 2", + "field3", "field value 3") + } +} diff --git a/agent/metrics/generic_metrics_client.go b/agent/metrics/generic_metrics_client.go new file mode 100644 index 00000000000..7aab5a633b8 --- /dev/null +++ b/agent/metrics/generic_metrics_client.go @@ -0,0 +1,130 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package metrics + +import ( + "context" + "crypto/md5" + "fmt" + "math/rand" + "strconv" + "sync" + "time" + + "github.com/cihub/seelog" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + callTimeout = 2 * time.Minute +) + +// A GenericMetricsClient records 3 metrics: +// 1) A Prometheus summary vector representing call durations for different API calls +// 2) A durations gauge vector that updates the last recorded duration for the API call +// allowing for a time series view in the Prometheus browser +// 3) A counter vector that increments call counts for each API call +// The outstandingCalls map allows Fired CallStarts to be matched with Fired CallEnds +type GenericMetrics struct { + durationVec *prometheus.SummaryVec + durations *prometheus.GaugeVec + counterVec *prometheus.CounterVec + lock sync.RWMutex + outstandingCalls map[string]time.Time +} + +func Init() { + rand.Seed(time.Now().UnixNano()) +} + +// This function creates a hash of the callName and a randomly generated number. +// The hash serves to match the call invocation with the call's end, which is +// required to compute the call duration. The final metric is observed when the +// FireCallEnd function is called. 
+// If callID is empty, then we initiate the call with FireCallStart +// We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER +// the FireCallStart (because these are done in separate go routines) +func (gm *GenericMetrics) RecordCall(callID, callName string, callTime time.Time, callStarted chan bool) string { + if callID == "" { + hashData := []byte("GenericMetrics-" + callName + strconv.FormatFloat(float64(rand.Float32()), 'f', -1, 32)) + hash := fmt.Sprintf("%x", md5.Sum(hashData)) + // Go routines are utilized to avoid blocking the main thread in case of + // resource contention with var outstandingCalls + go gm.FireCallStart(hash, callName, callTime, callStarted) + go gm.IncrementCallCount(callName) + return hash + } else { + go gm.FireCallEnd(callID, callName, callTime, callStarted) + return "" + } +} + +func (gm *GenericMetrics) FireCallStart(callHash, callName string, timestamp time.Time, callStarted chan bool) { + gm.lock.Lock() + defer gm.lock.Unlock() + // Check map to see if call is outstanding, otherwise, store in map + if _, found := gm.outstandingCalls[callHash]; !found { + gm.outstandingCalls[callHash] = timestamp + } else { + seelog.Errorf("Call is already outstanding: %s", callName) + } + callStarted <- true +} + +func (gm *GenericMetrics) FireCallEnd(callHash, callName string, timestamp time.Time, callStarted chan bool) { + defer func() { + if r := recover(); r != nil { + seelog.Errorf("FireCallEnd for %s panicked. 
Recovering quietly: %s", callName, r) + } + }() + // We will block until the FireCallStart complement has completed + ctx, cancel := context.WithTimeout(context.Background(), callTimeout) + defer cancel() + select { + case <-ctx.Done(): + seelog.Errorf("FireCallEnd timed out with %s", callName) + gm.lock.Lock() + delete(gm.outstandingCalls, callHash) + gm.lock.Unlock() + return + case <-callStarted: + break + } + + gm.lock.Lock() + defer gm.lock.Unlock() + // Check map to see if call is outstanding and calculate duration + if timeStart, found := gm.outstandingCalls[callHash]; found { + seconds := timestamp.Sub(timeStart) + gm.durationVec.WithLabelValues(callName).Observe(seconds.Seconds()) + gm.durations.WithLabelValues(callName).Set(seconds.Seconds()) + delete(gm.outstandingCalls, callHash) + } else { + seelog.Errorf("Call is not outstanding: %s", callName) + } +} + +// This function increments the call count for a specific API call +// This is invoked at the API call's start, whereas the duration metrics +// are updated at the API call's end. +func (gm *GenericMetrics) IncrementCallCount(callName string) { + defer func() { + if r := recover(); r != nil { + seelog.Errorf("IncrementCallCount for %s panicked. Recovering quietly: %s", callName, r) + } + }() + gm.lock.Lock() + defer gm.lock.Unlock() + gm.counterVec.WithLabelValues(callName).Inc() +} diff --git a/agent/metrics/interface.go b/agent/metrics/interface.go new file mode 100644 index 00000000000..c7ef2ea4c9f --- /dev/null +++ b/agent/metrics/interface.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package metrics + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" +) + +// MetricsClient defines the behavior for any Client that uses the +// MetricsEngine_ interface to collect metrics for Prometheus. +// Clients should all have the same type of metrics collected +// (Durations, counts, etc.) +// In particular, the API calls we monitor in the Agent code (TaskEngine, +// Docker, ECSClient, etc.) utilize call durations and counts which are +// encapsulated in the GenericMetricsClient. +// This interface is extensible for future clients that require different +// collection behaviors. +type MetricsClient interface { + // RecordCall is responsible for accepting a call ID, call Name, + // and timestamp for the call. + // The specific behavior of RecordCall is dependent on the type of client + // that is used. It is the responsibility of the MetricsEngine to call the + // appropriate RecordCall(...) method. + // This method is the defining function for this interface. + // We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER + // the FireCallStart (because these are done in separate go routines) + RecordCall(string, string, time.Time, chan bool) string + + // In order to record call duration, we must fire a method's start and end + // at different times (once at the beginning and once at the end). Ideally, + // RecordCall should handle this through an execution and returned function + // to be deferred (see docker_client.go for usage examples) + FireCallStart(string, string, time.Time, chan bool) + FireCallEnd(string, string, time.Time, chan bool) + + // This function will increment the call count. 
The metric for call duration + // should ideally have the accurate call count as well, but the Prometheus + // summary metric is updated for count when the FireCallEnd(...) function is + // called. When a FireCallStart(...) is called but its FireCallEnd(...) is not, + // we will see a discrepancy between the Summary's count and this Gauge count + IncrementCallCount(string) +} + +// MetricsEngine_ is an interface that drives metric collection over +// all existing MetricsClients. The APIType parameter corresponds to +// the type of MetricsClient RecordCall(...) function that will be used. +// The MetricsEngine_ should be instantiated at Agent startup time with +// Agent configurations and context passed as parameters +type MetricsEngine_ interface { + // This init function initializes the Global MetricsEngine variable that + // can be accessed throughout the Agent codebase on packages that the + // metrics package does not depend on (cyclic dependencies not allowed). + MustInit(*config.Config) + + // This function calls a specific APIType's Client's RecordCall(...) method. + // As discussed in the comments for MetricsClient, different Clients have + // have different RecordCall(...) behaviors. Wrapper functions for this + // function should be created like recordGenericMetric(...) in metrics_engine.go + // to record 2 metrics (function invokation and deferred function end). + recordMetric(APIType, string, string) + + // publishMetrics is used to listen on defined ports on localhost + // for incoming requests to expose metrics. An externally run Prometheus + // server can then 'scrape' the exposed metrics for collection and storage + publishMetrics() +} diff --git a/agent/metrics/metrics_engine.go b/agent/metrics/metrics_engine.go new file mode 100644 index 00000000000..b0e0c73113f --- /dev/null +++ b/agent/metrics/metrics_engine.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package metrics + +import ( + "fmt" + "net/http" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/cihub/seelog" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type APIType int32 +type MetricsEngine struct { + collection bool + cfg *config.Config + Registry *prometheus.Registry + managedMetrics map[APIType]MetricsClient +} + +const ( + DockerAPI APIType = iota + TaskEngine + StateManager + ECSClient +) + +// Maintained list of APIs for which we collect metrics. MetricsClients will be +// initialized using Factory method when a MetricsEngine is created. +var ( + managedAPIs = map[APIType]string{ + DockerAPI: "Docker_API", + TaskEngine: "Task_Engine", + StateManager: "State_Manager", + ECSClient: "ECS_Client", + } + MetricsEngineGlobal *MetricsEngine = &MetricsEngine{ + collection: false, + } +) + +// Function called during Agent start up to expose metrics on a local endpoint +func PublishMetrics() { + if MetricsEngineGlobal.collection { + MetricsEngineGlobal.publishMetrics() + } +} + +// Initializes the Global MetricsEngine used throughout Agent +// Currently, we use the Prometheus Global Default Registerer, which also collects +// basic Go application metrics that we use (like memory usage). +// In future cases, we can use a custom Prometheus Registry to group metrics. 
+// For unit testing purposes, we only focus on API calls and use our own Registry +func MustInit(cfg *config.Config, registry ...*prometheus.Registry) { + if !cfg.PrometheusMetricsEnabled { + return + } + var registryToUse *prometheus.Registry + if len(registry) > 0 { + registryToUse = registry[0] + } else { + registryToUse = prometheus.DefaultRegisterer.(*prometheus.Registry) + } + MetricsEngineGlobal = NewMetricsEngine(cfg, registryToUse) + MetricsEngineGlobal.collection = true +} + +// We create a MetricsClient for all managed APIs (APIs for which we will collect +// metrics) +func NewMetricsEngine(cfg *config.Config, registry *prometheus.Registry) *MetricsEngine { + metricsEngine := &MetricsEngine{ + cfg: cfg, + Registry: registry, + managedMetrics: make(map[APIType]MetricsClient), + } + for managedAPI := range managedAPIs { + aClient := NewMetricsClient(managedAPI, metricsEngine.Registry) + metricsEngine.managedMetrics[managedAPI] = aClient + } + return metricsEngine +} + +// Wrapper function that allows APIs to call a single function +func (engine *MetricsEngine) RecordDockerMetric(callName string) func() { + return engine.recordGenericMetric(DockerAPI, callName) +} + +// Wrapper function that allows APIs to call a single function +func (engine *MetricsEngine) RecordTaskEngineMetric(callName string) func() { + return engine.recordGenericMetric(TaskEngine, callName) +} + +// Wrapper function that allows APIs to call a single function +func (engine *MetricsEngine) RecordStateManagerMetric(callName string) func() { + return engine.recordGenericMetric(StateManager, callName) +} + +// Wrapper function that allows APIs to call a single function +func (engine *MetricsEngine) RecordECSClientMetric(callName string) func() { + return engine.recordGenericMetric(ECSClient, callName) +} + +// Records a call's start and returns a function to be deferred. +// Wrapper functions will use this function for GenericMetricsClients. 
+// If Metrics collection is enabled from the cfg, we record a metric with callID +// as an empty string (signaling a call start), and then return a function to +// record a second metric with a non-empty callID. +// We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER +// the FireCallStart (because these are done in separate go routines) +// Recording a metric in an API needs only a wrapper function that supplies the +// APIType and called using the following format: +// defer metrics.MetricsEngineGlobal.RecordMetricWrapper(callName)() +func (engine *MetricsEngine) recordGenericMetric(apiType APIType, callName string) func() { + callStarted := make(chan bool, 1) + if engine == nil || !engine.collection { + return func() { + } + } + callID := engine.recordMetric(apiType, callName, "", callStarted) + return func() { + engine.recordMetric(apiType, callName, callID, callStarted) + } +} + +func (engine *MetricsEngine) recordMetric(apiType APIType, callName, callID string, callStarted chan bool) string { + return engine.managedMetrics[apiType].RecordCall(callID, callName, time.Now(), callStarted) +} + +// Function that exposes all Agent Metrics on a given port. +func (engine *MetricsEngine) publishMetrics() { + go func() { + // Because we are using the DefaultRegisterer in Prometheus, we can use + // the promhttp.Handler() function. In future cases for custom registers, + // we can use promhttp.HandlerFor(customRegisterer, promhttp.HandlerOpts{}) + http.Handle("/metrics", promhttp.Handler()) + err := http.ListenAndServe(fmt.Sprintf(":%d", config.AgentPrometheusExpositionPort), nil) + if err != nil { + seelog.Errorf("Error publishing metrics: %s", err.Error()) + } + }() +} diff --git a/agent/metrics/metrics_factory.go b/agent/metrics/metrics_factory.go new file mode 100644 index 00000000000..6e521c638ac --- /dev/null +++ b/agent/metrics/metrics_factory.go @@ -0,0 +1,82 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package metrics + +import ( + "time" + + "github.com/cihub/seelog" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + AgentNamespace = "AgentMetrics" + DockerSubsystem = "DockerAPI" + TaskEngineSubsystem = "TaskEngine" + StateManagerSubsystem = "StateManager" + ECSClientSubsystem = "ECSClient" +) + +// A factory method that enables various MetricsClients to be created. +func NewMetricsClient(api APIType, registry *prometheus.Registry) MetricsClient { + switch api { + case DockerAPI: + return NewGenericMetricsClient(DockerSubsystem, registry) + case TaskEngine: + return NewGenericMetricsClient(TaskEngineSubsystem, registry) + case StateManager: + return NewGenericMetricsClient(StateManagerSubsystem, registry) + case ECSClient: + return NewGenericMetricsClient(ECSClientSubsystem, registry) + default: + seelog.Error("Unmanaged MetricsClient cannot be created.") + return nil + } +} + +func NewGenericMetricsClient(subsystem string, registry *prometheus.Registry) *GenericMetrics { + aDurationVec := prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: AgentNamespace, + Subsystem: subsystem, + Name: "duration_seconds", + Help: subsystem + " call duration in seconds", + Objectives: make(map[float64]float64), + }, []string{"Call"}) + registry.MustRegister(aDurationVec) + + aCounterVec := prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: AgentNamespace, + Subsystem: subsystem, + Name: "call_count", + }, []string{"Call"}) + 
registry.MustRegister(aCounterVec) + + aGaugeVec := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: AgentNamespace, + Subsystem: subsystem, + Name: "call_duration", + Help: subsystem + " call duration in seconds individual", + }, + []string{"Call"}) + registry.MustRegister(aGaugeVec) + + genericMetrics := &GenericMetrics{ + durationVec: aDurationVec, + counterVec: aCounterVec, + durations: aGaugeVec, + outstandingCalls: make(map[string]time.Time), + } + return genericMetrics +} diff --git a/agent/metrics/metrics_test.go b/agent/metrics/metrics_test.go new file mode 100644 index 00000000000..04664f9dace --- /dev/null +++ b/agent/metrics/metrics_test.go @@ -0,0 +1,217 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package metrics + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" +) + +// Create default config for Metrics. 
PrometheusMetricsEnabled is set to false +// by default, so it must be turned on +func getTestConfig() config.Config { + cfg := config.DefaultConfig() + cfg.PrometheusMetricsEnabled = true + return cfg +} + +// Tests if MetricsEngineGlobal variable is initialized and if all managed +// MetricsClients are initialized +func TestMetricsEngineInit(t *testing.T) { + defer func() { + MetricsEngineGlobal = &MetricsEngine{ + collection: false, + } + }() + cfg := getTestConfig() + MustInit(&cfg, prometheus.NewRegistry()) + assert.NotNil(t, MetricsEngineGlobal) + assert.Equal(t, len(MetricsEngineGlobal.managedMetrics), len(managedAPIs)) +} + +// Tests if a default config will start Prometheus metrics. Should be disabled +// by default. +func TestDisablePrometheusMetrics(t *testing.T) { + defer func() { + MetricsEngineGlobal = &MetricsEngine{ + collection: false, + } + }() + cfg := getTestConfig() + cfg.PrometheusMetricsEnabled = false + MustInit(&cfg, prometheus.NewRegistry()) + PublishMetrics() + assert.False(t, MetricsEngineGlobal.collection) +} + +// Mimicks metric collection of Docker API calls through Go routines. The method +// call to record a metric is the same used by various clients throughout Agent. +// We sleep the go routine to simulate "work" being done. +// We can determine the expected values for the metrics and create a map of them, +// which will then be used to verify the accuracy of the metrics collected. 
+func TestMetricCollection(t *testing.T) { + defer func() { + MetricsEngineGlobal = &MetricsEngine{ + collection: false, + } + }() + cfg := getTestConfig() + MustInit(&cfg, prometheus.NewRegistry()) + MetricsEngineGlobal.collection = true + + var DockerMetricSleepTime1 time.Duration = 1 * time.Second + var DockerMetricSleepTime2 time.Duration = 2 * time.Second + + var wg sync.WaitGroup + wg.Add(20) + + // These Go routines simulate metrics collection + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + defer MetricsEngineGlobal.RecordDockerMetric("START")() + time.Sleep(DockerMetricSleepTime1) + }() + } + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + defer MetricsEngineGlobal.RecordDockerMetric("START")() + time.Sleep(DockerMetricSleepTime2) + }() + } + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + defer MetricsEngineGlobal.RecordDockerMetric("STOP")() + time.Sleep(DockerMetricSleepTime1) + }() + } + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + defer MetricsEngineGlobal.RecordDockerMetric("STOP")() + time.Sleep(DockerMetricSleepTime2) + }() + } + wg.Wait() + time.Sleep(time.Second) + + // This will gather all collected metrics and store them in a MetricFamily list + // All metric families can be printed by looping over this variable using + // fmt.Println(proto.MarshalTextString(metricFamilies[n])) where n = index + metricFamilies, err := MetricsEngineGlobal.Registry.Gather() + assert.NoError(t, err) + + // Here we set up the expectations. These are known values which make verfication + // easier. 
+ expected := make(metricMap) + expected["AgentMetrics_DockerAPI_call_count"] = make(map[string][]interface{}) + expected["AgentMetrics_DockerAPI_call_count"]["CallSTART"] = []interface{}{ + "COUNTER", + 10.0, + } + expected["AgentMetrics_DockerAPI_call_count"]["CallSTOP"] = []interface{}{ + "COUNTER", + 10.0, + } + expected["AgentMetrics_DockerAPI_call_duration"] = make(map[string][]interface{}) + expected["AgentMetrics_DockerAPI_call_duration"]["CallSTART"] = []interface{}{ + "GUAGE", + 0.0, + } + expected["AgentMetrics_DockerAPI_call_duration"]["CallSTOP"] = []interface{}{ + "GUAGE", + 0.0, + } + expected["AgentMetrics_DockerAPI_duration_seconds"] = make(map[string][]interface{}) + expected["AgentMetrics_DockerAPI_duration_seconds"]["CallSTART"] = []interface{}{ + "SUMMARY", + 1.5, + } + expected["AgentMetrics_DockerAPI_duration_seconds"]["CallSTOP"] = []interface{}{ + "SUMMARY", + 1.5, + } + // We will do a simple tree search to verify all metrics in metricsFamilies + // are as expected + assert.True(t, verifyStats(metricFamilies, expected), "Metrics are not accurate") +} + +// A type for storing a Tree-based map. We map the MetricName to a map of metrics +// under that name. This second map indexes by MetricLabelName+MetricLabelValue to +// a slice MetricType and MetricValue. +//MetricName:metricLabelName+metricLabelValue:[metricType, metricValue] +type metricMap map[string]map[string][]interface{} + +// In order to verify the MetricFamily with the expected metric values, we do a simple +// tree search to verify that all stats in the MetricFamily coincide with the expected +// metric values. 
+// This method only verifes that all metrics in var metricsReceived are present in +// var expectedMetrics +func verifyStats(metricsReceived []*dto.MetricFamily, expectedMetrics metricMap) bool { + var threshhold float64 = 0.1 // Maximum threshhold for two metrics being equal + for _, metricFamily := range metricsReceived { + if metricList, found := expectedMetrics[metricFamily.GetName()]; found { + for _, metric := range metricFamily.GetMetric() { + if aMetric, found := metricList[metric.GetLabel()[0].GetName()+metric.GetLabel()[0].GetValue()]; found { + metricTypeExpected := string(aMetric[0].(string)) + metricValExpected := float64(aMetric[1].(float64)) + switch metricTypeExpected { + case "GUAGE": + continue + case "COUNTER": + if !compareDiff(metricValExpected, metric.GetCounter().GetValue(), threshhold) { + fmt.Printf("Does not match SUMMARY. Expected: %f, Received: %f\n", metricValExpected, metric.GetCounter().GetValue()) + return false + } + case "SUMMARY": + if !compareDiff(metricValExpected, metric.GetSummary().GetSampleSum()/float64(metric.GetSummary().GetSampleCount()), threshhold) { + fmt.Printf("Does not match SUMMARY. 
Expected: %f, Received: %f\n", metricValExpected, metric.GetSummary().GetSampleSum()/float64(metric.GetSummary().GetSampleCount())) + return false + } + default: + fmt.Println("Metric Type not recognized") + return false + } + } else { + fmt.Println("MetricLabel Name and Value combo not found") + return false + } + } + } else { + fmt.Println("MetricName not found") + return false + } + } + return true +} + +// Helper function to determine if two values (a and b) are within a percentage of a +func compareDiff(a, b, deltaMin float64) bool { + diff := a - b + if diff < 0 { + diff = -diff + } + return diff <= (a * deltaMin) +} diff --git a/agent/s3/factory/factory.go b/agent/s3/factory/factory.go new file mode 100644 index 00000000000..b6ae683860c --- /dev/null +++ b/agent/s3/factory/factory.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package factory + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + s3client "github.com/aws/amazon-ecs-agent/agent/s3" + + "github.com/aws/aws-sdk-go/aws" + awscreds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +const ( + bucketLocationDefault = "us-east-1" + roundtripTimeout = 5 * time.Second +) + +type S3ClientCreator interface { + NewS3ClientForBucket(bucket, region string, creds credentials.IAMRoleCredentials) (s3client.S3Client, error) +} + +func NewS3ClientCreator() S3ClientCreator { + return &s3ClientCreator{} +} + +type s3ClientCreator struct{} + +// NewS3Client returns a new S3 client based on the region of the bucket. +func (*s3ClientCreator) NewS3ClientForBucket(bucket, region string, + creds credentials.IAMRoleCredentials) (s3client.S3Client, error) { + cfg := aws.NewConfig(). + WithHTTPClient(httpclient.New(roundtripTimeout, false)). + WithCredentials( + awscreds.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, + creds.SessionToken)).WithRegion(region) + sess := session.Must(session.NewSession(cfg)) + + svc := s3.New(sess) + bucketRegion, err := getRegionFromBucket(svc, bucket) + if err != nil { + return nil, err + } + + sessWithRegion := session.Must(session.NewSession(cfg.WithRegion(bucketRegion))) + return s3manager.NewDownloaderWithClient(s3.New(sessWithRegion)), nil +} + +func getRegionFromBucket(svc *s3.S3, bucket string) (string, error) { + input := &s3.GetBucketLocationInput{ + Bucket: aws.String(bucket), + } + result, err := svc.GetBucketLocation(input) + if err != nil { + return "", err + } + if result.LocationConstraint == nil { // GetBucketLocation returns nil for bucket in us-east-1. 
+ return bucketLocationDefault, nil + } + + return aws.StringValue(result.LocationConstraint), nil +} diff --git a/agent/s3/factory/generate_mocks.go b/agent/s3/factory/generate_mocks.go new file mode 100644 index 00000000000..cb5a41ab3e7 --- /dev/null +++ b/agent/s3/factory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +//go:generate mockgen -destination=mocks/factory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/s3/factory S3ClientCreator diff --git a/agent/s3/factory/mocks/factory_mocks.go b/agent/s3/factory/mocks/factory_mocks.go new file mode 100644 index 00000000000..1b675987b2d --- /dev/null +++ b/agent/s3/factory/mocks/factory_mocks.go @@ -0,0 +1,65 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/aws/amazon-ecs-agent/agent/s3/factory (interfaces: S3ClientCreator) + +// Package mock_factory is a generated GoMock package. +package mock_factory + +import ( + reflect "reflect" + + credentials "github.com/aws/amazon-ecs-agent/agent/credentials" + s3 "github.com/aws/amazon-ecs-agent/agent/s3" + gomock "github.com/golang/mock/gomock" +) + +// MockS3ClientCreator is a mock of S3ClientCreator interface +type MockS3ClientCreator struct { + ctrl *gomock.Controller + recorder *MockS3ClientCreatorMockRecorder +} + +// MockS3ClientCreatorMockRecorder is the mock recorder for MockS3ClientCreator +type MockS3ClientCreatorMockRecorder struct { + mock *MockS3ClientCreator +} + +// NewMockS3ClientCreator creates a new mock instance +func NewMockS3ClientCreator(ctrl *gomock.Controller) *MockS3ClientCreator { + mock := &MockS3ClientCreator{ctrl: ctrl} + mock.recorder = &MockS3ClientCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockS3ClientCreator) EXPECT() *MockS3ClientCreatorMockRecorder { + return m.recorder +} + +// NewS3ClientForBucket mocks base method +func (m *MockS3ClientCreator) NewS3ClientForBucket(arg0, arg1 string, arg2 credentials.IAMRoleCredentials) (s3.S3Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewS3ClientForBucket", arg0, arg1, arg2) + ret0, _ := ret[0].(s3.S3Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewS3ClientForBucket indicates an expected call of NewS3ClientForBucket +func (mr *MockS3ClientCreatorMockRecorder) NewS3ClientForBucket(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewS3ClientForBucket", reflect.TypeOf((*MockS3ClientCreator)(nil).NewS3ClientForBucket), arg0, arg1, arg2) +} diff --git a/agent/s3/generate_mocks.go b/agent/s3/generate_mocks.go new file mode 100644 index 00000000000..ba2fccdbe03 --- /dev/null +++ 
b/agent/s3/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3 + +//go:generate mockgen -destination=mocks/s3_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/s3 S3Client diff --git a/agent/s3/interface.go b/agent/s3/interface.go new file mode 100644 index 00000000000..f68807e79b6 --- /dev/null +++ b/agent/s3/interface.go @@ -0,0 +1,27 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3 + +import ( + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// S3Client interface wraps the S3 API. 
+type S3Client interface { + DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (n int64, err error) +} diff --git a/agent/s3/mocks/s3_mocks.go b/agent/s3/mocks/s3_mocks.go new file mode 100644 index 00000000000..97f6de8c006 --- /dev/null +++ b/agent/s3/mocks/s3_mocks.go @@ -0,0 +1,72 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/s3 (interfaces: S3Client) + +// Package mock_s3 is a generated GoMock package. 
+package mock_s3 + +import ( + context "context" + io "io" + reflect "reflect" + + s3 "github.com/aws/aws-sdk-go/service/s3" + s3manager "github.com/aws/aws-sdk-go/service/s3/s3manager" + gomock "github.com/golang/mock/gomock" +) + +// MockS3Client is a mock of S3Client interface +type MockS3Client struct { + ctrl *gomock.Controller + recorder *MockS3ClientMockRecorder +} + +// MockS3ClientMockRecorder is the mock recorder for MockS3Client +type MockS3ClientMockRecorder struct { + mock *MockS3Client +} + +// NewMockS3Client creates a new mock instance +func NewMockS3Client(ctrl *gomock.Controller) *MockS3Client { + mock := &MockS3Client{ctrl: ctrl} + mock.recorder = &MockS3ClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockS3Client) EXPECT() *MockS3ClientMockRecorder { + return m.recorder +} + +// DownloadWithContext mocks base method +func (m *MockS3Client) DownloadWithContext(arg0 context.Context, arg1 io.WriterAt, arg2 *s3.GetObjectInput, arg3 ...func(*s3manager.Downloader)) (int64, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DownloadWithContext", varargs...) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DownloadWithContext indicates an expected call of DownloadWithContext +func (mr *MockS3ClientMockRecorder) DownloadWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadWithContext", reflect.TypeOf((*MockS3Client)(nil).DownloadWithContext), varargs...) +} diff --git a/agent/s3/s3.go b/agent/s3/s3.go new file mode 100644 index 00000000000..71c86b2579f --- /dev/null +++ b/agent/s3/s3.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3 + +import ( + "context" + "io" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/pkg/errors" +) + +const ( + s3ARNRegex = `arn:([^:]+):s3:::([^/]+)/(.+)` +) + +// DownloadFile downloads a file from s3 and writes it with the writer. +func DownloadFile(bucket, key string, timeout time.Duration, w io.WriterAt, client S3Client) error { + input := &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + _, err := client.DownloadWithContext(ctx, w, input) + return err +} + +// ParseS3ARN parses an s3 ARN. +func ParseS3ARN(s3ARN string) (bucket string, key string, err error) { + exp := regexp.MustCompile(s3ARNRegex) + match := exp.FindStringSubmatch(s3ARN) + if len(match) != 4 { + return "", "", errors.Errorf("invalid s3 arn: %s", s3ARN) + } + return match[2], match[3], nil +} diff --git a/agent/s3/s3_test.go b/agent/s3/s3_test.go new file mode 100644 index 00000000000..d983d8312ee --- /dev/null +++ b/agent/s3/s3_test.go @@ -0,0 +1,79 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3 + +import ( + "errors" + "io" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + s3sdk "github.com/aws/aws-sdk-go/service/s3" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + mock_s3 "github.com/aws/amazon-ecs-agent/agent/s3/mocks" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" +) + +const ( + testBucket = "testbucket" + testKey = "testkey" + testTimeout = 1 * time.Second +) + +func TestDownloadFile(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockFile := mock_oswrapper.NewMockFile() + mockS3Client := mock_s3.NewMockS3Client(ctrl) + + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Do(func(ctx aws.Context, + w io.WriterAt, input *s3sdk.GetObjectInput) { + assert.Equal(t, testBucket, aws.StringValue(input.Bucket)) + assert.Equal(t, testKey, aws.StringValue(input.Key)) + }) + + err := DownloadFile(testBucket, testKey, testTimeout, mockFile, mockS3Client) + assert.NoError(t, err) +} + +func TestDownloadFileError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockS3Client := mock_s3.NewMockS3Client(ctrl) + mockFile := mock_oswrapper.NewMockFile() + + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), errors.New("test error")) + + err := DownloadFile(testBucket, testKey, testTimeout, mockFile, mockS3Client) + assert.Error(t, err) +} + +func TestParseS3ARN(t *testing.T) { + bucket, key, err := ParseS3ARN("arn:aws:s3:::bucket/key") + assert.NoError(t, err) + assert.Equal(t, "bucket", bucket) + 
assert.Equal(t, "key", key) +} + +func TestParseS3ARNInvalid(t *testing.T) { + _, _, err := ParseS3ARN("arn:aws:xxx:::xxx") + assert.Error(t, err) +} diff --git a/agent/sighandlers/debug_handler.go b/agent/sighandlers/debug_handler.go new file mode 100644 index 00000000000..721575f16b6 --- /dev/null +++ b/agent/sighandlers/debug_handler.go @@ -0,0 +1,41 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sighandlers + +import ( + "os" + "os/signal" + "runtime" + "syscall" + "time" + + "github.com/cihub/seelog" +) + +func StartDebugHandler() { + signalChannel := make(chan os.Signal, 1) + signal.Notify(signalChannel, syscall.SIGUSR1) + go func() { + for range signalChannel { + // "640 k ought to be enough for anybody." - Bill Gates, 1981 + stackDump := make([]byte, 640*1024) + n := runtime.Stack(stackDump, true) + // Resize the buffer to the size of the actual stack + stackDump = stackDump[:n] + seelog.Criticalf("====== STACKTRACE ======\n%v\n%s\n====== /STACKTRACE ======", time.Now(), stackDump) + } + }() +} diff --git a/agent/sighandlers/debug_handler_windows.go b/agent/sighandlers/debug_handler_windows.go new file mode 100644 index 00000000000..38a2aee475d --- /dev/null +++ b/agent/sighandlers/debug_handler_windows.go @@ -0,0 +1,19 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package sighandlers + +func StartDebugHandler() { +} diff --git a/agent/sighandlers/exitcodes/exitcodes.go b/agent/sighandlers/exitcodes/exitcodes.go new file mode 100644 index 00000000000..3382455508f --- /dev/null +++ b/agent/sighandlers/exitcodes/exitcodes.go @@ -0,0 +1,31 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +// Package exitcodes defines agent exit codes +package exitcodes + +const ( + // ExitSuccess indicates there's no need to restart the agent and everything + // is okay + ExitSuccess = 0 + // ExitError (as well as unspecified exit codes) indicate a fatal error + // occurred, but the agent should be restarted + ExitError = 1 + // ExitTerminal indicates the agent has exited unsuccessfully, but should + // not be restarted + ExitTerminal = 5 + // ExitUpdate indicates that the agent has written an update file to the + // configured location and this file should be used instead when restarting + // the agent + ExitUpdate = 42 +) diff --git a/agent/sighandlers/termination_handler.go b/agent/sighandlers/termination_handler.go new file mode 100644 index 00000000000..279b516e194 --- /dev/null +++ b/agent/sighandlers/termination_handler.go @@ -0,0 +1,139 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package sighandlers handle signals and behave appropriately. 
+// SIGTERM: +// Flush state to disk and exit +// SIGUSR1: +// Print a dump of goroutines to the logger and DON'T exit +package sighandlers + +import ( + "context" + "errors" + "os" + "os/signal" + "syscall" + "time" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes" + + "github.com/cihub/seelog" + bolt "github.com/etcd-io/bbolt" +) + +const ( + engineDisableTimeout = 5 * time.Second + finalSaveTimeout = 3 * time.Second +) + +// TerminationHandler defines a handler used for terminating the agent +type TerminationHandler func(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) + +// StartDefaultTerminationHandler defines a default termination handler suitable for running in a process +func StartDefaultTerminationHandler(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine, cancel context.CancelFunc) { + // when we receive a termination signal, first save the state, then + // cancel the agent's context so other goroutines can exit cleanly. + signalC := make(chan os.Signal, 2) + signal.Notify(signalC, os.Interrupt, syscall.SIGTERM) + + sig := <-signalC + seelog.Infof("Agent received termination signal: %s", sig.String()) + + err := FinalSave(state, dataClient, taskEngine) + if err != nil { + seelog.Criticalf("Error saving state before final shutdown: %v", err) + // Terminal because it's a sigterm; the user doesn't want it to restart + os.Exit(exitcodes.ExitTerminal) + } + cancel() +} + +// FinalSave should be called immediately before exiting, and only before +// exiting, in order to flush tasks to disk. It waits a short timeout for state +// to settle if necessary. 
If unable to reach a steady-state and save within +// this short timeout, it returns an error +func FinalSave(state dockerstate.TaskEngineState, dataClient data.Client, taskEngine engine.TaskEngine) error { + engineDisabled := make(chan error) + + disableTimer := time.AfterFunc(engineDisableTimeout, func() { + engineDisabled <- errors.New("final save: timed out waiting for TaskEngine to settle") + }) + + go func() { + seelog.Debug("Shutting down task engine for final save") + taskEngine.Disable() + disableTimer.Stop() + engineDisabled <- nil + }() + + disableErr := <-engineDisabled + + stateSaved := make(chan error) + saveTimer := time.AfterFunc(finalSaveTimeout, func() { + stateSaved <- errors.New("final save: timed out trying to save to disk") + }) + go func() { + seelog.Debug("Saving state before shutting down") + saveStateAll(state, dataClient) + saveTimer.Stop() + stateSaved <- nil + }() + + saveErr := <-stateSaved + + if disableErr != nil || saveErr != nil { + return apierrors.NewMultiError(disableErr, saveErr) + } + return nil +} + +func saveStateAll(state dockerstate.TaskEngineState, dataClient data.Client) { + // save all tasks state data + for _, task := range state.AllTasks() { + if err := dataClient.SaveTask(task); err != nil { + seelog.Errorf("Failed to save data for task %s: %v", task.Arn, err) + } + } + + // save all container and docker container state data + for _, containerId := range state.GetAllContainerIDs() { + container, ok := state.ContainerByID(containerId) + if !ok { + seelog.Errorf("Unable to find container for %s", containerId); continue + } + if err := dataClient.SaveDockerContainer(container); err != nil { + seelog.Errorf("Failed to save data for docker container %s: %v", container.Container.Name, err) + } + } + + // save all image state data + for _, imageState := range state.AllImageStates() { + if err := dataClient.SaveImageState(imageState); err != nil { + seelog.Errorf("Failed to save data for image state %s: %v", imageState.GetImageID(),
err) + } + } + + // save all eni attachment data + for _, eniAttachment := range state.AllENIAttachments() { + if err := dataClient.SaveENIAttachment(eniAttachment); err != nil { + seelog.Errorf("Failed to save data for eni attachment %s: %v", eniAttachment.AttachmentARN, err) + } + } + // Wait to ensure data is saved. + time.Sleep(bolt.DefaultMaxBatchDelay) +} diff --git a/agent/sighandlers/termination_handler_test.go b/agent/sighandlers/termination_handler_test.go new file mode 100644 index 00000000000..daccc9db610 --- /dev/null +++ b/agent/sighandlers/termination_handler_test.go @@ -0,0 +1,113 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package sighandlers + +import ( + "io/ioutil" + "os" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/engine/image" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + imageId = "test-imageId" + taskARN = "arn:aws:ecs:region:account-id:task/task-id" + eniAttachmentArn = "arn:aws:ecs:us-west-2:1234567890:attachment/abc" +) + +func TestFinalSave(t *testing.T) { + dataClient, cleanup := newTestDataClient(t) + defer cleanup() + + state := dockerstate.NewTaskEngineState() + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, + nil, nil, state, nil, nil, nil) + + task := &apitask.Task{ + Arn: taskARN, + Family: "test", + Version: "1", + } + + dockerContainer := &apicontainer.DockerContainer{ + DockerID: "docker-id", + DockerName: "docker-name", + Container: &apicontainer.Container{ + Name: "container-name", + TaskARNUnsafe: taskARN, + }, + } + + eniAttachment := &apieni.ENIAttachment{ + TaskARN: taskARN, + AttachmentARN: eniAttachmentArn, + AttachStatusSent: false, + } + imageState := &image.ImageState{ + Image: &image.Image{ + ImageID: imageId, + }, + PullSucceeded: false, + } + + state.AddTask(task) + state.AddContainer(dockerContainer, task) + state.AddImageState(imageState) + state.AddENIAttachment(eniAttachment) + + assert.NoError(t, FinalSave(state, dataClient, taskEngine)) + + // check if all states are saved to db on FinalSave + tasks, err := dataClient.GetTasks() + assert.NoError(t, err) + assert.Len(t, tasks, 1) + + dockerContainers, err := dataClient.GetContainers() + assert.NoError(t, err) + assert.Len(t, 
dockerContainers, 1) + + attachments, err := dataClient.GetENIAttachments() + assert.NoError(t, err) + assert.Len(t, attachments, 1) + + imageStates, err := dataClient.GetImageStates() + assert.NoError(t, err) + assert.Len(t, imageStates, 1) +} + +func newTestDataClient(t *testing.T) (data.Client, func()) { + testDir, err := ioutil.TempDir("", "termination_handler_unit_test") + require.NoError(t, err) + + testClient, err := data.NewWithSetup(testDir) + + cleanup := func() { + require.NoError(t, testClient.Close()) + require.NoError(t, os.RemoveAll(testDir)) + } + return testClient, cleanup +} diff --git a/agent/ssm/factory/factory.go b/agent/ssm/factory/factory.go new file mode 100644 index 00000000000..0e68cf1b3c1 --- /dev/null +++ b/agent/ssm/factory/factory.go @@ -0,0 +1,53 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package factory + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/httpclient" + ssmclient "github.com/aws/amazon-ecs-agent/agent/ssm" + "github.com/aws/aws-sdk-go/aws" + awscreds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ssm" +) + +const ( + roundtripTimeout = 5 * time.Second +) + +type SSMClientCreator interface { + NewSSMClient(region string, creds credentials.IAMRoleCredentials) ssmclient.SSMClient +} + +func NewSSMClientCreator() SSMClientCreator { + return &ssmClientCreator{} +} + +type ssmClientCreator struct{} + +//SSM Client will automatically retry 3 times when has throttling error +func (*ssmClientCreator) NewSSMClient(region string, + creds credentials.IAMRoleCredentials) ssmclient.SSMClient { + cfg := aws.NewConfig(). + WithHTTPClient(httpclient.New(roundtripTimeout, false)). + WithRegion(region). + WithCredentials( + awscreds.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, + creds.SessionToken)) + sess := session.Must(session.NewSession(cfg)) + return ssm.New(sess) +} diff --git a/agent/ssm/factory/generate_mocks.go b/agent/ssm/factory/generate_mocks.go new file mode 100644 index 00000000000..190a32a1dde --- /dev/null +++ b/agent/ssm/factory/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package factory + +//go:generate mockgen -destination=mocks/factory_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ssm/factory SSMClientCreator diff --git a/agent/ssm/factory/mocks/factory_mocks.go b/agent/ssm/factory/mocks/factory_mocks.go new file mode 100644 index 00000000000..d94918f34df --- /dev/null +++ b/agent/ssm/factory/mocks/factory_mocks.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ssm/factory (interfaces: SSMClientCreator) + +// Package mock_factory is a generated GoMock package. 
+package mock_factory + +import ( + reflect "reflect" + + credentials "github.com/aws/amazon-ecs-agent/agent/credentials" + ssm "github.com/aws/amazon-ecs-agent/agent/ssm" + gomock "github.com/golang/mock/gomock" +) + +// MockSSMClientCreator is a mock of SSMClientCreator interface +type MockSSMClientCreator struct { + ctrl *gomock.Controller + recorder *MockSSMClientCreatorMockRecorder +} + +// MockSSMClientCreatorMockRecorder is the mock recorder for MockSSMClientCreator +type MockSSMClientCreatorMockRecorder struct { + mock *MockSSMClientCreator +} + +// NewMockSSMClientCreator creates a new mock instance +func NewMockSSMClientCreator(ctrl *gomock.Controller) *MockSSMClientCreator { + mock := &MockSSMClientCreator{ctrl: ctrl} + mock.recorder = &MockSSMClientCreatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSSMClientCreator) EXPECT() *MockSSMClientCreatorMockRecorder { + return m.recorder +} + +// NewSSMClient mocks base method +func (m *MockSSMClientCreator) NewSSMClient(arg0 string, arg1 credentials.IAMRoleCredentials) ssm.SSMClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewSSMClient", arg0, arg1) + ret0, _ := ret[0].(ssm.SSMClient) + return ret0 +} + +// NewSSMClient indicates an expected call of NewSSMClient +func (mr *MockSSMClientCreatorMockRecorder) NewSSMClient(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSSMClient", reflect.TypeOf((*MockSSMClientCreator)(nil).NewSSMClient), arg0, arg1) +} diff --git a/agent/ssm/generate_mocks.go b/agent/ssm/generate_mocks.go new file mode 100644 index 00000000000..95560847536 --- /dev/null +++ b/agent/ssm/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ssm + +//go:generate mockgen -destination=mocks/ssm_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/ssm SSMClient diff --git a/agent/ssm/interface.go b/agent/ssm/interface.go new file mode 100644 index 00000000000..4a599029aa7 --- /dev/null +++ b/agent/ssm/interface.go @@ -0,0 +1,22 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ssm + +import ( + "github.com/aws/aws-sdk-go/service/ssm" +) + +type SSMClient interface { + GetParameters(*ssm.GetParametersInput) (*ssm.GetParametersOutput, error) +} diff --git a/agent/ssm/mocks/ssm_mocks.go b/agent/ssm/mocks/ssm_mocks.go new file mode 100644 index 00000000000..98dbae13115 --- /dev/null +++ b/agent/ssm/mocks/ssm_mocks.go @@ -0,0 +1,64 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/ssm (interfaces: SSMClient) + +// Package mock_ssm is a generated GoMock package. +package mock_ssm + +import ( + reflect "reflect" + + ssm "github.com/aws/aws-sdk-go/service/ssm" + gomock "github.com/golang/mock/gomock" +) + +// MockSSMClient is a mock of SSMClient interface +type MockSSMClient struct { + ctrl *gomock.Controller + recorder *MockSSMClientMockRecorder +} + +// MockSSMClientMockRecorder is the mock recorder for MockSSMClient +type MockSSMClientMockRecorder struct { + mock *MockSSMClient +} + +// NewMockSSMClient creates a new mock instance +func NewMockSSMClient(ctrl *gomock.Controller) *MockSSMClient { + mock := &MockSSMClient{ctrl: ctrl} + mock.recorder = &MockSSMClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSSMClient) EXPECT() *MockSSMClientMockRecorder { + return m.recorder +} + +// GetParameters mocks base method +func (m *MockSSMClient) GetParameters(arg0 *ssm.GetParametersInput) (*ssm.GetParametersOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetParameters", arg0) + ret0, _ := ret[0].(*ssm.GetParametersOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetParameters indicates an expected call of GetParameters +func (mr *MockSSMClientMockRecorder) GetParameters(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParameters", reflect.TypeOf((*MockSSMClient)(nil).GetParameters), arg0) +} diff --git 
a/agent/ssm/ssm.go b/agent/ssm/ssm.go new file mode 100644 index 00000000000..81d8149bb5f --- /dev/null +++ b/agent/ssm/ssm.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ssm + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/pkg/errors" +) + +// GetSecretsFromSSM makes the api call to the AWS SSM parameter store to +// retrieve secret values in batches +func GetSecretsFromSSM(names []string, client SSMClient) (map[string]string, error) { + return getParameters(names, client, true) +} + +// GetParametersFromSSM makes the api call to the AWS SSM parameter store to +// retrieve parameter values in batches +func GetParametersFromSSM(names []string, client SSMClient) (map[string]string, error) { + return getParameters(names, client, false) +} + +func getParameters(names []string, client SSMClient, withDecryption bool) (map[string]string, error) { + var params []*string + for _, name := range names { + params = append(params, aws.String(name)) + } + + in := &ssm.GetParametersInput{ + Names: params, + WithDecryption: aws.Bool(withDecryption), + } + + out, err := client.GetParameters(in) + if err != nil { + return nil, err + } + + return extractSSMValues(out) +} + +func extractSSMValues(out *ssm.GetParametersOutput) (map[string]string, error) { + if out == nil { + return nil, errors.New( + "empty response") + } + + if len(out.InvalidParameters) != 0 
{ + var stringValues []string + for _, invalid := range out.InvalidParameters { + stringValues = append(stringValues, aws.StringValue(invalid)) + } + return nil, fmt.Errorf( + "invalid parameters: %s", strings.Join(stringValues, ",")) + } + + parameterValues := make(map[string]string) + for _, parameter := range out.Parameters { + parameterValues[aws.StringValue(parameter.Name)] = aws.StringValue(parameter.Value) + } + + return parameterValues, nil +} diff --git a/agent/ssm/ssm_test.go b/agent/ssm/ssm_test.go new file mode 100644 index 00000000000..269824815a3 --- /dev/null +++ b/agent/ssm/ssm_test.go @@ -0,0 +1,127 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ssm + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/stretchr/testify/assert" +) + +const ( + invalidParam1 = "/test/invalidParams" + validParam1 = "/test/param1" + validValue1 = "secret1" +) + +type mockGetParameters struct { + SSMClient + Resp ssm.GetParametersOutput +} + +func (m mockGetParameters) GetParameters(input *ssm.GetParametersInput) (*ssm.GetParametersOutput, error) { + return &m.Resp, nil +} + +func TestGetSecretsFromSSM(t *testing.T) { + param1 := ssm.Parameter{ + Name: aws.String(validParam1), + Value: aws.String(validValue1), + } + + cases := []struct { + Name string + Resp ssm.GetParametersOutput + ShouldError bool + }{ + { + Name: "SuccessWithNoInvalidParameters", + Resp: ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{¶m1}, + }, + ShouldError: false, + }, + { + Name: "ErrorWithInvalidParameters", + Resp: ssm.GetParametersOutput{ + InvalidParameters: []*string{aws.String(invalidParam1)}, + Parameters: []*ssm.Parameter{¶m1}, + }, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ssmClient := mockGetParameters{Resp: c.Resp} + names := []string{validParam1, invalidParam1} + _, err := GetSecretsFromSSM(names, ssmClient) + + if c.ShouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestGetParametersFromSSM(t *testing.T) { + param1 := ssm.Parameter{ + Name: aws.String(validParam1), + Value: aws.String(validValue1), + } + + cases := []struct { + Name string + Resp ssm.GetParametersOutput + ShouldError bool + }{ + { + Name: "SuccessWithNoInvalidParameters", + Resp: ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{¶m1}, + }, + ShouldError: false, + }, + { + Name: "ErrorWithInvalidParameters", + Resp: ssm.GetParametersOutput{ + InvalidParameters: []*string{aws.String(invalidParam1)}, + Parameters: 
[]*ssm.Parameter{¶m1}, + }, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ssmClient := mockGetParameters{Resp: c.Resp} + names := []string{validParam1, invalidParam1} + _, err := GetParametersFromSSM(names, ssmClient) + + if c.ShouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/agent/statechange/statechange.go b/agent/statechange/statechange.go new file mode 100644 index 00000000000..cd95cecbcf6 --- /dev/null +++ b/agent/statechange/statechange.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package statechange + +const ( + // ContainerEvent is used to define the container state transition events + // emitted by the engine + ContainerEvent EventType = iota + + // TaskEvent is used to define the task state transition events emitted by + // the engine + TaskEvent + + // AttachmentEvent is used to define the attachment state transition events + // emitted by ENI watcher + AttachmentEvent + + // ManagedAgentEvent is used to define the managed agent state transition events + // emitted by the engine + ManagedAgentEvent +) + +// EventType defines the type of state change event +type EventType int32 + +// Event is used to abstract away the transition event types +// passed up through a single channel from the engine +type Event interface { + // GetEventType implementations should return one of the enums defined above to + // identify the type of event being emitted + GetEventType() EventType +} diff --git a/agent/statemanager/dependencies/dependencies_windows.go b/agent/statemanager/dependencies/dependencies_windows.go new file mode 100644 index 00000000000..345bbeb9451 --- /dev/null +++ b/agent/statemanager/dependencies/dependencies_windows.go @@ -0,0 +1,45 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package dependencies + +import ( + "golang.org/x/sys/windows/registry" +) + +//go:generate mockgen -destination=mocks/mocks_windows.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/statemanager/dependencies WindowsRegistry,RegistryKey + +// WindowsRegistry is an interface for the package-level methods in the golang.org/x/sys/windows/registry package +type WindowsRegistry interface { + CreateKey(k registry.Key, path string, access uint32) (newk RegistryKey, openedExisting bool, err error) + OpenKey(k registry.Key, path string, access uint32) (RegistryKey, error) +} + +// RegistryKey is an interface for the registry.Key type +type RegistryKey interface { + GetStringValue(name string) (val string, valtype uint32, err error) + SetStringValue(name, value string) error + Close() error +} + +type StdRegistry struct{} + +func (StdRegistry) CreateKey(k registry.Key, path string, access uint32) (RegistryKey, bool, error) { + return registry.CreateKey(k, path, access) +} + +func (StdRegistry) OpenKey(k registry.Key, path string, access uint32) (RegistryKey, error) { + return registry.OpenKey(k, path, access) +} diff --git a/agent/statemanager/dependencies/mocks/mocks_windows.go b/agent/statemanager/dependencies/mocks/mocks_windows.go new file mode 100644 index 00000000000..34bc71bc12b --- /dev/null +++ b/agent/statemanager/dependencies/mocks/mocks_windows.go @@ -0,0 +1,120 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +// Automatically generated by MockGen. DO NOT EDIT! +// Source: github.com/aws/amazon-ecs-agent/agent/statemanager/dependencies (interfaces: WindowsRegistry,RegistryKey) + +package mock_dependencies + +import ( + dependencies "github.com/aws/amazon-ecs-agent/agent/statemanager/dependencies" + gomock "github.com/golang/mock/gomock" + registry "golang.org/x/sys/windows/registry" +) + +// Mock of WindowsRegistry interface +type MockWindowsRegistry struct { + ctrl *gomock.Controller + recorder *_MockWindowsRegistryRecorder +} + +// Recorder for MockWindowsRegistry (not exported) +type _MockWindowsRegistryRecorder struct { + mock *MockWindowsRegistry +} + +func NewMockWindowsRegistry(ctrl *gomock.Controller) *MockWindowsRegistry { + mock := &MockWindowsRegistry{ctrl: ctrl} + mock.recorder = &_MockWindowsRegistryRecorder{mock} + return mock +} + +func (_m *MockWindowsRegistry) EXPECT() *_MockWindowsRegistryRecorder { + return _m.recorder +} + +func (_m *MockWindowsRegistry) CreateKey(_param0 registry.Key, _param1 string, _param2 uint32) (dependencies.RegistryKey, bool, error) { + ret := _m.ctrl.Call(_m, "CreateKey", _param0, _param1, _param2) + ret0, _ := ret[0].(dependencies.RegistryKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +func (_mr *_MockWindowsRegistryRecorder) CreateKey(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateKey", arg0, arg1, arg2) +} + +func (_m *MockWindowsRegistry) OpenKey(_param0 registry.Key, _param1 string, _param2 uint32) (dependencies.RegistryKey, error) { + ret := _m.ctrl.Call(_m, "OpenKey", _param0, _param1, _param2) + ret0, _ := ret[0].(dependencies.RegistryKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockWindowsRegistryRecorder) OpenKey(arg0, arg1, arg2 interface{}) *gomock.Call { + return 
_mr.mock.ctrl.RecordCall(_mr.mock, "OpenKey", arg0, arg1, arg2) +} + +// Mock of RegistryKey interface +type MockRegistryKey struct { + ctrl *gomock.Controller + recorder *_MockRegistryKeyRecorder +} + +// Recorder for MockRegistryKey (not exported) +type _MockRegistryKeyRecorder struct { + mock *MockRegistryKey +} + +func NewMockRegistryKey(ctrl *gomock.Controller) *MockRegistryKey { + mock := &MockRegistryKey{ctrl: ctrl} + mock.recorder = &_MockRegistryKeyRecorder{mock} + return mock +} + +func (_m *MockRegistryKey) EXPECT() *_MockRegistryKeyRecorder { + return _m.recorder +} + +func (_m *MockRegistryKey) Close() error { + ret := _m.ctrl.Call(_m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRegistryKeyRecorder) Close() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Close") +} + +func (_m *MockRegistryKey) GetStringValue(_param0 string) (string, uint32, error) { + ret := _m.ctrl.Call(_m, "GetStringValue", _param0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(uint32) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +func (_mr *_MockRegistryKeyRecorder) GetStringValue(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "GetStringValue", arg0) +} + +func (_m *MockRegistryKey) SetStringValue(_param0 string, _param1 string) error { + ret := _m.ctrl.Call(_m, "SetStringValue", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRegistryKeyRecorder) SetStringValue(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SetStringValue", arg0, arg1) +} diff --git a/agent/statemanager/dependencies/noop.go b/agent/statemanager/dependencies/noop.go new file mode 100644 index 00000000000..0f34e39c988 --- /dev/null +++ b/agent/statemanager/dependencies/noop.go @@ -0,0 +1,14 @@ +package dependencies + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. diff --git a/agent/statemanager/generate_mocks.go b/agent/statemanager/generate_mocks.go new file mode 100644 index 00000000000..f3da5c88b0d --- /dev/null +++ b/agent/statemanager/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package statemanager + +//go:generate mockgen -destination=mocks/statemanager_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/statemanager StateManager diff --git a/agent/statemanager/helper_unix_test.go b/agent/statemanager/helper_unix_test.go new file mode 100644 index 00000000000..cf097e835e3 --- /dev/null +++ b/agent/statemanager/helper_unix_test.go @@ -0,0 +1,20 @@ +// +build !windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package statemanager_test + +func setupWindowsTest(path string) (func(), error) { + return func() {}, nil +} diff --git a/agent/statemanager/helper_windows_test.go b/agent/statemanager/helper_windows_test.go new file mode 100644 index 00000000000..676b49058fe --- /dev/null +++ b/agent/statemanager/helper_windows_test.go @@ -0,0 +1,47 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package statemanager_test + +import ( + "fmt" + "os" + "time" + + "golang.org/x/sys/windows/registry" +) + +func setupWindowsTest(path string) (func(), error) { + key, _, err := registry.CreateKey(registry.LOCAL_MACHINE, `SOFTWARE\Amazon\ECS Agent\State File`, registry.SET_VALUE|registry.CREATE_SUB_KEY) + if err != nil { + return nil, err + } + defer key.Close() + valueName := fmt.Sprintf("test_path_%d", time.Now().UnixNano()) + os.Setenv("ZZZ_I_KNOW_SETTING_TEST_VALUES_IN_PRODUCTION_IS_NOT_SUPPORTED", valueName) + err = key.SetStringValue(valueName, path) + if err != nil { + return nil, err + } + cleanup := func() { + os.Unsetenv("ZZZ_I_KNOW_SETTING_TEST_VALUES_IN_PRODUCTION_IS_NOT_SUPPORTED") + key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Amazon\ECS Agent\State File`, registry.ALL_ACCESS) + if err != nil { + return + } + key.DeleteValue(valueName) + } + return cleanup, nil +} diff --git a/agent/statemanager/mocks/statemanager_mocks.go b/agent/statemanager/mocks/statemanager_mocks.go new file mode 100644 index 00000000000..8f5c0c4c522 --- /dev/null +++ b/agent/statemanager/mocks/statemanager_mocks.go @@ -0,0 +1,90 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/statemanager (interfaces: StateManager) + +// Package mock_statemanager is a generated GoMock package. 
+package mock_statemanager + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockStateManager is a mock of StateManager interface +type MockStateManager struct { + ctrl *gomock.Controller + recorder *MockStateManagerMockRecorder +} + +// MockStateManagerMockRecorder is the mock recorder for MockStateManager +type MockStateManagerMockRecorder struct { + mock *MockStateManager +} + +// NewMockStateManager creates a new mock instance +func NewMockStateManager(ctrl *gomock.Controller) *MockStateManager { + mock := &MockStateManager{ctrl: ctrl} + mock.recorder = &MockStateManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockStateManager) EXPECT() *MockStateManagerMockRecorder { + return m.recorder +} + +// ForceSave mocks base method +func (m *MockStateManager) ForceSave() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForceSave") + ret0, _ := ret[0].(error) + return ret0 +} + +// ForceSave indicates an expected call of ForceSave +func (mr *MockStateManagerMockRecorder) ForceSave() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceSave", reflect.TypeOf((*MockStateManager)(nil).ForceSave)) +} + +// Load mocks base method +func (m *MockStateManager) Load() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Load") + ret0, _ := ret[0].(error) + return ret0 +} + +// Load indicates an expected call of Load +func (mr *MockStateManagerMockRecorder) Load() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockStateManager)(nil).Load)) +} + +// Save mocks base method +func (m *MockStateManager) Save() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Save") + ret0, _ := ret[0].(error) + return ret0 +} + +// Save indicates an expected call of Save +func (mr *MockStateManagerMockRecorder) Save() *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockStateManager)(nil).Save)) +} diff --git a/agent/statemanager/noop_state_manager.go b/agent/statemanager/noop_state_manager.go new file mode 100644 index 00000000000..1becc90ff01 --- /dev/null +++ b/agent/statemanager/noop_state_manager.go @@ -0,0 +1,39 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package statemanager + +// NoopStateManager is a state manager that succeeds for all reads/writes without +// even trying; it allows disabling of state serialization by being a drop-in +// replacement so no other code need be concerned with it. +type NoopStateManager struct{} + +// NewNoopStateManager constructs a state manager +func NewNoopStateManager() StateManager { + return &NoopStateManager{} +} + +// Save does nothing, successfully +func (nsm *NoopStateManager) Save() error { + return nil +} + +// ForceSave does nothing, successfully +func (nsm *NoopStateManager) ForceSave() error { + return nil +} + +// Load does nothing, successfully +func (nsm *NoopStateManager) Load() error { + return nil +} diff --git a/agent/statemanager/state_manager.go b/agent/statemanager/state_manager.go new file mode 100644 index 00000000000..41f469d7050 --- /dev/null +++ b/agent/statemanager/state_manager.go @@ -0,0 +1,349 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package statemanager implements simple constructs for saving and restoring +// state from disk. +// It provides the interface for a StateManager which can read/write arbitrary +// json data from/to disk. +package statemanager + +import ( + "encoding/json" + "errors" + "os" + "strconv" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/metrics" + + "github.com/cihub/seelog" +) + +/* + +DEPRECATED: state manager is no longer used to store agent state and it remains here + only for backward compatibility purpose (loading state from an old agent that uses + the state manager - see agent/app/data.go, so do not remove it although it's deprecated). + You no longer need to update ECSDataVersion when changing data structure + in agent. + +Agent now uses boltDB to store its state. See github.com/aws/amazon-ecs-agent/agent/data + package for the new data persistent interface and use that if you need to deal with data persistence. + +*/ + +const ( + // ECSDataVersion is the current version of saved data. Any backwards or + // forwards incompatible changes to the data-format should increment this number + // and retain the ability to read old data versions. 
+ // Version changes: + // 1) initial + // 2) + // a) Add 'ACSSeqNum' top level field (backwards compatible; technically + // forwards compatible but could cause resource constraint violations) + // b) remove 'DEAD', 'UNKNOWN' state from ever being marshalled (backward and + // forward compatible) + // 3) Add 'Protocol' field to 'portMappings' and 'KnownPortBindings' + // 4) Add 'DockerConfig' struct + // 5) Add 'ImageStates' struct as part of ImageManager + // 6) + // a) Refactor 'Internal' field in 'apicontainer.Container' to 'Type' enum + // b) Add 'ContainerResourcesProvisioned' as a new 'ContainerStatus' enum + // c) Add 'SteadyStateStatus' field to 'Container' struct + // d) Add 'ENIAttachments' struct + // e) Deprecate 'SteadyStateDependencies' in favor of 'TransitionDependencySet' + // 7) + // a) Add 'MetadataUpdated' field to 'apicontainer.Container' + // b) Add 'DomainNameServers' and 'DomainNameSearchList' in `api.ENI` + // 8) + // a) Add 'UseExecutionRole' in `api.ECRAuthData` + // b) Add `executionCredentialsID` in `apitask.Task` + // c) Add 'LogsAuthStrategy' field to 'apicontainer.Container' + // d) Added task cgroup related fields ('CPU', 'Memory', 'MemoryCPULimitsEnabled') to 'apitask.Task' + // 9) Add 'ipToTask' map to state file + // 10) Add 'healthCheckType' field in 'apicontainer.Container' + // 11) + // a) Add 'PrivateDNSName' field to 'api.ENI' + // b) Remove `AppliedStatus` field from 'apicontainer.Container' + // 12) Deprecate 'TransitionDependencySet' and add new 'TransitionDependenciesMap' in 'apicontainer.Container' + // 13) Add 'resources' field to 'api.task.task' + // 14) Add 'PlatformFields' field to 'api.task.task' + // 15) Add 'PIDMode' and 'IPCMode' fields to 'api.task.task' + // 16) Add 'V3EndpointID' field to 'Container' struct + // 17) + // a) Add 'secrets' field to 'apicontainer.Container' + // b) Add 'ssmsecret' field to 'resources' + // 18) + // a) Add 'AvailabilityZone' field to the TaskResponse struct + // b) Add 
'asmsecret' field to 'resources' + // 19) + // a) Add 'Associations' field to 'api.task.task' + // b) Add 'GPUIDs' field to 'apicontainer.Container' + // c) Add 'NvidiaRuntime' field to 'api.task.task' + // 20) + // a) Add 'DependsOn' field to 'apicontainer.Container' + // b) Add 'StartTime' field to 'api.container.Container' + // c) Add 'StopTime' field to 'api.container.Container' + // 21) Add 'target' field to the Secret struct + // 22) + // a) Add 'attachmentType' field to 'api.ENIAttachment' + // b) Add 'InterfaceAssociationProtocol' field to 'api.ENI' + // c) Add 'InterfaceVlanProperties' field to 'api.ENI' + // 23) + // a) Add 'RuntimeID' field to 'apicontainer.Container' + // b) Add 'FirelensConfig' field to 'Container' struct + // c) Add 'firelens' field to 'resources' + // 24) + // a) Add 'imageDigest' field to 'apicontainer.Container' + // b) Add 'Region', 'ExecutionCredentialsID', 'ExternalConfigType', 'ExternalConfigValue' and 'NetworkMode' to + // firelens task resource. + // 25) Add `seqNumTaskManifest` int field + // 26) Add 'credentialspec' field to 'resources' + // 27) + // a) Add 'authorizationConfig', 'transitEncryption' and 'transitEncryptionPort' to 'taskresource.volume.EFSVolumeConfig' + // b) Add 'pauseContainerPID' field to 'taskresource.volume.VolumeResource' + // 28) Add 'envfile' field to 'resources' + // 29) Add 'fsxwindowsfileserver' field to 'resources' + // 30) Add 'ExecCommandAgentMetadata' field to 'apicontainer.Container' + // 31) Add 'ManagedAgentsUnsafe' field to 'apicontainer.Container' + + ECSDataVersion = 31 + + // ecsDataFile specifies the filename in the ECS_DATADIR + ecsDataFile = "ecs_agent_data.json" + + // minSaveInterval specifies how frequently to flush to disk + minSaveInterval = 10 * time.Second +) + +// Saveable types should be able to be json serializable and deserializable +// Properly, this should have json.Marshaler/json.Unmarshaler here, but string +// and so on can be marshaled/unmarshaled sanely but don't 
fit those interfaces. +type Saveable interface{} + +// Saver is a type that can be saved +type Saver interface { + Save() error + ForceSave() error +} + +// Option functions are functions that may be used as part of constructing a new +// StateManager +type Option func(StateManager) + +type saveableState map[string]*Saveable +type intermediateSaveableState map[string]json.RawMessage + +// State is a struct of all data that should be saveable/loadable to disk. Each +// element should be json-serializable. +// +// Note, changing this to work with BinaryMarshaler or another more compact +// format would be fine, but everything already needs a json representation +// since that's our wire format and the extra space taken / IO-time is expected +// to be fairly negligible. +type state struct { + Data saveableState + + Version int +} + +type intermediateState struct { + Data intermediateSaveableState +} + +type versionOnlyState struct { + Version int +} + +type platformDependencies interface{} + +// A StateManager can load and save state from disk. +// Load is not expected to return an error if there is no state to load. +type StateManager interface { + Saver + Load() error +} + +type basicStateManager struct { + statePath string // The path to a file in which state can be serialized + + state *state // pointers to the data we should save / load into + + saveTimesLock sync.Mutex // guards save times + lastSave time.Time //the last time a save completed + nextPlannedSave time.Time //the next time a save is planned + + savingLock sync.Mutex // guards marshal, write, move (on Linux), and load (on Windows) + + platformDependencies platformDependencies // platform-specific dependencies +} + +// NewStateManager constructs a new StateManager which saves data at the +// location specified in cfg and operates under the given options. 
+// The returned StateManager will not save more often than every 10 seconds and +// will not reliably return errors with Save, but will log them appropriately. +func NewStateManager(cfg *config.Config, options ...Option) (StateManager, error) { + fi, err := os.Stat(cfg.DataDir) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, errors.New("State manager DataDir must exist") + } + + state := &state{ + Data: make(saveableState), + Version: ECSDataVersion, + } + manager := &basicStateManager{ + statePath: cfg.DataDir, + state: state, + } + + for _, option := range options { + option(manager) + } + + manager.platformDependencies = newPlatformDependencies() + + return manager, nil +} + +// AddSaveable is an option that adds a given saveable as one that should be saved +// under the given name. The name must be the same across uses of the +// statemanager (e.g. program invocations) for it to be serialized and +// deserialized correctly. +func AddSaveable(name string, saveable Saveable) Option { + return (Option)(func(m StateManager) { + manager, ok := m.(*basicStateManager) + if !ok { + seelog.Critical("Unable to add to state manager; unknown instantiation") + return + } + manager.state.Data[name] = &saveable + }) +} + +// Save triggers a save to file, though respects a minimum save interval to wait +// between saves. +func (manager *basicStateManager) Save() error { + defer metrics.MetricsEngineGlobal.RecordStateManagerMetric("SAVE")() + manager.saveTimesLock.Lock() + defer manager.saveTimesLock.Unlock() + if time.Since(manager.lastSave) >= minSaveInterval { + // we can just save + err := manager.ForceSave() + manager.lastSave = time.Now() + manager.nextPlannedSave = time.Time{} // re-zero it; assume all pending desires to save are fulfilled + return err + } else if manager.nextPlannedSave.IsZero() { + // No save planned yet, we should plan one. 
+ next := manager.lastSave.Add(minSaveInterval) + manager.nextPlannedSave = next + go func() { + time.Sleep(time.Until(next)) + manager.Save() + }() + } + // else nextPlannedSave wasn't Zero so there's a save planned elsewhere that'll + // fulfill this + return nil +} + +// ForceSave saves the given State to a file. It is an atomic operation on POSIX +// systems (by Renaming over the target file). +// This function logs errors at will and does not necessarily expect the caller +// to handle the error because there's little a caller can do in general other +// than just keep going. +// In addition, the StateManager internally buffers save requests in order to +// only save at most every STATE_SAVE_INTERVAL. +func (manager *basicStateManager) ForceSave() error { + manager.savingLock.Lock() + defer manager.savingLock.Unlock() + seelog.Info("Saving state!") + s := manager.state + s.Version = ECSDataVersion + + data, err := json.Marshal(s) + if err != nil { + seelog.Error("Error saving state; could not marshal data; this is odd", "err", err) + return err + } + return manager.writeFile(data) +} + +// Load reads state off the disk from the well-known filepath and loads it into +// the passed State object. +func (manager *basicStateManager) Load() error { + s := manager.state + seelog.Info("Loading state!") + data, err := manager.readFile() + if err != nil { + seelog.Error("Error reading existing state file", "err", err) + return err + } + if data == nil { + return nil + } + // Dry-run to make sure this is a version we can understand + err = manager.dryRun(data) + if err != nil { + return err + } + // Now load it into the actual state. The reason we do this with the + // intermediate state is that we *must* unmarshal directly into the + // "saveable" pointers we were given in AddSaveable; if we unmarshal + // directly into a map with values of pointers, those pointers are lost. 
+ // We *must* unmarshal this way because the existing pointers could have + // semi-initialized data (and are actually expected to) + + var intermediate intermediateState + err = json.Unmarshal(data, &intermediate) + if err != nil { + seelog.Debug("Could not unmarshal into intermediate") + return err + } + + for key, rawJSON := range intermediate.Data { + actualPointer, ok := manager.state.Data[key] + if !ok { + seelog.Error("Loading state: potentially malformed json key of " + key) + continue + } + err = json.Unmarshal(rawJSON, actualPointer) + if err != nil { + seelog.Debug("Could not unmarshal into actual") + return err + } + } + + seelog.Debug("Loaded state!", "state", s) + return nil +} + +func (manager *basicStateManager) dryRun(data []byte) error { + // Dry-run to make sure this is a version we can understand + tmps := versionOnlyState{} + err := json.Unmarshal(data, &tmps) + if err != nil { + seelog.Critical("Could not unmarshal existing state; corrupted data?", "err", err, "data", data) + return err + } + if tmps.Version > ECSDataVersion { + strversion := strconv.Itoa(tmps.Version) + return errors.New("Unsupported data format: Version " + strversion + " not " + strconv.Itoa(ECSDataVersion)) + } + return nil +} diff --git a/agent/statemanager/state_manager_test.go b/agent/statemanager/state_manager_test.go new file mode 100644 index 00000000000..5fd77523353 --- /dev/null +++ b/agent/statemanager/state_manager_test.go @@ -0,0 +1,516 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package statemanager_test + +import ( + "path/filepath" + "testing" + "time" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStateManagerNonexistantDirectory(t *testing.T) { + cfg := &config.Config{DataDir: "/path/to/directory/that/doesnt/exist"} + _, err := statemanager.NewStateManager(cfg) + assert.Error(t, err) +} + +func TestLoadsV1DataCorrectly(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v1", "1", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v1", "1")} + + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), + nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, cluster, "test") + assert.True(t, sequenceNumber == 0) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + var deadTask 
*apitask.Task + for _, task := range tasks { + if task.Arn == "arn:aws:ecs:us-west-2:1234567890:task/f44b4fc9-adb0-4f4f-9dff-871512310588" { + deadTask = task + } + } + + require.NotNil(t, deadTask) + assert.Equal(t, deadTask.GetSentStatus(), apitaskstatus.TaskStopped) + assert.Equal(t, deadTask.Containers[0].SentStatusUnsafe, apicontainerstatus.ContainerStopped) + assert.Equal(t, deadTask.Containers[0].DesiredStatusUnsafe, apicontainerstatus.ContainerStopped) + assert.Equal(t, deadTask.Containers[0].KnownStatusUnsafe, apicontainerstatus.ContainerStopped) + + exitCode := deadTask.Containers[0].KnownExitCodeUnsafe + require.NotNil(t, exitCode) + assert.Equal(t, *exitCode, 128) + + expected, _ := time.Parse(time.RFC3339, "2015-04-28T17:29:48.129140193Z") + assert.Equal(t, deadTask.GetKnownStatusTime(), expected) +} + +func TestLoadsV13DataCorrectly(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v13", "1", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v13", "1")} + + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + + assert.Equal(t, "test-statefile", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + var deadTask *apitask.Task + for _, task := range tasks { + if task.Arn == 
"arn:aws:ecs:us-west-2:1234567890:task/test-statefile/6a86da4c40ed4a0b94cf359e840dda98" { + deadTask = task + } + } + require.NotNil(t, deadTask) + assert.Equal(t, deadTask.GetSentStatus(), apitaskstatus.TaskStopped) + assert.Equal(t, deadTask.Containers[0].SentStatusUnsafe, apicontainerstatus.ContainerStopped) + assert.Equal(t, deadTask.Containers[0].DesiredStatusUnsafe, apicontainerstatus.ContainerStopped) + assert.Equal(t, deadTask.Containers[0].KnownStatusUnsafe, apicontainerstatus.ContainerStopped) + + exitCode := deadTask.Containers[0].KnownExitCodeUnsafe + require.NotNil(t, exitCode) + assert.Equal(t, *exitCode, 128) + + expected, _ := time.Parse(time.RFC3339, "2019-06-26T15:56:54.353114618Z") + assert.Equal(t, deadTask.GetKnownStatusTime(), expected) + +} + +// verify that the state manager correctly loads the existing container health check related fields in state file. +// if we change those fields in the future, we should modify this test to test the new fields +func TestLoadsDataForContainerHealthCheckTask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v10", "container-health-check", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v10", "container-health-check")} + + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + + 
assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/e4e6c98c-aa44-4146-baf9-431b04c0d162", task.Arn) + assert.Equal(t, "chc-state", task.Family) + assert.Equal(t, 1, len(task.Containers)) + + container := task.Containers[0] + assert.Equal(t, "container_1", container.Name) + assert.Equal(t, "docker", container.HealthCheckType) + assert.NotNil(t, container.DockerConfig) + assert.Equal(t, "{\"HealthCheck\":{\"Test\":[\"CMD\",\"echo\",\"hello\"],\"Interval\":30000000000,\"Timeout\":5000000000,\"Retries\":3}}", *container.DockerConfig.Config) +} + +// verify that the state manager correctly loads the existing private registry related fields in state file. +// if we change those fields in the future, we should modify this test to test the new fields +func TestLoadsDataForPrivateRegistryTask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v14", "private-registry", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v14", "private-registry")} + + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + + assert.Equal(t, "test-statefile", cluster) + 
assert.EqualValues(t, 0, sequenceNumber) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/19e8453e9a404cb58d55aaa0df65b4f5", task.Arn) + assert.Equal(t, "private-registry-state", task.Family) + assert.Equal(t, 1, len(task.Containers)) + + container := task.Containers[0] + assert.Equal(t, "container_1", container.Name) + assert.NotNil(t, container.RegistryAuthentication) + + registryAuth := container.RegistryAuthentication + assert.Equal(t, "asm", registryAuth.Type) + assert.NotNil(t, registryAuth.ASMAuthData) + + asmAuthData := registryAuth.ASMAuthData + assert.Equal(t, "arn:aws:secretsmanager:us-west-2:1234567890:secret:FunctionalTest-PrivateRegistryAuth-I0nqxs", asmAuthData.CredentialsParameter) + assert.Equal(t, "us-west-2", asmAuthData.Region) +} + +// verify that the state manager correctly loads ssm secrets related fields in state file +func TestLoadsDataForSecretsTask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v17", "secrets", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v17", "secrets")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "test-statefile", cluster) + 
assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/7e1bfc28fe764a789a721710258337ff", task.Arn) + assert.Equal(t, "test-secret-state", task.Family) + assert.Equal(t, 1, len(task.Containers)) + container := task.Containers[0] + assert.Equal(t, "container_1", container.Name) + assert.NotNil(t, container.Secrets) + secret := container.Secrets[0] + assert.Equal(t, "ENVIRONMENT_VARIABLE", secret.Type) + assert.Equal(t, "mysecret", secret.Name) + assert.Equal(t, "us-west-2", secret.Region) + assert.Equal(t, "/ecs/secrets/test", secret.ValueFrom) + assert.Equal(t, "ssm", secret.Provider) +} + +func TestLoadsDataForAddingAvailabilityZoneInTask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v18", "availabilityZone", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v18", "availabilityZone")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID, availabilityZone string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + statemanager.AddSaveable("AvailabilityZone", &availabilityZone), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + 
assert.Equal(t, 1, len(tasks)) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn) + assert.Equal(t, 1, len(task.Containers)) + assert.Equal(t, "us-west-2c", availabilityZone) +} + +// verify that the state manager correctly loads asm secrets related fields in state file +func TestLoadsDataForASMSecretsTask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v18", "secrets", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v18", "secrets")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn) + assert.Equal(t, "secrets-state", task.Family) + assert.Equal(t, 1, len(task.Containers)) + container := task.Containers[0] + assert.Equal(t, "container_1", container.Name) + assert.NotNil(t, container.Secrets) + secret := container.Secrets[1] + assert.Equal(t, "ENVIRONMENT_VARIABLE", secret.Type) + assert.Equal(t, "asm-secret", secret.Name) + assert.Equal(t, "us-west-2", secret.Region) + 
assert.Equal(t, "secret-value-from", secret.ValueFrom) + assert.Equal(t, "asm", secret.Provider) +} + +// verify that the state manager correctly loads container ordering related fields in state file +func TestLoadsDataForContainerOrdering(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v20", "containerOrdering", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v20", "containerOrdering")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn) + assert.Equal(t, "container-ordering-state", task.Family) + assert.Equal(t, 2, len(task.Containers)) + + dependsOn := task.Containers[1].DependsOnUnsafe + assert.Equal(t, "container_1", dependsOn[0].ContainerName) + assert.Equal(t, "START", dependsOn[0].Condition) +} + +func TestLoadsDataForPerContainerTimeouts(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v20", "perContainerTimeouts", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer 
cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v20", "perContainerTimeouts")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn) + assert.Equal(t, "per-container-timeouts", task.Family) + assert.Equal(t, 1, len(task.Containers)) + + c1 := task.Containers[0] + assert.Equal(t, uint(10), c1.StartTimeout) + assert.Equal(t, uint(10), c1.StopTimeout) +} + +func TestLoadsDataForContainerRuntimeID(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v23", "perContainerRuntimeID", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v23", "perContainerRuntimeID")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", 
&containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:123456789012:task/70947c96-f64e-483a-a612-3fd4303546e7", task.Arn) + assert.Equal(t, "sleep360", task.Family) + assert.Equal(t, 1, len(task.Containers)) + assert.Equal(t, "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", task.Containers[0].RuntimeID) +} + +func TestLoadsDataForContainerImageDigest(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v24", "perContainerImageDigest", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v24", "perContainerImageDigest")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + task := tasks[0] + 
assert.Equal(t, "arn:aws:ecs:us-west-2:123456789012:task/20ceacb3-2806-4701-a4d4-098515cebfe9", task.Arn) + assert.Equal(t, "sleep360", task.Family) + assert.Equal(t, 1, len(task.Containers)) + assert.Equal(t, "sha256:9f1003c480699be56815db0f8146ad2e22efea85129b5b5983d0e0fb52d9ab70", task.Containers[0].ImageDigest) +} + +func TestLoadsDataSeqTaskManifest(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v25", "seqNumTaskManifest", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v25", "seqNumTaskManifest")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber, seqNumTaskManifest int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + statemanager.AddSaveable("seqNumTaskManifest", &seqNumTaskManifest), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + assert.EqualValues(t, 7, seqNumTaskManifest) +} + +func TestLoadsDataForEnvFiles(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v28", "environmentFiles", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v28", "environmentFiles")} + taskEngine := engine.NewTaskEngine(&config.Config{}, 
nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:123456789011:task/70947c96-f64e-483a-a612-3fd4303546e7", task.Arn) + assert.Equal(t, "sleep360", task.Family) +} diff --git a/agent/statemanager/state_manager_unix.go b/agent/statemanager/state_manager_unix.go new file mode 100644 index 00000000000..8366b08af81 --- /dev/null +++ b/agent/statemanager/state_manager_unix.go @@ -0,0 +1,101 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package statemanager + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/cihub/seelog" +) + +/* +On Linux, the basic approach for attempting to ensure that the state file is +written out correctly relies on behaviors of Linux and the ext* family of +filesystems. + +On each save, the agent creates a new temporary file where it +writes out the json object. Once the file is written, it gets renamed to the +well-known name of the state file. Under the assumption of Linux + ext*, this +is an atomic operation; rename is changing the hard link of the well-known file +to point to the inode of the temporary file. The original file inode now has no +links, and is considered free space once the opened file handles to that inode +are closed. + +On each load, the agent opens a well-known file name for the state file and +reads it. +*/ + +func newPlatformDependencies() platformDependencies { + return nil +} + +func (manager *basicStateManager) readFile() ([]byte, error) { + // Note that even if Save overwrites the file we're looking at here, we + // still hold the old inode and should read the old data so no locking is + // needed (given Linux and the ext* family of fs at least). + file, err := os.Open(filepath.Join(manager.statePath, ecsDataFile)) + if err != nil { + if os.IsNotExist(err) { + // Happens every first run; not a real error + return nil, nil + } + return nil, err + } + return ioutil.ReadAll(file) +} + +func (manager *basicStateManager) writeFile(data []byte) error { + // Make our temp-file on the same volume as our data-file to ensure we can + // actually move it atomically; cross-device renaming will error out. 
+ tmpfile, err := ioutil.TempFile(manager.statePath, "tmp_ecs_agent_data") + if err != nil { + seelog.Errorf("Error saving state; could not create temp file to save state, err: %v", err) + return err + } + _, err = tmpfile.Write(data) + if err != nil { + seelog.Errorf("Error saving state; could not write to temp file to save state, err: %v", err) + return err + } + + // flush temp state file to disk + err = tmpfile.Sync() + if err != nil { + seelog.Errorf("Error flusing state file, err: %v", err) + return err + } + + err = os.Rename(tmpfile.Name(), filepath.Join(manager.statePath, ecsDataFile)) + if err != nil { + seelog.Errorf("Error saving state; could not move to data file, err: %v", err) + return err + } + + stateDir, err := os.Open(manager.statePath) + if err != nil { + seelog.Errorf("Error opening state path, err: %v", err) + return err + } + + // sync directory entry of the new state file to disk + err = stateDir.Sync() + if err != nil { + seelog.Errorf("Error syncing state file directory entry, err: %v", err) + } + return err +} diff --git a/agent/statemanager/state_manager_unix_test.go b/agent/statemanager/state_manager_unix_test.go new file mode 100644 index 00000000000..0fa08bc19a9 --- /dev/null +++ b/agent/statemanager/state_manager_unix_test.go @@ -0,0 +1,331 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package statemanager_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + engine_testutils "github.com/aws/amazon-ecs-agent/agent/engine/testutils" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStateManager(t *testing.T) { + tmpDir, err := ioutil.TempDir("/tmp", "ecs_statemanager_test") + assert.Nil(t, err) + defer os.RemoveAll(tmpDir) + cfg := &config.Config{DataDir: tmpDir} + manager, err := statemanager.NewStateManager(cfg) + assert.Nil(t, err, "Error loading manager") + + err = manager.Load() + assert.Nil(t, err, "Expected loading a non-existent file to not be an error") + + // Now let's make some state to save + containerInstanceArn := "" + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), + nil, nil, nil) + + manager, err = statemanager.NewStateManager(cfg, statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn)) + require.Nil(t, err) + + containerInstanceArn = "containerInstanceArn" + + testTask := &apitask.Task{Arn: "test-arn"} + taskEngine.(*engine.DockerTaskEngine).State().AddTask(testTask) + + err = manager.Save() + require.Nil(t, err, "Error saving state") + + assertFileMode(t, filepath.Join(tmpDir, "ecs_agent_data.json")) + + // Now make sure we can load that state sanely + loadedTaskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, 
dockerstate.NewTaskEngineState(), + nil, nil, nil) + var loadedContainerInstanceArn string + + manager, err = statemanager.NewStateManager(cfg, statemanager.AddSaveable("TaskEngine", &loadedTaskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &loadedContainerInstanceArn)) + require.Nil(t, err) + + err = manager.Load() + require.Nil(t, err, "Error loading state") + + assert.Equal(t, containerInstanceArn, loadedContainerInstanceArn, "Did not load containerInstanceArn correctly") + + if !engine_testutils.DockerTaskEnginesEqual(loadedTaskEngine.(*engine.DockerTaskEngine), (taskEngine.(*engine.DockerTaskEngine))) { + t.Error("Did not load taskEngine correctly") + } + + // I'd rather double check .Equal there; let's make sure ListTasks agrees. + tasks, err := loadedTaskEngine.ListTasks() + assert.Nil(t, err, "Error listing tasks") + require.Equal(t, 1, len(tasks), "Should have a task!") + assert.Equal(t, "test-arn", tasks[0].Arn, "Wrong arn") +} + +func assertFileMode(t *testing.T, path string) { + info, err := os.Stat(path) + assert.Nil(t, err) + + mode := info.Mode() + assert.Equal(t, os.FileMode(0600), mode, "Wrong file mode") +} + +// verify that the state manager correctly loads the existing task networking related fields in state file. 
+// if we change those fields in the future, we should modify this test to test the new fields +func TestLoadsDataForAWSVPCTask(t *testing.T) { + testCases := []struct { + dir string + name string + }{ + {"task-networking", "original_v11_encoding_scheme"}, + {"task-networking-refactor", "refactored_v11_encoding_scheme"}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v11", tc.dir)} + + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + + assert.Equal(t, "state-file", cluster) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", task.Arn) + assert.Equal(t, "task-networking-state", task.Family) + assert.NotNil(t, task.ENIs) + + eni := task.ENIs[0] + assert.Equal(t, "eni-089ba8329b8e3f6ec", eni.ID) + assert.Equal(t, "ip-172-31-10-246.us-west-2.compute.internal", eni.GetHostname()) + + ipv4Addresses := eni.GetIPV4Addresses() + assert.Equal(t, 1, len(ipv4Addresses)) + assert.Equal(t, "172.31.10.246", ipv4Addresses[0]) + }) + } + +} + +// verify that the state manager correctly loads gpu related fields in state file +func TestLoadsDataForGPU(t *testing.T) { + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v18", "gpu")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, 
dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "state-file", cluster) + assert.EqualValues(t, 0, sequenceNumber) + + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn) + assert.Equal(t, "gpu-state", task.Family) + assert.Equal(t, 1, len(task.Containers)) + assert.Equal(t, 2, len(task.Associations)) + + association1 := task.Associations[0] + assert.Equal(t, "container_1", association1.Containers[0]) + assert.Equal(t, "gpu", association1.Type) + assert.Equal(t, "0", association1.Name) + + association2 := task.Associations[1] + assert.Equal(t, "container_1", association2.Containers[0]) + assert.Equal(t, "gpu", association2.Type) + assert.Equal(t, "1", association2.Name) + + container := task.Containers[0] + assert.Equal(t, "container_1", container.Name) + assert.Equal(t, []string{"0", "1"}, container.GPUIDs) + assert.Equal(t, "0,1", container.Environment["NVIDIA_VISIBLE_DEVICES"]) +} + +func TestLoadsDataForFirelensTask(t *testing.T) { + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v23", "firelens")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := 
statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "test-logrouter-p2", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Len(t, tasks, 1) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", task.Arn) + assert.Len(t, task.Containers, 2) + foundFirelensContainer := false + for _, container := range task.Containers { + if container.FirelensConfig != nil { + foundFirelensContainer = true + assert.Equal(t, "fluentd", container.FirelensConfig.Type) + assert.Equal(t, "true", container.FirelensConfig.Options["enable-ecs-log-metadata"]) + } + } + assert.True(t, foundFirelensContainer) + + assert.Len(t, task.ResourcesMapUnsafe, 2) + assert.Len(t, task.ResourcesMapUnsafe["firelens"], 1) + firelensResource, ok := task.ResourcesMapUnsafe["firelens"][0].(*firelens.FirelensResource) + assert.True(t, ok) + assert.Equal(t, "test-logrouter-p2", firelensResource.GetCluster()) + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", firelensResource.GetTaskARN()) + assert.Equal(t, "test-firelens-beta-2-bridge:1", firelensResource.GetTaskDefinition()) + assert.Contains(t, firelensResource.GetContainerToLogOptions(), "app") + assert.True(t, firelensResource.GetECSMetadataEnabled()) + assert.Equal(t, "i-1234567890", firelensResource.GetEC2InstanceID()) + assert.Equal(t, "/data/firelens/265f161270634aeaab7d0e88c29389c3", firelensResource.GetResourceDir()) + assert.Equal(t, resourcestatus.ResourceCreated, 
firelensResource.GetKnownStatus()) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResource.GetDesiredStatus()) +} + +func TestLoadsDataForFirelensTaskWithExternalConfig(t *testing.T) { + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v24", "firelens")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "test-logrouter-p2", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Len(t, tasks, 1) + task := tasks[0] + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", task.Arn) + assert.Len(t, task.Containers, 2) + foundFirelensContainer := false + for _, container := range task.Containers { + if container.FirelensConfig != nil { + foundFirelensContainer = true + assert.Equal(t, "fluentbit", container.FirelensConfig.Type) + assert.Equal(t, "file", container.FirelensConfig.Options["config-file-type"]) + assert.Equal(t, "/tmp/dummy.conf", container.FirelensConfig.Options["config-file-value"]) + } + } + assert.True(t, foundFirelensContainer) + + assert.Len(t, task.ResourcesMapUnsafe, 2) + assert.Len(t, task.ResourcesMapUnsafe["firelens"], 1) + firelensResource, ok := task.ResourcesMapUnsafe["firelens"][0].(*firelens.FirelensResource) + assert.True(t, ok) + assert.Equal(t, "test-logrouter-p2", 
firelensResource.GetCluster()) + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", firelensResource.GetTaskARN()) + assert.Equal(t, "test-firelens-local-bit:2", firelensResource.GetTaskDefinition()) + assert.Contains(t, firelensResource.GetContainerToLogOptions(), "app") + assert.True(t, firelensResource.GetECSMetadataEnabled()) + assert.Equal(t, "i-1234567890", firelensResource.GetEC2InstanceID()) + assert.Equal(t, "/data/firelens/027002862d4b485ba3da5e935e0bc10c", firelensResource.GetResourceDir()) + assert.Equal(t, "file", firelensResource.GetExternalConfigType()) + assert.Equal(t, "/tmp/dummy.conf", firelensResource.GetExternalConfigValue()) + assert.Equal(t, "xxx", firelensResource.GetExecutionCredentialsID()) + assert.Equal(t, "us-west-2", firelensResource.GetRegion()) + assert.Equal(t, "bridge", firelensResource.GetNetworkMode()) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResource.GetKnownStatus()) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResource.GetDesiredStatus()) +} + +func TestLoadsDataForEFSGATask(t *testing.T) { + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v27", "efs")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + require.NoError(t, err) + err = stateManager.Load() + require.NoError(t, err) + tasks, err := taskEngine.ListTasks() + require.NoError(t, err) + assert.Len(t, tasks, 1) + task := tasks[0] + + taskVolumes := 
task.Volumes + require.Len(t, taskVolumes, 1) + require.Len(t, task.ResourcesMapUnsafe, 2) + require.Len(t, task.ResourcesMapUnsafe["dockerVolume"], 1) + volumeResource, ok := task.ResourcesMapUnsafe["dockerVolume"][0].(*taskresourcevolume.VolumeResource) + require.True(t, ok) + assert.Equal(t, "efs-html", volumeResource.Name) + assert.Equal(t, "123", volumeResource.GetPauseContainerPID()) + assert.Equal(t, "ecs-test-efs-creds-ap-awsvpc-2-efs-html-xxx", volumeResource.VolumeConfig.DockerVolumeName) + assert.Equal(t, "amazon-ecs-volume-plugin", volumeResource.VolumeConfig.Driver) + require.NotNil(t, volumeResource.VolumeConfig.DriverOpts) + assert.Equal(t, "fs-xxx:/", volumeResource.VolumeConfig.DriverOpts["device"]) + assert.Equal(t, "tls,tlsport=20050,iam,awscredsuri=/v2/credentials/xxx,accesspoint=fsap-xxx,netns=/proc/123/ns/net", volumeResource.VolumeConfig.DriverOpts["o"]) + assert.Equal(t, "efs", volumeResource.VolumeConfig.DriverOpts["type"]) +} diff --git a/agent/statemanager/state_manager_win_test.go b/agent/statemanager/state_manager_win_test.go new file mode 100644 index 00000000000..fc75d44a9c6 --- /dev/null +++ b/agent/statemanager/state_manager_win_test.go @@ -0,0 +1,85 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package statemanager_test + +import ( + "path/filepath" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + "github.com/aws/amazon-ecs-agent/agent/statemanager" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadsDataForGMSATask(t *testing.T) { + cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v26", "gmsa", "ecs_agent_data.json")) + require.Nil(t, err, "Failed to set up test") + defer cleanup() + + cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v26", "gmsa")} + taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + stateManager, err := statemanager.NewStateManager(cfg, + statemanager.AddSaveable("TaskEngine", taskEngine), + statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn), + statemanager.AddSaveable("Cluster", &cluster), + statemanager.AddSaveable("EC2InstanceID", &savedInstanceID), + statemanager.AddSaveable("SeqNum", &sequenceNumber), + ) + + assert.NoError(t, err) + err = stateManager.Load() + assert.NoError(t, err) + assert.Equal(t, "gmsa-test", cluster) + assert.EqualValues(t, 0, sequenceNumber) + tasks, err := taskEngine.ListTasks() + assert.NoError(t, err) + assert.Equal(t, 1, len(tasks)) + task := tasks[0] + + assert.Equal(t, "arn:aws:ecs:ap-northeast-1:1234567890:task/b8e2bd3c-c82a-4b43-9bde-199ae05b49a5", task.Arn) + assert.Equal(t, "gmsa-test", task.Family) + assert.Equal(t, 1, len(task.Containers)) + container := task.Containers[0] + assert.Equal(t, "windows_sample_app", container.Name) + assert.NotNil(t, container.DockerConfig.HostConfig) + + 
resource, ok := task.GetCredentialSpecResource() + assert.True(t, ok) + assert.NotEmpty(t, resource) + + credSpecResource := resource[0].(*credentialspec.CredentialSpecResource) + + assert.Equal(t, status.ResourceCreated, credSpecResource.GetDesiredStatus()) + assert.Equal(t, status.ResourceCreated, credSpecResource.GetKnownStatus()) + + credSpecMap := credSpecResource.CredSpecMap + assert.NotEmpty(t, credSpecMap) + + testCredSpec := "credentialspec:file://WebApp01.json" + expectedCredSpec := "credentialspec=file://WebApp01.json" + + targetCredSpec, err := credSpecResource.GetTargetMapping(testCredSpec) + assert.NoError(t, err) + assert.Equal(t, expectedCredSpec, targetCredSpec) +} diff --git a/agent/statemanager/state_manager_windows.go b/agent/statemanager/state_manager_windows.go new file mode 100644 index 00000000000..3b5b52d1893 --- /dev/null +++ b/agent/statemanager/state_manager_windows.go @@ -0,0 +1,185 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package statemanager + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/aws/amazon-ecs-agent/agent/statemanager/dependencies" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + "github.com/cihub/seelog" + + "golang.org/x/sys/windows/registry" +) + +const ( + ecsDataFileRootKey = registry.LOCAL_MACHINE + ecsDataFileKeyPath = `SOFTWARE\Amazon\ECS Agent\State File` + ecsDataFileValueName = "path" + // overrideValueNameEnv is the name of an environment variable that can be used to override the + // name of the registry value queried for the state file path. This is useful in integration- + // and functional-tests, but should not be set for any non-test use-case. + overrideValueNameEnv = "ZZZ_I_KNOW_SETTING_TEST_VALUES_IN_PRODUCTION_IS_NOT_SUPPORTED" +) + +/* +On Windows, the basic approach for attempting to ensure that the state file is +written out correctly relies on the Windows Registry and documented behavior of +the Win32 API. + +On each save, the agent creates a new file where it writes out the json object. +Once the file is written, it gets flushed to disk using the Win32 +FlushFileBuffers API. After the file is flushed to disk, a registry key is +updated to indicate the new file name. Finally, the old file retrieved from the +registry key is deleted. + +On each load, the agent reads a well-known registry key to find the name of the +file to load. 
+*/ + +type windowsDependencies struct { + registry dependencies.WindowsRegistry +} + +func newPlatformDependencies() platformDependencies { + return windowsDependencies{ + registry: dependencies.StdRegistry{}, + } +} + +var readAll = ioutil.ReadAll + +var isNotExist = os.IsNotExist + +var open = func(name string) (oswrapper.File, error) { + return os.Open(name) +} + +func (manager *basicStateManager) readFile() ([]byte, error) { + manager.savingLock.Lock() + defer manager.savingLock.Unlock() + path, err := manager.getPath() + if err != nil { + if err == registry.ErrNotExist { + // Happens on the first run; not a real error + return nil, nil + } + return nil, err + } + file, err := open(filepath.Clean(path)) + if err != nil { + if isNotExist(err) { + // Happens every first run; not a real error + return nil, nil + } + return nil, err + } + defer file.Close() + return readAll(file) +} + +func (manager *basicStateManager) getPath() (string, error) { + deps := manager.platformDependencies.(windowsDependencies) + key, err := deps.registry.OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, registry.READ) + if err != nil { + return "", err + } + defer key.Close() + val, _, err := key.GetStringValue(valueName()) + if err != nil { + return "", err + } + return val, nil +} + +func valueName() string { + valueName := ecsDataFileValueName + if os.Getenv(overrideValueNameEnv) != "" { + valueName = os.Getenv(overrideValueNameEnv) + } + return valueName +} + +var remove = os.Remove + +var tempFile = func(dir, pattern string) (oswrapper.File, error) { + return ioutil.TempFile(dir, pattern) +} + +func (manager *basicStateManager) writeFile(data []byte) error { + oldFile, err := manager.getPath() + if err != nil { + if err != registry.ErrNotExist { + return err + } + oldFile = "" + } + dataFile, err := tempFile(manager.statePath, ecsDataFile) + if err != nil { + seelog.Errorf("Error saving state; could not create file to save state: %v", err) + return err + } + defer dataFile.Close() 
+ _, err = dataFile.Write(data) + if err != nil { + seelog.Errorf("Error saving state; could not write to file to save state: %s %v ", dataFile.Name(), err) + return err + } + + // this calls FlushFileBuffers, see https://golang.org/src/syscall/syscall_windows.go#L523 + // TODO Investigate performance differences between FlushFileBuffers and unbuffered IO. + err = dataFile.Sync() + if err != nil { + seelog.Errorf("Error saving state; could not sync file to save state: %s %v", dataFile.Name(), err) + return err + } + err = dataFile.Close() + if err != nil { + seelog.Errorf("Error saving state; could not close file to save state: %s %v", dataFile.Name(), err) + return err + } + err = manager.savePath(dataFile.Name()) + if err != nil { + seelog.Errorf("Failed to save the data file path: %v", err) + return err + } + if oldFile == "" { + return nil + } + + err = remove(oldFile) + if err != nil { + seelog.Errorf("Error removing old file %s; err %v", oldFile, err) + return err + } + return nil +} + +func (manager *basicStateManager) savePath(path string) error { + deps := manager.platformDependencies.(windowsDependencies) + key, existed, err := deps.registry.CreateKey(ecsDataFileRootKey, ecsDataFileKeyPath, registry.SET_VALUE|registry.CREATE_SUB_KEY) + if err != nil { + seelog.Errorf("Failed to create registry key %s %v", ecsDataFileKeyPath, err) + return err + } + defer key.Close() + if !existed { + seelog.Infof(`Created new registry key: HKEY_LOCAL_MACHINE\%s`, ecsDataFileKeyPath) + } + return key.SetStringValue(valueName(), path) +} diff --git a/agent/statemanager/state_manager_windows_test.go b/agent/statemanager/state_manager_windows_test.go new file mode 100644 index 00000000000..55457c0acf0 --- /dev/null +++ b/agent/statemanager/state_manager_windows_test.go @@ -0,0 +1,342 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package statemanager + +import ( + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + mock_dependencies "github.com/aws/amazon-ecs-agent/agent/statemanager/dependencies/mocks" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows/registry" +) + +var ( + testError = errors.New("test error") + + mockNameImpl = func() string { + return `C:\new.json` + } + mockSyncImpl = func() error { + return nil + } +) + +func mockTempFileError() func() { + otempFile := tempFile + tempFile = func(dir, pattern string) (oswrapper.File, error) { + return nil, testError + } + + return func() { + tempFile = otempFile + } +} + +func mockOpenError() func() { + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return nil, testError + } + + return func() { + open = tempOpen + } +} + +func setup(t *testing.T, options ...Option) ( + *mock_dependencies.MockWindowsRegistry, + *mock_dependencies.MockRegistryKey, + oswrapper.File, + StateManager, func()) { + ctrl := gomock.NewController(t) + mockRegistry := mock_dependencies.NewMockWindowsRegistry(ctrl) + mockKey := mock_dependencies.NewMockRegistryKey(ctrl) + mockFile := mocks.NewMockFile() + + // TODO set this to platform-specific tmpdir + tmpDir, err := ioutil.TempDir("", 
"ecs_statemanager_test") + assert.Nil(t, err) + cleanup := func() { + ctrl.Finish() + os.RemoveAll(tmpDir) + } + cfg := &config.Config{DataDir: tmpDir} + manager, err := NewStateManager(cfg, options...) + assert.Nil(t, err) + basicManager := manager.(*basicStateManager) + basicManager.platformDependencies = windowsDependencies{ + registry: mockRegistry, + } + return mockRegistry, mockKey, mockFile, basicManager, cleanup +} + +func TestStateManagerLoadNoRegistryKey(t *testing.T) { + mockRegistry, _, _, manager, cleanup := setup(t) + defer cleanup() + + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(nil, registry.ErrNotExist) + + err := manager.Load() + assert.Nil(t, err, "Expected loading a non-existent file to not be an error") +} + +func TestStateManagerLoadNoFile(t *testing.T) { + mockRegistry, mockKey, _, manager, cleanup := setup(t) + defer cleanup() + + defer mockOpenError()() + isNotExist = func(err error) bool { + return true + } + defer func() { + isNotExist = os.IsNotExist + }() + + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil) + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil) + mockKey.EXPECT().Close() + + err := manager.Load() + assert.Nil(t, err, "Expected loading a non-existent file to not be an error") +} + +func TestStateManagerLoadError(t *testing.T) { + mockRegistry, mockKey, _, manager, cleanup := setup(t) + defer cleanup() + + defer mockOpenError()() + isNotExist = func(err error) bool { + return false + } + defer func() { + isNotExist = os.IsNotExist + }() + + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil) + 
mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil) + mockKey.EXPECT().Close() + + err := manager.Load() + assert.Equal(t, testError, err, "Expected error opening file to be an error") +} + +func TestStateManagerLoadState(t *testing.T) { + containerInstanceArn := "" + mockRegistry, mockKey, mockFile, manager, cleanup := setup(t, AddSaveable("ContainerInstanceArn", &containerInstanceArn)) + defer cleanup() + + data := `{"Version":1,"Data":{"ContainerInstanceArn":"foo"}}` + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil) + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil) + mockKey.EXPECT().Close() + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return mockFile, nil + } + readAll = func(r io.Reader) ([]byte, error) { + return []byte(data), nil + } + defer func() { + open = tempOpen + readAll = ioutil.ReadAll + }() + + err := manager.Load() + assert.Nil(t, err, "Expected loading correctly") + assert.Equal(t, "foo", containerInstanceArn) +} + +func TestStateManagerLoadV1Data(t *testing.T) { + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + mockRegistry, mockKey, _, manager, cleanup := setup(t, + AddSaveable("ContainerInstanceArn", &containerInstanceArn), + AddSaveable("Cluster", &cluster), + AddSaveable("EC2InstanceID", &savedInstanceID), + AddSaveable("SeqNum", &sequenceNumber)) + defer cleanup() + dataFile, err := os.Open(filepath.Join(".", "testdata", "v1", "1", ecsDataFile)) + assert.Nil(t, err, "Error opening test data") + defer dataFile.Close() + + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil) + 
mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil) + mockKey.EXPECT().Close() + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return dataFile, nil + } + readAll = func(r io.Reader) ([]byte, error) { + return ioutil.ReadAll(dataFile) + } + defer func() { + open = tempOpen + readAll = ioutil.ReadAll + }() + err = manager.Load() + assert.Nil(t, err, "Error loading state") + assert.Equal(t, "test", cluster, "Wrong cluster") + assert.Equal(t, int64(0), sequenceNumber, "v1 should give a sequence number of 0") + assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:container-instance/a9f8e650-e66e-466d-9b0e-3cbce3ba5245", containerInstanceArn) + assert.Equal(t, "i-00000000", savedInstanceID) +} + +func TestStateManagerLoadV13Data(t *testing.T) { + var containerInstanceArn, cluster, savedInstanceID string + var sequenceNumber int64 + + mockRegistry, mockKey, _, manager, cleanup := setup(t, + AddSaveable("ContainerInstanceArn", &containerInstanceArn), + AddSaveable("Cluster", &cluster), + AddSaveable("EC2InstanceID", &savedInstanceID), + AddSaveable("SeqNum", &sequenceNumber)) + defer cleanup() + + dataFile, err := os.Open(filepath.Join(".", "testdata", "v13", "1", ecsDataFile)) + assert.Nil(t, err, "Error opening test data") + defer dataFile.Close() + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return dataFile, nil + } + defer func() { + open = tempOpen + }() + + // TODO figure out why gomock does not like registry.READ as the mode + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil) + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil) + mockKey.EXPECT().Close() + err = manager.Load() + assert.Nil(t, err, "Error loading state") + assert.Equal(t, "test-statefile", cluster, "Wrong cluster") + assert.Equal(t, int64(0), sequenceNumber, "v13 should give a sequence number of 0") + 
assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:container-instance/test-statefile/b9c2c229d5ce4f52851f3e9e6d4db894", containerInstanceArn) + assert.Equal(t, "i-1234567890", savedInstanceID) +} + +func TestStateManagerSaveCreateFileError(t *testing.T) { + mockRegistry, mockKey, _, manager, cleanup := setup(t) + defer cleanup() + + // TODO figure out why gomock does not like registry.READ as the mode + gomock.InOrder( + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil), + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil), + mockKey.EXPECT().Close(), + ) + defer mockTempFileError()() + + err := manager.Save() + assert.Equal(t, testError, err, "expected error creating file") +} + +func TestStateManagerSaveSyncFileError(t *testing.T) { + mockRegistry, mockKey, mockFile, manager, cleanup := setup(t) + defer cleanup() + + mockFile.(*mocks.MockFile).NameImpl = mockNameImpl + mockFile.(*mock_oswrapper.MockFile).SyncImpl = func() error { + return errors.New("test error") + } + defer mockTempFileError()() + + // TODO figure out why gomock does not like registry.READ as the mode + gomock.InOrder( + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil), + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\data.json`, uint32(0), nil), + mockKey.EXPECT().Close(), + ) + err := manager.Save() + assert.Equal(t, testError, err, "expected error creating file") +} + +func TestStateManagerSave(t *testing.T) { + mockRegistry, mockKey, mockFile, manager, cleanup := setup(t) + defer cleanup() + + otempFile := tempFile + tempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + remove = func(name string) error { + return nil + } + mockFile.(*mocks.MockFile).NameImpl = mockNameImpl + mockFile.(*mock_oswrapper.MockFile).SyncImpl = mockSyncImpl + defer func() { + tempFile = otempFile + remove = 
os.Remove + }() + + // TODO figure out why gomock does not like registry.READ as the mode + gomock.InOrder( + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil), + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(`C:\old.json`, uint32(0), nil), + mockKey.EXPECT().Close(), + mockRegistry.EXPECT().CreateKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, false, nil), + mockKey.EXPECT().SetStringValue(ecsDataFileValueName, `C:\new.json`), + mockKey.EXPECT().Close(), + ) + err := manager.Save() + assert.Nil(t, err) +} + +func TestStateManagerNoOldStateRemoval(t *testing.T) { + mockRegistry, mockKey, mockFile, manager, cleanup := setup(t) + defer cleanup() + + otempFile := tempFile + tempFile = func(dir, pattern string) (oswrapper.File, error) { + return mockFile, nil + } + mockFile.(*mock_oswrapper.MockFile).NameImpl = mockNameImpl + mockFile.(*mock_oswrapper.MockFile).SyncImpl = mockSyncImpl + + defer func() { + tempFile = otempFile + }() + + gomock.InOrder( + mockRegistry.EXPECT().OpenKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, nil), + mockKey.EXPECT().GetStringValue(ecsDataFileValueName).Return(``, uint32(0), nil), + mockKey.EXPECT().Close(), + mockRegistry.EXPECT().CreateKey(ecsDataFileRootKey, ecsDataFileKeyPath, gomock.Any()).Return(mockKey, false, nil), + mockKey.EXPECT().SetStringValue(ecsDataFileValueName, `C:\new.json`), + mockKey.EXPECT().Close(), + ) + err := manager.Save() + assert.Nil(t, err) +} + +// TODO TestStateManagerSave + errors diff --git a/agent/statemanager/testdata/v1/1/ecs_agent_data.json b/agent/statemanager/testdata/v1/1/ecs_agent_data.json new file mode 100644 index 00000000000..41cdf319fa3 --- /dev/null +++ b/agent/statemanager/testdata/v1/1/ecs_agent_data.json @@ -0,0 +1,435 @@ +{ + "Data": { + "Cluster": "test", + "ContainerInstanceArn": 
"arn:aws:ecs:us-west-2:1234567890:container-instance/a9f8e650-e66e-466d-9b0e-3cbce3ba5245", + "EC2InstanceID": "i-00000000", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/f44b4fc9-adb0-4f4f-9dff-871512310588", + "Family": "nginx", + "Version": "2", + "Containers": [ + { + "Name": "nginx", + "Image": "nginx", + "Command": null, + "Cpu": 10, + "Memory": 10, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [ + { + "ContainerPort": 80, + "HostPort": 80, + "BindIp": "" + } + ], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "DEAD", + "KnownStatus": "DEAD", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": { + "error": "API error (500): Cannot start container d1d1293c78bd44124ca841f0f17f48502426fd972545adee2f41aa5899bd41ea: Error starting userland proxy: listen tcp 0.0.0.0:80: bind: address already in use\n" + }, + "SentStatus": "DEAD", + "KnownExitCode": 128, + "KnownPortBindings": null, + "StatusLock": {} + } + ], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "DEAD", + "KnownTime": "2015-04-28T17:29:48.129140193Z", + "SentStatus": "DEAD" + }, + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/5bde044d-3425-4b8d-86a6-8a00b090c0c5", + "Family": "sleep5", + "Version": "2", + "Containers": [ + { + "Name": "sleep5", + "Image": "busybox", + "Command": [ + "sleep", + "5" + ], + "Cpu": 10, + "Memory": 10, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "DEAD", + "KnownStatus": "DEAD", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "RUNNING", + "ApplyingError": null, + "SentStatus": "DEAD", + "KnownExitCode": 137, + "KnownPortBindings": [], + "StatusLock": {} + } + ], + "volumes": [], + 
"DesiredStatus": "RUNNING", + "KnownStatus": "DEAD", + "KnownTime": "2015-04-28T17:29:48.130765849Z", + "SentStatus": "DEAD" + }, + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/86601083-0fad-4985-abf1-c097d3db0a89", + "Family": "datavolume-example", + "Version": "5", + "Containers": [ + { + "Name": "data-volume-container", + "Image": "busybox", + "Command": null, + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [ + { + "sourceVolume": "host", + "containerPath": "/data", + "readOnly": true + }, + { + "sourceVolume": "local", + "containerPath": "/data2", + "readOnly": false + } + ], + "portMappings": [], + "Essential": false, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "DEAD", + "IsInternal": false, + "AppliedStatus": "RUNNING", + "ApplyingError": null, + "SentStatus": "DEAD", + "KnownExitCode": 0, + "KnownPortBindings": [], + "StatusLock": {} + }, + { + "Name": "consumer1", + "Image": "busybox", + "Command": [ + "sleep", + "10" + ], + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [ + { + "sourceContainer": "data-volume-container", + "readOnly": false + } + ], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "CREATED", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": null, + "SentStatus": "CREATED", + "KnownExitCode": null, + "KnownPortBindings": null, + "StatusLock": {} + }, + { + "Name": "consumer2", + "Image": "busybox", + "Command": [ + "sleep", + "10" + ], + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [ + { + "sourceContainer": "data-volume-container", + "readOnly": false + } + ], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + 
"command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "CREATED", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": null, + "SentStatus": "CREATED", + "KnownExitCode": null, + "KnownPortBindings": null, + "StatusLock": {} + } + ], + "volumes": [ + { + "host": { + "sourcePath": "/tmp/host/path" + }, + "name": "host", + "type": "host" + }, + { + "host": {}, + "name": "local", + "type": "host" + } + ], + "DesiredStatus": "RUNNING", + "KnownStatus": "CREATED", + "KnownTime": "2015-04-28T17:33:31.650356347Z", + "SentStatus": "CREATED" + } + ], + "IdToContainer": { + "11ce82977cfb440101f612ba73039d845d4fcb869a02c711db0b82ad0d73117a": { + "DockerId": "11ce82977cfb440101f612ba73039d845d4fcb869a02c711db0b82ad0d73117a", + "DockerName": "ecs-sleep5-2-sleep5-f8d2b3c7f9ad98c5d501", + "Container": { + "Name": "sleep5", + "Image": "busybox", + "Command": [ + "sleep", + "5" + ], + "Cpu": 10, + "Memory": 10, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "DEAD", + "KnownStatus": "DEAD", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "RUNNING", + "ApplyingError": null, + "SentStatus": "DEAD", + "KnownExitCode": 137, + "KnownPortBindings": [], + "StatusLock": {} + } + }, + "a88f6da162828c9d8e4dedffe8cceca9774758d07cde7c0f06ec7d4880514847": { + "DockerId": "a88f6da162828c9d8e4dedffe8cceca9774758d07cde7c0f06ec7d4880514847", + "DockerName": "ecs-datavolume-example-5-consumer1-a49dc899a9e9a59b0e00", + "Container": { + "Name": "consumer1", + "Image": "busybox", + "Command": [ + "sleep", + "10" + ], + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [ + { + "sourceContainer": "data-volume-container", + "readOnly": false + } + ], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + 
"overrides": { + "command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "CREATED", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": null, + "SentStatus": "CREATED", + "KnownExitCode": null, + "KnownPortBindings": null, + "StatusLock": {} + } + }, + "c4b79e59ae7df0479d50004680f6a4804d6af1efb492eb6e77aa50c11b2f1027": { + "DockerId": "c4b79e59ae7df0479d50004680f6a4804d6af1efb492eb6e77aa50c11b2f1027", + "DockerName": "ecs-datavolume-example-5-consumer2-acee8693c1a5b6941300", + "Container": { + "Name": "consumer2", + "Image": "busybox", + "Command": [ + "sleep", + "10" + ], + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [ + { + "sourceContainer": "data-volume-container", + "readOnly": false + } + ], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "CREATED", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": null, + "SentStatus": "CREATED", + "KnownExitCode": null, + "KnownPortBindings": null, + "StatusLock": {} + } + }, + "c8bd6f423f9206f805d21ab4e3676bb5cc42ba0fe071f48d76de70853c8e1766": { + "DockerId": "c8bd6f423f9206f805d21ab4e3676bb5cc42ba0fe071f48d76de70853c8e1766", + "DockerName": "ecs-datavolume-example-5-data-volume-container-94feacb5c6ec87f8b301", + "Container": { + "Name": "data-volume-container", + "Image": "busybox", + "Command": null, + "Cpu": 100, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [ + { + "sourceVolume": "host", + "containerPath": "/data", + "readOnly": true + }, + { + "sourceVolume": "local", + "containerPath": "/data2", + "readOnly": false + } + ], + "portMappings": [], + "Essential": false, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "RUNNING", + "KnownStatus": "DEAD", + "IsInternal": 
false, + "AppliedStatus": "RUNNING", + "ApplyingError": null, + "SentStatus": "DEAD", + "KnownExitCode": 0, + "KnownPortBindings": [], + "StatusLock": {} + } + }, + "d1d1293c78bd44124ca841f0f17f48502426fd972545adee2f41aa5899bd41ea": { + "DockerId": "d1d1293c78bd44124ca841f0f17f48502426fd972545adee2f41aa5899bd41ea", + "DockerName": "ecs-nginx-2-nginx-e293f2f8c0c48cbd6c00", + "Container": { + "Name": "nginx", + "Image": "nginx", + "Command": null, + "Cpu": 10, + "Memory": 10, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [ + { + "ContainerPort": 80, + "HostPort": 80, + "BindIp": "" + } + ], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "desiredStatus": "DEAD", + "KnownStatus": "DEAD", + "RunDependencies": null, + "IsInternal": false, + "AppliedStatus": "CREATED", + "ApplyingError": { + "error": "API error (500): Cannot start container d1d1293c78bd44124ca841f0f17f48502426fd972545adee2f41aa5899bd41ea: Error starting userland proxy: listen tcp 0.0.0.0:80: bind: address already in use\n" + }, + "SentStatus": "DEAD", + "KnownExitCode": 128, + "KnownPortBindings": null, + "StatusLock": {} + } + } + }, + "IdToTask": { + "11ce82977cfb440101f612ba73039d845d4fcb869a02c711db0b82ad0d73117a": "arn:aws:ecs:us-west-2:1234567890:task/5bde044d-3425-4b8d-86a6-8a00b090c0c5", + "263004cd102ccd9af4b656a622c8368aa82aa994aa680f90e9a4ad6ea0cad829": "arn:aws:ecs:us-west-2:1234567890:task/86601083-0fad-4985-abf1-c097d3db0a89", + "a88f6da162828c9d8e4dedffe8cceca9774758d07cde7c0f06ec7d4880514847": "arn:aws:ecs:us-west-2:1234567890:task/86601083-0fad-4985-abf1-c097d3db0a89", + "c4b79e59ae7df0479d50004680f6a4804d6af1efb492eb6e77aa50c11b2f1027": "arn:aws:ecs:us-west-2:1234567890:task/86601083-0fad-4985-abf1-c097d3db0a89", + "c8bd6f423f9206f805d21ab4e3676bb5cc42ba0fe071f48d76de70853c8e1766": "arn:aws:ecs:us-west-2:1234567890:task/86601083-0fad-4985-abf1-c097d3db0a89", + 
"d1d1293c78bd44124ca841f0f17f48502426fd972545adee2f41aa5899bd41ea": "arn:aws:ecs:us-west-2:1234567890:task/f44b4fc9-adb0-4f4f-9dff-871512310588" + } + } + }, + "Version": 1 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v10/container-health-check/ecs_agent_data.json b/agent/statemanager/testdata/v10/container-health-check/ecs_agent_data.json new file mode 100644 index 00000000000..fe5e7979b19 --- /dev/null +++ b/agent/statemanager/testdata/v10/container-health-check/ecs_agent_data.json @@ -0,0 +1,160 @@ +{ + "Data":{ + "Cluster":"state-file", + "ContainerInstanceArn":"arn:aws:ecs:us-west-2:1234567890:container-instance/87c7c2f3-4525-46ec-89ac-8767155bf632", + "EC2InstanceID":"i-0da29eb1a8a98768b", + "TaskEngine":{ + "Tasks":[ + { + "Arn":"arn:aws:ecs:us-west-2:1234567890:task/e4e6c98c-aa44-4146-baf9-431b04c0d162", + "Family":"chc-state", + "Version":"1", + "Containers":[ + { + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], + "Cpu":0, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + "mountPoints":[ + + ], + "portMappings":[ + + ], + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{\"HealthCheck\":{\"Test\":[\"CMD\",\"echo\",\"hello\"],\"Interval\":30000000000,\"Timeout\":5000000000,\"Retries\":3}}", + "hostConfig":"{\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.24" + }, + "registryAuthentication":null, + "healthCheckType":"docker", + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"RUNNING", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null + } + ], + "volumes":[ + + ], + 
"DesiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "KnownTime":"2018-10-04T16:57:58.212188513Z", + "PullStartedAt":"2018-10-04T16:57:51.702445248Z", + "PullStoppedAt":"2018-10-04T16:57:57.494235837Z", + "ExecutionStoppedAt":"0001-01-01T00:00:00Z", + "SentStatus":"RUNNING", + "StartSequenceNumber":2, + "StopSequenceNumber":0, + "executionCredentialsID":"", + "ENI":null, + "MemoryCPULimitsEnabled":true + } + ], + "IdToContainer":{ + "3b4837307b5eb0da6fe5c36be26d0ba17dc81917cbb8d901c08b0f6ad3f37b57":{ + "DockerId":"3b4837307b5eb0da6fe5c36be26d0ba17dc81917cbb8d901c08b0f6ad3f37b57", + "DockerName":"ecs-chc-state-1-container1-c0fd8ad8bbc1c8bf3d00", + "Container":{ + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], + "Cpu":0, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + "mountPoints":[ + + ], + "portMappings":[ + + ], + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{\"HealthCheck\":{\"Test\":[\"CMD\",\"echo\",\"hello\"],\"Interval\":30000000000,\"Timeout\":5000000000,\"Retries\":3}}", + "hostConfig":"{\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.24" + }, + "registryAuthentication":null, + "healthCheckType":"docker", + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "AppliedStatus":"NONE", + "ApplyingError":null, + "SentStatus":"RUNNING", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null + } + } + }, + "IdToTask":{ + "3b4837307b5eb0da6fe5c36be26d0ba17dc81917cbb8d901c08b0f6ad3f37b57":"arn:aws:ecs:us-west-2:1234567890:task/e4e6c98c-aa44-4146-baf9-431b04c0d162" + }, + "ImageStates":[ + { + "Image":{ + 
"ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names":[ + "amazonlinux:1" + ], + "Size":165452304 + }, + "PulledAt":"2018-10-04T16:57:57.493946935Z", + "LastUsedAt":"2018-10-04T16:57:57.493948461Z" + } + ], + "ENIAttachments":null, + "IPToTask":{ + + } + } + }, + "Version":10 +} diff --git a/agent/statemanager/testdata/v11/task-networking-refactor/ecs_agent_data.json b/agent/statemanager/testdata/v11/task-networking-refactor/ecs_agent_data.json new file mode 100644 index 00000000000..1f4f26f117d --- /dev/null +++ b/agent/statemanager/testdata/v11/task-networking-refactor/ecs_agent_data.json @@ -0,0 +1,266 @@ +{ + "Data":{ + "Cluster":"state-file", + "ContainerInstanceArn":"arn:aws:ecs:us-west-2:1234567890:container-instance/3825ec5c-d63c-4752-b9c5-71fc45f5cac4", + "EC2InstanceID":"i-0da29eb1a8a98768b", + "TaskEngine":{ + "Tasks":[ + { + "Arn":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "Family":"task-networking-state", + "Version":"1", + "Containers":[ + { + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], + "Cpu":100, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + "mountPoints":[ + + ], + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{}", + "hostConfig":"{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.18" + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":[ + { + "ContainerName":"~internal~ecs~pause", + "SatisfiedStatus":"RESOURCES_PROVISIONED", + "DependentStatus":"PULLED" + } + ] + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "ApplyingError":null, + "SentStatus":"RUNNING", + 
"metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null + }, + { + "Name":"~internal~ecs~pause", + "Image":"amazon/amazon-ecs-pause:0.1.0", + "ImageID":"", + "Command":null, + "Cpu":0, + "Memory":0, + "Links":null, + "volumesFrom":null, + "mountPoints":null, + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":null, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":null, + "hostConfig":null, + "version":null + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RESOURCES_PROVISIONED", + "KnownStatus":"RESOURCES_PROVISIONED", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"CNI_PAUSE", + "ApplyingError":null, + "SentStatus":"NONE", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null, + "SteadyStateStatus":"RESOURCES_PROVISIONED" + } + ], + "volumes":[ + + ], + "DesiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "KnownTime":"2018-10-04T17:35:02.384835623Z", + "PullStartedAt":"2018-10-04T17:35:00.297708436Z", + "PullStoppedAt":"2018-10-04T17:35:01.803432136Z", + "ExecutionStoppedAt":"0001-01-01T00:00:00Z", + "SentStatus":"RUNNING", + "StartSequenceNumber":5, + "StopSequenceNumber":0, + "executionCredentialsID":"", + "ENI":[{ + "ec2Id":"eni-089ba8329b8e3f6ec", + "IPV4Addresses":[ + { + "Primary":true, + "Address":"172.31.10.246" + } + ], + "IPV6Addresses":null, + "MacAddress":"0a:4b:c1:bb:4e:7c", + "PrivateDNSName":"ip-172-31-10-246.us-west-2.compute.internal" + }], + "MemoryCPULimitsEnabled":true + } + ], + "IdToContainer":{ + "30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136":{ + "DockerId":"30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136", + "DockerName":"ecs-task-networking-state-1-internalecspause-96fff5abcbe0ecc34b00", + "Container":{ + "Name":"~internal~ecs~pause", + "Image":"amazon/amazon-ecs-pause:0.1.0", + "ImageID":"", + "Command":null, + 
"Cpu":0, + "Memory":0, + "Links":null, + "volumesFrom":null, + "mountPoints":null, + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":null, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":null, + "hostConfig":null, + "version":null + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RESOURCES_PROVISIONED", + "KnownStatus":"RESOURCES_PROVISIONED", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"CNI_PAUSE", + "ApplyingError":null, + "SentStatus":"NONE", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null, + "SteadyStateStatus":"RESOURCES_PROVISIONED" + } + }, + "459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030":{ + "DockerId":"459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030", + "DockerName":"ecs-task-networking-state-1-container1-d2efcbe2fdc0a0fedd01", + "Container":{ + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], + "Cpu":100, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + "mountPoints":[ + + ], + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{}", + "hostConfig":"{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.18" + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":[ + { + "ContainerName":"~internal~ecs~pause", + "SatisfiedStatus":"RESOURCES_PROVISIONED", + "DependentStatus":"PULLED" + } + ] + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "ApplyingError":null, + "SentStatus":"RUNNING", + "metadataFileUpdated":false, + "KnownExitCode":null, + 
"KnownPortBindings":null + } + } + }, + "IdToTask":{ + "30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2" + }, + "ImageStates":[ + { + "Image":{ + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names":[ + "amazonlinux:1" + ], + "Size":165452304 + }, + "PulledAt":"2018-10-04T17:35:01.803012118Z", + "LastUsedAt":"2018-10-04T17:35:01.803013369Z" + } + ], + "ENIAttachments":[ + { + "taskArn":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "attachmentArn":"arn:aws:ecs:us-west-2:1234567890:attachment/10b82317-3346-4be3-8f90-250a5bac1eff", + "attachSent":true, + "macAddress":"0a:4b:c1:bb:4e:7c", + "status":1, + "expiresAt":"2018-10-04T17:37:46.286823554Z" + } + ], + "IPToTask":{ + "169.254.172.2":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2" + } + } + }, + "Version":11 +} + diff --git a/agent/statemanager/testdata/v11/task-networking/ecs_agent_data.json b/agent/statemanager/testdata/v11/task-networking/ecs_agent_data.json new file mode 100644 index 00000000000..585fe69a663 --- /dev/null +++ b/agent/statemanager/testdata/v11/task-networking/ecs_agent_data.json @@ -0,0 +1,265 @@ +{ + "Data":{ + "Cluster":"state-file", + "ContainerInstanceArn":"arn:aws:ecs:us-west-2:1234567890:container-instance/3825ec5c-d63c-4752-b9c5-71fc45f5cac4", + "EC2InstanceID":"i-0da29eb1a8a98768b", + "TaskEngine":{ + "Tasks":[ + { + "Arn":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "Family":"task-networking-state", + "Version":"1", + "Containers":[ + { + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], 
+ "Cpu":100, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + "mountPoints":[ + + ], + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{}", + "hostConfig":"{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.18" + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":[ + { + "ContainerName":"~internal~ecs~pause", + "SatisfiedStatus":"RESOURCES_PROVISIONED", + "DependentStatus":"PULLED" + } + ] + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "ApplyingError":null, + "SentStatus":"RUNNING", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null + }, + { + "Name":"~internal~ecs~pause", + "Image":"amazon/amazon-ecs-pause:0.1.0", + "ImageID":"", + "Command":null, + "Cpu":0, + "Memory":0, + "Links":null, + "volumesFrom":null, + "mountPoints":null, + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":null, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":null, + "hostConfig":null, + "version":null + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RESOURCES_PROVISIONED", + "KnownStatus":"RESOURCES_PROVISIONED", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"CNI_PAUSE", + "ApplyingError":null, + "SentStatus":"NONE", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null, + "SteadyStateStatus":"RESOURCES_PROVISIONED" + } + ], + "volumes":[ + + ], + "DesiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "KnownTime":"2018-10-04T17:35:02.384835623Z", + "PullStartedAt":"2018-10-04T17:35:00.297708436Z", + "PullStoppedAt":"2018-10-04T17:35:01.803432136Z", + "ExecutionStoppedAt":"0001-01-01T00:00:00Z", + "SentStatus":"RUNNING", 
+ "StartSequenceNumber":5, + "StopSequenceNumber":0, + "executionCredentialsID":"", + "ENI":{ + "ec2Id":"eni-089ba8329b8e3f6ec", + "IPV4Addresses":[ + { + "Primary":true, + "Address":"172.31.10.246" + } + ], + "IPV6Addresses":null, + "MacAddress":"0a:4b:c1:bb:4e:7c", + "PrivateDNSName":"ip-172-31-10-246.us-west-2.compute.internal" + }, + "MemoryCPULimitsEnabled":true + } + ], + "IdToContainer":{ + "30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136":{ + "DockerId":"30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136", + "DockerName":"ecs-task-networking-state-1-internalecspause-96fff5abcbe0ecc34b00", + "Container":{ + "Name":"~internal~ecs~pause", + "Image":"amazon/amazon-ecs-pause:0.1.0", + "ImageID":"", + "Command":null, + "Cpu":0, + "Memory":0, + "Links":null, + "volumesFrom":null, + "mountPoints":null, + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":null, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":null, + "hostConfig":null, + "version":null + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RESOURCES_PROVISIONED", + "KnownStatus":"RESOURCES_PROVISIONED", + "TransitionDependencySet":{ + "ContainerDependencies":null + }, + "RunDependencies":null, + "IsInternal":"CNI_PAUSE", + "ApplyingError":null, + "SentStatus":"NONE", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null, + "SteadyStateStatus":"RESOURCES_PROVISIONED" + } + }, + "459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030":{ + "DockerId":"459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030", + "DockerName":"ecs-task-networking-state-1-container1-d2efcbe2fdc0a0fedd01", + "Container":{ + "Name":"container_1", + "Image":"amazonlinux:1", + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command":[ + "sleep", + "3600" + ], + "Cpu":100, + "Memory":512, + "Links":null, + "volumesFrom":[ + + ], + 
"mountPoints":[ + + ], + "portMappings":null, + "Essential":true, + "EntryPoint":null, + "environment":{ + + }, + "overrides":{ + "command":null + }, + "dockerConfig":{ + "config":"{}", + "hostConfig":"{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version":"1.18" + }, + "registryAuthentication":null, + "LogsAuthStrategy":"", + "desiredStatus":"RUNNING", + "KnownStatus":"RUNNING", + "TransitionDependencySet":{ + "ContainerDependencies":[ + { + "ContainerName":"~internal~ecs~pause", + "SatisfiedStatus":"RESOURCES_PROVISIONED", + "DependentStatus":"PULLED" + } + ] + }, + "RunDependencies":null, + "IsInternal":"NORMAL", + "ApplyingError":null, + "SentStatus":"RUNNING", + "metadataFileUpdated":false, + "KnownExitCode":null, + "KnownPortBindings":null + } + } + }, + "IdToTask":{ + "30746c302c68333a4a71cd7312fcdd12d64df048ab703fc085277512ac7fc136":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "459fcba75ba51f3cb269be9fd7250ed8797ffa28ad7821fbff87c5f1c9980030":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2" + }, + "ImageStates":[ + { + "Image":{ + "ImageID":"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names":[ + "amazonlinux:1" + ], + "Size":165452304 + }, + "PulledAt":"2018-10-04T17:35:01.803012118Z", + "LastUsedAt":"2018-10-04T17:35:01.803013369Z" + } + ], + "ENIAttachments":[ + { + "taskArn":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2", + "attachmentArn":"arn:aws:ecs:us-west-2:1234567890:attachment/10b82317-3346-4be3-8f90-250a5bac1eff", + "attachSent":true, + "macAddress":"0a:4b:c1:bb:4e:7c", + "status":1, + "expiresAt":"2018-10-04T17:37:46.286823554Z" + } + ], + "IPToTask":{ + "169.254.172.2":"arn:aws:ecs:us-west-2:1234567890:task/fad405be-8705-4175-877b-db50109a15f2" + } + } + }, + "Version":11 +} diff --git a/agent/statemanager/testdata/v13/1/ecs_agent_data.json b/agent/statemanager/testdata/v13/1/ecs_agent_data.json new file 
mode 100644 index 00000000000..61fb1fe17ef --- /dev/null +++ b/agent/statemanager/testdata/v13/1/ecs_agent_data.json @@ -0,0 +1,139 @@ +{ + "Data": { + "Cluster": "test-statefile", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-statefile/b9c2c229d5ce4f52851f3e9e6d4db894", + "EC2InstanceID": "i-1234567890", + "TaskEngine": { + "ENIAttachments": null, + "IPToTask": {}, + "IdToContainer": { + "c3b86269ef62df4a27a4b612906f1f9776fa91747389b10d2361e488b223e44a": { + "Container": { + "ApplyingError": null, + "Command": [ + "sh", + "-c", + "exit 128" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "busybox", + "ImageID": "sha256:e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b", + "IsInternal": "NORMAL", + "KnownExitCode": 128, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": {}, + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": null, + "volumesFrom": [] + }, + "DockerId": "c3b86269ef62df4a27a4b612906f1f9776fa91747389b10d2361e488b223e44a", + "DockerName": "ecs-test-v13-td-2-container1-c694d49bb9b9aad0e801" + } + }, + "IdToTask": { + "c3b86269ef62df4a27a4b612906f1f9776fa91747389b10d2361e488b223e44a": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/6a86da4c40ed4a0b94cf359e840dda98" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b", + "Names": [ + "busybox" + ], + "Size": 1223886 + }, + "LastUsedAt": 
"2019-06-26T15:56:53.636435606Z", + "PullSucceeded": true, + "PulledAt": "2019-06-26T15:56:53.636433654Z" + } + ], + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/6a86da4c40ed4a0b94cf359e840dda98", + "Containers": [ + { + "ApplyingError": null, + "Command": [ + "sh", + "-c", + "exit 128" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "busybox", + "ImageID": "sha256:e4db68de4ff27c2adfea0c54bbb73a61a42f5b667c326de4d7d5b19ab71c6a3b", + "IsInternal": "NORMAL", + "KnownExitCode": 128, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": {}, + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": null, + "volumesFrom": [] + } + ], + "DesiredStatus": "STOPPED", + "ENI": null, + "ExecutionStoppedAt": "2019-06-26T15:56:54.353046931Z", + "Family": "test-v13-td", + "KnownStatus": "STOPPED", + "KnownTime": "2019-06-26T15:56:54.353114618Z", + "PullStartedAt": "2019-06-26T15:56:52.350237867Z", + "PullStoppedAt": "2019-06-26T15:56:53.636768284Z", + "SentStatus": "STOPPED", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "Version": "2", + "executionCredentialsID": "", + "resources": {}, + "volumes": [] + } + ] + } + }, + "Version": 13 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v14/private-registry/ecs_agent_data.json b/agent/statemanager/testdata/v14/private-registry/ecs_agent_data.json new file mode 100644 index 00000000000..ef862adfeb9 --- /dev/null +++ 
b/agent/statemanager/testdata/v14/private-registry/ecs_agent_data.json @@ -0,0 +1,181 @@ +{ + "Data": { + "Cluster": "test-statefile", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-statefile/dca5eca99f9c4567866b3d1fba1bb562", + "EC2InstanceID": "i-1234567890", + "TaskEngine": { + "ENIAttachments": null, + "IPToTask": {}, + "IdToContainer": { + "ecs-private-registry-state-1-container1-b2f386f88dcac192ac01": { + "Container": { + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: unauthorized: incorrect username or password\n", + "name": "CannotPullContainerError" + }, + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "amazonlinux:1", + "ImageID": "", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "asm-auth", + "RequiredStatus": 1 + } + ] + } + }, + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": { + "credentialsParameter": "arn:aws:secretsmanager:us-west-2:1234567890:secret:FunctionalTest-PrivateRegistryAuth-I0nqxs", + "region": "us-west-2" + }, + "ecrAuthData": null, + "type": "asm" + }, + "volumesFrom": [] + }, + "DockerId": "", + "DockerName": "ecs-private-registry-state-1-container1-b2f386f88dcac192ac01" + } + }, + "IdToTask": { + 
"ecs-private-registry-state-1-container1-b2f386f88dcac192ac01": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/19e8453e9a404cb58d55aaa0df65b4f5" + }, + "ImageStates": null, + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/19e8453e9a404cb58d55aaa0df65b4f5", + "Containers": [ + { + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: unauthorized: incorrect username or password\n", + "name": "CannotPullContainerError" + }, + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "amazonlinux:1", + "ImageID": "", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "asm-auth", + "RequiredStatus": 1 + } + ] + } + }, + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": { + "credentialsParameter": "arn:aws:secretsmanager:us-west-2:1234567890:secret:FunctionalTest-PrivateRegistryAuth-I0nqxs", + "region": "us-west-2" + }, + "ecrAuthData": null, + "type": "asm" + }, + "volumesFrom": [] + } + ], + "DesiredStatus": "STOPPED", + "ENI": null, + "ExecutionStoppedAt": "2019-06-26T16:05:45.362191633Z", + "Family": "private-registry-state", + "KnownStatus": "STOPPED", + "KnownTime": "2019-06-26T16:05:45.362255714Z", + "PlatformFields": {}, + "PullStartedAt": "2019-06-26T16:05:30.556076442Z", + "PullStoppedAt": 
"2019-06-26T16:05:45.358424513Z", + "SentStatus": "STOPPED", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "Version": "1", + "executionCredentialsID": "f3cb8a19-cf56-4b79-ad58-691f3e669cf7", + "resources": { + "asm-auth": [ + { + "asmResources": [ + { + "credentialsParameter": "arn:aws:secretsmanager:us-west-2:1234567890:secret:FunctionalTest-PrivateRegistryAuth-I0nqxs", + "region": "us-west-2" + } + ], + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "REMOVED", + "executionCredentialsID": "f3cb8a19-cf56-4b79-ad58-691f3e669cf7", + "knownStatus": "REMOVED", + "taskARN": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/19e8453e9a404cb58d55aaa0df65b4f5" + } + ] + }, + "volumes": [] + } + ] + } + }, + "Version": 14 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json b/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json new file mode 100644 index 00000000000..f248be93e84 --- /dev/null +++ b/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json @@ -0,0 +1,200 @@ +{ + "Data": { + "Cluster": "test-statefile", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-statefile/17a22ed5d51c41d08da4fb4587139895", + "EC2InstanceID": "i-1234567890", + "TaskEngine": { + "ENIAttachments": null, + "IPToTask": {}, + "IdToContainer": { + "8bd8a324d0a4580523752142ae8f5110210438fa457472ff3802b5ef51b9c5fe": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "amazonlinux:1", + "ImageID": "sha256:a89f4a191d4c1061567bd8ba7275620ef344145bc01e7e33592f1766b9ce95d1", + "IsInternal": "NORMAL", + "KnownExitCode": 0, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": { + "2": { + "ContainerDependencies": null, + 
"ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "79de7f83-becd-4750-ab4c-26ccc3ee533c", + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/79de7f83-becd-4750-ab4c-26ccc3ee533c", + "mysecret": "test-value" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": null, + "secrets": [ + { + "containerPath": "", + "name": "mysecret", + "provider": "ssm", + "region": "us-west-2", + "type": "ENVIRONMENT_VARIABLE", + "valueFrom": "/ecs/secrets/test" + } + ], + "volumesFrom": [] + }, + "DockerId": "8bd8a324d0a4580523752142ae8f5110210438fa457472ff3802b5ef51b9c5fe", + "DockerName": "ecs-test-secret-state-1-container1-b2d6ad8ec694d2effd01" + } + }, + "IdToTask": { + "8bd8a324d0a4580523752142ae8f5110210438fa457472ff3802b5ef51b9c5fe": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/7e1bfc28fe764a789a721710258337ff" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:a89f4a191d4c1061567bd8ba7275620ef344145bc01e7e33592f1766b9ce95d1", + "Names": [ + "amazonlinux:1" + ], + "Size": 166660454 + }, + "LastUsedAt": "2019-06-26T16:17:25.56290834Z", + "PullSucceeded": true, + "PulledAt": "2019-06-26T16:17:25.56290639Z" + } + ], + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-statefile/7e1bfc28fe764a789a721710258337ff", + "Containers": [ + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "Image": "amazonlinux:1", + "ImageID": "sha256:a89f4a191d4c1061567bd8ba7275620ef344145bc01e7e33592f1766b9ce95d1", + "IsInternal": "NORMAL", + "KnownExitCode": 0, + "KnownPortBindings": null, + "KnownStatus": "STOPPED", + "Links": null, + "LogsAuthStrategy": "", + 
"Memory": 512, + "Name": "container_1", + "RunDependencies": null, + "SentStatus": "STOPPED", + "TransitionDependencySet": { + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "79de7f83-becd-4750-ab4c-26ccc3ee533c", + "desiredStatus": "STOPPED", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/79de7f83-becd-4750-ab4c-26ccc3ee533c", + "mysecret": "test-value" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": null, + "secrets": [ + { + "containerPath": "", + "name": "mysecret", + "provider": "ssm", + "region": "us-west-2", + "type": "ENVIRONMENT_VARIABLE", + "valueFrom": "/ecs/secrets/test" + } + ], + "volumesFrom": [] + } + ], + "DesiredStatus": "STOPPED", + "ENI": null, + "ExecutionStoppedAt": "2019-06-26T16:17:27.09538479Z", + "Family": "test-secret-state", + "KnownStatus": "STOPPED", + "KnownTime": "2019-06-26T16:17:27.095441158Z", + "PlatformFields": {}, + "PullStartedAt": "2019-06-26T16:17:21.01236906Z", + "PullStoppedAt": "2019-06-26T16:17:25.563231235Z", + "SentStatus": "STOPPED", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "Version": "1", + "executionCredentialsID": "fc5b9636-a935-400b-9e06-13c5490099de", + "resources": { + "ssmsecret": [ + { + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "REMOVED", + "executionCredentialsID": "fc5b9636-a935-400b-9e06-13c5490099de", + "knownStatus": "CREATED", + "secretResources": { + "us-west-2": [ + { + "containerPath": "", + "name": "mysecret", + "provider": "ssm", + "region": "us-west-2", + "type": "ENVIRONMENT_VARIABLE", + "valueFrom": "/ecs/secrets/test" + } + ] + }, + "taskARN": 
"arn:aws:ecs:us-west-2:1234567890:task/test-statefile/7e1bfc28fe764a789a721710258337ff" + } + ] + }, + "volumes": [] + } + ] + } + }, + "Version": 17 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json b/agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json new file mode 100644 index 00000000000..c4dba80d63f --- /dev/null +++ b/agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json @@ -0,0 +1,194 @@ +{ + "Data": { + "AvailabilityZone": "us-west-2c", + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752", + "EC2InstanceID": "i-0da29eb1a8a98768b", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", + "Family": "secrets-state", + "Version": "1", + "Containers": [ + { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [ + { + "name": "ssm-secret", + "valueFrom": "secret-value-from", + "provider": "ssm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLES", + "region": "us-west-2" + } + ], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API 
error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "resources": { + "ssmsecret": [ + { + "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4", + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "secretResources": { + "us-west-2": [ + { + "name": "ssm-secret", + "valueFrom": "secret-value-from", + "provider": "ssm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLES", + "region": "us-west-2" + } + ] + } + } + ] + }, + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2018-10-04T18:05:49.121835686Z", + "PullStartedAt": "2018-10-04T18:05:34.359798761Z", + "PullStoppedAt": "2018-10-04T18:05:48.445985904Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "ENI": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": { + "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956", + "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500", + "Container": { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + 
"dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names": [ + "amazonlinux:1" + ], + "Size": 165452304 + }, + "PulledAt": "2018-10-04T18:05:48.445644088Z", + "LastUsedAt": "2018-10-04T18:05:48.445645342Z", + "PullSucceeded": false + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 18 +} diff --git a/agent/statemanager/testdata/v18/gpu/ecs_agent_data.json b/agent/statemanager/testdata/v18/gpu/ecs_agent_data.json new file mode 100644 index 00000000000..099e4058e24 --- /dev/null +++ b/agent/statemanager/testdata/v18/gpu/ecs_agent_data.json @@ -0,0 +1,172 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752", + "EC2InstanceID": "i-0da29eb1a8a98768b", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", + "Family": "gpu-state", + 
"Version": "1", + "Containers": [ + { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "GPUIDs": ["0", "1"], + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": { + "NVIDIA_VISIBLE_DEVICES": "0,1" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "Associations": [ + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + "Name": "0", + "Type": "gpu" + }, + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + "Name": "1", + "Type": "gpu" + } + ], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2018-10-04T18:05:49.121835686Z", + "PullStartedAt": "2018-10-04T18:05:34.359798761Z", + "PullStoppedAt": "2018-10-04T18:05:48.445985904Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "ENI": null, + "MemoryCPULimitsEnabled": 
true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": { + "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956", + "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500", + "Container": { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names": [ + "amazonlinux:1" + ], + "Size": 165452304 + }, + "PulledAt": "2018-10-04T18:05:48.445644088Z", + "LastUsedAt": "2018-10-04T18:05:48.445645342Z", + "PullSucceeded": false + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 18 +} diff 
--git a/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json b/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json new file mode 100644 index 00000000000..7f6404920c5 --- /dev/null +++ b/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json @@ -0,0 +1,228 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752", + "EC2InstanceID": "i-0da29eb1a8a98768b", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", + "Family": "secrets-state", + "Version": "1", + "Containers": [ + { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [ + { + "name": "ssm-secret", + "valueFrom": "secret-value-from", + "provider": "ssm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLE", + "region": "us-west-2" + }, + { + "name": "asm-secret", + "valueFrom": "secret-value-from", + "provider": "asm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLE", + "region": "us-west-2" + } + ], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + }, + { + "Name": "asmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get 
https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "resources": { + "asmsecret": [ + { + "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4", + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "secretResources": { + "secret-value-from_us-west-2": { + "name": "asm-secret", + "valueFrom": "secret-value-from", + "provider": "asm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLE", + "region": "us-west-2" + } + } + } + ], + "ssmsecret": [ + { + "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4", + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "secretResources": { + "us-west-2": [ + { + "name": "ssm-secret", + "valueFrom": "secret-value-from", + "provider": "ssm", + "containerPath": null, + "type": "ENVIRONMENT_VARIABLE", + "region": "us-west-2" + } + ] + } + } + ] + }, + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2018-10-04T18:05:49.121835686Z", + "PullStartedAt": "2018-10-04T18:05:34.359798761Z", + "PullStoppedAt": "2018-10-04T18:05:48.445985904Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "ENI": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": { + "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956", + "DockerName": 
"ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500", + "Container": { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "ssmsecret", + "RequiredStatus": 1 + }, + { + "Name": "asmsecret", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names": [ + "amazonlinux:1" + ], + "Size": 165452304 + }, + "PulledAt": "2018-10-04T18:05:48.445644088Z", + "LastUsedAt": "2018-10-04T18:05:48.445645342Z", + "PullSucceeded": false + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 18 +} diff --git a/agent/statemanager/testdata/v20/containerOrdering/ecs_agent_data.json 
b/agent/statemanager/testdata/v20/containerOrdering/ecs_agent_data.json new file mode 100644 index 00000000000..994fc65281f --- /dev/null +++ b/agent/statemanager/testdata/v20/containerOrdering/ecs_agent_data.json @@ -0,0 +1,225 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752", + "EC2InstanceID": "i-0da29eb1a8a98768b", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", + "Family": "container-ordering-state", + "Version": "1", + "Containers": [ + { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "GPUIDs": ["0", "1"], + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": { + "NVIDIA_VISIBLE_DEVICES": "0,1" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + }, + { + "Name": "container_2", + "DependsOn": [ + { + "ContainerName":"container_1", + "Condition":"START" + } + ], + "Image": "amazonlinux:1", + "ImageID": 
"sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "GPUIDs": [], + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "Associations": [ + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + "Name": "0", + "Type": "gpu" + }, + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + "Name": "1", + "Type": "gpu" + } + ], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2018-10-04T18:05:49.121835686Z", + "PullStartedAt": "2018-10-04T18:05:34.359798761Z", + "PullStoppedAt": "2018-10-04T18:05:48.445985904Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "ENI": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": { + "DockerId": 
"8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956", + "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500", + "Container": { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names": [ + "amazonlinux:1" + ], + "Size": 165452304 + }, + "PulledAt": "2018-10-04T18:05:48.445644088Z", + "LastUsedAt": "2018-10-04T18:05:48.445645342Z", + "PullSucceeded": false + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 20 +} diff --git a/agent/statemanager/testdata/v20/perContainerTimeouts/ecs_agent_data.json 
b/agent/statemanager/testdata/v20/perContainerTimeouts/ecs_agent_data.json new file mode 100644 index 00000000000..09a5fdd6659 --- /dev/null +++ b/agent/statemanager/testdata/v20/perContainerTimeouts/ecs_agent_data.json @@ -0,0 +1,169 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752", + "EC2InstanceID": "i-0da29eb1a8a98768b", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", + "Family": "per-container-timeouts", + "Version": "1", + "Containers": [ + { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "GPUIDs": ["0", "1"], + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "StartTimeout":10, + "StopTimeout":10, + "EntryPoint": null, + "environment": { + "NVIDIA_VISIBLE_DEVICES": "0,1" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "secrets": [], + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "Associations": [ + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + 
"Name": "0", + "Type": "gpu" + }, + { + "Containers": ["container_1"], + "Content": { + "Encoding": "base64", + "Value": "val" + }, + "Name": "1", + "Type": "gpu" + } + ], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2018-10-04T18:05:49.121835686Z", + "PullStartedAt": "2018-10-04T18:05:34.359798761Z", + "PullStoppedAt": "2018-10-04T18:05:48.445985904Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244", + "ENI": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": { + "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956", + "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500", + "Container": { + "Name": "container_1", + "Image": "amazonlinux:1", + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Command": [ + "sleep", + "3600" + ], + "Cpu": 0, + "Memory": 512, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "Essential": true, + "EntryPoint": null, + "environment": {}, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": { + "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n", + "name": "CannotPullContainerError" + }, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + 
"IdToTask": { + "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e", + "Names": [ + "amazonlinux:1" + ], + "Size": 165452304 + }, + "PulledAt": "2018-10-04T18:05:48.445644088Z", + "LastUsedAt": "2018-10-04T18:05:48.445645342Z", + "PullSucceeded": false + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 20 +} diff --git a/agent/statemanager/testdata/v23/firelens/ecs_agent_data.json b/agent/statemanager/testdata/v23/firelens/ecs_agent_data.json new file mode 100644 index 00000000000..1600c60d1c9 --- /dev/null +++ b/agent/statemanager/testdata/v23/firelens/ecs_agent_data.json @@ -0,0 +1,419 @@ +{ + "Data": { + "Cluster": "test-logrouter-p2", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-logrouter-p2/bf8f0029b6da498b8e3a167579c721d7", + "EC2InstanceID": "i-1234567890", + "TaskEngine": { + "ENIAttachments": null, + "IPToTask": {}, + "IdToContainer": { + "6585bbdb401743666b9f9ac4063a36a2cb39524004802076dad6bda126493968": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest", + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "app", + "RunDependencies": null, + "RuntimeID": "6585bbdb401743666b9f9ac4063a36a2cb39524004802076dad6bda126493968", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": 
"cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "d586807a-74cf-408f-80e3-c7cb913423bf", + "dependsOn": [ + { + "condition": "START", + "containerName": "log_router" + } + ], + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"LogConfig\":{\"Type\":\"awsfirelens\",\"Config\":{\"include-pattern\":\"^.*[aeiou]$\",\"log_group_name\":\"firelens-testing\",\"auto_create_stream\":\"true\",\"exclude-pattern\":\"^[a-z][aeiou].*$\",\"use_tag_as_stream\":\"true\",\"@type\":\"cloudwatch_logs\",\"region\":\"us-west-2\"}},\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.19" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/d586807a-74cf-408f-80e3-c7cb913423bf" + }, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": false + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + }, + "DockerId": "6585bbdb401743666b9f9ac4063a36a2cb39524004802076dad6bda126493968", + "DockerName": "ecs-test-firelens-beta-2-bridge-1-app-c882f18df494a0bc5e00" + }, + "7523c7b248edb5e51c3724d5d0216fab69f52504952560d63ce7b6d72a3c1228": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-fluentd:latest", + "ImageID": "sha256:67e3b4c5816f0a114035084cb189c1051ec16fbfd669346ae6fef4b42e7820b8", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "log_router", + "RunDependencies": null, + "RuntimeID": 
"7523c7b248edb5e51c3724d5d0216fab69f52504952560d63ce7b6d72a3c1228", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + }, + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "firelens", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "f00460f5-99c5-47d3-98fd-4b252043ea24", + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{\"User\":\"0\"}", + "hostConfig": "{\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/f00460f5-99c5-47d3-98fd-4b252043ea24", + "FLUENT_UID": "0" + }, + "firelensConfiguration": { + "options": { + "enable-ecs-log-metadata": "true" + }, + "type": "fluentd" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": false + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + }, + "DockerId": "7523c7b248edb5e51c3724d5d0216fab69f52504952560d63ce7b6d72a3c1228", + "DockerName": "ecs-test-firelens-beta-2-bridge-1-logrouter-c6c4b6e0c5ebacdce401" + } + }, + "IdToTask": { + "6585bbdb401743666b9f9ac4063a36a2cb39524004802076dad6bda126493968": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", + "7523c7b248edb5e51c3724d5d0216fab69f52504952560d63ce7b6d72a3c1228": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:67e3b4c5816f0a114035084cb189c1051ec16fbfd669346ae6fef4b42e7820b8", + "Names": [ + 
"1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-fluentd:latest" + ], + "Size": 384467001 + }, + "LastUsedAt": "2019-09-03T18:24:49.202062631Z", + "PullSucceeded": true, + "PulledAt": "2019-09-03T18:24:49.202060787Z" + }, + { + "Image": { + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "Names": [ + "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest" + ], + "Size": 918081091 + }, + "LastUsedAt": "2019-09-03T18:24:49.718464448Z", + "PullSucceeded": true, + "PulledAt": "2019-09-03T18:24:49.718462609Z" + } + ], + "Tasks": [ + { + "AppMesh": null, + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", + "Containers": [ + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest", + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "app", + "RunDependencies": null, + "RuntimeID": "6585bbdb401743666b9f9ac4063a36a2cb39524004802076dad6bda126493968", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "d586807a-74cf-408f-80e3-c7cb913423bf", + "dependsOn": [ + { + "condition": "START", + "containerName": "log_router" + } + ], + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": 
"{\"LogConfig\":{\"Type\":\"awsfirelens\",\"Config\":{\"include-pattern\":\"^.*[aeiou]$\",\"log_group_name\":\"firelens-testing\",\"auto_create_stream\":\"true\",\"exclude-pattern\":\"^[a-z][aeiou].*$\",\"use_tag_as_stream\":\"true\",\"@type\":\"cloudwatch_logs\",\"region\":\"us-west-2\"}},\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.19" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/d586807a-74cf-408f-80e3-c7cb913423bf" + }, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": false + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + }, + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-fluentd:latest", + "ImageID": "sha256:67e3b4c5816f0a114035084cb189c1051ec16fbfd669346ae6fef4b42e7820b8", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "log_router", + "RunDependencies": null, + "RuntimeID": "7523c7b248edb5e51c3724d5d0216fab69f52504952560d63ce7b6d72a3c1228", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + }, + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "firelens", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "f00460f5-99c5-47d3-98fd-4b252043ea24", + "desiredStatus": "RUNNING", + "dockerConfig": { + 
"config": "{\"User\":\"0\"}", + "hostConfig": "{\"NetworkMode\":\"bridge\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/f00460f5-99c5-47d3-98fd-4b252043ea24", + "FLUENT_UID": "0" + }, + "firelensConfiguration": { + "options": { + "enable-ecs-log-metadata": "true" + }, + "type": "fluentd" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": false + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + } + ], + "DesiredStatus": "RUNNING", + "ENI": null, + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "Family": "test-firelens-beta-2-bridge", + "KnownStatus": "RUNNING", + "KnownTime": "2019-09-03T18:24:50.156251123Z", + "MemoryCPULimitsEnabled": true, + "PlatformFields": {}, + "PullStartedAt": "2019-09-03T18:24:48.952680959Z", + "PullStoppedAt": "2019-09-03T18:24:49.723104163Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "Version": "1", + "associations": [], + "executionCredentialsID": "", + "resources": { + "cgroup": [ + { + "cgroupMountPath": "/sys/fs/cgroup", + "cgroupRoot": "/ecs/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "resourceSpec": { + "cpu": { + "shares": 2 + } + } + } + ], + "firelens": [ + { + "AppliedStatus": "NONE", + "Cluster": "test-logrouter-p2", + "ContainersToLogOptions": { + "app": { + "@type": "cloudwatch_logs", + "auto_create_stream": "true", + "exclude-pattern": "^[a-z][aeiou].*$", + "include-pattern": "^.*[aeiou]$", + "log_group_name": "firelens-testing", + "region": "us-west-2", + "use_tag_as_stream": "true" + } + }, + 
"CreatedAt": "0001-01-01T00:00:00Z", + "DesiredStatus": "CREATED", + "EC2InstanceID": "i-1234567890", + "ECSMetadataEnabled": true, + "FirelensConfigType": "fluentd", + "KnownStatus": "CREATED", + "ResourceDir": "/data/firelens/265f161270634aeaab7d0e88c29389c3", + "TaskARN": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/265f161270634aeaab7d0e88c29389c3", + "TaskDefinition": "test-firelens-beta-2-bridge:1", + "TerminalReason": "" + } + ] + }, + "volumes": [] + } + ] + }, + "availabilityZone": "us-west-2c" + }, + "Version": 23 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v23/perContainerRuntimeID/ecs_agent_data.json b/agent/statemanager/testdata/v23/perContainerRuntimeID/ecs_agent_data.json new file mode 100644 index 00000000000..a6e8a2770f0 --- /dev/null +++ b/agent/statemanager/testdata/v23/perContainerRuntimeID/ecs_agent_data.json @@ -0,0 +1,155 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:123456789012:container-instance/ea27e41b-c6e4-45a9-a7a0-484c95abece7", + "EC2InstanceID": "i-e94fe598fd890e38f", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:123456789012:task/70947c96-f64e-483a-a612-3fd4303546e7", + "Family": "sleep360", + "Version": "6", + "Containers": [ + { + "Name": "sleep", + "RuntimeID": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "V3EndpointID": "6d4b6283-452e-42ef-bafc-7e7f5a6dac99", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/16105516-51c7-41a2-ad8a-ba7411f309a0", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": 
"http://169.254.170.2/v3/6d4b6283-452e-42ef-bafc-7e7f5a6dac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "associations": [], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2019-08-06T21:59:04.217198554Z", + "PullStartedAt": "2019-08-06T21:59:01.88671907Z", + "PullStoppedAt": "2019-08-06T21:59:03.799514307Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90": { + "DockerId": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "DockerName": "ecs-sleep360-6-sleep-a2b4d9d6ef938afc6f00", + "Container": { + "Name": "sleep", + "RuntimeID": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "V3EndpointID": "6d4b6283-452e-42ef-bafc-7e7f5a6dac99", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/16105516-51c7-41a2-ad8a-ba7411f309a0", + "AWS_EXECUTION_ENV": 
"AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/6d4b6283-452e-42ef-bafc-7e7f5a6dac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90": "arn:aws:ecs:us-west-2:123456789012:task/70947c96-f64e-483a-a612-3fd4303546e7" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Names": [ + "busybox" + ], + "Size": 1223894 + }, + "PulledAt": "2019-08-06T21:59:03.797764725Z", + "LastUsedAt": "2019-08-06T21:59:03.797764824Z", + "PullSucceeded": true + } + ], + "ENIAttachments": null, + "IPToTask": {} + }, + "availabilityZone": "us-west-2b" + }, + "Version": 23 +} diff --git a/agent/statemanager/testdata/v24/firelens/ecs_agent_data.json b/agent/statemanager/testdata/v24/firelens/ecs_agent_data.json new file mode 100644 index 00000000000..342b16a0c95 --- /dev/null +++ b/agent/statemanager/testdata/v24/firelens/ecs_agent_data.json @@ -0,0 +1,429 @@ +{ + "Data": { + "Cluster": "test-logrouter-p2", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-logrouter-p2/19bd66a91d924f51abacfc254185d387", + "EC2InstanceID": "i-1234567890", + "TaskEngine": { + "ENIAttachments": null, + "IPToTask": {}, + "IdToContainer": { + "22fe9caa93540ba0ec76e4f70d97ddd3df941d1380e443c0cc0257f4bc12d637": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + 
"GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest", + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "app", + "RunDependencies": null, + "RuntimeID": "22fe9caa93540ba0ec76e4f70d97ddd3df941d1380e443c0cc0257f4bc12d637", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "fd9933d3-cf9e-4aad-bab6-03c239545b91", + "dependsOn": [ + { + "condition": "START", + "containerName": "log_router" + } + ], + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"LogConfig\":{\"Type\":\"awsfirelens\",\"Config\":{\"log_group_name\":\"firelens-testing-config\",\"log_stream_prefix\":\"from-fluent-bit\",\"region\":\"us-west-2\",\"exclude-pattern\":\"^[a-z][aeiou].*$\",\"auto_create_group\":\"true\",\"Name\":\"cloudwatch\"}},\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.19" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/fd9933d3-cf9e-4aad-bab6-03c239545b91", + "FLUENT_HOST": "172.17.0.7", + "FLUENT_PORT": "24224" + }, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": true + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + }, + "DockerId": "22fe9caa93540ba0ec76e4f70d97ddd3df941d1380e443c0cc0257f4bc12d637", + "DockerName": 
"ecs-test-firelens-local-bit-2-app-e89e9beddefed9f50700" + }, + "416fb4c0f8caa47f027841dab8f7ae42aa2b44129aa2bc926a61851d15598df0": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/fluentbit-dconfig:latest", + "ImageID": "sha256:9f7e531a1512de4c039d5938fb210f153fdf52dbcc66b2f37e1487a595edc7ca", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "log_router", + "RunDependencies": null, + "RuntimeID": "416fb4c0f8caa47f027841dab8f7ae42aa2b44129aa2bc926a61851d15598df0", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + }, + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "firelens", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "1530b2c2-49d4-47fe-8e4d-77067e4bec7b", + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{\"User\":\"0\"}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/1530b2c2-49d4-47fe-8e4d-77067e4bec7b", + "FLB_LOG_LEVEL": "debug" + }, + "firelensConfiguration": { + "options": { + "config-file-type": "file", + "config-file-value": "/tmp/dummy.conf" + }, + "type": "fluentbit" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": true + }, + "type": "ecr" + }, + "secrets": null, + 
"volumesFrom": [] + }, + "DockerId": "416fb4c0f8caa47f027841dab8f7ae42aa2b44129aa2bc926a61851d15598df0", + "DockerName": "ecs-test-firelens-local-bit-2-logrouter-96ab96cfb5bbc4e02e00" + } + }, + "IdToTask": { + "22fe9caa93540ba0ec76e4f70d97ddd3df941d1380e443c0cc0257f4bc12d637": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", + "416fb4c0f8caa47f027841dab8f7ae42aa2b44129aa2bc926a61851d15598df0": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:9f7e531a1512de4c039d5938fb210f153fdf52dbcc66b2f37e1487a595edc7ca", + "Names": [ + "1234567890.dkr.ecr.us-west-2.amazonaws.com/fluentbit-dconfig:latest" + ], + "Size": 420002313 + }, + "LastUsedAt": "2019-09-03T19:04:05.502729056Z", + "PullSucceeded": true, + "PulledAt": "2019-09-03T19:04:05.502727281Z" + }, + { + "Image": { + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "Names": [ + "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest" + ], + "Size": 918081091 + }, + "LastUsedAt": "2019-09-03T19:04:05.993122467Z", + "PullSucceeded": true, + "PulledAt": "2019-09-03T19:04:05.993120681Z" + } + ], + "Tasks": [ + { + "AppMesh": null, + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", + "Containers": [ + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/firelens-color-logger:latest", + "ImageID": "sha256:0e26a3f19b993b2bddb414d4d489522e43de4219ec342801f21cd5936ffea7aa", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "app", + "RunDependencies": null, + "RuntimeID": 
"22fe9caa93540ba0ec76e4f70d97ddd3df941d1380e443c0cc0257f4bc12d637", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "fd9933d3-cf9e-4aad-bab6-03c239545b91", + "dependsOn": [ + { + "condition": "START", + "containerName": "log_router" + } + ], + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"LogConfig\":{\"Type\":\"awsfirelens\",\"Config\":{\"log_group_name\":\"firelens-testing-config\",\"log_stream_prefix\":\"from-fluent-bit\",\"region\":\"us-west-2\",\"exclude-pattern\":\"^[a-z][aeiou].*$\",\"auto_create_group\":\"true\",\"Name\":\"cloudwatch\"}},\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.19" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/fd9933d3-cf9e-4aad-bab6-03c239545b91", + "FLUENT_HOST": "172.17.0.7", + "FLUENT_PORT": "24224" + }, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": true + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + }, + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "1234567890.dkr.ecr.us-west-2.amazonaws.com/fluentbit-dconfig:latest", + "ImageID": "sha256:9f7e531a1512de4c039d5938fb210f153fdf52dbcc66b2f37e1487a595edc7ca", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "log_router", + "RunDependencies": null, + "RuntimeID": 
"416fb4c0f8caa47f027841dab8f7ae42aa2b44129aa2bc926a61851d15598df0", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + }, + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "firelens", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "1530b2c2-49d4-47fe-8e4d-77067e4bec7b", + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{\"User\":\"0\"}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/1530b2c2-49d4-47fe-8e4d-77067e4bec7b", + "FLB_LOG_LEVEL": "debug" + }, + "firelensConfiguration": { + "options": { + "config-file-type": "file", + "config-file-value": "/tmp/dummy.conf" + }, + "type": "fluentbit" + }, + "metadataFileUpdated": false, + "mountPoints": [], + "overrides": { + "command": null + }, + "portMappings": [], + "registryAuthentication": { + "asmAuthData": null, + "ecrAuthData": { + "endpointOverride": "", + "region": "us-west-2", + "registryId": "1234567890", + "useExecutionRole": true + }, + "type": "ecr" + }, + "secrets": null, + "volumesFrom": [] + } + ], + "DesiredStatus": "RUNNING", + "ENI": null, + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "Family": "test-firelens-local-bit", + "KnownStatus": "RUNNING", + "KnownTime": "2019-09-03T19:04:06.432697537Z", + "MemoryCPULimitsEnabled": true, + "PlatformFields": {}, + "PullStartedAt": "2019-09-03T19:04:05.261572073Z", + "PullStoppedAt": "2019-09-03T19:04:05.997383779Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "Version": "2", + "associations": [], + "executionCredentialsID": "d534bb23-a438-410d-ba92-e7c07bfa13b5", + "resources": { + "cgroup": [ + { + "cgroupMountPath": "/sys/fs/cgroup", + "cgroupRoot": 
"/ecs/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "resourceSpec": { + "cpu": { + "shares": 2 + } + } + } + ], + "firelens": [ + { + "AppliedStatus": "NONE", + "Cluster": "test-logrouter-p2", + "ContainersToLogOptions": { + "app": { + "Name": "cloudwatch", + "auto_create_group": "true", + "exclude-pattern": "^[a-z][aeiou].*$", + "log_group_name": "firelens-testing-config", + "log_stream_prefix": "from-fluent-bit", + "region": "us-west-2" + } + }, + "CreatedAt": "0001-01-01T00:00:00Z", + "DesiredStatus": "CREATED", + "EC2InstanceID": "i-1234567890", + "ECSMetadataEnabled": true, + "ExecutionCredentialsID": "xxx", + "ExternalConfigType": "file", + "ExternalConfigValue": "/tmp/dummy.conf", + "FirelensConfigType": "fluentbit", + "KnownStatus": "CREATED", + "NetworkMode": "bridge", + "Region": "us-west-2", + "ResourceDir": "/data/firelens/027002862d4b485ba3da5e935e0bc10c", + "TaskARN": "arn:aws:ecs:us-west-2:1234567890:task/test-logrouter-p2/027002862d4b485ba3da5e935e0bc10c", + "TaskDefinition": "test-firelens-local-bit:2", + "TerminalReason": "" + } + ] + }, + "volumes": [] + } + ] + }, + "availabilityZone": "us-west-2c" + }, + "Version": 24 +} \ No newline at end of file diff --git a/agent/statemanager/testdata/v24/perContainerImageDigest/ecs_agent_data.json b/agent/statemanager/testdata/v24/perContainerImageDigest/ecs_agent_data.json new file mode 100644 index 00000000000..99e0d3876d3 --- /dev/null +++ b/agent/statemanager/testdata/v24/perContainerImageDigest/ecs_agent_data.json @@ -0,0 +1,179 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:123456789012:container-instance/8fad4d88-d718-4718-a371-7d2a193fe22b", + "EC2InstanceID": "i-e94fe598fd890e38f", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:123456789012:task/20ceacb3-2806-4701-a4d4-098515cebfe9", + "Family": "sleep360", + "Version": "2", + 
"Containers": [ + { + "Name": "sleep", + "RuntimeID": "61de9f7eab11bc4cbcdc17baad35bfc880f97b5b3c8a55de65593650c35102e0", + "V3EndpointID": "22d0d19f-29e1-435d-8757-08b16e22f161", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "ImageDigest": "sha256:9f1003c480699be56815db0f8146ad2e22efea85129b5b5983d0e0fb52d9ab70", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "firelensConfiguration": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/22d0d19f-29e1-435d-8757-08b16e22f161" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "associations": [], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2019-09-03T22:48:59.077558105Z", + "PullStartedAt": "2019-09-03T22:48:56.184445771Z", + "PullStoppedAt": "2019-09-03T22:48:58.659663871Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + 
"IdToContainer": { + "61de9f7eab11bc4cbcdc17baad35bfc880f97b5b3c8a55de65593650c35102e0": { + "DockerId": "61de9f7eab11bc4cbcdc17baad35bfc880f97b5b3c8a55de65593650c35102e0", + "DockerName": "ecs-sleep360-2-sleep-88eb808e8ee9ca94f101", + "Container": { + "Name": "sleep", + "RuntimeID": "61de9f7eab11bc4cbcdc17baad35bfc880f97b5b3c8a55de65593650c35102e0", + "V3EndpointID": "22d0d19f-29e1-435d-8757-08b16e22f161", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "ImageDigest": "sha256:9f1003c480699be56815db0f8146ad2e22efea85129b5b5983d0e0fb52d9ab70", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "firelensConfiguration": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/22d0d19f-29e1-435d-8757-08b16e22f161" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "1": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "61de9f7eab11bc4cbcdc17baad35bfc880f97b5b3c8a55de65593650c35102e0": "arn:aws:ecs:us-west-2:123456789012:task/20ceacb3-2806-4701-a4d4-098515cebfe9" + }, + "ImageStates": [ + { + "Image": { + "ImageID": 
"sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Names": [ + "busybox" + ], + "Size": 1223894 + }, + "PulledAt": "2019-09-03T22:48:58.65791348Z", + "LastUsedAt": "2019-09-03T22:48:58.657913576Z", + "PullSucceeded": true + } + ], + "ENIAttachments": null, + "IPToTask": {} + }, + "availabilityZone": "us-west-2b" + }, + "Version": 24 +} diff --git a/agent/statemanager/testdata/v25/seqNumTaskManifest/ecs_agent_data.json b/agent/statemanager/testdata/v25/seqNumTaskManifest/ecs_agent_data.json new file mode 100644 index 00000000000..e8fec643752 --- /dev/null +++ b/agent/statemanager/testdata/v25/seqNumTaskManifest/ecs_agent_data.json @@ -0,0 +1,157 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:123456789012:container-instance/ea27e41b-c6e4-45a9-a7a0-484c95abece7", + "EC2InstanceID": "i-e94fe598fd890e38f", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:123456789012:task/70947c96-f64e-483a-a612-3fd4303546e7", + "Family": "sleep360", + "Version": "6", + "Containers": [ + { + "Name": "sleep", + "RuntimeID": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "V3EndpointID": "6d4b6283-452e-42ef-bafc-7e7f5a6dac99", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/16105516-51c7-41a2-ad8a-ba7411f309a0", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/6d4b6283-452e-42ef-bafc-7e7f5a6dac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + 
"registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "associations": [], + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2019-08-06T21:59:04.217198554Z", + "PullStartedAt": "2019-08-06T21:59:01.88671907Z", + "PullStoppedAt": "2019-08-06T21:59:03.799514307Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90": { + "DockerId": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "DockerName": "ecs-sleep360-6-sleep-a2b4d9d6ef938afc6f00", + "Container": { + "Name": "sleep", + "RuntimeID": "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90", + "V3EndpointID": "6d4b6283-452e-42ef-bafc-7e7f5a6dac99", + "Image": "busybox", + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/16105516-51c7-41a2-ad8a-ba7411f309a0", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/6d4b6283-452e-42ef-bafc-7e7f5a6dac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": 
"{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "c00bc15085ef16b4c6b259f7dfc198a0a36b1ea1004c6de778bdc4b6629a7f90": "arn:aws:ecs:us-west-2:123456789012:task/70947c96-f64e-483a-a612-3fd4303546e7" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:bdc74663f4992a185e2775f63abcc1a19186cc5ec22fa2957db8ee88ad09dceb", + "Names": [ + "busybox" + ], + "Size": 1223894 + }, + "PulledAt": "2019-08-06T21:59:03.797764725Z", + "LastUsedAt": "2019-08-06T21:59:03.797764824Z", + "PullSucceeded": true + } + ], + "ENIAttachments": null, + "IPToTask": {} + }, + "availabilityZone": "us-west-2b", + "seqNumTaskManifest": 7 + + }, + "Version": 24 +} diff --git a/agent/statemanager/testdata/v26/gmsa/ecs_agent_data.json b/agent/statemanager/testdata/v26/gmsa/ecs_agent_data.json new file mode 100644 index 00000000000..0a5c02735b1 --- /dev/null +++ b/agent/statemanager/testdata/v26/gmsa/ecs_agent_data.json @@ -0,0 +1,231 @@ +{ + "Data": { + "Cluster": "gmsa-test", + "ContainerInstanceArn": "arn:aws:ecs:ap-northeast-1:1234567890:container-instance/e3f3aa05-ab9a-43a4-a4b8-736fc113bdb8", + "EC2InstanceID": "i-021fda4c225662a98", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:ap-northeast-1:1234567890:task/b8e2bd3c-c82a-4b43-9bde-199ae05b49a5", + "Family": "gmsa-test", + "Version": "5", + "Containers": [ + { + "Name": "windows_sample_app", + "RuntimeID": "c7c4ce17a76d30f26f48743faa9331867ffc0ae53971929f100124836ded7be7", + "V3EndpointID": "9cd43f50-acff-42fe-829e-3ed4aa68f637", + "Image": "microsoft/iis", + "ImageID": "sha256:281fc8d130220f569fcdaf500cf8ebc614f5f7227188432f73bc9da19bdbae1b", + 
"ImageDigest": "", + "Command": [ + "New-Item -Path C:\\inetpub\\wwwroot\\index.html -ItemType file -Value ' Amazon ECS Sample App

Amazon ECS Sample App

Congratulations!

Your application is now running on a container in Amazon ECS.

' -Force ; C:\\ServiceMonitor.exe w3svc" + ], + "Cpu": 512, + "GPUIDs": null, + "Memory": 768, + "Links": null, + "firelensConfiguration": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [ + { + "ContainerPort": 80, + "HostPort": 8080, + "BindIp": "", + "Protocol": "tcp" + } + ], + "secrets": null, + "Essential": true, + "EntryPoint": [ + "powershell", + "-Command" + ], + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/9cd43f50-acff-42fe-829e-3ed4aa68f637" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"SecurityOpt\":[\"credentialspec:file://WebApp01.json\"],\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "credentialspec", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": [ + { + "ContainerPort": 80, + "HostPort": 8080, + "BindIp": "0.0.0.0", + "Protocol": "tcp" + } + ] + } + ], + "associations": [], + "resources": { + "credentialspec": [ + { + "taskARN": "arn:aws:ecs:ap-northeast-1:1234567890:task/b8e2bd3c-c82a-4b43-9bde-199ae05b49a5", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "credentialSpecResources": [ + "credentialspec:file://WebApp01.json" + ], + "CredSpecMap": { + "credentialspec:file://WebApp01.json": "credentialspec=file://WebApp01.json" + }, + "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244" + } + ] + }, + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": 
"2019-11-14T21:26:31.5329611Z", + "PullStartedAt": "2019-11-14T21:26:24.4911792Z", + "PullStoppedAt": "2019-11-14T21:26:27.0614396Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 3, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "PlatformFields": { + "cpuUnbounded": false, + "memoryUnbounded": false + } + } + ], + "IdToContainer": { + "c7c4ce17a76d30f26f48743faa9331867ffc0ae53971929f100124836ded7be7": { + "DockerId": "c7c4ce17a76d30f26f48743faa9331867ffc0ae53971929f100124836ded7be7", + "DockerName": "ecs-gmsa-test-5-windowssampleapp-d28d85d3c994a7ccaf01", + "Container": { + "Name": "windows_sample_app", + "RuntimeID": "c7c4ce17a76d30f26f48743faa9331867ffc0ae53971929f100124836ded7be7", + "V3EndpointID": "9cd43f50-acff-42fe-829e-3ed4aa68f637", + "Image": "microsoft/iis", + "ImageID": "sha256:281fc8d130220f569fcdaf500cf8ebc614f5f7227188432f73bc9da19bdbae1b", + "ImageDigest": "", + "Command": [ + "New-Item -Path C:\\inetpub\\wwwroot\\index.html -ItemType file -Value ' Amazon ECS Sample App

Amazon ECS Sample App

Congratulations!

Your application is now running on a container in Amazon ECS.

' -Force ; C:\\ServiceMonitor.exe w3svc" + ], + "Cpu": 512, + "GPUIDs": null, + "Memory": 768, + "Links": null, + "firelensConfiguration": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [ + { + "ContainerPort": 80, + "HostPort": 8080, + "BindIp": "", + "Protocol": "tcp" + } + ], + "secrets": null, + "Essential": true, + "EntryPoint": [ + "powershell", + "-Command" + ], + "environment": { + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/9cd43f50-acff-42fe-829e-3ed4aa68f637" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"SecurityOpt\":[\"credentialspec:file://WebApp01.json\"],\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "TransitionDependencySet": { + "2": { + "ContainerDependencies": null, + "ResourceDependencies": [ + { + "Name": "credentialspec", + "RequiredStatus": 1 + } + ] + } + }, + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": [ + { + "ContainerPort": 80, + "HostPort": 8080, + "BindIp": "0.0.0.0", + "Protocol": "tcp" + } + ] + } + } + }, + "IdToTask": { + "c7c4ce17a76d30f26f48743faa9331867ffc0ae53971929f100124836ded7be7": "arn:aws:ecs:ap-northeast-1:1234567890:task/b8e2bd3c-c82a-4b43-9bde-199ae05b49a5" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:281fc8d130220f569fcdaf500cf8ebc614f5f7227188432f73bc9da19bdbae1b", + "Names": [ + "microsoft/iis" + ], + "Size": 5005450904 + }, + "PulledAt": "2019-11-14T21:23:43.5555065Z", + "LastUsedAt": "2019-11-14T21:23:43.5555065Z", + "PullSucceeded": true + } + ], + "ENIAttachments": null, + "IPToTask": {} + }, + "availabilityZone": "ap-northeast-1a", + "latestSeqNumberTaskManifest": 3 + 
}, + "Version": 26 +} diff --git a/agent/statemanager/testdata/v27/efs/ecs_agent_data.json b/agent/statemanager/testdata/v27/efs/ecs_agent_data.json new file mode 100644 index 00000000000..508a27ee975 --- /dev/null +++ b/agent/statemanager/testdata/v27/efs/ecs_agent_data.json @@ -0,0 +1,424 @@ +{ + "Data": { + "Cluster": "test-efs-creds", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/test-efs-creds/xxx", + "EC2InstanceID": "i-xxx", + "TaskEngine": { + "ENIAttachments": [ + { + "attachSent": true, + "attachmentArn": "arn:aws:ecs:us-west-2:1234567890:attachment/xxx", + "attachmentType": "task-eni", + "expiresAt": "2020-02-20T01:56:56.489789117Z", + "macAddress": "02:99:ec:49:83:44", + "status": 1, + "taskArn": "arn:aws:ecs:us-west-2:1234567890:task/test-efs-creds/xxx" + } + ], + "IPToTask": { + "169.254.172.2": "arn:aws:ecs:us-west-2:1234567890:task/test-efs-creds/xxx" + }, + "IdToContainer": { + "xxx": { + "Container": { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageDigest": "", + "ImageID": "", + "IsInternal": "CNI_PAUSE", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RESOURCES_PROVISIONED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 0, + "Name": "~internal~ecs~pause", + "RunDependencies": null, + "RuntimeID": "xxx", + "SentStatus": "NONE", + "StartTimeout": 0, + "SteadyStateStatus": "RESOURCES_PROVISIONED", + "StopTimeout": 0, + "TransitionDependencySet": { + "5": { + "ContainerDependencies": [ + { + "ContainerName": "sleep", + "SatisfiedStatus": "STOPPED" + } + ], + "ResourceDependencies": [ + { + "Name": "efs-html", + "RequiredStatus": 2 + } + ] + } + }, + "V3EndpointID": "", + "desiredStatus": "RESOURCES_PROVISIONED", + "dockerConfig": { + "config": null, + "hostConfig": null, + "version": null + }, + "environment": null, + "firelensConfiguration": null, + 
"metadataFileUpdated": false, + "mountPoints": null, + "overrides": { + "command": null + }, + "portMappings": null, + "registryAuthentication": null, + "secrets": null, + "volumesFrom": null + }, + "DockerId": "xxx", + "DockerName": "ecs-test-efs-creds-ap-awsvpc-2-internalecspause-xxx" + }, + "xxx": { + "Container": { + "ApplyingError": null, + "Command": [ + "sh", + "-c", + "sleep infinity" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "busybox:latest", + "ImageDigest": "", + "ImageID": "sha256:6d5fcfe5ff170471fcc3c8b47631d6d71202a1fd44cf3c147e50c8de21cf0648", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "sleep", + "RunDependencies": null, + "RuntimeID": "xxx", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": [ + { + "ContainerName": "~internal~ecs~pause", + "SatisfiedStatus": "RESOURCES_PROVISIONED" + } + ], + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + }, + { + "Name": "efs-html", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "xxx", + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.25" + }, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/xxx", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/xxx" + }, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": [ + { + "containerPath": "/tmp/test", + "readOnly": false, + "sourceVolume": "efs-html" + } + ], + "overrides": { + "command": null + }, + "portMappings": null, + "registryAuthentication": null, + "secrets": null, + "volumesFrom": [] + }, + "DockerId": "xxx", + "DockerName": 
"ecs-test-efs-creds-ap-awsvpc-2-sleep-xxx" + } + }, + "IdToTask": { + "xxx": "arn:aws:ecs:us-west-2:1234567890:task/test-efs-creds/xxx", + "xxx": "arn:aws:ecs:us-west-2:1234567890:task/test-efs-creds/xxx" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:6d5fcfe5ff170471fcc3c8b47631d6d71202a1fd44cf3c147e50c8de21cf0648", + "Names": [ + "busybox:latest" + ], + "Size": 1219782 + }, + "LastUsedAt": "2020-02-20T01:54:04.88572636Z", + "PullSucceeded": true, + "PulledAt": "2020-02-20T01:54:04.885724524Z" + } + ], + "Tasks": [ + { + "AppMesh": null, + "Arn": "arn:aws:ecs:us-west-2:1234567890:task/test-efs-creds/xxx", + "Containers": [ + { + "ApplyingError": null, + "Command": [ + "sh", + "-c", + "sleep infinity" + ], + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "busybox:latest", + "ImageDigest": "", + "ImageID": "sha256:6d5fcfe5ff170471fcc3c8b47631d6d71202a1fd44cf3c147e50c8de21cf0648", + "IsInternal": "NORMAL", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RUNNING", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 128, + "Name": "sleep", + "RunDependencies": null, + "RuntimeID": "xxx", + "SentStatus": "RUNNING", + "StartTimeout": 0, + "StopTimeout": 0, + "TransitionDependencySet": { + "1": { + "ContainerDependencies": [ + { + "ContainerName": "~internal~ecs~pause", + "SatisfiedStatus": "RESOURCES_PROVISIONED" + } + ], + "ResourceDependencies": [ + { + "Name": "cgroup", + "RequiredStatus": 1 + }, + { + "Name": "efs-html", + "RequiredStatus": 1 + } + ] + } + }, + "V3EndpointID": "xxx", + "desiredStatus": "RUNNING", + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"NetworkMode\":\"awsvpc\",\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.25" + }, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/xxx", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/xxx" + }, + "firelensConfiguration": null, + 
"metadataFileUpdated": false, + "mountPoints": [ + { + "containerPath": "/tmp/test", + "readOnly": false, + "sourceVolume": "efs-html" + } + ], + "overrides": { + "command": null + }, + "portMappings": null, + "registryAuthentication": null, + "secrets": null, + "volumesFrom": [] + }, + { + "ApplyingError": null, + "Command": null, + "Cpu": 0, + "EntryPoint": null, + "Essential": true, + "GPUIDs": null, + "Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageDigest": "", + "ImageID": "", + "IsInternal": "CNI_PAUSE", + "KnownExitCode": null, + "KnownPortBindings": null, + "KnownStatus": "RESOURCES_PROVISIONED", + "Links": null, + "LogsAuthStrategy": "", + "Memory": 0, + "Name": "~internal~ecs~pause", + "RunDependencies": null, + "RuntimeID": "xxx", + "SentStatus": "NONE", + "StartTimeout": 0, + "SteadyStateStatus": "RESOURCES_PROVISIONED", + "StopTimeout": 0, + "TransitionDependencySet": { + "5": { + "ContainerDependencies": [ + { + "ContainerName": "sleep", + "SatisfiedStatus": "STOPPED" + } + ], + "ResourceDependencies": [ + { + "Name": "efs-html", + "RequiredStatus": 2 + } + ] + } + }, + "V3EndpointID": "", + "desiredStatus": "RESOURCES_PROVISIONED", + "dockerConfig": { + "config": null, + "hostConfig": null, + "version": null + }, + "environment": null, + "firelensConfiguration": null, + "metadataFileUpdated": false, + "mountPoints": null, + "overrides": { + "command": null + }, + "portMappings": null, + "registryAuthentication": null, + "secrets": null, + "volumesFrom": null + } + ], + "DesiredStatus": "RUNNING", + "ENI": [ + { + "IPV4Addresses": [ + { + "Address": "172.31.23.80", + "Primary": true + } + ], + "IPV6Addresses": null, + "InterfaceAssociationProtocol": "default", + "InterfaceVlanProperties": { + "TrunkInterfaceMacAddress": "", + "VlanID": "" + }, + "MacAddress": "02:99:ec:49:83:44", + "PrivateDNSName": "ip-172-31-23-80.us-west-2.compute.internal", + "SubnetGatewayIPV4Address": "172.31.16.1/20", + "ec2Id": "eni-07113203d7c904702" + } + ], + 
"ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "Family": "test-efs-creds-ap-awsvpc", + "KnownStatus": "RUNNING", + "KnownTime": "2020-02-20T01:54:05.363137128Z", + "MemoryCPULimitsEnabled": true, + "PlatformFields": {}, + "PullStartedAt": "2020-02-20T01:54:03.576860185Z", + "PullStoppedAt": "2020-02-20T01:54:04.88978339Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 5, + "StopSequenceNumber": 0, + "Version": "2", + "associations": [], + "executionCredentialsID": "", + "resources": { + "cgroup": [ + { + "cgroupMountPath": "/sys/fs/cgroup", + "cgroupRoot": "/ecs/test-efs-creds/xxx", + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "resourceSpec": { + "cpu": { + "shares": 2 + } + } + } + ], + "dockerVolume": [ + { + "createdAt": "0001-01-01T00:00:00Z", + "desiredStatus": "CREATED", + "dockerVolumeConfiguration": { + "autoprovision": false, + "dockerVolumeName": "ecs-test-efs-creds-ap-awsvpc-2-efs-html-xxx", + "driver": "amazon-ecs-volume-plugin", + "driverOpts": { + "device": "fs-xxx:/", + "o": "tls,tlsport=20050,iam,awscredsuri=/v2/credentials/xxx,accesspoint=fsap-xxx,netns=/proc/123/ns/net", + "type": "efs" + }, + "labels": {}, + "mountPoint": "mpabs", + "scope": "task" + }, + "knownStatus": "CREATED", + "name": "efs-html", + "pauseContainerPID": "123" + } + ] + }, + "volumes": [ + { + "efsVolumeConfiguration": { + "autoprovision": false, + "dockerVolumeName": "ecs-test-efs-creds-ap-awsvpc-2-efs-html-xxx", + "driver": "amazon-ecs-volume-plugin", + "driverOpts": { + "device": "fs-xxx:/", + "o": "tls,tlsport=20050,iam,awscredsuri=/v2/credentials/xxx,accesspoint=fsap-xxx,netns=/proc/123/ns/net", + "type": "efs" + }, + "labels": {}, + "mountPoint": "mpabc", + "scope": "task" + }, + "name": "efs-html", + "type": "efs" + } + ] + } + ] + }, + "availabilityZone": "us-west-2b", + "latestSeqNumberTaskManifest": 5 + }, + "Version": 27 +} \ No newline at end of file diff --git 
a/agent/statemanager/testdata/v28/environmentFiles/ecs_agent_data.json b/agent/statemanager/testdata/v28/environmentFiles/ecs_agent_data.json new file mode 100644 index 00000000000..ebd2c534460 --- /dev/null +++ b/agent/statemanager/testdata/v28/environmentFiles/ecs_agent_data.json @@ -0,0 +1,168 @@ +{ + "Data": { + "Cluster": "state-file", + "ContainerInstanceArn": "arn:aws:ecs:us-west-2:123456789011:container-instance/ea27e41b-c6e4-45a9-a7a0-484c95abece7", + "EC2InstanceID": "i-0e38e94fed89f598f", + "TaskEngine": { + "Tasks": [ + { + "Arn": "arn:aws:ecs:us-west-2:123456789011:task/70947c96-f64e-483a-a612-3fd4303546e7", + "Family": "sleep360", + "Version": "6", + "Containers": [ + { + "Name": "sleep", + "RuntimeID": "8a0a36b66de778bdc4c00bc15085ef1902c6b1ea16b4b259f7dfc199a7f004c6", + "V3EndpointID": "7e7f5a6d-42ef-452e-bafc-6d4b6283ac99", + "Image": "busybox", + "ImageID": "sha256:e2722fa29573abec09dcebcc1a19186cc6bdc74663f4992a1855db8ee88ad75f", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/7411f309-41a2-51c7-ad8a-ba16105516a0", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/7e7f5a6d-42ef-452e-bafc-6d4b6283ac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + "version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + ], + "associations": [], + "resources": { 
+ "envfile": [ + { + "environmentFilesSource": [ + {"type": "s3", "value": "arn:aws:s3:::bucket/key1.env"}, + {"type": "s3", "value": "arn:aws:s3:::bucket/key2.env"} + ], + "taskARN": "arn:aws:ecs:us-west-2:123456789011:task/70947c96-f64e-483a-a612-3fd4303546e7", + "desiredStatus": "CREATED", + "knownStatus": "CREATED", + "executionCredentialsID": "" + } + ] + }, + "volumes": [], + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "KnownTime": "2019-08-06T21:59:04.217198554Z", + "PullStartedAt": "2019-08-06T21:59:01.88671907Z", + "PullStoppedAt": "2019-08-06T21:59:03.799514307Z", + "ExecutionStoppedAt": "0001-01-01T00:00:00Z", + "SentStatus": "RUNNING", + "StartSequenceNumber": 2, + "StopSequenceNumber": 0, + "executionCredentialsID": "", + "ENI": null, + "AppMesh": null, + "MemoryCPULimitsEnabled": true, + "PlatformFields": {} + } + ], + "IdToContainer": { + "8a0a36b66de778bdc4c00bc15085ef1902c6b1ea16b4b259f7dfc199a7f004c6": { + "DockerId": "8a0a36b66de778bdc4c00bc15085ef1902c6b1ea16b4b259f7dfc199a7f004c6", + "DockerName": "ecs-sleep360-6-sleep-a2b4d9d6ef938afc6f00", + "Container": { + "Name": "sleep", + "RuntimeID": "8a0a36b66de778bdc4c00bc15085ef1902c6b1ea16b4b259f7dfc199a7f004c6", + "V3EndpointID": "7e7f5a6d-42ef-452e-bafc-6d4b6283ac99", + "Image": "busybox", + "ImageID": "sha256:e2722fa29573abec09dcebcc1a19186cc6bdc74663f4992a1855db8ee88ad75f", + "Command": [ + "sleep", + "360" + ], + "Cpu": 10, + "GPUIDs": null, + "Memory": 100, + "Links": null, + "volumesFrom": [], + "mountPoints": [], + "portMappings": [], + "secrets": null, + "Essential": true, + "EntryPoint": null, + "environment": { + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI": "/v2/credentials/7411f309-41a2-51c7-ad8a-ba16105516a0", + "AWS_EXECUTION_ENV": "AWS_ECS_EC2", + "ECS_CONTAINER_METADATA_URI": "http://169.254.170.2/v3/7e7f5a6d-42ef-452e-bafc-6d4b6283ac99" + }, + "overrides": { + "command": null + }, + "dockerConfig": { + "config": "{}", + "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}", + 
"version": "1.17" + }, + "registryAuthentication": null, + "LogsAuthStrategy": "", + "StartTimeout": 0, + "StopTimeout": 0, + "desiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "RunDependencies": null, + "IsInternal": "NORMAL", + "ApplyingError": null, + "SentStatus": "RUNNING", + "metadataFileUpdated": false, + "KnownExitCode": null, + "KnownPortBindings": null + } + } + }, + "IdToTask": { + "8a0a36b66de778bdc4c00bc15085ef1902c6b1ea16b4b259f7dfc199a7f004c6": "arn:aws:ecs:us-west-2:123456789011:task/70947c96-f64e-483a-a612-3fd4303546e7" + }, + "ImageStates": [ + { + "Image": { + "ImageID": "sha256:e2722fa29573abec09dcebcc1a19186cc6bdc74663f4992a1855db8ee88ad75f", + "Names": [ + "busybox" + ], + "Size": 1223894 + }, + "PulledAt": "2019-08-06T21:59:03.797764725Z", + "LastUsedAt": "2019-08-06T21:59:03.797764824Z", + "PullSucceeded": true + } + ], + "ENIAttachments": null, + "IPToTask": {} + } + }, + "Version": 27 +} diff --git a/agent/stats/common_test.go b/agent/stats/common_test.go new file mode 100644 index 00000000000..e7598e83608 --- /dev/null +++ b/agent/stats/common_test.go @@ -0,0 +1,345 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/data" + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/statechange" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + + "github.com/aws/aws-sdk-go/aws" + + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + sdkClient "github.com/docker/docker/client" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + // checkPointSleep is the sleep duration in milliseconds between + // starting/stopping containers in the test code. + checkPointSleep = 5 * time.Second + testContainerHealthImageName = "amazon/amazon-ecs-containerhealthcheck:make" + + // defaultDockerTimeoutSeconds is the timeout for dialing the docker remote API. + defaultDockerTimeoutSeconds = 10 * time.Second + + // waitForCleanupSleep is the sleep duration in milliseconds + // for the waiting after container cleanup before checking the state of the manager. + waitForCleanupSleep = 10 * time.Millisecond + + taskArn = "gremlin" + taskDefinitionFamily = "docker-gremlin" + taskDefinitionVersion = "1" + containerName = "gremlin-container" +) + +var ( + cfg = config.DefaultConfig() + defaultCluster = "default" + defaultContainerInstance = "ci" +) + +func init() { + cfg.EngineAuthData = config.NewSensitiveRawMessage([]byte{}) + cfg.ImagePullBehavior = config.ImagePullPreferCachedBehavior +} + +// parseNanoTime returns the time object from a string formatted with RFC3339Nano layout. 
+func parseNanoTime(value string) time.Time { + ts, _ := time.Parse(time.RFC3339Nano, value) + return ts +} + +// eventStream returns the event stream used to receive container change events +func eventStream(name string) *eventstream.EventStream { + eventStream := eventstream.NewEventStream(name, context.Background()) + eventStream.StartListening() + return eventStream +} + +// createGremlin creates the gremlin container using the docker client. +// It is used only in the test code. +func createGremlin(client *sdkClient.Client, netMode string) (*dockercontainer.ContainerCreateCreatedBody, error) { + containerGremlin, err := client.ContainerCreate(context.TODO(), + &dockercontainer.Config{ + Image: testImageName, + }, + &dockercontainer.HostConfig{ + NetworkMode: dockercontainer.NetworkMode(netMode), + }, + &network.NetworkingConfig{}, + "") + + return &containerGremlin, err +} + +func createHealthContainer(client *sdkClient.Client) (*dockercontainer.ContainerCreateCreatedBody, error) { + container, err := client.ContainerCreate(context.TODO(), + &dockercontainer.Config{ + Image: testContainerHealthImageName, + }, + &dockercontainer.HostConfig{}, + &network.NetworkingConfig{}, + "") + + return &container, err +} + +type IntegContainerMetadataResolver struct { + containerIDToTask map[string]*apitask.Task + containerIDToDockerContainer map[string]*apicontainer.DockerContainer +} + +func newIntegContainerMetadataResolver() *IntegContainerMetadataResolver { + resolver := IntegContainerMetadataResolver{ + containerIDToTask: make(map[string]*apitask.Task), + containerIDToDockerContainer: make(map[string]*apicontainer.DockerContainer), + } + + return &resolver +} + +func (resolver *IntegContainerMetadataResolver) ResolveTask(containerID string) (*apitask.Task, error) { + task, exists := resolver.containerIDToTask[containerID] + if !exists { + return nil, fmt.Errorf("unmapped container") + } + + return task, nil +} + +func (resolver *IntegContainerMetadataResolver) 
ResolveContainer(containerID string) (*apicontainer.DockerContainer, error) { + container, exists := resolver.containerIDToDockerContainer[containerID] + if !exists { + return nil, fmt.Errorf("unmapped container") + } + + return container, nil +} + +func validateInstanceMetrics(t *testing.T, engine *DockerStatsEngine) { + metadata, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err, "gettting instance metrics failed") + assert.NoError(t, validateMetricsMetadata(metadata), "validating metadata failed") + assert.Len(t, taskMetrics, 1, "incorrect number of tasks") + + taskMetric := taskMetrics[0] + assert.Equal(t, aws.StringValue(taskMetric.TaskDefinitionFamily), taskDefinitionFamily, "unexpected task definition family") + assert.Equal(t, aws.StringValue(taskMetric.TaskDefinitionVersion), taskDefinitionVersion, "unexpected task definition version") + assert.NoError(t, validateContainerMetrics(taskMetric.ContainerMetrics, 1), "validating container metrics failed") +} + +func validateContainerMetrics(containerMetrics []*ecstcs.ContainerMetric, expected int) error { + if len(containerMetrics) != expected { + return fmt.Errorf("Mismatch in number of ContainerStatsSet elements. 
Expected: %d, Got: %d", expected, len(containerMetrics)) + } + for _, containerMetric := range containerMetrics { + if *containerMetric.ContainerName == "" { + return fmt.Errorf("ContainerName is empty") + } + if containerMetric.CpuStatsSet == nil { + return fmt.Errorf("CPUStatsSet is nil") + } + if containerMetric.MemoryStatsSet == nil { + return fmt.Errorf("MemoryStatsSet is nil") + } + if containerMetric.NetworkStatsSet == nil { + return fmt.Errorf("NetworkStatsSet is nil") + } + if containerMetric.StorageStatsSet == nil { + return fmt.Errorf("StorageStatsSet is nil") + } + } + return nil +} + +func validateIdleContainerMetrics(t *testing.T, engine *DockerStatsEngine) { + metadata, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err, "getting instance metrics failed") + assert.NoError(t, validateMetricsMetadata(metadata), "validating metadata failed") + + assert.True(t, aws.BoolValue(metadata.Idle), "expected idle metadata to be true") + assert.True(t, aws.BoolValue(metadata.Fin), "fin not set to true when idle") + assert.Len(t, taskMetrics, 0, "expected empty task metrics") +} + +func validateMetricsMetadata(metadata *ecstcs.MetricsMetadata) error { + if metadata == nil { + return fmt.Errorf("Metadata is nil") + } + if *metadata.Cluster != defaultCluster { + return fmt.Errorf("Expected cluster in metadata to be: %s, got %s", + defaultCluster, *metadata.Cluster) + } + if *metadata.ContainerInstance != defaultContainerInstance { + return fmt.Errorf("Expected container instance in metadata to be %s, got %s", + defaultContainerInstance, *metadata.ContainerInstance) + } + if len(*metadata.MessageId) == 0 { + return fmt.Errorf("Empty MessageId") + } + + return nil +} + +func validateHealthMetricsMetadata(metadata *ecstcs.HealthMetadata) error { + if metadata == nil { + return fmt.Errorf("metadata is nil") + } + + if aws.StringValue(metadata.Cluster) != defaultCluster { + return fmt.Errorf("expected cluster in metadata to be: %s, got %s", + 
defaultCluster, aws.StringValue(metadata.Cluster)) + } + + if aws.StringValue(metadata.ContainerInstance) != defaultContainerInstance { + return fmt.Errorf("expected container instance in metadata to be %s, got %s", + defaultContainerInstance, aws.StringValue(metadata.ContainerInstance)) + } + if len(aws.StringValue(metadata.MessageId)) == 0 { + return fmt.Errorf("empty MessageId") + } + + return nil +} + +func validateContainerHealthMetrics(metrics []*ecstcs.ContainerHealth, expected int) error { + if len(metrics) != expected { + return fmt.Errorf("mismatch in number of ContainerHealth elements. Expected: %d, Got: %d", + expected, len(metrics)) + } + for _, health := range metrics { + if aws.StringValue(health.ContainerName) == "" { + return fmt.Errorf("container name is empty") + } + if aws.StringValue(health.HealthStatus) == "" { + return fmt.Errorf("container health status is empty") + } + if aws.TimeValue(health.StatusSince).IsZero() { + return fmt.Errorf("container health status change timestamp is empty") + } + } + return nil +} + +func validateTaskHealthMetrics(t *testing.T, engine *DockerStatsEngine) { + healthMetadata, healthMetrics, err := engine.GetTaskHealthMetrics() + assert.NoError(t, err, "getting task health metrics failed") + require.Len(t, healthMetrics, 1) + assert.NoError(t, validateHealthMetricsMetadata(healthMetadata), "validating health metedata failed") + assert.Equal(t, aws.StringValue(healthMetrics[0].TaskArn), taskArn, "task arn not expected") + assert.Equal(t, aws.StringValue(healthMetrics[0].TaskDefinitionFamily), taskDefinitionFamily, "task definition family not expected") + assert.Equal(t, aws.StringValue(healthMetrics[0].TaskDefinitionVersion), taskDefinitionVersion, "task definition version not expected") + assert.NoError(t, validateContainerHealthMetrics(healthMetrics[0].Containers, 1)) +} + +func validateEmptyTaskHealthMetrics(t *testing.T, engine *DockerStatsEngine) { + // If the metrics is empty, no health metrics will be send, 
the metadata won't be used + // no need to check the metadata here + _, healthMetrics, err := engine.GetTaskHealthMetrics() + assert.NoError(t, err, "getting task health failed") + assert.Len(t, healthMetrics, 0, "no health metrics was expected") +} + +func createFakeContainerStats() []*ContainerStats { + netStats := &NetworkStats{ + RxBytes: 796, + RxDropped: 6, + RxErrors: 0, + RxPackets: 10, + TxBytes: 8192, + TxDropped: 5, + TxErrors: 0, + TxPackets: 60, + } + return []*ContainerStats{ + {22400432, 1839104, uint64(100), uint64(200), netStats, parseNanoTime("2015-02-12T21:22:05.131117533Z")}, + {116499979, 3649536, uint64(300), uint64(400), netStats, parseNanoTime("2015-02-12T21:22:05.232291187Z")}, + } +} + +type MockTaskEngine struct { +} + +func (engine *MockTaskEngine) GetAdditionalAttributes() []*ecs.Attribute { + return nil +} + +func (engine *MockTaskEngine) Init(ctx context.Context) error { + return nil +} +func (engine *MockTaskEngine) MustInit(ctx context.Context) { +} + +func (engine *MockTaskEngine) StateChangeEvents() chan statechange.Event { + return make(chan statechange.Event) +} + +func (engine *MockTaskEngine) SetDataClient(data.Client) { +} + +func (engine *MockTaskEngine) AddTask(*apitask.Task) { +} + +func (engine *MockTaskEngine) ListTasks() ([]*apitask.Task, error) { + return nil, nil +} + +func (engine *MockTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) { + return nil, false +} + +func (engine *MockTaskEngine) UnmarshalJSON([]byte) error { + return nil +} + +func (engine *MockTaskEngine) MarshalJSON() ([]byte, error) { + return make([]byte, 0), nil +} + +func (engine *MockTaskEngine) Version() (string, error) { + return "", nil +} + +func (engine *MockTaskEngine) LoadState() error { + return nil +} + +func (engine *MockTaskEngine) SaveState() error { + return nil +} + +func (engine *MockTaskEngine) Capabilities() []*ecs.Attribute { + return nil +} + +func (engine *MockTaskEngine) Disable() { +} + +func (engine *MockTaskEngine) 
Info() (types.Info, error) { + return types.Info{}, nil +} diff --git a/agent/stats/common_unix_test.go b/agent/stats/common_unix_test.go new file mode 100644 index 00000000000..85a8966278c --- /dev/null +++ b/agent/stats/common_unix_test.go @@ -0,0 +1,35 @@ +//+build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "context" + "os" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + ecsengine "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/utils" + + sdkClient "github.com/docker/docker/client" +) + +var ( + testImageName = "amazon/amazon-ecs-gremlin:make" + endpoint = utils.DefaultIfBlank(os.Getenv(ecsengine.DockerEndpointEnvVariable), ecsengine.DockerDefaultEndpoint) + client, _ = sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + sdkClientFactory = sdkclientfactory.NewFactory(context.TODO(), endpoint) + ctx = context.TODO() +) diff --git a/agent/stats/common_windows_test.go b/agent/stats/common_windows_test.go new file mode 100644 index 00000000000..52dfaa6cfdb --- /dev/null +++ b/agent/stats/common_windows_test.go @@ -0,0 +1,35 @@ +//+build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "context" + "os" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory" + ecsengine "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/utils" + + sdkClient "github.com/docker/docker/client" +) + +var ( + testImageName = "amazon/amazon-ecs-stats:make" + endpoint = utils.DefaultIfBlank(os.Getenv(ecsengine.DockerEndpointEnvVariable), "npipe:////./pipe/docker_engine") + client, _ = sdkClient.NewClientWithOpts(sdkClient.WithHost(endpoint), sdkClient.WithVersion(sdkclientfactory.GetDefaultVersion().String())) + sdkClientFactory = sdkclientfactory.NewFactory(context.TODO(), endpoint) + ctx = context.TODO() +) diff --git a/agent/stats/container.go b/agent/stats/container.go new file mode 100644 index 00000000000..9e2a412e7a0 --- /dev/null +++ b/agent/stats/container.go @@ -0,0 +1,153 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/stats/resolver" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/cihub/seelog" +) + +func newStatsContainer(dockerID string, client dockerapi.DockerClient, resolver resolver.ContainerMetadataResolver, + cfg *config.Config) (*StatsContainer, error) { + dockerContainer, err := resolver.ResolveContainer(dockerID) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + return &StatsContainer{ + containerMetadata: &ContainerMetadata{ + DockerID: dockerID, + Name: dockerContainer.Container.Name, + NetworkMode: dockerContainer.Container.GetNetworkMode(), + }, + ctx: ctx, + cancel: cancel, + client: client, + resolver: resolver, + config: cfg, + }, nil +} + +func (container *StatsContainer) StartStatsCollection() { + // queue will be sized to hold enough stats for 4 publishing intervals. 
+ var queueSize int + if container.config != nil && container.config.PollMetrics.Enabled() { + pollingInterval := container.config.PollingMetricsWaitDuration.Seconds() + queueSize = int(config.DefaultContainerMetricsPublishInterval.Seconds() / pollingInterval * 4) + } else { + // for streaming stats we assume 1 stat every second + queueSize = int(config.DefaultContainerMetricsPublishInterval.Seconds() * 4) + } + container.statsQueue = NewQueue(queueSize) + go container.collect() +} + +func (container *StatsContainer) StopStatsCollection() { + container.cancel() +} + +func (container *StatsContainer) collect() { + dockerID := container.containerMetadata.DockerID + backoff := retry.NewExponentialBackoff(time.Second*1, time.Second*10, 0.5, 2) + for { + err := container.processStatsStream() + select { + case <-container.ctx.Done(): + seelog.Infof("Container [%s]: Stopping stats collection", dockerID) + return + default: + if err != nil { + d := backoff.Duration() + seelog.Debugf("Container [%s]: Error processing stats stream of container, backing off %s before reopening", dockerID, d) + time.Sleep(d) + } + // We were disconnected from the stats stream. + // Check if the container is terminal. If it is, stop collecting metrics. + // We might sometimes miss events from docker task engine and this helps + // in reconciling the state. + terminal, err := container.terminal() + if err != nil { + // Error determining if the container is terminal. This means that the container + // id could not be resolved to a container that is being tracked by the + // docker task engine. If the docker task engine has already removed + // the container from its state, there's no point in stats engine tracking the + // container. So, clean-up anyway. 
+ seelog.Warnf("Container [%s]: Error determining if the container is terminal, stopping stats collection: %v", dockerID, err) + container.StopStatsCollection() + return + } else if terminal { + seelog.Infof("Container [%s]: container is terminal, stopping stats collection", dockerID) + container.StopStatsCollection() + return + } + } + } +} + +func (container *StatsContainer) processStatsStream() error { + dockerID := container.containerMetadata.DockerID + seelog.Debugf("Collecting stats for container %s", dockerID) + if container.client == nil { + return errors.New("container processStatsStream: Client is not set.") + } + dockerStats, errC := container.client.Stats(container.ctx, dockerID, dockerclient.StatsInactivityTimeout) + + returnError := false + for { + select { + case <-container.ctx.Done(): + return nil + case err := <-errC: + select { + case <-container.ctx.Done(): + // ignore error when container.ctx.Done() + default: + seelog.Warnf("Error encountered processing metrics stream from docker, this may affect cloudwatch metric accuracy: %s", err) + returnError = true + } + case rawStat, ok := <-dockerStats: + if !ok { + if returnError { + return fmt.Errorf("error encountered processing metrics stream from docker") + } + return nil + } + err := validateDockerStats(rawStat) + if err != nil { + return err + } + + if err := container.statsQueue.Add(rawStat); err != nil { + seelog.Warnf("Container [%s]: error converting stats for container: %v", dockerID, err) + } + } + } +} + +func (container *StatsContainer) terminal() (bool, error) { + dockerContainer, err := container.resolver.ResolveContainer(container.containerMetadata.DockerID) + if err != nil { + return false, err + } + return dockerContainer.Container.KnownTerminal(), nil +} diff --git a/agent/stats/container_test.go b/agent/stats/container_test.go new file mode 100644 index 00000000000..35d126c6c40 --- /dev/null +++ b/agent/stats/container_test.go @@ -0,0 +1,210 @@ +//+build unit + +// Copyright 
Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "context" + "encoding/json" + "fmt" + "math" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + mock_resolver "github.com/aws/amazon-ecs-agent/agent/stats/resolver/mock" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" +) + +type StatTestData struct { + timestamp time.Time + cpuTime uint64 + memBytes uint64 +} + +var statsData = []*StatTestData{ + {parseNanoTime("2015-02-12T21:22:05.131117533Z"), 22400432, 1839104}, + {parseNanoTime("2015-02-12T21:22:05.232291187Z"), 116499979, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.333776335Z"), 248503503, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.434753595Z"), 372167097, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.535746779Z"), 502862518, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.638709495Z"), 638485801, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.739985398Z"), 780707806, 3649536}, + {parseNanoTime("2015-02-12T21:22:05.840941705Z"), 911624529, 3649536}, +} + +func TestContainerStatsCollection(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + + 
dockerID := "container1" + ctx, cancel := context.WithCancel(context.TODO()) + statChan := make(chan *types.StatsJSON) + errC := make(chan error) + mockDockerClient.EXPECT().Stats(ctx, dockerID, dockerclient.StatsInactivityTimeout).Return(statChan, errC) + go func() { + for _, stat := range statsData { + // doing this with json makes me sad, but is the easiest way to + // deal with the types.StatsJSON.MemoryStats inner struct + jsonStat := fmt.Sprintf(` + { + "memory_stats": {"usage":%d, "privateworkingset":%d}, + "cpu_stats":{ + "cpu_usage":{ + "percpu_usage":[%d], + "total_usage":%d + } + } + }`, stat.memBytes, stat.memBytes, stat.cpuTime, stat.cpuTime) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonStat), dockerStat) + dockerStat.Read = stat.timestamp + statChan <- dockerStat + } + }() + + container := &StatsContainer{ + containerMetadata: &ContainerMetadata{ + DockerID: dockerID, + }, + ctx: ctx, + cancel: cancel, + client: mockDockerClient, + } + container.StartStatsCollection() + time.Sleep(checkPointSleep) + container.StopStatsCollection() + cpuStatsSet, err := container.statsQueue.GetCPUStatsSet() + if err != nil { + t.Fatal("Error gettting cpu stats set:", err) + } + if *cpuStatsSet.Min == math.MaxFloat64 || math.IsNaN(*cpuStatsSet.Min) { + t.Error("Min value incorrectly set: ", *cpuStatsSet.Min) + } + if *cpuStatsSet.Max == -math.MaxFloat64 || math.IsNaN(*cpuStatsSet.Max) { + t.Error("Max value incorrectly set: ", *cpuStatsSet.Max) + } + if *cpuStatsSet.SampleCount == 0 { + t.Error("Samplecount is 0") + } + if *cpuStatsSet.Sum == 0 { + t.Error("Sum value incorrectly set: ", *cpuStatsSet.Sum) + } + + memStatsSet, err := container.statsQueue.GetMemoryStatsSet() + if err != nil { + t.Error("Error gettting cpu stats set:", err) + } + if *memStatsSet.Min == math.MaxFloat64 { + t.Error("Min value incorrectly set: ", *memStatsSet.Min) + } + if *memStatsSet.Max == 0 { + t.Error("Max value incorrectly set: ", *memStatsSet.Max) + } + if 
*memStatsSet.SampleCount == 0 { + t.Error("Samplecount is 0") + } + if *memStatsSet.Sum == 0 { + t.Error("Sum value incorrectly set: ", *memStatsSet.Sum) + } +} + +func TestContainerStatsCollectionReconnection(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + + dockerID := "container1" + ctx, cancel := context.WithCancel(context.TODO()) + + statChan := make(chan *types.StatsJSON) + errChan := make(chan error) + go func() { errChan <- fmt.Errorf("test error") }() + closedChan := make(chan *types.StatsJSON) + close(closedChan) + + mockContainer := &apicontainer.DockerContainer{ + DockerID: dockerID, + Container: &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + } + gomock.InOrder( + mockDockerClient.EXPECT().Stats(ctx, dockerID, dockerclient.StatsInactivityTimeout).Return(closedChan, errChan), + resolver.EXPECT().ResolveContainer(dockerID).Return(mockContainer, nil), + mockDockerClient.EXPECT().Stats(ctx, dockerID, dockerclient.StatsInactivityTimeout).Return(closedChan, nil), + resolver.EXPECT().ResolveContainer(dockerID).Return(mockContainer, nil), + mockDockerClient.EXPECT().Stats(ctx, dockerID, dockerclient.StatsInactivityTimeout).Return(statChan, nil), + ) + + container := &StatsContainer{ + containerMetadata: &ContainerMetadata{ + DockerID: dockerID, + }, + ctx: ctx, + cancel: cancel, + client: mockDockerClient, + resolver: resolver, + } + container.StartStatsCollection() + time.Sleep(checkPointSleep) + container.StopStatsCollection() +} + +func TestContainerStatsCollectionStopsIfContainerIsTerminal(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + + dockerID := "container1" + ctx, cancel := 
context.WithCancel(context.TODO()) + + closedChan := make(chan *types.StatsJSON) + close(closedChan) + errC := make(chan error) + + statsErr := fmt.Errorf("test error") + mockContainer := &apicontainer.DockerContainer{ + DockerID: dockerID, + Container: &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + } + gomock.InOrder( + mockDockerClient.EXPECT().Stats(ctx, dockerID, dockerclient.StatsInactivityTimeout).Return(closedChan, errC), + resolver.EXPECT().ResolveContainer(dockerID).Return(mockContainer, statsErr), + ) + + container := &StatsContainer{ + containerMetadata: &ContainerMetadata{ + DockerID: dockerID, + }, + ctx: ctx, + cancel: cancel, + client: mockDockerClient, + resolver: resolver, + } + container.StartStatsCollection() + select { + case <-ctx.Done(): + } +} diff --git a/agent/stats/engine.go b/agent/stats/engine.go new file mode 100644 index 00000000000..fdef6dfb834 --- /dev/null +++ b/agent/stats/engine.go @@ -0,0 +1,795 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +//go:generate mockgen -destination=mock/$GOFILE -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/stats Engine + +import ( + "context" + "fmt" + "strconv" + "sync" + "time" + + "github.com/cihub/seelog" + "github.com/pborman/uuid" + "github.com/pkg/errors" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + ecsengine "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/stats/resolver" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" +) + +const ( + containerChangeHandler = "DockerStatsEngineDockerEventsHandler" + queueResetThreshold = 2 * dockerclient.StatsInactivityTimeout + hostNetworkMode = "host" + noneNetworkMode = "none" +) + +var ( + // EmptyMetricsError indicates an error for a task when there are no container + // metrics to report + EmptyMetricsError = errors.New("stats engine: no task metrics to report") + // EmptyHealthMetricsError indicates an error for a task when there are no container + // health metrics to report + EmptyHealthMetricsError = errors.New("stats engine: no task health metrics to report") +) + +// DockerContainerMetadataResolver implements ContainerMetadataResolver for +// DockerTaskEngine. +type DockerContainerMetadataResolver struct { + dockerTaskEngine *ecsengine.DockerTaskEngine +} + +// Engine defines methods to be implemented by the engine struct. It is +// defined to make testing easier. 
+type Engine interface { + GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) + ContainerDockerStats(taskARN string, containerID string) (*types.StatsJSON, *NetworkStatsPerSec, error) + GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) +} + +// DockerStatsEngine is used to monitor docker container events and to report +// utilization metrics of the same. + +type DockerStatsEngine struct { + ctx context.Context + stopEngine context.CancelFunc + client dockerapi.DockerClient + cluster string + containerInstanceArn string + lock sync.RWMutex + config *config.Config + containerChangeEventStream *eventstream.EventStream + resolver resolver.ContainerMetadataResolver + // tasksToContainers maps task arns to a map of container ids to StatsContainer objects. + tasksToContainers map[string]map[string]*StatsContainer + // tasksToHealthCheckContainers map task arns to the containers that has health check enabled + tasksToHealthCheckContainers map[string]map[string]*StatsContainer + // tasksToDefinitions maps task arns to task definition name and family metadata objects. + tasksToDefinitions map[string]*taskDefinition + taskToTaskStats map[string]*StatsTask +} + +// ResolveTask resolves the api task object, given container id. 
+func (resolver *DockerContainerMetadataResolver) ResolveTask(dockerID string) (*apitask.Task, error) { + if resolver.dockerTaskEngine == nil { + return nil, fmt.Errorf("Docker task engine uninitialized") + } + task, found := resolver.dockerTaskEngine.State().TaskByID(dockerID) + if !found { + return nil, fmt.Errorf("Could not map docker id to task: %s", dockerID) + } + + return task, nil +} + +func (resolver *DockerContainerMetadataResolver) ResolveTaskByARN(taskArn string) (*apitask.Task, error) { + if resolver.dockerTaskEngine == nil { + return nil, fmt.Errorf("docker task engine uninitialized") + } + task, found := resolver.dockerTaskEngine.State().TaskByArn(taskArn) + if !found { + return nil, fmt.Errorf("could not map task arn to task: %s", taskArn) + } + return task, nil + +} + +// ResolveContainer resolves the api container object, given container id. +func (resolver *DockerContainerMetadataResolver) ResolveContainer(dockerID string) (*apicontainer.DockerContainer, error) { + if resolver.dockerTaskEngine == nil { + return nil, fmt.Errorf("Docker task engine uninitialized") + } + container, found := resolver.dockerTaskEngine.State().ContainerByID(dockerID) + if !found { + return nil, fmt.Errorf("Could not map docker id to container: %s", dockerID) + } + + return container, nil +} + +// NewDockerStatsEngine creates a new instance of the DockerStatsEngine object. +// MustInit() must be called to initialize the fields of the new event listener. 
+func NewDockerStatsEngine(cfg *config.Config, client dockerapi.DockerClient, containerChangeEventStream *eventstream.EventStream) *DockerStatsEngine { + return &DockerStatsEngine{ + client: client, + resolver: nil, + config: cfg, + tasksToContainers: make(map[string]map[string]*StatsContainer), + tasksToHealthCheckContainers: make(map[string]map[string]*StatsContainer), + tasksToDefinitions: make(map[string]*taskDefinition), + taskToTaskStats: make(map[string]*StatsTask), + containerChangeEventStream: containerChangeEventStream, + } +} + +// synchronizeState goes through all the containers on the instance to synchronize the state on agent start +func (engine *DockerStatsEngine) synchronizeState() error { + listContainersResponse := engine.client.ListContainers(engine.ctx, false, dockerclient.ListContainersTimeout) + if listContainersResponse.Error != nil { + return listContainersResponse.Error + } + + for _, containerID := range listContainersResponse.DockerIDs { + engine.addAndStartStatsContainer(containerID) + } + + return nil +} + +// addAndStartStatsContainer add the container into stats engine and start collecting the container stats +func (engine *DockerStatsEngine) addAndStartStatsContainer(containerID string) { + engine.lock.Lock() + defer engine.lock.Unlock() + statsContainer, statsTaskContainer, err := engine.addContainerUnsafe(containerID) + if err != nil { + seelog.Debugf("Adding container to stats watch list failed, container: %s, err: %v", containerID, err) + return + } + + if engine.config.DisableMetrics.Enabled() || statsContainer == nil { + return + } + + statsContainer.StartStatsCollection() + + task, err := engine.resolver.ResolveTask(containerID) + if err != nil { + return + } + + dockerContainer, errResolveContainer := engine.resolver.ResolveContainer(containerID) + if errResolveContainer != nil { + seelog.Debugf("Could not map container ID to container, container: %s, err: %s", containerID, err) + return + } + + if task.IsNetworkModeAWSVPC() 
{ + // Start stats collector only for pause container + if statsTaskContainer != nil && dockerContainer.Container.Type == apicontainer.ContainerCNIPause { + statsTaskContainer.StartStatsCollection() + } else { + seelog.Debugf("stats task container is nil, cannot start task stats collection") + } + } + +} + +// MustInit initializes fields of the DockerStatsEngine object. +func (engine *DockerStatsEngine) MustInit(ctx context.Context, taskEngine ecsengine.TaskEngine, cluster string, containerInstanceArn string) error { + derivedCtx, cancel := context.WithCancel(ctx) + engine.stopEngine = cancel + + engine.ctx = derivedCtx + // TODO ensure that this is done only once per engine object + seelog.Info("Initializing stats engine") + engine.cluster = cluster + engine.containerInstanceArn = containerInstanceArn + + var err error + engine.resolver, err = newDockerContainerMetadataResolver(taskEngine) + if err != nil { + return err + } + + // Subscribe to the container change event stream + err = engine.containerChangeEventStream.Subscribe(containerChangeHandler, engine.handleDockerEvents) + if err != nil { + return fmt.Errorf("Failed to subscribe to container change event stream, err %v", err) + } + err = engine.synchronizeState() + if err != nil { + seelog.Warnf("Synchronize the container state failed, err: %v", err) + } + + go engine.waitToStop() + return nil +} + +// Shutdown cleans up the resources after the stats engine. +func (engine *DockerStatsEngine) Shutdown() { + engine.stopEngine() + engine.Disable() +} + +// Disable prevents this engine from managing any additional tasks. 
+func (engine *DockerStatsEngine) Disable() { + engine.lock.Lock() +} + +// waitToStop waits for the container change event stream close ans stop collection metrics +func (engine *DockerStatsEngine) waitToStop() { + // Waiting for the event stream to close + <-engine.containerChangeEventStream.Context().Done() + seelog.Debug("Event stream closed, stop listening to the event stream") + engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + engine.removeAll() +} + +// removeAll stops the periodic usage data collection for all containers +func (engine *DockerStatsEngine) removeAll() { + engine.lock.Lock() + defer engine.lock.Unlock() + + for task, containers := range engine.tasksToContainers { + for _, statsContainer := range containers { + statsContainer.StopStatsCollection() + } + delete(engine.tasksToContainers, task) + } + + for task := range engine.tasksToHealthCheckContainers { + delete(engine.tasksToContainers, task) + } +} + +func (engine *DockerStatsEngine) addToStatsTaskMapUnsafe(task *apitask.Task, dockerContainerName string, + containerType apicontainer.ContainerType) { + var statsTaskContainer *StatsTask + if task.IsNetworkModeAWSVPC() && containerType == apicontainer.ContainerCNIPause { + // Excluding the pause container + numberOfContainers := len(task.Containers) - 1 + var taskExists bool + statsTaskContainer, taskExists = engine.taskToTaskStats[task.Arn] + if !taskExists { + containerInspect, err := engine.client.InspectContainer(engine.ctx, dockerContainerName, + dockerclient.InspectContainerTimeout) + if err != nil { + return + } + containerpid := strconv.Itoa(containerInspect.State.Pid) + statsTaskContainer, err = newStatsTaskContainer(task.Arn, containerpid, numberOfContainers, + engine.resolver, engine.config.PollingMetricsWaitDuration) + if err != nil { + return + } + engine.taskToTaskStats[task.Arn] = statsTaskContainer + } else { + statsTaskContainer.TaskMetadata.NumberContainers = numberOfContainers + } + } +} + +// 
addContainerUnsafe adds a container to the map of containers being watched. +func (engine *DockerStatsEngine) addContainerUnsafe(dockerID string) (*StatsContainer, *StatsTask, error) { + // Make sure that this container belongs to a task and that the task + // is not terminal. + task, err := engine.resolver.ResolveTask(dockerID) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not map container to task, ignoring container: %s", dockerID) + } + + if len(task.Arn) == 0 || len(task.Family) == 0 { + return nil, nil, errors.Errorf("stats add container: invalid task fields, arn: %s, familiy: %s", task.Arn, task.Family) + } + + if task.GetKnownStatus().Terminal() { + return nil, nil, errors.Errorf("stats add container: task is terminal, ignoring container: %s, task: %s", dockerID, task.Arn) + } + + statsContainer, err := newStatsContainer(dockerID, engine.client, engine.resolver, engine.config) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not map docker container ID to container, ignoring container: %s", dockerID) + } + + seelog.Debugf("Adding container to stats watch list, id: %s, task: %s", dockerID, task.Arn) + engine.tasksToDefinitions[task.Arn] = &taskDefinition{family: task.Family, version: task.Version} + + dockerContainer, errResolveContainer := engine.resolver.ResolveContainer(dockerID) + if errResolveContainer != nil { + seelog.Debugf("Could not map container ID to container, container: %s, err: %s", dockerID, err) + } + + watchStatsContainer := false + if !engine.config.DisableMetrics.Enabled() { + // Adding container to the map for collecting stats + watchStatsContainer = engine.addToStatsContainerMapUnsafe(task.Arn, dockerID, statsContainer, + engine.containerMetricsMapUnsafe) + if errResolveContainer == nil { + engine.addToStatsTaskMapUnsafe(task, dockerContainer.DockerName, dockerContainer.Container.Type) + } + } + + if errResolveContainer == nil && dockerContainer.Container.HealthStatusShouldBeReported() { + // Track the 
container health status + engine.addToStatsContainerMapUnsafe(task.Arn, dockerID, statsContainer, engine.healthCheckContainerMapUnsafe) + seelog.Debugf("Adding container to stats health check watch list, id: %s, task: %s", dockerID, task.Arn) + } + + if !watchStatsContainer { + return nil, nil, nil + } + return statsContainer, engine.taskToTaskStats[task.Arn], nil +} + +func (engine *DockerStatsEngine) containerMetricsMapUnsafe() map[string]map[string]*StatsContainer { + return engine.tasksToContainers +} + +func (engine *DockerStatsEngine) healthCheckContainerMapUnsafe() map[string]map[string]*StatsContainer { + return engine.tasksToHealthCheckContainers +} + +// addToStatsContainerMapUnsafe adds the statscontainer into stats for tracking and returns a boolean indicates +// whether this container should be tracked for collecting metrics +func (engine *DockerStatsEngine) addToStatsContainerMapUnsafe( + taskARN, containerID string, + statsContainer *StatsContainer, + statsMapToUpdate func() map[string]map[string]*StatsContainer) bool { + + taskToContainerMap := statsMapToUpdate() + + // Check if this container is already being watched. + _, taskExists := taskToContainerMap[taskARN] + if taskExists { + // task arn exists in map. + _, containerExists := taskToContainerMap[taskARN][containerID] + if containerExists { + // container arn exists in map. + seelog.Debugf("Container already being watched, ignoring, id: %s", containerID) + return false + } + } else { + // Create a map for the task arn if it doesn't exist yet. + taskToContainerMap[taskARN] = make(map[string]*StatsContainer) + } + taskToContainerMap[taskARN][containerID] = statsContainer + + return true +} + +// GetInstanceMetrics gets all task metrics and instance metadata from stats engine. 
+func (engine *DockerStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) {
+	var taskMetrics []*ecstcs.TaskMetric
+	idle := engine.isIdle()
+	metricsMetadata := &ecstcs.MetricsMetadata{
+		Cluster:           aws.String(engine.cluster),
+		ContainerInstance: aws.String(engine.containerInstanceArn),
+		Idle:              aws.Bool(idle),
+		MessageId:         aws.String(uuid.NewRandom().String()),
+	}
+
+	if idle {
+		seelog.Debug("Instance is idle. No task metrics to report")
+		fin := true
+		metricsMetadata.Fin = &fin
+		return metricsMetadata, taskMetrics, nil
+	}
+
+	// Write lock: taskContainerMetricsUnsafe may remove terminal containers from
+	// the watch maps via stopTrackingContainerUnsafe.
+	engine.lock.Lock()
+	defer engine.lock.Unlock()
+
+	for taskArn := range engine.tasksToContainers {
+		containerMetrics, err := engine.taskContainerMetricsUnsafe(taskArn)
+		if err != nil {
+			seelog.Debugf("Error getting container metrics for task: %s, err: %v", taskArn, err)
+			continue
+		}
+
+		if len(containerMetrics) == 0 {
+			seelog.Debugf("Empty containerMetrics for task, ignoring, task: %s", taskArn)
+			continue
+		}
+
+		taskDef, exists := engine.tasksToDefinitions[taskArn]
+		if !exists {
+			seelog.Debugf("Could not map task to definition, task: %s", taskArn)
+			continue
+		}
+
+		// Copy the loop variable so the pointer stored in TaskMetric is stable.
+		metricTaskArn := taskArn
+		taskMetric := &ecstcs.TaskMetric{
+			TaskArn:               &metricTaskArn,
+			TaskDefinitionFamily:  &taskDef.family,
+			TaskDefinitionVersion: &taskDef.version,
+			ContainerMetrics:      containerMetrics,
+		}
+		taskMetrics = append(taskMetrics, taskMetric)
+	}
+
+	if len(taskMetrics) == 0 {
+		// Not idle. Expect taskMetrics to be there.
+		return nil, nil, EmptyMetricsError
+	}
+
+	engine.resetStatsUnsafe()
+	return metricsMetadata, taskMetrics, nil
+}
+
+// GetTaskHealthMetrics returns the container health metrics
+func (engine *DockerStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) {
+	var taskHealths []*ecstcs.TaskHealth
+	metadata := &ecstcs.HealthMetadata{
+		Cluster:           aws.String(engine.cluster),
+		ContainerInstance: aws.String(engine.containerInstanceArn),
+		MessageId:         aws.String(uuid.NewRandom().String()),
+	}
+
+	if !engine.containerHealthsToMonitor() {
+		return metadata, taskHealths, nil
+	}
+
+	// BUGFIX: take the write lock, not RLock. getTaskHealthUnsafe calls
+	// stopTrackingContainerUnsafe -> doRemoveContainerUnsafe, which deletes
+	// entries from the engine's maps; mutating them under a read lock races
+	// with concurrent readers (matches GetInstanceMetrics, which already
+	// takes the write lock for the same reason).
+	engine.lock.Lock()
+	defer engine.lock.Unlock()
+
+	for taskARN := range engine.tasksToHealthCheckContainers {
+		taskHealth := engine.getTaskHealthUnsafe(taskARN)
+		if taskHealth == nil {
+			continue
+		}
+		taskHealths = append(taskHealths, taskHealth)
+	}
+
+	if len(taskHealths) == 0 {
+		return nil, nil, EmptyHealthMetricsError
+	}
+
+	return metadata, taskHealths, nil
+}
+
+// isIdle returns true when no containers are being watched for metrics.
+func (engine *DockerStatsEngine) isIdle() bool {
+	engine.lock.RLock()
+	defer engine.lock.RUnlock()
+
+	return len(engine.tasksToContainers) == 0
+}
+
+// containerHealthsToMonitor returns true when at least one health-check container is watched.
+func (engine *DockerStatsEngine) containerHealthsToMonitor() bool {
+	engine.lock.RLock()
+	defer engine.lock.RUnlock()
+
+	return len(engine.tasksToHealthCheckContainers) != 0
+}
+
+// stopTrackingContainerUnsafe removes the StatsContainer from stats engine and
+// returns true if the container is stopped or no longer tracked in agent. Otherwise
+// it does nothing and return false
+func (engine *DockerStatsEngine) stopTrackingContainerUnsafe(container *StatsContainer, taskARN string) bool {
+	terminal, err := container.terminal()
+	if err != nil {
+		// Error determining if the container is terminal. This means that the container
+		// id could not be resolved to a container that is being tracked by the
+		// docker task engine. If the docker task engine has already removed
+		// the container from its state, there's no point in stats engine tracking the
+		// container. So, clean-up anyway.
+		seelog.Warnf("Error determining if the container %s is terminal, removing from stats, err: %v", container.containerMetadata.DockerID, err)
+		engine.doRemoveContainerUnsafe(container, taskARN)
+		return true
+	}
+	if terminal {
+		// Container is in known terminal state. Stop collection metrics.
+		seelog.Infof("Container %s is terminal, removing from stats", container.containerMetadata.DockerID)
+		engine.doRemoveContainerUnsafe(container, taskARN)
+		return true
+	}
+
+	return false
+}
+
+// getTaskHealthUnsafe aggregates the health status of all health-check-enabled
+// containers of a task. May remove stopped/untracked containers from the watch
+// maps as a side effect; callers must hold engine.lock (write).
+func (engine *DockerStatsEngine) getTaskHealthUnsafe(taskARN string) *ecstcs.TaskHealth {
+	// Acquire the task definition information
+	taskDefinition, ok := engine.tasksToDefinitions[taskARN]
+	if !ok {
+		seelog.Debugf("Could not map task to definitions, task: %s", taskARN)
+		return nil
+	}
+	// Check all the stats container for the task
+	containers, ok := engine.tasksToHealthCheckContainers[taskARN]
+	if !ok {
+		seelog.Debugf("Could not map task to health containers, task: %s", taskARN)
+		return nil
+	}
+
+	// Aggregate container health information for all the containers in the task
+	var containerHealths []*ecstcs.ContainerHealth
+	for _, container := range containers {
+		// check if the container is stopped/untracked, and remove it from stats
+		// engine if needed
+		if engine.stopTrackingContainerUnsafe(container, taskARN) {
+			continue
+		}
+		dockerContainer, err := engine.resolver.ResolveContainer(container.containerMetadata.DockerID)
+		if err != nil {
+			seelog.Debugf("Could not resolve the Docker ID in agent state: %s", container.containerMetadata.DockerID)
+			continue
+		}
+
+		// Check if the container has health check enabled
+		if !dockerContainer.Container.HealthStatusShouldBeReported() {
+			continue
+		}
+
+		healthInfo := dockerContainer.Container.GetHealthStatus()
+		if healthInfo.Since == nil {
+			// container was started but the health status isn't ready
+			healthInfo.Since = aws.Time(time.Now())
+		}
+		containerHealth := &ecstcs.ContainerHealth{
+			ContainerName: aws.String(dockerContainer.Container.Name),
+			HealthStatus:  aws.String(healthInfo.Status.BackendStatus()),
+			StatusSince:   aws.Time(healthInfo.Since.UTC()),
+		}
+		containerHealths = append(containerHealths, containerHealth)
+	}
+
+	if len(containerHealths) == 0 {
+		return nil
+	}
+
+	taskHealth := &ecstcs.TaskHealth{
+		Containers:            containerHealths,
+		TaskArn:               aws.String(taskARN),
+		TaskDefinitionFamily:  aws.String(taskDefinition.family),
+		TaskDefinitionVersion: aws.String(taskDefinition.version),
+	}
+
+	return taskHealth
+}
+
+// handleDockerEvents must be called after openEventstream; it processes each
+// event that it reads from the docker event stream.
+func (engine *DockerStatsEngine) handleDockerEvents(events ...interface{}) error {
+	for _, event := range events {
+		dockerContainerChangeEvent, ok := event.(dockerapi.DockerContainerChangeEvent)
+		if !ok {
+			return fmt.Errorf("Unexpected event received, expected docker container change event")
+		}
+
+		switch dockerContainerChangeEvent.Status {
+		case apicontainerstatus.ContainerRunning:
+			engine.addAndStartStatsContainer(dockerContainerChangeEvent.DockerID)
+		case apicontainerstatus.ContainerStopped:
+			engine.removeContainer(dockerContainerChangeEvent.DockerID)
+		default:
+			seelog.Debugf("Ignoring event for container, id: %s, status: %d", dockerContainerChangeEvent.DockerID, dockerContainerChangeEvent.Status)
+		}
+	}
+
+	return nil
+}
+
+// removeContainer deletes the container from the map of containers being watched.
+// It also stops the periodic usage data collection for the container.
+func (engine *DockerStatsEngine) removeContainer(dockerID string) {
+	engine.lock.Lock()
+	defer engine.lock.Unlock()
+
+	// Make sure that this container belongs to a task.
+	task, err := engine.resolver.ResolveTask(dockerID)
+	if err != nil {
+		seelog.Debugf("Could not map container to task, ignoring, err: %v, id: %s", err, dockerID)
+		return
+	}
+
+	_, taskExists := engine.tasksToContainers[task.Arn]
+	if !taskExists {
+		seelog.Debugf("Container not being watched, id: %s", dockerID)
+		return
+	}
+
+	// task arn exists in map.
+	container, containerExists := engine.tasksToContainers[task.Arn][dockerID]
+	if !containerExists {
+		// container arn does not exist in map.
+		seelog.Debugf("Container not being watched, id: %s", dockerID)
+		return
+	}
+
+	engine.doRemoveContainerUnsafe(container, task.Arn)
+}
+
+// newDockerContainerMetadataResolver returns a new instance of DockerContainerMetadataResolver.
+func newDockerContainerMetadataResolver(taskEngine ecsengine.TaskEngine) (*DockerContainerMetadataResolver, error) {
+	dockerTaskEngine, ok := taskEngine.(*ecsengine.DockerTaskEngine)
+	if !ok {
+		// Error type casting docker task engine.
+		return nil, fmt.Errorf("Could not load docker task engine")
+	}
+
+	resolver := &DockerContainerMetadataResolver{
+		dockerTaskEngine: dockerTaskEngine,
+	}
+
+	return resolver, nil
+}
+
+// taskContainerMetricsUnsafe gets all container metrics for a task arn.
+func (engine *DockerStatsEngine) taskContainerMetricsUnsafe(taskArn string) ([]*ecstcs.ContainerMetric, error) {
+	containerMap, taskExists := engine.tasksToContainers[taskArn]
+	if !taskExists {
+		return nil, fmt.Errorf("task not found")
+	}
+
+	var containerMetrics []*ecstcs.ContainerMetric
+	for _, container := range containerMap {
+		dockerID := container.containerMetadata.DockerID
+		// Check if the container is terminal. If it is, make sure that it is
+		// cleaned up properly. We might sometimes miss events from docker task
+		// engine and this helps in reconciling the state. The tcs client's
+		// GetInstanceMetrics probe is used as the trigger for this.
+		if engine.stopTrackingContainerUnsafe(container, taskArn) {
+			continue
+		}
+
+		// CPU and Memory are both critical, so skip the container if either of these fail.
+		cpuStatsSet, err := container.statsQueue.GetCPUStatsSet()
+		if err != nil {
+			seelog.Infof("cloudwatch metrics for container %v not collected, reason (cpu): %v", dockerID, err)
+			continue
+		}
+		memoryStatsSet, err := container.statsQueue.GetMemoryStatsSet()
+		if err != nil {
+			seelog.Infof("cloudwatch metrics for container %v not collected, reason (memory): %v", dockerID, err)
+			continue
+		}
+
+		containerMetric := &ecstcs.ContainerMetric{
+			ContainerName:  &container.containerMetadata.Name,
+			CpuStatsSet:    cpuStatsSet,
+			MemoryStatsSet: memoryStatsSet,
+		}
+
+		// Storage stats are best-effort: log and continue without them.
+		storageStatsSet, err := container.statsQueue.GetStorageStatsSet()
+		if err != nil {
+			seelog.Warnf("Error getting storage stats, err: %v, container: %v", err, dockerID)
+		} else {
+			containerMetric.StorageStatsSet = storageStatsSet
+		}
+
+		task, err := engine.resolver.ResolveTask(dockerID)
+		if err != nil {
+			seelog.Warnf("Task not found for container ID: %s", dockerID)
+		} else {
+			// send network stats for default/bridge/nat/awsvpc network modes
+			if !task.IsNetworkModeAWSVPC() && container.containerMetadata.NetworkMode != hostNetworkMode &&
+				container.containerMetadata.NetworkMode != noneNetworkMode {
+				networkStatsSet, err := container.statsQueue.GetNetworkStatsSet()
+				if err != nil {
+					// we log the error and still continue to publish cpu, memory stats
+					seelog.Warnf("Error getting network stats: %v, container: %v", err, dockerID)
+				} else {
+					containerMetric.NetworkStatsSet = networkStatsSet
+				}
+			} else if task.IsNetworkModeAWSVPC() {
+				// In awsvpc mode, network stats come from the task-level (pause
+				// container) stats collector rather than the container itself.
+				taskStatsMap, taskExistsInTaskStats := engine.taskToTaskStats[taskArn]
+				if !taskExistsInTaskStats {
+					return nil, fmt.Errorf("task not found")
+				}
+				if dockerContainer, err := engine.resolver.ResolveContainer(dockerID); err != nil {
+					seelog.Debugf("Could not map container ID to container, container: %s, err: %s", dockerID, err)
+				} else {
+					// do not add network stats for pause container
+					if dockerContainer.Container.Type != apicontainer.ContainerCNIPause {
+						networkStats, err := taskStatsMap.StatsQueue.GetNetworkStatsSet()
+						if err != nil {
+							seelog.Warnf("error getting network stats: %v, task: %v", err, taskArn)
+						} else {
+							containerMetric.NetworkStatsSet = networkStats
+						}
+					}
+				}
+			}
+		}
+		containerMetrics = append(containerMetrics, containerMetric)
+	}
+	return containerMetrics, nil
+}
+
+// doRemoveContainerUnsafe stops stats collection for the container and removes it
+// from all watch maps, pruning empty task entries; callers must hold engine.lock.
+func (engine *DockerStatsEngine) doRemoveContainerUnsafe(container *StatsContainer, taskArn string) {
+	container.StopStatsCollection()
+	dockerID := container.containerMetadata.DockerID
+	delete(engine.tasksToContainers[taskArn], dockerID)
+	seelog.Debugf("Deleted container from tasks, id: %s", dockerID)
+
+	if len(engine.tasksToContainers[taskArn]) == 0 {
+		// No containers in task, delete task arn from map.
+		delete(engine.tasksToContainers, taskArn)
+		// No need to verify if the key exists in tasksToDefinitions.
+		// Delete will do nothing if the specified key doesn't exist.
+		delete(engine.tasksToDefinitions, taskArn)
+		seelog.Debugf("Deleted task from tasks, arn: %s", taskArn)
+	}
+
+	// Remove the container from health container watch list
+	if _, ok := engine.tasksToHealthCheckContainers[taskArn][dockerID]; !ok {
+		return
+	}
+
+	delete(engine.tasksToHealthCheckContainers[taskArn], dockerID)
+	if len(engine.tasksToHealthCheckContainers[taskArn]) == 0 {
+		delete(engine.tasksToHealthCheckContainers, taskArn)
+		seelog.Debugf("Deleted task from container health watch list, arn: %s", taskArn)
+	}
+}
+
+// resetStatsUnsafe resets stats for all watched containers.
+func (engine *DockerStatsEngine) resetStatsUnsafe() {
+	// Clears every watched container's stats queue; callers must hold engine.lock.
+	for _, containerMap := range engine.tasksToContainers {
+		for _, container := range containerMap {
+			container.statsQueue.Reset()
+		}
+	}
+}
+
+// ContainerDockerStats returns the last stored raw docker stats object for a container
+func (engine *DockerStatsEngine) ContainerDockerStats(taskARN string, containerID string) (*types.StatsJSON, *NetworkStatsPerSec, error) {
+	engine.lock.RLock()
+	defer engine.lock.RUnlock()
+
+	containerIDToStatsContainer, ok := engine.tasksToContainers[taskARN]
+	taskToTaskStats := engine.taskToTaskStats
+	if !ok {
+		return nil, nil, errors.Errorf("stats engine: task '%s' for container '%s' not found",
+			taskARN, containerID)
+	}
+
+	container, ok := containerIDToStatsContainer[containerID]
+	if !ok {
+		return nil, nil, errors.Errorf("stats engine: container not found: %s", containerID)
+	}
+	containerStats := container.statsQueue.GetLastStat()
+	containerNetworkRateStats := container.statsQueue.GetLastNetworkStatPerSec()
+
+	// Insert network stats in container stats
+	task, err := engine.resolver.ResolveTaskByARN(taskARN)
+	if err != nil {
+		return nil, nil, errors.Errorf("stats engine: task '%s' not found",
+			taskARN)
+	}
+
+	if task.IsNetworkModeAWSVPC() {
+		// For awsvpc tasks, network stats live on the task-level (pause container)
+		// collector, so overlay them onto the container's stats.
+		// NOTE(review): containerStats may be nil if no stat has been collected yet —
+		// confirm GetLastStat's contract before dereferencing here.
+		taskStats, ok := taskToTaskStats[taskARN]
+		if ok {
+			if taskStats.StatsQueue.GetLastStat() != nil {
+				containerStats.Networks = taskStats.StatsQueue.GetLastStat().Networks
+			}
+			containerNetworkRateStats = taskStats.StatsQueue.GetLastNetworkStatPerSec()
+		} else {
+			seelog.Warnf("Network stats not found for container %s", containerID)
+		}
+	}
+
+	return containerStats, containerNetworkRateStats, nil
+}
diff --git a/agent/stats/engine_integ_test.go b/agent/stats/engine_integ_test.go
new file mode 100644
index 00000000000..133f16b820f
--- /dev/null
+++ b/agent/stats/engine_integ_test.go
@@ -0,0 +1,737 @@
+//+build integration
+
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "context" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + ecsengine "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var dockerClient dockerapi.DockerClient + +func init() { + dockerClient, _ = dockerapi.NewDockerGoClient(sdkClientFactory, &cfg, ctx) +} + +func createRunningTask() *apitask.Task { + return &apitask.Task{ + Arn: taskArn, + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: taskDefinitionFamily, + Version: taskDefinitionVersion, + Containers: []*apicontainer.Container{ + { + Name: containerName, + }, + }, + } +} + +func TestStatsEngineWithExistingContainersWithoutHealth(t *testing.T) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithExistingContainersWithoutHealth")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // 
Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + // Create a container to get the container id. + container, err := createGremlin(client, "default") + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + containerChangeEventStream := eventStream("TestStatsEngineWithExistingContainersWithoutHealth") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + testTask := createRunningTask() + // Populate Tasks and Container map in the engine. + dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "gremlin", + Container: testTask.Containers[0], + }, + testTask) + + // Simulate container start prior to listener initialization. + time.Sleep(checkPointSleep) + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + // Wait for the stats collection go routine to start. 
+ time.Sleep(checkPointSleep) + validateInstanceMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. + validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} + +func TestStatsEngineWithNewContainersWithoutHealth(t *testing.T) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithNewContainers")) + defer engine.removeAll() + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + container, err := createGremlin(client, "default") + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + containerChangeEventStream := eventStream("TestStatsEngineWithNewContainers") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + testTask := createRunningTask() + // Populate Tasks and Container map in the engine. 
+ dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "gremlin", + Container: testTask.Containers[0], + }, + testTask) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + // Wait for the stats collection go routine to start. + time.Sleep(checkPointSleep) + validateInstanceMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. 
+ validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} + +func TestStatsEngineWithExistingContainers(t *testing.T) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithExistingContainers")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Create a container to get the container id. + container, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + containerChangeEventStream := eventStream("TestStatsEngineWithExistingContainers") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + testTask := createRunningTask() + // enable container health check for this container + testTask.Containers[0].HealthCheckType = "docker" + // Populate Tasks and Container map in the engine. + dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "container-health", + Container: testTask.Containers[0], + }, + testTask) + + // Simulate container start prior to listener initialization. 
+ time.Sleep(checkPointSleep) + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + assert.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + // Wait for the stats collection go routine to start. + time.Sleep(checkPointSleep) + + // Verify the metrics of the container + validateInstanceMetrics(t, engine) + + // Verify the health metrics of container + validateTaskHealthMetrics(t, engine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "write to container change event stream failed") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. 
+ validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} + +func TestStatsEngineWithNewContainers(t *testing.T) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithNewContainers")) + defer engine.removeAll() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + container, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + containerChangeEventStream := eventStream("TestStatsEngineWithNewContainers") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + + testTask := createRunningTask() + // enable health check of the container + testTask.Containers[0].HealthCheckType = "docker" + // Populate Tasks and Container map in the engine. 
+ dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "container-health", + Container: testTask.Containers[0], + }, + testTask) + + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + // Wait for the stats collection go routine to start. + time.Sleep(checkPointSleep) + validateInstanceMetrics(t, engine) + // Verify the health metrics of container + validateTaskHealthMetrics(t, engine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. 
+ validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} + +func TestStatsEngineWithNewContainersWithPolling(t *testing.T) { + // additional config fields to use polling instead of stream + cfg.PollMetrics = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + cfg.PollingMetricsWaitDuration = 1 * time.Second + // Create a new docker client with new config + dockerClientForNewContainersWithPolling, _ := dockerapi.NewDockerGoClient(sdkClientFactory, &cfg, ctx) + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClientForNewContainersWithPolling, eventStream("TestStatsEngineWithNewContainers")) + defer engine.removeAll() + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + container, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + containerChangeEventStream := eventStream("TestStatsEngineWithNewContainers") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + + testTask := createRunningTask() + // enable health check of the container + testTask.Containers[0].HealthCheckType = "docker" + // Populate Tasks and Container map in the engine. 
+ dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "container-health", + Container: testTask.Containers[0], + }, + testTask) + + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerRunning, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + // Wait for the stats collection go routine to start. + time.Sleep(10 * time.Second) + validateInstanceMetrics(t, engine) + // Verify the health metrics of container + validateTaskHealthMetrics(t, engine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + // Write the container change event to event stream + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. 
+ validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) + + // reset cfg, currently cfg is shared by all tests. + cfg.PollMetrics = config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled} + cfg.PollingMetricsWaitDuration = config.DefaultPollingMetricsWaitDuration +} + +func TestStatsEngineWithDockerTaskEngine(t *testing.T) { + containerChangeEventStream := eventStream("TestStatsEngineWithDockerTaskEngine") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + container, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + unmappedContainer, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, unmappedContainer.ID, types.ContainerRemoveOptions{Force: true}) + testTask := createRunningTask() + // enable the health check of the container + testTask.Containers[0].HealthCheckType = "docker" + // Populate Tasks and Container map in the engine. 
+	dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine)
+	dockerTaskEngine.State().AddTask(testTask)
+	dockerTaskEngine.State().AddContainer(
+		&apicontainer.DockerContainer{
+			DockerID:   container.ID,
+			DockerName: "container-health",
+			Container:  testTask.Containers[0],
+		},
+		testTask)
+
+	// Create a new docker stats engine
+	statsEngine := NewDockerStatsEngine(&cfg, dockerClient, containerChangeEventStream)
+	err = statsEngine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance)
+	require.NoError(t, err, "initializing stats engine failed")
+	defer statsEngine.removeAll()
+	defer statsEngine.containerChangeEventStream.Unsubscribe(containerChangeHandler)
+
+	// Assign ContainerStop timeout to addressable variable
+	timeout := defaultDockerTimeoutSeconds
+
+	err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
+	require.NoError(t, err, "starting container failed")
+	defer client.ContainerStop(ctx, container.ID, &timeout)
+
+	err = client.ContainerStart(ctx, unmappedContainer.ID, types.ContainerStartOptions{})
+	require.NoError(t, err, "starting container failed")
+	defer client.ContainerStop(ctx, unmappedContainer.ID, &timeout)
+
+	err = containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{
+		Status: apicontainerstatus.ContainerRunning,
+		DockerContainerMetadata: dockerapi.DockerContainerMetadata{
+			DockerID: container.ID,
+		},
+	})
+	assert.NoError(t, err, "failed to write to container change event stream")
+
+	err = containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{
+		Status: apicontainerstatus.ContainerRunning,
+		DockerContainerMetadata: dockerapi.DockerContainerMetadata{
+			DockerID: unmappedContainer.ID,
+		},
+	})
+	assert.NoError(t, err, "failed to write to container change event stream")
+
+	// Wait for the stats collection go routine to start.
+ time.Sleep(checkPointSleep) + validateInstanceMetrics(t, statsEngine) + validateTaskHealthMetrics(t, statsEngine) + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + err = containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. + validateIdleContainerMetrics(t, statsEngine) + validateEmptyTaskHealthMetrics(t, statsEngine) +} + +func TestStatsEngineWithDockerTaskEngineMissingRemoveEvent(t *testing.T) { + containerChangeEventStream := eventStream("TestStatsEngineWithDockerTaskEngineMissingRemoveEvent") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + container, err := createHealthContainer(client) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + testTask := createRunningTask() + // enable container health check of this container + testTask.Containers[0].HealthCheckType = "docker" + testTask.Containers[0].KnownStatusUnsafe = apicontainerstatus.ContainerStopped + + // Populate Tasks and Container map in the engine. 
+	dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine)
+	dockerTaskEngine.State().AddTask(testTask)
+	dockerTaskEngine.State().AddContainer(
+		&apicontainer.DockerContainer{
+			DockerID:   container.ID,
+			DockerName: "container-health",
+			Container:  testTask.Containers[0],
+		},
+		testTask)
+
+	// Create a new docker stats engine
+	statsEngine := NewDockerStatsEngine(&cfg, dockerClient, containerChangeEventStream)
+	err = statsEngine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance)
+	require.NoError(t, err, "initializing stats engine failed")
+	defer statsEngine.removeAll()
+	defer statsEngine.containerChangeEventStream.Unsubscribe(containerChangeHandler)
+
+	err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
+	require.NoError(t, err, "starting container failed")
+
+	err = containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{
+		Status: apicontainerstatus.ContainerRunning,
+		DockerContainerMetadata: dockerapi.DockerContainerMetadata{
+			DockerID: container.ID,
+		},
+	})
+	assert.NoError(t, err, "failed to write to container change event stream")
+
+	// Assign ContainerStop timeout to addressable variable
+	timeout := defaultDockerTimeoutSeconds
+
+	// Wait for the stats collection go routine to start.
+	time.Sleep(checkPointSleep)
+	err = client.ContainerStop(ctx, container.ID, &timeout)
+	require.NoError(t, err, "stopping container failed")
+	err = client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true})
+	require.NoError(t, err, "removing container failed")
+
+	time.Sleep(checkPointSleep)
+
+	// Simulate tcs client invoking GetInstanceMetrics.
+	_, _, err = statsEngine.GetInstanceMetrics()
+	assert.Error(t, err, "expect error 'no task metrics to report' when getting instance metrics")
+
+	// Should not contain any metrics after cleanup.
+ validateIdleContainerMetrics(t, statsEngine) + validateEmptyTaskHealthMetrics(t, statsEngine) +} + +func TestStatsEngineWithNetworkStatsDefaultMode(t *testing.T) { + testNetworkModeStats(t, "default", false) +} + +func testNetworkModeStats(t *testing.T, networkMode string, statsEmpty bool) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithNetworkStats")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + // Create a container to get the container id. + container, err := createGremlin(client, networkMode) + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + containerChangeEventStream := eventStream("TestStatsEngineWithNetworkStats") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + testTask := createRunningTask() + + // Populate Tasks and Container map in the engine. 
+ dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "gremlin", + Container: testTask.Containers[0], + }, + testTask) + + // Inspect the container and populate the container's network mode + // This is done as part of Task Engine + // https://github.com/aws/amazon-ecs-agent/blob/d2456beb048d36bfe18159ad7f35ca6b78bb9ee9/agent/engine/docker_task_engine.go#L364 + dockerContainer, err := client.ContainerInspect(ctx, container.ID) + require.NoError(t, err, "inspecting container failed") + netMode := string(dockerContainer.HostConfig.NetworkMode) + testTask.Containers[0].SetNetworkMode(netMode) + + // Simulate container start prior to listener initialization. + time.Sleep(checkPointSleep) + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + // Wait for the stats collection go routine to start. 
+ time.Sleep(checkPointSleep) + _, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err, "getting instance metrics failed") + taskMetric := taskMetrics[0] + for _, containerMetric := range taskMetric.ContainerMetrics { + if statsEmpty { + assert.Nil(t, containerMetric.NetworkStatsSet, "network stats should be empty for %s network mode", networkMode) + } else { + assert.NotNil(t, containerMetric.NetworkStatsSet, "network stats should be non-empty for %s network mode", networkMode) + } + } + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. + validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} + +func TestStorageStats(t *testing.T) { + // Create a new docker stats engine + engine := NewDockerStatsEngine(&cfg, dockerClient, eventStream("TestStatsEngineWithStorageStats")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // Assign ContainerStop timeout to addressable variable + timeout := defaultDockerTimeoutSeconds + + // Create a container to get the container id. 
+ container, err := createGremlin(client, "default") + require.NoError(t, err, "creating container failed") + defer client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) + + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + + err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(t, err, "starting container failed") + defer client.ContainerStop(ctx, container.ID, &timeout) + + containerChangeEventStream := eventStream("TestStatsEngineWithStorageStats") + taskEngine := ecsengine.NewTaskEngine(&config.Config{}, nil, nil, containerChangeEventStream, + nil, dockerstate.NewTaskEngineState(), nil, nil, nil) + testTask := createRunningTask() + + // Populate Tasks and Container map in the engine. + dockerTaskEngine := taskEngine.(*ecsengine.DockerTaskEngine) + dockerTaskEngine.State().AddTask(testTask) + dockerTaskEngine.State().AddContainer( + &apicontainer.DockerContainer{ + DockerID: container.ID, + DockerName: "gremlin", + Container: testTask.Containers[0], + }, + testTask) + + // Inspect the container and populate the container's network mode + dockerContainer, err := client.ContainerInspect(ctx, container.ID) + require.NoError(t, err, "inspecting container failed") + // Using default network mode + netMode := string(dockerContainer.HostConfig.NetworkMode) + testTask.Containers[0].SetNetworkMode(netMode) + + // Simulate container start prior to listener initialization. + time.Sleep(checkPointSleep) + err = engine.MustInit(ctx, taskEngine, defaultCluster, defaultContainerInstance) + require.NoError(t, err, "initializing stats engine failed") + defer engine.containerChangeEventStream.Unsubscribe(containerChangeHandler) + + // Wait for the stats collection go routine to start. 
+ time.Sleep(checkPointSleep) + _, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err, "getting instance metrics failed") + taskMetric := taskMetrics[0] + for _, containerMetric := range taskMetric.ContainerMetrics { + assert.NotNil(t, containerMetric.StorageStatsSet, "storage stats should be non-empty") + } + + err = client.ContainerStop(ctx, container.ID, &timeout) + require.NoError(t, err, "stopping container failed") + + err = engine.containerChangeEventStream.WriteToEventStream(dockerapi.DockerContainerChangeEvent{ + Status: apicontainerstatus.ContainerStopped, + DockerContainerMetadata: dockerapi.DockerContainerMetadata{ + DockerID: container.ID, + }, + }) + assert.NoError(t, err, "failed to write to container change event stream") + time.Sleep(waitForCleanupSleep) + + // Should not contain any metrics after cleanup. + validateIdleContainerMetrics(t, engine) + validateEmptyTaskHealthMetrics(t, engine) +} diff --git a/agent/stats/engine_test.go b/agent/stats/engine_test.go new file mode 100644 index 00000000000..352655df97c --- /dev/null +++ b/agent/stats/engine_test.go @@ -0,0 +1,526 @@ +//+build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + mock_resolver "github.com/aws/amazon-ecs-agent/agent/stats/resolver/mock" + + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStatsEngineAddRemoveContainers(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + mockDockerClient := mock_dockerapi.NewMockDockerClient(ctrl) + t1 := &apitask.Task{Arn: "t1", Family: "f1"} + t2 := &apitask.Task{Arn: "t2", Family: "f2"} + t3 := &apitask.Task{Arn: "t3"} + name := "testContainer" + networkMode := "bridge" + resolver.EXPECT().ResolveTask("c1").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveTask("c2").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveTask("c3").AnyTimes().Return(t2, nil) + resolver.EXPECT().ResolveTask("c4").AnyTimes().Return(nil, fmt.Errorf("unmapped container")) + resolver.EXPECT().ResolveTask("c5").AnyTimes().Return(t2, nil) + resolver.EXPECT().ResolveTask("c6").AnyTimes().Return(t3, nil) + resolver.EXPECT().ResolveContainer(gomock.Any()).AnyTimes().Return(&apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: name, + NetworkModeUnsafe: networkMode, + }, + }, nil) + mockStatsChannel := make(chan *types.StatsJSON) + defer 
close(mockStatsChannel) + mockDockerClient.EXPECT().Stats(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockStatsChannel, nil).AnyTimes() + + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestStatsEngineAddRemoveContainers")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + engine.client = mockDockerClient + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + defer engine.removeAll() + + engine.addAndStartStatsContainer("c1") + engine.addAndStartStatsContainer("c1") + + if len(engine.tasksToContainers) != 1 { + t.Errorf("Adding containers failed. Expected num tasks = 1, got: %d", len(engine.tasksToContainers)) + } + + containers, _ := engine.tasksToContainers["t1"] + if len(containers) != 1 { + t.Error("Adding duplicate containers failed.") + } + _, exists := containers["c1"] + if !exists { + t.Error("Container c1 not found in engine") + } + + engine.addAndStartStatsContainer("c2") + containers, _ = engine.tasksToContainers["t1"] + _, exists = containers["c2"] + if !exists { + t.Error("Container c2 not found in engine") + } + + for _, statsContainer := range containers { + assert.Equal(t, name, statsContainer.containerMetadata.Name) + assert.Equal(t, networkMode, statsContainer.containerMetadata.NetworkMode) + for _, fakeContainerStats := range createFakeContainerStats() { + statsContainer.statsQueue.add(fakeContainerStats) + } + } + + // Ensure task shows up in metrics. 
+ containerMetrics, err := engine.taskContainerMetricsUnsafe("t1") + if err != nil { + t.Errorf("Error getting container metrics: %v", err) + } + err = validateContainerMetrics(containerMetrics, 2) + if err != nil { + t.Errorf("Error validating container metrics: %v", err) + } + + metadata, taskMetrics, err := engine.GetInstanceMetrics() + if err != nil { + t.Errorf("Error gettting instance metrics: %v", err) + } + + err = validateMetricsMetadata(metadata) + require.NoError(t, err) + require.Len(t, taskMetrics, 1, "Incorrect number of tasks.") + err = validateContainerMetrics(taskMetrics[0].ContainerMetrics, 2) + require.NoError(t, err) + require.Equal(t, "t1", *taskMetrics[0].TaskArn) + + // Ensure that only valid task shows up in metrics. + _, err = engine.taskContainerMetricsUnsafe("t2") + if err == nil { + t.Error("Expected non-empty error for non existent task") + } + + engine.removeContainer("c1") + containers, _ = engine.tasksToContainers["t1"] + _, exists = containers["c1"] + if exists { + t.Error("Container c1 not removed from engine") + } + engine.removeContainer("c2") + containers, _ = engine.tasksToContainers["t1"] + _, exists = containers["c2"] + if exists { + t.Error("Container c2 not removed from engine") + } + engine.addAndStartStatsContainer("c3") + containers, _ = engine.tasksToContainers["t2"] + _, exists = containers["c3"] + if !exists { + t.Error("Container c3 not found in engine") + } + + _, _, err = engine.GetInstanceMetrics() + if err == nil { + t.Error("Expected non-empty error for empty stats.") + } + engine.removeContainer("c3") + + // Should get an error while adding this container due to unmapped + // container to task. + engine.addAndStartStatsContainer("c4") + validateIdleContainerMetrics(t, engine) + + // Should get an error while adding this container due to unmapped + // task arn to task definition family. 
+ engine.addAndStartStatsContainer("c6") + validateIdleContainerMetrics(t, engine) +} + +func TestStatsEngineMetadataInStatsSets(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + mockDockerClient := mock_dockerapi.NewMockDockerClient(mockCtrl) + t1 := &apitask.Task{Arn: "t1", Family: "f1"} + resolver.EXPECT().ResolveTask("c1").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveContainer(gomock.Any()).AnyTimes().Return(&apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test", + }, + }, nil) + mockDockerClient.EXPECT().Stats(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(t1, nil).AnyTimes() + + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestStatsEngineMetadataInStatsSets")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + engine.client = mockDockerClient + engine.addAndStartStatsContainer("c1") + ts1 := parseNanoTime("2015-02-12T21:22:05.131117533Z") + ts2 := parseNanoTime("2015-02-12T21:22:05.232291187Z") + containerStats := createFakeContainerStats() + dockerStats := []*types.StatsJSON{{}, {}} + dockerStats[0].Read = ts1 + dockerStats[1].Read = ts2 + containers, _ := engine.tasksToContainers["t1"] + for _, statsContainer := range containers { + for i := 0; i < 2; i++ { + statsContainer.statsQueue.add(containerStats[i]) + statsContainer.statsQueue.setLastStat(dockerStats[i]) + } + } + metadata, taskMetrics, err := engine.GetInstanceMetrics() + if err != nil { + t.Errorf("Error gettting instance metrics: %v", err) + } + if len(taskMetrics) != 1 { + t.Fatalf("Incorrect number of tasks. 
Expected: 1, got: %d", len(taskMetrics)) + } + err = validateContainerMetrics(taskMetrics[0].ContainerMetrics, 1) + if err != nil { + t.Errorf("Error validating container metrics: %v", err) + } + if *taskMetrics[0].TaskArn != "t1" { + t.Errorf("Incorrect task arn. Expected: t1, got: %s", *taskMetrics[0].TaskArn) + } + err = validateMetricsMetadata(metadata) + if err != nil { + t.Errorf("Error validating metadata: %v", err) + } + + dockerStat, _, err := engine.ContainerDockerStats("t1", "c1") + assert.NoError(t, err) + assert.Equal(t, ts2, dockerStat.Read) + + engine.removeContainer("c1") + validateIdleContainerMetrics(t, engine) +} + +func TestStatsEngineInvalidTaskEngine(t *testing.T) { + statsEngine := NewDockerStatsEngine(&cfg, nil, eventStream("TestStatsEngineInvalidTaskEngine")) + taskEngine := &MockTaskEngine{} + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err := statsEngine.MustInit(ctx, taskEngine, "", "") + if err == nil { + t.Error("Expected error in engine initialization, got nil") + } +} + +func TestStatsEngineUninitialized(t *testing.T) { + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestStatsEngineUninitialized")) + defer engine.removeAll() + + engine.resolver = &DockerContainerMetadataResolver{} + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + engine.addAndStartStatsContainer("c1") + validateIdleContainerMetrics(t, engine) +} + +func TestStatsEngineTerminalTask(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + resolver.EXPECT().ResolveTask("c1").Return(&apitask.Task{ + Arn: "t1", + KnownStatusUnsafe: apitaskstatus.TaskStopped, + Family: "f1", + }, nil) + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestStatsEngineTerminalTask")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + defer engine.removeAll() + + engine.cluster 
= defaultCluster + engine.containerInstanceArn = defaultContainerInstance + engine.resolver = resolver + + engine.addAndStartStatsContainer("c1") + validateIdleContainerMetrics(t, engine) +} + +func TestGetTaskHealthMetrics(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + containerID := "containerID" + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + resolver.EXPECT().ResolveContainer(containerID).Return(&apicontainer.DockerContainer{ + DockerID: containerID, + Container: &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + HealthCheckType: "docker", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Since: aws.Time(time.Now()), + }, + }, + }, nil).Times(3) + + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestGetTaskHealthMetrics")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.containerInstanceArn = "container_instance" + + containerToStats := make(map[string]*StatsContainer) + var err error + containerToStats[containerID], err = newStatsContainer(containerID, nil, resolver, nil) + assert.NoError(t, err) + engine.tasksToHealthCheckContainers["t1"] = containerToStats + engine.tasksToDefinitions["t1"] = &taskDefinition{ + family: "f1", + version: "1", + } + + engine.resolver = resolver + metadata, taskHealth, err := engine.GetTaskHealthMetrics() + assert.NoError(t, err) + + assert.Equal(t, aws.StringValue(metadata.ContainerInstance), "container_instance") + assert.Len(t, taskHealth, 1) + assert.Len(t, taskHealth[0].Containers, 1) + assert.Equal(t, aws.StringValue(taskHealth[0].Containers[0].HealthStatus), "HEALTHY") +} + +func TestGetTaskHealthMetricsStoppedContainer(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + containerID := "containerID" + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + 
resolver.EXPECT().ResolveContainer(containerID).Return(&apicontainer.DockerContainer{ + DockerID: containerID, + Container: &apicontainer.Container{ + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + HealthCheckType: "docker", + Health: apicontainer.HealthStatus{ + Status: apicontainerstatus.ContainerHealthy, + Since: aws.Time(time.Now()), + }, + }, + }, nil).Times(2) + + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestGetTaskHealthMetrics")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.containerInstanceArn = "container_instance" + + containerToStats := make(map[string]*StatsContainer) + var err error + containerToStats[containerID], err = newStatsContainer(containerID, nil, resolver, nil) + assert.NoError(t, err) + engine.tasksToHealthCheckContainers["t1"] = containerToStats + engine.tasksToDefinitions["t1"] = &taskDefinition{ + family: "f1", + version: "1", + } + + engine.resolver = resolver + _, _, err = engine.GetTaskHealthMetrics() + assert.Error(t, err, "empty metrics should cause an error") +} + +// TestMetricsDisabled tests container won't call docker api to collect stats +// but will track container health when metrics is disabled in agent. 
+func TestMetricsDisabled(t *testing.T) { + disableMetricsConfig := cfg + disableMetricsConfig.DisableMetrics = config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled} + + containerID := "containerID" + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + resolver.EXPECT().ResolveTask(containerID).Return(&apitask.Task{ + Arn: "t1", + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "f1", + }, nil) + resolver.EXPECT().ResolveContainer(containerID).Return(&apicontainer.DockerContainer{ + DockerID: containerID, + Container: &apicontainer.Container{ + HealthCheckType: "docker", + }, + }, nil).Times(2) + + engine := NewDockerStatsEngine(&disableMetricsConfig, nil, eventStream("TestMetricsDisabled")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + engine.addAndStartStatsContainer(containerID) + + assert.Len(t, engine.tasksToContainers, 0, "No containers should be tracked if metrics is disabled") + assert.Len(t, engine.tasksToHealthCheckContainers, 1) +} + +func TestSynchronizeOnRestart(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + containerID := "containerID" + statsChan := make(chan *types.StatsJSON) + statsStarted := make(chan struct{}) + client := mock_dockerapi.NewMockDockerClient(ctrl) + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + + engine := NewDockerStatsEngine(&cfg, client, eventStream("TestSynchronizeOnRestart")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + + client.EXPECT().ListContainers(gomock.Any(), false, gomock.Any()).Return(dockerapi.ListContainersResponse{ + DockerIDs: []string{containerID}, + }) + client.EXPECT().Stats(gomock.Any(), containerID, gomock.Any()).Do(func(ctx context.Context, id string, inactivityTimeout time.Duration) { + statsStarted <- struct{}{} + 
}).Return(statsChan, nil) + + testTask := &apitask.Task{ + Arn: "t1", + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Family: "f1", + } + + resolver.EXPECT().ResolveTask(containerID).Return(testTask, nil).Times(2) + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(testTask, nil).AnyTimes() + resolver.EXPECT().ResolveContainer(containerID).Return(&apicontainer.DockerContainer{ + DockerID: containerID, + Container: &apicontainer.Container{ + HealthCheckType: "docker", + }, + }, nil).Times(3) + err := engine.synchronizeState() + assert.NoError(t, err) + + assert.Len(t, engine.tasksToContainers, 1) + assert.Len(t, engine.tasksToHealthCheckContainers, 1) + + statsContainer := engine.tasksToContainers["t1"][containerID] + assert.NotNil(t, statsContainer) + <-statsStarted + statsContainer.StopStatsCollection() +} + +func TestTaskNetworkStatsSet(t *testing.T) { + var networkModes = []struct { + ENIs []*apieni.ENI + NetworkMode string + StatsEmpty bool + }{ + {nil, "default", false}, + } + for _, tc := range networkModes { + testNetworkModeStats(t, tc.NetworkMode, tc.ENIs, tc.StatsEmpty) + } +} + +func testNetworkModeStats(t *testing.T, netMode string, enis []*apieni.ENI, emptyStats bool) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + mockDockerClient := mock_dockerapi.NewMockDockerClient(mockCtrl) + + testContainer := &apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test", + NetworkModeUnsafe: netMode, + Type: apicontainer.ContainerCNIPause, + }, + } + + t1 := &apitask.Task{ + Arn: "t1", + Family: "f1", + ENIs: enis, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{ + {Name: "test"}, + {Name: "test1"}, + }, + } + + resolver.EXPECT().ResolveTask("c1").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(t1, nil).AnyTimes() + + 
resolver.EXPECT().ResolveContainer(gomock.Any()).AnyTimes().Return(testContainer, nil) + mockDockerClient.EXPECT().Stats(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + + mockDockerClient.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "test", + State: &types.ContainerState{Pid: 23}, + }, + }, nil).AnyTimes() + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestTaskNetworkStatsSet")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + engine.client = mockDockerClient + engine.addAndStartStatsContainer("c1") + ts1 := parseNanoTime("2015-02-12T21:22:05.131117533Z") + containerStats := createFakeContainerStats() + dockerStats := []*types.StatsJSON{{}, {}} + dockerStats[0].Read = ts1 + containers, _ := engine.tasksToContainers["t1"] + for _, statsContainer := range containers { + for i := 0; i < 2; i++ { + statsContainer.statsQueue.add(containerStats[i]) + statsContainer.statsQueue.setLastStat(dockerStats[i]) + } + } + _, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err) + assert.Len(t, taskMetrics, 1) + for _, containerMetric := range taskMetrics[0].ContainerMetrics { + if emptyStats { + assert.Nil(t, containerMetric.NetworkStatsSet, "network stats should be empty") + } else { + assert.NotNil(t, containerMetric.NetworkStatsSet, "network stats should be non-empty") + } + } +} diff --git a/agent/stats/engine_unix_integ_test.go b/agent/stats/engine_unix_integ_test.go new file mode 100644 index 00000000000..547bc3b21aa --- /dev/null +++ b/agent/stats/engine_unix_integ_test.go @@ -0,0 +1,34 @@ +// +build !windows,integration + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "testing" +) + +func TestStatsEngineWithNetworkStatsDifferentModes(t *testing.T) { + var networkModes = []struct { + NetworkMode string + StatsEmpty bool + }{ + {"bridge", false}, + {"host", true}, + {"none", true}, + } + for _, tc := range networkModes { + testNetworkModeStats(t, tc.NetworkMode, tc.StatsEmpty) + } +} diff --git a/agent/stats/engine_unix_test.go b/agent/stats/engine_unix_test.go new file mode 100644 index 00000000000..6d28ab7e85e --- /dev/null +++ b/agent/stats/engine_unix_test.go @@ -0,0 +1,126 @@ +//+build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "testing" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + mock_resolver "github.com/aws/amazon-ecs-agent/agent/stats/resolver/mock" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestLinuxTaskNetworkStatsSet(t *testing.T) { + var networkModes = []struct { + ENIs []*apieni.ENI + NetworkMode string + StatsEmpty bool + }{ + {[]*apieni.ENI{{ID: "ec2Id"}}, "", true}, + {nil, "host", true}, + {nil, "bridge", false}, + {nil, "none", true}, + } + for _, tc := range networkModes { + testNetworkModeStats(t, tc.NetworkMode, tc.ENIs, tc.StatsEmpty) + } +} + +func TestNetworkModeStatsAWSVPCMode(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(mockCtrl) + mockDockerClient := mock_dockerapi.NewMockDockerClient(mockCtrl) + + testContainer := &apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test", + Type: apicontainer.ContainerCNIPause, + }, + } + + testContainer1 := &apicontainer.DockerContainer{ + Container: &apicontainer.Container{ + Name: "test1", + }, + } + + t1 := &apitask.Task{ + Arn: "t1", + Family: "f1", + ENIs: []*apieni.ENI{{ID: "ec2Id"}}, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{ + {Name: "test"}, + {Name: "test1"}, + }, + } + + resolver.EXPECT().ResolveTask("c1").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveTask("c2").AnyTimes().Return(t1, nil) + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(t1, nil).AnyTimes() + + 
resolver.EXPECT().ResolveContainer("c1").AnyTimes().Return(testContainer, nil) + resolver.EXPECT().ResolveContainer("c2").AnyTimes().Return(testContainer1, nil) + mockDockerClient.EXPECT().Stats(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + + mockDockerClient.EXPECT().InspectContainer(gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "test", + State: &types.ContainerState{Pid: 23}, + }, + }, nil).AnyTimes() + engine := NewDockerStatsEngine(&cfg, nil, eventStream("TestTaskNetworkStatsSet")) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + engine.ctx = ctx + engine.resolver = resolver + engine.cluster = defaultCluster + engine.containerInstanceArn = defaultContainerInstance + engine.client = mockDockerClient + engine.addAndStartStatsContainer("c1") + engine.addAndStartStatsContainer("c2") + ts1 := parseNanoTime("2015-02-12T21:22:05.131117533Z") + containerStats := createFakeContainerStats() + dockerStats := []*types.StatsJSON{{}, {}} + dockerStats[0].Read = ts1 + containers, _ := engine.tasksToContainers["t1"] + taskContainers, _ := engine.taskToTaskStats["t1"] + for _, statsContainer := range containers { + for i := 0; i < 2; i++ { + statsContainer.statsQueue.add(containerStats[i]) + statsContainer.statsQueue.setLastStat(dockerStats[i]) + taskContainers.StatsQueue.add(containerStats[i]) + } + } + _, taskMetrics, err := engine.GetInstanceMetrics() + assert.NoError(t, err) + assert.Len(t, taskMetrics, 1) + for _, containerMetric := range taskMetrics[0].ContainerMetrics { + if *containerMetric.ContainerName == "test1" { + assert.NotNil(t, containerMetric.NetworkStatsSet, "network stats should be non-empty") + } else { + assert.Nil(t, containerMetric.NetworkStatsSet, "network stats should be empty for pause") + } + } +} diff --git a/agent/stats/mock/engine.go b/agent/stats/mock/engine.go new file mode 100644 index 00000000000..3ffcb5b5c43 --- 
/dev/null +++ b/agent/stats/mock/engine.go @@ -0,0 +1,99 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/stats (interfaces: Engine) + +// Package mock_stats is a generated GoMock package. +package mock_stats + +import ( + reflect "reflect" + + stats "github.com/aws/amazon-ecs-agent/agent/stats" + ecstcs "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + types "github.com/docker/docker/api/types" + gomock "github.com/golang/mock/gomock" +) + +// MockEngine is a mock of Engine interface +type MockEngine struct { + ctrl *gomock.Controller + recorder *MockEngineMockRecorder +} + +// MockEngineMockRecorder is the mock recorder for MockEngine +type MockEngineMockRecorder struct { + mock *MockEngine +} + +// NewMockEngine creates a new mock instance +func NewMockEngine(ctrl *gomock.Controller) *MockEngine { + mock := &MockEngine{ctrl: ctrl} + mock.recorder = &MockEngineMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockEngine) EXPECT() *MockEngineMockRecorder { + return m.recorder +} + +// ContainerDockerStats mocks base method +func (m *MockEngine) ContainerDockerStats(arg0, arg1 string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainerDockerStats", arg0, arg1) + ret0, _ := ret[0].(*types.StatsJSON) + 
ret1, _ := ret[1].(*stats.NetworkStatsPerSec) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ContainerDockerStats indicates an expected call of ContainerDockerStats +func (mr *MockEngineMockRecorder) ContainerDockerStats(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerDockerStats", reflect.TypeOf((*MockEngine)(nil).ContainerDockerStats), arg0, arg1) +} + +// GetInstanceMetrics mocks base method +func (m *MockEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetInstanceMetrics") + ret0, _ := ret[0].(*ecstcs.MetricsMetadata) + ret1, _ := ret[1].([]*ecstcs.TaskMetric) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetInstanceMetrics indicates an expected call of GetInstanceMetrics +func (mr *MockEngineMockRecorder) GetInstanceMetrics() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceMetrics", reflect.TypeOf((*MockEngine)(nil).GetInstanceMetrics)) +} + +// GetTaskHealthMetrics mocks base method +func (m *MockEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskHealthMetrics") + ret0, _ := ret[0].(*ecstcs.HealthMetadata) + ret1, _ := ret[1].([]*ecstcs.TaskHealth) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetTaskHealthMetrics indicates an expected call of GetTaskHealthMetrics +func (mr *MockEngineMockRecorder) GetTaskHealthMetrics() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskHealthMetrics", reflect.TypeOf((*MockEngine)(nil).GetTaskHealthMetrics)) +} diff --git a/agent/stats/queue.go b/agent/stats/queue.go new file mode 100644 index 00000000000..d06e26bb107 --- /dev/null +++ b/agent/stats/queue.go @@ -0,0 +1,479 @@ +// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "fmt" + "math" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" +) + +const ( + // BytesInMiB is the number of bytes in a MebiByte. + BytesInMiB = 1024 * 1024 + MaxCPUUsagePerc float32 = 1024 * 1024 + NanoSecToSec float32 = 1000000000 +) + +// Queue abstracts a queue using UsageStats slice. +type Queue struct { + buffer []UsageStats + maxSize int + lastStat *types.StatsJSON + lastNetworkStatPerSec *NetworkStatsPerSec + lock sync.RWMutex +} + +// NewQueue creates a queue. +func NewQueue(maxSize int) *Queue { + return &Queue{ + buffer: make([]UsageStats, 0, maxSize), + maxSize: maxSize, + } +} + +// Reset resets the queue's buffer so that only new metrics added after +// this point will be sent to the backend when calling stat getter functions like +// GetCPUStatsSet, GetMemoryStatSet, etc. +func (queue *Queue) Reset() { + queue.lock.Lock() + defer queue.lock.Unlock() + for i := range queue.buffer { + queue.buffer[i].sent = true + } +} + +// Add adds a new set of container stats to the queue. 
+func (queue *Queue) Add(dockerStat *types.StatsJSON) error { + queue.setLastStat(dockerStat) + stat, err := dockerStatsToContainerStats(dockerStat) + if err != nil { + return err + } + queue.add(stat) + return nil +} + +func (queue *Queue) setLastStat(stat *types.StatsJSON) { + queue.lock.Lock() + defer queue.lock.Unlock() + + queue.lastStat = stat +} + +func (queue *Queue) add(rawStat *ContainerStats) { + queue.lock.Lock() + defer queue.lock.Unlock() + + queueLength := len(queue.buffer) + stat := UsageStats{ + CPUUsagePerc: float32(nan32()), + MemoryUsageInMegs: uint32(rawStat.memoryUsage / BytesInMiB), + StorageReadBytes: rawStat.storageReadBytes, + StorageWriteBytes: rawStat.storageWriteBytes, + NetworkStats: rawStat.networkStats, + Timestamp: rawStat.timestamp, + cpuUsage: rawStat.cpuUsage, + sent: false, + } + if queueLength != 0 { + // % utilization can be calculated only when queue is non-empty. + lastStat := queue.buffer[queueLength-1] + timeSinceLastStat := float32(rawStat.timestamp.Sub(lastStat.Timestamp).Nanoseconds()) + if timeSinceLastStat <= 0 { + // if we got a duplicate timestamp, set cpu percentage to the same value as the previous stat + seelog.Errorf("Received a docker stat object with duplicate timestamp") + stat.CPUUsagePerc = lastStat.CPUUsagePerc + if stat.NetworkStats != nil { + stat.NetworkStats.RxBytesPerSecond = lastStat.NetworkStats.RxBytesPerSecond + stat.NetworkStats.TxBytesPerSecond = lastStat.NetworkStats.TxBytesPerSecond + } + } else { + cpuUsageSinceLastStat := float32(rawStat.cpuUsage - lastStat.cpuUsage) + stat.CPUUsagePerc = 100 * cpuUsageSinceLastStat / timeSinceLastStat + + //calculate per second Network metrics + if stat.NetworkStats != nil { + rxBytesSinceLastStat := float32(stat.NetworkStats.RxBytes - lastStat.NetworkStats.RxBytes) + txBytesSinceLastStat := float32(stat.NetworkStats.TxBytes - lastStat.NetworkStats.TxBytes) + stat.NetworkStats.RxBytesPerSecond = NanoSecToSec * (rxBytesSinceLastStat / timeSinceLastStat) + 
stat.NetworkStats.TxBytesPerSecond = NanoSecToSec * (txBytesSinceLastStat / timeSinceLastStat) + } + } + + if queueLength >= queue.maxSize { + // Remove first element if queue is full. + queue.buffer = queue.buffer[1:queueLength] + } + + if stat.CPUUsagePerc > MaxCPUUsagePerc { + // what in the world happened + seelog.Errorf("Calculated CPU usage percent (%.1f) is larger than backend maximum (%.1f). lastStatTS=%s lastStatCPUTime=%d thisStatTS=%s thisStatCPUTime=%d queueLength=%d", + stat.CPUUsagePerc, MaxCPUUsagePerc, lastStat.Timestamp.Format(time.RFC3339Nano), lastStat.cpuUsage, rawStat.timestamp.Format(time.RFC3339Nano), rawStat.cpuUsage, queueLength) + } + + if stat.NetworkStats != nil { + networkStatPerSec := &NetworkStatsPerSec{ + RxBytesPerSecond: stat.NetworkStats.RxBytesPerSecond, + TxBytesPerSecond: stat.NetworkStats.TxBytesPerSecond, + } + queue.lastNetworkStatPerSec = networkStatPerSec + } + } + + queue.buffer = append(queue.buffer, stat) +} + +// GetLastStat returns the last recorded raw statistics object from docker +func (queue *Queue) GetLastStat() *types.StatsJSON { + queue.lock.RLock() + defer queue.lock.RUnlock() + + return queue.lastStat +} + +func (queue *Queue) GetLastNetworkStatPerSec() *NetworkStatsPerSec { + queue.lock.RLock() + defer queue.lock.RUnlock() + + return queue.lastNetworkStatPerSec +} + +// GetCPUStatsSet gets the stats set for CPU utilization. +func (queue *Queue) GetCPUStatsSet() (*ecstcs.CWStatsSet, error) { + return queue.getCWStatsSet(getCPUUsagePerc) +} + +// GetMemoryStatsSet gets the stats set for memory utilization. 
+func (queue *Queue) GetMemoryStatsSet() (*ecstcs.CWStatsSet, error) { + return queue.getCWStatsSet(getMemoryUsagePerc) +} + +// GetStorageStatsSet gets the stats set for aggregate storage +func (queue *Queue) GetStorageStatsSet() (*ecstcs.StorageStatsSet, error) { + storageStatsSet := &ecstcs.StorageStatsSet{} + var err error + storageStatsSet.ReadSizeBytes, err = queue.getULongStatsSet(getStorageReadBytes) + if err != nil { + seelog.Warnf("Error getting storage read size bytes: %v", err) + } + storageStatsSet.WriteSizeBytes, err = queue.getULongStatsSet(getStorageWriteBytes) + if err != nil { + seelog.Warnf("Error getting storage write size bytes: %v", err) + } + return storageStatsSet, err +} + +// GetNetworkStatsSet gets the stats set for network metrics. +func (queue *Queue) GetNetworkStatsSet() (*ecstcs.NetworkStatsSet, error) { + networkStatsSet := &ecstcs.NetworkStatsSet{} + var err error + networkStatsSet.RxBytes, err = queue.getULongStatsSet(getNetworkRxBytes) + if err != nil { + seelog.Warnf("Error getting network rx bytes: %v", err) + } + networkStatsSet.RxDropped, err = queue.getULongStatsSet(getNetworkRxDropped) + if err != nil { + seelog.Warnf("Error getting network rx dropped: %v", err) + } + networkStatsSet.RxErrors, err = queue.getULongStatsSet(getNetworkRxErrors) + if err != nil { + seelog.Warnf("Error getting network rx errors: %v", err) + } + networkStatsSet.RxPackets, err = queue.getULongStatsSet(getNetworkRxPackets) + if err != nil { + seelog.Warnf("Error getting network rx packets: %v", err) + } + networkStatsSet.TxBytes, err = queue.getULongStatsSet(getNetworkTxBytes) + if err != nil { + seelog.Warnf("Error getting network tx bytes: %v", err) + } + networkStatsSet.TxDropped, err = queue.getULongStatsSet(getNetworkTxDropped) + if err != nil { + seelog.Warnf("Error getting network tx dropped: %v", err) + } + networkStatsSet.TxErrors, err = queue.getULongStatsSet(getNetworkTxErrors) + if err != nil { + seelog.Warnf("Error getting network tx 
errors: %v", err) + } + networkStatsSet.TxPackets, err = queue.getULongStatsSet(getNetworkTxPackets) + if err != nil { + seelog.Warnf("Error getting network tx packets: %v", err) + } + networkStatsSet.RxBytesPerSecond, err = queue.getUDoubleCWStatsSet(getNetworkRxPacketsPerSecond) + if err != nil { + seelog.Warnf("Error getting network rx bytes per second: %v", err) + } + networkStatsSet.TxBytesPerSecond, err = queue.getUDoubleCWStatsSet(getNetworkTxPacketsPerSecond) + if err != nil { + seelog.Warnf("Error getting network tx bytes per second: %v", err) + } + return networkStatsSet, err +} + +func getNetworkRxBytes(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.RxBytes + } + return uint64(0) +} + +func getNetworkRxDropped(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.RxDropped + } + return uint64(0) +} + +func getNetworkRxErrors(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.RxErrors + } + return uint64(0) +} + +func getNetworkRxPackets(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.RxPackets + } + return uint64(0) +} + +func getNetworkTxBytes(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.TxBytes + } + return uint64(0) +} + +func getNetworkTxDropped(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.TxDropped + } + return uint64(0) +} + +func getNetworkTxErrors(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.TxErrors + } + return uint64(0) +} + +func getNetworkTxPackets(s *UsageStats) uint64 { + if s.NetworkStats != nil { + return s.NetworkStats.TxPackets + } + return uint64(0) +} + +func getNetworkRxPacketsPerSecond(s *UsageStats) float64 { + if s.NetworkStats != nil { + return float64(s.NetworkStats.RxBytesPerSecond) + } + return float64(0) +} + +func getNetworkTxPacketsPerSecond(s *UsageStats) float64 { + if s.NetworkStats != nil { + return 
float64(s.NetworkStats.TxBytesPerSecond) + } + return float64(0) +} + +func getCPUUsagePerc(s *UsageStats) float64 { + return float64(s.CPUUsagePerc) +} + +func getMemoryUsagePerc(s *UsageStats) float64 { + return float64(s.MemoryUsageInMegs) +} + +func getStorageReadBytes(s *UsageStats) uint64 { + return s.StorageReadBytes +} + +func getStorageWriteBytes(s *UsageStats) uint64 { + return s.StorageWriteBytes +} + +// getInt64WithOverflow truncates a uint64 to fit an int64 +// it returns overflow as a second int64 +func getInt64WithOverflow(uintStat uint64) (int64, int64) { + if uintStat > math.MaxInt64 { + overflow := int64(uintStat % uint64(math.MaxInt64)) + return math.MaxInt64, overflow + } + return int64(uintStat), int64(0) +} + +type getUsageFloatFunc func(*UsageStats) float64 +type getUsageIntFunc func(*UsageStats) uint64 + +// getCWStatsSet gets the stats set for either CPU or Memory based on the +// function pointer. +func (queue *Queue) getCWStatsSet(getUsageFloat getUsageFloatFunc) (*ecstcs.CWStatsSet, error) { + queue.lock.Lock() + defer queue.lock.Unlock() + + queueLength := len(queue.buffer) + if queueLength < 2 { + // Need at least 2 data points to calculate this. 
+ return nil, fmt.Errorf("need at least 2 data points in queue to calculate CW stats set") + } + + var min, max, sum float64 + var sampleCount int64 + min = math.MaxFloat64 + max = -math.MaxFloat64 + sum = 0 + sampleCount = 0 + + for _, stat := range queue.buffer { + if stat.sent { + // don't send stats to TACS if already sent + continue + } + thisStat := getUsageFloat(&stat) + if math.IsNaN(thisStat) { + continue + } + + min = math.Min(min, thisStat) + max = math.Max(max, thisStat) + sampleCount++ + sum += thisStat + } + + // don't emit metrics when sampleCount == 0 + if sampleCount == 0 { + return nil, fmt.Errorf("need at least 1 non-NaN data points in queue to calculate CW stats set") + } + + return &ecstcs.CWStatsSet{ + Max: &max, + Min: &min, + SampleCount: &sampleCount, + Sum: &sum, + }, nil +} + +// getULongStatsSet gets the stats set for the specified raw stat type +// stats come from docker as uint64 type, and by necessity are packed into int64 type +// where there is overflow (math.MaxInt64 + 1 or greater) +// we capture the excess in optional overflow fields. +func (queue *Queue) getULongStatsSet(getUsageInt getUsageIntFunc) (*ecstcs.ULongStatsSet, error) { + queue.lock.Lock() + defer queue.lock.Unlock() + + queueLength := len(queue.buffer) + if queueLength < 2 { + // Need at least 2 data points to calculate this.
+ return nil, fmt.Errorf("need at least 2 data points in the queue to calculate int stats") + } + + var min, max, sum uint64 + var sampleCount int64 + min = math.MaxUint64 + max = 0 + sum = 0 + sampleCount = 0 + + for _, stat := range queue.buffer { + if stat.sent { + // don't send stats to TACS if already sent + continue + } + thisStat := getUsageInt(&stat) + if thisStat < min { + min = thisStat + } + if thisStat > max { + max = thisStat + } + sum += thisStat + sampleCount++ + } + + // don't emit metrics when sampleCount == 0 + if sampleCount == 0 { + return nil, fmt.Errorf("need at least 1 non-NaN data points in queue to calculate CW stats set") + } + + baseMin, overflowMin := getInt64WithOverflow(min) + baseMax, overflowMax := getInt64WithOverflow(max) + baseSum, overflowSum := getInt64WithOverflow(sum) + + return &ecstcs.ULongStatsSet{ + Max: &baseMax, + OverflowMax: &overflowMax, + Min: &baseMin, + OverflowMin: &overflowMin, + SampleCount: &sampleCount, + Sum: &baseSum, + OverflowSum: &overflowSum, + }, nil +} + +// getUDoubleCWStatsSet gets the stats set for per second network metrics +func (queue *Queue) getUDoubleCWStatsSet(getUsageFloat getUsageFloatFunc) (*ecstcs.UDoubleCWStatsSet, error) { + queue.lock.Lock() + defer queue.lock.Unlock() + + queueLength := len(queue.buffer) + if queueLength < 2 { + // Need at least 2 data points to calculate this. 
+ return nil, fmt.Errorf("need at least 2 data points in queue to calculate CW stats set") + } + + var min, max, sum float64 + var sampleCount int64 + min = math.MaxFloat64 + max = -math.MaxFloat64 + sum = 0 + sampleCount = 0 + + for _, stat := range queue.buffer { + if stat.sent { + // don't send stats to TACS if already sent + continue + } + thisStat := getUsageFloat(&stat) + if math.IsNaN(thisStat) { + continue + } + + min = math.Min(min, thisStat) + max = math.Max(max, thisStat) + sampleCount++ + sum += thisStat + } + + // don't emit metrics when sampleCount == 0 + if sampleCount == 0 { + return nil, fmt.Errorf("need at least 1 non-NaN data points in queue to calculate CW stats set") + } + + return &ecstcs.UDoubleCWStatsSet{ + Max: &max, + Min: &min, + SampleCount: &sampleCount, + Sum: &sum, + }, nil +} diff --git a/agent/stats/queue_test.go b/agent/stats/queue_test.go new file mode 100644 index 00000000000..dc3675929e5 --- /dev/null +++ b/agent/stats/queue_test.go @@ -0,0 +1,718 @@ +//+build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "math" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + predictableHighMemoryUtilizationInBytes = 7377772544 + + // predictableHighMemoryUtilizationInMiB is the expected Memory usage in MiB for + // the "predictableHighMemoryUtilizationInBytes" value (7377772544 / (1024 * 1024)) + predictableHighMemoryUtilizationInMiB = 7035 + // the "predictableInt64Overflow" requires 3 metrics to guarantee overflow + // note math.MaxInt64 is odd, so integer division will trim off 1 + predictableInt64Overflow = math.MaxInt64 / int64(2) +) + +var now time.Time = time.Now() + +func getTimestamps() []time.Time { + return []time.Time{ + now.Add(-time.Millisecond * 2100), + now.Add(-time.Millisecond * 2000), + now.Add(-time.Millisecond * 1900), + now.Add(-time.Millisecond * 1800), + now.Add(-time.Millisecond * 1700), + now.Add(-time.Millisecond * 1600), + now.Add(-time.Millisecond * 1500), + now.Add(-time.Millisecond * 1400), + now.Add(-time.Millisecond * 1300), + now.Add(-time.Millisecond * 1200), + now.Add(-time.Millisecond * 1100), + now.Add(-time.Millisecond * 1000), + now.Add(-time.Millisecond * 900), + now.Add(-time.Millisecond * 800), + now.Add(-time.Millisecond * 700), + now.Add(-time.Millisecond * 600), + now.Add(-time.Millisecond * 500), + now.Add(-time.Millisecond * 400), + now.Add(-time.Millisecond * 300), + now.Add(-time.Millisecond * 200), + now.Add(-time.Millisecond * 100), + now, + } +} + +func getUintStats() []uint64 { + return []uint64{ + 22400432, + 116499979, + 248503503, + 372167097, + 502862518, + 638485801, + 780707806, + 911624529, + 1047689820, + 1177013119, + 1313474186, + 1449445062, + 1586294238, + 1719604012, + 1837238842, + 1974606362, + 2112444996, + 2248922292, + 2382142527, + 2516445820, + 2653783456, + 2666483380, + } +} + +func getRandomMemoryUtilizationInBytes() []uint64 { + return []uint64{ + 
1839104, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 3649536, + 716800, + } +} + +func getPredictableHighMemoryUtilizationInBytes(size int) []uint64 { + var memBytes []uint64 + for i := 0; i < size; i++ { + memBytes = append(memBytes, predictableHighMemoryUtilizationInBytes) + } + return memBytes +} + +func getLargeInt64Stats(size int) []uint64 { + var uintStats []uint64 + for i := 0; i < size; i++ { + uintStats = append(uintStats, uint64(predictableInt64Overflow)) + } + return uintStats +} + +func getContainerStats(predictableHighUtilization bool) []*ContainerStats { + timestamps := getTimestamps() + cpuTimes := getUintStats() + var memoryUtilizationInBytes []uint64 + var uintStats []uint64 + if predictableHighUtilization { + memoryUtilizationInBytes = getPredictableHighMemoryUtilizationInBytes(len(cpuTimes)) + uintStats = getLargeInt64Stats(len(cpuTimes)) + } else { + memoryUtilizationInBytes = getRandomMemoryUtilizationInBytes() + uintStats = getUintStats() + } + stats := []*ContainerStats{} + for i, time := range timestamps { + stats = append(stats, &ContainerStats{ + cpuUsage: cpuTimes[i], + memoryUsage: memoryUtilizationInBytes[i], + storageReadBytes: uintStats[i], + storageWriteBytes: uintStats[i], + networkStats: &NetworkStats{ + RxBytes: uintStats[i], + RxDropped: 0, + RxErrors: uintStats[i], + RxPackets: uintStats[i], + TxBytes: uintStats[i], + TxDropped: uintStats[i], + TxErrors: 0, + TxPackets: uintStats[i], + }, + timestamp: time}) + } + return stats +} + +func createQueue(size int, predictableHighUtilization bool) *Queue { + stats := getContainerStats(predictableHighUtilization) + queue := NewQueue(size) + for _, stat := range stats { + queue.add(stat) + } + return queue +} + +func TestQueueReset(t *testing.T) { + queue := NewQueue(10) + // empty queue should throw errors getting stats 
sets: + _, err := queue.GetCPUStatsSet() + require.Error(t, err) + _, err = queue.GetMemoryStatsSet() + require.Error(t, err) + _, err = queue.GetStorageStatsSet() + require.Error(t, err) + _, err = queue.GetNetworkStatsSet() + require.Error(t, err) + + // add some metrics, and getting stats sets should now succeed: + stats := getContainerStats(false) + for i := 0; i < 4; i++ { + queue.add(stats[i]) + } + _, err = queue.GetCPUStatsSet() + require.NoError(t, err) + _, err = queue.GetMemoryStatsSet() + require.NoError(t, err) + _, err = queue.GetStorageStatsSet() + require.NoError(t, err) + _, err = queue.GetNetworkStatsSet() + require.NoError(t, err) + + // after resetting the queue, there are no metrics to send and getting stats sets should error again: + queue.Reset() + _, err = queue.GetCPUStatsSet() + require.Error(t, err) + _, err = queue.GetMemoryStatsSet() + require.Error(t, err) + _, err = queue.GetStorageStatsSet() + require.Error(t, err) + _, err = queue.GetNetworkStatsSet() + require.Error(t, err) + + // add single stat to queue and getting stats sets should succeed again: + queue.add(stats[4]) + _, err = queue.GetCPUStatsSet() + require.NoError(t, err) + _, err = queue.GetMemoryStatsSet() + require.NoError(t, err) + _, err = queue.GetStorageStatsSet() + require.NoError(t, err) + _, err = queue.GetNetworkStatsSet() + require.NoError(t, err) +} + +func TestQueueAddRemove(t *testing.T) { + timestamps := getTimestamps() + queueLength := 5 + // Set predictableHighUtilization to false, expect random values when aggregated. + queue := createQueue(queueLength, false) + buf := queue.buffer + require.Len(t, buf, queueLength, "Buffer size is incorrect.") + + timestampsIndex := len(timestamps) - len(buf) + for i, stat := range buf { + if stat.Timestamp != timestamps[timestampsIndex+i] { + t.Errorf("Unexpected value for Stats element in buffer. 
expected %s got %s", timestamps[timestampsIndex+i], stat.Timestamp) + } + } + + cpuStatsSet, err := queue.GetCPUStatsSet() + require.NoError(t, err) + require.NotEqual(t, math.MaxFloat64, *cpuStatsSet.Min) + require.NotEqual(t, math.NaN(), *cpuStatsSet.Min) + require.NotEqual(t, -math.MaxFloat64, *cpuStatsSet.Max) + require.NotEqual(t, math.NaN(), *cpuStatsSet.Max) + require.Equal(t, int64(queueLength), *cpuStatsSet.SampleCount) + require.Equal(t, int(554), int(*cpuStatsSet.Sum)) + + memStatsSet, err := queue.GetMemoryStatsSet() + require.NoError(t, err) + require.NotEqual(t, math.MaxFloat64, *memStatsSet.Min) + require.NotEqual(t, math.NaN(), *memStatsSet.Min) + require.NotEqual(t, -math.MaxFloat64, *memStatsSet.Max) + require.NotEqual(t, math.NaN(), *memStatsSet.Max) + require.Equal(t, int64(queueLength), *memStatsSet.SampleCount) + require.Equal(t, int(12), int(*memStatsSet.Sum)) + + storageStatsSet, err := queue.GetStorageStatsSet() + require.NoError(t, err) + // assuming min is initialized to math.MaxUint64 then truncated + storageReadStatsSet := storageStatsSet.ReadSizeBytes + require.NotEqual(t, math.MaxInt64, *storageReadStatsSet.Min) + require.NotEqual(t, math.MaxInt64, *storageReadStatsSet.OverflowMin) + require.NotEqual(t, 0, *storageReadStatsSet.Max) + require.Equal(t, int64(queueLength), *storageReadStatsSet.SampleCount) + require.Equal(t, int64(12467777475), *storageReadStatsSet.Sum) + + storageWriteStatsSet := storageStatsSet.WriteSizeBytes + require.NotEqual(t, math.MaxInt64, *storageWriteStatsSet.Min) + require.NotEqual(t, 0, *storageWriteStatsSet.Max) + require.Equal(t, int64(queueLength), *storageWriteStatsSet.SampleCount) + require.Equal(t, int64(12467777475), *storageWriteStatsSet.Sum) + + netStatsSet, err := queue.GetNetworkStatsSet() + require.NoError(t, err, "error getting network stats set") + validateNetStatsSet(t, netStatsSet, queueLength) +} + +func validateNetStatsSet(t *testing.T, netStats *ecstcs.NetworkStatsSet, queueLen int) { + // 
checking only the fields RxBytes, RxDropped, TxBytes, TxErrors since others are similar
+	assert.NotEqual(t, int64(math.MaxInt64), *netStats.RxBytes.Min, "incorrect rxbytes min")
+	assert.Equal(t, int64(0), *netStats.RxBytes.OverflowMin, "incorrect rxbytes overflowMin")
+	assert.NotEqual(t, int64(0), *netStats.RxBytes.Max, "incorrect rxbytes max")
+	assert.Equal(t, int64(0), *netStats.RxBytes.OverflowMax, "incorrect rxbytes overflowMax")
+	assert.Equal(t, int64(queueLen), *netStats.RxBytes.SampleCount, "incorrect rxbytes sampleCount")
+	assert.NotEqual(t, int64(0), *netStats.RxBytes.Sum, "incorrect rxbytes sum")
+	assert.Equal(t, int64(0), *netStats.RxBytes.OverflowSum, "incorrect rxbytes overflowSum")
+
+	assert.Equal(t, int64(0), *netStats.RxDropped.Min, "incorrect RxDropped min")
+	assert.Equal(t, int64(0), *netStats.RxDropped.OverflowMin, "incorrect RxDropped overflowMin")
+	assert.Equal(t, int64(0), *netStats.RxDropped.Max, "incorrect RxDropped max")
+	assert.Equal(t, int64(0), *netStats.RxDropped.OverflowMax, "incorrect RxDropped overflowMax")
+	assert.Equal(t, int64(queueLen), *netStats.RxDropped.SampleCount, "incorrect RxDropped sampleCount")
+	assert.Equal(t, int64(0), *netStats.RxDropped.Sum, "incorrect RxDropped sum")
+	assert.Equal(t, int64(0), *netStats.RxDropped.OverflowSum, "incorrect RxDropped overflowSum")
+
+	assert.NotEqual(t, int64(math.MaxInt64), *netStats.TxBytes.Min, "incorrect TxBytes min")
+	assert.Equal(t, int64(0), *netStats.TxBytes.OverflowMin, "incorrect TxBytes overflowMin")
+	assert.NotEqual(t, int64(0), *netStats.TxBytes.Max, "incorrect TxBytes max")
+	assert.Equal(t, int64(0), *netStats.TxBytes.OverflowMax, "incorrect TxBytes overflowMax")
+	assert.Equal(t, int64(queueLen), *netStats.TxBytes.SampleCount, "incorrect TxBytes sampleCount")
+	assert.NotEqual(t, int64(0), *netStats.TxBytes.Sum, "incorrect TxBytes sum")
+	assert.Equal(t, int64(0), *netStats.TxBytes.OverflowSum, "incorrect TxBytes overflowSum")
+
+	assert.Equal(t, int64(0), *netStats.TxErrors.Min, "incorrect TxErrors min")
+	assert.Equal(t, int64(0), *netStats.TxErrors.OverflowMin, "incorrect TxErrors overflowMin")
+	assert.Equal(t, int64(0), *netStats.TxErrors.Max, "incorrect TxErrors max")
+	assert.Equal(t, int64(0), *netStats.TxErrors.OverflowMax, "incorrect TxErrors overflowMax")
+	assert.Equal(t, int64(queueLen), *netStats.TxErrors.SampleCount, "incorrect TxErrors sampleCount")
+	assert.Equal(t, int64(0), *netStats.TxErrors.Sum, "incorrect TxErrors sum")
+	assert.Equal(t, int64(0), *netStats.TxErrors.OverflowSum, "incorrect TxErrors overflowSum")
+
+	assert.NotNil(t, *netStats.RxBytesPerSecond, "incorrect RxBytesPerSecond set")
+	assert.Equal(t, float64(1.26999248e+08), *netStats.RxBytesPerSecond.Min, "incorrect RxBytesPerSecond min")
+	assert.Equal(t, float64(1.373376384e+09), *netStats.RxBytesPerSecond.Max, "incorrect RxBytesPerSecond max")
+	assert.Equal(t, int64(queueLen), *netStats.RxBytesPerSecond.SampleCount, "incorrect RxBytesPerSecond sampleCount")
+	assert.Equal(t, float64(5.540383824e+09), *netStats.RxBytesPerSecond.Sum, "incorrect RxBytesPerSecond sum")
+
+	assert.NotNil(t, *netStats.TxBytesPerSecond, "incorrect TxBytesPerSecond set")
+	assert.Equal(t, float64(1.26999248e+08), *netStats.TxBytesPerSecond.Min, "incorrect TxBytesPerSecond min")
+	assert.Equal(t, float64(1.373376384e+09), *netStats.TxBytesPerSecond.Max, "incorrect TxBytesPerSecond max")
+	assert.Equal(t, int64(queueLen), *netStats.TxBytesPerSecond.SampleCount, "incorrect TxBytesPerSecond sampleCount")
+	assert.Equal(t, float64(5.540383824e+09), *netStats.TxBytesPerSecond.Sum, "incorrect TxBytesPerSecond sum")
+}
+
+func TestQueueUintStats(t *testing.T) {
+	queueLength := 3
+	queue := createQueue(queueLength, true)
+	buf := queue.buffer
+	if len(buf) != queueLength {
+		t.Errorf("Buffer size is incorrect. 
Expected: %d, Got: %d", queueLength, len(buf)) + } + + storageStatsSet, err := queue.GetStorageStatsSet() + storageReadStatsSet := storageStatsSet.ReadSizeBytes + + if err != nil { + t.Error("Error getting storage read stats set:", err) + } + // assuming min is initialized to math.MaxUint64 then truncated + // min/max should be the same as predictableInt64Overflow + // their overflow should be 0 + assert.Equal(t, *storageReadStatsSet.Min, predictableInt64Overflow) + assert.Equal(t, *storageReadStatsSet.OverflowMin, int64(0)) + assert.Equal(t, *storageReadStatsSet.Max, predictableInt64Overflow) + assert.Equal(t, *storageReadStatsSet.OverflowMax, int64(0)) + // the sum of three predictableInt64Overflow should be equal to MaxInt64 + // with an overflow of predictableInt64Overflow - 1 + // (see the definition of predictableInt64Overflow for why -1) + assert.Equal(t, *storageReadStatsSet.Sum, int64(math.MaxInt64)) + assert.Equal(t, *storageReadStatsSet.OverflowSum, predictableInt64Overflow-1) +} + +func TestQueueAddPredictableHighMemoryUtilization(t *testing.T) { + timestamps := getTimestamps() + queueLength := 5 + // Set predictableHighUtilization to true + // This lets us compare the computed values against pre-computed expected values + queue := createQueue(queueLength, true) + buf := queue.buffer + require.Len(t, buf, queueLength, "Buffer size is incorrect.") + + timestampsIndex := len(timestamps) - len(buf) + for i, stat := range buf { + if stat.Timestamp != timestamps[timestampsIndex+i] { + t.Error("Unexpected value for Stats element in buffer") + } + } + + memStatsSet, err := queue.GetMemoryStatsSet() + if err != nil { + t.Error("Error getting memory stats set:", err) + } + + // Test if both min and max for memory utilization are set to 7035MiB + // Also test if sum == queue length * 7035 + expectedMemoryUsageInMiB := float64(predictableHighMemoryUtilizationInMiB) + expectedMemoryUsageInMiBSum := expectedMemoryUsageInMiB * float64(queueLength) + require.Equal(t, 
*memStatsSet.Min, expectedMemoryUsageInMiB) + require.Equal(t, *memStatsSet.Max, expectedMemoryUsageInMiB) + require.Equal(t, *memStatsSet.SampleCount, int64(queueLength)) + require.Equal(t, *memStatsSet.Sum, expectedMemoryUsageInMiBSum) +} + +// tests just below and just above the threshold (+/- 1) of int64 +func TestUintOverflow(t *testing.T) { + var underUint, overUint uint64 + underUint = uint64(math.MaxInt64 - 1) + overUint = uint64(math.MaxInt64 + 1) + + baseUnderUint, overflowUnderUint := getInt64WithOverflow(underUint) + baseMaxInt, overflowMaxInt := getInt64WithOverflow(uint64(math.MaxInt64)) + baseOverUint, overflowOverUint := getInt64WithOverflow(overUint) + + assert.Equal(t, baseUnderUint, int64(math.MaxInt64-1)) + assert.Equal(t, overflowUnderUint, int64(0)) + assert.Equal(t, baseMaxInt, int64(math.MaxInt64)) + assert.Equal(t, overflowMaxInt, int64(0)) + assert.Equal(t, baseOverUint, int64(math.MaxInt64)) + assert.Equal(t, overflowOverUint, int64(1)) +} + +func TestCpuStatsSetNotSetToInfinity(t *testing.T) { + // timestamps will be used to simulate +Inf CPU Usage + // timestamps[0] = timestamps[1] + now := time.Now() + timestamps := []time.Time{ + now.Add(-time.Microsecond * 1000), + now.Add(-time.Microsecond * 1000), + now, + } + cpuTimes := []uint64{ + 0, + 10000000, + 20000000, + } + + // Create and add container stats + queueLength := 3 + queue := NewQueue(queueLength) + for i, time := range timestamps { + queue.add(&ContainerStats{cpuUsage: cpuTimes[i], memoryUsage: 1024, timestamp: time}) + } + cpuStatsSet, err := queue.GetCPUStatsSet() + if err != nil { + t.Errorf("Error getting cpu stats set: %v", err) + } + require.Equal(t, float64(1000), *cpuStatsSet.Max) + require.Equal(t, float64(1000), *cpuStatsSet.Min) + require.Equal(t, float64(1000), *cpuStatsSet.Sum) + require.Equal(t, int64(1), *cpuStatsSet.SampleCount) +} + +func TestCPUStatSetFailsWhenSampleCountIsZero(t *testing.T) { + timestamps := []time.Time{ + 
parseNanoTime("2015-02-12T21:22:05.131117533Z"),
+		parseNanoTime("2015-02-12T21:22:05.131117533Z"),
+	}
+	cpuTimes := []uint64{
+		22400432,
+		116499979,
+	}
+	memoryUtilizationInBytes := []uint64{
+		3649536,
+		3649536,
+	}
+	// create a queue
+	queue := NewQueue(3)
+
+	for i, time := range timestamps {
+		queue.add(&ContainerStats{cpuUsage: cpuTimes[i], memoryUsage: memoryUtilizationInBytes[i], timestamp: time})
+	}
+
+	// if two cpu had identical timestamps,
+	// then there will not be enough valid cpu percentage stats to create
+	// a valid CpuStatsSet, and this function call should fail.
+	_, err := queue.GetCPUStatsSet()
+	require.Error(t, err)
+}
+
+func TestCPUStatsWithIdenticalTimestampsGetSameUsagePercent(t *testing.T) {
+	now := time.Now()
+	timestamps := []time.Time{
+		now.Add(-time.Nanosecond * 2),
+		now.Add(-time.Nanosecond * 1),
+		now,
+		now,
+	}
+	cpuTimes := []uint64{
+		0,
+		1,
+		3,
+		4,
+	}
+
+	// create a queue
+	queue := NewQueue(4)
+
+	for i, time := range timestamps {
+		queue.add(&ContainerStats{cpuUsage: cpuTimes[i], memoryUsage: 3649536, timestamp: time})
+	}
+
+	// when two of the cpu metrics share an identical timestamp, they are
+	// assigned the same usage percent; there are still enough valid cpu
+	// percentage stats to build a valid CpuStatsSet, so this call should succeed.
+ statSet, err := queue.GetCPUStatsSet() + require.NoError(t, err) + require.Equal(t, float64(200), *statSet.Max) + require.Equal(t, float64(100), *statSet.Min) + require.Equal(t, int64(3), *statSet.SampleCount) + require.Equal(t, float64(500), *statSet.Sum) +} + +func TestHugeCPUUsagePercentDoesntGetCapped(t *testing.T) { + now := time.Now() + timestamps := []time.Time{ + now.Add(-time.Nanosecond * 2), + now.Add(-time.Nanosecond * 1), + now, + } + cpuTimes := []uint64{ + 0, + 1, + 300000000, + } + // create a queue + queue := NewQueue(4) + + for i, time := range timestamps { + queue.add(&ContainerStats{cpuUsage: cpuTimes[i], memoryUsage: 3649536, timestamp: time}) + } + + statSet, err := queue.GetCPUStatsSet() + require.NoError(t, err) + require.Equal(t, float64(30000001024), *statSet.Max) + require.Equal(t, float64(100), *statSet.Min) + require.Equal(t, int64(2), *statSet.SampleCount) + require.Equal(t, float64(30000001124), *statSet.Sum) +} + +// If there are only 2 datapoints, and both have the same timestamp, +// then sample count will be 0 for per sec metrics and GetNetworkStats should return error +func TestPerSecNetworkStatSetFailsWhenSampleCountIsZero(t *testing.T) { + timestamps := []time.Time{ + parseNanoTime("2015-02-12T21:22:05.131117533Z"), + parseNanoTime("2015-02-12T21:22:05.131117533Z"), + } + cpuTimes := []uint64{ + 22400432, + 116499979, + } + memoryUtilizationInBytes := []uint64{ + 3649536, + 3649536, + } + + bytesReceivedTransmitted := []uint64{ + 364953689, + 364953689, + } + + queue := NewQueue(3) + + for i, time := range timestamps { + queue.add(&ContainerStats{ + cpuUsage: cpuTimes[i], + memoryUsage: memoryUtilizationInBytes[i], + networkStats: &NetworkStats{ + RxBytes: bytesReceivedTransmitted[i], + RxDropped: 0, + RxErrors: bytesReceivedTransmitted[i], + RxPackets: bytesReceivedTransmitted[i], + TxBytes: bytesReceivedTransmitted[i], + TxDropped: bytesReceivedTransmitted[i], + TxErrors: 0, + TxPackets: bytesReceivedTransmitted[i], + 
RxBytesPerSecond: float32(nan32()),
+				TxBytesPerSecond: float32(nan32()),
+			},
+			timestamp: time})
+	}
+
+	// if we have identical timestamps and 2 datapoints
+	// then there will not be enough valid network stats to create
+	// a valid network stats set, and this function call should fail.
+	stats, err := queue.GetNetworkStatsSet()
+	require.Errorf(t, err, "Received unexpected network stats set %v", stats)
+}
+
+// If there are only 3 datapoints in total and among them 2 are identical, then GetNetworkStats should not return error
+func TestPerSecNetworkStatSetPassWithThreeDatapoints(t *testing.T) {
+	timestamps := []time.Time{
+		parseNanoTime("2015-02-12T21:22:05.131117533Z"),
+		parseNanoTime("2015-02-12T21:22:05.131117533Z"),
+		parseNanoTime("2015-02-12T21:32:05.131117533Z"),
+	}
+	cpuTimes := []uint64{
+		22400432,
+		116499979,
+		115436856,
+	}
+	memoryUtilizationInBytes := []uint64{
+		3649536,
+		3649536,
+		3649536,
+	}
+
+	bytesReceivedTransmitted := []uint64{
+		364953689,
+		364953689,
+		364953689,
+	}
+
+	queue := NewQueue(3)
+
+	for i, time := range timestamps {
+		queue.add(&ContainerStats{
+			cpuUsage:    cpuTimes[i],
+			memoryUsage: memoryUtilizationInBytes[i],
+			networkStats: &NetworkStats{
+				RxBytes:          bytesReceivedTransmitted[i],
+				RxDropped:        0,
+				RxErrors:         bytesReceivedTransmitted[i],
+				RxPackets:        bytesReceivedTransmitted[i],
+				TxBytes:          bytesReceivedTransmitted[i],
+				TxDropped:        bytesReceivedTransmitted[i],
+				TxErrors:         0,
+				TxPackets:        bytesReceivedTransmitted[i],
+				RxBytesPerSecond: float32(nan32()),
+				TxBytesPerSecond: float32(nan32()),
+			},
+			timestamp: time})
+	}
+
+	// with 3 datapoints where only 2 share an identical timestamp, there is
+	// still at least one valid interval for the per-second network stats,
+	// so this function call should succeed.
+	stats, err := queue.GetNetworkStatsSet()
+	require.NoErrorf(t, err, "Received unexpected network stats set %v", stats)
+}
+
+// If there are only 2 datapoints, and both have different timestamp, GetNetworkStats should not return error
+func TestPerSecNetworkStatSetPassWithTwoDatapoints(t *testing.T) {
+	timestamps := []time.Time{
+		parseNanoTime("2015-02-12T21:22:05.131117533Z"),
+		parseNanoTime("2015-02-12T21:32:05.131117533Z"),
+	}
+	cpuTimes := []uint64{
+		22400432,
+		116499979,
+	}
+	memoryUtilizationInBytes := []uint64{
+		3649536,
+		3649536,
+	}
+
+	bytesReceivedTransmitted := []uint64{
+		364953689,
+		364953689,
+	}
+
+	queue := NewQueue(3)
+
+	for i, time := range timestamps {
+		queue.add(&ContainerStats{
+			cpuUsage:    cpuTimes[i],
+			memoryUsage: memoryUtilizationInBytes[i],
+			networkStats: &NetworkStats{
+				RxBytes:          bytesReceivedTransmitted[i],
+				RxDropped:        0,
+				RxErrors:         bytesReceivedTransmitted[i],
+				RxPackets:        bytesReceivedTransmitted[i],
+				TxBytes:          bytesReceivedTransmitted[i],
+				TxDropped:        bytesReceivedTransmitted[i],
+				TxErrors:         0,
+				TxPackets:        bytesReceivedTransmitted[i],
+				RxBytesPerSecond: float32(nan32()),
+				TxBytesPerSecond: float32(nan32()),
+			},
+			timestamp: time})
+	}
+
+	// with 2 datapoints that have distinct timestamps, there is one valid
+	// interval for the per-second network stats, so this function call
+	// should succeed.
+ stats, err := queue.GetNetworkStatsSet() + require.NoErrorf(t, err, "Received unexpected network stats set %v", stats) +} + +// If there are only 1 datapoint, then GetNetworkStats should return error +func TestPerSecNetworkStatSetFailWithOneDatapoint(t *testing.T) { + timestamps := []time.Time{ + parseNanoTime("2015-02-12T21:22:05.131117533Z"), + } + + cpuTimes := []uint64{ + 22400432, + } + memoryUtilizationInBytes := []uint64{ + 3649536, + } + + bytesReceivedTransmitted := []uint64{ + 364953689, + } + + queue := NewQueue(3) + + for i, time := range timestamps { + queue.add(&ContainerStats{ + cpuUsage: cpuTimes[i], + memoryUsage: memoryUtilizationInBytes[i], + networkStats: &NetworkStats{ + RxBytes: bytesReceivedTransmitted[i], + RxDropped: 0, + RxErrors: bytesReceivedTransmitted[i], + RxPackets: bytesReceivedTransmitted[i], + TxBytes: bytesReceivedTransmitted[i], + TxDropped: bytesReceivedTransmitted[i], + TxErrors: 0, + TxPackets: bytesReceivedTransmitted[i], + RxBytesPerSecond: float32(nan32()), + TxBytesPerSecond: float32(nan32()), + }, + timestamp: time}) + } + + // if we have identical timestamps and 2 datapoints + // then there will not be enough valid network stats to create + // a valid network stats set, and this function call should fail. + stats, err := queue.GetNetworkStatsSet() + require.Errorf(t, err, "Received unexpected network stats set %v", stats) +} diff --git a/agent/stats/resolver/mock/resolver.go b/agent/stats/resolver/mock/resolver.go new file mode 100644 index 00000000000..6e32c42056e --- /dev/null +++ b/agent/stats/resolver/mock/resolver.go @@ -0,0 +1,95 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/stats/resolver (interfaces: ContainerMetadataResolver) + +// Package mock_resolver is a generated GoMock package. +package mock_resolver + +import ( + reflect "reflect" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + task "github.com/aws/amazon-ecs-agent/agent/api/task" + gomock "github.com/golang/mock/gomock" +) + +// MockContainerMetadataResolver is a mock of ContainerMetadataResolver interface +type MockContainerMetadataResolver struct { + ctrl *gomock.Controller + recorder *MockContainerMetadataResolverMockRecorder +} + +// MockContainerMetadataResolverMockRecorder is the mock recorder for MockContainerMetadataResolver +type MockContainerMetadataResolverMockRecorder struct { + mock *MockContainerMetadataResolver +} + +// NewMockContainerMetadataResolver creates a new mock instance +func NewMockContainerMetadataResolver(ctrl *gomock.Controller) *MockContainerMetadataResolver { + mock := &MockContainerMetadataResolver{ctrl: ctrl} + mock.recorder = &MockContainerMetadataResolverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockContainerMetadataResolver) EXPECT() *MockContainerMetadataResolverMockRecorder { + return m.recorder +} + +// ResolveContainer mocks base method +func (m *MockContainerMetadataResolver) ResolveContainer(arg0 string) (*container.DockerContainer, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResolveContainer", arg0) + ret0, _ := ret[0].(*container.DockerContainer) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResolveContainer indicates an expected call of ResolveContainer +func (mr 
*MockContainerMetadataResolverMockRecorder) ResolveContainer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveContainer", reflect.TypeOf((*MockContainerMetadataResolver)(nil).ResolveContainer), arg0) +} + +// ResolveTask mocks base method +func (m *MockContainerMetadataResolver) ResolveTask(arg0 string) (*task.Task, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResolveTask", arg0) + ret0, _ := ret[0].(*task.Task) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResolveTask indicates an expected call of ResolveTask +func (mr *MockContainerMetadataResolverMockRecorder) ResolveTask(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveTask", reflect.TypeOf((*MockContainerMetadataResolver)(nil).ResolveTask), arg0) +} + +// ResolveTaskByARN mocks base method +func (m *MockContainerMetadataResolver) ResolveTaskByARN(arg0 string) (*task.Task, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResolveTaskByARN", arg0) + ret0, _ := ret[0].(*task.Task) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResolveTaskByARN indicates an expected call of ResolveTaskByARN +func (mr *MockContainerMetadataResolverMockRecorder) ResolveTaskByARN(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveTaskByARN", reflect.TypeOf((*MockContainerMetadataResolver)(nil).ResolveTaskByARN), arg0) +} diff --git a/agent/stats/resolver/resolver.go b/agent/stats/resolver/resolver.go new file mode 100644 index 00000000000..8491a83574f --- /dev/null +++ b/agent/stats/resolver/resolver.go @@ -0,0 +1,28 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package resolver + +import ( + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" +) + +//go:generate mockgen -destination=mock/$GOFILE -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/stats/resolver ContainerMetadataResolver + +// ContainerMetadataResolver defines methods to resolve meta-data. +type ContainerMetadataResolver interface { + ResolveTask(string) (*apitask.Task, error) + ResolveContainer(string) (*apicontainer.DockerContainer, error) + ResolveTaskByARN(string) (*apitask.Task, error) +} diff --git a/agent/stats/task_linux.go b/agent/stats/task_linux.go new file mode 100644 index 00000000000..419d61ef05a --- /dev/null +++ b/agent/stats/task_linux.go @@ -0,0 +1,257 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "fmt" + "time" + + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/ecscni" + "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper" + "github.com/aws/amazon-ecs-agent/agent/stats/resolver" + "github.com/aws/amazon-ecs-agent/agent/utils/nswrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/containernetworking/plugins/pkg/ns" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + dockerstats "github.com/docker/docker/api/types" + netlinklib "github.com/vishvananda/netlink" +) + +const ( + // linkTypeDevice defines the string that's expected to be the output of + // netlink.Link.Type() method for netlink.Device type. + linkTypeDevice = "device" + linkTypeVlan = "vlan" + // encapTypeLoopback defines the string that's set for the link.Attrs.EncapType + // field for localhost devices. The EncapType field defines the link + // encapsulation method. For localhost, it's set to "loopback". + encapTypeLoopback = "loopback" +) + +// StatsTask abstracts methods to gather and aggregate network data for a task. Used only for AWSVPC mode. 
+type StatsTask struct { + StatsQueue *Queue + TaskMetadata *TaskMetadata + Ctx context.Context + Cancel context.CancelFunc + Resolver resolver.ContainerMetadataResolver + nswrapperinterface nswrapper.NS + netlinkinterface netlinkwrapper.NetLink + metricPublishInterval time.Duration +} + +func newStatsTaskContainer(taskARN string, containerPID string, numberOfContainers int, + resolver resolver.ContainerMetadataResolver, publishInterval time.Duration) (*StatsTask, error) { + nsAgent := nswrapper.NewNS() + netlinkclient := netlinkwrapper.New() + + ctx, cancel := context.WithCancel(context.Background()) + return &StatsTask{ + TaskMetadata: &TaskMetadata{ + TaskArn: taskARN, + ContainerPID: containerPID, + NumberContainers: numberOfContainers, + }, + Ctx: ctx, + Cancel: cancel, + Resolver: resolver, + netlinkinterface: netlinkclient, + nswrapperinterface: nsAgent, + metricPublishInterval: publishInterval, + }, nil +} + +func (task *StatsTask) StartStatsCollection() { + queueSize := int(config.DefaultContainerMetricsPublishInterval.Seconds() * 4) + task.StatsQueue = NewQueue(queueSize) + task.StatsQueue.Reset() + go task.collect() +} + +func (task *StatsTask) StopStatsCollection() { + task.Cancel() +} + +func (taskStat *StatsTask) collect() { + taskArn := taskStat.TaskMetadata.TaskArn + backoff := retry.NewExponentialBackoff(time.Second*1, time.Second*10, 0.5, 2) + + for { + err := taskStat.processStatsStream() + select { + case <-taskStat.Ctx.Done(): + seelog.Debugf("Stopping stats collection for taskStat %s", taskArn) + return + default: + if err != nil { + d := backoff.Duration() + time.Sleep(d) + seelog.Debugf("Error querying stats for task %s: %v", taskArn, err) + } + // We were disconnected from the stats stream. + // Check if the task is terminal. If it is, stop collecting metrics. + terminal, err := taskStat.terminal() + if err != nil { + // Error determining if the task is terminal. clean-up anyway. 
+ seelog.Warnf("Error determining if the task %s is terminal, stopping stats collection: %v", + taskArn, err) + taskStat.StopStatsCollection() + } else if terminal { + seelog.Infof("Task %s is terminal, stopping stats collection", taskArn) + taskStat.StopStatsCollection() + } + } + } +} + +func (taskStat *StatsTask) processStatsStream() error { + taskArn := taskStat.TaskMetadata.TaskArn + awsvpcNetworkStats, errC := taskStat.getAWSVPCNetworkStats() + + returnError := false + for { + select { + case <-taskStat.Ctx.Done(): + seelog.Info("task context is done") + return nil + case err := <-errC: + seelog.Warnf("Error encountered processing metrics stream from host, this may affect "+ + "cloudwatch metric accuracy: %s", err) + returnError = true + case rawStat, ok := <-awsvpcNetworkStats: + if !ok { + if returnError { + return fmt.Errorf("error encountered processing metrics stream from host") + } + return nil + } + if err := taskStat.StatsQueue.Add(rawStat); err != nil { + seelog.Warnf("Task [%s]: error converting stats: %v", taskArn, err) + } + } + + } +} + +func (taskStat *StatsTask) terminal() (bool, error) { + resolvedTask, err := taskStat.Resolver.ResolveTaskByARN(taskStat.TaskMetadata.TaskArn) + if err != nil { + return false, err + } + return resolvedTask.GetKnownStatus() == apitaskstatus.TaskStopped, nil +} + +func getDevicesList(linkList []netlinklib.Link) []string { + var deviceNames []string + for _, link := range linkList { + if link.Type() != linkTypeDevice && link.Type() != linkTypeVlan { + // We only care about netlink.Device/netlink.Vlan types. Ignore other link types. 
+ continue + } + if link.Attrs().EncapType == encapTypeLoopback { + // Ignore localhost + continue + } + deviceNames = append(deviceNames, link.Attrs().Name) + } + return deviceNames +} + +func (taskStat *StatsTask) populateNIDeviceList(containerPID string) ([]string, error) { + var err error + var deviceList []string + netNSPath := fmt.Sprintf(ecscni.NetnsFormat, containerPID) + err = taskStat.nswrapperinterface.WithNetNSPath(netNSPath, func(ns.NetNS) error { + linksInTaskNetNS, linkErr := taskStat.netlinkinterface.LinkList() + deviceNames := getDevicesList(linksInTaskNetNS) + deviceList = append(deviceList, deviceNames...) + return linkErr + }) + return deviceList, err +} + +func linkStatsToDockerStats(netLinkStats *netlinklib.LinkStatistics, numberOfContainers uint64) dockerstats.NetworkStats { + networkStats := dockerstats.NetworkStats{ + RxBytes: netLinkStats.RxBytes / numberOfContainers, + RxPackets: netLinkStats.RxPackets / numberOfContainers, + RxErrors: netLinkStats.RxErrors / numberOfContainers, + RxDropped: netLinkStats.RxDropped / numberOfContainers, + TxBytes: netLinkStats.TxBytes / numberOfContainers, + TxPackets: netLinkStats.TxPackets / numberOfContainers, + TxErrors: netLinkStats.TxErrors / numberOfContainers, + TxDropped: netLinkStats.TxDropped / numberOfContainers, + } + return networkStats +} + +func (taskStat *StatsTask) getAWSVPCNetworkStats() (<-chan *types.StatsJSON, <-chan error) { + + errC := make(chan error) + statsC := make(chan *dockerstats.StatsJSON) + if taskStat.TaskMetadata.NumberContainers > 0 { + go func() { + defer close(statsC) + statPollTicker := time.NewTicker(taskStat.metricPublishInterval) + defer statPollTicker.Stop() + for range statPollTicker.C { + if len(taskStat.TaskMetadata.DeviceName) == 0 { + var err error + taskStat.TaskMetadata.DeviceName, err = taskStat.populateNIDeviceList(taskStat.TaskMetadata.ContainerPID) + if err != nil { + errC <- err + return + } + } + networkStats := 
make(map[string]dockerstats.NetworkStats, len(taskStat.TaskMetadata.DeviceName)) + for _, device := range taskStat.TaskMetadata.DeviceName { + var link netlinklib.Link + err := taskStat.nswrapperinterface.WithNetNSPath(fmt.Sprintf(ecscni.NetnsFormat, + taskStat.TaskMetadata.ContainerPID), + func(ns.NetNS) error { + var linkErr error + if link, linkErr = taskStat.netlinkinterface.LinkByName(device); linkErr != nil { + return linkErr + } + return nil + }) + + if err != nil { + errC <- err + return + } + netLinkStats := link.Attrs().Statistics + networkStats[link.Attrs().Name] = linkStatsToDockerStats(netLinkStats, + uint64(taskStat.TaskMetadata.NumberContainers)) + } + + dockerStats := &types.StatsJSON{ + Networks: networkStats, + Stats: types.Stats{ + Read: time.Now(), + }, + } + statsC <- dockerStats + } + }() + } + + return statsC, errC +} diff --git a/agent/stats/task_linux_test.go b/agent/stats/task_linux_test.go new file mode 100644 index 00000000000..6ae14067b29 --- /dev/null +++ b/agent/stats/task_linux_test.go @@ -0,0 +1,191 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package stats + +import ( + "context" + "errors" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + mock_netlink "github.com/aws/amazon-ecs-agent/agent/eni/netlinkwrapper/mocks" + mock_resolver "github.com/aws/amazon-ecs-agent/agent/stats/resolver/mock" + mock_nswrapper "github.com/aws/amazon-ecs-agent/agent/utils/nswrapper/mocks" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" +) + +func TestTaskStatsCollection(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + mockNS := mock_nswrapper.NewMockNS(ctrl) + mockNetLink := mock_netlink.NewMockNetLink(ctrl) + + containerPID := "23" + taskId := "task1" + ctx, cancel := context.WithCancel(context.TODO()) + numberOfContainers := 2 + taskStats := &StatsTask{ + TaskMetadata: &TaskMetadata{ + TaskArn: taskId, + ContainerPID: containerPID, + DeviceName: []string{"device1", "device2"}, + NumberContainers: numberOfContainers, + }, + Ctx: ctx, + Cancel: cancel, + Resolver: resolver, + netlinkinterface: mockNetLink, + nswrapperinterface: mockNS, + metricPublishInterval: time.Second, + } + + testTask := &apitask.Task{ + Containers: []*apicontainer.Container{ + {Name: "c1"}, + {Name: "c2", Type: apicontainer.ContainerCNIPause}, + }, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + } + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(testTask, nil).AnyTimes() + mockNS.EXPECT().WithNetNSPath(gomock.Any(), + gomock.Any()).Do(func(nsPath interface{}, toRun func(n ns.NetNS) error) error { + return toRun(nil) + }).AnyTimes() + mockNetLink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + Name: "name", + Statistics: 
&netlink.LinkStatistics{ + RxPackets: uint64(2), + RxBytes: uint64(50), + }, + }, + }, nil).AnyTimes() + + taskStats.StartStatsCollection() + time.Sleep(checkPointSleep) + taskStats.StopStatsCollection() + + networkStatsSet, err := taskStats.StatsQueue.GetNetworkStatsSet() + + assert.NoError(t, err) + assert.NotNil(t, networkStatsSet) + rxSum := (*networkStatsSet.RxBytes.SampleCount * (int64(50))) / int64(numberOfContainers) + assert.EqualValues(t, rxSum, *networkStatsSet.RxBytes.Sum) +} + +func TestTaskStatsCollectionError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + resolver := mock_resolver.NewMockContainerMetadataResolver(ctrl) + mockNS := mock_nswrapper.NewMockNS(ctrl) + mockNetLink := mock_netlink.NewMockNetLink(ctrl) + + containerPID := "23" + taskId := "task1" + ctx, cancel := context.WithCancel(context.TODO()) + + taskStats := &StatsTask{ + TaskMetadata: &TaskMetadata{ + TaskArn: taskId, + ContainerPID: containerPID, + DeviceName: []string{"device1", "device2"}, + NumberContainers: 2, + }, + Ctx: ctx, + Cancel: cancel, + Resolver: resolver, + netlinkinterface: mockNetLink, + nswrapperinterface: mockNS, + metricPublishInterval: time.Second, + } + + testTask := &apitask.Task{ + Containers: []*apicontainer.Container{ + {Name: "c1"}, + {Name: "c2", Type: apicontainer.ContainerCNIPause}, + }, + KnownStatusUnsafe: apitaskstatus.TaskRunning, + } + resolver.EXPECT().ResolveTaskByARN(gomock.Any()).Return(testTask, nil).AnyTimes() + err := errors.New("emit macho dwarf: elf header corrupted") + + mockNS.EXPECT().WithNetNSPath(gomock.Any(), gomock.Any()).Return(err) + mockNS.EXPECT().WithNetNSPath(gomock.Any(), + gomock.Any()).Do(func(nsPath interface{}, toRun func(n ns.NetNS) error) error { + return toRun(nil) + }).AnyTimes() + mockNetLink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{ + LinkAttrs: netlink.LinkAttrs{ + Name: "name", + Statistics: &netlink.LinkStatistics{ + RxPackets: uint64(2), + RxBytes: uint64(50), + }, + }, 
+ }, nil).AnyTimes() + + taskStats.StartStatsCollection() + time.Sleep(checkPointSleep) + taskStats.StopStatsCollection() + + networkStatsSet, err := taskStats.StatsQueue.GetNetworkStatsSet() + assert.NoError(t, err) + assert.EqualValues(t, 50, *networkStatsSet.RxBytes.Sum) + assert.EqualValues(t, 2, *networkStatsSet.RxPackets.Sum) + assert.EqualValues(t, 2, *networkStatsSet.RxPackets.SampleCount) + assert.EqualValues(t, 2, *networkStatsSet.TxPackets.SampleCount) +} + +func TestGetDeviceList(t *testing.T) { + + link1 := &netlink.GenericLink{ + LinkType: linkTypeDevice, + LinkAttrs: netlink.LinkAttrs{ + Name: "link1device", + }, + } + link2 := &netlink.GenericLink{ + LinkType: linkTypeVlan, + LinkAttrs: netlink.LinkAttrs{ + Name: "link2device", + }, + } + link3 := &netlink.GenericLink{ + LinkType: "randomLinkType", + LinkAttrs: netlink.LinkAttrs{ + Name: "link3device", + }, + } + link4 := &netlink.GenericLink{ + LinkAttrs: netlink.LinkAttrs{ + EncapType: encapTypeLoopback, + Name: "link4device", + }, + LinkType: linkTypeVlan, + } + linkList := []netlink.Link{link1, link2, link3, link4} + + deviceNames := getDevicesList(linkList) + + assert.ElementsMatch(t, []string{"link1device", "link2device"}, deviceNames) +} diff --git a/agent/stats/task_unspecified.go b/agent/stats/task_unspecified.go new file mode 100644 index 00000000000..07086610414 --- /dev/null +++ b/agent/stats/task_unspecified.go @@ -0,0 +1,43 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "time" + + "github.com/aws/amazon-ecs-agent/agent/stats/resolver" + "github.com/pkg/errors" +) + +// Dummy StatsTasks +type StatsTask struct { + // AWSVPC network stats only supported on linux + TaskMetadata *TaskMetadata + StatsQueue *Queue +} + +func newStatsTaskContainer(taskARN string, containerPID string, numberOfContainers int, + resolver resolver.ContainerMetadataResolver, publishInterval time.Duration) (*StatsTask, error) { + return nil, errors.New("Unsupported platform") +} + +func (task *StatsTask) StartStatsCollection() { + // AWSVPC network stats only supported on linux +} + +func (task *StatsTask) StopStatsCollection() { + // AWSVPC network stats only supported on linux +} diff --git a/agent/stats/types.go b/agent/stats/types.go new file mode 100644 index 00000000000..113a4213fac --- /dev/null +++ b/agent/stats/types.go @@ -0,0 +1,99 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "time" + + "context" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/stats/resolver" +) + +// ContainerStats encapsulates the raw CPU and memory utilization from cgroup fs. 
+type ContainerStats struct { + cpuUsage uint64 + memoryUsage uint64 + storageReadBytes uint64 + storageWriteBytes uint64 + networkStats *NetworkStats + timestamp time.Time +} + +// NetworkStats contains the network stats information for a container +type NetworkStats struct { + RxBytes uint64 `json:"rxBytes"` + RxDropped uint64 `json:"rxDropped"` + RxErrors uint64 `json:"rxErrors"` + RxPackets uint64 `json:"rxPackets"` + TxBytes uint64 `json:"txBytes"` + TxDropped uint64 `json:"txDropped"` + TxErrors uint64 `json:"txErrors"` + TxPackets uint64 `json:"txPackets"` + RxBytesPerSecond float32 `json:"rxBytesPerSecond"` + TxBytesPerSecond float32 `json:"txBytesPerSecond"` +} + +// UsageStats abstracts the format in which the queue stores data. +type UsageStats struct { + CPUUsagePerc float32 `json:"cpuUsagePerc"` + MemoryUsageInMegs uint32 `json:"memoryUsageInMegs"` + StorageReadBytes uint64 `json:"storageReadBytes"` + StorageWriteBytes uint64 `json:"storageWriteBytes"` + NetworkStats *NetworkStats `json:"networkStats"` + Timestamp time.Time `json:"timestamp"` + cpuUsage uint64 + // sent indicates if the stat has been sent to TACS already. + sent bool +} + +// ContainerMetadata contains meta-data information for a container. +type ContainerMetadata struct { + DockerID string `json:"-"` + Name string `json:"-"` + NetworkMode string `json:"-"` +} + +// TaskMetadata contains meta-data information for a task. +type TaskMetadata struct { + TaskArn string `json:"-"` + // ContainerPID is the PID of the pause container in the awsvpc task. + ContainerPID string `json:"-"` + DeviceName []string `json:"-"` + NumberContainers int `json:"-"` +} + +// StatsContainer abstracts methods to gather and aggregate utilization data for a container. 
+type StatsContainer struct { + containerMetadata *ContainerMetadata + ctx context.Context + cancel context.CancelFunc + client dockerapi.DockerClient + statsQueue *Queue + resolver resolver.ContainerMetadataResolver + config *config.Config +} + +// taskDefinition encapsulates family and version strings for a task definition +type taskDefinition struct { + family string + version string +} + +type NetworkStatsPerSec struct { + RxBytesPerSecond float32 `json:"rx_bytes_per_sec"` + TxBytesPerSecond float32 `json:"tx_bytes_per_sec"` +} diff --git a/agent/stats/unix_test_stats.json b/agent/stats/unix_test_stats.json new file mode 100644 index 00000000000..8188d2251ea --- /dev/null +++ b/agent/stats/unix_test_stats.json @@ -0,0 +1,83 @@ +{ + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 192, + "op": "Read", + "value": 1 + }, + { + "major": 202, + "minor": 192, + "op": "Write", + "value": 5 + }, + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 1 + }, + { + "major": 202, + "minor": 26368, + "op": "Write", + "value": 5 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 1 + }, + { + "major": 253, + "minor": 1, + "op": "Write", + "value": 5 + } + ] + }, + "cpu_stats": { + "cpu_usage": { + "total_usage": 262857821518, + "percpu_usage": [ + 129951031737, + 132906789781 + ] + }, + "online_cpus": 2 + }, + "memory_stats":{ + "usage": 30, + "max_usage": 100, + "stats": { + "cache": 20, + "rss": 10 + }, + "privateworkingset": 10 + }, + "networks": { + "eth0": { + "rx_bytes": 796, + "rx_packets": 10, + "rx_errors": 0, + "rx_dropped": 0, + "tx_bytes": 8192, + "tx_packets": 112, + "tx_errors": 0, + "tx_dropped": 5 + }, + "eth1": { + "rx_bytes": 300, + "rx_packets": 4, + "rx_errors": 0, + "rx_dropped": 1, + "tx_bytes": 800, + "tx_packets": 11, + "tx_errors": 0, + "tx_dropped": 5 + } + } +} diff --git a/agent/stats/utils.go b/agent/stats/utils.go new file mode 100644 index 00000000000..9459dd817ab --- /dev/null +++ 
b/agent/stats/utils.go @@ -0,0 +1,49 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "math" + "runtime" + + "github.com/docker/docker/api/types" +) + +var numCores = uint64(runtime.NumCPU()) + +// nan32 returns a 32bit NaN. +func nan32() float32 { + return (float32)(math.NaN()) +} + +func getNetworkStats(dockerStats *types.StatsJSON) *NetworkStats { + if dockerStats.Networks == nil { + return nil + } + networkStats := &NetworkStats{} + for _, netStats := range dockerStats.Networks { + networkStats.RxBytes += netStats.RxBytes + networkStats.RxDropped += netStats.RxDropped + networkStats.RxErrors += netStats.RxErrors + networkStats.RxPackets += netStats.RxPackets + + networkStats.TxBytes += netStats.TxBytes + networkStats.TxDropped += netStats.TxDropped + networkStats.TxErrors += netStats.TxErrors + networkStats.TxPackets += netStats.TxPackets + } + networkStats.RxBytesPerSecond = float32(nan32()) + networkStats.TxBytesPerSecond = float32(nan32()) + return networkStats +} diff --git a/agent/stats/utils_test.go b/agent/stats/utils_test.go new file mode 100644 index 00000000000..e3ed80e9b8f --- /dev/null +++ b/agent/stats/utils_test.go @@ -0,0 +1,84 @@ +//+build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "encoding/json" + "fmt" + "math" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +const ( + // below is the sum of each field in each network interface json in unix_test_stats.json + expectedRxBytes = uint64(1096) + expectedRxPackets = uint64(14) + expectedRxDropped = uint64(1) + expectedRxErrors = uint64(0) + expectedTxBytes = uint64(8992) + expectedTxPackets = uint64(123) + expectedTxDropped = uint64(10) + expectedTxErrors = uint64(0) +) + +func TestDockerStatsToContainerStatsMemUsage(t *testing.T) { + jsonStat := fmt.Sprintf(` + { + "cpu_stats":{ + "cpu_usage":{ + "percpu_usage":[%d, %d, %d, %d], + "total_usage":%d + } + }, + "memory_stats":{ + "usage": %d, + "max_usage": %d, + "stats": { + "cache": %d, + "rss": %d + }, + "privateworkingset": %d + } + }`, 1, 2, 3, 4, 100, 30, 100, 20, 10, 10) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonStat), dockerStat) + containerStats, err := dockerStatsToContainerStats(dockerStat) + if err != nil { + t.Errorf("Error converting container stats: %v", err) + } + if containerStats == nil { + t.Fatal("containerStats should not be nil") + } + if containerStats.memoryUsage != 10 { + t.Error("Unexpected value for memoryUsage", containerStats.memoryUsage) + } +} + +func validateNetworkMetrics(t *testing.T, netStats *NetworkStats) { + assert.Equal(t, expectedRxBytes, netStats.RxBytes) + assert.Equal(t, expectedRxPackets, netStats.RxPackets) + assert.Equal(t, expectedRxDropped, netStats.RxDropped) + assert.Equal(t, expectedRxErrors, netStats.RxErrors) + assert.Equal(t, 
expectedTxBytes, netStats.TxBytes) + assert.Equal(t, expectedTxPackets, netStats.TxPackets) + assert.Equal(t, expectedTxDropped, netStats.TxDropped) + assert.Equal(t, expectedTxErrors, netStats.TxErrors) + assert.True(t, math.IsNaN(float64(netStats.RxBytesPerSecond))) + assert.True(t, math.IsNaN(float64(netStats.TxBytesPerSecond))) +} diff --git a/agent/stats/utils_unix.go b/agent/stats/utils_unix.go new file mode 100644 index 00000000000..db4053c4ddb --- /dev/null +++ b/agent/stats/utils_unix.go @@ -0,0 +1,68 @@ +// +build !windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "fmt" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" +) + +// dockerStatsToContainerStats returns a new object of the ContainerStats object from docker stats. 
+func dockerStatsToContainerStats(dockerStats *types.StatsJSON) (*ContainerStats, error) { + cpuUsage := dockerStats.CPUStats.CPUUsage.TotalUsage / numCores + memoryUsage := dockerStats.MemoryStats.Usage - dockerStats.MemoryStats.Stats["cache"] + storageReadBytes, storageWriteBytes := getStorageStats(dockerStats) + networkStats := getNetworkStats(dockerStats) + return &ContainerStats{ + cpuUsage: cpuUsage, + memoryUsage: memoryUsage, + storageReadBytes: storageReadBytes, + storageWriteBytes: storageWriteBytes, + networkStats: networkStats, + timestamp: dockerStats.Read, + }, nil +} + +func validateDockerStats(dockerStats *types.StatsJSON) error { + // The length of PercpuUsage represents the number of cores in an instance. + if len(dockerStats.CPUStats.CPUUsage.PercpuUsage) == 0 || numCores == uint64(0) { + return fmt.Errorf("invalid container statistics reported, no cpu core usage reported") + } + return nil +} + +func getStorageStats(dockerStats *types.StatsJSON) (uint64, uint64) { + // initialize block io and loop over stats to aggregate + if dockerStats.BlkioStats.IoServiceBytesRecursive == nil { + seelog.Debug("Storage stats not reported for container") + return uint64(0), uint64(0) + } + storageReadBytes := uint64(0) + storageWriteBytes := uint64(0) + for _, blockStat := range dockerStats.BlkioStats.IoServiceBytesRecursive { + switch op := blockStat.Op; op { + case "Read": + storageReadBytes += blockStat.Value + case "Write": + storageWriteBytes += blockStat.Value + default: + //ignoring "Async", "Total", "Sum", etc + continue + } + } + return storageReadBytes, storageWriteBytes +} diff --git a/agent/stats/utils_unix_test.go b/agent/stats/utils_unix_test.go new file mode 100644 index 00000000000..d91042c1063 --- /dev/null +++ b/agent/stats/utils_unix_test.go @@ -0,0 +1,59 @@ +// +build !windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package stats + +import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDockerStatsToContainerStats(t *testing.T) { + // numCores is a global variable in package agent/stats + // which denotes the number of cpu cores + // TODO should we take this from the docker stats `online_cpus`? + numCores = 4 + inputJsonFile, _ := filepath.Abs("./unix_test_stats.json") + jsonBytes, _ := ioutil.ReadFile(inputJsonFile) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonBytes), dockerStat) + containerStats, err := dockerStatsToContainerStats(dockerStat) + assert.NoError(t, err, "converting container stats failed") + require.NotNil(t, containerStats, "containerStats should not be nil") + assert.Equal(t, uint64(65714455379), containerStats.cpuUsage, "unexpected value for cpuUsage", containerStats.cpuUsage) + // storage bytes check + assert.Equal(t, uint64(3), containerStats.storageReadBytes, "unexpected value for storageReadBytes", containerStats.storageReadBytes) + assert.Equal(t, uint64(15), containerStats.storageWriteBytes, "Unexpected value for storageWriteBytes", containerStats.storageWriteBytes) + // network stats check + netStats := containerStats.networkStats + assert.NotNil(t, netStats, "networkStats should not be nil") + validateNetworkMetrics(t, netStats) +} + +func TestDockerStatsToContainerStatsEmptyCpuUsageGeneratesError(t *testing.T) { + inputJsonFile, _ := 
filepath.Abs("./unix_test_stats.json") + jsonBytes, _ := ioutil.ReadFile(inputJsonFile) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonBytes), dockerStat) + // empty the PercpuUsage array + dockerStat.CPUStats.CPUUsage.PercpuUsage = make([]uint64, 0) + err := validateDockerStats(dockerStat) + assert.Error(t, err, "expected error converting container stats with empty PercpuUsage") +} diff --git a/agent/stats/utils_windows.go b/agent/stats/utils_windows.go new file mode 100644 index 00000000000..8e5c5f69678 --- /dev/null +++ b/agent/stats/utils_windows.go @@ -0,0 +1,45 @@ +// +build windows +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package stats + +import ( + "fmt" + + "github.com/docker/docker/api/types" +) + +// dockerStatsToContainerStats returns a new object of the ContainerStats object from docker stats. 
+func dockerStatsToContainerStats(dockerStats *types.StatsJSON) (*ContainerStats, error) { + cpuUsage := (dockerStats.CPUStats.CPUUsage.TotalUsage * 100) / numCores + memoryUsage := dockerStats.MemoryStats.PrivateWorkingSet + networkStats := getNetworkStats(dockerStats) + storageReadBytes := dockerStats.StorageStats.ReadSizeBytes + storageWriteBytes := dockerStats.StorageStats.WriteSizeBytes + return &ContainerStats{ + cpuUsage: cpuUsage, + memoryUsage: memoryUsage, + timestamp: dockerStats.Read, + storageReadBytes: storageReadBytes, + storageWriteBytes: storageWriteBytes, + networkStats: networkStats, + }, nil +} + +func validateDockerStats(dockerStats *types.StatsJSON) error { + if numCores == uint64(0) { + return fmt.Errorf("invalid container statistics reported, no cpu core usage reported") + } + return nil +} diff --git a/agent/stats/utils_windows_test.go b/agent/stats/utils_windows_test.go new file mode 100644 index 00000000000..544292096b4 --- /dev/null +++ b/agent/stats/utils_windows_test.go @@ -0,0 +1,65 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package stats + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDockerStatsToContainerStatsZeroCoresGeneratesError(t *testing.T) { + numCores = uint64(0) + // not using windows_test_stats.json here to save file open/read time + jsonStat := fmt.Sprintf(` + { + "cpu_stats":{ + "cpu_usage":{ + "total_usage":%d + } + } + }`, 100) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonStat), dockerStat) + err := validateDockerStats(dockerStat) + assert.Error(t, err, "expected error converting container stats with zero cpu cores") +} + +func TestDockerStatsToContainerStats(t *testing.T) { + numCores = 4 + inputJsonFile, _ := filepath.Abs("./windows_test_stats.json") + jsonBytes, _ := ioutil.ReadFile(inputJsonFile) + dockerStat := &types.StatsJSON{} + json.Unmarshal([]byte(jsonBytes), dockerStat) + containerStats, err := dockerStatsToContainerStats(dockerStat) + assert.NoError(t, err, "converting container stats failed") + require.NotNil(t, containerStats, "containerStats should not be nil") + netStats := containerStats.networkStats + assert.NotNil(t, netStats, "networkStats should not be nil") + validateNetworkMetrics(t, netStats) + assert.Equal(t, uint64(2500), containerStats.cpuUsage, + "unexpected value for cpuUsage", containerStats.cpuUsage) + assert.Equal(t, uint64(3), containerStats.storageReadBytes, + "unexpected value for storageReadBytes", containerStats.storageReadBytes) + assert.Equal(t, uint64(15), containerStats.storageWriteBytes, + "Unexpected value for storageWriteBytes", containerStats.storageWriteBytes) + +} diff --git a/agent/stats/windows_test_stats.json b/agent/stats/windows_test_stats.json new file mode 100644 index 00000000000..5a1ddc7a484 --- /dev/null +++ b/agent/stats/windows_test_stats.json @@ -0,0 +1,64 @@ +{ + "blkio_stats": { + "io_service_bytes_recursive": null, + 
"io_serviced_recursive": null, + "io_queue_recursive": null, + "io_service_time_recursive": null, + "io_wait_time_recursive": null, + "io_merged_recursive": null, + "io_time_recursive": null, + "sectors_recursive": null + }, + "num_procs": 8, + "storage_stats": { + "read_count_normalized": 1, + "read_size_bytes": 3, + "write_count_normalized": 1, + "write_size_bytes": 15 + }, + "cpu_stats": { + "cpu_usage": { + "total_usage": 100 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "precpu_stats": { + "cpu_usage": { + "total_usage": 0, + "usage_in_kernelmode": 0, + "usage_in_usermode": 0 + }, + "throttling_data": { + "periods": 0, + "throttled_periods": 0, + "throttled_time": 0 + } + }, + "memory_stats": { + "commitbytes": 114061312, + "commitpeakbytes": 139227136, + "privateworkingset": 85532672 + }, + "networks": { + "eth0": { + "rx_bytes": 796, + "rx_packets": 10, + "rx_dropped": 0, + "tx_bytes": 8192, + "tx_packets": 112, + "tx_dropped": 5 + }, + "eth1": { + "rx_bytes": 300, + "rx_packets": 4, + "rx_dropped": 1, + "tx_bytes": 800, + "tx_packets": 11, + "tx_dropped": 5 + } + } +} diff --git a/agent/taskresource/asmauth/asmauth.go b/agent/taskresource/asmauth/asmauth.go new file mode 100644 index 00000000000..3dc52362fc7 --- /dev/null +++ b/agent/taskresource/asmauth/asmauth.go @@ -0,0 +1,430 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package asmauth + +import ( + "encoding/json" + "sync" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/asm" + "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + + "github.com/cihub/seelog" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +const ( + // ResourceName is the name of the ASM auth resource + ResourceName = "asm-auth" +) + +// ASMAuthResource represents private registry credentials as a task resource. +// These credentials are stored in AWS Secrets Manager +type ASMAuthResource struct { + taskARN string + createdAt time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatus is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. This is + // used while progressing resource states in progressTask() of task manager + appliedStatus resourcestatus.ResourceStatus + resourceStatusToTransitionFunction map[resourcestatus.ResourceStatus]func() error + credentialsManager credentials.Manager + executionCredentialsID string + + // required for asm private registry auth + requiredASMResources []*apicontainer.ASMAuthData + dockerAuthData map[string]types.AuthConfig + // asmClientCreator is a factory interface that creates new ASM clients. This is + // needed mostly for testing as we're creating an asm client per every item in + // the requiredASMResources list. Each of these items could be from different + // regions. 
+ // TODO: Refactor this struct so that each ASMAuthData gets associated with + exactly one ASMAuthResource object + asmClientCreator factory.ClientCreator + + // terminalReason should be set for resource creation failures. This ensures + // the resource object carries some context for why provisioning failed. + terminalReason string + terminalReasonOnce sync.Once + + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// NewASMAuthResource creates a new ASMAuthResource object +func NewASMAuthResource(taskARN string, + asmRequirements []*apicontainer.ASMAuthData, + executionCredentialsID string, + credentialsManager credentials.Manager, + asmClientCreator factory.ClientCreator) *ASMAuthResource { + + c := &ASMAuthResource{ + taskARN: taskARN, + requiredASMResources: asmRequirements, + credentialsManager: credentialsManager, + executionCredentialsID: executionCredentialsID, + asmClientCreator: asmClientCreator, + } + + c.initStatusToTransition() + return c +} + +func (auth *ASMAuthResource) initStatusToTransition() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(ASMAuthStatusCreated): auth.Create, + } + auth.resourceStatusToTransitionFunction = resourceStatusToTransitionFunction +} + +func (auth *ASMAuthResource) setTerminalReason(reason string) { + auth.terminalReasonOnce.Do(func() { + seelog.Infof("ASM Auth: setting terminal reason for asm auth resource in task: [%s]", auth.taskARN) + auth.terminalReason = reason + }) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (auth *ASMAuthResource) GetTerminalReason() string { + return auth.terminalReason +} + +// SetDesiredStatus safely sets the desired status of the resource +func (auth *ASMAuthResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + auth.lock.Lock() + defer auth.lock.Unlock() + + auth.desiredStatusUnsafe =
status +} + +// GetDesiredStatus safely returns the desired status of the resource +func (auth *ASMAuthResource) GetDesiredStatus() resourcestatus.ResourceStatus { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.desiredStatusUnsafe +} + +// GetName safely returns the name of the resource +func (auth *ASMAuthResource) GetName() string { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return ResourceName +} + +// DesiredTerminal returns true if the asm auth resource's desired status is REMOVED +func (auth *ASMAuthResource) DesiredTerminal() bool { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.desiredStatusUnsafe == resourcestatus.ResourceStatus(ASMAuthStatusRemoved) +} + +// KnownCreated returns true if the asm auth resource's known status is CREATED +func (auth *ASMAuthResource) KnownCreated() bool { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.knownStatusUnsafe == resourcestatus.ResourceStatus(ASMAuthStatusCreated) +} + +// TerminalStatus returns the last transition state of the asm auth resource +func (auth *ASMAuthResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(ASMAuthStatusRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`.
+func (auth *ASMAuthResource) NextKnownState() resourcestatus.ResourceStatus { + return auth.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (auth *ASMAuthResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := auth.resourceStatusToTransitionFunction[nextState] + if !ok { + return errors.Errorf("resource [%s]: transition to %s impossible", auth.GetName(), + auth.StatusString(nextState)) + } + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (auth *ASMAuthResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(ASMAuthStatusCreated) +} + +// SetKnownStatus safely sets the currently known status of the resource +func (auth *ASMAuthResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + auth.lock.Lock() + defer auth.lock.Unlock() + + auth.knownStatusUnsafe = status + auth.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (auth *ASMAuthResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if auth.appliedStatus == resourcestatus.ResourceStatus(ASMAuthStatusNone) { + return + } + + // Check if the resource transition has already finished + if auth.appliedStatus <= knownStatus { + auth.appliedStatus = resourcestatus.ResourceStatus(ASMAuthStatusNone) + } +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (auth *ASMAuthResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + auth.lock.Lock() + defer auth.lock.Unlock() + + if auth.appliedStatus != resourcestatus.ResourceStatus(ASMAuthStatusNone) { + // return false to indicate the set operation failed + return false + } + + auth.appliedStatus = status + return true +} + +// GetKnownStatus safely returns the currently 
known status of the task +func (auth *ASMAuthResource) GetKnownStatus() resourcestatus.ResourceStatus { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.knownStatusUnsafe +} + +// StatusString returns the string of the cgroup resource status +func (auth *ASMAuthResource) StatusString(status resourcestatus.ResourceStatus) string { + return ASMAuthStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (auth *ASMAuthResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + auth.lock.Lock() + defer auth.lock.Unlock() + + auth.createdAt = createdAt +} + +// GetCreatedAt sets the timestamp for resource's creation time +func (auth *ASMAuthResource) GetCreatedAt() time.Time { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.createdAt +} + +// Create fetches credentials from ASM +func (auth *ASMAuthResource) Create() error { + seelog.Infof("ASM Auth: Retrieving credentials for containers in task: [%s]", auth.taskARN) + if auth.dockerAuthData == nil { + auth.dockerAuthData = make(map[string]types.AuthConfig) + } + for _, a := range auth.GetRequiredASMResources() { + err := auth.retrieveASMDockerAuthData(a) + if err != nil { + auth.setTerminalReason(err.Error()) + return err + } + } + + return nil +} + +func (auth *ASMAuthResource) retrieveASMDockerAuthData(asmAuthData *apicontainer.ASMAuthData) error { + secretID := asmAuthData.CredentialsParameter + if _, ok := auth.GetASMDockerAuthConfig(secretID); ok { + // resource for this secretID already retrieved + return nil + } + + executionCredentials, ok := auth.credentialsManager.GetTaskCredentials(auth.GetExecutionCredentialsID()) + if !ok { + // No need to log here. 
managedTask.applyResourceState already does that + return errors.New("asm resource: unable to find execution role credentials") + } + iamCredentials := executionCredentials.GetIAMRoleCredentials() + asmClient := auth.asmClientCreator.NewASMClient(asmAuthData.Region, iamCredentials) + seelog.Debugf("ASM Auth: Retrieving resource with ID [%s] in task: [%s]", secretID, auth.taskARN) + dac, err := asm.GetDockerAuthFromASM(secretID, asmClient) + if err != nil { + return err + } + + auth.lock.Lock() + defer auth.lock.Unlock() + + // put retrieved dac in dockerAuthMap + auth.dockerAuthData[secretID] = dac + + return nil +} + +// GetRequiredASMResources returns the list of ASMAuthData that has to be +// retrieved from AWS Secrets Manager +func (auth *ASMAuthResource) GetRequiredASMResources() []*apicontainer.ASMAuthData { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.requiredASMResources +} + +// GetExecutionCredentialsID returns the execution role's credential ID +func (auth *ASMAuthResource) GetExecutionCredentialsID() string { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.executionCredentialsID +} + +// Cleanup removes the asm auth resource created for the task +func (auth *ASMAuthResource) Cleanup() error { + auth.clearASMDockerAuthConfig() + return nil +} + +// clearASMDockerAuthConfig cycles through the collection of docker private +// registry auth data and removes them from the task +func (auth *ASMAuthResource) clearASMDockerAuthConfig() { + auth.lock.Lock() + defer auth.lock.Unlock() + + for k := range auth.dockerAuthData { + delete(auth.dockerAuthData, k) + } +} + +// GetASMDockerAuthConfig retrieves the docker private registry auth data from +// the task +func (auth *ASMAuthResource) GetASMDockerAuthConfig(secretID string) (types.AuthConfig, bool) { + auth.lock.RLock() + defer auth.lock.RUnlock() + + d, ok := auth.dockerAuthData[secretID] + return d, ok +} + +func (auth *ASMAuthResource) Initialize(resourceFields 
*taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + auth.initStatusToTransition() + auth.credentialsManager = resourceFields.CredentialsManager + auth.asmClientCreator = resourceFields.ASMClientCreator + if taskKnownStatus < status.TaskPulled && // Containers in the task need to be pulled + taskDesiredStatus <= status.TaskRunning { // and the task is not terminal. + // Reset the ASM resource's known status as None so that the NONE -> CREATED + // transition gets triggered + auth.SetKnownStatus(resourcestatus.ResourceStatusNone) + } +} + +type asmAuthResourceJSON struct { + TaskARN string `json:"taskARN"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *ASMAuthStatus `json:"desiredStatus"` + KnownStatus *ASMAuthStatus `json:"knownStatus"` + RequiredASMResources []*apicontainer.ASMAuthData `json:"asmResources"` + ExecutionCredentialsID string `json:"executionCredentialsID"` +} + +// MarshalJSON serialises the ASMAuthResource struct to JSON +func (auth *ASMAuthResource) MarshalJSON() ([]byte, error) { + if auth == nil { + return nil, errors.New("asm-auth resource is nil") + } + createdAt := auth.GetCreatedAt() + return json.Marshal(asmAuthResourceJSON{ + TaskARN: auth.taskARN, + CreatedAt: &createdAt, + DesiredStatus: func() *ASMAuthStatus { + desiredState := auth.GetDesiredStatus() + status := ASMAuthStatus(desiredState) + return &status + }(), + KnownStatus: func() *ASMAuthStatus { + knownState := auth.GetKnownStatus() + status := ASMAuthStatus(knownState) + return &status + }(), + RequiredASMResources: auth.GetRequiredASMResources(), + ExecutionCredentialsID: auth.GetExecutionCredentialsID(), + }) +} + +// UnmarshalJSON deserialises the raw JSON to a ASMAuthResource struct +func (auth *ASMAuthResource) UnmarshalJSON(b []byte) error { + temp := asmAuthResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + if temp.DesiredStatus != nil { + 
auth.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + auth.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() { + auth.SetCreatedAt(*temp.CreatedAt) + } + if temp.RequiredASMResources != nil { + auth.requiredASMResources = temp.RequiredASMResources + } + auth.taskARN = temp.TaskARN + auth.executionCredentialsID = temp.ExecutionCredentialsID + + return nil +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (auth *ASMAuthResource) GetAppliedStatus() resourcestatus.ResourceStatus { + auth.lock.RLock() + defer auth.lock.RUnlock() + + return auth.appliedStatus +} + +func (auth *ASMAuthResource) DependOnTaskNetwork() bool { + return false +} + +func (auth *ASMAuthResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (auth *ASMAuthResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/asmauth/asmauth_test.go b/agent/taskresource/asmauth/asmauth_test.go new file mode 100644 index 00000000000..feb05137d0c --- /dev/null +++ b/agent/taskresource/asmauth/asmauth_test.go @@ -0,0 +1,146 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package asmauth + +import ( + "encoding/json" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/asm" + mock_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + executionCredentialsID = "exec-creds-id" + region = "us-west-2" + secretID = "meaning-of-life" + username = "irene" + password = "sher" + taskARN = "task1" +) + +var ( + asmAuthDataVal string + requiredASMResources []*apicontainer.ASMAuthData +) + +func init() { + asmAuthDataBytes, _ := json.Marshal(&asm.AuthDataValue{ + Username: aws.String(username), + Password: aws.String(password), + }) + asmAuthDataVal = string(asmAuthDataBytes) + requiredASMResources = []*apicontainer.ASMAuthData{ + { + CredentialsParameter: secretID, + Region: region, + }, + } +} + +func TestCreateAndGet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + asmSecretValue := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmAuthDataVal), + } + 
gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + asmClientCreator.EXPECT().NewASMClient(region, iamRoleCreds).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, aws.StringValue(in.SecretId), secretID) + }).Return(asmSecretValue, nil), + ) + asmRes := &ASMAuthResource{ + executionCredentialsID: executionCredentialsID, + requiredASMResources: requiredASMResources, + credentialsManager: credentialsManager, + asmClientCreator: asmClientCreator, + } + require.NoError(t, asmRes.Create()) + dac, ok := asmRes.GetASMDockerAuthConfig(secretID) + require.True(t, ok) + assert.Equal(t, dac.Username, username) + assert.Equal(t, dac.Password, password) +} + +func TestMarshalUnmarshalJSON(t *testing.T) { + asmResIn := &ASMAuthResource{ + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + createdAt: time.Now(), + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredASMResources: requiredASMResources, + } + + bytes, err := json.Marshal(asmResIn) + require.NoError(t, err) + + asmResOut := &ASMAuthResource{} + err = json.Unmarshal(bytes, asmResOut) + require.NoError(t, err) + assert.Equal(t, asmResIn.taskARN, asmResOut.taskARN) + assert.WithinDuration(t, asmResIn.createdAt, asmResOut.createdAt, time.Microsecond) + assert.Equal(t, asmResIn.desiredStatusUnsafe, asmResOut.desiredStatusUnsafe) + assert.Equal(t, asmResIn.knownStatusUnsafe, asmResOut.knownStatusUnsafe) + assert.Equal(t, asmResIn.executionCredentialsID, asmResOut.executionCredentialsID) + assert.Equal(t, len(asmResIn.requiredASMResources), len(asmResOut.requiredASMResources)) + assert.Equal(t, asmResIn.requiredASMResources[0], asmResOut.requiredASMResources[0]) +} + +func TestInitialize(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := 
mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + asmRes := &ASMAuthResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + } + asmRes.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + assert.Equal(t, resourcestatus.ResourceStatusNone, asmRes.GetKnownStatus()) + assert.Equal(t, resourcestatus.ResourceCreated, asmRes.GetDesiredStatus()) + +} diff --git a/agent/taskresource/asmauth/asmauthstatus.go b/agent/taskresource/asmauth/asmauthstatus.go new file mode 100644 index 00000000000..924dbd53db4 --- /dev/null +++ b/agent/taskresource/asmauth/asmauthstatus.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package asmauth + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type ASMAuthStatus resourcestatus.ResourceStatus + +const ( + // is the zero state of a task resource + ASMAuthStatusNone ASMAuthStatus = iota + // represents a task resource which has been created + ASMAuthStatusCreated + // represents a task resource which has been cleaned up + ASMAuthStatusRemoved +) + +var asmAuthStatusMap = map[string]ASMAuthStatus{ + "NONE": ASMAuthStatusNone, + "CREATED": ASMAuthStatusCreated, + "REMOVED": ASMAuthStatusRemoved, +} + +// StatusString returns a human readable string representation of this object +func (as ASMAuthStatus) String() string { + for k, v := range asmAuthStatusMap { + if v == as { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (as *ASMAuthStatus) MarshalJSON() ([]byte, error) { + if as == nil { + return nil, errors.New("asm-auth resource status is nil") + } + return []byte(`"` + as.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (as *ASMAuthStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *as = ASMAuthStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *as = ASMAuthStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := asmAuthStatusMap[string(strStatus)] + if !ok { + *as = ASMAuthStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *as = stat + return nil +} diff --git a/agent/taskresource/asmauth/asmauthstatus_test.go b/agent/taskresource/asmauth/asmauthstatus_test.go new file mode 100644 index 00000000000..15554204643 --- /dev/null +++ b/agent/taskresource/asmauth/asmauthstatus_test.go @@ -0,0 +1,157 @@ +// +build unit + +// Copyright 
Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asmauth + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InASMAuthStatus ASMAuthStatus + OutASMAuthStatus string + }{ + { + Name: "ToStringASMAuthStatusNone", + InASMAuthStatus: ASMAuthStatusNone, + OutASMAuthStatus: "NONE", + }, + { + Name: "ToStringASMAuthStatusCreated", + InASMAuthStatus: ASMAuthStatusCreated, + OutASMAuthStatus: "CREATED", + }, + { + Name: "ToStringASMAuthStatusRemoved", + InASMAuthStatus: ASMAuthStatusRemoved, + OutASMAuthStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutASMAuthStatus, c.InASMAuthStatus.String()) + }) + } +} + +func TestMarshalNilASMAuthStatus(t *testing.T) { + var status *ASMAuthStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalASMAuthStatus(t *testing.T) { + cases := []struct { + Name string + InASMAuthStatus ASMAuthStatus + OutASMAuthStatus string + }{ + { + Name: "MarshallASMAuthStatusNone", + InASMAuthStatus: ASMAuthStatusNone, + OutASMAuthStatus: "\"NONE\"", + }, + { + Name: "MarshallASMAuthStatusCreated", + InASMAuthStatus: ASMAuthStatusCreated, + OutASMAuthStatus: "\"CREATED\"", + }, + { + Name: "MarshallASMAuthStatusRemoved", + InASMAuthStatus: ASMAuthStatusRemoved, + OutASMAuthStatus: "\"REMOVED\"", + }, + } 
+ + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InASMAuthStatus.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, c.OutASMAuthStatus, string(bytes[:])) + }) + } + +} + +func TestUnmarshalASMAuthStatus(t *testing.T) { + cases := []struct { + Name string + InASMAuthStatus string + OutASMAuthStatus ASMAuthStatus + ShouldError bool + }{ + { + Name: "UnmarshallASMAuthStatusNone", + InASMAuthStatus: "\"NONE\"", + OutASMAuthStatus: ASMAuthStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallASMAuthStatusCreated", + InASMAuthStatus: "\"CREATED\"", + OutASMAuthStatus: ASMAuthStatusCreated, + ShouldError: false, + }, + { + Name: "UnmarshallASMAuthStatusRemoved", + InASMAuthStatus: "\"REMOVED\"", + OutASMAuthStatus: ASMAuthStatusRemoved, + ShouldError: false, + }, + { + Name: "UnmarshallASMAuthStatusNull", + InASMAuthStatus: "null", + OutASMAuthStatus: ASMAuthStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallASMAuthStatusNonString", + InASMAuthStatus: "1", + OutASMAuthStatus: ASMAuthStatusNone, + ShouldError: true, + }, + { + Name: "UnmarshallASMAuthStatusUnmappedStatus", + InASMAuthStatus: "\"LOL\"", + OutASMAuthStatus: ASMAuthStatusNone, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + + var status ASMAuthStatus + err := json.Unmarshal([]byte(c.InASMAuthStatus), &status) + + if c.ShouldError { + assert.Error(t, err) + } else { + + assert.NoError(t, err) + assert.Equal(t, c.OutASMAuthStatus, status) + } + }) + } +} diff --git a/agent/taskresource/asmsecret/asmsecret.go b/agent/taskresource/asmsecret/asmsecret.go new file mode 100644 index 00000000000..8ace7eb21c1 --- /dev/null +++ b/agent/taskresource/asmsecret/asmsecret.go @@ -0,0 +1,536 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asmsecret + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/asm" + "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/secretsmanager" +) + +const ( + // ResourceName is the name of the asmsecret resource + ResourceName = "asmsecret" + arnDelimiter = ":" + asmARNResourceFormat = "secret:{secretID}" + asmARNResourceWithParametersFormat = "secret:secretID:jsonKey:versionStage:versionID" +) + +// ASMSecretResource represents secrets as a task resource. +// The secrets are stored in AWS Secrets Manager. +type ASMSecretResource struct { + taskARN string + createdAt time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatus is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. 
This is + // used while progressing resource states in progressTask() of task manager + appliedStatus resourcestatus.ResourceStatus + resourceStatusToTransitionFunction map[resourcestatus.ResourceStatus]func() error + credentialsManager credentials.Manager + executionCredentialsID string + + // map to store all asm deduped secrets in the task, key is a combination of valueFrom and region + requiredSecrets map[string]apicontainer.Secret + // map to store secret values, key is a combination of valueFrom and region + secretData map[string]string + + // asmClientCreator is a factory interface that creates new ASM clients. This is + // needed mostly for testing. + asmClientCreator factory.ClientCreator + + // terminalReason should be set for resource creation failures. This ensures + // the resource object carries some context for why provisioning failed. + terminalReason string + terminalReasonOnce sync.Once + + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// NewASMSecretResource creates a new ASMSecretResource object +func NewASMSecretResource(taskARN string, + asmSecrets map[string]apicontainer.Secret, + executionCredentialsID string, + credentialsManager credentials.Manager, + asmClientCreator factory.ClientCreator) *ASMSecretResource { + + s := &ASMSecretResource{ + taskARN: taskARN, + requiredSecrets: asmSecrets, + credentialsManager: credentialsManager, + executionCredentialsID: executionCredentialsID, + asmClientCreator: asmClientCreator, + } + + s.initStatusToTransition() + return s +} + +func (secret *ASMSecretResource) initStatusToTransition() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(ASMSecretCreated): secret.Create, + } + secret.resourceStatusToTransitionFunction = resourceStatusToTransitionFunction +} + +func (secret *ASMSecretResource) setTerminalReason(reason string) { + secret.terminalReasonOnce.Do(func() {
seelog.Infof("ASM secret resource: setting terminal reason for asm secret resource in task: [%s]", secret.taskARN) + secret.terminalReason = reason + }) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (secret *ASMSecretResource) GetTerminalReason() string { + return secret.terminalReason +} + +// SetDesiredStatus safely sets the desired status of the resource +func (secret *ASMSecretResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.desiredStatusUnsafe = status +} + +// GetDesiredStatus safely returns the desired status of the task +func (secret *ASMSecretResource) GetDesiredStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.desiredStatusUnsafe +} + +// GetName safely returns the name of the resource +func (secret *ASMSecretResource) GetName() string { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return ResourceName +} + +// DesiredTerminal returns true if the secret's desired status is REMOVED +func (secret *ASMSecretResource) DesiredTerminal() bool { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.desiredStatusUnsafe == resourcestatus.ResourceStatus(ASMSecretRemoved) +} + +// KnownCreated returns true if the secret's known status is CREATED +func (secret *ASMSecretResource) KnownCreated() bool { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.knownStatusUnsafe == resourcestatus.ResourceStatus(ASMSecretCreated) +} + +// TerminalStatus returns the last transition state of asmsecret +func (secret *ASMSecretResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(ASMSecretRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. 
+func (secret *ASMSecretResource) NextKnownState() resourcestatus.ResourceStatus { + return secret.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (secret *ASMSecretResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := secret.resourceStatusToTransitionFunction[nextState] + if !ok { + return errors.Errorf("resource [%s]: transition to %s impossible", secret.GetName(), + secret.StatusString(nextState)) + } + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (secret *ASMSecretResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(ASMSecretCreated) +} + +// SetKnownStatus safely sets the currently known status of the resource +func (secret *ASMSecretResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.knownStatusUnsafe = status + secret.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (secret *ASMSecretResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if secret.appliedStatus == resourcestatus.ResourceStatus(ASMSecretStatusNone) { + return + } + + // Check if the resource transition has already finished + if secret.appliedStatus <= knownStatus { + secret.appliedStatus = resourcestatus.ResourceStatus(ASMSecretStatusNone) + } +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (secret *ASMSecretResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + secret.lock.Lock() + defer secret.lock.Unlock() + + if secret.appliedStatus != resourcestatus.ResourceStatus(ASMSecretStatusNone) { + // return false to indicate the set operation failed + return false + } + + secret.appliedStatus = status + return 
true +} + +// GetKnownStatus safely returns the currently known status of the task +func (secret *ASMSecretResource) GetKnownStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.knownStatusUnsafe +} + +// StatusString returns the string of the cgroup resource status +func (secret *ASMSecretResource) StatusString(status resourcestatus.ResourceStatus) string { + return ASMSecretStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (secret *ASMSecretResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.createdAt = createdAt +} + +// GetCreatedAt sets the timestamp for resource's creation time +func (secret *ASMSecretResource) GetCreatedAt() time.Time { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.createdAt +} + +// It spins up multiple goroutines in order to retrieve values in parallel. +func (secret *ASMSecretResource) Create() error { + + // To fail fast, check execution role first + executionCredentials, ok := secret.credentialsManager.GetTaskCredentials(secret.getExecutionCredentialsID()) + if !ok { + // No need to log here. 
managedTask.applyResourceState already does that + err := errors.New("ASM secret resource: unable to find execution role credentials") + secret.setTerminalReason(err.Error()) + return err + } + iamCredentials := executionCredentials.GetIAMRoleCredentials() + + var wg sync.WaitGroup + + // Get the maximum number of errors to be returned, which will be one error per goroutine + errorEvents := make(chan error, len(secret.requiredSecrets)) + + seelog.Infof("ASM secret resource: retrieving secrets for containers in task: [%s]", secret.taskARN) + secret.secretData = make(map[string]string) + + for _, asmsecret := range secret.getRequiredSecrets() { + wg.Add(1) + // Spin up goroutine per secret to speed up processing time + go secret.retrieveASMSecretValue(asmsecret, iamCredentials, &wg, errorEvents) + } + + wg.Wait() + close(errorEvents) + + if len(errorEvents) > 0 { + var terminalReasons []string + for err := range errorEvents { + terminalReasons = append(terminalReasons, err.Error()) + } + + errorString := strings.Join(terminalReasons, ";") + secret.setTerminalReason(errorString) + return errors.New(errorString) + } + return nil +} + +// retrieveASMSecretValue reads secret value from cache first, if not exists, call GetSecretFromASM to retrieve value +// AWS secrets Manager +func (secret *ASMSecretResource) retrieveASMSecretValue(apiSecret apicontainer.Secret, iamCredentials credentials.IAMRoleCredentials, wg *sync.WaitGroup, errorEvents chan error) { + defer wg.Done() + + asmClient := secret.asmClientCreator.NewASMClient(apiSecret.Region, iamCredentials) + seelog.Infof("ASM secret resource: retrieving resource for secret %v in region %s for task: [%s]", apiSecret.ValueFrom, apiSecret.Region, secret.taskARN) + input, jsonKey, err := getASMParametersFromInput(apiSecret.ValueFrom) + if err != nil { + errorEvents <- fmt.Errorf("trying to retrieve secret with value %s resulted in error: %v", apiSecret.ValueFrom, err) + return + } + + if input.SecretId == nil { + 
errorEvents <- fmt.Errorf("could not find a secretsmanager secretID from value %s", apiSecret.ValueFrom) + return + + } + + secretValue, err := asm.GetSecretFromASMWithInput(input, asmClient, jsonKey) + if err != nil { + errorEvents <- fmt.Errorf("fetching secret data from AWS Secrets Manager in region %s: %v", apiSecret.Region, err) + return + } + + secret.lock.Lock() + defer secret.lock.Unlock() + + // put secret value in secretData + secretKey := apiSecret.GetSecretResourceCacheKey() + secret.secretData[secretKey] = secretValue +} + +func pointerOrNil(in string) *string { + if in == "" { + return nil + } + + return aws.String(in) +} + +// Agent follows what Cloudformation does here with using Dynamic References to specify Template Values +// in the format secret-id:json-key:version-stage:version-id +// the input will always be a full ARN for ASM +func getASMParametersFromInput(valueFrom string) (input *secretsmanager.GetSecretValueInput, jsonKey string, err error) { + arnObj, err := arn.Parse(valueFrom) + if err != nil { + seelog.Warnf("Unable to parse ARN %s when trying to retrieve ASM secret", valueFrom) + return nil, "", err + } + + input = &secretsmanager.GetSecretValueInput{} + + paramValues := strings.Split(arnObj.Resource, arnDelimiter) // arnObj.Resource looks like secret:secretID:... + if len(paramValues) == len(strings.Split(asmARNResourceFormat, arnDelimiter)) { + input.SecretId = &valueFrom + return input, "", nil + } + if len(paramValues) != len(strings.Split(asmARNResourceWithParametersFormat, arnDelimiter)) { + // can't tell what input this is, throw some error + err = errors.New("an invalid ARN format for the AWS Secrets Manager secret was specified. 
Specify a valid ARN and try again.") + return nil, "", err + } + + input.SecretId = pointerOrNil(reconstructASMARN(arnObj)) + jsonKey = paramValues[2] + input.VersionStage = pointerOrNil(paramValues[3]) + input.VersionId = pointerOrNil(paramValues[4]) + + return input, jsonKey, nil +} + +// this method is to reconstruct an ASM ARN that has the enhancement parameters +// attached to it. in order to call secretsmanager:GetSecretValue, the entire ARN +// (including the 6 character special identifier tacked on by ASM) is required or +// just the secret name itself is required. +func reconstructASMARN(arnARN arn.ARN) string { + // arn resource should look like secret:secretID:jsonKey:versionStage:versionID + secretIDAndParams := strings.Split(arnARN.Resource, arnDelimiter) + // reconstruct the secret id without the parameters + secretID := fmt.Sprintf("%s%s%s", secretIDAndParams[0], arnDelimiter, secretIDAndParams[1]) + secretIDARN := arn.ARN{ + Partition: arnARN.Partition, + Service: arnARN.Service, + Region: arnARN.Region, + AccountID: arnARN.AccountID, + Resource: secretID, + }.String() + + return secretIDARN +} + +// getRequiredSecrets returns the requiredSecrets field of asmsecret task resource +func (secret *ASMSecretResource) getRequiredSecrets() map[string]apicontainer.Secret { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.requiredSecrets +} + +// getExecutionCredentialsID returns the execution role's credential ID +func (secret *ASMSecretResource) getExecutionCredentialsID() string { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.executionCredentialsID +} + +// Cleanup removes the secret value created for the task +func (secret *ASMSecretResource) Cleanup() error { + secret.clearASMSecretValue() + return nil +} + +// clearASMSecretValue cycles through the collection of secret value data and +// removes them from the task +func (secret *ASMSecretResource) clearASMSecretValue() { + secret.lock.Lock() + defer 
secret.lock.Unlock() + + for key := range secret.secretData { + delete(secret.secretData, key) + } +} + +// GetCachedSecretValue retrieves the secret value from secretData field +func (secret *ASMSecretResource) GetCachedSecretValue(secretKey string) (string, bool) { + secret.lock.RLock() + defer secret.lock.RUnlock() + + s, ok := secret.secretData[secretKey] + return s, ok +} + +// SetCachedSecretValue set the secret value in the secretData field given the key and value +func (secret *ASMSecretResource) SetCachedSecretValue(secretKey string, secretValue string) { + secret.lock.Lock() + defer secret.lock.Unlock() + + if secret.secretData == nil { + secret.secretData = make(map[string]string) + } + + secret.secretData[secretKey] = secretValue +} + +func (secret *ASMSecretResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + secret.initStatusToTransition() + secret.credentialsManager = resourceFields.CredentialsManager + secret.asmClientCreator = resourceFields.ASMClientCreator + + // if task hasn't turn to 'created' status, and it's desire status is 'running' + // the resource status needs to be reset to 'NONE' status so the secret value + // will be retrieved again + if taskKnownStatus < status.TaskCreated && + taskDesiredStatus <= status.TaskRunning { + secret.SetKnownStatus(resourcestatus.ResourceStatusNone) + } +} + +type ASMSecretResourceJSON struct { + TaskARN string `json:"taskARN"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *ASMSecretStatus `json:"desiredStatus"` + KnownStatus *ASMSecretStatus `json:"knownStatus"` + RequiredSecrets map[string]apicontainer.Secret `json:"secretResources"` + ExecutionCredentialsID string `json:"executionCredentialsID"` +} + +// MarshalJSON serialises the ASMSecretResource struct to JSON +func (secret *ASMSecretResource) MarshalJSON() ([]byte, error) { + if secret == nil { + return nil, errors.New("asmsecret 
resource is nil") + } + createdAt := secret.GetCreatedAt() + return json.Marshal(ASMSecretResourceJSON{ + TaskARN: secret.taskARN, + CreatedAt: &createdAt, + DesiredStatus: func() *ASMSecretStatus { + desiredState := secret.GetDesiredStatus() + s := ASMSecretStatus(desiredState) + return &s + }(), + KnownStatus: func() *ASMSecretStatus { + knownState := secret.GetKnownStatus() + s := ASMSecretStatus(knownState) + return &s + }(), + RequiredSecrets: secret.getRequiredSecrets(), + ExecutionCredentialsID: secret.getExecutionCredentialsID(), + }) +} + +// UnmarshalJSON deserialises the raw JSON to a ASMSecretResource struct +func (secret *ASMSecretResource) UnmarshalJSON(b []byte) error { + temp := ASMSecretResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + if temp.DesiredStatus != nil { + secret.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + secret.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() { + secret.SetCreatedAt(*temp.CreatedAt) + } + if temp.RequiredSecrets != nil { + secret.requiredSecrets = temp.RequiredSecrets + } + secret.taskARN = temp.TaskARN + secret.executionCredentialsID = temp.ExecutionCredentialsID + + return nil +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (secret *ASMSecretResource) GetAppliedStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.appliedStatus +} + +func (secret *ASMSecretResource) DependOnTaskNetwork() bool { + return false +} + +func (secret *ASMSecretResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (secret *ASMSecretResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff 
--git a/agent/taskresource/asmsecret/asmsecret_test.go b/agent/taskresource/asmsecret/asmsecret_test.go new file mode 100644 index 00000000000..29e47fcbe9a --- /dev/null +++ b/agent/taskresource/asmsecret/asmsecret_test.go @@ -0,0 +1,331 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asmsecret + +import ( + "encoding/json" + "errors" + "fmt" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + mock_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + executionCredentialsID = "exec-creds-id" + region1 = "us-west-2" + region2 = "us-east-1" + secretName1 = "db_username_1" + secretName2 = "db_username_2" + secretID = "secretID" + valueFrom1 = "arn:aws:secretsmanager:region:account-id:secret:" + secretID + regionKeyWest = "us-west-2" + 
regionKeyEast = "us-east-1" + secretCacheJoinChar = "_" + secretValue = "secret-value" + taskARN = "task1" + valueFromParams = valueFrom1 + ":json-key:version-stage:version-id" + valueFromWrongFormat = valueFrom1 + ":wrong:format" + secretValueJson = "{\"json-key\": \"" + secretValue + "\",\"some-other-key\": \"secret2\"}" +) + +var secretKeyWest1 = fmt.Sprintf("%s%s%s", valueFrom1, secretCacheJoinChar, regionKeyWest) +var secretKeyEast1 = fmt.Sprintf("%s%s%s", valueFrom1, secretCacheJoinChar, regionKeyEast) +var secretKeyParams = fmt.Sprintf("%s%s%s", valueFromParams, secretCacheJoinChar, regionKeyWest) + +func TestCreateWithMultipleASMCall(t *testing.T) { + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyWest1: sampleSecret(secretName1, valueFrom1, region1), + secretKeyEast1: sampleSecret(secretName2, valueFrom1, region2), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + asmSecretValue := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(secretValue), + } + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient) + asmClientCreator.EXPECT().NewASMClient(region2, iamRoleCreds).Return(mockASMClient) + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, valueFrom1, aws.StringValue(in.SecretId)) + }).Return(asmSecretValue, nil).Times(2) + + asmRes := &ASMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, 
+ asmClientCreator: asmClientCreator, + } + require.NoError(t, asmRes.Create()) + + value1, ok := asmRes.GetCachedSecretValue(secretKeyWest1) + require.True(t, ok) + assert.Equal(t, secretValue, value1) + + value2, ok := asmRes.GetCachedSecretValue(secretKeyEast1) + require.True(t, ok) + assert.Equal(t, secretValue, value2) +} + +func TestCreateReturnMultipleErrors(t *testing.T) { + + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyWest1: sampleSecret(secretName1, valueFrom1, region1), + secretKeyEast1: sampleSecret(secretName2, valueFrom1, region2), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + asmSecretValue := &secretsmanager.GetSecretValueOutput{} + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient) + asmClientCreator.EXPECT().NewASMClient(region2, iamRoleCreds).Return(mockASMClient) + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, valueFrom1, aws.StringValue(in.SecretId)) + }).Return(asmSecretValue, errors.New("error response")).Times(2) + + asmRes := &ASMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + asmClientCreator: asmClientCreator, + } + + assert.Error(t, asmRes.Create()) + expectedError1 := fmt.Sprintf("fetching secret data from AWS Secrets Manager in region %s: secret %s: error response", region1, valueFrom1) + expectedError2 := fmt.Sprintf("fetching secret data from AWS Secrets 
Manager in region %s: secret %s: error response", region2, valueFrom1) + assert.Contains(t, asmRes.GetTerminalReason(), expectedError1) + assert.Contains(t, asmRes.GetTerminalReason(), expectedError2) +} + +func TestCreateReturnError(t *testing.T) { + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyWest1: sampleSecret(secretName1, valueFrom1, region1), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + asmSecretValue := &secretsmanager.GetSecretValueOutput{} + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, valueFrom1, aws.StringValue(in.SecretId)) + }).Return(asmSecretValue, errors.New("error response")), + ) + asmRes := &ASMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + asmClientCreator: asmClientCreator, + } + + assert.Error(t, asmRes.Create()) + expectedError := fmt.Sprintf("fetching secret data from AWS Secrets Manager in region %s: secret %s: error response", region1, valueFrom1) + assert.Equal(t, expectedError, asmRes.GetTerminalReason()) +} + +func TestMarshalUnmarshalJSON(t *testing.T) { + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyWest1: sampleSecret(secretName1, valueFrom1, region1), + } + + asmResIn := &ASMSecretResource{ + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + 
createdAt: time.Now(), + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredSecrets: requiredSecretData, + } + + bytes, err := json.Marshal(asmResIn) + require.NoError(t, err) + + asmResOut := &ASMSecretResource{} + err = json.Unmarshal(bytes, asmResOut) + require.NoError(t, err) + assert.Equal(t, asmResIn.taskARN, asmResOut.taskARN) + assert.WithinDuration(t, asmResIn.createdAt, asmResOut.createdAt, time.Microsecond) + assert.Equal(t, asmResIn.desiredStatusUnsafe, asmResOut.desiredStatusUnsafe) + assert.Equal(t, asmResIn.knownStatusUnsafe, asmResOut.knownStatusUnsafe) + assert.Equal(t, asmResIn.executionCredentialsID, asmResOut.executionCredentialsID) + assert.Equal(t, len(asmResIn.requiredSecrets), len(asmResOut.requiredSecrets)) + assert.Equal(t, asmResIn.requiredSecrets[secretKeyWest1], asmResOut.requiredSecrets[secretKeyWest1]) +} + +func TestInitialize(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + asmRes := &ASMSecretResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + } + asmRes.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + ASMClientCreator: asmClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + assert.Equal(t, resourcestatus.ResourceStatusNone, asmRes.GetKnownStatus()) + assert.Equal(t, resourcestatus.ResourceCreated, asmRes.GetDesiredStatus()) + +} + +func TestClearASMSecretValue(t *testing.T) { + secretValues := map[string]string{ + "db_name": "db_value", + "secret_name": "secret_value", + } + + asmRes := &ASMSecretResource{ + secretData: secretValues, + } + asmRes.clearASMSecretValue() + assert.Equal(t, 0, len(asmRes.secretData)) +} + +func 
TestCreateWithASMParametersWrongFormat(t *testing.T) { + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyWest1: sampleSecret(secretName1, valueFromWrongFormat, region1), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient) + + asmRes := &ASMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + asmClientCreator: asmClientCreator, + } + + assert.Error(t, asmRes.Create()) + expectedError := fmt.Sprintf("trying to retrieve secret with value %s resulted in error: "+ + "an invalid ARN format for the AWS Secrets Manager secret was specified. 
Specify a valid ARN and try again.", valueFromWrongFormat) + assert.Contains(t, asmRes.GetTerminalReason(), expectedError) +} + +func TestCreateWithASMParametersJSONKeySpecified(t *testing.T) { + requiredSecretData := map[string]apicontainer.Secret{ + secretKeyParams: sampleSecret(secretName1, valueFromParams, region1), + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + asmClientCreator := mock_factory.NewMockClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + asmSecretValue := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(secretValueJson), + } + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient) + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, valueFrom1, aws.StringValue(in.SecretId)) + }).Return(asmSecretValue, nil) + + asmRes := &ASMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + asmClientCreator: asmClientCreator, + } + require.NoError(t, asmRes.Create()) + + value, ok := asmRes.GetCachedSecretValue(secretKeyParams) + require.True(t, ok) + assert.Equal(t, secretValue, value) +} + +func sampleSecret(secretName string, valueFrom string, region string) apicontainer.Secret { + return apicontainer.Secret{ + Name: secretName, + ValueFrom: valueFrom, + Region: region, + Provider: "asm", + } +} diff --git a/agent/taskresource/asmsecret/asmsecretstatus.go b/agent/taskresource/asmsecret/asmsecretstatus.go new file mode 100644 index 00000000000..384ce5f272a --- /dev/null +++ 
b/agent/taskresource/asmsecret/asmsecretstatus.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package asmsecret + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type ASMSecretStatus resourcestatus.ResourceStatus + +const ( + // is the zero state of a task resource + ASMSecretStatusNone ASMSecretStatus = iota + // represents a task resource which has been created + ASMSecretCreated + // represents a task resource which has been cleaned up + ASMSecretRemoved +) + +var asmSecretStatusMap = map[string]ASMSecretStatus{ + "NONE": ASMSecretStatusNone, + "CREATED": ASMSecretCreated, + "REMOVED": ASMSecretRemoved, +} + +// StatusString returns a human readable string representation of this object +func (as ASMSecretStatus) String() string { + for k, v := range asmSecretStatusMap { + if v == as { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (as *ASMSecretStatus) MarshalJSON() ([]byte, error) { + if as == nil { + return nil, errors.New("asmsecret resource status is nil") + } + return []byte(`"` + as.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (as *ASMSecretStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *as = ASMSecretStatusNone + return nil + } + + if b[0] != '"' || 
b[len(b)-1] != '"' { + *as = ASMSecretStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := asmSecretStatusMap[string(strStatus)] + if !ok { + *as = ASMSecretStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *as = stat + return nil +} diff --git a/agent/taskresource/asmsecret/asmsecretstatus_test.go b/agent/taskresource/asmsecret/asmsecretstatus_test.go new file mode 100644 index 00000000000..920e3598d15 --- /dev/null +++ b/agent/taskresource/asmsecret/asmsecretstatus_test.go @@ -0,0 +1,157 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package asmsecret + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InASMSecretStatus ASMSecretStatus + OutASMSecretStatus string + }{ + { + Name: "ToStringASMSecretStatusNone", + InASMSecretStatus: ASMSecretStatusNone, + OutASMSecretStatus: "NONE", + }, + { + Name: "ToStringASMSecretCreated", + InASMSecretStatus: ASMSecretCreated, + OutASMSecretStatus: "CREATED", + }, + { + Name: "ToStringASMSecretRemoved", + InASMSecretStatus: ASMSecretRemoved, + OutASMSecretStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutASMSecretStatus, c.InASMSecretStatus.String()) + }) + } +} + +func TestMarshalNilASMSecretStatus(t *testing.T) { + var status *ASMSecretStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalASMSecretStatus(t *testing.T) { + cases := []struct { + Name string + InASMSecretStatus ASMSecretStatus + OutASMSecretStatus string + }{ + { + Name: "MarshallASMSecretStatusNone", + InASMSecretStatus: ASMSecretStatusNone, + OutASMSecretStatus: "\"NONE\"", + }, + { + Name: "MarshallASMSecretCreated", + InASMSecretStatus: ASMSecretCreated, + OutASMSecretStatus: "\"CREATED\"", + }, + { + Name: "MarshallASMSecretRemoved", + InASMSecretStatus: ASMSecretRemoved, + OutASMSecretStatus: "\"REMOVED\"", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InASMSecretStatus.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, c.OutASMSecretStatus, string(bytes[:])) + }) + } + +} + +func TestUnmarshalASMSecretStatus(t *testing.T) { + cases := []struct { + Name string + InASMSecretStatus string + OutASMSecretStatus ASMSecretStatus + ShouldError bool + }{ + { + Name: "UnmarshallASMSecretStatusNone", + InASMSecretStatus: "\"NONE\"", + OutASMSecretStatus: ASMSecretStatusNone, + ShouldError: false, + }, 
+ { + Name: "UnmarshallASMSecretCreated", + InASMSecretStatus: "\"CREATED\"", + OutASMSecretStatus: ASMSecretCreated, + ShouldError: false, + }, + { + Name: "UnmarshallASMSecretRemoved", + InASMSecretStatus: "\"REMOVED\"", + OutASMSecretStatus: ASMSecretRemoved, + ShouldError: false, + }, + { + Name: "UnmarshallASMSecretStatusNull", + InASMSecretStatus: "null", + OutASMSecretStatus: ASMSecretStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallASMSecretStatusNonString", + InASMSecretStatus: "1", + OutASMSecretStatus: ASMSecretStatusNone, + ShouldError: true, + }, + { + Name: "UnmarshallASMSecretStatusUnmappedStatus", + InASMSecretStatus: "\"LOL\"", + OutASMSecretStatus: ASMSecretStatusNone, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + + var status ASMSecretStatus + err := json.Unmarshal([]byte(c.InASMSecretStatus), &status) + + if c.ShouldError { + assert.Error(t, err) + } else { + + assert.NoError(t, err) + assert.Equal(t, c.OutASMSecretStatus, status) + } + }) + } +} diff --git a/agent/taskresource/cgroup/cgroup.go b/agent/taskresource/cgroup/cgroup.go new file mode 100644 index 00000000000..74ca49f75f4 --- /dev/null +++ b/agent/taskresource/cgroup/cgroup.go @@ -0,0 +1,396 @@ +// +build linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package cgroup + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + control "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" + "github.com/cihub/seelog" + "github.com/containerd/cgroups" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + memorySubsystem = "/memory" + memoryUseHierarchy = "memory.use_hierarchy" + rootReadOnlyPermissions = os.FileMode(400) + resourceName = "cgroup" + resourceProvisioningError = "CgroupError: Agent could not create task's platform resources" +) + +var ( + enableMemoryHierarchy = []byte(strconv.Itoa(1)) +) + +// CgroupResource represents Cgroup resource +type CgroupResource struct { + taskARN string + control control.Control + cgroupRoot string + cgroupMountPath string + resourceSpec specs.LinuxResources + ioutil ioutilwrapper.IOUtil + createdAt time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatus is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. 
This is + // used while progressing resource states in progressTask() of task manager + appliedStatus resourcestatus.ResourceStatus + statusToTransitions map[resourcestatus.ResourceStatus]func() error + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// NewCgroupResource is used to return an object that implements the Resource interface +func NewCgroupResource(taskARN string, + control control.Control, + ioutil ioutilwrapper.IOUtil, + cgroupRoot string, + cgroupMountPath string, + resourceSpec specs.LinuxResources) *CgroupResource { + c := &CgroupResource{ + taskARN: taskARN, + control: control, + ioutil: ioutil, + cgroupRoot: cgroupRoot, + cgroupMountPath: cgroupMountPath, + resourceSpec: resourceSpec, + } + c.initializeResourceStatusToTransitionFunction() + return c +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (cgroup *CgroupResource) GetTerminalReason() string { + // for cgroups we can send up a static string because this is an + // implementation detail and unrelated to customer resources + return resourceProvisioningError +} + +func (cgroup *CgroupResource) initializeResourceStatusToTransitionFunction() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(CgroupCreated): cgroup.Create, + } + cgroup.statusToTransitions = resourceStatusToTransitionFunction +} + +func (cgroup *CgroupResource) SetIOUtil(ioutil ioutilwrapper.IOUtil) { + cgroup.ioutil = ioutil +} + +// SetDesiredStatus safely sets the desired status of the resource +func (cgroup *CgroupResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + cgroup.lock.Lock() + defer cgroup.lock.Unlock() + + cgroup.desiredStatusUnsafe = status +} + +// GetDesiredStatus safely returns the desired status of the task +func (cgroup *CgroupResource) GetDesiredStatus() resourcestatus.ResourceStatus { + cgroup.lock.RLock() + 
defer cgroup.lock.RUnlock() + + return cgroup.desiredStatusUnsafe +} + +// GetName safely returns the name of the resource +func (cgroup *CgroupResource) GetName() string { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return resourceName +} + +// DesiredTerminal returns true if the cgroup's desired status is REMOVED +func (cgroup *CgroupResource) DesiredTerminal() bool { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return cgroup.desiredStatusUnsafe == resourcestatus.ResourceStatus(CgroupRemoved) +} + +// KnownCreated returns true if the cgroup's known status is CREATED +func (cgroup *CgroupResource) KnownCreated() bool { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return cgroup.knownStatusUnsafe == resourcestatus.ResourceStatus(CgroupCreated) +} + +// TerminalStatus returns the last transition state of cgroup +func (cgroup *CgroupResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(CgroupRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. 
+func (cgroup *CgroupResource) NextKnownState() resourcestatus.ResourceStatus { + return cgroup.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (cgroup *CgroupResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := cgroup.statusToTransitions[nextState] + if !ok { + seelog.Errorf("Cgroup Resource [%s]: unsupported desired state transition [%s]: %s", + cgroup.taskARN, cgroup.GetName(), cgroup.StatusString(nextState)) + return fmt.Errorf("resource [%s]: transition to %s impossible", cgroup.GetName(), + cgroup.StatusString(nextState)) + } + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (cgroup *CgroupResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(CgroupCreated) +} + +// SetKnownStatus safely sets the currently known status of the resource +func (cgroup *CgroupResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + cgroup.lock.Lock() + defer cgroup.lock.Unlock() + + cgroup.knownStatusUnsafe = status + cgroup.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (cgroup *CgroupResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if cgroup.appliedStatus == resourcestatus.ResourceStatus(CgroupStatusNone) { + return + } + + // Check if the resource transition has already finished + if cgroup.appliedStatus <= knownStatus { + cgroup.appliedStatus = resourcestatus.ResourceStatus(CgroupStatusNone) + } +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (cgroup *CgroupResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + cgroup.lock.Lock() + defer cgroup.lock.Unlock() + + if cgroup.appliedStatus != resourcestatus.ResourceStatus(CgroupStatusNone) { + // return 
false to indicate the set operation failed + return false + } + + cgroup.appliedStatus = status + return true +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (cgroup *CgroupResource) GetAppliedStatus() resourcestatus.ResourceStatus { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return cgroup.appliedStatus +} + +// GetKnownStatus safely returns the currently known status of the task +func (cgroup *CgroupResource) GetKnownStatus() resourcestatus.ResourceStatus { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return cgroup.knownStatusUnsafe +} + +// StatusString returns the string of the cgroup resource status +func (cgroup *CgroupResource) StatusString(status resourcestatus.ResourceStatus) string { + return CgroupStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (cgroup *CgroupResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + cgroup.lock.Lock() + defer cgroup.lock.Unlock() + + cgroup.createdAt = createdAt +} + +// GetCreatedAt returns the timestamp for resource's creation time +func (cgroup *CgroupResource) GetCreatedAt() time.Time { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + + return cgroup.createdAt +} + +// Create creates cgroup root for the task +func (cgroup *CgroupResource) Create() error { + err := cgroup.setupTaskCgroup() + if err != nil { + seelog.Criticalf("Cgroup resource [%s]: unable to setup cgroup root: %v", cgroup.taskARN, err) + return err + } + return nil +} + +func (cgroup *CgroupResource) setupTaskCgroup() error { + cgroupRoot := cgroup.cgroupRoot + seelog.Debugf("Cgroup resource [%s]: setting up cgroup at: %s", cgroup.taskARN, cgroupRoot) + + if cgroup.control.Exists(cgroupRoot) { + seelog.Debugf("Cgroup resource [%s]: cgroup at %s already exists, skipping creation", cgroup.taskARN, cgroupRoot) + return nil + } + + cgroupSpec := control.Spec{ + Root: cgroupRoot, + Specs: 
&cgroup.resourceSpec, + } + + _, err := cgroup.control.Create(&cgroupSpec) + if err != nil { + return fmt.Errorf("cgroup resource [%s]: setup cgroup: unable to create cgroup at %s: %w", cgroup.taskARN, cgroupRoot, err) + } + + // enabling cgroup memory hierarchy by doing 'echo 1 > memory.use_hierarchy' + memoryHierarchyPath := filepath.Join(cgroup.cgroupMountPath, memorySubsystem, cgroupRoot, memoryUseHierarchy) + err = cgroup.ioutil.WriteFile(memoryHierarchyPath, enableMemoryHierarchy, rootReadOnlyPermissions) + if err != nil { + return fmt.Errorf("cgroup resource [%s]: setup cgroup: unable to set use hierarchy flag: %w", cgroup.taskARN, err) + } + + return nil +} + +// Cleanup removes the cgroup root created for the task +func (cgroup *CgroupResource) Cleanup() error { + err := cgroup.control.Remove(cgroup.cgroupRoot) + // Explicitly handle cgroup deleted error + if err != nil { + if errors.Is(err, cgroups.ErrCgroupDeleted) { + seelog.Warnf("Cgroup at %s has already been removed: %v", cgroup.cgroupRoot, err) + return nil + } + return fmt.Errorf("resource: cleanup cgroup: unable to remove cgroup at %s: %w", cgroup.cgroupRoot, err) + } + return nil +} + +// cgroupResourceJSON duplicates CgroupResource fields, only for marshalling and unmarshalling purposes +type cgroupResourceJSON struct { + CgroupRoot string `json:"cgroupRoot"` + CgroupMountPath string `json:"cgroupMountPath"` + CreatedAt time.Time `json:"createdAt,omitempty"` + DesiredStatus *CgroupStatus `json:"desiredStatus"` + KnownStatus *CgroupStatus `json:"knownStatus"` + LinuxSpec specs.LinuxResources `json:"resourceSpec"` +} + +// MarshalJSON marshals CgroupResource object using duplicate struct CgroupResourceJSON +func (cgroup *CgroupResource) MarshalJSON() ([]byte, error) { + if cgroup == nil { + return nil, errors.New("cgroup resource is nil") + } + return json.Marshal(cgroupResourceJSON{ + cgroup.cgroupRoot, + cgroup.cgroupMountPath, + cgroup.GetCreatedAt(), + func() *CgroupStatus { + desiredState := 
cgroup.GetDesiredStatus() + status := CgroupStatus(desiredState) + return &status + }(), + func() *CgroupStatus { + knownState := cgroup.GetKnownStatus() + status := CgroupStatus(knownState) + return &status + }(), + cgroup.resourceSpec, + }) +} + +// UnmarshalJSON unmarshals CgroupResource object using duplicate struct CgroupResourceJSON +func (cgroup *CgroupResource) UnmarshalJSON(b []byte) error { + temp := cgroupResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + cgroup.cgroupRoot = temp.CgroupRoot + cgroup.cgroupMountPath = temp.CgroupMountPath + cgroup.resourceSpec = temp.LinuxSpec + if temp.DesiredStatus != nil { + cgroup.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + cgroup.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + return nil +} + +// GetCgroupRoot returns cgroup root of the resource +func (cgroup *CgroupResource) GetCgroupRoot() string { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + return cgroup.cgroupRoot +} + +// GetCgroupMountPath returns cgroup mount path of the resource +func (cgroup *CgroupResource) GetCgroupMountPath() string { + cgroup.lock.RLock() + defer cgroup.lock.RUnlock() + return cgroup.cgroupMountPath +} + +// Initialize initializes the resource fields in cgroup +func (cgroup *CgroupResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + cgroup.lock.Lock() + defer cgroup.lock.Unlock() + + cgroup.initializeResourceStatusToTransitionFunction() + cgroup.ioutil = resourceFields.IOUtil + cgroup.control = resourceFields.Control +} + +func (cgroup *CgroupResource) DependOnTaskNetwork() bool { + return false +} + +func (cgroup *CgroupResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (cgroup *CgroupResource) 
GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/cgroup/cgroup_test.go b/agent/taskresource/cgroup/cgroup_test.go new file mode 100644 index 00000000000..53a2a027f13 --- /dev/null +++ b/agent/taskresource/cgroup/cgroup_test.go @@ -0,0 +1,173 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cgroup + +import ( + "errors" + "fmt" + "testing" + "time" + + cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" + mock_cgroups "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/mock_control" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + "github.com/containerd/cgroups" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" + + "github.com/golang/mock/gomock" +) + +const ( + validTaskArn = "arn:aws:ecs:region:account-id:task/task-id" + invalidTaskArn = "invalid:task::arn" + cgroupMountPath = "/sys/fs/cgroup" + taskName = "sleep5TaskCgroup" + taskID = "taskID" +) + +func TestCreateHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + mockIO := 
mock_ioutilwrapper.NewMockIOUtil(ctrl) + + cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + gomock.InOrder( + mockControl.EXPECT().Exists(gomock.Any()).Return(false), + mockControl.EXPECT().Create(gomock.Any()).Return(nil, nil), + mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil), + ) + cgroupResource := NewCgroupResource("taskArn", mockControl, mockIO, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + assert.NoError(t, cgroupResource.Create()) +} + +func TestCreateCgroupPathExists(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + gomock.InOrder( + mockControl.EXPECT().Exists(gomock.Any()).Return(true), + ) + + cgroupResource := NewCgroupResource("taskArn", mockControl, mockIO, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + assert.NoError(t, cgroupResource.Create()) +} + +func TestCreateCgroupError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + gomock.InOrder( + mockControl.EXPECT().Exists(gomock.Any()).Return(false), + mockControl.EXPECT().Create(gomock.Any()).Return(mockCgroup, errors.New("cgroup create error")), + ) + + cgroupResource := NewCgroupResource("taskArn", mockControl, mockIO, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + assert.Error(t, cgroupResource.Create()) +} + +func TestCleanupHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + 
mockControl.EXPECT().Remove(cgroupRoot).Return(nil) + + cgroupResource := NewCgroupResource("taskArn", mockControl, nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + assert.NoError(t, cgroupResource.Cleanup()) +} + +func TestCleanupRemoveError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + mockControl.EXPECT().Remove(gomock.Any()).Return(errors.New("cgroup remove error")) + + cgroupResource := NewCgroupResource("taskArn", mockControl, nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + assert.Error(t, cgroupResource.Cleanup()) +} + +func TestCleanupCgroupDeletedError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockControl := mock_control.NewMockControl(ctrl) + cgroupRoot := fmt.Sprintf("/ecs/%s", taskID) + + err := cgroups.ErrCgroupDeleted + wrappedErr := fmt.Errorf("cgroup remove: unable to obtain controller: %w", err) + // check that the wrapped err unwraps to cgroups.ErrCgroupDeleted + assert.True(t, errors.Is(wrappedErr, cgroups.ErrCgroupDeleted)) + mockControl.EXPECT().Remove(gomock.Any()).Return(wrappedErr) + + cgroupResource := NewCgroupResource("taskArn", mockControl, nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + // the ErrCgroupDeleted is caught and logs a warning, returns no error + assert.NoError(t, cgroupResource.Cleanup()) +} + +func TestMarshal(t *testing.T) { + cgroupStr := "{\"cgroupRoot\":\"/ecs/taskid\",\"cgroupMountPath\":\"/sys/fs/cgroup\"," + + "\"createdAt\":\"0001-01-01T00:00:00Z\",\"desiredStatus\":\"CREATED\",\"knownStatus\":\"NONE\",\"resourceSpec\":{}}" + + cgroupRoot := "/ecs/taskid" + cgroupMountPath := "/sys/fs/cgroup" + + cgroup := NewCgroupResource("", cgroup.New(), nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{}) + cgroup.SetDesiredStatus(resourcestatus.ResourceStatus(CgroupCreated)) + 
cgroup.SetKnownStatus(resourcestatus.ResourceStatus(CgroupStatusNone)) + + bytes, err := cgroup.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, cgroupStr, string(bytes[:])) +} + +func TestUnmarshal(t *testing.T) { + cgroupRoot := "/ecs/taskid" + cgroupMountPath := "/sys/fs/cgroup" + bytes := []byte("{\"CgroupRoot\":\"/ecs/taskid\",\"CgroupMountPath\":\"/sys/fs/cgroup\"," + + "\"CreatedAt\":\"0001-01-01T00:00:00Z\",\"DesiredStatus\":\"CREATED\",\"KnownStatus\":\"NONE\"}") + unmarshalledCgroup := &CgroupResource{} + err := unmarshalledCgroup.UnmarshalJSON(bytes) + assert.NoError(t, err) + + assert.Equal(t, cgroupRoot, unmarshalledCgroup.GetCgroupRoot()) + assert.Equal(t, cgroupMountPath, unmarshalledCgroup.GetCgroupMountPath()) + assert.Equal(t, time.Time{}, unmarshalledCgroup.GetCreatedAt()) + assert.Equal(t, resourcestatus.ResourceStatus(CgroupCreated), unmarshalledCgroup.GetDesiredStatus()) + assert.Equal(t, resourcestatus.ResourceStatus(CgroupStatusNone), unmarshalledCgroup.GetKnownStatus()) +} diff --git a/agent/taskresource/cgroup/cgroup_unsupported.go b/agent/taskresource/cgroup/cgroup_unsupported.go new file mode 100644 index 00000000000..dd737e89377 --- /dev/null +++ b/agent/taskresource/cgroup/cgroup_unsupported.go @@ -0,0 +1,147 @@ +// +build !linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package cgroup + +import ( + "errors" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +// CgroupResource represents Cgroup resource +type CgroupResource struct{} + +// SetDesiredStatus safely sets the desired status of the resource +func (c *CgroupResource) SetDesiredStatus(status resourcestatus.ResourceStatus) {} + +// GetDesiredStatus safely returns the desired status of the task +func (c *CgroupResource) GetDesiredStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// GetName safely returns the name of the resource +func (c *CgroupResource) GetName() string { + return "undefined" +} + +// DesiredTerminal returns true if the cgroup's desired status is REMOVED +func (c *CgroupResource) DesiredTerminal() bool { + return false +} + +// KnownCreated returns true if the cgroup's known status is CREATED +func (c *CgroupResource) KnownCreated() bool { + return false +} + +// TerminalStatus returns the last transition state of cgroup +func (c *CgroupResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// NextKnownState returns the state that the resource should progress to based +// on its `KnownState`. 
+func (c *CgroupResource) NextKnownState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// ApplyTransition calls the function required to move to the specified status +func (c *CgroupResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + return errors.New("unsupported platform") +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (c *CgroupResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetKnownStatus safely sets the currently known status of the resource +func (c *CgroupResource) SetKnownStatus(status resourcestatus.ResourceStatus) {} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (c *CgroupResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + return false +} + +// GetKnownStatus safely returns the currently known status of the task +func (c *CgroupResource) GetKnownStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (c *CgroupResource) GetAppliedStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (c *CgroupResource) SetCreatedAt(createdAt time.Time) {} + +// GetCreatedAt sets the timestamp for resource's creation time +func (c *CgroupResource) GetCreatedAt() time.Time { + return time.Time{} +} + +// Create creates cgroup root for the task +func (c *CgroupResource) Create() error { + return errors.New("unsupported platform") +} + +// Cleanup removes the cgroup root created for the task +func (c *CgroupResource) Cleanup() error { + return errors.New("unsupported platform") +} + +// StatusString returns the string of the cgroup resource status +func (c *CgroupResource) 
StatusString(status resourcestatus.ResourceStatus) string { + return "undefined" +} + +func (c *CgroupResource) GetTerminalReason() string { + return "undefined" +} + +// MarshalJSON marshals CgroupResource object +func (c *CgroupResource) MarshalJSON() ([]byte, error) { + return nil, errors.New("unsupported platform") +} + +// UnmarshalJSON unmarshals CgroupResource object +func (c *CgroupResource) UnmarshalJSON(b []byte) error { + return errors.New("unsupported platform") +} + +// Initialize fills the resource fields +func (cgroup *CgroupResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { +} + +func (cgroup *CgroupResource) DependOnTaskNetwork() bool { + return false +} + +func (cgroup *CgroupResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (cgroup *CgroupResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/cgroup/cgroupstatus.go b/agent/taskresource/cgroup/cgroupstatus.go new file mode 100644 index 00000000000..1d54410b064 --- /dev/null +++ b/agent/taskresource/cgroup/cgroupstatus.go @@ -0,0 +1,80 @@ +// +build linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package cgroup + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +// CgroupStatus defines resource statuses for cgroups +type CgroupStatus resourcestatus.ResourceStatus + +const ( + // CgroupStatusNone is the zero state of a task resource + CgroupStatusNone CgroupStatus = iota + // CgroupCreated represents a task resource which has been created + CgroupCreated + // CgroupRemoved represents a task resource which has been cleaned up + CgroupRemoved +) + +var cgroupStatusMap = map[string]CgroupStatus{ + "NONE": CgroupStatusNone, + "CREATED": CgroupCreated, + "REMOVED": CgroupRemoved, +} + +// String returns a human readable string representation of this object +func (cs CgroupStatus) String() string { + for k, v := range cgroupStatusMap { + if v == cs { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (cs *CgroupStatus) MarshalJSON() ([]byte, error) { + if cs == nil { + return nil, nil + } + return []byte(`"` + cs.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (cs *CgroupStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *cs = CgroupStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *cs = CgroupStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := cgroupStatusMap[string(strStatus)] + if !ok { + *cs = CgroupStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *cs = stat + return nil +} diff --git a/agent/taskresource/cgroup/cgroupstatus_test.go b/agent/taskresource/cgroup/cgroupstatus_test.go new file mode 100644 index 00000000000..b3efd763d1f --- /dev/null +++ b/agent/taskresource/cgroup/cgroupstatus_test.go @@ -0,0 +1,88 @@ +// +build linux,unit + +// 
Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cgroup + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCgroupStatusString(t *testing.T) { + var resourceStatus CgroupStatus + + resourceStatus = CgroupStatusNone + assert.Equal(t, resourceStatus.String(), "NONE") + resourceStatus = CgroupCreated + assert.Equal(t, resourceStatus.String(), "CREATED") + resourceStatus = CgroupRemoved + assert.Equal(t, resourceStatus.String(), "REMOVED") +} + +func TestMarshalCgroupStatus(t *testing.T) { + status := CgroupStatusNone + bytes, err := status.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, `"NONE"`, string(bytes[:])) +} + +func TestMarshalNilCgroupStatus(t *testing.T) { + var status *CgroupStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Nil(t, err) +} + +type testCgroupStatus struct { + SomeStatus CgroupStatus `json:"status"` +} + +func TestUnmarshalCgroupStatus(t *testing.T) { + status := CgroupStatusNone + + err := json.Unmarshal([]byte(`"CREATED"`), &status) + assert.NoError(t, err) + assert.Equal(t, CgroupCreated, status, "CREATED should unmarshal to CREATED, not "+status.String()) + + var testStatus testCgroupStatus + err = json.Unmarshal([]byte(`{"status":"REMOVED"}`), &testStatus) + assert.NoError(t, err) + assert.Equal(t, CgroupRemoved, testStatus.SomeStatus, "REMOVED should unmarshal to REMOVED, not "+testStatus.SomeStatus.String()) +} + +func 
TestUnmarshalNullCgroupStatus(t *testing.T) { + status := CgroupCreated + err := json.Unmarshal([]byte("null"), &status) + assert.NoError(t, err) + assert.Equal(t, CgroupStatusNone, status, "null should unmarshal to None, not "+status.String()) +} + +func TestUnmarshalNonStringCgroupStatusDefaultNone(t *testing.T) { + status := CgroupCreated + err := json.Unmarshal([]byte(`1`), &status) + assert.NotNil(t, err) + assert.Equal(t, CgroupStatusNone, status, "non-string status should unmarshal to None, not "+status.String()) +} + +func TestUnmarshalUnmappedCgroupStatusDefaultNone(t *testing.T) { + status := CgroupRemoved + err := json.Unmarshal([]byte(`"SOMEOTHER"`), &status) + assert.NotNil(t, err) + assert.Equal(t, CgroupStatusNone, status, "Unmapped status should unmarshal to None, not "+status.String()) +} diff --git a/agent/taskresource/cgroup/control/cgroup_controller_linux.go b/agent/taskresource/cgroup/control/cgroup_controller_linux.go new file mode 100644 index 00000000000..edb3b69222e --- /dev/null +++ b/agent/taskresource/cgroup/control/cgroup_controller_linux.go @@ -0,0 +1,109 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package control + +import ( + "fmt" + + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory" + + "github.com/cihub/seelog" + "github.com/containerd/cgroups" + "github.com/pkg/errors" +) + +// control is used to implement the cgroup Control interface +type control struct { + factory.CgroupFactory +} + +// New is used to obtain a new cgroup control object +func New() Control { + return newControl(&factory.GlobalCgroupFactory{}) +} + +// newControl helps setup the cgroup controller +func newControl(cgroupFact factory.CgroupFactory) Control { + return &control{ + cgroupFact, + } +} + +// Create creates a new cgroup based off the spec post validation +func (c *control) Create(cgroupSpec *Spec) (cgroups.Cgroup, error) { + // Validate incoming spec + err := validateCgroupSpec(cgroupSpec) + if err != nil { + return nil, fmt.Errorf("cgroup create: failed to validate spec: %w", err) + } + + // Create cgroup + seelog.Infof("Creating cgroup %s", cgroupSpec.Root) + controller, err := c.New(cgroups.V1, cgroups.StaticPath(cgroupSpec.Root), cgroupSpec.Specs) + + if err != nil { + return nil, fmt.Errorf("cgroup create: unable to create controller: %w", err) + } + + return controller, nil +} + +// Remove is used to delete the cgroup +func (c *control) Remove(cgroupPath string) error { + seelog.Debugf("Removing cgroup %s", cgroupPath) + + controller, err := c.Load(cgroups.V1, cgroups.StaticPath(cgroupPath)) + if err != nil { + // use the %w verb to wrap the error to be unwrapped by errors.Is() + return fmt.Errorf("cgroup remove: unable to obtain controller: %w", err) + } + + // Delete cgroup + err = controller.Delete() + if err != nil { + return fmt.Errorf("cgroup remove: unable to delete cgroup: %w", err) + } + return nil +} + +// Exists is used to verify the existence of a cgroup +func (c *control) Exists(cgroupPath string) bool { + seelog.Debugf("Checking existence of cgroup: %s", cgroupPath) + + controller, err := c.Load(cgroups.V1, 
cgroups.StaticPath(cgroupPath)) + if err != nil || controller == nil { + return false + } + + return true +} + +// validateCgroupSpec checks the cgroup spec for valid path and specifications +func validateCgroupSpec(cgroupSpec *Spec) error { + if cgroupSpec == nil { + return errors.New("cgroup spec validator: empty cgroup spec") + } + + if cgroupSpec.Root == "" { + return errors.New("cgroup spec validator: invalid cgroup root") + } + + // Validate the linux resource specs + if cgroupSpec.Specs == nil { + return errors.New("cgroup spec validator: empty linux resource spec") + } + return nil +} diff --git a/agent/taskresource/cgroup/control/cgroup_controller_linux_test.go b/agent/taskresource/cgroup/control/cgroup_controller_linux_test.go new file mode 100644 index 00000000000..cf1148ee062 --- /dev/null +++ b/agent/taskresource/cgroup/control/cgroup_controller_linux_test.go @@ -0,0 +1,182 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package control + +import ( + "errors" + "testing" + + mock_cgroups "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock_factory" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/assert" + + "github.com/golang/mock/gomock" +) + +const ( + testCgroupRoot = "/ecs/foo" +) + +func TestCreateHappyCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockCgroup, nil) + + testSpecs := &specs.LinuxResources{} + + control := newControl(mockCgroupFactory) + + res, err := control.Create(&Spec{testCgroupRoot, testSpecs}) + assert.Equal(t, mockCgroup, res) + assert.NoError(t, err) +} + +func TestCreateErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("cgroup error")) + + testSpecs := &specs.LinuxResources{} + + control := newControl(mockCgroupFactory) + + res, err := control.Create(&Spec{testCgroupRoot, testSpecs}) + assert.Nil(t, res) + assert.Error(t, err) +} + +func TestCreateWithBadSpecs(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + cg := newControl(mockCgroupFactory) + + testCases := []struct { + spec *Spec + name string + }{ + {&Spec{"", nil}, "empty root and nil spec"}, + {&Spec{"/ecs/foo", nil}, "root with nil spec"}, + {&Spec{"", &specs.LinuxResources{}}, "empty root with spec"}, + {&Spec{}, "empty spec"}, + {nil, "nil spec"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + control, 
err := cg.Create(tc.spec) + assert.Error(t, err, "Create should return an error") + assert.Nil(t, control, "Create call should not return a controller") + }) + } +} + +func TestRemoveHappyCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + gomock.InOrder( + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(mockCgroup, nil), + mockCgroup.EXPECT().Delete().Return(nil), + ) + + control := newControl(mockCgroupFactory) + + assert.NoError(t, control.Remove(testCgroupRoot)) +} + +func TestRemoveDoingErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(nil, errors.New("unable to load")) + + control := newControl(mockCgroupFactory) + + assert.Error(t, control.Remove(testCgroupRoot)) +} + +func TestRemoveErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + gomock.InOrder( + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(mockCgroup, nil), + mockCgroup.EXPECT().Delete().Return(errors.New("cgroup error")), + ) + + control := newControl(mockCgroupFactory) + + assert.Error(t, control.Remove(testCgroupRoot)) +} + +func TestExistsHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(mockCgroup, nil) + + control := newControl(mockCgroupFactory) + + assert.True(t, control.Exists(testCgroupRoot)) +} + +func TestExistsErrorPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer 
ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(nil, errors.New("cgroups error")) + + control := newControl(mockCgroupFactory) + + assert.False(t, control.Exists(testCgroupRoot)) +} + +func TestExistsErrorPathWithLoadError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().Load(gomock.Any(), gomock.Any()).Return(nil, nil) + + control := newControl(mockCgroupFactory) + + assert.False(t, control.Exists(testCgroupRoot)) +} diff --git a/agent/taskresource/cgroup/control/factory/factory_linux.go b/agent/taskresource/cgroup/control/factory/factory_linux.go new file mode 100644 index 00000000000..a77b0b6b22f --- /dev/null +++ b/agent/taskresource/cgroup/control/factory/factory_linux.go @@ -0,0 +1,42 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package factory + +import ( + "github.com/containerd/cgroups" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// CgroupFactory wraps around the global functions exposed by the cgroups library +type CgroupFactory interface { + // New is used to create a new cgroup hierarchy + New(hierarchy cgroups.Hierarchy, path cgroups.Path, specs *specs.LinuxResources) (cgroups.Cgroup, error) + // Load is used to load the cgroup based off the cgroup path + Load(hierarchy cgroups.Hierarchy, path cgroups.Path) (cgroups.Cgroup, error) +} + +// GlobalCgroupFactory calls the cgroups library global functions +type GlobalCgroupFactory struct{} + +// Load is used to load the cgroup hierarchy based off the cgroup path +func (c *GlobalCgroupFactory) Load(hierarchy cgroups.Hierarchy, path cgroups.Path) (cgroups.Cgroup, error) { + return cgroups.Load(hierarchy, path) +} + +// New is used to create a new cgroup hierarchy +func (c *GlobalCgroupFactory) New(hierarchy cgroups.Hierarchy, path cgroups.Path, specs *specs.LinuxResources) (cgroups.Cgroup, error) { + return cgroups.New(hierarchy, path, specs) +} diff --git a/agent/taskresource/cgroup/control/factory/generate_mocks_linux.go b/agent/taskresource/cgroup/control/factory/generate_mocks_linux.go new file mode 100644 index 00000000000..a577a5d18bb --- /dev/null +++ b/agent/taskresource/cgroup/control/factory/generate_mocks_linux.go @@ -0,0 +1,19 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package factory + +//go:generate mockgen -destination=mock/mock_cgroups_linux.go -copyright_file=../../../../../scripts/copyright_file github.com/containerd/cgroups Cgroup +//go:generate mockgen -destination=mock_factory/mock_cgroup_factory_linux.go -copyright_file=../../../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory CgroupFactory diff --git a/agent/taskresource/cgroup/control/factory/mock/mock_cgroups_linux.go b/agent/taskresource/cgroup/control/factory/mock/mock_cgroups_linux.go new file mode 100644 index 00000000000..e66882973bc --- /dev/null +++ b/agent/taskresource/cgroup/control/factory/mock/mock_cgroups_linux.go @@ -0,0 +1,226 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/containerd/cgroups (interfaces: Cgroup) + +// Package mock_cgroups is a generated GoMock package. 
+package mock_cgroups + +import ( + reflect "reflect" + + cgroups "github.com/containerd/cgroups" + gomock "github.com/golang/mock/gomock" + specs_go "github.com/opencontainers/runtime-spec/specs-go" +) + +// MockCgroup is a mock of Cgroup interface +type MockCgroup struct { + ctrl *gomock.Controller + recorder *MockCgroupMockRecorder +} + +// MockCgroupMockRecorder is the mock recorder for MockCgroup +type MockCgroupMockRecorder struct { + mock *MockCgroup +} + +// NewMockCgroup creates a new mock instance +func NewMockCgroup(ctrl *gomock.Controller) *MockCgroup { + mock := &MockCgroup{ctrl: ctrl} + mock.recorder = &MockCgroupMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCgroup) EXPECT() *MockCgroupMockRecorder { + return m.recorder +} + +// Add mocks base method +func (m *MockCgroup) Add(arg0 cgroups.Process) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add +func (mr *MockCgroupMockRecorder) Add(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockCgroup)(nil).Add), arg0) +} + +// Delete mocks base method +func (m *MockCgroup) Delete() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete") + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockCgroupMockRecorder) Delete() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockCgroup)(nil).Delete)) +} + +// Freeze mocks base method +func (m *MockCgroup) Freeze() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Freeze") + ret0, _ := ret[0].(error) + return ret0 +} + +// Freeze indicates an expected call of Freeze +func (mr *MockCgroupMockRecorder) Freeze() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Freeze", reflect.TypeOf((*MockCgroup)(nil).Freeze)) +} + +// MoveTo mocks base method +func (m *MockCgroup) MoveTo(arg0 cgroups.Cgroup) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MoveTo", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// MoveTo indicates an expected call of MoveTo +func (mr *MockCgroupMockRecorder) MoveTo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveTo", reflect.TypeOf((*MockCgroup)(nil).MoveTo), arg0) +} + +// New mocks base method +func (m *MockCgroup) New(arg0 string, arg1 *specs_go.LinuxResources) (cgroups.Cgroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "New", arg0, arg1) + ret0, _ := ret[0].(cgroups.Cgroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// New indicates an expected call of New +func (mr *MockCgroupMockRecorder) New(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockCgroup)(nil).New), arg0, arg1) +} + +// OOMEventFD mocks base method +func (m *MockCgroup) OOMEventFD() (uintptr, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OOMEventFD") + ret0, _ := ret[0].(uintptr) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OOMEventFD indicates an expected call of OOMEventFD +func (mr *MockCgroupMockRecorder) OOMEventFD() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OOMEventFD", reflect.TypeOf((*MockCgroup)(nil).OOMEventFD)) +} + +// Processes mocks base method +func (m *MockCgroup) Processes(arg0 cgroups.Name, arg1 bool) ([]cgroups.Process, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Processes", arg0, arg1) + ret0, _ := ret[0].([]cgroups.Process) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Processes indicates an expected call of Processes +func (mr *MockCgroupMockRecorder) Processes(arg0, arg1 
interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Processes", reflect.TypeOf((*MockCgroup)(nil).Processes), arg0, arg1) +} + +// Stat mocks base method +func (m *MockCgroup) Stat(arg0 ...cgroups.ErrorHandler) (*cgroups.Stats, error) { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Stat", varargs...) + ret0, _ := ret[0].(*cgroups.Stats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stat indicates an expected call of Stat +func (mr *MockCgroupMockRecorder) Stat(arg0 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockCgroup)(nil).Stat), arg0...) +} + +// State mocks base method +func (m *MockCgroup) State() cgroups.State { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "State") + ret0, _ := ret[0].(cgroups.State) + return ret0 +} + +// State indicates an expected call of State +func (mr *MockCgroupMockRecorder) State() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockCgroup)(nil).State)) +} + +// Subsystems mocks base method +func (m *MockCgroup) Subsystems() []cgroups.Subsystem { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subsystems") + ret0, _ := ret[0].([]cgroups.Subsystem) + return ret0 +} + +// Subsystems indicates an expected call of Subsystems +func (mr *MockCgroupMockRecorder) Subsystems() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subsystems", reflect.TypeOf((*MockCgroup)(nil).Subsystems)) +} + +// Thaw mocks base method +func (m *MockCgroup) Thaw() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Thaw") + ret0, _ := ret[0].(error) + return ret0 +} + +// Thaw indicates an expected call of Thaw +func (mr *MockCgroupMockRecorder) Thaw() *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Thaw", reflect.TypeOf((*MockCgroup)(nil).Thaw)) +} + +// Update mocks base method +func (m *MockCgroup) Update(arg0 *specs_go.LinuxResources) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update +func (mr *MockCgroupMockRecorder) Update(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockCgroup)(nil).Update), arg0) +} diff --git a/agent/taskresource/cgroup/control/factory/mock_factory/mock_cgroup_factory_linux.go b/agent/taskresource/cgroup/control/factory/mock_factory/mock_cgroup_factory_linux.go new file mode 100644 index 00000000000..e299f57a00a --- /dev/null +++ b/agent/taskresource/cgroup/control/factory/mock_factory/mock_cgroup_factory_linux.go @@ -0,0 +1,80 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory (interfaces: CgroupFactory) + +// Package mock_factory is a generated GoMock package. 
+package mock_factory + +import ( + reflect "reflect" + + cgroups "github.com/containerd/cgroups" + gomock "github.com/golang/mock/gomock" + specs_go "github.com/opencontainers/runtime-spec/specs-go" +) + +// MockCgroupFactory is a mock of CgroupFactory interface +type MockCgroupFactory struct { + ctrl *gomock.Controller + recorder *MockCgroupFactoryMockRecorder +} + +// MockCgroupFactoryMockRecorder is the mock recorder for MockCgroupFactory +type MockCgroupFactoryMockRecorder struct { + mock *MockCgroupFactory +} + +// NewMockCgroupFactory creates a new mock instance +func NewMockCgroupFactory(ctrl *gomock.Controller) *MockCgroupFactory { + mock := &MockCgroupFactory{ctrl: ctrl} + mock.recorder = &MockCgroupFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockCgroupFactory) EXPECT() *MockCgroupFactoryMockRecorder { + return m.recorder +} + +// Load mocks base method +func (m *MockCgroupFactory) Load(arg0 cgroups.Hierarchy, arg1 cgroups.Path) (cgroups.Cgroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Load", arg0, arg1) + ret0, _ := ret[0].(cgroups.Cgroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Load indicates an expected call of Load +func (mr *MockCgroupFactoryMockRecorder) Load(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockCgroupFactory)(nil).Load), arg0, arg1) +} + +// New mocks base method +func (m *MockCgroupFactory) New(arg0 cgroups.Hierarchy, arg1 cgroups.Path, arg2 *specs_go.LinuxResources) (cgroups.Cgroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "New", arg0, arg1, arg2) + ret0, _ := ret[0].(cgroups.Cgroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// New indicates an expected call of New +func (mr *MockCgroupFactoryMockRecorder) New(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockCgroupFactory)(nil).New), arg0, arg1, arg2) +} diff --git a/agent/taskresource/cgroup/control/generate_mocks_linux.go b/agent/taskresource/cgroup/control/generate_mocks_linux.go new file mode 100644 index 00000000000..30a8d0f1a38 --- /dev/null +++ b/agent/taskresource/cgroup/control/generate_mocks_linux.go @@ -0,0 +1,18 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package control + +//go:generate mockgen -destination=mock_control/mock_cgroup_control_linux.go -copyright_file=../../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control Control diff --git a/agent/taskresource/cgroup/control/init_linux.go b/agent/taskresource/cgroup/control/init_linux.go new file mode 100644 index 00000000000..27b71718ddc --- /dev/null +++ b/agent/taskresource/cgroup/control/init_linux.go @@ -0,0 +1,36 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package control + +import ( + "github.com/aws/amazon-ecs-agent/agent/config" + + "github.com/cihub/seelog" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Init is used to setup the cgroup root for ecs +func (c *control) Init() error { + seelog.Infof("Creating root ecs cgroup: %s", config.DefaultTaskCgroupPrefix) + + // Build cgroup spec + cgroupSpec := &Spec{ + Root: config.DefaultTaskCgroupPrefix, + Specs: &specs.LinuxResources{}, + } + _, err := c.Create(cgroupSpec) + return err +} diff --git a/agent/taskresource/cgroup/control/init_linux_test.go b/agent/taskresource/cgroup/control/init_linux_test.go new file mode 100644 index 00000000000..4a4837975a8 --- /dev/null +++ b/agent/taskresource/cgroup/control/init_linux_test.go @@ -0,0 +1,54 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package control + +import ( + "errors" + "testing" + + mock_cgroups "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock" + "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory/mock_factory" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestInitHappyCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroup := mock_cgroups.NewMockCgroup(ctrl) + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockCgroup, nil) + + control := newControl(mockCgroupFactory) + + assert.NoError(t, control.Init()) +} + +func TestInitErrorCase(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCgroupFactory := mock_factory.NewMockCgroupFactory(ctrl) + + mockCgroupFactory.EXPECT().New(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("cgroup error")) + + control := newControl(mockCgroupFactory) + + assert.Error(t, control.Init()) +} diff --git a/agent/taskresource/cgroup/control/mock_control/mock_cgroup_control_linux.go b/agent/taskresource/cgroup/control/mock_control/mock_cgroup_control_linux.go new file mode 100644 index 00000000000..690ac9c6d39 --- /dev/null +++ b/agent/taskresource/cgroup/control/mock_control/mock_cgroup_control_linux.go @@ -0,0 +1,107 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control (interfaces: Control) + +// Package mock_control is a generated GoMock package. +package mock_control + +import ( + reflect "reflect" + + control "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" + cgroups "github.com/containerd/cgroups" + gomock "github.com/golang/mock/gomock" +) + +// MockControl is a mock of Control interface +type MockControl struct { + ctrl *gomock.Controller + recorder *MockControlMockRecorder +} + +// MockControlMockRecorder is the mock recorder for MockControl +type MockControlMockRecorder struct { + mock *MockControl +} + +// NewMockControl creates a new mock instance +func NewMockControl(ctrl *gomock.Controller) *MockControl { + mock := &MockControl{ctrl: ctrl} + mock.recorder = &MockControlMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControl) EXPECT() *MockControlMockRecorder { + return m.recorder +} + +// Create mocks base method +func (m *MockControl) Create(arg0 *control.Spec) (cgroups.Cgroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0) + ret0, _ := ret[0].(cgroups.Cgroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Create indicates an expected call of Create +func (mr *MockControlMockRecorder) Create(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockControl)(nil).Create), arg0) +} + +// Exists mocks base method +func (m *MockControl) Exists(arg0 string) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exists", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Exists indicates an expected call of Exists +func (mr *MockControlMockRecorder) Exists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", 
reflect.TypeOf((*MockControl)(nil).Exists), arg0) +} + +// Init mocks base method +func (m *MockControl) Init() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init") + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init +func (mr *MockControlMockRecorder) Init() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockControl)(nil).Init)) +} + +// Remove mocks base method +func (m *MockControl) Remove(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove +func (mr *MockControlMockRecorder) Remove(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockControl)(nil).Remove), arg0) +} diff --git a/agent/taskresource/cgroup/control/types_linux.go b/agent/taskresource/cgroup/control/types_linux.go new file mode 100644 index 00000000000..a4edf238f99 --- /dev/null +++ b/agent/taskresource/cgroup/control/types_linux.go @@ -0,0 +1,37 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
package control

import (
	"github.com/containerd/cgroups"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// Spec captures the abstraction for creating a new
// cgroup based on root and the runtime specifications
type Spec struct {
	// Root is the cgroup path
	Root string
	// Specs are for all the linux resources including cpu, memory, etc...
	Specs *specs.LinuxResources
}

// Control abstracts the lifecycle operations on a cgroup: creation from a
// Spec, removal and existence checks by path, and one-time setup of the
// root cgroup via Init.
type Control interface {
	// Create builds a new cgroup from the validated spec and returns it.
	Create(cgroupSpec *Spec) (cgroups.Cgroup, error)
	// Remove deletes the cgroup at cgroupPath.
	Remove(cgroupPath string) error
	// Exists reports whether a cgroup is present at cgroupPath.
	Exists(cgroupPath string) bool
	// Init performs initial setup of the root cgroup hierarchy.
	Init() error
}
+ +package credentialspec + +import ( + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + s3factory "github.com/aws/amazon-ecs-agent/agent/s3/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/pkg/errors" +) + +// CredentialSpecResource is the abstraction for credentialspec resources +type CredentialSpecResource struct { +} + +// NewCredentialSpecResource creates a new CredentialSpecResource object +func NewCredentialSpecResource(taskARN, region string, + credentialSpecs []string, + executionCredentialsID string, + credentialsManager credentials.Manager, + ssmClientCreator ssmfactory.SSMClientCreator, + s3ClientCreator s3factory.S3ClientCreator) (*CredentialSpecResource, error) { + return nil, errors.New("not supported") +} + +func (cs *CredentialSpecResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (cs *CredentialSpecResource) GetTerminalReason() string { + return "undefined" +} + +// GetDesiredStatus safely returns the desired status of the task +func (cs *CredentialSpecResource) GetDesiredStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetDesiredStatus safely sets the desired status of the resource +func (cs *CredentialSpecResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { +} + +// DesiredTerminal returns true if the credentialspec's desired status is REMOVED +func (cs *CredentialSpecResource) DesiredTerminal() 
bool { + return false +} + +// KnownCreated returns true if the credentialspec's known status is CREATED +func (cs *CredentialSpecResource) KnownCreated() bool { + return false +} + +// TerminalStatus returns the last transition state of credentialspec +func (cs *CredentialSpecResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. +func (cs *CredentialSpecResource) NextKnownState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// ApplyTransition calls the function required to move to the specified status +func (cs *CredentialSpecResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + return errors.New("not implemented") +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (cs *CredentialSpecResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetKnownStatus safely sets the currently known status of the resource +func (cs *CredentialSpecResource) SetKnownStatus(status resourcestatus.ResourceStatus) { +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (cs *CredentialSpecResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + return false +} + +// GetKnownStatus safely returns the currently known status of the task +func (cs *CredentialSpecResource) GetKnownStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// StatusString returns the string of the cgroup resource status +func (cs *CredentialSpecResource) StatusString(status resourcestatus.ResourceStatus) string { + return "undefined" +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (cs *CredentialSpecResource) SetCreatedAt(createdAt time.Time) { +} + +// 
GetCreatedAt sets the timestamp for resource's creation time +func (cs *CredentialSpecResource) GetCreatedAt() time.Time { + return time.Time{} +} + +// GetName safely returns the name of the resource +func (cs *CredentialSpecResource) GetName() string { + return "undefined" +} + +// Create is used to create all the credentialspec resources for a given task +func (cs *CredentialSpecResource) Create() error { + return errors.New("not implemented") +} + +func (cs *CredentialSpecResource) GetTargetMapping(credSpecInput string) (string, error) { + return "", errors.New("not implemented") +} + +// Cleanup removes the credentialspec created for the task +func (cs *CredentialSpecResource) Cleanup() error { + return errors.New("not implemented") +} + +// MarshalJSON serialises the CredentialSpecResourceJSON struct to JSON +func (cs *CredentialSpecResource) MarshalJSON() ([]byte, error) { + return nil, errors.New("not implemented") +} + +// UnmarshalJSON deserialises the raw JSON to a CredentialSpecResourceJSON struct +func (cs *CredentialSpecResource) UnmarshalJSON(b []byte) error { + return errors.New("not implemented") +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (cs *CredentialSpecResource) GetAppliedStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +func (cs *CredentialSpecResource) DependOnTaskNetwork() bool { + return false +} + +func (cs *CredentialSpecResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (cs *CredentialSpecResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/credentialspec/credentialspec_windows.go b/agent/taskresource/credentialspec/credentialspec_windows.go new file mode 100644 index 00000000000..34511d8c8ac --- /dev/null +++ 
b/agent/taskresource/credentialspec/credentialspec_windows.go @@ -0,0 +1,665 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentialspec + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/s3" + s3factory "github.com/aws/amazon-ecs-agent/agent/s3/factory" + "github.com/aws/amazon-ecs-agent/agent/ssm" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + tempFileName = "temp_file" + // filePerm is the permission for the credentialspec file. 
+ filePerm = 0644 + + s3DownloadTimeout = 30 * time.Second + + // Environment variables to setup resource location + envProgramData = "ProgramData" + dockerCredentialSpecDataDir = "docker/credentialspecs" +) + +// CredentialSpecResource is the abstraction for credentialspec resources +type CredentialSpecResource struct { + taskARN string + region string + executionCredentialsID string + credentialsManager credentials.Manager + ioutil ioutilwrapper.IOUtil + createdAt time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatus is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. This is + // used while progressing resource states in progressTask() of task manager + appliedStatus resourcestatus.ResourceStatus + resourceStatusToTransitionFunction map[resourcestatus.ResourceStatus]func() error + // terminalReason should be set for resource creation failures. This ensures + // the resource object carries some context for why provisioning failed. + terminalReason string + terminalReasonOnce sync.Once + // ssmClientCreator is a factory interface that creates new SSM clients. This is + // needed mostly for testing. + ssmClientCreator ssmfactory.SSMClientCreator + // s3ClientCreator is a factory interface that creates new S3 clients. This is + // needed mostly for testing. 
+ s3ClientCreator s3factory.S3ClientCreator + // credentialSpecResourceLocation is the location for all the tasks' credentialspec artifacts + credentialSpecResourceLocation string + // required for processing credentialspecs + // Example item := credentialspec:file://credentialspec.json + requiredCredentialSpecs []string + // map to transform credentialspec values, key is a input credentialspec + // Examples: + // * key := credentialspec:file://credentialspec.json, value := credentialspec=file://credentialspec.json + // * key := credentialspec:s3ARN, value := credentialspec=file://CredentialSpecResourceLocation/s3_taskARN_fileName.json + // * key := credentialspec:ssmARN, value := credentialspec=file://CredentialSpecResourceLocation/ssm_taskARN_param.json + CredSpecMap map[string]string + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// NewCredentialSpecResource creates a new CredentialSpecResource object +func NewCredentialSpecResource(taskARN, region string, + credentialSpecs []string, + executionCredentialsID string, + credentialsManager credentials.Manager, + ssmClientCreator ssmfactory.SSMClientCreator, + s3ClientCreator s3factory.S3ClientCreator) (*CredentialSpecResource, error) { + + s := &CredentialSpecResource{ + taskARN: taskARN, + region: region, + requiredCredentialSpecs: credentialSpecs, + credentialsManager: credentialsManager, + executionCredentialsID: executionCredentialsID, + ssmClientCreator: ssmClientCreator, + s3ClientCreator: s3ClientCreator, + CredSpecMap: make(map[string]string), + ioutil: ioutilwrapper.NewIOUtil(), + } + + err := s.setCredentialSpecResourceLocation() + if err != nil { + return nil, err + } + + s.initStatusToTransition() + return s, nil +} + +func (cs *CredentialSpecResource) initStatusToTransition() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(CredentialSpecCreated): cs.Create, + } + 
cs.resourceStatusToTransitionFunction = resourceStatusToTransitionFunction +} + +func (cs *CredentialSpecResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + + cs.credentialsManager = resourceFields.CredentialsManager + cs.ssmClientCreator = resourceFields.SSMClientCreator + cs.s3ClientCreator = resourceFields.S3ClientCreator + cs.initStatusToTransition() +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (cs *CredentialSpecResource) GetTerminalReason() string { + return cs.terminalReason +} + +func (cs *CredentialSpecResource) setTerminalReason(reason string) { + cs.terminalReasonOnce.Do(func() { + seelog.Debugf("credentialspec resource: setting terminal reason for credentialspec resource in task: [%s]", cs.taskARN) + cs.terminalReason = reason + }) +} + +// GetDesiredStatus safely returns the desired status of the task +func (cs *CredentialSpecResource) GetDesiredStatus() resourcestatus.ResourceStatus { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.desiredStatusUnsafe +} + +// SetDesiredStatus safely sets the desired status of the resource +func (cs *CredentialSpecResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + cs.lock.Lock() + defer cs.lock.Unlock() + + cs.desiredStatusUnsafe = status +} + +// DesiredTerminal returns true if the credentialspec's desired status is REMOVED +func (cs *CredentialSpecResource) DesiredTerminal() bool { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.desiredStatusUnsafe == resourcestatus.ResourceStatus(CredentialSpecRemoved) +} + +// KnownCreated returns true if the credentialspec's known status is CREATED +func (cs *CredentialSpecResource) KnownCreated() bool { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.knownStatusUnsafe == resourcestatus.ResourceStatus(CredentialSpecCreated) +} + +// TerminalStatus returns the last 
transition state of credentialspec +func (cs *CredentialSpecResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(CredentialSpecRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. +func (cs *CredentialSpecResource) NextKnownState() resourcestatus.ResourceStatus { + return cs.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (cs *CredentialSpecResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := cs.resourceStatusToTransitionFunction[nextState] + if !ok { + err := errors.Errorf("resource [%s]: transition to %s impossible", cs.GetName(), + cs.StatusString(nextState)) + cs.setTerminalReason(err.Error()) + return err + } + + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (cs *CredentialSpecResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(CredentialSpecCreated) +} + +// SetKnownStatus safely sets the currently known status of the resource +func (cs *CredentialSpecResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + cs.lock.Lock() + defer cs.lock.Unlock() + + cs.knownStatusUnsafe = status + cs.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (cs *CredentialSpecResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if cs.appliedStatus == resourcestatus.ResourceStatus(CredentialSpecStatusNone) { + return + } + + // Check if the resource transition has already finished + if cs.appliedStatus <= knownStatus { + cs.appliedStatus = resourcestatus.ResourceStatus(CredentialSpecStatusNone) + } +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (cs 
*CredentialSpecResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + cs.lock.Lock() + defer cs.lock.Unlock() + + if cs.appliedStatus != resourcestatus.ResourceStatus(CredentialSpecStatusNone) { + // return false to indicate the set operation failed + return false + } + + cs.appliedStatus = status + return true +} + +// GetKnownStatus safely returns the currently known status of the task +func (cs *CredentialSpecResource) GetKnownStatus() resourcestatus.ResourceStatus { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.knownStatusUnsafe +} + +// StatusString returns the string of the cgroup resource status +func (cs *CredentialSpecResource) StatusString(status resourcestatus.ResourceStatus) string { + return CredentialSpecStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (cs *CredentialSpecResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + cs.lock.Lock() + defer cs.lock.Unlock() + + cs.createdAt = createdAt +} + +// GetCreatedAt sets the timestamp for resource's creation time +func (cs *CredentialSpecResource) GetCreatedAt() time.Time { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.createdAt +} + +// getRequiredCredentialSpecs returns the requiredCredentialSpecs field of credentialspec task resource +func (cs *CredentialSpecResource) getRequiredCredentialSpecs() []string { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.requiredCredentialSpecs +} + +// getExecutionCredentialsID returns the execution role's credential ID +func (cs *CredentialSpecResource) getExecutionCredentialsID() string { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.executionCredentialsID +} + +// GetName safely returns the name of the resource +func (cs *CredentialSpecResource) GetName() string { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return ResourceName +} + +// Create is used to create all the credentialspec resources for a given task 
+func (cs *CredentialSpecResource) Create() error { + var err error + var iamCredentials credentials.IAMRoleCredentials + + executionCredentials, ok := cs.credentialsManager.GetTaskCredentials(cs.getExecutionCredentialsID()) + if ok { + iamCredentials = executionCredentials.GetIAMRoleCredentials() + } + + for _, credSpecStr := range cs.requiredCredentialSpecs { + credSpecSplit := strings.SplitAfterN(credSpecStr, "credentialspec:", 2) + if len(credSpecSplit) != 2 { + seelog.Errorf("Invalid credentialspec: %s", credSpecStr) + continue + } + credSpecValue := credSpecSplit[1] + + if strings.HasPrefix(credSpecValue, "file://") { + err = cs.handleCredentialspecFile(credSpecStr) + if err != nil { + seelog.Errorf("Failed to handle the credentialspec file: %v", err) + cs.setTerminalReason(err.Error()) + return err + } + continue + } + + parsedARN, err := arn.Parse(credSpecValue) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + parsedARNService := parsedARN.Service + if parsedARNService == "s3" { + err = cs.handleS3CredentialspecFile(credSpecStr, credSpecValue, iamCredentials) + if err != nil { + seelog.Errorf("Failed to handle the credentialspec file from s3: %v", err) + cs.setTerminalReason(err.Error()) + return err + } + } else if parsedARNService == "ssm" { + err = cs.handleSSMCredentialspecFile(credSpecStr, credSpecValue, iamCredentials) + if err != nil { + seelog.Errorf("Failed to handle the credentialspec file from SSM: %v", err) + cs.setTerminalReason(err.Error()) + return err + } + } else { + err = errors.New("unsupported credentialspec ARN, only s3/ssm ARNs are valid") + cs.setTerminalReason(err.Error()) + return err + } + } + + return nil +} + +func (cs *CredentialSpecResource) handleCredentialspecFile(credentialspec string) error { + credSpecSplit := strings.SplitAfterN(credentialspec, "credentialspec:", 2) + if len(credSpecSplit) != 2 { + seelog.Errorf("Invalid credentialspec: %s", credentialspec) + return errors.New("invalid 
credentialspec file specification") + } + credSpecFile := credSpecSplit[1] + + if !strings.HasPrefix(credSpecFile, "file://") { + return errors.New("invalid credentialspec file specification") + } + + dockerHostconfigSecOptCredSpec := strings.Replace(credentialspec, "credentialspec:", "credentialspec=", 1) + cs.updateCredSpecMapping(credentialspec, dockerHostconfigSecOptCredSpec) + + return nil +} + +func (cs *CredentialSpecResource) handleS3CredentialspecFile(originalCredentialspec, credentialspecS3ARN string, iamCredentials credentials.IAMRoleCredentials) error { + if iamCredentials == (credentials.IAMRoleCredentials{}) { + err := errors.New("credentialspec resource: unable to find execution role credentials") + cs.setTerminalReason(err.Error()) + return err + } + + parsedARN, err := arn.Parse(credentialspecS3ARN) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + bucket, key, err := s3.ParseS3ARN(credentialspecS3ARN) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + s3Client, err := cs.s3ClientCreator.NewS3ClientForBucket(bucket, cs.region, iamCredentials) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + resourceBase := filepath.Base(parsedARN.Resource) + taskArnSplit := strings.Split(cs.taskARN, "/") + length := len(taskArnSplit) + if length < 2 { + return errors.New("Failed to retrieve taskId from taskArn.") + } + + localCredSpecFilePath := fmt.Sprintf("%s\\s3_%v_%s", cs.credentialSpecResourceLocation, taskArnSplit[length-1], resourceBase) + err = cs.writeS3File(func(file oswrapper.File) error { + return s3.DownloadFile(bucket, key, s3DownloadTimeout, file, s3Client) + }, localCredSpecFilePath) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + dockerHostconfigSecOptCredSpec := fmt.Sprintf("credentialspec=file://%s", filepath.Base(localCredSpecFilePath)) + cs.updateCredSpecMapping(originalCredentialspec, dockerHostconfigSecOptCredSpec) + + return nil +} + +func 
(cs *CredentialSpecResource) handleSSMCredentialspecFile(originalCredentialspec, credentialspecSSMARN string, iamCredentials credentials.IAMRoleCredentials) error { + if iamCredentials == (credentials.IAMRoleCredentials{}) { + err := errors.New("credentialspec resource: unable to find execution role credentials") + cs.setTerminalReason(err.Error()) + return err + } + + parsedARN, err := arn.Parse(credentialspecSSMARN) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + ssmClient := cs.ssmClientCreator.NewSSMClient(cs.region, iamCredentials) + + ssmParam := filepath.Base(parsedARN.Resource) + ssmParams := []string{ssmParam} + + ssmParamMap, err := ssm.GetParametersFromSSM(ssmParams, ssmClient) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + ssmParamData := ssmParamMap[ssmParam] + taskArnSplit := strings.Split(cs.taskARN, "/") + length := len(taskArnSplit) + if length < 2 { + return errors.New("Failed to retrieve taskId from taskArn.") + } + localCredSpecFilePath := fmt.Sprintf("%s\\ssm_%v_%s", cs.credentialSpecResourceLocation, taskArnSplit[length-1], ssmParam) + err = cs.writeSSMFile(ssmParamData, localCredSpecFilePath) + if err != nil { + cs.setTerminalReason(err.Error()) + return err + } + + dockerHostconfigSecOptCredSpec := fmt.Sprintf("credentialspec=file://%s", filepath.Base(localCredSpecFilePath)) + cs.updateCredSpecMapping(originalCredentialspec, dockerHostconfigSecOptCredSpec) + + return nil +} + +var rename = os.Rename + +func (cs *CredentialSpecResource) writeS3File(writeFunc func(file oswrapper.File) error, filePath string) error { + temp, err := cs.ioutil.TempFile(cs.credentialSpecResourceLocation, tempFileName) + if err != nil { + return err + } + + err = writeFunc(temp) + if err != nil { + return err + } + + err = temp.Close() + if err != nil { + seelog.Errorf("Error while closing the handle to file %s: %v", temp.Name(), err) + return err + } + + err = rename(temp.Name(), filePath) + if err != nil { + 
seelog.Errorf("Error while renaming the temporary file %s: %v", temp.Name(), err) + return err + } + return nil +} + +func (cs *CredentialSpecResource) writeSSMFile(ssmParamData, filePath string) error { + return cs.ioutil.WriteFile(filePath, []byte(ssmParamData), filePerm) +} + +func (cs *CredentialSpecResource) getCredSpecMap() map[string]string { + cs.lock.RLock() + defer cs.lock.RUnlock() + + return cs.CredSpecMap +} + +func (cs *CredentialSpecResource) GetTargetMapping(credSpecInput string) (string, error) { + cs.lock.RLock() + defer cs.lock.RUnlock() + + targetCredSpecMapping, ok := cs.CredSpecMap[credSpecInput] + if !ok { + return "", errors.New("unable to obtain credentialspec mapping") + } + + return targetCredSpecMapping, nil +} + +func (cs *CredentialSpecResource) updateCredSpecMapping(credSpecInput, targetCredSpec string) { + cs.lock.Lock() + defer cs.lock.Unlock() + + seelog.Debugf("Updating credentialspec mapping for %s with %s", credSpecInput, targetCredSpec) + cs.CredSpecMap[credSpecInput] = targetCredSpec +} + +// Cleanup removes the credentialspec created for the task +func (cs *CredentialSpecResource) Cleanup() error { + cs.clearCredentialSpec() + return nil +} + +var remove = os.Remove + +// clearCredentialSpec cycles through the collection of credentialspec data and +// removes them from the task +func (cs *CredentialSpecResource) clearCredentialSpec() { + cs.lock.Lock() + defer cs.lock.Unlock() + + for key, value := range cs.CredSpecMap { + if strings.HasPrefix(key, "credentialspec:file://") { + seelog.Debugf("Skipping cleanup of local credentialspec file option: %s", key) + continue + } + // Split credentialspec to obtain local file-name + credSpecSplit := strings.SplitAfterN(value, "credentialspec=file://", 2) + if len(credSpecSplit) != 2 { + seelog.Warnf("Unable to parse target credentialspec: %s", value) + continue + } + localCredentialSpecFile := credSpecSplit[1] + localCredentialSpecFilePath := 
filepath.Join(cs.credentialSpecResourceLocation, localCredentialSpecFile) + err := remove(localCredentialSpecFilePath) + if err != nil { + seelog.Warnf("Unable to clear local credential spec file %s for task %s", localCredentialSpecFile, cs.taskARN) + } + delete(cs.CredSpecMap, key) + } +} + +// CredentialSpecResourceJSON is the json representation of the credentialspec resource +type CredentialSpecResourceJSON struct { + TaskARN string `json:"taskARN"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *CredentialSpecStatus `json:"desiredStatus"` + KnownStatus *CredentialSpecStatus `json:"knownStatus"` + RequiredCredentialSpecs []string `json:"credentialSpecResources"` + CredSpecMap map[string]string `json:"CredSpecMap"` + ExecutionCredentialsID string `json:"executionCredentialsID"` +} + +// MarshalJSON serialises the CredentialSpecResourceJSON struct to JSON +func (cs *CredentialSpecResource) MarshalJSON() ([]byte, error) { + if cs == nil { + return nil, errors.New("credential specresource is nil") + } + createdAt := cs.GetCreatedAt() + return json.Marshal(CredentialSpecResourceJSON{ + TaskARN: cs.taskARN, + CreatedAt: &createdAt, + DesiredStatus: func() *CredentialSpecStatus { + desiredState := cs.GetDesiredStatus() + s := CredentialSpecStatus(desiredState) + return &s + }(), + KnownStatus: func() *CredentialSpecStatus { + knownState := cs.GetKnownStatus() + s := CredentialSpecStatus(knownState) + return &s + }(), + RequiredCredentialSpecs: cs.getRequiredCredentialSpecs(), + CredSpecMap: cs.getCredSpecMap(), + ExecutionCredentialsID: cs.getExecutionCredentialsID(), + }) +} + +// UnmarshalJSON deserialises the raw JSON to a CredentialSpecResourceJSON struct +func (cs *CredentialSpecResource) UnmarshalJSON(b []byte) error { + temp := CredentialSpecResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + if temp.DesiredStatus != nil { + cs.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if 
temp.KnownStatus != nil { + cs.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() { + cs.SetCreatedAt(*temp.CreatedAt) + } + if temp.RequiredCredentialSpecs != nil { + cs.requiredCredentialSpecs = temp.RequiredCredentialSpecs + } + if temp.CredSpecMap != nil { + cs.CredSpecMap = temp.CredSpecMap + } + cs.taskARN = temp.TaskARN + cs.executionCredentialsID = temp.ExecutionCredentialsID + + return nil +} + +func (cs *CredentialSpecResource) setCredentialSpecResourceLocation() error { + // TODO: Use registry to setup credentialspec resource location + // This should always be available on Windows instances + programDataDir := os.Getenv(envProgramData) + if programDataDir != "" { + // Sample resource location: C:\ProgramData\docker\credentialspecs + cs.credentialSpecResourceLocation = filepath.Join(programDataDir, dockerCredentialSpecDataDir) + } + + if cs.credentialSpecResourceLocation == "" { + return errors.New("credentialspec resource location not available") + } + + return nil +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (cs *CredentialSpecResource) GetAppliedStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +func (cs *CredentialSpecResource) DependOnTaskNetwork() bool { + return false +} + +func (cs *CredentialSpecResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (cs *CredentialSpecResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/credentialspec/credentialspec_windows_test.go b/agent/taskresource/credentialspec/credentialspec_windows_test.go new file mode 100644 index 00000000000..bab2f318c81 --- /dev/null +++ b/agent/taskresource/credentialspec/credentialspec_windows_test.go @@ -0,0 +1,717 
@@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentialspec + +import ( + "encoding/json" + "os" + "testing" + "time" + + "github.com/pkg/errors" + + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_s3_factory "github.com/aws/amazon-ecs-agent/agent/s3/factory/mocks" + mock_s3 "github.com/aws/amazon-ecs-agent/agent/s3/mocks" + mock_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + mock_ssmiface "github.com/aws/amazon-ecs-agent/agent/ssm/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + taskARN = "arn:aws:ecs:us-west-2:123456789012:task/12345-678901234-56789" + executionCredentialsID = "exec-creds-id" + testTempFile = "testtempfile" +) + +func mockRename() func() { + rename = func(oldpath, newpath string) error { + return nil + } + + return func() { + rename = os.Rename + } +} + 
+func TestClearCredentialSpecDataHappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credSpecMapData := map[string]string{ + "credentialspec:file://localfilePath.json": "credentialspec=file://localfilePath.json", + "credentialspec:s3ARN": "credentialspec=file://s3_taskARN_fileName.json", + "credentialspec:ssmARN": "credentialspec=file://ssm_taskARN_fileName.json", + } + + credentialSpecResourceLocation := "C:/ProgramData/docker/credentialspecs/" + credspecRes := &CredentialSpecResource{ + CredSpecMap: credSpecMapData, + credentialSpecResourceLocation: credentialSpecResourceLocation, + } + + err := credspecRes.Cleanup() + assert.NoError(t, err) + assert.Equal(t, 1, len(credspecRes.CredSpecMap)) +} + +func TestClearCredentialSpecDataErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credSpecMapData := map[string]string{ + "credentialspec:file://localfilePath.json": "credentialspec=file://localfilePath.json", + "credentialspec:ssmARN": "credentialspec=file://ssm_taskARN_fileName.json", + } + + credentialSpecResourceLocation := "C:/ProgramData/docker/credentialspecs/" + credspecRes := &CredentialSpecResource{ + CredSpecMap: credSpecMapData, + credentialSpecResourceLocation: credentialSpecResourceLocation, + } + + remove = func(name string) error { + return errors.New("test-error") + } + defer func() { + remove = os.Remove + }() + + err := credspecRes.Cleanup() + assert.NoError(t, err) + assert.Equal(t, 1, len(credspecRes.CredSpecMap)) +} + +func TestInitialize(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + credspecRes := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + } + 
credspecRes.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + assert.NotNil(t, credspecRes.credentialsManager) + assert.NotNil(t, credspecRes.ssmClientCreator) + assert.NotNil(t, credspecRes.s3ClientCreator) + assert.NotNil(t, credspecRes.resourceStatusToTransitionFunction) +} + +func TestMarshalUnmarshalJSON(t *testing.T) { + testCredSpec := "credentialspec:file://test.json" + targetCredSpec := "credentialspec=file://test.json" + + requiredCredentialSpecs := []string{testCredSpec} + + credSpecMap := map[string]string{} + credSpecMap[testCredSpec] = targetCredSpec + + credspecIn := &CredentialSpecResource{ + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + createdAt: time.Now(), + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredentialSpecs, + CredSpecMap: credSpecMap, + } + + bytes, err := json.Marshal(credspecIn) + require.NoError(t, err) + + credSpecOut := &CredentialSpecResource{} + err = json.Unmarshal(bytes, credSpecOut) + require.NoError(t, err) + assert.Equal(t, credspecIn.taskARN, credSpecOut.taskARN) + assert.WithinDuration(t, credspecIn.createdAt, credSpecOut.createdAt, time.Microsecond) + assert.Equal(t, credspecIn.desiredStatusUnsafe, credSpecOut.desiredStatusUnsafe) + assert.Equal(t, credspecIn.knownStatusUnsafe, credSpecOut.knownStatusUnsafe) + assert.Equal(t, credspecIn.executionCredentialsID, credSpecOut.executionCredentialsID) + assert.Equal(t, len(credspecIn.requiredCredentialSpecs), len(credSpecOut.requiredCredentialSpecs)) + assert.Equal(t, len(credspecIn.CredSpecMap), len(credSpecOut.CredSpecMap)) + assert.EqualValues(t, credspecIn.CredSpecMap, credSpecOut.CredSpecMap) +} + +func 
TestHandleCredentialspecFile(t *testing.T) { + fileCredentialSpec := "credentialspec:file://test.json" + expectedFileCredentialSpec := "credentialspec=file://test.json" + + requiredCredSpec := []string{fileCredentialSpec} + + cs := &CredentialSpecResource{ + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + } + + err := cs.handleCredentialspecFile(fileCredentialSpec) + assert.NoError(t, err) + + targetCredentialSpecFile, err := cs.GetTargetMapping(fileCredentialSpec) + assert.NoError(t, err) + assert.Equal(t, expectedFileCredentialSpec, targetCredentialSpecFile) +} + +func TestHandleCredentialspecFileErr(t *testing.T) { + fileCredentialSpec := "credentialspec:invalid-file://test.json" + requiredCredSpec := []string{fileCredentialSpec} + + cs := &CredentialSpecResource{ + requiredCredentialSpecs: requiredCredSpec, + } + + err := cs.handleCredentialspecFile(fileCredentialSpec) + assert.Error(t, err) +} + +func TestHandleSSMCredentialspecFile(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + credentialSpecSSMARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + expectedFileCredentialSpec := "credentialspec=file://ssm_12345-678901234-56789_test" + + requiredCredSpec := []string{ssmCredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: 
taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + testData := "test-cred-spec-data" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(testData), + }, + }, + } + + gomock.InOrder( + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + mockIO.EXPECT().WriteFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), + ) + + err := cs.handleSSMCredentialspecFile(ssmCredentialSpec, credentialSpecSSMARN, iamCredentials) + assert.NoError(t, err) + + targetCredentialSpecFile, err := cs.GetTargetMapping(ssmCredentialSpec) + assert.NoError(t, err) + assert.Equal(t, expectedFileCredentialSpec, targetCredentialSpecFile) +} + +func TestHandleSSMCredentialspecFileARNParseErr(t *testing.T) { + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecSSMARN := "arn:aws:ssm:parameter/test" + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + + var termReason string + cs := &CredentialSpecResource{ + terminalReason: termReason, + } + + err := cs.handleSSMCredentialspecFile(ssmCredentialSpec, credentialSpecSSMARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandleSSMCredentialspecFileGetSSMParamErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + 
mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecSSMARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + + requiredCredSpec := []string{ssmCredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + gomock.InOrder( + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(nil, errors.New("test-error")).Times(1), + ) + + err := cs.handleSSMCredentialspecFile(ssmCredentialSpec, credentialSpecSSMARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandleSSMCredentialspecFileIOErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecSSMARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + + requiredCredSpec := []string{ssmCredentialSpec} + + cs := 
&CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + testData := "test-cred-spec-data" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(testData), + }, + }, + } + + gomock.InOrder( + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + mockIO.EXPECT().WriteFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("test-error")), + ) + + err := cs.handleSSMCredentialspecFile(ssmCredentialSpec, credentialSpecSSMARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandlerSSMCredentialspecCredMissingErr(t *testing.T) { + cs := &CredentialSpecResource{} + + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + credentialSpecSSMARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + iamCredentials := credentials.IAMRoleCredentials{} + + err := cs.handleSSMCredentialspecFile(ssmCredentialSpec, credentialSpecSSMARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandleS3CredentialspecFile(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := 
mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockFile := mock_oswrapper.NewMockFile() + mockS3Client := mock_s3.NewMockS3Client(ctrl) + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecS3ARN := "arn:aws:s3:::bucket_name/test" + s3CredentialSpec := "credentialspec:arn:aws:s3:::bucket_name/test" + expectedFileCredentialSpec := "credentialspec=file://s3_12345-678901234-56789_test" + + requiredCredSpec := []string{s3CredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + defer mockRename()() + mockFile.(*mock_oswrapper.MockFile).NameImpl = func() string { + return testTempFile + } + gomock.InOrder( + s3ClientCreator.EXPECT().NewS3ClientForBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Client, nil), + mockIO.EXPECT().TempFile(gomock.Any(), gomock.Any()).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), nil), + ) + + err := cs.handleS3CredentialspecFile(s3CredentialSpec, credentialSpecS3ARN, iamCredentials) + assert.NoError(t, err) + + targetCredentialSpecFile, err := cs.GetTargetMapping(s3CredentialSpec) + assert.NoError(t, err) + assert.Equal(t, expectedFileCredentialSpec, targetCredentialSpecFile) +} + +func TestHandleS3CredentialspecFileARNParseErr(t *testing.T) { + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecS3ARN := "arn:aws:/test" + s3CredentialSpec := 
"credentialspec:arn:/test" + + var termReason string + cs := &CredentialSpecResource{ + terminalReason: termReason, + } + + err := cs.handleS3CredentialspecFile(s3CredentialSpec, credentialSpecS3ARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandleS3CredentialspecFileS3ClientErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockS3Client := mock_s3.NewMockS3Client(ctrl) + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + credentialSpecS3ARN := "arn:aws:s3:::bucket_name/test" + s3CredentialSpec := "credentialspec:arn:aws:s3:::bucket_name/test" + + var termReason string + cs := &CredentialSpecResource{ + terminalReason: termReason, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + gomock.InOrder( + s3ClientCreator.EXPECT().NewS3ClientForBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Client, errors.New("test-error")), + ) + + err := cs.handleS3CredentialspecFile(s3CredentialSpec, credentialSpecS3ARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandleS3CredentialspecFileWriteErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockFile := mock_oswrapper.NewMockFile() + mockS3Client := mock_s3.NewMockS3Client(ctrl) + + iamCredentials := credentials.IAMRoleCredentials{ + 
CredentialsID: "test-cred-id", + } + credentialSpecS3ARN := "arn:aws:s3:::bucket_name/test" + s3CredentialSpec := "credentialspec:arn:aws:s3:::bucket_name/test" + + requiredCredSpec := []string{s3CredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + mockFile.(*mock_oswrapper.MockFile).NameImpl = func() string { + return testTempFile + } + + rename = func(oldpath, newpath string) error { + return errors.New("test-error") + } + defer func() { + rename = os.Rename + }() + + gomock.InOrder( + s3ClientCreator.EXPECT().NewS3ClientForBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Client, nil), + mockIO.EXPECT().TempFile(gomock.Any(), gomock.Any()).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), nil), + ) + + err := cs.handleS3CredentialspecFile(s3CredentialSpec, credentialSpecS3ARN, iamCredentials) + assert.Error(t, err) +} + +func TestHandlerS3CredentialspecCredMissingErr(t *testing.T) { + cs := &CredentialSpecResource{} + + credentialSpecS3ARN := "arn:aws:s3:::bucket_name/test" + s3CredentialSpec := "credentialspec:arn:aws:s3:::bucket_name/test" + iamCredentials := credentials.IAMRoleCredentials{} + + err := cs.handleS3CredentialspecFile(s3CredentialSpec, credentialSpecS3ARN, iamCredentials) + assert.Error(t, err) +} + +func TestCreateSSM(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) 
+ ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + + ssmCredentialSpec := "credentialspec:arn:aws:ssm:us-west-2:123456789012:parameter/test" + requiredCredSpec := []string{ssmCredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + testData := "test-cred-spec-data" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(testData), + }, + }, + } + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + mockIO.EXPECT().WriteFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), + ) + + assert.NoError(t, cs.Create()) +} + +func TestCreateS3(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + 
s3ClientCreator := mock_s3_factory.NewMockS3ClientCreator(ctrl) + mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockFile := mock_oswrapper.NewMockFile() + mockS3Client := mock_s3.NewMockS3Client(ctrl) + + s3CredentialSpec := "credentialspec:arn:aws:s3:::bucket_name/test" + + requiredCredSpec := []string{s3CredentialSpec} + + cs := &CredentialSpecResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + taskARN: taskARN, + ioutil: mockIO, + } + cs.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + S3ClientCreator: s3ClientCreator, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + defer mockRename()() + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + s3ClientCreator.EXPECT().NewS3ClientForBucket(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockS3Client, nil), + mockIO.EXPECT().TempFile(gomock.Any(), gomock.Any()).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), nil), + ) + + assert.NoError(t, cs.Create()) +} + +func TestCreateFile(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + + fileCredentialSpec := "credentialspec:file://test.json" + requiredCredSpec := []string{fileCredentialSpec} + + cs := &CredentialSpecResource{ + credentialsManager: credentialsManager, + requiredCredentialSpecs: requiredCredSpec, + CredSpecMap: map[string]string{}, + } + + creds := 
credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + ) + + assert.NoError(t, cs.Create()) +} + +func TestGetName(t *testing.T) { + cs := &CredentialSpecResource{} + + assert.Equal(t, ResourceName, cs.GetName()) +} + +func TestGetTargetMapping(t *testing.T) { + inputCredSpec, outputCredSpec := "credentialspec:file://test.json", "credentialspec=file://test.json" + credSpecMapData := map[string]string{ + inputCredSpec: outputCredSpec, + } + + cs := &CredentialSpecResource{ + CredSpecMap: credSpecMapData, + } + + targetCredSpec, err := cs.GetTargetMapping(inputCredSpec) + assert.NoError(t, err) + assert.Equal(t, outputCredSpec, targetCredSpec) +} + +func TestGetTargetMappingErr(t *testing.T) { + cs := &CredentialSpecResource{ + CredSpecMap: map[string]string{}, + } + + targetCredSpec, err := cs.GetTargetMapping("testcredspec") + assert.Error(t, err) + assert.Empty(t, targetCredSpec) +} diff --git a/agent/taskresource/credentialspec/credentialspecstatus.go b/agent/taskresource/credentialspec/credentialspecstatus.go new file mode 100644 index 00000000000..09695c549c2 --- /dev/null +++ b/agent/taskresource/credentialspec/credentialspecstatus.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package credentialspec + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type CredentialSpecStatus resourcestatus.ResourceStatus + +const ( + // is the zero state of a task resource + CredentialSpecStatusNone CredentialSpecStatus = iota + // represents a task resource which has been created + CredentialSpecCreated + // represents a task resource which has been cleaned up + CredentialSpecRemoved +) + +var CredentialSpecStatusMap = map[string]CredentialSpecStatus{ + "NONE": CredentialSpecStatusNone, + "CREATED": CredentialSpecCreated, + "REMOVED": CredentialSpecRemoved, +} + +// StatusString returns a human readable string representation of this object +func (cs CredentialSpecStatus) String() string { + for k, v := range CredentialSpecStatusMap { + if v == cs { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (cs *CredentialSpecStatus) MarshalJSON() ([]byte, error) { + if cs == nil { + return nil, errors.New("credentialspec resource status is nil") + } + return []byte(`"` + cs.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (cs *CredentialSpecStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *cs = CredentialSpecStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *cs = CredentialSpecStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := CredentialSpecStatusMap[string(strStatus)] + if !ok { + *cs = CredentialSpecStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *cs = stat + return nil +} diff --git a/agent/taskresource/credentialspec/credentialspecstatus_test.go b/agent/taskresource/credentialspec/credentialspecstatus_test.go new file mode 100644 index 
00000000000..7c01877ee42 --- /dev/null +++ b/agent/taskresource/credentialspec/credentialspecstatus_test.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentialspec + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InCredentialSpecStatus CredentialSpecStatus + OutCredentialSpecStatus string + }{ + { + Name: "ToStringCredentialSpecStatusNone", + InCredentialSpecStatus: CredentialSpecStatusNone, + OutCredentialSpecStatus: "NONE", + }, + { + Name: "ToStringCredentialSpecCreated", + InCredentialSpecStatus: CredentialSpecCreated, + OutCredentialSpecStatus: "CREATED", + }, + { + Name: "ToStringCredentialSpecRemoved", + InCredentialSpecStatus: CredentialSpecRemoved, + OutCredentialSpecStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutCredentialSpecStatus, c.InCredentialSpecStatus.String()) + }) + } +} + +func TestMarshalNilCredentialSpecStatus(t *testing.T) { + var status *CredentialSpecStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalCredentialSpecStatus(t *testing.T) { + cases := []struct { + Name string + InCredentialSpecStatus CredentialSpecStatus + OutCredentialSpecStatus string + }{ + { + Name: "ToStringCredentialSpecStatusNone", + InCredentialSpecStatus: 
CredentialSpecStatusNone, + OutCredentialSpecStatus: "\"NONE\"", + }, + { + Name: "ToStringCredentialSpecCreated", + InCredentialSpecStatus: CredentialSpecCreated, + OutCredentialSpecStatus: "\"CREATED\"", + }, + { + Name: "ToStringCredentialSpecRemoved", + InCredentialSpecStatus: CredentialSpecRemoved, + OutCredentialSpecStatus: "\"REMOVED\"", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InCredentialSpecStatus.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, c.OutCredentialSpecStatus, string(bytes[:])) + }) + } + +} + +func TestUnmarshalSSMSecretStatus(t *testing.T) { + cases := []struct { + Name string + InCredentialSpecStatus string + OutCredentialSpecStatus CredentialSpecStatus + ShouldError bool + }{ + { + Name: "UnmarshallCredentialSpecStatusNoneNone", + InCredentialSpecStatus: "\"NONE\"", + OutCredentialSpecStatus: CredentialSpecStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallCredentialSpecCreated", + InCredentialSpecStatus: "\"CREATED\"", + OutCredentialSpecStatus: CredentialSpecCreated, + ShouldError: false, + }, + { + Name: "UnmarshallCredentialSpecRemoved", + InCredentialSpecStatus: "\"REMOVED\"", + OutCredentialSpecStatus: CredentialSpecRemoved, + ShouldError: false, + }, + { + Name: "UnmarshallCredentialSpecStatusNull", + InCredentialSpecStatus: "null", + OutCredentialSpecStatus: CredentialSpecStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallCredentialSpecStatusNonString", + InCredentialSpecStatus: "1", + OutCredentialSpecStatus: CredentialSpecStatusNone, + ShouldError: true, + }, + { + Name: "UnmarshallCredentialSpecUnmappedStatus", + InCredentialSpecStatus: "\"LOL\"", + OutCredentialSpecStatus: CredentialSpecStatusNone, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + + var status CredentialSpecStatus + err := json.Unmarshal([]byte(c.InCredentialSpecStatus), &status) + + if c.ShouldError { + assert.Error(t, err) + } 
else { + + assert.NoError(t, err) + assert.Equal(t, c.OutCredentialSpecStatus, status) + } + }) + } +} diff --git a/agent/taskresource/credentialspec/types.go b/agent/taskresource/credentialspec/types.go new file mode 100644 index 00000000000..14fbde728ba --- /dev/null +++ b/agent/taskresource/credentialspec/types.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package credentialspec + +const ( + // ResourceName is the name of the credentialspec resource + ResourceName = "credentialspec" +) diff --git a/agent/taskresource/envFiles/envfile.go b/agent/taskresource/envFiles/envfile.go new file mode 100644 index 00000000000..fe56ad67410 --- /dev/null +++ b/agent/taskresource/envFiles/envfile.go @@ -0,0 +1,611 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package envFiles + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/s3" + "github.com/aws/amazon-ecs-agent/agent/s3/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/utils/bufiowrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + ResourceName = "envfile" + envFileDirPath = "envfiles" + envTempFilePrefix = "tmp_env" + envFileExtension = ".env" + commentIndicator = "#" + envVariableDelimiter = "=" + + renameBackoffMin = 500 * time.Millisecond + renameBackoffMax = 5 * time.Second + renameBackoffJitter = 0.0 + renameBackoffMultiple = 1.5 + renameRetryAttempts = 5 + + s3DownloadTimeout = 30 * time.Second +) + +// EnvironmentFileResource represents envfile as a task resource +// these environment files are retrieved from s3 +type EnvironmentFileResource struct { + cluster string + taskARN string + region string + resourceDir string // path to store env var files + containerName string + + // env file related attributes + environmentFilesSource []apicontainer.EnvironmentFile // list of env file objects + + executionCredentialsID string + credentialsManager credentials.Manager + s3ClientCreator factory.S3ClientCreator + ioutil ioutilwrapper.IOUtil + bufio bufiowrapper.Bufio + + // Fields for the common functionality of task resource. Access to these fields are protected by lock. 
+ createdAtUnsafe time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + appliedStatusUnsafe resourcestatus.ResourceStatus + statusToTransitions map[resourcestatus.ResourceStatus]func() error + terminalReasonUnsafe string + terminalReasonOnce sync.Once + lock sync.RWMutex +} + +// NewEnvironmentFileResource creates a new EnvironmentFileResource object +func NewEnvironmentFileResource(cluster, taskARN, region, dataDir, containerName string, envfiles []apicontainer.EnvironmentFile, + credentialsManager credentials.Manager, executionCredentialsID string) (*EnvironmentFileResource, error) { + envfileResource := &EnvironmentFileResource{ + cluster: cluster, + taskARN: taskARN, + region: region, + containerName: containerName, + environmentFilesSource: envfiles, + ioutil: ioutilwrapper.NewIOUtil(), + bufio: bufiowrapper.NewBufio(), + s3ClientCreator: factory.NewS3ClientCreator(), + executionCredentialsID: executionCredentialsID, + credentialsManager: credentialsManager, + } + + taskARNFields := strings.Split(taskARN, "/") + taskID := taskARNFields[len(taskARNFields)-1] + // we save envfiles for a task to path: /var/lib/ecs/data/envfiles/cluster_name/task_id/ + envfileResource.resourceDir = filepath.Join(dataDir, envFileDirPath, cluster, taskID) + + envfileResource.initStatusToTransition() + return envfileResource, nil +} + +// Initialize initializes the EnvironmentFileResource +func (envfile *EnvironmentFileResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + envfile.lock.Lock() + + envfile.initStatusToTransition() + envfile.credentialsManager = resourceFields.CredentialsManager + envfile.s3ClientCreator = factory.NewS3ClientCreator() + envfile.ioutil = ioutilwrapper.NewIOUtil() + envfile.bufio = bufiowrapper.NewBufio() + envfile.lock.Unlock() + + // if task isn't in 'created' status and desired status is 'running', + // 
reset the resource status to 'NONE' so we always retrieve the data + // this is in case agent crashes + if taskKnownStatus < status.TaskCreated && taskDesiredStatus <= status.TaskRunning { + envfile.SetKnownStatus(resourcestatus.ResourceStatusNone) + } +} + +func (envfile *EnvironmentFileResource) initStatusToTransition() { + resourceStatusToTransitionFunc := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(EnvFileCreated): envfile.Create, + } + envfile.statusToTransitions = resourceStatusToTransitionFunc +} + +// SetDesiredStatus safely sets the desired status of the resource +func (envfile *EnvironmentFileResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + envfile.lock.Lock() + defer envfile.lock.Unlock() + + envfile.desiredStatusUnsafe = status +} + +// GetDesiredStatus safely returns the desired status of the resource +func (envfile *EnvironmentFileResource) GetDesiredStatus() resourcestatus.ResourceStatus { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.desiredStatusUnsafe +} + +func (envfile *EnvironmentFileResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if envfile.appliedStatusUnsafe == resourcestatus.ResourceStatus(EnvFileStatusNone) { + return + } + + // only apply if resource transition has already finished + if envfile.appliedStatusUnsafe <= knownStatus { + envfile.appliedStatusUnsafe = resourcestatus.ResourceStatus(EnvFileStatusNone) + } +} + +// SetKnownStatus safely sets the currently known status of the resource +func (envfile *EnvironmentFileResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + envfile.lock.Lock() + defer envfile.lock.Unlock() + + envfile.knownStatusUnsafe = status + envfile.updateAppliedStatusUnsafe(status) +} + +// GetKnownStatus safely returns the currently known status of the resource +func (envfile *EnvironmentFileResource) GetKnownStatus() resourcestatus.ResourceStatus { + envfile.lock.RLock() + defer 
envfile.lock.RUnlock() + + return envfile.knownStatusUnsafe +} + +// SetCreatedAt safely sets the timestamp for the resource's creation time +func (envfile *EnvironmentFileResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + + envfile.lock.Lock() + defer envfile.lock.Unlock() + + envfile.createdAtUnsafe = createdAt +} + +// GetCreatedAt safely returns the timestamp for the resource's creation time +func (envfile *EnvironmentFileResource) GetCreatedAt() time.Time { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.createdAtUnsafe +} + +// GetName returns the name fo the resource +func (envfile *EnvironmentFileResource) GetName() string { + return ResourceName +} + +// DesiredTerminal returns true if the resource's desired status is REMOVED +func (envfile *EnvironmentFileResource) DesiredTerminal() bool { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.desiredStatusUnsafe == resourcestatus.ResourceStatus(EnvironmentFileStatus(EnvFileRemoved)) +} + +// KnownCreated returns true if the resource's known status is CREATED +func (envfile *EnvironmentFileResource) KnownCreated() bool { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.knownStatusUnsafe == resourcestatus.ResourceStatus(EnvFileCreated) +} + +// TerminalStatus returns the last transition state of the resource +func (envfile *EnvironmentFileResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(EnvFileRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState` +func (envfile *EnvironmentFileResource) NextKnownState() resourcestatus.ResourceStatus { + return envfile.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (envfile *EnvironmentFileResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := 
envfile.statusToTransitions[nextState] + if !ok { + return errors.Errorf("resource [%s]: transition to %s impossible", envfile.GetName(), + envfile.StatusString(nextState)) + } + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (envfile *EnvironmentFileResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(EnvFileCreated) +} + +// SetAppliedStatus sets the applied status of the resource and returns whether +// the resource is already in a transition +func (envfile *EnvironmentFileResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + envfile.lock.Lock() + defer envfile.lock.Unlock() + + if envfile.appliedStatusUnsafe != resourcestatus.ResourceStatus(EnvFileStatusNone) { + // set operation failed, return false + return false + } + + envfile.appliedStatusUnsafe = status + return true +} + +// StatusString returns the string representation of the resource status +func (envfile *EnvironmentFileResource) StatusString(status resourcestatus.ResourceStatus) string { + return EnvironmentFileStatus(status).String() +} + +// GetTerminalReason returns an error string to propagate up through to to +// state change messages +func (envfile *EnvironmentFileResource) GetTerminalReason() string { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.terminalReasonUnsafe +} + +func (envfile *EnvironmentFileResource) setTerminalReason(reason string) { + envfile.lock.Lock() + defer envfile.lock.Unlock() + + envfile.terminalReasonOnce.Do(func() { + seelog.Infof("envfile resource: setting terminal reason for task: [%s]", envfile.taskARN) + envfile.terminalReasonUnsafe = reason + }) +} + +// Create performs resource creation. 
This retrieves env file contents concurrently +// from s3 and writes them to disk +func (envfile *EnvironmentFileResource) Create() error { + seelog.Debugf("Creating envfile resource.") + // make sure it has the task execution role + executionCredentials, ok := envfile.credentialsManager.GetTaskCredentials(envfile.executionCredentialsID) + if !ok { + err := errors.New("environment file resource: unable to find execution role credentials") + envfile.setTerminalReason(err.Error()) + return err + } + + var wg sync.WaitGroup + errorEvents := make(chan error, len(envfile.environmentFilesSource)) + + iamCredentials := executionCredentials.GetIAMRoleCredentials() + for _, envfileSource := range envfile.environmentFilesSource { + wg.Add(1) + // if we support types besides S3 ARN, we will need to add filtering before the below method is called + // call an additional go routine per env file + go envfile.downloadEnvfileFromS3(envfileSource.Value, iamCredentials, &wg, errorEvents) + } + + wg.Wait() + close(errorEvents) + + if len(errorEvents) > 0 { + var terminalReasons []string + for err := range errorEvents { + terminalReasons = append(terminalReasons, err.Error()) + } + + errorString := strings.Join(terminalReasons, ";") + envfile.setTerminalReason(errorString) + return errors.New(errorString) + } + + return nil +} + +var mkdirAll = os.MkdirAll + +// createEnvfileDirectory creates the directory that we will be writing the +// envfile to - needs to be called for each different envfile +func (envfile *EnvironmentFileResource) createEnvfileDirectory(bucket, key string) error { + // create directories to include bucket and key but not the actual resulting file + keyDir := filepath.Dir(key) + envfileDir := filepath.Join(envfile.resourceDir, bucket, keyDir) + err := mkdirAll(envfileDir, os.ModePerm) + if err != nil { + return errors.Wrapf(err, "unable to create envfiles directory with bucket %s", bucket) + } + + return nil +} + +func (envfile *EnvironmentFileResource) 
downloadEnvfileFromS3(envFilePath string, iamCredentials credentials.IAMRoleCredentials, + wg *sync.WaitGroup, errorEvents chan error) { + defer wg.Done() + + bucket, key, err := s3.ParseS3ARN(envFilePath) + if err != nil { + errorEvents <- fmt.Errorf("unable to parse bucket and key from s3 ARN specified in environmentFile %s, error: %v", envFilePath, err) + return + } + + s3Client, err := envfile.s3ClientCreator.NewS3ClientForBucket(bucket, envfile.region, iamCredentials) + if err != nil { + errorEvents <- fmt.Errorf("unable to initialize s3 client for bucket %s, error: %v", bucket, err) + return + } + + err = envfile.createEnvfileDirectory(bucket, key) + if err != nil { + errorEvents <- fmt.Errorf("unable to initialize envfile resource directory, error: %v", err) + return + } + + seelog.Debugf("Downloading envfile with bucket name %v and key name %v", bucket, key) + // we save envfiles to path: /var/lib/ecs/data/envfiles/cluster_name/task_id/${s3bucketname}/${s3filename.env} + downloadPath := filepath.Join(envfile.resourceDir, bucket, key) + err = envfile.writeEnvFile(func(file oswrapper.File) error { + return s3.DownloadFile(bucket, key, s3DownloadTimeout, file, s3Client) + }, downloadPath) + + if err != nil { + errorEvents <- fmt.Errorf("unable to download env file with key %s from bucket %s, error: %v", key, bucket, err) + return + } + + seelog.Debugf("Downloaded envfile from s3 and saved to %s", downloadPath) +} + +var rename = os.Rename + +func (envfile *EnvironmentFileResource) writeEnvFile(writeFunc func(file oswrapper.File) error, fullPathName string) error { + // File moves (renaming) are atomic while file writes are not + // so we write to a temp file before renaming to actual file + // multiple programs calling TempFile will not reference the same file + // so this should be ok to be called by multiple go routines + tmpFile, err := envfile.ioutil.TempFile(envfile.resourceDir, envTempFilePrefix) + if err != nil { + seelog.Errorf("Something went wrong 
trying to create a temp file with prefix %s", envTempFilePrefix) + return err + } + // defer tmpFile.Close() in case something goes wrong and we don't actually hit the manual Close call + // the source for golang *os.File.Close() shows that subsequent calls to Close() after the first + // will do nothing except return syscall.EINVAL, so it is ok to make potentially multiple .Close calls + defer tmpFile.Close() + + if err = writeFunc(tmpFile); err != nil { + seelog.Errorf("Something went wrong trying to write to tmpFile %s", tmpFile.Name()) + return err + } + + err = tmpFile.Close() + if err != nil { + seelog.Errorf("Error while closing temporary file %s created for envfile resource", tmpFile.Name()) + return err + } + + backoff := retry.NewExponentialBackoff(renameBackoffMin, renameBackoffMax, renameBackoffJitter, renameBackoffMultiple) + err = retry.RetryNWithBackoff(backoff, renameRetryAttempts, func() error { + return rename(tmpFile.Name(), fullPathName) + }) + if err != nil { + seelog.Errorf("Something went wrong when trying to rename envfile from %s to %s", tmpFile.Name(), fullPathName) + return err + } + + return nil +} + +var removeAll = os.RemoveAll + +// Cleanup removes env file directory for the task +func (envfile *EnvironmentFileResource) Cleanup() error { + err := removeAll(envfile.resourceDir) + if err != nil { + return fmt.Errorf("unable to remove envfile resource directory %s: %v", envfile.resourceDir, err) + } + + seelog.Infof("Removed envfile resource directory at %s", envfile.resourceDir) + return nil +} + +type environmentFileResourceJSON struct { + TaskARN string `json:"taskARN"` + ContainerName string `json:"containerName"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *EnvironmentFileStatus `json:"desiredStatus"` + KnownStatus *EnvironmentFileStatus `json:"knownStatus"` + EnvironmentFilesSource []apicontainer.EnvironmentFile `json:"environmentFilesSource"` + ExecutionCredentialsID string `json:"executionCredentialsID"` 
+} + +// MarshalJSON serializes the EnvironmentFileResource struct to JSON +func (envfile *EnvironmentFileResource) MarshalJSON() ([]byte, error) { + if envfile == nil { + return nil, errors.New("envfile resource is nil") + } + createdAt := envfile.GetCreatedAt() + return json.Marshal(environmentFileResourceJSON{ + TaskARN: envfile.taskARN, + ContainerName: envfile.containerName, + CreatedAt: &createdAt, + DesiredStatus: func() *EnvironmentFileStatus { + desiredState := envfile.GetDesiredStatus() + envfileStatus := EnvironmentFileStatus(desiredState) + return &envfileStatus + }(), + KnownStatus: func() *EnvironmentFileStatus { + knownState := envfile.GetKnownStatus() + envfileStatus := EnvironmentFileStatus(knownState) + return &envfileStatus + }(), + EnvironmentFilesSource: envfile.environmentFilesSource, + ExecutionCredentialsID: envfile.executionCredentialsID, + }) + +} + +// UnmarshalJSON deserializes the raw JSON to an EnvironmentFileResource struct +func (envfile *EnvironmentFileResource) UnmarshalJSON(b []byte) error { + envfileJson := environmentFileResourceJSON{} + + if err := json.Unmarshal(b, &envfileJson); err != nil { + return err + } + + if envfileJson.DesiredStatus != nil { + envfile.SetDesiredStatus(resourcestatus.ResourceStatus(*envfileJson.DesiredStatus)) + } + + if envfileJson.KnownStatus != nil { + envfile.SetKnownStatus(resourcestatus.ResourceStatus(*envfileJson.KnownStatus)) + } + + if envfileJson.CreatedAt != nil && !envfileJson.CreatedAt.IsZero() { + envfile.SetCreatedAt(*envfileJson.CreatedAt) + } + + if envfileJson.EnvironmentFilesSource != nil { + envfile.environmentFilesSource = envfileJson.EnvironmentFilesSource + } + + envfile.taskARN = envfileJson.TaskARN + envfile.containerName = envfileJson.ContainerName + envfile.executionCredentialsID = envfileJson.ExecutionCredentialsID + + return nil +} + +// GetContainerName returns the container that this resource is created for +func (envfile *EnvironmentFileResource) GetContainerName() 
string { + return envfile.containerName +} + +// this method converts EnvironmentFile objects into the path that it would've been downloaded at +// and returns the list +func (envfile *EnvironmentFileResource) convertEnvfileToPath() ([]string, error) { + var envfileLocations []string + + for _, envfileObj := range envfile.environmentFilesSource { + bucket, key, err := s3.ParseS3ARN(envfileObj.Value) + if err != nil { + seelog.Errorf("unable to parse bucket and key from s3 ARN specified in environmentFile %s", envfileObj.Value) + return nil, err + } + + downloadPath := filepath.Join(envfile.resourceDir, bucket, key) + envfileLocations = append(envfileLocations, downloadPath) + } + + return envfileLocations, nil +} + +// ReadEnvVarsFromEnvFiles reads the environment files that have been downloaded +// and puts them into a list of maps +func (envfile *EnvironmentFileResource) ReadEnvVarsFromEnvfiles() ([]map[string]string, error) { + var envVarsPerEnvfile []map[string]string + envfileLocations, err := envfile.convertEnvfileToPath() + if err != nil { + return nil, err + } + + for _, envfilePath := range envfileLocations { + envVars, err := envfile.readEnvVarsFromFile(envfilePath) + if err != nil { + return nil, err + } + envVarsPerEnvfile = append(envVarsPerEnvfile, envVars) + } + + return envVarsPerEnvfile, nil +} + +var open = func(name string) (oswrapper.File, error) { + return os.Open(name) +} + +func (envfile *EnvironmentFileResource) readEnvVarsFromFile(envfilePath string) (map[string]string, error) { + file, err := open(envfilePath) + if err != nil { + seelog.Errorf("Unable to open environment file at %s to read the variables", envfilePath) + return nil, err + } + defer file.Close() + + scanner := envfile.bufio.NewScanner(file) + envVars := make(map[string]string) + lineNum := 0 + for scanner.Scan() { + lineNum += 1 + line := scanner.Text() + // if line starts with a #, ignore + if strings.HasPrefix(line, commentIndicator) { + continue + } + // only read the 
line that has "=" + if strings.Contains(line, envVariableDelimiter) { + variables := strings.SplitN(line, envVariableDelimiter, 2) + // verify that there is at least a character on each side + if len(variables[0]) > 0 && len(variables[1]) > 0 { + envVars[variables[0]] = variables[1] + } else { + seelog.Infof("Not applying line %d of environment file %s, key or value is empty.", lineNum, envfilePath) + } + } + } + + if err := scanner.Err(); err != nil { + seelog.Errorf("Something went wrong trying to read environment file at %s", envfilePath) + return nil, err + } + + return envVars, nil +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (envfile *EnvironmentFileResource) GetAppliedStatus() resourcestatus.ResourceStatus { + envfile.lock.RLock() + defer envfile.lock.RUnlock() + + return envfile.appliedStatusUnsafe +} + +// DependOnTaskNetwork shows whether the resource creation needs task network setup beforehand +func (envfile *EnvironmentFileResource) DependOnTaskNetwork() bool { + return false +} + +// BuildContainerDependency adds a new dependency container and its satisfied status +func (envfile *EnvironmentFileResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +// GetContainerDependencies returns dependent containers for a status +func (envfile *EnvironmentFileResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/envFiles/envfile_test.go b/agent/taskresource/envFiles/envfile_test.go new file mode 100644 index 00000000000..daa4c3b47d0 --- /dev/null +++ b/agent/taskresource/envFiles/envfile_test.go @@ -0,0 +1,441 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package envFiles + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/api/container" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_factory "github.com/aws/amazon-ecs-agent/agent/s3/factory/mocks" + mock_s3 "github.com/aws/amazon-ecs-agent/agent/s3/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + mock_bufio "github.com/aws/amazon-ecs-agent/agent/utils/bufiowrapper/mocks" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + executionCredentialsID = "exec-creds-id" + region = "us-west-2" + cluster = "testCluster" + taskARN = "arn:aws:ecs:us-east-2:01234567891011:task/testCluster/abcdef12-34gh-idkl-mno5-pqrst6789" + resourceDir = "resourceDir" + iamRoleARN = "iamRoleARN" + accessKeyId = "accessKey" + secretAccessKey = "secret" + s3Bucket = "s3Bucket" + s3Path = "path" + string(filepath.Separator) + "to" + string(filepath.Separator) + "envfile" + s3File = "s3key.env" + s3Key = s3Path + string(filepath.Separator) + s3File + tempFile = "tmp_file" +) + +func setup(t *testing.T) 
(oswrapper.File, *mock_ioutilwrapper.MockIOUtil, + *mock_credentials.MockManager, *mock_factory.MockS3ClientCreator, *mock_s3.MockS3Client, func()) { + ctrl := gomock.NewController(t) + + mockFile := mock_oswrapper.NewMockFile() + mockIOUtil := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockCredentialsManager := mock_credentials.NewMockManager(ctrl) + mockS3ClientCreator := mock_factory.NewMockS3ClientCreator(ctrl) + mockS3Client := mock_s3.NewMockS3Client(ctrl) + + return mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, ctrl.Finish +} + +func newMockEnvfileResource(envfileLocations []container.EnvironmentFile, mockCredentialsManager *mock_credentials.MockManager, + mockS3ClientCreator *mock_factory.MockS3ClientCreator, + mockIOUtil *mock_ioutilwrapper.MockIOUtil) *EnvironmentFileResource { + return &EnvironmentFileResource{ + cluster: cluster, + taskARN: taskARN, + region: region, + resourceDir: resourceDir, + environmentFilesSource: envfileLocations, + executionCredentialsID: executionCredentialsID, + credentialsManager: mockCredentialsManager, + s3ClientCreator: mockS3ClientCreator, + ioutil: mockIOUtil, + } +} + +func sampleEnvironmentFile(value, envfileType string) container.EnvironmentFile { + return container.EnvironmentFile{ + Value: value, + Type: envfileType, + } +} + +func TestInitializeFileEnvResource(t *testing.T) { + _, _, mockCredentialsManager, _, _, done := setup(t) + defer done() + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, nil, nil) + envfileResource.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + CredentialsManager: mockCredentialsManager, + }, + }, status.TaskRunning, status.TaskRunning) + + assert.NotNil(t, envfileResource.statusToTransitions) + assert.Equal(t, 1, 
len(envfileResource.statusToTransitions)) + assert.NotNil(t, envfileResource.credentialsManager) + assert.NotNil(t, envfileResource.s3ClientCreator) + assert.NotNil(t, envfileResource.ioutil) +} + +func TestCreateWithEnvVarFile(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + creds := credentials.TaskIAMRoleCredentials{ + ARN: iamRoleARN, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: accessKeyId, + SecretAccessKey: secretAccessKey, + }, + } + + rename = func(oldpath, newpath string) error { + return nil + } + defer func() { + rename = os.Rename + }() + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket(s3Bucket, region, creds.IAMRoleCredentials).Return(mockS3Client, nil), + mockIOUtil.EXPECT().TempFile(resourceDir, gomock.Any()).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Do( + func(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput) { + assert.Equal(t, s3Bucket, aws.StringValue(input.Bucket)) + assert.Equal(t, s3Key, aws.StringValue(input.Key)) + }).Return(int64(0), nil), + ) + + assert.NoError(t, envfileResource.Create()) +} + +func TestCreateWithInvalidS3ARN(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s", s3File), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + creds := 
credentials.TaskIAMRoleCredentials{ + ARN: iamRoleARN, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: accessKeyId, + SecretAccessKey: secretAccessKey, + }, + } + + mockCredentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + + assert.Error(t, envfileResource.Create()) + assert.NotEmpty(t, envfileResource.terminalReasonUnsafe) + assert.Contains(t, envfileResource.GetTerminalReason(), "unable to parse bucket and key from s3 ARN specified in environmentFile") +} + +func TestCreateUnableToRetrieveDataFromS3(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + creds := credentials.TaskIAMRoleCredentials{ + ARN: iamRoleARN, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: accessKeyId, + SecretAccessKey: secretAccessKey, + }, + } + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket(s3Bucket, region, creds.IAMRoleCredentials).Return(mockS3Client, nil), + mockIOUtil.EXPECT().TempFile(resourceDir, gomock.Any()).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), errors.New("error response")), + ) + + assert.Error(t, envfileResource.Create()) + assert.NotEmpty(t, envfileResource.terminalReasonUnsafe) + assert.Contains(t, envfileResource.GetTerminalReason(), "error response") +} + +func TestCreateUnableToCreateTmpFile(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + envfiles := []container.EnvironmentFile{ + 
sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + creds := credentials.TaskIAMRoleCredentials{ + ARN: iamRoleARN, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: accessKeyId, + SecretAccessKey: secretAccessKey, + }, + } + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket(s3Bucket, region, creds.IAMRoleCredentials).Return(mockS3Client, nil), + mockIOUtil.EXPECT().TempFile(resourceDir, gomock.Any()).Return(nil, errors.New("error response")), + ) + + assert.Error(t, envfileResource.Create()) + assert.NotEmpty(t, envfileResource.terminalReasonUnsafe) + assert.Contains(t, envfileResource.GetTerminalReason(), "error response") +} + +func TestCreateRenameFileError(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + creds := credentials.TaskIAMRoleCredentials{ + ARN: iamRoleARN, + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: accessKeyId, + SecretAccessKey: secretAccessKey, + }, + } + + rename = func(oldpath, newpath string) error { + return errors.New("error response") + } + defer func() { + rename = os.Rename + }() + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket(s3Bucket, region, creds.IAMRoleCredentials).Return(mockS3Client, nil), + mockIOUtil.EXPECT().TempFile(resourceDir, gomock.Any()).Return(mockFile, nil), + 
mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), nil), + ) + + assert.Error(t, envfileResource.Create()) + assert.NotEmpty(t, envfileResource.terminalReasonUnsafe) + assert.Contains(t, envfileResource.GetTerminalReason(), "error response") +} + +func TestEnvFileCleanupSuccess(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + + assert.NoError(t, envfileResource.Cleanup()) +} + +func TestEnvFileCleanupResourceDirRemoveFail(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, mockCredentialsManager, mockS3ClientCreator, mockIOUtil) + + removeAll = func(path string) error { + return errors.New("error response") + } + defer func() { + removeAll = os.RemoveAll + }() + + assert.Error(t, envfileResource.Cleanup()) +} + +func TestReadEnvVarsFromEnvfiles(t *testing.T) { + mockFile, mockIOUtil, _, _, _, done := setup(t) + defer done() + + ctrl := gomock.NewController(t) + mockBufio := mock_bufio.NewMockBufio(ctrl) + mockScanner := mock_bufio.NewMockScanner(ctrl) + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, nil, nil, mockIOUtil) + envfileResource.bufio = mockBufio + + envfileContentLine1 := "key1=value" + envFileContentLine2 := "key2=val1=val2" + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return mockFile, nil + 
} + defer func() { + open = tempOpen + }() + gomock.InOrder( + mockBufio.EXPECT().NewScanner(mockFile).Return(mockScanner), + mockScanner.EXPECT().Scan().Return(true), + mockScanner.EXPECT().Text().Return(envfileContentLine1), + mockScanner.EXPECT().Scan().Return(true), + mockScanner.EXPECT().Text().Return(envFileContentLine2), + mockScanner.EXPECT().Scan().Return(false), + mockScanner.EXPECT().Err().Return(nil), + ) + + envVarsList, err := envfileResource.ReadEnvVarsFromEnvfiles() + + assert.Nil(t, err) + assert.Equal(t, 1, len(envVarsList)) + assert.Equal(t, "value", envVarsList[0]["key1"]) + assert.Equal(t, "val1=val2", envVarsList[0]["key2"]) +} + +func TestReadEnvVarsCommentFromEnvfiles(t *testing.T) { + mockFile, mockIOUtil, _, _, _, done := setup(t) + defer done() + + ctrl := gomock.NewController(t) + mockBufio := mock_bufio.NewMockBufio(ctrl) + mockScanner := mock_bufio.NewMockScanner(ctrl) + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, nil, nil, mockIOUtil) + envfileResource.bufio = mockBufio + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + open = tempOpen + }() + + envfileContentComment := "# some comment here" + gomock.InOrder( + mockBufio.EXPECT().NewScanner(mockFile).Return(mockScanner), + mockScanner.EXPECT().Scan().Return(true), + mockScanner.EXPECT().Text().Return(envfileContentComment), + mockScanner.EXPECT().Scan().Return(false), + mockScanner.EXPECT().Err().Return(nil), + ) + + envVarsList, err := envfileResource.ReadEnvVarsFromEnvfiles() + + assert.Nil(t, err) + assert.Equal(t, 0, len(envVarsList[0])) +} + +func TestReadEnvVarsInvalidFromEnvfiles(t *testing.T) { + mockFile, mockIOUtil, _, _, _, done := setup(t) + defer done() + + ctrl := gomock.NewController(t) + mockBufio := mock_bufio.NewMockBufio(ctrl) + mockScanner := 
mock_bufio.NewMockScanner(ctrl) + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, nil, nil, mockIOUtil) + envfileResource.bufio = mockBufio + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return mockFile, nil + } + defer func() { + open = tempOpen + }() + + envfileContentInvalid := "=value" + gomock.InOrder( + mockBufio.EXPECT().NewScanner(mockFile).Return(mockScanner), + mockScanner.EXPECT().Scan().Return(true), + mockScanner.EXPECT().Text().Return(envfileContentInvalid), + mockScanner.EXPECT().Scan().Return(false), + mockScanner.EXPECT().Err().Return(nil), + ) + + envVarsList, err := envfileResource.ReadEnvVarsFromEnvfiles() + + assert.Nil(t, err) + assert.Equal(t, 0, len(envVarsList[0])) +} + +func TestReadEnvVarsUnableToReadEnvfile(t *testing.T) { + _, mockIOUtil, _, _, _, done := setup(t) + defer done() + + envfiles := []container.EnvironmentFile{ + sampleEnvironmentFile(fmt.Sprintf("arn:aws:s3:::%s/%s", s3Bucket, s3Key), "s3"), + } + + envfileResource := newMockEnvfileResource(envfiles, nil, nil, mockIOUtil) + + tempOpen := open + open = func(name string) (oswrapper.File, error) { + return nil, errors.New("error response") + } + defer func() { + open = tempOpen + }() + + _, err := envfileResource.ReadEnvVarsFromEnvfiles() + + assert.NotNil(t, err) +} diff --git a/agent/taskresource/envFiles/envfilestatus.go b/agent/taskresource/envFiles/envfilestatus.go new file mode 100644 index 00000000000..366cbc55aea --- /dev/null +++ b/agent/taskresource/envFiles/envfilestatus.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package envFiles + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type EnvironmentFileStatus resourcestatus.ResourceStatus + +const ( + // EnvFileStatusNone is the zero state of a task resource + EnvFileStatusNone EnvironmentFileStatus = iota + // EnvFileCreated means the task resource is created + EnvFileCreated + // EnvFileRemoved means the task resource is cleaned up + EnvFileRemoved +) + +var envfileStatusMap = map[string]EnvironmentFileStatus{ + "NONE": EnvFileStatusNone, + "CREATED": EnvFileCreated, + "REMOVED": EnvFileRemoved, +} + +func (envfileStatus EnvironmentFileStatus) String() string { + for k, v := range envfileStatusMap { + if v == envfileStatus { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type. +func (envfileStatus *EnvironmentFileStatus) MarshalJSON() ([]byte, error) { + if envfileStatus == nil { + return nil, errors.New("envfile resource status is nil") + } + return []byte(`"` + envfileStatus.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data. 
+func (envfileStatus *EnvironmentFileStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *envfileStatus = EnvFileStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *envfileStatus = EnvFileStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := envfileStatusMap[string(strStatus)] + if !ok { + *envfileStatus = EnvFileStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *envfileStatus = stat + return nil +} diff --git a/agent/taskresource/envFiles/envfilestatus_test.go b/agent/taskresource/envFiles/envfilestatus_test.go new file mode 100644 index 00000000000..fcac4dbf285 --- /dev/null +++ b/agent/taskresource/envFiles/envfilestatus_test.go @@ -0,0 +1,155 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package envFiles + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InEnvFileStatus EnvironmentFileStatus + OutEnvFileStatus string + }{ + { + Name: "ToStringEnvFileStatusNone", + InEnvFileStatus: EnvFileStatusNone, + OutEnvFileStatus: "NONE", + }, + { + Name: "ToStringEnvFileCreated", + InEnvFileStatus: EnvFileCreated, + OutEnvFileStatus: "CREATED", + }, + { + Name: "ToStringEnvFileRemoved", + InEnvFileStatus: EnvFileRemoved, + OutEnvFileStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutEnvFileStatus, c.InEnvFileStatus.String()) + }) + } + +} + +func TestMarshalNilEnvFileStatus(t *testing.T) { + var status *EnvironmentFileStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalEnvFileStatus(t *testing.T) { + cases := []struct { + Name string + InEnvFileStatus EnvironmentFileStatus + OutEnvFileStatus string + }{ + { + Name: "MarshallEnvFileStatusNone", + InEnvFileStatus: EnvFileStatusNone, + OutEnvFileStatus: "\"NONE\"", + }, + { + Name: "MarshallEnvFileCreated", + InEnvFileStatus: EnvFileCreated, + OutEnvFileStatus: "\"CREATED\"", + }, + { + Name: "MarshallEnvFileRemoved", + InEnvFileStatus: EnvFileRemoved, + OutEnvFileStatus: "\"REMOVED\"", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InEnvFileStatus.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, c.OutEnvFileStatus, string(bytes[:])) + }) + } +} + +func TestUnmarshalEnvFileStatus(t *testing.T) { + cases := []struct { + Name string + InEnvFileStatus string + OutEnvFileStatus EnvironmentFileStatus + ShouldError bool + }{ + { + Name: "UnmarshallEnvFileStatusNone", + InEnvFileStatus: "\"NONE\"", + OutEnvFileStatus: EnvFileStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallEnvFileCreated", + InEnvFileStatus: 
"\"CREATED\"", + OutEnvFileStatus: EnvFileCreated, + ShouldError: false, + }, + { + Name: "UnmarshallEnvFileRemoved", + InEnvFileStatus: "\"REMOVED\"", + OutEnvFileStatus: EnvFileRemoved, + ShouldError: false, + }, + { + Name: "UnmarshallEnvFileStatusNull", + InEnvFileStatus: "null", + OutEnvFileStatus: EnvFileStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallEnvFileStatusNonString", + InEnvFileStatus: "123", + OutEnvFileStatus: EnvFileStatusNone, + ShouldError: true, + }, + { + Name: "UnmarshallEnvFileStatusUnmappedStatus", + InEnvFileStatus: "\"WEEWOO\"", + OutEnvFileStatus: EnvFileStatusNone, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var status EnvironmentFileStatus + err := json.Unmarshal([]byte(c.InEnvFileStatus), &status) + + if c.ShouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, c.OutEnvFileStatus, status) + } + }) + } +} diff --git a/agent/taskresource/firelens/firelens_unimplemented.go b/agent/taskresource/firelens/firelens_unimplemented.go new file mode 100644 index 00000000000..3d5810ab68d --- /dev/null +++ b/agent/taskresource/firelens/firelens_unimplemented.go @@ -0,0 +1,176 @@ +// +build !linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package firelens + +import ( + "errors" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +const ( + // ResourceName is the name of the firelens resource. + ResourceName = "firelens" + // FirelensConfigTypeFluentd is the type of a fluentd firelens container. + FirelensConfigTypeFluentd = "fluentd" + // FirelensConfigTypeFluentbit is the type of a fluentbit firelens container. + FirelensConfigTypeFluentbit = "fluentbit" + // ExternalConfigTypeOption is the option that specifies the type of an external config file to be included as + // part of the config file generated by agent. Its allowed values are "s3" and "file". + ExternalConfigTypeOption = "config-file-type" + // ExternalConfigTypeS3 means the firelens container is using a config file from S3. + ExternalConfigTypeS3 = "s3" + // ExternalConfigTypeFile means the firelens container is using a config file inside the container. + ExternalConfigTypeFile = "file" + // S3ConfigPathFluentd and S3ConfigPathFluentbit are the paths where we bind mount the config downloaded from S3 to. + S3ConfigPathFluentd = "/fluentd/etc/external.conf" + S3ConfigPathFluentbit = "/fluent-bit/etc/external.conf" +) + +// FirelensResource represents the firelens resource. +type FirelensResource struct{} + +// NewFirelensResource returns a new FirelensResource. 
+func NewFirelensResource(cluster, taskARN, taskDefinition, ec2InstanceID, dataDir, firelensConfigType, region, networkMode string, + firelensOptions map[string]string, containerToLogOptions map[string]map[string]string, credentialsManager credentials.Manager, + executionCredentialsID string) (*FirelensResource, error) { + return nil, errors.New("not implemented") +} + +// SetDesiredStatus safely sets the desired status of the resource. +func (firelens *FirelensResource) SetDesiredStatus(status resourcestatus.ResourceStatus) {} + +// GetDesiredStatus safely returns the desired status of the resource. +func (firelens *FirelensResource) GetDesiredStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// GetName safely returns the name of the resource. +func (firelens *FirelensResource) GetName() string { + return "undefined" +} + +// DesiredTerminal returns true if the resource's desired status is REMOVED. +func (firelens *FirelensResource) DesiredTerminal() bool { + return false +} + +// KnownCreated returns true if the resource's known status is CREATED. +func (firelens *FirelensResource) KnownCreated() bool { + return false +} + +// TerminalStatus returns the last transition state of firelens resource. +func (firelens *FirelensResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// NextKnownState returns the state that the resource should progress to based +// on its `KnownState`. +func (firelens *FirelensResource) NextKnownState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// ApplyTransition calls the function required to move to the specified status. +func (firelens *FirelensResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + return errors.New("not implemented") +} + +// SteadyState returns the transition state of the resource defined as "ready". 
+func (firelens *FirelensResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetKnownStatus safely sets the currently known status of the resource. +func (firelens *FirelensResource) SetKnownStatus(status resourcestatus.ResourceStatus) {} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition. +func (firelens *FirelensResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + return false +} + +// GetKnownStatus safely returns the currently known status of the resource. +func (firelens *FirelensResource) GetKnownStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetCreatedAt sets the timestamp for resource's creation time. +func (firelens *FirelensResource) SetCreatedAt(createdAt time.Time) {} + +// GetCreatedAt returns the timestamp for resource's creation time. +func (firelens *FirelensResource) GetCreatedAt() time.Time { + return time.Time{} +} + +// Create creates the firelens resource. +func (firelens *FirelensResource) Create() error { + return errors.New("not implemented") +} + +// Cleanup cleans up the firelens resource. +func (firelens *FirelensResource) Cleanup() error { + return errors.New("not implemented") +} + +// StatusString returns the string of the firelens resource status. +func (firelens *FirelensResource) StatusString(status resourcestatus.ResourceStatus) string { + return "undefined" +} + +// GetTerminalReason returns the terminal reason for the resource. +func (firelens *FirelensResource) GetTerminalReason() string { + return "undefined" +} + +// MarshalJSON marshals FirelensResource object. +func (firelens *FirelensResource) MarshalJSON() ([]byte, error) { + return nil, errors.New("not implemented") +} + +// UnmarshalJSON unmarshals FirelensResource object. 
+func (firelens *FirelensResource) UnmarshalJSON(b []byte) error { + return errors.New("not implemented") +} + +// Initialize fills in the resource fields. +func (firelens *FirelensResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (firelens *FirelensResource) GetAppliedStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +func (firelens *FirelensResource) DependOnTaskNetwork() bool { + return false +} + +func (firelens *FirelensResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (firelens *FirelensResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/firelens/firelens_unix.go b/agent/taskresource/firelens/firelens_unix.go new file mode 100644 index 00000000000..20ce2e42dce --- /dev/null +++ b/agent/taskresource/firelens/firelens_unix.go @@ -0,0 +1,569 @@ +// +build linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package firelens + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/s3" + "github.com/aws/amazon-ecs-agent/agent/s3/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" +) + +const ( + // ResourceName is the name of the firelens resource. + ResourceName = "firelens" + // tempFile is the name of the temp file generated during generating config file. + tempFile = "temp_config_file" + // configFilePerm is the permission for the firelens config file. + configFilePerm = 0644 + // ecsLogMetadataEnableOption is the option that specifies whether to enable appending ecs metadata to log stream. + ecsLogMetadataEnableOption = "enable-ecs-log-metadata" + // ExternalConfigTypeOption is the option that specifies the type of an external config file to be included as + // part of the config file generated by agent. Its allowed values are "s3" and "file". + ExternalConfigTypeOption = "config-file-type" + // ExternalConfigTypeS3 means the firelens container is using a config file from S3. + ExternalConfigTypeS3 = "s3" + // ExternalConfigTypeFile means the firelens container is using a config file inside the container. + ExternalConfigTypeFile = "file" + // externalConfigValueOption is the option that specifies the location of the external config file. 
When + // ExternalConfigTypeOption is s3, the value for this option should be an s3 arn; when ExternalConfigTypeOption is + // file, the value for this option should be a path to the config file inside the firelens container. + externalConfigValueOption = "config-file-value" + + s3DownloadTimeout = 30 * time.Second +) + +// FirelensResource models fluentd/fluentbit firelens container related resources as a task resource. +type FirelensResource struct { + // Fields that are specific to firelens resource. They are only set at initialization so are not protected by lock. + cluster string + taskARN string + taskDefinition string + ec2InstanceID string + resourceDir string + firelensConfigType string + region string + ecsMetadataEnabled bool + containerToLogOptions map[string]map[string]string + credentialsManager credentials.Manager + executionCredentialsID string + externalConfigType string + externalConfigValue string + networkMode string + ioutil ioutilwrapper.IOUtil + s3ClientCreator factory.S3ClientCreator + + // Fields for the common functionality of task resource. Access to these fields are protected by lock. + createdAtUnsafe time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + appliedStatusUnsafe resourcestatus.ResourceStatus + statusToTransitions map[resourcestatus.ResourceStatus]func() error + terminalReason string + terminalReasonOnce sync.Once + lock sync.RWMutex +} + +// NewFirelensResource returns a new FirelensResource. 
+func NewFirelensResource(cluster, taskARN, taskDefinition, ec2InstanceID, dataDir, firelensConfigType, region, networkMode string, + firelensOptions map[string]string, containerToLogOptions map[string]map[string]string, credentialsManager credentials.Manager, + executionCredentialsID string) (*FirelensResource, error) { + firelensResource := &FirelensResource{ + cluster: cluster, + taskARN: taskARN, + taskDefinition: taskDefinition, + ec2InstanceID: ec2InstanceID, + firelensConfigType: firelensConfigType, + region: region, + networkMode: networkMode, + containerToLogOptions: containerToLogOptions, + ioutil: ioutilwrapper.NewIOUtil(), + s3ClientCreator: factory.NewS3ClientCreator(), + executionCredentialsID: executionCredentialsID, + credentialsManager: credentialsManager, + } + + fields := strings.Split(taskARN, "/") + taskID := fields[len(fields)-1] + firelensResource.resourceDir = filepath.Join(filepath.Join(dataDir, "firelens"), taskID) + + err := firelensResource.parseOptions(firelensOptions) + if err != nil { + return nil, errors.Wrap(err, "error parsing firelens options") + } + + firelensResource.initStatusToTransition() + return firelensResource, nil +} + +func (firelens *FirelensResource) parseOptions(options map[string]string) error { + if _, ok := options[ecsLogMetadataEnableOption]; ok { + val := options[ecsLogMetadataEnableOption] + b, err := strconv.ParseBool(val) + if err != nil { + seelog.Warnf("Invalid value for firelens container option %s was specified: %s. 
Ignoring it.", ecsLogMetadataEnableOption, val) + } else { + firelens.ecsMetadataEnabled = b + } + } else { + firelens.ecsMetadataEnabled = true + } + + if externalConfigType, ok := options[ExternalConfigTypeOption]; ok { + if externalConfigType != ExternalConfigTypeS3 && externalConfigType != ExternalConfigTypeFile { + return errors.Errorf("invalid value %s is specified for option %s", externalConfigType, ExternalConfigTypeOption) + } + firelens.externalConfigType = externalConfigType + + externalConfigValue, ok := options[externalConfigValueOption] + if !ok { + return errors.Errorf("option %s is specified but %s is not specified", ExternalConfigTypeOption, externalConfigValueOption) + } + firelens.externalConfigValue = externalConfigValue + } + + return nil +} + +// GetCluster returns the cluster. +func (firelens *FirelensResource) GetCluster() string { + return firelens.cluster +} + +// GetTaskARN returns the task arn. +func (firelens *FirelensResource) GetTaskARN() string { + return firelens.taskARN +} + +// GetTaskDefinition returns the task definition. +func (firelens *FirelensResource) GetTaskDefinition() string { + return firelens.taskDefinition +} + +// GetEC2InstanceID returns the ec2 instance id. +func (firelens *FirelensResource) GetEC2InstanceID() string { + return firelens.ec2InstanceID +} + +// GetResourceDir returns the resource dir. +func (firelens *FirelensResource) GetResourceDir() string { + return firelens.resourceDir +} + +// GetECSMetadataEnabled returns whether ecs metadata is enabled. +func (firelens *FirelensResource) GetECSMetadataEnabled() bool { + return firelens.ecsMetadataEnabled +} + +// GetContainerToLogOptions returns a map of containers' log options. 
+func (firelens *FirelensResource) GetContainerToLogOptions() map[string]map[string]string { + return firelens.containerToLogOptions +} + +func (firelens *FirelensResource) GetRegion() string { + return firelens.region +} + +func (firelens *FirelensResource) GetExecutionCredentialsID() string { + return firelens.executionCredentialsID +} + +func (firelens *FirelensResource) GetExternalConfigType() string { + return firelens.externalConfigType +} + +func (firelens *FirelensResource) GetExternalConfigValue() string { + return firelens.externalConfigValue +} + +// Initialize initializes the resource. +func (firelens *FirelensResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, taskDesiredStatus status.TaskStatus) { + firelens.lock.Lock() + defer firelens.lock.Unlock() + + // Initialize the fields that won't be populated by unmarshalling from state file. + firelens.initStatusToTransition() + firelens.ioutil = ioutilwrapper.NewIOUtil() + firelens.s3ClientCreator = factory.NewS3ClientCreator() + firelens.credentialsManager = resourceFields.CredentialsManager +} + +// GetNetworkMode returns the network mode of the task. +func (firelens *FirelensResource) GetNetworkMode() string { + return firelens.networkMode +} + +func (firelens *FirelensResource) initStatusToTransition() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(FirelensCreated): firelens.Create, + } + firelens.statusToTransitions = resourceStatusToTransitionFunction +} + +// GetName returns the name of the resource. +func (firelens *FirelensResource) GetName() string { + return ResourceName +} + +// DesiredTerminal returns true if the resource's desired status is REMOVED. 
+func (firelens *FirelensResource) DesiredTerminal() bool { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.desiredStatusUnsafe == resourcestatus.ResourceStatus(FirelensRemoved) +} + +func (firelens *FirelensResource) setTerminalReason(reason string) { + firelens.terminalReasonOnce.Do(func() { + seelog.Infof("firelens resource: setting terminal reason for task: [%s]", firelens.taskARN) + firelens.terminalReason = reason + }) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages. +func (firelens *FirelensResource) GetTerminalReason() string { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.terminalReason +} + +// SetDesiredStatus safely sets the desired status of the resource. +func (firelens *FirelensResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + firelens.lock.Lock() + defer firelens.lock.Unlock() + + firelens.desiredStatusUnsafe = status +} + +// GetDesiredStatus safely returns the desired status of the task. +func (firelens *FirelensResource) GetDesiredStatus() resourcestatus.ResourceStatus { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.desiredStatusUnsafe +} + +// SetKnownStatus safely sets the currently known status of the resource. +func (firelens *FirelensResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + firelens.lock.Lock() + defer firelens.lock.Unlock() + + firelens.knownStatusUnsafe = status + firelens.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status. +func (firelens *FirelensResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if firelens.appliedStatusUnsafe == resourcestatus.ResourceStatus(FirelensStatusNone) { + return + } + + if firelens.appliedStatusUnsafe <= knownStatus { + // Set applied status back to none to indicate that the transition has finished. 
+ firelens.appliedStatusUnsafe = resourcestatus.ResourceStatus(FirelensStatusNone) + } +} + +// GetKnownStatus safely returns the currently known status of the task. +func (firelens *FirelensResource) GetKnownStatus() resourcestatus.ResourceStatus { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.knownStatusUnsafe +} + +// KnownCreated returns true if the resource's known status is CREATED. +func (firelens *FirelensResource) KnownCreated() bool { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.knownStatusUnsafe == resourcestatus.ResourceStatus(FirelensCreated) +} + +// TerminalStatus returns the last transition state of resource. +func (firelens *FirelensResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(FirelensRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. +func (firelens *FirelensResource) NextKnownState() resourcestatus.ResourceStatus { + return firelens.GetKnownStatus() + 1 +} + +// SteadyState returns the transition state of the resource defined as "ready". +func (firelens *FirelensResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(FirelensCreated) +} + +// ApplyTransition calls the function required to move to the specified status. +func (firelens *FirelensResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + firelens.lock.Lock() + defer firelens.lock.Unlock() + + transitionFunc, ok := firelens.statusToTransitions[nextState] + if !ok { + err := errors.Errorf("resource [%s]: impossible to transition to %s", ResourceName, + firelens.StatusString(nextState)) + firelens.setTerminalReason(err.Error()) + return err + } + return transitionFunc() +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition. 
+func (firelens *FirelensResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + firelens.lock.Lock() + defer firelens.lock.Unlock() + + if firelens.appliedStatusUnsafe != resourcestatus.ResourceStatus(FirelensStatusNone) { + // Return false to indicate the set operation failed. + return false + } + + firelens.appliedStatusUnsafe = status + return true +} + +// GetAppliedStatus returns the applied status. +func (firelens *FirelensResource) GetAppliedStatus() resourcestatus.ResourceStatus { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.appliedStatusUnsafe +} + +// StatusString returns the string representation of the resource status. +func (firelens *FirelensResource) StatusString(status resourcestatus.ResourceStatus) string { + return FirelensStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time. +func (firelens *FirelensResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + firelens.lock.Lock() + defer firelens.lock.Unlock() + + firelens.createdAtUnsafe = createdAt +} + +// GetCreatedAt returns the timestamp for resource's creation time. +func (firelens *FirelensResource) GetCreatedAt() time.Time { + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return firelens.createdAtUnsafe +} + +// Create performs resource creation. This includes creating a config directory, a socket directory, and generating +// a config file under the config directory. +func (firelens *FirelensResource) Create() error { + // Fail fast if firelens configuration type is invalid. 
+ if firelens.firelensConfigType != FirelensConfigTypeFluentd && + firelens.firelensConfigType != FirelensConfigTypeFluentbit { + err := errors.New(fmt.Sprintf("invalid firelens configuration type: %s", firelens.firelensConfigType)) + firelens.setTerminalReason(err.Error()) + return err + } + + err := firelens.createDirectories() + if err != nil { + err = errors.Wrapf(err, "unable to initialize resource directory %s", firelens.resourceDir) + firelens.setTerminalReason(err.Error()) + return err + } + + if firelens.externalConfigType == ExternalConfigTypeS3 { + err = firelens.downloadConfigFromS3() + if err != nil { + err = errors.Wrap(err, "unable to download firelens s3 config file") + firelens.setTerminalReason(err.Error()) + return err + } + } + + err = firelens.generateConfigFile() + if err != nil { + err = errors.Wrap(err, "unable to generate firelens config file") + firelens.setTerminalReason(err.Error()) + return err + } + + return nil +} + +var mkdirAll = os.MkdirAll + +// createDirectories creates two directories: +// - $(DATA_DIR)/firelens/$(TASK_ID)/config: used to store firelens config file. The config file under this directory +// will be mounted to the firelens container at an expected path. +// - $(DATA_DIR)/firelens/$(TASK_ID)/socket: used to store the unix socket. This directory will be mounted to +// the firelens container and it will generate a socket file under this directory. Containers that use firelens to +// send logs will then use this socket to send logs to the firelens container. +// Note: socket path has a limit of at most 108 characters on Linux. If using default data dir, the +// resulting socket path will be 79 characters (/var/lib/ecs/data/firelens/<task-id>/socket/fluent.sock) which is fine. +// However if ECS_HOST_DATA_DIR is specified to be a longer path, we will exceed the limit and fail. I don't really +// see a way to avoid this failure since ECS_HOST_DATA_DIR can be arbitrarily long. 
+func (firelens *FirelensResource) createDirectories() error { + configDir := filepath.Join(firelens.resourceDir, "config") + err := mkdirAll(configDir, os.ModePerm) + if err != nil { + return errors.Wrap(err, "unable to create config directory") + } + + socketDir := filepath.Join(firelens.resourceDir, "socket") + err = mkdirAll(socketDir, os.ModePerm) + if err != nil { + return errors.Wrap(err, "unable to create socket directory") + } + return nil +} + +// generateConfigFile generates a firelens config file at $(RESOURCE_DIR)/config/fluent.conf. +// This contains configs needed by the firelens container. +func (firelens *FirelensResource) generateConfigFile() error { + config, err := firelens.generateConfig() + if err != nil { + return errors.Wrap(err, "unable to generate firelens config") + } + + confFilePath := filepath.Join(firelens.resourceDir, "config", "fluent.conf") + err = firelens.writeConfigFile(func(file oswrapper.File) error { + if firelens.firelensConfigType == FirelensConfigTypeFluentd { + return config.WriteFluentdConfig(file) + } else { + return config.WriteFluentBitConfig(file) + } + }, confFilePath) + if err != nil { + return errors.Wrapf(err, "unable to generate firelens config file") + } + + seelog.Infof("Generated firelens config file at: %s", confFilePath) + return nil +} + +// downloadConfigFromS3 downloads an external config file from S3 and saves it at ${RESOURCE_DIR}/config/external.conf. +// The generated firelens config file fluent.conf will have a reference to include this file. 
+func (firelens *FirelensResource) downloadConfigFromS3() error { + creds, ok := firelens.credentialsManager.GetTaskCredentials(firelens.executionCredentialsID) + if !ok { + return errors.New("unable to get execution role credentials") + } + + bucket, key, err := s3.ParseS3ARN(firelens.externalConfigValue) + if err != nil { + return errors.Wrap(err, "unable to parse bucket and key from s3 arn") + } + + s3Client, err := firelens.s3ClientCreator.NewS3ClientForBucket(bucket, firelens.region, creds.GetIAMRoleCredentials()) + if err != nil { + return errors.Wrapf(err, "unable to initialize s3 client for bucket %s", bucket) + } + + confFilePath := filepath.Join(firelens.resourceDir, "config", "external.conf") + err = firelens.writeConfigFile(func(file oswrapper.File) error { + return s3.DownloadFile(bucket, key, s3DownloadTimeout, file, s3Client) + }, confFilePath) + + if err != nil { + return errors.Wrapf(err, "unable to download s3 config %s from bucket %s", key, bucket) + } + + seelog.Debugf("Downloaded firelens config file from s3 and saved to: %s", confFilePath) + return nil +} + +var rename = os.Rename + +// writeConfigFile writes a config file at a given path. +func (firelens *FirelensResource) writeConfigFile(writeFunc func(file oswrapper.File) error, filePath string) error { + temp, err := firelens.ioutil.TempFile(firelens.resourceDir, tempFile) + if err != nil { + return err + } + defer temp.Close() + + err = writeFunc(temp) + if err != nil { + return err + } + + err = temp.Chmod(os.FileMode(configFilePerm)) + if err != nil { + return err + } + + // Persist the config file to disk. + err = temp.Sync() + if err != nil { + return err + } + + err = rename(temp.Name(), filePath) + if err != nil { + return err + } + + return nil +} + +var removeAll = os.RemoveAll + +// Cleanup performs resource cleanup. 
+func (firelens *FirelensResource) Cleanup() error { + err := removeAll(firelens.resourceDir) + if err != nil { + return fmt.Errorf("unable to remove firelens resource directory %s: %v", firelens.resourceDir, err) + } + + seelog.Infof("Removed firelens resource directory at %s", firelens.resourceDir) + return nil +} + +func (firelens *FirelensResource) DependOnTaskNetwork() bool { + return false +} + +func (firelens *FirelensResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (firelens *FirelensResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/firelens/firelens_unix_test.go b/agent/taskresource/firelens/firelens_unix_test.go new file mode 100644 index 00000000000..ba37b6131b8 --- /dev/null +++ b/agent/taskresource/firelens/firelens_unix_test.go @@ -0,0 +1,510 @@ +// +build linux,unit +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package firelens + +import ( + "io" + "os" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_factory "github.com/aws/amazon-ecs-agent/agent/s3/factory/mocks" + mock_s3 "github.com/aws/amazon-ecs-agent/agent/s3/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + mock_ioutilwrapper "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper/mocks" + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + mock_oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper/mocks" +) + +const ( + testCluster = "mycluster" + testTaskARN = "arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a" + testTaskDefinition = "taskdefinition:1" + testEC2InstanceID = "i-123456789a" + testDataDir = "testdatadir" + testResourceDir = "testresourcedir" + testTerminalResason = "testterminalreason" + testTempFile = "testtempfile" + testRegion = "us-west-2" + testExecutionCredentialsID = "testexecutioncredentialsid" + testExternalConfigType = "testexternalconfigtype" + testExternalConfigValue = "testexternalconfigvalue" +) + +var ( + testFirelensOptionsFile = map[string]string{ + "enable-ecs-log-metadata": "true", + "config-file-type": "file", + "config-file-value": "/tmp/dummy.conf", + } + + testFirelensOptionsS3 = map[string]string{ + "enable-ecs-log-metadata": "true", + "config-file-type": "s3", + "config-file-value": "arn:aws:s3:::bucket/key", + } +) + +func setup(t *testing.T) (oswrapper.File, *mock_ioutilwrapper.MockIOUtil, + *mock_credentials.MockManager, 
*mock_factory.MockS3ClientCreator, *mock_s3.MockS3Client, func()) { + ctrl := gomock.NewController(t) + + mockFile := mock_oswrapper.NewMockFile() + mockIOUtil := mock_ioutilwrapper.NewMockIOUtil(ctrl) + mockCredentialsManager := mock_credentials.NewMockManager(ctrl) + mockS3ClientCreator := mock_factory.NewMockS3ClientCreator(ctrl) + mockS3Client := mock_s3.NewMockS3Client(ctrl) + + return mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, ctrl.Finish +} + +func mockRename() func() { + rename = func(oldpath, newpath string) error { + return nil + } + + return func() { + rename = os.Rename + } +} + +func mockMkdirAllError() func() { + mkdirAll = func(path string, perm os.FileMode) error { + return errors.New("test error") + } + + return func() { + mkdirAll = os.MkdirAll + } +} + +func newMockFirelensResource(firelensConfigType, networkMode string, lopOptions map[string]string, + mockIOUtil *mock_ioutilwrapper.MockIOUtil, mockCredentialsManager *mock_credentials.MockManager, + mockS3ClientCreator *mock_factory.MockS3ClientCreator) *FirelensResource { + return &FirelensResource{ + cluster: testCluster, + taskARN: testTaskARN, + taskDefinition: testTaskDefinition, + ec2InstanceID: testEC2InstanceID, + resourceDir: testResourceDir, + firelensConfigType: firelensConfigType, + region: testRegion, + networkMode: networkMode, + containerToLogOptions: map[string]map[string]string{ + "container": lopOptions, + }, + executionCredentialsID: testExecutionCredentialsID, + credentialsManager: mockCredentialsManager, + ioutil: mockIOUtil, + s3ClientCreator: mockS3ClientCreator, + } +} + +func TestParseOptions(t *testing.T) { + firelensResource := FirelensResource{} + err := firelensResource.parseOptions(testFirelensOptionsFile) + assert.NoError(t, err) + assert.Equal(t, true, firelensResource.ecsMetadataEnabled) + assert.Equal(t, "file", firelensResource.externalConfigType) + assert.Equal(t, "/tmp/dummy.conf", firelensResource.externalConfigValue) +} 
+ +func TestParseOptionsInvalidType(t *testing.T) { + options := map[string]string{ + "enable-ecs-log-metadata": "true", + "config-file-type": "invalid", + "config-file-value": "xxx", + } + firelensResource := FirelensResource{} + assert.Error(t, firelensResource.parseOptions(options)) +} + +func TestParseOptionsNoValue(t *testing.T) { + options := map[string]string{ + "enable-ecs-log-metadata": "true", + "config-file-type": "file", + } + firelensResource := FirelensResource{} + assert.Error(t, firelensResource.parseOptions(options)) +} + +func TestCreateFirelensResourceFluentdBridgeMode(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockRename()() + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.NoError(t, firelensResource.Create()) +} + +func TestCreateFirelensResourceFluentdAWSVPCMode(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, awsvpcNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockRename()() + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.NoError(t, firelensResource.Create()) +} + +func TestCreateFirelensResourceFluentdDefaultMode(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, "", testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockRename()() + gomock.InOrder( + 
mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.NoError(t, firelensResource.Create()) +} + +func TestCreateFirelensResourceFluentbit(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentbit, bridgeNetworkMode, testFluentbitOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockRename()() + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.NoError(t, firelensResource.Create()) +} + +func TestCreateFirelensResourceInvalidType(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + firelensResource.firelensConfigType = "invalid" + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceCreateConfigDirError(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockMkdirAllError()() + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceCreateSocketDirError(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + defer mockMkdirAllError()() + + 
assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceGenerateConfigError(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + firelensResource.containerToLogOptions = map[string]map[string]string{ + "container": { + "invalid": "invalid", + }, + } + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceCreateTempFileError(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(nil, errors.New("test error")), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceWriteConfigFileError(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + mockFile.(*mock_oswrapper.MockFile).WriteImpl = func(bytes []byte) (i int, e error) { + return 0, errors.New("test error") + } + + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceChmodError(t *testing.T) { + mockFile, 
mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + mockFile.(*mock_oswrapper.MockFile).ChmodImpl = func(mode os.FileMode) error { + return errors.New("test error") + } + + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceRenameError(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + gomock.InOrder( + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + rename = func(oldpath, newpath string) error { + return errors.New("test error") + } + defer func() { + rename = os.Rename + }() + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceWithS3Config(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + err := firelensResource.parseOptions(testFirelensOptionsS3) + require.NoError(t, err) + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + defer mockRename()() + + gomock.InOrder( + 
mockCredentialsManager.EXPECT().GetTaskCredentials(testExecutionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket("bucket", testRegion, creds.IAMRoleCredentials).Return(mockS3Client, nil), + // write external config file downloaded from s3 + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Do( + func(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput) { + assert.Equal(t, "bucket", aws.StringValue(input.Bucket)) + assert.Equal(t, "key", aws.StringValue(input.Key)) + }).Return(int64(0), nil), + + // write main config file + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + ) + + assert.NoError(t, firelensResource.Create()) +} + +func TestCreateFirelensResourceWithS3ConfigMissingCredentials(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + err := firelensResource.parseOptions(testFirelensOptionsS3) + require.NoError(t, err) + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(testExecutionCredentialsID).Return(credentials.TaskIAMRoleCredentials{}, false), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceWithS3ConfigInvalidS3ARN(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + err := firelensResource.parseOptions(testFirelensOptionsS3) + require.NoError(t, err) + firelensResource.externalConfigValue = 
"arn:s3:::xxx" + + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(testExecutionCredentialsID).Return(credentials.TaskIAMRoleCredentials{}, true), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCreateFirelensResourceWithS3ConfigDownloadFailure(t *testing.T) { + mockFile, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, mockS3Client, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + err := firelensResource.parseOptions(testFirelensOptionsS3) + require.NoError(t, err) + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + gomock.InOrder( + mockCredentialsManager.EXPECT().GetTaskCredentials(testExecutionCredentialsID).Return(creds, true), + mockS3ClientCreator.EXPECT().NewS3ClientForBucket("bucket", testRegion, creds.IAMRoleCredentials).Return(mockS3Client, nil), + mockIOUtil.EXPECT().TempFile(testResourceDir, tempFile).Return(mockFile, nil), + mockS3Client.EXPECT().DownloadWithContext(gomock.Any(), mockFile, gomock.Any()).Return(int64(0), errors.New("test error")), + ) + + assert.Error(t, firelensResource.Create()) + assert.NotEmpty(t, firelensResource.terminalReason) +} + +func TestCleanupFirelensResource(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + assert.NoError(t, firelensResource.Cleanup()) +} + +func TestCleanupFirelensResourceError(t *testing.T) { + _, mockIOUtil, mockCredentialsManager, mockS3ClientCreator, _, done := setup(t) + defer done() + + 
firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, mockIOUtil, + mockCredentialsManager, mockS3ClientCreator) + + removeAll = func(path string) error { + return errors.New("test error") + } + + defer func() { + removeAll = os.RemoveAll + }() + + assert.Error(t, firelensResource.Cleanup()) +} + +func TestInitializeFirelensResource(t *testing.T) { + _, _, mockCredentialsManager, _, _, done := setup(t) + defer done() + + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, nil, nil, + nil) + firelensResource.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + CredentialsManager: mockCredentialsManager, + }, + }, status.TaskRunning, status.TaskRunning) + + assert.NotNil(t, firelensResource.statusToTransitions) + assert.Equal(t, 1, len(firelensResource.statusToTransitions)) + assert.NotNil(t, firelensResource.ioutil) + assert.NotNil(t, firelensResource.s3ClientCreator) + assert.NotNil(t, firelensResource.credentialsManager) +} + +func TestSetKnownStatus(t *testing.T) { + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, nil, nil, + nil) + firelensResource.appliedStatusUnsafe = resourcestatus.ResourceStatus(FirelensCreated) + + firelensResource.SetKnownStatus(resourcestatus.ResourceStatus(FirelensCreated)) + assert.Equal(t, resourcestatus.ResourceStatus(FirelensCreated), firelensResource.knownStatusUnsafe) + assert.Equal(t, resourcestatus.ResourceStatus(FirelensStatusNone), firelensResource.appliedStatusUnsafe) +} + +func TestSetKnownStatusNoAppliedStatusUpdate(t *testing.T) { + firelensResource := newMockFirelensResource(FirelensConfigTypeFluentd, bridgeNetworkMode, testFluentdOptions, nil, nil, + nil) + firelensResource.appliedStatusUnsafe = resourcestatus.ResourceStatus(FirelensCreated) + + 
firelensResource.SetKnownStatus(resourcestatus.ResourceStatus(FirelensStatusNone)) + assert.Equal(t, resourcestatus.ResourceStatus(FirelensStatusNone), firelensResource.knownStatusUnsafe) + assert.Equal(t, resourcestatus.ResourceStatus(FirelensCreated), firelensResource.appliedStatusUnsafe) +} diff --git a/agent/taskresource/firelens/firelensconfig_unix.go b/agent/taskresource/firelens/firelensconfig_unix.go new file mode 100644 index 00000000000..910fe9d2d6a --- /dev/null +++ b/agent/taskresource/firelens/firelensconfig_unix.go @@ -0,0 +1,266 @@ +// +build linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package firelens + +import ( + "fmt" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + generator "github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit" +) + +const ( + // FirelensConfigTypeFluentd is the type of a fluentd firelens container. + FirelensConfigTypeFluentd = "fluentd" + + // FirelensConfigTypeFluentbit is the type of a fluentbit firelens container. + FirelensConfigTypeFluentbit = "fluentbit" + + // socketInputNameFluentd is the name of the socket input plugin for fluentd. + socketInputNameFluentd = "unix" + + // inputNameForward is the name of the tcp socket input plugin for fluentd and fluentbit. + inputNameForward = "forward" + + // socketInputPathOptionFluentd is the key for specifying socket path for fluentd. 
+	socketInputPathOptionFluentd = "path"
+
+	// socketInputPathOptionFluentbit is the key for specifying socket path for fluentbit.
+	socketInputPathOptionFluentbit = "unix_path"
+
+	// outputTypeLogOptionKeyFluentd is the key for the log option that specifies output plugin type for fluentd.
+	outputTypeLogOptionKeyFluentd = "@type"
+
+	// outputTypeLogOptionKeyFluentbit is the key for the log option that specifies output plugin type for fluentbit.
+	outputTypeLogOptionKeyFluentbit = "Name"
+
+	// includePatternKey is the key for include pattern.
+	includePatternKey = "include-pattern"
+
+	// excludePatternKey is the key for exclude pattern.
+	excludePatternKey = "exclude-pattern"
+
+	// socketPath is the path for socket file.
+	socketPath = "/var/run/fluent.sock"
+
+	// S3ConfigPathFluentd and S3ConfigPathFluentbit are the paths where we bind mount the config downloaded from S3 to.
+	S3ConfigPathFluentd = "/fluentd/etc/external.conf"
+	S3ConfigPathFluentbit = "/fluent-bit/etc/external.conf"
+
+	// fluentTagOutputFormat is the format for the log tag captured by the output section. First placeholder is
+	// container name. Second placeholder is the wildcard that matches all contents.
+	// When customer uses config generated by the agent, the input log will have tag as containerName-firelens-taskID which
+	// matches this. Customers can also generate logs with their own config and specify the tag to start with containerName-firelens
+	// to match this.
+	fluentTagOutputFormat = "%s-firelens%s"
+
+	// matchAnyWildcardFluentd is the wildcard to match arbitrary content for fluentd.
+	matchAnyWildcardFluentd = "**"
+	// matchAnyWildcardFluentbit is the wildcard to match arbitrary content for fluentbit.
+	matchAnyWildcardFluentbit = "*"
+
+	// inputBindOptionFluentd is the key for specifying host for fluentd for tcp.
+	inputBindOptionFluentd = "bind"
+
+	// inputBridgeBindValue is the value for specifying host for Bridge mode.
+ inputBridgeBindValue = "0.0.0.0" + + // inputAWSVPCBindValue is the value for specifying host for AWSVPC mode. + inputAWSVPCBindValue = "127.0.0.1" + + // inputPortOptionFluentd is the key for specifying port for fluentd for tcp. + inputPortOptionFluentd = "port" + + // inputPortValue is the value for specifying port for fluentd for tcp. + inputPortValue = "24224" + + // healthcheckInputNameFluentbit in the input plugin used to receive health check message for fluentbit. + healthcheckInputNameFluentbit = "tcp" + // healthcheckInputBindValue is the source where health check message comes from. + healthcheckInputBindValue = "127.0.0.1" + // healthcheckInputPortValue is the port for healthcheck. + healthcheckInputPortValue = "8877" + // healthcheckTag is the tag for health check message. + healthcheckTag = "firelens-healthcheck" + // healthcheckOutputName is the output plugin that health check message goes to. It's a black hole so that the health + // check messages don't go into logs. + healthcheckOutputName = "null" + + // inputListenOptionFluentbit is the key for the log option that specifies host for fluentbit. + inputListenOptionFluentbit = "Listen" + + // inputPortOptionFluentbit is the key for the log option that specifies port for fluentbit. + inputPortOptionFluentbit = "Port" + + // bridgeNetworkMode specifies bridge type mode for a task + bridgeNetworkMode = "bridge" + + // specifies awsvpc type mode for a task + awsvpcNetworkMode = "awsvpc" +) + +// generateConfig generates a FluentConfig object that contains all necessary information to construct +// a fluentd or fluentbit config file for a firelens container. +func (firelens *FirelensResource) generateConfig() (generator.FluentConfig, error) { + config := generator.New() + + // Specify log stream input, which is a unix socket that will be used for communication between the Firelens + // container and other containers. 
+	var inputName, inputPathOption, matchAnyWildcard string
+	if firelens.firelensConfigType == FirelensConfigTypeFluentd {
+		inputName = socketInputNameFluentd
+		inputPathOption = socketInputPathOptionFluentd
+		matchAnyWildcard = matchAnyWildcardFluentd
+	} else {
+		inputName = inputNameForward
+		inputPathOption = socketInputPathOptionFluentbit
+		matchAnyWildcard = matchAnyWildcardFluentbit
+	}
+	config.AddInput(inputName, "", map[string]string{
+		inputPathOption: socketPath,
+	})
+	// Specify log stream input of tcp socket kind that can be used for communication between the Firelens
+	// container and other containers if the network is bridge or awsvpc mode. Also add health check sections to support
+	// doing container health check on firelens container for these two modes.
+	if firelens.networkMode == bridgeNetworkMode || firelens.networkMode == awsvpcNetworkMode {
+		var inputMap map[string]string
+		var inputBindValue string
+		if firelens.networkMode == bridgeNetworkMode {
+			inputBindValue = inputBridgeBindValue
+		} else if firelens.networkMode == awsvpcNetworkMode {
+			inputBindValue = inputAWSVPCBindValue
+		}
+		if firelens.firelensConfigType == FirelensConfigTypeFluentd {
+			inputMap = map[string]string{
+				inputPortOptionFluentd: inputPortValue,
+				inputBindOptionFluentd: inputBindValue,
+			}
+			inputName = inputNameForward
+		} else {
+			inputName = inputNameForward
+			inputMap = map[string]string{
+				inputPortOptionFluentbit: inputPortValue,
+				inputListenOptionFluentbit: inputBindValue,
+			}
+		}
+		config.AddInput(inputName, "", inputMap)
+
+		firelens.addHealthcheckSections(config)
+	}
+
+	if firelens.ecsMetadataEnabled {
+		// Add ecs metadata fields to the log stream.
+		config.AddFieldToRecord("ecs_cluster", firelens.cluster, matchAnyWildcard).
+			AddFieldToRecord("ecs_task_arn", firelens.taskARN, matchAnyWildcard).
+			AddFieldToRecord("ecs_task_definition", firelens.taskDefinition, matchAnyWildcard)
+		if firelens.ec2InstanceID != "" {
+			config.AddFieldToRecord("ec2_instance_id", firelens.ec2InstanceID, matchAnyWildcard)
+		}
+	}
+
+	// Specify log stream output. Each container that uses the firelens container to stream logs
+	// may have its own output section with options, constructed from container's log options.
+	for containerName, logOptions := range firelens.containerToLogOptions {
+		tag := fmt.Sprintf(fluentTagOutputFormat, containerName, matchAnyWildcard) // Each output section is distinguished by a tag specific to a container.
+		newConfig, err := addOutputSection(tag, firelens.firelensConfigType, logOptions, config)
+		if err != nil {
+			return nil, fmt.Errorf("unable to apply log options of container %s to firelens config: %v", containerName, err)
+		}
+		config = newConfig
+	}
+
+	// Include external config file if specified.
+	if firelens.externalConfigType == ExternalConfigTypeFile {
+		config.AddExternalConfig(firelens.externalConfigValue, generator.AfterFilters)
+	} else if firelens.externalConfigType == ExternalConfigTypeS3 {
+		var s3ConfPath string
+		if firelens.firelensConfigType == FirelensConfigTypeFluentd {
+			s3ConfPath = S3ConfigPathFluentd
+		} else {
+			s3ConfPath = S3ConfigPathFluentbit
+		}
+		config.AddExternalConfig(s3ConfPath, generator.AfterFilters)
+	}
+	seelog.Infof("Included external firelens config file at: %s", firelens.externalConfigValue)
+
+	return config, nil
+}
+
+// addHealthcheckSections adds a health check input section and a health check output section to the config.
+func (firelens *FirelensResource) addHealthcheckSections(config generator.FluentConfig) {
+	// Health check support is only added for fluentbit.
+	if firelens.firelensConfigType != FirelensConfigTypeFluentbit {
+		return
+	}
+
+	// Add healthcheck input section.
+ inputName := healthcheckInputNameFluentbit + inputOptions := map[string]string{ + inputPortOptionFluentbit: healthcheckInputPortValue, + inputListenOptionFluentbit: healthcheckInputBindValue, + } + config.AddInput(inputName, healthcheckTag, inputOptions) + + // Add healthcheck output section. + config.AddOutput(healthcheckOutputName, healthcheckTag, nil) +} + +// addOutputSection adds an output section to the firelens container's config that specifies how it routes another +// container's logs. It's constructed based on that container's log options. +// logOptions is a set of key-value pairs, which includes the following: +// 1. The name of the output plugin (required when there are output options specified, i.e. the ones in 4). For +// fluentd, the key is "@type", for fluentbit, the key is "Name". +// 2. include-pattern (optional): a regex specifying the logs to be included. +// 3. exclude-pattern (optional): a regex specifying the logs to be excluded. +// 4. All other key-value pairs are customer specified options for the plugin. They are unique for each plugin and +// we don't check them. +func addOutputSection(tag, firelensConfigType string, logOptions map[string]string, config generator.FluentConfig) (generator.FluentConfig, error) { + var outputKey string + if firelensConfigType == FirelensConfigTypeFluentd { + outputKey = outputTypeLogOptionKeyFluentd + } else { + outputKey = outputTypeLogOptionKeyFluentbit + } + + outputOptions := make(map[string]string) + for key, value := range logOptions { + switch key { + case outputKey: + continue + case includePatternKey: + config.AddIncludeFilter(value, "log", tag) + case excludePatternKey: + config.AddExcludeFilter(value, "log", tag) + default: // This is a plugin specific option. + outputOptions[key] = value + } + } + + output, ok := logOptions[outputKey] + // If there are some output options specified, there must be an output key so that we know what is the output plugin. 
+ if len(outputOptions) > 0 && !ok { + return config, errors.New( + fmt.Sprintf("missing output key %s which is required for firelens configuration of type %s", + outputKey, firelensConfigType)) + } else if !ok { // Otherwise it's ok to not generate an output section, since customers may specify the output in external config. + return config, nil + } + + // Output key is specified. Add an output section. + config.AddOutput(output, tag, outputOptions) + return config, nil +} diff --git a/agent/taskresource/firelens/firelensconfig_unix_test.go b/agent/taskresource/firelens/firelensconfig_unix_test.go new file mode 100644 index 00000000000..424f29c7253 --- /dev/null +++ b/agent/taskresource/firelens/firelensconfig_unix_test.go @@ -0,0 +1,452 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package firelens + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testFluentdOptions = map[string]string{ + "@type": "kinesis_firehose", + "region": "us-west-2", + "deliver_stream_name": "my-stream", + "include-pattern": "*failure*", + "exclude-pattern": "*success*", + } + + testFluentbitOptions = map[string]string{ + "Name": "kinesis_firehose", + "region": "us-west-2", + "deliver_stream_name": "my-stream", + "include-pattern": "*failure*", + "exclude-pattern": "*success*", + } + + expectedFluentdBridgeModeConfig = ` + + @type unix + path /var/run/fluent.sock + + + + @type forward + bind 0.0.0.0 + port 24224 + + + + @type grep + + key log + pattern *failure* + + + + + @type grep + + key log + pattern *success* + + + + + @type record_transformer + + ec2_instance_id i-123456789a + ecs_cluster mycluster + ecs_task_arn arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a + ecs_task_definition taskdefinition:1 + + + +@include /tmp/dummy.conf + + + @type kinesis_firehose + deliver_stream_name my-stream + region us-west-2 + +` + expectedFluentdAWSVPCConfig = ` + + @type unix + path /var/run/fluent.sock + + + + @type forward + bind 127.0.0.1 + port 24224 + + + + @type grep + + key log + pattern *failure* + + + + + @type grep + + key log + pattern *success* + + + + + @type record_transformer + + ec2_instance_id i-123456789a + ecs_cluster mycluster + ecs_task_arn arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a + ecs_task_definition taskdefinition:1 + + + +@include /tmp/dummy.conf + + + @type kinesis_firehose + deliver_stream_name my-stream + region us-west-2 + +` + expectedFluentdDefaultModeConfig = ` + + @type unix + path /var/run/fluent.sock + + + + @type grep + + key log + pattern *failure* + + + + + @type grep + + key log + pattern *success* + + + + + @type record_transformer + + ec2_instance_id i-123456789a + 
ecs_cluster mycluster + ecs_task_arn arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a + ecs_task_definition taskdefinition:1 + + + +@include /tmp/dummy.conf + + + @type kinesis_firehose + deliver_stream_name my-stream + region us-west-2 + +` + expectedFluentbitConfig = ` +[INPUT] + Name forward + unix_path /var/run/fluent.sock + +[INPUT] + Name forward + Listen 0.0.0.0 + Port 24224 + +[INPUT] + Name tcp + Tag firelens-healthcheck + Listen 127.0.0.1 + Port 8877 + +[FILTER] + Name grep + Match container-firelens* + Regex log *failure* + +[FILTER] + Name grep + Match container-firelens* + Exclude log *success* + +[FILTER] + Name record_modifier + Match * + Record ec2_instance_id i-123456789a + Record ecs_cluster mycluster + Record ecs_task_arn arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a + Record ecs_task_definition taskdefinition:1 + +@INCLUDE /fluent-bit/etc/external.conf + +[OUTPUT] + Name null + Match firelens-healthcheck + +[OUTPUT] + Name kinesis_firehose + Match container-firelens* + deliver_stream_name my-stream + region us-west-2 +` + expectedFluentdConfigWithoutECSMetadata = ` + + @type unix + path /var/run/fluent.sock + + + + @type forward + bind 0.0.0.0 + port 24224 + + + + @type grep + + key log + pattern *failure* + + + + + @type grep + + key log + pattern *success* + + + +@include /tmp/dummy.conf + + + @type kinesis_firehose + deliver_stream_name my-stream + region us-west-2 + +` + + expectedFluentbitConfigWithoutOutputSection = ` +[INPUT] + Name forward + unix_path /var/run/fluent.sock + +[INPUT] + Name forward + Listen 0.0.0.0 + Port 24224 + +[INPUT] + Name tcp + Tag firelens-healthcheck + Listen 127.0.0.1 + Port 8877 + +[FILTER] + Name grep + Match container-firelens* + Regex log *failure* + +[FILTER] + Name grep + Match container-firelens* + Exclude log *success* + +[FILTER] + Name record_modifier + Match * + Record ec2_instance_id i-123456789a + Record ecs_cluster 
mycluster + Record ecs_task_arn arn:aws:ecs:us-east-2:01234567891011:task/mycluster/3de392df-6bfa-470b-97ed-aa6f482cd7a + Record ecs_task_definition taskdefinition:1 + +@INCLUDE /fluent-bit/etc/external.conf + +[OUTPUT] + Name null + Match firelens-healthcheck +` +) + +func TestGenerateFluentdBridgeModeConfig(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": testFluentdOptions, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentd, testRegion, bridgeNetworkMode, testFirelensOptionsFile, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentdConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentdBridgeModeConfig, configBytes.String()) +} + +func TestGenerateFluentdAWSVPCModeConfig(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": testFluentdOptions, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentd, testRegion, awsvpcNetworkMode, testFirelensOptionsFile, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentdConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentdAWSVPCConfig, configBytes.String()) +} + +func TestGenerateFluentdDefaultModeConfig(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": testFluentdOptions, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, 
FirelensConfigTypeFluentd, testRegion, "", testFirelensOptionsFile, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentdConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentdDefaultModeConfig, configBytes.String()) +} + +func TestGenerateFluentbitConfig(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": testFluentbitOptions, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentbit, testRegion, bridgeNetworkMode, testFirelensOptionsS3, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentBitConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentbitConfig, configBytes.String()) +} + +func TestGenerateFluentdConfigMissingOutputName(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": { + "key1": "value1", + }, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentd, testRegion, bridgeNetworkMode, testFirelensOptionsFile, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + _, err = firelensResource.generateConfig() + assert.Error(t, err) +} + +func TestGenerateFLuentbitConfigMissingOutputName(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": { + "key1": "value1", + }, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentbit, testRegion, 
bridgeNetworkMode, testFirelensOptionsFile, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + _, err = firelensResource.generateConfig() + assert.Error(t, err) +} + +func TestGenerateConfigWithECSMetadataDisabled(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": testFluentdOptions, + } + testFirelensOptions := map[string]string{ + "enable-ecs-log-metadata": "false", + "config-file-type": "file", + "config-file-value": "/tmp/dummy.conf", + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentd, testRegion, bridgeNetworkMode, testFirelensOptions, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentdConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentdConfigWithoutECSMetadata, configBytes.String()) +} + +func TestGenerateConfigWithoutOutputSection(t *testing.T) { + containerToLogOptions := map[string]map[string]string{ + "container": { + "include-pattern": "*failure*", + "exclude-pattern": "*success*", + }, + } + + firelensResource, err := NewFirelensResource(testCluster, testTaskARN, testTaskDefinition, testEC2InstanceID, + testDataDir, FirelensConfigTypeFluentbit, testRegion, bridgeNetworkMode, testFirelensOptionsS3, containerToLogOptions, + nil, testExecutionCredentialsID) + require.NoError(t, err) + + config, err := firelensResource.generateConfig() + assert.NoError(t, err) + + configBytes := new(bytes.Buffer) + err = config.WriteFluentBitConfig(configBytes) + assert.NoError(t, err) + assert.Equal(t, expectedFluentbitConfigWithoutOutputSection, configBytes.String()) +} diff --git a/agent/taskresource/firelens/firelensstatus.go b/agent/taskresource/firelens/firelensstatus.go new file mode 100644 
index 00000000000..79ca69301b5 --- /dev/null +++ b/agent/taskresource/firelens/firelensstatus.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package firelens + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type FirelensStatus resourcestatus.ResourceStatus + +const ( + // FirelensStatusNone is the zero state of a firelens task resource. + FirelensStatusNone FirelensStatus = iota + // FirelensCreated represents the status of a firelens task resource which has been created. + FirelensCreated + // FirelensRemoved represents the status of a firelens task resource which has been cleaned up. + FirelensRemoved +) + +var firelensStatusMap = map[string]FirelensStatus{ + "NONE": FirelensStatusNone, + "CREATED": FirelensCreated, + "REMOVED": FirelensRemoved, +} + +// String returns a human readable string representation of this object. +func (as FirelensStatus) String() string { + for k, v := range firelensStatusMap { + if v == as { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type. +func (as *FirelensStatus) MarshalJSON() ([]byte, error) { + if as == nil { + return nil, errors.New("firelens resource status is nil") + } + return []byte(`"` + as.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data. 
+func (as *FirelensStatus) UnmarshalJSON(b []byte) error {
+	if strings.ToLower(string(b)) == "null" {
+		*as = FirelensStatusNone
+		return nil
+	}
+
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		*as = FirelensStatusNone
+		return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b))
+	}
+
+	strStatus := b[1 : len(b)-1]
+	stat, ok := firelensStatusMap[string(strStatus)]
+	if !ok {
+		*as = FirelensStatusNone
+		return errors.New("resource status unmarshal: unrecognized status")
+	}
+	*as = stat
+	return nil
+}
diff --git a/agent/taskresource/firelens/firelensstatus_test.go b/agent/taskresource/firelens/firelensstatus_test.go
new file mode 100644
index 00000000000..02418c9aa6e
--- /dev/null
+++ b/agent/taskresource/firelens/firelensstatus_test.go
@@ -0,0 +1,149 @@
+// +build unit
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+ +package firelens + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InFirelensStatus FirelensStatus + OutFirelensStatus string + }{ + { + Name: "ToStringFirelensStatusNone", + InFirelensStatus: FirelensStatusNone, + OutFirelensStatus: "NONE", + }, + { + Name: "ToStringFirelensStatusCreated", + InFirelensStatus: FirelensCreated, + OutFirelensStatus: "CREATED", + }, + { + Name: "ToStringFirelensStatusRemoved", + InFirelensStatus: FirelensRemoved, + OutFirelensStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutFirelensStatus, c.InFirelensStatus.String()) + }) + } +} + +func TestMarshalNilFirelensStatus(t *testing.T) { + var status *FirelensStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalFirelensStatus(t *testing.T) { + cases := []struct { + Name string + InFirelensStatus FirelensStatus + OutFirelensStatus string + }{ + { + Name: "MarshallFirelensStatusNone", + InFirelensStatus: FirelensStatusNone, + OutFirelensStatus: "\"NONE\"", + }, + { + Name: "MarshallFirelensCreated", + InFirelensStatus: FirelensCreated, + OutFirelensStatus: "\"CREATED\"", + }, + { + Name: "MarshallFirelensRemoved", + InFirelensStatus: FirelensRemoved, + OutFirelensStatus: "\"REMOVED\"", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InFirelensStatus.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, c.OutFirelensStatus, string(bytes[:])) + }) + } +} + +func TestUnmarshalFirelensStatus(t *testing.T) { + cases := []struct { + Name string + InFirelensStatus string + OutFirelensStatus FirelensStatus + ShouldError bool + }{ + { + Name: "UnmarshalFirelensStatusNone", + InFirelensStatus: "\"NONE\"", + OutFirelensStatus: FirelensStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshalFirelensCreated", 
+ InFirelensStatus: "\"CREATED\"", + OutFirelensStatus: FirelensCreated, + ShouldError: false, + }, + { + Name: "UnmarshalFirelensRemoved", + InFirelensStatus: "\"REMOVED\"", + OutFirelensStatus: FirelensRemoved, + ShouldError: false, + }, + { + Name: "UnmarshalFirelensStatusNull", + InFirelensStatus: "null", + OutFirelensStatus: FirelensStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshalFirelensStatusNonString", + InFirelensStatus: "1", + ShouldError: true, + }, + { + Name: "UnmarshalFirelensStatusUnmappedStatus", + InFirelensStatus: "\"INVALID\"", + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + var status FirelensStatus + err := json.Unmarshal([]byte(c.InFirelensStatus), &status) + if c.ShouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, c.OutFirelensStatus, status) + } + }) + } +} diff --git a/agent/taskresource/firelens/json_unix.go b/agent/taskresource/firelens/json_unix.go new file mode 100644 index 00000000000..4e2d7631ae1 --- /dev/null +++ b/agent/taskresource/firelens/json_unix.go @@ -0,0 +1,125 @@ +// +build linux +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package firelens + +import ( + "encoding/json" + "time" + + "github.com/pkg/errors" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type firelensResourceJSON struct { + Cluster string + TaskARN string + TaskDefinition string + EC2InstanceID string + ResourceDir string + FirelensConfigType string + Region string + ECSMetadataEnabled bool + ContainersToLogOptions map[string]map[string]string + ExecutionCredentialsID string + ExternalConfigType string + ExternalConfigValue string + TerminalReason string + + CreatedAt time.Time + DesiredStatus *FirelensStatus + KnownStatus *FirelensStatus + AppliedStatus *FirelensStatus + NetworkMode string +} + +// MarshalJSON marshals a FirelensResource object into bytes of json. +func (firelens *FirelensResource) MarshalJSON() ([]byte, error) { + if firelens == nil { + return nil, errors.New("firelens resource is nil") + } + + firelens.lock.RLock() + defer firelens.lock.RUnlock() + + return json.Marshal(firelensResourceJSON{ + Cluster: firelens.cluster, + TaskARN: firelens.taskARN, + TaskDefinition: firelens.taskDefinition, + EC2InstanceID: firelens.ec2InstanceID, + ResourceDir: firelens.resourceDir, + FirelensConfigType: firelens.firelensConfigType, + Region: firelens.region, + ECSMetadataEnabled: firelens.ecsMetadataEnabled, + ContainersToLogOptions: firelens.containerToLogOptions, + ExecutionCredentialsID: firelens.executionCredentialsID, + ExternalConfigType: firelens.externalConfigType, + ExternalConfigValue: firelens.externalConfigValue, + TerminalReason: firelens.terminalReason, + CreatedAt: firelens.createdAtUnsafe, + NetworkMode: firelens.networkMode, + DesiredStatus: func() *FirelensStatus { + desiredStatus := firelens.desiredStatusUnsafe + s := FirelensStatus(desiredStatus) + return &s + }(), + KnownStatus: func() *FirelensStatus { + knownStatus := firelens.knownStatusUnsafe + s := FirelensStatus(knownStatus) + return &s + }(), + AppliedStatus: func() *FirelensStatus { + 
appliedStatus := firelens.appliedStatusUnsafe + s := FirelensStatus(appliedStatus) + return &s + }(), + }) +} + +// UnmarshalJSON unmarshals bytes of json into a FirelensResource object. +func (firelens *FirelensResource) UnmarshalJSON(b []byte) error { + if firelens == nil { + return errors.New("firelens resource is nil") + } + + temp := firelensResourceJSON{} + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + firelens.lock.Lock() + defer firelens.lock.Unlock() + + firelens.cluster = temp.Cluster + firelens.taskARN = temp.TaskARN + firelens.taskDefinition = temp.TaskDefinition + firelens.ec2InstanceID = temp.EC2InstanceID + firelens.resourceDir = temp.ResourceDir + firelens.firelensConfigType = temp.FirelensConfigType + firelens.region = temp.Region + firelens.ecsMetadataEnabled = temp.ECSMetadataEnabled + firelens.containerToLogOptions = temp.ContainersToLogOptions + firelens.executionCredentialsID = temp.ExecutionCredentialsID + firelens.externalConfigType = temp.ExternalConfigType + firelens.externalConfigValue = temp.ExternalConfigValue + firelens.terminalReason = temp.TerminalReason + firelens.createdAtUnsafe = temp.CreatedAt + firelens.desiredStatusUnsafe = resourcestatus.ResourceStatus(*temp.DesiredStatus) + firelens.knownStatusUnsafe = resourcestatus.ResourceStatus(*temp.KnownStatus) + firelens.appliedStatusUnsafe = resourcestatus.ResourceStatus(*temp.AppliedStatus) + firelens.networkMode = temp.NetworkMode + + return nil +} diff --git a/agent/taskresource/firelens/json_unix_test.go b/agent/taskresource/firelens/json_unix_test.go new file mode 100644 index 00000000000..c76e75a6211 --- /dev/null +++ b/agent/taskresource/firelens/json_unix_test.go @@ -0,0 +1,78 @@ +// +build linux,unit +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package firelens + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +func TestMarshalUnmarshalJSON(t *testing.T) { + testCreatedAt := time.Now() + testContainerToLogOptions := map[string]map[string]string{ + "container": testFluentdOptions, + } + + firelensResIn := &FirelensResource{ + cluster: testCluster, + taskARN: testTaskARN, + taskDefinition: testTaskDefinition, + ec2InstanceID: testEC2InstanceID, + resourceDir: testResourceDir, + firelensConfigType: FirelensConfigTypeFluentd, + region: testRegion, + ecsMetadataEnabled: true, + containerToLogOptions: testContainerToLogOptions, + executionCredentialsID: testExecutionCredentialsID, + externalConfigType: testExternalConfigType, + externalConfigValue: testExternalConfigValue, + terminalReason: testTerminalResason, + createdAtUnsafe: testCreatedAt, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + knownStatusUnsafe: resourcestatus.ResourceCreated, + appliedStatusUnsafe: resourcestatus.ResourceCreated, + networkMode: bridgeNetworkMode, + } + + bytes, err := json.Marshal(firelensResIn) + require.NoError(t, err) + + firelensResOut := &FirelensResource{} + err = json.Unmarshal(bytes, firelensResOut) + require.NoError(t, err) + assert.Equal(t, testCluster, firelensResOut.cluster) + assert.Equal(t, testTaskARN, firelensResOut.taskARN) + assert.Equal(t, testTaskDefinition, firelensResOut.taskDefinition) + assert.Equal(t, testRegion, firelensResOut.region) + assert.True(t, 
firelensResOut.ecsMetadataEnabled) + assert.Equal(t, testContainerToLogOptions, firelensResOut.containerToLogOptions) + assert.Equal(t, testExecutionCredentialsID, firelensResOut.executionCredentialsID) + assert.Equal(t, testExternalConfigType, firelensResOut.externalConfigType) + assert.Equal(t, testExternalConfigValue, firelensResOut.externalConfigValue) + assert.Equal(t, testTerminalResason, firelensResOut.terminalReason) + // Can't use assert.Equal for time here. See https://github.com/golang/go/issues/22957. + assert.True(t, testCreatedAt.Equal(firelensResOut.createdAtUnsafe)) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResOut.desiredStatusUnsafe) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResOut.knownStatusUnsafe) + assert.Equal(t, resourcestatus.ResourceCreated, firelensResOut.appliedStatusUnsafe) + assert.Equal(t, testTerminalResason, firelensResOut.terminalReason) + assert.Equal(t, bridgeNetworkMode, firelensResOut.networkMode) +} diff --git a/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_unsupported.go b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_unsupported.go new file mode 100644 index 00000000000..2ba0239b986 --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_unsupported.go @@ -0,0 +1,182 @@ +// +build !windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package fsxwindowsfileserver + +import ( + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + fsxfactory "github.com/aws/amazon-ecs-agent/agent/fsx/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/pkg/errors" +) + +// FSxWindowsFileServerResource represents a fsxwindowsfileserver resource +type FSxWindowsFileServerResource struct { +} + +// FSxWindowsFileServerVolumeConfig represents fsxWindowsFileServer volume configuration. +type FSxWindowsFileServerVolumeConfig struct { +} + +// NewFSxWindowsFileServerResource creates a new FSxWindowsFileServerResource object +func NewFSxWindowsFileServerResource( + taskARN, + region string, + name string, + volumeType string, + volumeConfig *FSxWindowsFileServerVolumeConfig, + hostPath string, + executionCredentialsID string, + credentialsManager credentials.Manager, + ssmClientCreator ssmfactory.SSMClientCreator, + asmClientCreator asmfactory.ClientCreator, + fsxClientCreator fsxfactory.FSxClientCreator) (*FSxWindowsFileServerResource, error) { + return nil, errors.New("not supported") +} + +func (fv *FSxWindowsFileServerResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (fv *FSxWindowsFileServerResource) GetTerminalReason() string { + return "undefined" +} + +// GetDesiredStatus safely returns the desired status of the task +func (fv *FSxWindowsFileServerResource) 
GetDesiredStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetDesiredStatus safely sets the desired status of the resource +func (fv *FSxWindowsFileServerResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { +} + +// DesiredTerminal returns true if the fsxwindowsfileserver resource's desired status is REMOVED +func (fv *FSxWindowsFileServerResource) DesiredTerminal() bool { + return false +} + +// KnownCreated returns true if the fsxwindowsfileserver resource's known status is CREATED +func (fv *FSxWindowsFileServerResource) KnownCreated() bool { + return false +} + +// TerminalStatus returns the last transition state of fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. +func (fv *FSxWindowsFileServerResource) NextKnownState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// ApplyTransition calls the function required to move to the specified status +func (fv *FSxWindowsFileServerResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + return errors.New("not supported") +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (fv *FSxWindowsFileServerResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// SetKnownStatus safely sets the currently known status of the resource +func (fv *FSxWindowsFileServerResource) SetKnownStatus(status resourcestatus.ResourceStatus) { +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (fv *FSxWindowsFileServerResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + return false +} + +// GetKnownStatus safely returns the 
currently known status of the task +func (fv *FSxWindowsFileServerResource) GetKnownStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatusNone +} + +// StatusString returns the string of the fsxwindowsfileserver resource status +func (fv *FSxWindowsFileServerResource) StatusString(status resourcestatus.ResourceStatus) string { + return "undefined" +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (fv *FSxWindowsFileServerResource) SetCreatedAt(createdAt time.Time) { +} + +// GetCreatedAt returns the timestamp for resource's creation time +func (fv *FSxWindowsFileServerResource) GetCreatedAt() time.Time { + return time.Time{} +} + +// Source returns the host path of the fsxwindowsfileserver resource which is used as the source of the volume mount +func (cfg *FSxWindowsFileServerVolumeConfig) Source() string { + return "undefined" +} + +// GetName safely returns the name of the resource +func (fv *FSxWindowsFileServerResource) GetName() string { + return "undefined" +} + +// Create is used to create all the fsxwindowsfileserver resources for a given task +func (fv *FSxWindowsFileServerResource) Create() error { + return errors.New("not supported") +} + +// Cleanup removes the fsxwindowsfileserver resources created for the task +func (fv *FSxWindowsFileServerResource) Cleanup() error { + return errors.New("not supported") +} + +// MarshalJSON serialises the FSxWindowsFileServerResourceJSON struct to JSON +func (fv *FSxWindowsFileServerResource) MarshalJSON() ([]byte, error) { + return nil, errors.New("not supported") +} + +// UnmarshalJSON deserialises the raw JSON to a FSxWindowsFileServerResourceJSON struct +func (fv *FSxWindowsFileServerResource) UnmarshalJSON(b []byte) error { + return errors.New("not supported") +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (fv *FSxWindowsFileServerResource) GetAppliedStatus() resourcestatus.ResourceStatus { + return 
resourcestatus.ResourceStatusNone +} + +func (fv *FSxWindowsFileServerResource) DependOnTaskNetwork() bool { + return false +} + +func (fv *FSxWindowsFileServerResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (fv *FSxWindowsFileServerResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows.go b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows.go new file mode 100644 index 00000000000..4ad0959f9f5 --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows.go @@ -0,0 +1,706 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package fsxwindowsfileserver + +import ( + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/aws/amazon-ecs-agent/agent/asm" + "github.com/aws/amazon-ecs-agent/agent/ssm" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/aws-sdk-go/aws/arn" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/fsx" + fsxfactory "github.com/aws/amazon-ecs-agent/agent/fsx/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + resourceProvisioningError = "VolumeError: Agent could not create task's volume resources" +) + +// FSxWindowsFileServerResource represents a fsxwindowsfileserver resource +type FSxWindowsFileServerResource struct { + Name string + VolumeType string + VolumeConfig FSxWindowsFileServerVolumeConfig + taskARN string + region string + executionCredentialsID string + credentialsManager credentials.Manager + + // ssmClientCreator is a factory interface that creates new SSM clients. + ssmClientCreator ssmfactory.SSMClientCreator + // asmClientCreator is a factory interface that creates new ASM clients. + asmClientCreator asmfactory.ClientCreator + // fsxClientCreator is a factory interface that creates new FSx clients. + fsxClientCreator fsxfactory.FSxClientCreator + + // fields that are set later during resource creation + FSxWindowsFileServerDNSName string + Credentials FSxWindowsFileServerCredentials + + // Fields for the common functionality of task resource. 
Access to these fields are protected by lock. + createdAtUnsafe time.Time + knownStatusUnsafe resourcestatus.ResourceStatus + desiredStatusUnsafe resourcestatus.ResourceStatus + appliedStatusUnsafe resourcestatus.ResourceStatus + statusToTransitions map[resourcestatus.ResourceStatus]func() error + terminalReason string + terminalReasonOnce sync.Once + lock sync.RWMutex +} + +// FSxWindowsFileServerVolumeConfig represents fsxWindowsFileServer volume configuration. +type FSxWindowsFileServerVolumeConfig struct { + FileSystemID string `json:"fileSystemId,omitempty"` + RootDirectory string `json:"rootDirectory,omitempty"` + AuthConfig FSxWindowsFileServerAuthConfig `json:"authorizationConfig,omitempty"` + // HostPath is used for bind mount as part of HostConfig. + HostPath string `json:"fsxWindowsFileServerHostPath"` +} + +// FSxWindowsFileServerAuthConfig contains auth config for a fsxWindowsFileServer volume. +type FSxWindowsFileServerAuthConfig struct { + CredentialsParameter string `json:"credentialsParameter,omitempty"` + Domain string `json:"domain,omitempty"` +} + +// FSxWindowsFileServerCredentials represents user credentials for accessing the fsxWindowsFileServer volume +type FSxWindowsFileServerCredentials struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// NewFSxWindowsFileServerResource creates a new FSxWindowsFileServerResource object +func NewFSxWindowsFileServerResource( + taskARN, + region string, + name string, + volumeType string, + volumeConfig *FSxWindowsFileServerVolumeConfig, + executionCredentialsID string, + credentialsManager credentials.Manager, + ssmClientCreator ssmfactory.SSMClientCreator, + asmClientCreator asmfactory.ClientCreator, + fsxClientCreator fsxfactory.FSxClientCreator) (*FSxWindowsFileServerResource, error) { + + fv := &FSxWindowsFileServerResource{ + Name: name, + VolumeType: volumeType, + VolumeConfig: FSxWindowsFileServerVolumeConfig{ + FileSystemID: volumeConfig.FileSystemID, + 
RootDirectory: volumeConfig.RootDirectory, + AuthConfig: volumeConfig.AuthConfig, + }, + taskARN: taskARN, + region: region, + executionCredentialsID: executionCredentialsID, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + asmClientCreator: asmClientCreator, + fsxClientCreator: fsxClientCreator, + } + + fv.initStatusToTransition() + return fv, nil +} + +func (fv *FSxWindowsFileServerResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + + fv.credentialsManager = resourceFields.CredentialsManager + fv.ssmClientCreator = resourceFields.SSMClientCreator + fv.asmClientCreator = resourceFields.ASMClientCreator + fv.fsxClientCreator = resourceFields.FSxClientCreator + fv.initStatusToTransition() +} + +func (fv *FSxWindowsFileServerResource) initStatusToTransition() { + statusToTransitions := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeCreated): fv.Create, + } + + fv.statusToTransitions = statusToTransitions +} + +// DesiredTerminal returns true if the fsxwindowsfileserver's desired status is REMOVED +func (fv *FSxWindowsFileServerResource) DesiredTerminal() bool { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.desiredStatusUnsafe == resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeRemoved) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (fv *FSxWindowsFileServerResource) GetTerminalReason() string { + if fv.terminalReason == "" { + return resourceProvisioningError + } + return fv.terminalReason +} + +func (fv *FSxWindowsFileServerResource) setTerminalReason(reason string) { + fv.terminalReasonOnce.Do(func() { + seelog.Debugf("fsxwindowsfileserver resource [%s]: setting terminal reason for fsxwindowsfileserver resource in task: [%s]", fv.Name, fv.taskARN) + fv.terminalReason = reason + }) +} + +// 
GetDesiredStatus safely returns the desired status of the task +func (fv *FSxWindowsFileServerResource) GetDesiredStatus() resourcestatus.ResourceStatus { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.desiredStatusUnsafe +} + +// SetDesiredStatus safely sets the desired status of the resource +func (fv *FSxWindowsFileServerResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + fv.lock.Lock() + defer fv.lock.Unlock() + + fv.desiredStatusUnsafe = status +} + +// GetKnownStatus safely returns the currently known status of the task +func (fv *FSxWindowsFileServerResource) GetKnownStatus() resourcestatus.ResourceStatus { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.knownStatusUnsafe +} + +// SetKnownStatus safely sets the currently known status of the resource +func (fv *FSxWindowsFileServerResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + fv.lock.Lock() + defer fv.lock.Unlock() + + fv.knownStatusUnsafe = status + fv.updateAppliedStatusUnsafe(status) +} + +// KnownCreated returns true if the fsxwindowsfileserver's known status is CREATED +func (fv *FSxWindowsFileServerResource) KnownCreated() bool { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.knownStatusUnsafe == resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeCreated) +} + +// TerminalStatus returns the last transition state of fsxwindowsfileserver +func (fv *FSxWindowsFileServerResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. 
+func (fv *FSxWindowsFileServerResource) NextKnownState() resourcestatus.ResourceStatus { + return fv.GetKnownStatus() + 1 +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (fv *FSxWindowsFileServerResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeCreated) +} + +// ApplyTransition calls the function required to move to the specified status +func (fv *FSxWindowsFileServerResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := fv.statusToTransitions[nextState] + if !ok { + err := errors.Errorf("resource [%s]: transition to %s impossible", fv.Name, + fv.StatusString(nextState)) + fv.setTerminalReason(err.Error()) + return err + } + + return transitionFunc() +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (fv *FSxWindowsFileServerResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + fv.lock.Lock() + defer fv.lock.Unlock() + + if fv.appliedStatusUnsafe != resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeStatusNone) { + // return false to indicate the set operation failed + return false + } + + fv.appliedStatusUnsafe = status + return true +} + +// StatusString returns the string of the fsxwindowsfileserver resource status +func (fv *FSxWindowsFileServerResource) StatusString(status resourcestatus.ResourceStatus) string { + return FSxWindowsFileServerVolumeStatus(status).String() +} + +// GetCreatedAt gets the timestamp for resource's creation time +func (fv *FSxWindowsFileServerResource) GetCreatedAt() time.Time { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.createdAtUnsafe +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (fv *FSxWindowsFileServerResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + fv.lock.Lock() + defer 
fv.lock.Unlock() + + fv.createdAtUnsafe = createdAt +} + +// Source returns the host path of the fsxwindowsfileserver resource which is used as the source of the volume mount +func (cfg *FSxWindowsFileServerVolumeConfig) Source() string { + return utils.GetCanonicalPath(cfg.HostPath) +} + +// GetName safely returns the name of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) GetName() string { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.Name +} + +// GetVolumeConfig safely returns the volume config of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) GetVolumeConfig() FSxWindowsFileServerVolumeConfig { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.VolumeConfig +} + +// GetCredentials safely returns the user credentials of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) GetCredentials() FSxWindowsFileServerCredentials { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.Credentials +} + +// GetFileSystemDNSName safely returns the filesystem dns name of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) GetFileSystemDNSName() string { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.FSxWindowsFileServerDNSName +} + +// getExecutionCredentialsID safely returns the execution role's credential ID +func (fv *FSxWindowsFileServerResource) GetExecutionCredentialsID() string { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.executionCredentialsID +} + +// SetCredentials safely updates the user credentials of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) SetCredentials(credentials FSxWindowsFileServerCredentials) { + fv.lock.Lock() + defer fv.lock.Unlock() + + fv.Credentials = credentials +} + +// SetFileSystemDNSName safely updates the filesystem dns name of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) SetFileSystemDNSName(DNSName string) { + fv.lock.Lock() + defer 
fv.lock.Unlock() + + fv.FSxWindowsFileServerDNSName = DNSName +} + +// SetRootDirectory safely updates volume config's root directory of the fsxwindowsfileserver resource +func (fv *FSxWindowsFileServerResource) SetRootDirectory(rootDirectory string) { + fv.lock.Lock() + defer fv.lock.Unlock() + + fv.VolumeConfig.RootDirectory = rootDirectory +} + +// GetHostPath safely returns the volume config's host path +func (fv *FSxWindowsFileServerResource) GetHostPath() string { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.VolumeConfig.HostPath +} + +// SetHostPath safely updates volume config's host path +func (fv *FSxWindowsFileServerResource) SetHostPath(hostPath string) { + fv.lock.Lock() + defer fv.lock.Unlock() + + fv.VolumeConfig.HostPath = hostPath +} + +// Create is used to create all the fsxwindowsfileserver resources for a given task +func (fv *FSxWindowsFileServerResource) Create() error { + var err error + volumeConfig := fv.GetVolumeConfig() + + var iamCredentials credentials.IAMRoleCredentials + executionCredentials, ok := fv.credentialsManager.GetTaskCredentials(fv.GetExecutionCredentialsID()) + if !ok { + err = errors.New("fsxwindowsfileserver resource: unable to find execution role credentials") + fv.setTerminalReason(err.Error()) + return err + } + iamCredentials = executionCredentials.GetIAMRoleCredentials() + + err = fv.retrieveCredentials(volumeConfig.AuthConfig.CredentialsParameter, iamCredentials) + if err != nil { + fv.setTerminalReason(err.Error()) + return err + } + + err = fv.retrieveFileSystemDNSName(volumeConfig.FileSystemID, iamCredentials) + if err != nil { + fv.setTerminalReason(err.Error()) + return err + } + + fv.handleRootDirectory(volumeConfig.RootDirectory) + + creds := fv.GetCredentials() + if creds.Username == "" || creds.Password == "" { + return errors.New("invalid credentials for mounting fsxwindowsfileserver volume on the container instance") + } + // formatting to keep powershell happy + username := 
volumeConfig.AuthConfig.Domain + `\` + creds.Username + remotePath := fmt.Sprintf(`\\%s`, fv.GetFileSystemDNSName()) + if volumeConfig.RootDirectory != "" { + remotePath = fmt.Sprintf(`%s\%s`, remotePath, volumeConfig.RootDirectory) + } else { + remotePath = fmt.Sprintf(`%s\share`, remotePath) + } + + password := creds.Password + + err = fv.performHostMount(remotePath, username, password) + if err != nil { + fv.setTerminalReason(err.Error()) + return err + } + + return nil +} + +func (fv *FSxWindowsFileServerResource) retrieveCredentials(credentialsParameterARN string, iamCredentials credentials.IAMRoleCredentials) error { + parsedARN, err := arn.Parse(credentialsParameterARN) + if err != nil { + fv.setTerminalReason(err.Error()) + return err + } + + parsedARNService := parsedARN.Service + switch parsedARNService { + case "ssm": + err = fv.retrieveSSMCredentials(credentialsParameterARN, iamCredentials) + if err != nil { + seelog.Errorf("Failed to retrieve credentials from ssm: %v", err) + fv.setTerminalReason(err.Error()) + return err + } + case "secretsmanager": + err = fv.retrieveASMCredentials(credentialsParameterARN, iamCredentials) + if err != nil { + seelog.Errorf("Failed to retrieve credentials from asm: %v", err) + fv.setTerminalReason(err.Error()) + return err + } + default: + err = errors.New("unsupported credentialsParameter, only ssm/secretsmanager ARNs are valid") + fv.setTerminalReason(err.Error()) + return err + } + + return nil +} + +func (fv *FSxWindowsFileServerResource) retrieveSSMCredentials(credentialsParameterARN string, iamCredentials credentials.IAMRoleCredentials) error { + parsedARN, err := arn.Parse(credentialsParameterARN) + if err != nil { + return err + } + + ssmClient := fv.ssmClientCreator.NewSSMClient(fv.region, iamCredentials) + ssmParam := filepath.Base(parsedARN.Resource) + ssmParams := []string{ssmParam} + + ssmParamMap, err := ssm.GetParametersFromSSM(ssmParams, ssmClient) + if err != nil { + return err + } + + ssmParamData, _ 
:= ssmParamMap[ssmParam] + creds := FSxWindowsFileServerCredentials{} + + if err := json.Unmarshal([]byte(ssmParamData), &creds); err != nil { + return err + } + + fv.SetCredentials(creds) + return nil +} + +func (fv *FSxWindowsFileServerResource) retrieveASMCredentials(credentialsParameterARN string, iamCredentials credentials.IAMRoleCredentials) error { + _, err := arn.Parse(credentialsParameterARN) + if err != nil { + return err + } + + asmClient := fv.asmClientCreator.NewASMClient(fv.region, iamCredentials) + asmData, err := asm.GetSecretFromASM(credentialsParameterARN, asmClient) + if err != nil { + return err + } + + creds := FSxWindowsFileServerCredentials{} + if err := json.Unmarshal([]byte(asmData), &creds); err != nil { + return err + } + + fv.SetCredentials(creds) + return nil +} + +func (fv *FSxWindowsFileServerResource) retrieveFileSystemDNSName(fileSystemId string, iamCredentials credentials.IAMRoleCredentials) error { + fileSystemIds := []string{fileSystemId} + fsxClient := fv.fsxClientCreator.NewFSxClient(fv.region, iamCredentials) + fileSystemDNSMap, err := fsx.GetFileSystemDNSNames(fileSystemIds, fsxClient) + if err != nil { + fv.setTerminalReason(err.Error()) + return err + } + + fv.SetFileSystemDNSName(fileSystemDNSMap[fileSystemId]) + return nil +} + +var mountLock sync.Mutex +var DriveLetterAvailable = utils.IsAvailableDriveLetter +var execCommand = exec.Command + +func (fv *FSxWindowsFileServerResource) performHostMount(remotePath string, username string, password string) error { + // mountLock is a package-level mutex lock. + // It is used to avoid a scenario where concurrent tasks get the same drive letter and perform host mount at the same time. + // A drive letter is free until host mount is performed, a lock prevents multiple tasks getting the same drive letter. 
+ mountLock.Lock() + defer mountLock.Unlock() + + hostPath, err := utils.FindUnusedDriveLetter() + if err != nil { + return err + } + fv.SetHostPath(hostPath) + + // formatting to keep powershell happy + localPathArg := fmt.Sprintf("-LocalPath \"%s\"", strings.Trim(hostPath, `\`)) + + // remove mount if localPath is not available + // handling case where agent crashes after mount and before resource status is updated + if !(DriveLetterAvailable(hostPath)) { + err = fv.removeHostMount(localPathArg) + if err != nil { + seelog.Errorf("Failed to remove existing fsxwindowsfileserver resource mount on the container instance: %v", err) + fv.setTerminalReason(err.Error()) + return err + } + } + + // formatting to keep powershell happy + creds := fmt.Sprintf("-Credential $(New-Object System.Management.Automation.PSCredential(\"%s\", $(ConvertTo-SecureString \"%s\" -AsPlainText -Force)))", username, password) + remotePathArg := fmt.Sprintf("-RemotePath \"%s\"", remotePath) + + // New-SmbGlobalMapping cmdlet creates an SMB mapping between the container instance + // and SMB share (FSx for Windows File Server file-system) + cmd := execCommand("powershell.exe", + "New-SmbGlobalMapping", + localPathArg, + remotePathArg, + creds, + "-Persistent $true", + "-RequirePrivacy $true", + "-ErrorAction Stop") + + _, err = cmd.CombinedOutput() + if err != nil { + seelog.Errorf("Failed to map fsxwindowsfileserver resource on the container instance: %v", err) + fv.setTerminalReason(err.Error()) + return err + } + + // Get-SmbGlobalMapping cmdlet retrieves existing SMB mappings + cmd = execCommand("powershell.exe", "Get-SmbGlobalMapping", localPathArg) + + _, err = cmd.CombinedOutput() + if err != nil { + return errors.New("failed to verify fsxwindowsfileserver resource map on the container instance") + } + + return nil +} + +func (fv *FSxWindowsFileServerResource) handleRootDirectory(rootDirectory string) { + dir := utils.GetCanonicalPath(rootDirectory) + dir = strings.Trim(dir, "\\") + + 
fv.SetRootDirectory(dir) + return +} + +func (fv *FSxWindowsFileServerResource) removeHostMount(localPathArg string) error { + // Remove-SmbGlobalMapping cmdlet removes the existing Server Message Block (SMB) mapping between the + // container instance and SMB share (FSx for Windows File Server file-system) + cmd := execCommand("powershell.exe", "Remove-SmbGlobalMapping", localPathArg, "-Force") + + _, err := cmd.CombinedOutput() + if err != nil { + return err + } + return nil +} + +func (fv *FSxWindowsFileServerResource) Cleanup() error { + localPath := fv.GetHostPath() + // formatting to keep powershell happy + localPathArg := fmt.Sprintf("-LocalPath \"%s\"", strings.Trim(localPath, `\`)) + err := fv.removeHostMount(localPathArg) + if err != nil { + seelog.Warnf("Unable to clear fsxwindowsfileserver host path %s for task %s: %s", localPath, fv.taskARN, err) + } + return nil +} + +// FSxWindowsFileServerResourceJSON is the json representation of the fsxwindowsfileserver resource +type FSxWindowsFileServerResourceJSON struct { + Name string `json:"name"` + VolumeConfig FSxWindowsFileServerVolumeConfig `json:"fsxWindowsFileServerVolumeConfiguration"` + TaskARN string `json:"taskARN"` + ExecutionCredentialsID string `json:"executionCredentialsID"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *FSxWindowsFileServerVolumeStatus `json:"desiredStatus"` + KnownStatus *FSxWindowsFileServerVolumeStatus `json:"knownStatus"` +} + +// MarshalJSON serialises the FSxWindowsFileServerResourceJSON struct to JSON +func (fv *FSxWindowsFileServerResource) MarshalJSON() ([]byte, error) { + if fv == nil { + return nil, errors.New("fsxwindowfileserver resource is nil") + } + createdAt := fv.GetCreatedAt() + return json.Marshal(FSxWindowsFileServerResourceJSON{ + Name: fv.Name, + VolumeConfig: fv.VolumeConfig, + TaskARN: fv.taskARN, + ExecutionCredentialsID: fv.executionCredentialsID, + CreatedAt: &createdAt, + DesiredStatus: func() *FSxWindowsFileServerVolumeStatus 
{ + desiredState := fv.GetDesiredStatus() + s := FSxWindowsFileServerVolumeStatus(desiredState) + return &s + }(), + KnownStatus: func() *FSxWindowsFileServerVolumeStatus { + knownState := fv.GetKnownStatus() + s := FSxWindowsFileServerVolumeStatus(knownState) + return &s + }(), + }) +} + +// UnmarshalJSON deserialises the raw JSON to a FSxWindowsFileServerResourceJSON struct +func (fv *FSxWindowsFileServerResource) UnmarshalJSON(b []byte) error { + temp := FSxWindowsFileServerResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + fv.Name = temp.Name + fv.VolumeConfig = temp.VolumeConfig + fv.taskARN = temp.TaskARN + fv.executionCredentialsID = temp.ExecutionCredentialsID + if temp.DesiredStatus != nil { + fv.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + fv.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() { + fv.SetCreatedAt(*temp.CreatedAt) + } + return nil +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (fv *FSxWindowsFileServerResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if fv.appliedStatusUnsafe == resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeStatusNone) { + return + } + + // Check if the resource transition has already finished + if fv.appliedStatusUnsafe <= knownStatus { + fv.appliedStatusUnsafe = resourcestatus.ResourceStatus(FSxWindowsFileServerVolumeStatusNone) + } +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (fv *FSxWindowsFileServerResource) GetAppliedStatus() resourcestatus.ResourceStatus { + fv.lock.RLock() + defer fv.lock.RUnlock() + + return fv.appliedStatusUnsafe +} + +func (fv *FSxWindowsFileServerResource) DependOnTaskNetwork() bool { + return false +} + +// BuildContainerDependency sets the container dependencies of the resource. 
+func (fv *FSxWindowsFileServerResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { + return +} + +// GetContainerDependencies returns the container dependencies of the resource. +func (fv *FSxWindowsFileServerResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows_test.go b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows_test.go new file mode 100644 index 00000000000..2c9eee70ebd --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserver_windows_test.go @@ -0,0 +1,711 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package fsxwindowsfileserver + +import ( + "encoding/json" + "os" + "os/exec" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/utils" + + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + mock_asm_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks" + mock_secretsmanageriface "github.com/aws/amazon-ecs-agent/agent/asm/mocks" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_fsx_factory "github.com/aws/amazon-ecs-agent/agent/fsx/factory/mocks" + mock_fsxiface "github.com/aws/amazon-ecs-agent/agent/fsx/mocks" + mock_ssm_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + mock_ssmiface "github.com/aws/amazon-ecs-agent/agent/ssm/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + taskARN = "arn:aws:ecs:us-west-2:123456789012:task/12345-678901234-56789" + executionCredentialsID = "exec-creds-id" + fileSystemId = "fs-12345678" + rootDirectory = `\test\directory` + credentialsParameter = "arn" + domain = "testdomain" + hostPath = `Z:\` +) + +func setup(t *testing.T) ( + *FSxWindowsFileServerResource, *mock_credentials.MockManager, *mock_ssm_factory.MockSSMClientCreator, + *mock_asm_factory.MockClientCreator, *mock_fsx_factory.MockFSxClientCreator, *mock_ssmiface.MockSSMClient, + *mock_secretsmanageriface.MockSecretsManagerAPI, *mock_fsxiface.MockFSxClient) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := 
mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + mockFSxClient := mock_fsxiface.NewMockFSxClient(ctrl) + + fv := &FSxWindowsFileServerResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + taskARN: taskARN, + } + fv.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + ASMClientCreator: asmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + return fv, credentialsManager, ssmClientCreator, asmClientCreator, fsxClientCreator, mockSSMClient, mockASMClient, mockFSxClient +} + +func TestInitialize(t *testing.T) { + fv, _, _, _, _, _, _, _ := setup(t) + assert.NotNil(t, fv.credentialsManager) + assert.NotNil(t, fv.ssmClientCreator) + assert.NotNil(t, fv.asmClientCreator) + assert.NotNil(t, fv.fsxClientCreator) + assert.NotNil(t, fv.statusToTransitions) +} + +func TestMarshalUnmarshalJSON(t *testing.T) { + volumeConfig := FSxWindowsFileServerVolumeConfig{ + FileSystemID: "fs-12345678", + RootDirectory: rootDirectory, + AuthConfig: FSxWindowsFileServerAuthConfig{ + CredentialsParameter: credentialsParameter, + Domain: domain, + }, + HostPath: hostPath, + } + fsxWindowsFileServerIn := &FSxWindowsFileServerResource{ + Name: "test", + VolumeConfig: volumeConfig, + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + createdAtUnsafe: time.Time{}, + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + } + + bytes, err := json.Marshal(fsxWindowsFileServerIn) + require.NoError(t, err) + 
+ fsxWindowsFileServerOut := &FSxWindowsFileServerResource{} + err = json.Unmarshal(bytes, fsxWindowsFileServerOut) + require.NoError(t, err) + assert.Equal(t, fsxWindowsFileServerIn.Name, fsxWindowsFileServerOut.Name) + assert.Equal(t, fsxWindowsFileServerIn.VolumeConfig, fsxWindowsFileServerOut.VolumeConfig) + assert.Equal(t, fsxWindowsFileServerIn.taskARN, fsxWindowsFileServerOut.taskARN) + assert.Equal(t, fsxWindowsFileServerIn.executionCredentialsID, fsxWindowsFileServerOut.executionCredentialsID) + assert.WithinDuration(t, fsxWindowsFileServerIn.createdAtUnsafe, fsxWindowsFileServerOut.createdAtUnsafe, time.Microsecond) + assert.Equal(t, fsxWindowsFileServerIn.desiredStatusUnsafe, fsxWindowsFileServerOut.desiredStatusUnsafe) + assert.Equal(t, fsxWindowsFileServerIn.knownStatusUnsafe, fsxWindowsFileServerOut.knownStatusUnsafe) +} + +// TODO: Make tests table driven +func TestRetrieveCredentials(t *testing.T) { + fv, _, ssmClientCreator, _, _, mockSSMClient, _, _ := setup(t) + + credentialsParameterARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + + ssmTestData := "{\n\"username\": \"user\", \n\"password\": \"pass\"\n}" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(ssmTestData), + }, + }, + } + + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + gomock.InOrder( + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + ) + + err := fv.retrieveCredentials(credentialsParameterARN, iamCredentials) + assert.NoError(t, err) + + credentials := fv.Credentials + assert.Equal(t, "user", credentials.Username) + assert.Equal(t, "pass", credentials.Password) +} + +func TestRetrieveSSMCredentials(t *testing.T) { + fv, _, ssmClientCreator, _, _, mockSSMClient, _, _ := 
setup(t) + credentialsParameterARN := "arn:aws:ssm:us-west-2:123456789012:parameter/test" + + ssmTestData := "{\n\"username\": \"user\", \n\"password\": \"pass\"\n}" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(ssmTestData), + }, + }, + } + + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + gomock.InOrder( + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + ) + + err := fv.retrieveSSMCredentials(credentialsParameterARN, iamCredentials) + assert.NoError(t, err) + + credentials := fv.Credentials + assert.Equal(t, "user", credentials.Username) + assert.Equal(t, "pass", credentials.Password) +} + +func TestRetrieveASMCredentials(t *testing.T) { + fv, _, _, asmClientCreator, _, _, mockASMClient, _ := setup(t) + credentialsParameterARN := "arn:aws:secretsmanager:us-east-1:123456789012:secret:testing/some-random-name" + + asmTestData := "{\"username\":\"user\",\"password\":\"pass\"}" + asmClientOutput := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmTestData), + } + + gomock.InOrder( + asmClientCreator.EXPECT().NewASMClient(gomock.Any(), gomock.Any()).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, aws.StringValue(in.SecretId), credentialsParameterARN) + }).Return(asmClientOutput, nil), + ) + + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + err := fv.retrieveASMCredentials(credentialsParameterARN, iamCredentials) + assert.NoError(t, err) + + credentials := fv.Credentials + assert.Equal(t, "user", credentials.Username) + assert.Equal(t, "pass", credentials.Password) +} + +func 
TestRetrieveCredentialsInvalidService(t *testing.T) { + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + credentialsParameterARN := "arn:aws:foo:us-east-1:123456789012:parameter/test" + + var termReason string + fv := &FSxWindowsFileServerResource{ + terminalReason: termReason, + } + + err := fv.retrieveCredentials(credentialsParameterARN, iamCredentials) + assert.Error(t, err) +} + +func TestRetrieveSSMCredentialsARNParseErr(t *testing.T) { + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + credentialsParameterARN := "arn:aws:ssm:parameter/test" + + var termReason string + fv := &FSxWindowsFileServerResource{ + terminalReason: termReason, + } + + err := fv.retrieveSSMCredentials(credentialsParameterARN, iamCredentials) + assert.Error(t, err) +} + +func TestRetrieveASMCredentialsARNParseErr(t *testing.T) { + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + credentialsParameterARN := "arn:aws:secretsmanager:some-random-name" + + var termReason string + fv := &FSxWindowsFileServerResource{ + terminalReason: termReason, + } + + err := fv.retrieveASMCredentials(credentialsParameterARN, iamCredentials) + assert.Error(t, err) +} + +func TestRetrieveFSxWindowsFileServerDNSName(t *testing.T) { + fv, _, _, _, fsxClientCreator, _, _, mockFSxClient := setup(t) + fsxClientOutput := &fsx.DescribeFileSystemsOutput{ + FileSystems: []*fsx.FileSystem{ + { + FileSystemId: aws.String(fileSystemId), + DNSName: aws.String("test"), + }, + }, + } + + gomock.InOrder( + fsxClientCreator.EXPECT().NewFSxClient(gomock.Any(), gomock.Any()).Return(mockFSxClient), + mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(fsxClientOutput, nil).Times(1), + ) + + iamCredentials := credentials.IAMRoleCredentials{ + CredentialsID: "test-cred-id", + } + + err := fv.retrieveFileSystemDNSName(fileSystemId, iamCredentials) + assert.NoError(t, err) + + DNSName := 
fv.FSxWindowsFileServerDNSName + assert.Equal(t, "test", DNSName) +} + +func TestHandleRootDirectory(t *testing.T) { + fv1 := &FSxWindowsFileServerResource{} + fv1.handleRootDirectory("some/path/") + + fv2 := &FSxWindowsFileServerResource{} + fv2.handleRootDirectory("\\some\\path") + + fv3 := &FSxWindowsFileServerResource{} + fv3.handleRootDirectory("\\some/path") + + fv4 := &FSxWindowsFileServerResource{} + fv4.handleRootDirectory("\\") + + assert.Equal(t, "some\\path", fv1.VolumeConfig.RootDirectory) + assert.Equal(t, "some\\path", fv2.VolumeConfig.RootDirectory) + assert.Equal(t, "some\\path", fv3.VolumeConfig.RootDirectory) + assert.Equal(t, "", fv4.VolumeConfig.RootDirectory) +} + +func TestGetName(t *testing.T) { + fv := &FSxWindowsFileServerResource{ + Name: "test", + } + + assert.Equal(t, "test", fv.GetName()) +} + +func TestGetVolumeConfig(t *testing.T) { + fv := &FSxWindowsFileServerResource{ + VolumeConfig: FSxWindowsFileServerVolumeConfig{ + FileSystemID: "fs-12345678", + RootDirectory: "root", + AuthConfig: FSxWindowsFileServerAuthConfig{ + CredentialsParameter: credentialsParameter, + Domain: "test", + }, + HostPath: hostPath, + }, + } + + volumeConfig := fv.GetVolumeConfig() + assert.Equal(t, "fs-12345678", volumeConfig.FileSystemID) + assert.Equal(t, "root", volumeConfig.RootDirectory) + assert.Equal(t, "arn", volumeConfig.AuthConfig.CredentialsParameter) + assert.Equal(t, "test", volumeConfig.AuthConfig.Domain) + assert.Equal(t, `Z:\`, volumeConfig.HostPath) +} + +func TestGetCredentials(t *testing.T) { + fv := &FSxWindowsFileServerResource{ + Credentials: FSxWindowsFileServerCredentials{ + Username: "user", + Password: "pass", + }, + } + + credentials := fv.GetCredentials() + assert.Equal(t, "user", credentials.Username) + assert.Equal(t, "pass", credentials.Password) +} + +func TestGetFileSystemDNSName(t *testing.T) { + fv := &FSxWindowsFileServerResource{ + FSxWindowsFileServerDNSName: "test", + } + + assert.Equal(t, "test", 
fv.GetFileSystemDNSName()) +} + +func TestSetCredentials(t *testing.T) { + fv := &FSxWindowsFileServerResource{} + fv.SetCredentials(FSxWindowsFileServerCredentials{ + Username: "user", + Password: "pass", + }) + + assert.Equal(t, "user", fv.Credentials.Username) + assert.Equal(t, "pass", fv.Credentials.Password) +} + +func TestSetFileSystemDNSName(t *testing.T) { + fv := &FSxWindowsFileServerResource{} + fv.SetFileSystemDNSName("test") + assert.Equal(t, "test", fv.FSxWindowsFileServerDNSName) +} + +func TestSetRootDirectory(t *testing.T) { + fv := &FSxWindowsFileServerResource{} + fv.SetRootDirectory("root") + assert.Equal(t, "root", fv.VolumeConfig.RootDirectory) +} + +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + os.Exit(0) +} + +func TestPerformHostMount(t *testing.T) { + fv := &FSxWindowsFileServerResource{} + execCommand = fakeExecCommand + defer func() { execCommand = exec.Command }() + + err := fv.performHostMount(`\\amznfsxfp8sdlcw.test.corp.com\share`, `test\user`, `pass`) + assert.NoError(t, err) +} + +func TestRemoveHostMount(t *testing.T) { + fv, _, _, _, _, _, _, _ := setup(t) + execCommand = fakeExecCommand + + defer func() { + execCommand = exec.Command + }() + + err := fv.removeHostMount("test") + assert.NoError(t, err) +} + +func TestCreateInvalidExecutionRoleCredentialsErr(t *testing.T) { + fv, credentialsManager, _, _, _, _, _, _ := setup(t) + + DriveLetterAvailable = func(string) bool { + return true + } + + defer func() { + DriveLetterAvailable = utils.IsAvailableDriveLetter + }() + + creds := credentials.TaskIAMRoleCredentials{} + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, 
false), + ) + + err := fv.Create() + assert.Error(t, err) +} + +func TestCreateUnavailableLocalPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + mockFSxClient := mock_fsxiface.NewMockFSxClient(ctrl) + + fv := &FSxWindowsFileServerResource{ + VolumeConfig: FSxWindowsFileServerVolumeConfig{ + FileSystemID: fileSystemId, + RootDirectory: rootDirectory, + AuthConfig: FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn:aws:ssm:us-west-2:123456789012:parameter/test", + Domain: domain, + }, + HostPath: hostPath, + }, + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + } + fv.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + ASMClientCreator: asmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + ssmTestData := "{\n\"username\": \"user\", \n\"password\": \"pass\"\n}" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(ssmTestData), + }, + }, + } + + fsxClientOutput := &fsx.DescribeFileSystemsOutput{ + FileSystems: []*fsx.FileSystem{ + { + FileSystemId: aws.String(fileSystemId), + DNSName: aws.String("test"), + }, + }, + } + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + 
SecretAccessKey: "key", + }, + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + fsxClientCreator.EXPECT().NewFSxClient(gomock.Any(), gomock.Any()).Return(mockFSxClient), + mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(fsxClientOutput, nil).Times(1), + ) + + DriveLetterAvailable = func(string) bool { + return false + } + execCommand = fakeExecCommand + + defer func() { + DriveLetterAvailable = utils.IsAvailableDriveLetter + execCommand = exec.Command + }() + + err := fv.Create() + assert.NoError(t, err) +} + +func TestCreateSSM(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl) + mockFSxClient := mock_fsxiface.NewMockFSxClient(ctrl) + + fv := &FSxWindowsFileServerResource{ + VolumeConfig: FSxWindowsFileServerVolumeConfig{ + FileSystemID: fileSystemId, + RootDirectory: rootDirectory, + AuthConfig: FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn:aws:ssm:us-west-2:123456789012:parameter/test", + Domain: domain, + }, + HostPath: hostPath, + }, + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + } + fv.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + ASMClientCreator: asmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: 
credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + ssmTestData := "{\n\"username\": \"user\", \n\"password\": \"pass\"\n}" + ssmClientOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String("test"), + Value: aws.String(ssmTestData), + }, + }, + } + + fsxClientOutput := &fsx.DescribeFileSystemsOutput{ + FileSystems: []*fsx.FileSystem{ + { + FileSystemId: aws.String(fileSystemId), + DNSName: aws.String("test"), + }, + }, + } + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + ssmClientCreator.EXPECT().NewSSMClient(gomock.Any(), gomock.Any()).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Return(ssmClientOutput, nil).Times(1), + fsxClientCreator.EXPECT().NewFSxClient(gomock.Any(), gomock.Any()).Return(mockFSxClient), + mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(fsxClientOutput, nil).Times(1), + ) + + DriveLetterAvailable = func(string) bool { + return true + } + execCommand = fakeExecCommand + + defer func() { + DriveLetterAvailable = utils.IsAvailableDriveLetter + execCommand = exec.Command + }() + + err := fv.Create() + assert.NoError(t, err) +} + +func TestCreateASM(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl) + asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl) + fsxClientCreator := mock_fsx_factory.NewMockFSxClientCreator(ctrl) + mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl) + mockFSxClient := mock_fsxiface.NewMockFSxClient(ctrl) + + credentialsParameter := 
"arn:aws:secretsmanager:us-east-1:123456789012:secret:testing/some-random-name" + + fv := &FSxWindowsFileServerResource{ + VolumeConfig: FSxWindowsFileServerVolumeConfig{ + FileSystemID: fileSystemId, + RootDirectory: rootDirectory, + AuthConfig: FSxWindowsFileServerAuthConfig{ + CredentialsParameter: "arn:aws:secretsmanager:us-east-1:123456789012:secret:testing/some-random-name", + Domain: domain, + }, + HostPath: hostPath, + }, + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + } + fv.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + ASMClientCreator: asmClientCreator, + FSxClientCreator: fsxClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + + asmTestData := "{\"username\":\"user\",\"password\":\"pass\"}" + asmClientOutput := &secretsmanager.GetSecretValueOutput{ + SecretString: aws.String(asmTestData), + } + + fsxClientOutput := &fsx.DescribeFileSystemsOutput{ + FileSystems: []*fsx.FileSystem{ + { + FileSystemId: aws.String(fileSystemId), + DNSName: aws.String("test"), + }, + }, + } + + creds := credentials.TaskIAMRoleCredentials{ + ARN: "arn", + IAMRoleCredentials: credentials.IAMRoleCredentials{ + AccessKeyID: "id", + SecretAccessKey: "key", + }, + } + + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(gomock.Any()).Return(creds, true), + asmClientCreator.EXPECT().NewASMClient(gomock.Any(), gomock.Any()).Return(mockASMClient), + mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) { + assert.Equal(t, aws.StringValue(in.SecretId), credentialsParameter) + }).Return(asmClientOutput, nil), + fsxClientCreator.EXPECT().NewFSxClient(gomock.Any(), gomock.Any()).Return(mockFSxClient), + 
mockFSxClient.EXPECT().DescribeFileSystems(gomock.Any()).Return(fsxClientOutput, nil).Times(1), + ) + + DriveLetterAvailable = func(string) bool { + return true + } + execCommand = fakeExecCommand + + defer func() { + DriveLetterAvailable = utils.IsAvailableDriveLetter + execCommand = exec.Command + }() + + err := fv.Create() + assert.NoError(t, err) +} + +func TestClearFSxWindowsFileServerResource(t *testing.T) { + fv := &FSxWindowsFileServerResource{VolumeConfig: FSxWindowsFileServerVolumeConfig{HostPath: hostPath}} + + execCommand = fakeExecCommand + defer func() { execCommand = exec.Command }() + + err := fv.Cleanup() + assert.NoError(t, err) +} diff --git a/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus.go b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus.go new file mode 100644 index 00000000000..222f5f0c540 --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package fsxwindowsfileserver + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +// FSxWindowsFileServerVolumeStatus defines resource statuses for fsxwindowsfileserver resource +type FSxWindowsFileServerVolumeStatus resourcestatus.ResourceStatus + +const ( + // FSxWindowsFileServerVolumeStatusNone is the zero state of a task resource + FSxWindowsFileServerVolumeStatusNone FSxWindowsFileServerVolumeStatus = iota + // FSxWindowsFileServerVolumeCreated represents a task resource which has been created + FSxWindowsFileServerVolumeCreated + // FSxWindowsFileServerVolumeRemoved represents a task resource which has been cleaned up + FSxWindowsFileServerVolumeRemoved +) + +var FSxWindowsFileServerVolumeStatusMap = map[string]FSxWindowsFileServerVolumeStatus{ + "NONE": FSxWindowsFileServerVolumeStatusNone, + "CREATED": FSxWindowsFileServerVolumeCreated, + "REMOVED": FSxWindowsFileServerVolumeRemoved, +} + +// StatusString returns a human readable string representation of this object +func (fs FSxWindowsFileServerVolumeStatus) String() string { + for k, v := range FSxWindowsFileServerVolumeStatusMap { + if v == fs { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (fs *FSxWindowsFileServerVolumeStatus) MarshalJSON() ([]byte, error) { + if fs == nil { + return nil, nil + } + return []byte(`"` + fs.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (fs *FSxWindowsFileServerVolumeStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *fs = FSxWindowsFileServerVolumeStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *fs = FSxWindowsFileServerVolumeStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := 
FSxWindowsFileServerVolumeStatusMap[string(strStatus)] + if !ok { + *fs = FSxWindowsFileServerVolumeStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *fs = stat + return nil +} diff --git a/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus_test.go b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus_test.go new file mode 100644 index 00000000000..186ecd6d446 --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/fsxwindowsfileserverstatus_test.go @@ -0,0 +1,88 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package fsxwindowsfileserver + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + var resourceStatus FSxWindowsFileServerVolumeStatus + + resourceStatus = FSxWindowsFileServerVolumeStatusNone + assert.Equal(t, resourceStatus.String(), "NONE") + resourceStatus = FSxWindowsFileServerVolumeCreated + assert.Equal(t, resourceStatus.String(), "CREATED") + resourceStatus = FSxWindowsFileServerVolumeRemoved + assert.Equal(t, resourceStatus.String(), "REMOVED") +} + +func TestMarshalFSxWindowsFileServerVolumeStatus(t *testing.T) { + status := FSxWindowsFileServerVolumeStatusNone + bytes, err := status.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, `"NONE"`, string(bytes[:])) +} + +func TestMarshalNilFSxWindowsFileServerVolumeStatus(t *testing.T) { + var status *FSxWindowsFileServerVolumeStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Nil(t, err) +} + +type testFSxWindowsFileServerVolumeStatus struct { + SomeStatus FSxWindowsFileServerVolumeStatus `json:"status"` +} + +func TestUnmarshalFSxWindowsFileServerVolumeStatus(t *testing.T) { + status := FSxWindowsFileServerVolumeStatusNone + + err := json.Unmarshal([]byte(`"CREATED"`), &status) + assert.NoError(t, err) + assert.Equal(t, FSxWindowsFileServerVolumeCreated, status, "CREATED should unmarshal to CREATED, not "+status.String()) + + var testStatus testFSxWindowsFileServerVolumeStatus + err = json.Unmarshal([]byte(`{"status":"REMOVED"}`), &testStatus) + assert.NoError(t, err) + assert.Equal(t, FSxWindowsFileServerVolumeRemoved, testStatus.SomeStatus, "REMOVED should unmarshal to REMOVED, not "+testStatus.SomeStatus.String()) +} + +func TestUnmarshalNullFSxWindowsFileServerVolumeStatus(t *testing.T) { + status := FSxWindowsFileServerVolumeCreated + err := json.Unmarshal([]byte("null"), &status) + assert.NoError(t, err) + assert.Equal(t, FSxWindowsFileServerVolumeStatusNone, status, "null should unmarshal 
to None, not "+status.String()) +} + +func TestUnmarshalNonStringFSxWindowsFileServerVolumeStatusDefaultNone(t *testing.T) { + status := FSxWindowsFileServerVolumeCreated + err := json.Unmarshal([]byte(`1`), &status) + assert.NotNil(t, err) + assert.Equal(t, FSxWindowsFileServerVolumeStatusNone, status, "non-string status should unmarshal to None, not "+status.String()) +} + +func TestUnmarshalUnmappedFSxWindowsFileServerVolumeStatusDefaultNone(t *testing.T) { + status := FSxWindowsFileServerVolumeRemoved + err := json.Unmarshal([]byte(`"SOMEOTHER"`), &status) + assert.NotNil(t, err) + assert.Equal(t, FSxWindowsFileServerVolumeStatusNone, status, "Unmapped status should unmarshal to None, not "+status.String()) +} diff --git a/agent/taskresource/fsxwindowsfileserver/types.go b/agent/taskresource/fsxwindowsfileserver/types.go new file mode 100644 index 00000000000..eedc2714171 --- /dev/null +++ b/agent/taskresource/fsxwindowsfileserver/types.go @@ -0,0 +1,19 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package fsxwindowsfileserver + +const ( + // ResourceName is the name of the fsxwindowsfileserver resource + ResourceName = "fsxwindowsfileserver" +) diff --git a/agent/taskresource/generate_mocks.go b/agent/taskresource/generate_mocks.go new file mode 100644 index 00000000000..aa6edd03238 --- /dev/null +++ b/agent/taskresource/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package taskresource + +//go:generate mockgen -destination=mocks/taskresource_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/taskresource TaskResource diff --git a/agent/taskresource/interface.go b/agent/taskresource/interface.go new file mode 100644 index 00000000000..ee1ae2b6e62 --- /dev/null +++ b/agent/taskresource/interface.go @@ -0,0 +1,93 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package taskresource + +import ( + "encoding/json" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +// TaskResource is a wrapper for task level resource methods we need +type TaskResource interface { + // SetDesiredStatus sets the desired status of the resource + SetDesiredStatus(resourcestatus.ResourceStatus) + // GetDesiredStatus gets the desired status of the resource + GetDesiredStatus() resourcestatus.ResourceStatus + // SetKnownStatus sets the desired status of the resource + SetKnownStatus(resourcestatus.ResourceStatus) + // GetKnownStatus gets the desired status of the resource + GetKnownStatus() resourcestatus.ResourceStatus + // SetCreatedAt sets the timestamp for resource's creation time + SetCreatedAt(time.Time) + // GetCreatedAt sets the timestamp for resource's creation time + GetCreatedAt() time.Time + // Create performs resource creation + Create() error + // Cleanup performs resource cleanup + Cleanup() error + // GetName returns the unique name of the resource + GetName() string + // DesiredTeminal returns true if remove is in terminal state + DesiredTerminal() bool + // KnownCreated returns true if resource state is CREATED + KnownCreated() bool + // TerminalStatus returns the last transition state of the resource + TerminalStatus() resourcestatus.ResourceStatus + // NextKnownState returns resource's next state + NextKnownState() resourcestatus.ResourceStatus + // ApplyTransition calls the function required to move to the specified status + ApplyTransition(resourcestatus.ResourceStatus) error + // SteadyState returns the transition state of the resource defined as "ready" + SteadyState() resourcestatus.ResourceStatus + // SetAppliedStatus sets the applied status of resource and 
returns whether + // the resource is already in a transition + SetAppliedStatus(status resourcestatus.ResourceStatus) bool + // GetAppliedStatus gets the applied status of resource + GetAppliedStatus() resourcestatus.ResourceStatus + // StatusString returns the string of the resource status + StatusString(status resourcestatus.ResourceStatus) string + // GetTerminalReason returns string describing why the resource failed to + // provision + GetTerminalReason() string + // DependOnTaskNetwork shows whether the resource creation needs task network setup beforehand + DependOnTaskNetwork() bool + // GetContainerDependencies returns dependent containers for a status + GetContainerDependencies(resourcestatus.ResourceStatus) []apicontainer.ContainerDependency + // BuildContainerDependency adds a new dependency container and its satisfied status + BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) + // Initialize will initialze the resource fields of the resource + Initialize(res *ResourceFields, + taskKnownStatus apitaskstatus.TaskStatus, taskDesiredStatus apitaskstatus.TaskStatus) + + json.Marshaler + json.Unmarshaler +} + +// TransitionDependenciesMap is a map of the dependent resource status to other +// dependencies that must be satisfied. +type TransitionDependenciesMap map[resourcestatus.ResourceStatus]TransitionDependencySet + +// TransitionDependencySet contains dependencies that impact transitions of +// resources. +type TransitionDependencySet struct { + // ContainerDependencies is the set of containers on which a transition is + // dependent. 
+ ContainerDependencies []apicontainer.ContainerDependency `json:"ContainerDependencies"` +} diff --git a/agent/taskresource/mocks/taskresource_mocks.go b/agent/taskresource/mocks/taskresource_mocks.go new file mode 100644 index 00000000000..ae5f7124d9c --- /dev/null +++ b/agent/taskresource/mocks/taskresource_mocks.go @@ -0,0 +1,395 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/taskresource (interfaces: TaskResource) + +// Package mock_taskresource is a generated GoMock package. 
+package mock_taskresource + +import ( + reflect "reflect" + time "time" + + container "github.com/aws/amazon-ecs-agent/agent/api/container" + status "github.com/aws/amazon-ecs-agent/agent/api/container/status" + status0 "github.com/aws/amazon-ecs-agent/agent/api/task/status" + taskresource "github.com/aws/amazon-ecs-agent/agent/taskresource" + status1 "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + gomock "github.com/golang/mock/gomock" +) + +// MockTaskResource is a mock of TaskResource interface +type MockTaskResource struct { + ctrl *gomock.Controller + recorder *MockTaskResourceMockRecorder +} + +// MockTaskResourceMockRecorder is the mock recorder for MockTaskResource +type MockTaskResourceMockRecorder struct { + mock *MockTaskResource +} + +// NewMockTaskResource creates a new mock instance +func NewMockTaskResource(ctrl *gomock.Controller) *MockTaskResource { + mock := &MockTaskResource{ctrl: ctrl} + mock.recorder = &MockTaskResourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTaskResource) EXPECT() *MockTaskResourceMockRecorder { + return m.recorder +} + +// ApplyTransition mocks base method +func (m *MockTaskResource) ApplyTransition(arg0 status1.ResourceStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyTransition", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyTransition indicates an expected call of ApplyTransition +func (mr *MockTaskResourceMockRecorder) ApplyTransition(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyTransition", reflect.TypeOf((*MockTaskResource)(nil).ApplyTransition), arg0) +} + +// BuildContainerDependency mocks base method +func (m *MockTaskResource) BuildContainerDependency(arg0 string, arg1 status.ContainerStatus, arg2 status1.ResourceStatus) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BuildContainerDependency", arg0, arg1, arg2) +} + 
+// BuildContainerDependency indicates an expected call of BuildContainerDependency +func (mr *MockTaskResourceMockRecorder) BuildContainerDependency(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildContainerDependency", reflect.TypeOf((*MockTaskResource)(nil).BuildContainerDependency), arg0, arg1, arg2) +} + +// Cleanup mocks base method +func (m *MockTaskResource) Cleanup() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cleanup") + ret0, _ := ret[0].(error) + return ret0 +} + +// Cleanup indicates an expected call of Cleanup +func (mr *MockTaskResourceMockRecorder) Cleanup() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockTaskResource)(nil).Cleanup)) +} + +// Create mocks base method +func (m *MockTaskResource) Create() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create") + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockTaskResourceMockRecorder) Create() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockTaskResource)(nil).Create)) +} + +// DependOnTaskNetwork mocks base method +func (m *MockTaskResource) DependOnTaskNetwork() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DependOnTaskNetwork") + ret0, _ := ret[0].(bool) + return ret0 +} + +// DependOnTaskNetwork indicates an expected call of DependOnTaskNetwork +func (mr *MockTaskResourceMockRecorder) DependOnTaskNetwork() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DependOnTaskNetwork", reflect.TypeOf((*MockTaskResource)(nil).DependOnTaskNetwork)) +} + +// DesiredTerminal mocks base method +func (m *MockTaskResource) DesiredTerminal() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DesiredTerminal") + ret0, _ := ret[0].(bool) + return 
ret0 +} + +// DesiredTerminal indicates an expected call of DesiredTerminal +func (mr *MockTaskResourceMockRecorder) DesiredTerminal() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DesiredTerminal", reflect.TypeOf((*MockTaskResource)(nil).DesiredTerminal)) +} + +// GetAppliedStatus mocks base method +func (m *MockTaskResource) GetAppliedStatus() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAppliedStatus") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// GetAppliedStatus indicates an expected call of GetAppliedStatus +func (mr *MockTaskResourceMockRecorder) GetAppliedStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAppliedStatus", reflect.TypeOf((*MockTaskResource)(nil).GetAppliedStatus)) +} + +// GetContainerDependencies mocks base method +func (m *MockTaskResource) GetContainerDependencies(arg0 status1.ResourceStatus) []container.ContainerDependency { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetContainerDependencies", arg0) + ret0, _ := ret[0].([]container.ContainerDependency) + return ret0 +} + +// GetContainerDependencies indicates an expected call of GetContainerDependencies +func (mr *MockTaskResourceMockRecorder) GetContainerDependencies(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerDependencies", reflect.TypeOf((*MockTaskResource)(nil).GetContainerDependencies), arg0) +} + +// GetCreatedAt mocks base method +func (m *MockTaskResource) GetCreatedAt() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCreatedAt") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetCreatedAt indicates an expected call of GetCreatedAt +func (mr *MockTaskResourceMockRecorder) GetCreatedAt() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCreatedAt", 
reflect.TypeOf((*MockTaskResource)(nil).GetCreatedAt)) +} + +// GetDesiredStatus mocks base method +func (m *MockTaskResource) GetDesiredStatus() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDesiredStatus") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// GetDesiredStatus indicates an expected call of GetDesiredStatus +func (mr *MockTaskResourceMockRecorder) GetDesiredStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDesiredStatus", reflect.TypeOf((*MockTaskResource)(nil).GetDesiredStatus)) +} + +// GetKnownStatus mocks base method +func (m *MockTaskResource) GetKnownStatus() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetKnownStatus") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// GetKnownStatus indicates an expected call of GetKnownStatus +func (mr *MockTaskResourceMockRecorder) GetKnownStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKnownStatus", reflect.TypeOf((*MockTaskResource)(nil).GetKnownStatus)) +} + +// GetName mocks base method +func (m *MockTaskResource) GetName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetName") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetName indicates an expected call of GetName +func (mr *MockTaskResourceMockRecorder) GetName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockTaskResource)(nil).GetName)) +} + +// GetTerminalReason mocks base method +func (m *MockTaskResource) GetTerminalReason() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTerminalReason") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetTerminalReason indicates an expected call of GetTerminalReason +func (mr *MockTaskResourceMockRecorder) GetTerminalReason() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTerminalReason", reflect.TypeOf((*MockTaskResource)(nil).GetTerminalReason)) +} + +// Initialize mocks base method +func (m *MockTaskResource) Initialize(arg0 *taskresource.ResourceFields, arg1, arg2 status0.TaskStatus) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Initialize", arg0, arg1, arg2) +} + +// Initialize indicates an expected call of Initialize +func (mr *MockTaskResourceMockRecorder) Initialize(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockTaskResource)(nil).Initialize), arg0, arg1, arg2) +} + +// KnownCreated mocks base method +func (m *MockTaskResource) KnownCreated() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KnownCreated") + ret0, _ := ret[0].(bool) + return ret0 +} + +// KnownCreated indicates an expected call of KnownCreated +func (mr *MockTaskResourceMockRecorder) KnownCreated() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KnownCreated", reflect.TypeOf((*MockTaskResource)(nil).KnownCreated)) +} + +// MarshalJSON mocks base method +func (m *MockTaskResource) MarshalJSON() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarshalJSON") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MarshalJSON indicates an expected call of MarshalJSON +func (mr *MockTaskResourceMockRecorder) MarshalJSON() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarshalJSON", reflect.TypeOf((*MockTaskResource)(nil).MarshalJSON)) +} + +// NextKnownState mocks base method +func (m *MockTaskResource) NextKnownState() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextKnownState") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// NextKnownState indicates an expected call of NextKnownState +func (mr 
*MockTaskResourceMockRecorder) NextKnownState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextKnownState", reflect.TypeOf((*MockTaskResource)(nil).NextKnownState)) +} + +// SetAppliedStatus mocks base method +func (m *MockTaskResource) SetAppliedStatus(arg0 status1.ResourceStatus) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAppliedStatus", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// SetAppliedStatus indicates an expected call of SetAppliedStatus +func (mr *MockTaskResourceMockRecorder) SetAppliedStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAppliedStatus", reflect.TypeOf((*MockTaskResource)(nil).SetAppliedStatus), arg0) +} + +// SetCreatedAt mocks base method +func (m *MockTaskResource) SetCreatedAt(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCreatedAt", arg0) +} + +// SetCreatedAt indicates an expected call of SetCreatedAt +func (mr *MockTaskResourceMockRecorder) SetCreatedAt(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCreatedAt", reflect.TypeOf((*MockTaskResource)(nil).SetCreatedAt), arg0) +} + +// SetDesiredStatus mocks base method +func (m *MockTaskResource) SetDesiredStatus(arg0 status1.ResourceStatus) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDesiredStatus", arg0) +} + +// SetDesiredStatus indicates an expected call of SetDesiredStatus +func (mr *MockTaskResourceMockRecorder) SetDesiredStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDesiredStatus", reflect.TypeOf((*MockTaskResource)(nil).SetDesiredStatus), arg0) +} + +// SetKnownStatus mocks base method +func (m *MockTaskResource) SetKnownStatus(arg0 status1.ResourceStatus) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetKnownStatus", arg0) +} + +// SetKnownStatus indicates an expected call 
of SetKnownStatus +func (mr *MockTaskResourceMockRecorder) SetKnownStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetKnownStatus", reflect.TypeOf((*MockTaskResource)(nil).SetKnownStatus), arg0) +} + +// StatusString mocks base method +func (m *MockTaskResource) StatusString(arg0 status1.ResourceStatus) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StatusString", arg0) + ret0, _ := ret[0].(string) + return ret0 +} + +// StatusString indicates an expected call of StatusString +func (mr *MockTaskResourceMockRecorder) StatusString(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatusString", reflect.TypeOf((*MockTaskResource)(nil).StatusString), arg0) +} + +// SteadyState mocks base method +func (m *MockTaskResource) SteadyState() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SteadyState") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// SteadyState indicates an expected call of SteadyState +func (mr *MockTaskResourceMockRecorder) SteadyState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SteadyState", reflect.TypeOf((*MockTaskResource)(nil).SteadyState)) +} + +// TerminalStatus mocks base method +func (m *MockTaskResource) TerminalStatus() status1.ResourceStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TerminalStatus") + ret0, _ := ret[0].(status1.ResourceStatus) + return ret0 +} + +// TerminalStatus indicates an expected call of TerminalStatus +func (mr *MockTaskResourceMockRecorder) TerminalStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TerminalStatus", reflect.TypeOf((*MockTaskResource)(nil).TerminalStatus)) +} + +// UnmarshalJSON mocks base method +func (m *MockTaskResource) UnmarshalJSON(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, 
"UnmarshalJSON", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnmarshalJSON indicates an expected call of UnmarshalJSON +func (mr *MockTaskResourceMockRecorder) UnmarshalJSON(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalJSON", reflect.TypeOf((*MockTaskResource)(nil).UnmarshalJSON), arg0) +} diff --git a/agent/taskresource/ssmsecret/ssmsecret.go b/agent/taskresource/ssmsecret/ssmsecret.go new file mode 100644 index 00000000000..7a921832a52 --- /dev/null +++ b/agent/taskresource/ssmsecret/ssmsecret.go @@ -0,0 +1,502 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ssmsecret + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/cihub/seelog" + "github.com/pkg/errors" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + "github.com/aws/amazon-ecs-agent/agent/ssm" + "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +const ( + // ResourceName is the name of the ssmsecret resource + ResourceName = "ssmsecret" + + //MaxBatchNum is the maximum batch number that ssm GetParameters API can accept at one time + MaxBatchNum = 10 +) + +// SSMSecretResource represents secrets as a task resource. +// The secrets are stored in SSM Parameter Store. +type SSMSecretResource struct { + taskARN string + createdAt time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatus is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. This is + // used while progressing resource states in progressTask() of task manager + appliedStatus resourcestatus.ResourceStatus + resourceStatusToTransitionFunction map[resourcestatus.ResourceStatus]func() error + credentialsManager credentials.Manager + executionCredentialsID string + + // required for store ssm secrets value, key is region of secret + requiredSecrets map[string][]apicontainer.Secret + // map to store secret values, key is a combination of valueFrom and region + secretData map[string]string + + // ssmClientCreator is a factory interface that creates new SSM clients. 
This is + // needed mostly for testing. + ssmClientCreator factory.SSMClientCreator + + // terminalReason should be set for resource creation failures. This ensures + // the resource object carries some context for why provisioning failed. + terminalReason string + terminalReasonOnce sync.Once + + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// NewSSMSecretResource creates a new SSMSecretResource object +func NewSSMSecretResource(taskARN string, + ssmSecrets map[string][]apicontainer.Secret, + executionCredentialsID string, + credentialsManager credentials.Manager, + ssmClientCreator factory.SSMClientCreator) *SSMSecretResource { + + s := &SSMSecretResource{ + taskARN: taskARN, + requiredSecrets: ssmSecrets, + credentialsManager: credentialsManager, + executionCredentialsID: executionCredentialsID, + ssmClientCreator: ssmClientCreator, + } + + s.initStatusToTransition() + return s +} + +func (secret *SSMSecretResource) initStatusToTransition() { + resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(SSMSecretCreated): secret.Create, + } + secret.resourceStatusToTransitionFunction = resourceStatusToTransitionFunction +} + +func (secret *SSMSecretResource) setTerminalReason(reason string) { + secret.terminalReasonOnce.Do(func() { + seelog.Infof("ssm secret resource: setting terminal reason for ssm secret resource in task: [%s]", secret.taskARN) + secret.terminalReason = reason + }) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (secret *SSMSecretResource) GetTerminalReason() string { + return secret.terminalReason +} + +// SetDesiredStatus safely sets the desired status of the resource +func (secret *SSMSecretResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.desiredStatusUnsafe = status +} + +// 
GetDesiredStatus safely returns the desired status of the task +func (secret *SSMSecretResource) GetDesiredStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.desiredStatusUnsafe +} + +// GetName safely returns the name of the resource +func (secret *SSMSecretResource) GetName() string { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return ResourceName +} + +// DesiredTerminal returns true if the secret's desired status is REMOVED +func (secret *SSMSecretResource) DesiredTerminal() bool { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.desiredStatusUnsafe == resourcestatus.ResourceStatus(SSMSecretRemoved) +} + +// KnownCreated returns true if the secret's known status is CREATED +func (secret *SSMSecretResource) KnownCreated() bool { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.knownStatusUnsafe == resourcestatus.ResourceStatus(SSMSecretCreated) +} + +// TerminalStatus returns the last transition state of cgroup +func (secret *SSMSecretResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(SSMSecretRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. 
+func (secret *SSMSecretResource) NextKnownState() resourcestatus.ResourceStatus { + return secret.GetKnownStatus() + 1 +} + +// ApplyTransition calls the function required to move to the specified status +func (secret *SSMSecretResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := secret.resourceStatusToTransitionFunction[nextState] + if !ok { + return errors.Errorf("resource [%s]: transition to %s impossible", secret.GetName(), + secret.StatusString(nextState)) + } + return transitionFunc() +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (secret *SSMSecretResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(SSMSecretCreated) +} + +// SetKnownStatus safely sets the currently known status of the resource +func (secret *SSMSecretResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.knownStatusUnsafe = status + secret.updateAppliedStatusUnsafe(status) +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (secret *SSMSecretResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if secret.appliedStatus == resourcestatus.ResourceStatus(SSMSecretStatusNone) { + return + } + + // Check if the resource transition has already finished + if secret.appliedStatus <= knownStatus { + secret.appliedStatus = resourcestatus.ResourceStatus(SSMSecretStatusNone) + } +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (secret *SSMSecretResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + secret.lock.Lock() + defer secret.lock.Unlock() + + if secret.appliedStatus != resourcestatus.ResourceStatus(SSMSecretStatusNone) { + // return false to indicate the set operation failed + return false + } + + secret.appliedStatus = status + return 
true +} + +// GetKnownStatus safely returns the currently known status of the task +func (secret *SSMSecretResource) GetKnownStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.knownStatusUnsafe +} + +// StatusString returns the string of the cgroup resource status +func (secret *SSMSecretResource) StatusString(status resourcestatus.ResourceStatus) string { + return SSMSecretStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (secret *SSMSecretResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + secret.lock.Lock() + defer secret.lock.Unlock() + + secret.createdAt = createdAt +} + +// GetCreatedAt sets the timestamp for resource's creation time +func (secret *SSMSecretResource) GetCreatedAt() time.Time { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.createdAt +} + +// Create fetches secret value from SSM in batches. It spins up multiple goroutines in order to +// retrieve values in parallel. +func (secret *SSMSecretResource) Create() error { + + // To fail fast, check execution role first + executionCredentials, ok := secret.credentialsManager.GetTaskCredentials(secret.getExecutionCredentialsID()) + if !ok { + // No need to log here. 
managedTask.applyResourceState already does that + err := errors.New("ssm secret resource: unable to find execution role credentials") + secret.setTerminalReason(err.Error()) + return err + } + iamCredentials := executionCredentials.GetIAMRoleCredentials() + + var wg sync.WaitGroup + + // Get the maximum number of errors can be returned, which will be one error per goroutine + chanLen := secret.getGoRoutineMaxNum() + errorEvents := make(chan error, chanLen) + + seelog.Infof("ssm secret resource: retrieving secrets for containers in task: [%s]", secret.taskARN) + secret.secretData = make(map[string]string) + + for region, secrets := range secret.getRequiredSecrets() { + wg.Add(1) + // Spin up goroutine each region to speed up processing time + go secret.retrieveSSMSecretValuesByRegion(region, secrets, iamCredentials, &wg, errorEvents) + } + + wg.Wait() + + // Get the first error returned and set as terminal reason + select { + case err := <-errorEvents: + secret.setTerminalReason(err.Error()) + return err + default: + return nil + } +} + +// getGoRoutineMaxNum calculates the maximum number of goroutines that we need to spin up +// to retrieve secret values from SSM parameter store. 
Assume each goroutine initiates one +// SSM GetParameters call and each call will have 10 parameters +func (secret *SSMSecretResource) getGoRoutineMaxNum() int { + total := 0 + for _, secrets := range secret.requiredSecrets { + total += len(secrets)/MaxBatchNum + 1 + } + return total +} + +// retrieveSSMSecretValuesByRegion reads secret values from cache first, if not exists, batches secrets based on field +// valueFrom and call retrieveSSMSecretValues to retrieve values from SSM +func (secret *SSMSecretResource) retrieveSSMSecretValuesByRegion(region string, secrets []apicontainer.Secret, iamCredentials credentials.IAMRoleCredentials, wg *sync.WaitGroup, errorEvents chan error) { + seelog.Infof("ssm secret resource: retrieving secrets for region %s in task: [%s]", region, secret.taskARN) + defer wg.Done() + + var wgPerRegion sync.WaitGroup + var secretNames []string + + for _, s := range secrets { + secretKey := s.GetSecretResourceCacheKey() + if _, ok := secret.GetCachedSecretValue(secretKey); ok { + continue + } + secretNames = append(secretNames, s.ValueFrom) + if len(secretNames) == MaxBatchNum { + secretNamesTmp := make([]string, MaxBatchNum) + copy(secretNamesTmp, secretNames) + wgPerRegion.Add(1) + go secret.retrieveSSMSecretValues(region, secretNamesTmp, iamCredentials, &wgPerRegion, errorEvents) + secretNames = []string{} + } + } + + if len(secretNames) > 0 { + wgPerRegion.Add(1) + go secret.retrieveSSMSecretValues(region, secretNames, iamCredentials, &wgPerRegion, errorEvents) + } + wgPerRegion.Wait() +} + +// retrieveSSMSecretValues retrieves secret values from SSM parameter store and caches them into memory +func (secret *SSMSecretResource) retrieveSSMSecretValues(region string, names []string, iamCredentials credentials.IAMRoleCredentials, wg *sync.WaitGroup, errorEvents chan error) { + defer wg.Done() + + ssmClient := secret.ssmClientCreator.NewSSMClient(region, iamCredentials) + seelog.Infof("ssm secret resource: retrieving resource for secrets %v 
in region [%s] in task: [%s]", names, region, secret.taskARN) + secValueMap, err := ssm.GetSecretsFromSSM(names, ssmClient) + if err != nil { + errorEvents <- fmt.Errorf("fetching secret data from SSM Parameter Store in %s: %v", region, err) + return + } + + secret.lock.Lock() + defer secret.lock.Unlock() + + // put secret value in secretData + for secretName, secretValue := range secValueMap { + secretKey := secretName + "_" + region + secret.secretData[secretKey] = secretValue + } +} + +// getRequiredSecrets returns the requiredSecrets field of ssmsecret task resource +func (secret *SSMSecretResource) getRequiredSecrets() map[string][]apicontainer.Secret { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.requiredSecrets +} + +// getExecutionCredentialsID returns the execution role's credential ID +func (secret *SSMSecretResource) getExecutionCredentialsID() string { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.executionCredentialsID +} + +// Cleanup removes the secret value created for the task +func (secret *SSMSecretResource) Cleanup() error { + secret.clearSSMSecretValue() + return nil +} + +// clearSSMSecretValue cycles through the collection of secret value data and +// removes them from the task +func (secret *SSMSecretResource) clearSSMSecretValue() { + secret.lock.Lock() + defer secret.lock.Unlock() + + for key := range secret.secretData { + delete(secret.secretData, key) + } +} + +// GetCachedSecretValue retrieves the secret value from secretData field +func (secret *SSMSecretResource) GetCachedSecretValue(secretKey string) (string, bool) { + secret.lock.RLock() + defer secret.lock.RUnlock() + + s, ok := secret.secretData[secretKey] + return s, ok +} + +// SetCachedSecretValue set the secret value in the secretData field given the key and value +func (secret *SSMSecretResource) SetCachedSecretValue(secretKey string, secretValue string) { + secret.lock.Lock() + defer secret.lock.Unlock() + + if 
secret.secretData == nil { + secret.secretData = make(map[string]string) + } + + secret.secretData[secretKey] = secretValue +} + +func (secret *SSMSecretResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + secret.initStatusToTransition() + secret.credentialsManager = resourceFields.CredentialsManager + secret.ssmClientCreator = resourceFields.SSMClientCreator + + // if task hasn't turn to 'created' status, and it's desire status is 'running' + // the resource status needs to be reset to 'NONE' status so the secret value + // will be retrieved again + if taskKnownStatus < status.TaskCreated && + taskDesiredStatus <= status.TaskRunning { + secret.SetKnownStatus(resourcestatus.ResourceStatusNone) + } +} + +type SSMSecretResourceJSON struct { + TaskARN string `json:"taskARN"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + DesiredStatus *SSMSecretStatus `json:"desiredStatus"` + KnownStatus *SSMSecretStatus `json:"knownStatus"` + RequiredSecrets map[string][]apicontainer.Secret `json:"secretResources"` + ExecutionCredentialsID string `json:"executionCredentialsID"` +} + +// MarshalJSON serialises the SSMSecretResource struct to JSON +func (secret *SSMSecretResource) MarshalJSON() ([]byte, error) { + if secret == nil { + return nil, errors.New("ssmsecret resource is nil") + } + createdAt := secret.GetCreatedAt() + return json.Marshal(SSMSecretResourceJSON{ + TaskARN: secret.taskARN, + CreatedAt: &createdAt, + DesiredStatus: func() *SSMSecretStatus { + desiredState := secret.GetDesiredStatus() + s := SSMSecretStatus(desiredState) + return &s + }(), + KnownStatus: func() *SSMSecretStatus { + knownState := secret.GetKnownStatus() + s := SSMSecretStatus(knownState) + return &s + }(), + RequiredSecrets: secret.getRequiredSecrets(), + ExecutionCredentialsID: secret.getExecutionCredentialsID(), + }) +} + +// UnmarshalJSON deserialises the raw JSON to a SSMSecretResource struct 
+func (secret *SSMSecretResource) UnmarshalJSON(b []byte) error { + temp := SSMSecretResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return err + } + + if temp.DesiredStatus != nil { + secret.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + secret.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() { + secret.SetCreatedAt(*temp.CreatedAt) + } + if temp.RequiredSecrets != nil { + secret.requiredSecrets = temp.RequiredSecrets + } + secret.taskARN = temp.TaskARN + secret.executionCredentialsID = temp.ExecutionCredentialsID + + return nil +} + +// GetAppliedStatus safely returns the currently applied status of the resource +func (secret *SSMSecretResource) GetAppliedStatus() resourcestatus.ResourceStatus { + secret.lock.RLock() + defer secret.lock.RUnlock() + + return secret.appliedStatus +} + +func (secret *SSMSecretResource) DependOnTaskNetwork() bool { + return false +} + +func (secret *SSMSecretResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { +} + +func (secret *SSMSecretResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + return nil +} diff --git a/agent/taskresource/ssmsecret/ssmsecret_test.go b/agent/taskresource/ssmsecret/ssmsecret_test.go new file mode 100644 index 00000000000..8f3156c77fc --- /dev/null +++ b/agent/taskresource/ssmsecret/ssmsecret_test.go @@ -0,0 +1,520 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ssmsecret + +import ( + "encoding/json" + "strconv" + "testing" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/credentials" + mock_credentials "github.com/aws/amazon-ecs-agent/agent/credentials/mocks" + mock_factory "github.com/aws/amazon-ecs-agent/agent/ssm/factory/mocks" + mock_ssm "github.com/aws/amazon-ecs-agent/agent/ssm/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + executionCredentialsID = "exec-creds-id" + region1 = "us-west-2" + region2 = "us-east-1" + secretName1 = "db_username_1" + secretName2 = "db_username_2" + valueFrom1 = "secret-name" + valueFrom2 = "secret-name-2" + valueFromARN = "arn:aws:ssm:us-west-2:123456:parameter/secret-name" + secretKeyWest1 = "secret-name_us-west-2" + secretKeyWest2 = "secret-name-2_us-west-2" + secretKeyEast1 = "secret-name_us-east-1" + secretValue = "secret-value" + taskARN = "task1" +) + +func TestCreateAndGetWithOneCall(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + { + Name: secretName2, + ValueFrom: valueFrom2, + Region: region1, + Provider: "ssm", + }, + } + + requiredSecretData[region1] = secretsInRegion1 + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + 
credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssm.NewMockSSMClient(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + ssmOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String(valueFrom1), + Value: aws.String(secretValue), + }, + &ssm.Parameter{ + Name: aws.String(valueFrom2), + Value: aws.String(secretValue), + }, + }, + } + + allNames := []*string{aws.String(valueFrom1), aws.String(valueFrom2)} + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + ssmClientCreator.EXPECT().NewSSMClient(region1, iamRoleCreds).Return(mockSSMClient) + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) { + assert.Equal(t, in.Names, allNames) + }).Return(ssmOutput, nil).Times(1) + + ssmRes := &SSMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + } + require.NoError(t, ssmRes.Create()) + + value1, ok := ssmRes.GetCachedSecretValue(secretKeyWest1) + require.True(t, ok) + assert.Equal(t, secretValue, value1) + + value2, ok := ssmRes.GetCachedSecretValue(secretKeyWest2) + require.True(t, ok) + assert.Equal(t, secretValue, value2) +} + +func TestCreateAndGetWithTwoCallsAcrossRegions(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + secretsInRegion2 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region2, + Provider: "ssm", + }, + } + requiredSecretData[region1] = secretsInRegion1 + 
requiredSecretData[region2] = secretsInRegion2 + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssm.NewMockSSMClient(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + ssmOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: []*ssm.Parameter{ + &ssm.Parameter{ + Name: aws.String(valueFrom1), + Value: aws.String(secretValue), + }, + }, + } + + allNames := []*string{aws.String(valueFrom1)} + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + ssmClientCreator.EXPECT().NewSSMClient(region1, iamRoleCreds).Return(mockSSMClient) + ssmClientCreator.EXPECT().NewSSMClient(region2, iamRoleCreds).Return(mockSSMClient) + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) { + assert.Equal(t, in.Names, allNames) + }).Return(ssmOutput, nil).Times(2) + + ssmRes := &SSMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + } + require.NoError(t, ssmRes.Create()) + + value1, ok := ssmRes.GetCachedSecretValue(secretKeyWest1) + require.True(t, ok) + assert.Equal(t, secretValue, value1) + + value2, ok := ssmRes.GetCachedSecretValue(secretKeyEast1) + require.True(t, ok) + assert.Equal(t, secretValue, value2) +} + +func TestCreateAndGetWithTwoCallsInSameRegion(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + var secrets []apicontainer.Secret + for i := 1; i <= 12; i++ { + num := strconv.Itoa(i) + secret := apicontainer.Secret{ + Name: "db_username_" + num, + ValueFrom: "secret-name-" + num, + Region: region1, + Provider: "ssm", + } + secrets = append(secrets, 
secret) + } + + requiredSecretData[region1] = secrets + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssm.NewMockSSMClient(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + var params1, params2 []*ssm.Parameter + var paramsInput1, paramsInput2 []*string + var paramsValue1, paramsValue2 []string + + for i := 1; i <= 10; i++ { + num := strconv.Itoa(i) + param := &ssm.Parameter{ + Name: aws.String("secret-name-" + num), + Value: aws.String("secret-value-" + num), + } + params1 = append(params1, param) + paramsInput1 = append(paramsInput1, aws.String("secret-name-"+num)) + paramsValue1 = append(paramsValue1, "secret-value-"+num) + } + + ssmOutput1 := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: params1, + } + + ssmInput1 := &ssm.GetParametersInput{ + Names: paramsInput1, + WithDecryption: aws.Bool(true), + } + + for i := 11; i <= 12; i++ { + num := strconv.Itoa(i) + param := &ssm.Parameter{ + Name: aws.String("secret-name-" + num), + Value: aws.String("secret-value-" + num), + } + params2 = append(params2, param) + paramsInput2 = append(paramsInput2, aws.String("secret-name-"+num)) + paramsValue2 = append(paramsValue2, "secret-value-"+num) + } + ssmOutput2 := &ssm.GetParametersOutput{ + InvalidParameters: []*string{}, + Parameters: params2, + } + + ssmInput2 := &ssm.GetParametersInput{ + Names: paramsInput2, + WithDecryption: aws.Bool(true), + } + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + ssmClientCreator.EXPECT().NewSSMClient(region1, iamRoleCreds).Return(mockSSMClient).Times(2) + mockSSMClient.EXPECT().GetParameters(ssmInput1).Return(ssmOutput1, nil) + mockSSMClient.EXPECT().GetParameters(ssmInput2).Return(ssmOutput2, nil) + + 
ssmRes := &SSMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + } + require.NoError(t, ssmRes.Create()) + + for i := 1; i <= 12; i++ { + num := strconv.Itoa(i) + value, ok := ssmRes.GetCachedSecretValue("secret-name-" + num + "_" + region1) + require.True(t, ok) + assert.Equal(t, "secret-value-"+num, value) + } +} + +func TestCreateReturnMultipleErrors(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + secretsInRegion2 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region2, + Provider: "ssm", + }, + } + requiredSecretData[region1] = secretsInRegion1 + requiredSecretData[region2] = secretsInRegion2 + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssm.NewMockSSMClient(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + ssmOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{aws.String(valueFrom1)}, + Parameters: []*ssm.Parameter{}, + } + + allNames := []*string{aws.String(valueFrom1)} + + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true) + ssmClientCreator.EXPECT().NewSSMClient(region1, iamRoleCreds).Return(mockSSMClient) + ssmClientCreator.EXPECT().NewSSMClient(region2, iamRoleCreds).Return(mockSSMClient) + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) { + assert.Equal(t, in.Names, allNames) + }).Return(ssmOutput, nil).Times(2) + + ssmRes := &SSMSecretResource{ + 
executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + } + + assert.Error(t, ssmRes.Create()) + expectedError := "invalid parameters: secret-name" + assert.Contains(t, ssmRes.GetTerminalReason(), expectedError) +} + +func TestCreateReturnError(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + requiredSecretData[region1] = secretsInRegion1 + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + mockSSMClient := mock_ssm.NewMockSSMClient(ctrl) + + iamRoleCreds := credentials.IAMRoleCredentials{} + creds := credentials.TaskIAMRoleCredentials{ + IAMRoleCredentials: iamRoleCreds, + } + + ssmOutput := &ssm.GetParametersOutput{ + InvalidParameters: []*string{aws.String(valueFrom1)}, + Parameters: []*ssm.Parameter{}, + } + + allNames := []*string{aws.String(valueFrom1)} + gomock.InOrder( + credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true), + ssmClientCreator.EXPECT().NewSSMClient(region1, iamRoleCreds).Return(mockSSMClient), + mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) { + assert.Equal(t, in.Names, allNames) + }).Return(ssmOutput, nil), + ) + ssmRes := &SSMSecretResource{ + executionCredentialsID: executionCredentialsID, + requiredSecrets: requiredSecretData, + credentialsManager: credentialsManager, + ssmClientCreator: ssmClientCreator, + } + + assert.Error(t, ssmRes.Create()) + expectedError := "fetching secret data from SSM Parameter Store in us-west-2: invalid parameters: secret-name" + assert.Equal(t, expectedError, ssmRes.GetTerminalReason()) +} + +func 
TestGetGoRoutineMaxNumTwoRegions(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + secretsInRegion2 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region2, + Provider: "ssm", + }, + } + requiredSecretData[region1] = secretsInRegion1 + requiredSecretData[region2] = secretsInRegion2 + + ssmRes := &SSMSecretResource{ + requiredSecrets: requiredSecretData, + } + + number := ssmRes.getGoRoutineMaxNum() + assert.Equal(t, 2, number) +} + +func TestGetGoRoutineMaxNumOneRegion(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + { + Name: secretName2, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + + requiredSecretData[region1] = secretsInRegion1 + + ssmRes := &SSMSecretResource{ + requiredSecrets: requiredSecretData, + } + + number := ssmRes.getGoRoutineMaxNum() + assert.Equal(t, 1, number) +} + +func TestMarshalUnmarshalJSON(t *testing.T) { + requiredSecretData := make(map[string][]apicontainer.Secret) + secretsInRegion1 := []apicontainer.Secret{ + { + Name: secretName1, + ValueFrom: valueFrom1, + Region: region1, + Provider: "ssm", + }, + } + requiredSecretData[region1] = secretsInRegion1 + + ssmResIn := &SSMSecretResource{ + taskARN: taskARN, + executionCredentialsID: executionCredentialsID, + createdAt: time.Now(), + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + requiredSecrets: requiredSecretData, + } + + bytes, err := json.Marshal(ssmResIn) + require.NoError(t, err) + + ssmResOut := &SSMSecretResource{} + err = json.Unmarshal(bytes, ssmResOut) + require.NoError(t, err) + assert.Equal(t, ssmResIn.taskARN, 
ssmResOut.taskARN) + assert.WithinDuration(t, ssmResIn.createdAt, ssmResOut.createdAt, time.Microsecond) + assert.Equal(t, ssmResIn.desiredStatusUnsafe, ssmResOut.desiredStatusUnsafe) + assert.Equal(t, ssmResIn.knownStatusUnsafe, ssmResOut.knownStatusUnsafe) + assert.Equal(t, ssmResIn.executionCredentialsID, ssmResOut.executionCredentialsID) + assert.Equal(t, len(ssmResIn.requiredSecrets), len(ssmResOut.requiredSecrets)) + assert.Equal(t, ssmResIn.requiredSecrets[region1], ssmResOut.requiredSecrets[region1]) +} + +func TestInitialize(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + credentialsManager := mock_credentials.NewMockManager(ctrl) + ssmClientCreator := mock_factory.NewMockSSMClientCreator(ctrl) + ssmRes := &SSMSecretResource{ + knownStatusUnsafe: resourcestatus.ResourceCreated, + desiredStatusUnsafe: resourcestatus.ResourceCreated, + } + ssmRes.Initialize(&taskresource.ResourceFields{ + ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ + SSMClientCreator: ssmClientCreator, + CredentialsManager: credentialsManager, + }, + }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning) + assert.Equal(t, resourcestatus.ResourceStatusNone, ssmRes.GetKnownStatus()) + assert.Equal(t, resourcestatus.ResourceCreated, ssmRes.GetDesiredStatus()) + +} + +func TestClearSSMSecretValue(t *testing.T) { + secretValues := map[string]string{ + "db_name": "db_value", + "secret": "secret_value", + } + + ssmRes := &SSMSecretResource{ + secretData: secretValues, + } + ssmRes.clearSSMSecretValue() + assert.Equal(t, 0, len(ssmRes.secretData)) +} diff --git a/agent/taskresource/ssmsecret/ssmsecretstatus.go b/agent/taskresource/ssmsecret/ssmsecretstatus.go new file mode 100644 index 00000000000..2c10ef478cf --- /dev/null +++ b/agent/taskresource/ssmsecret/ssmsecretstatus.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ssmsecret + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +type SSMSecretStatus resourcestatus.ResourceStatus + +const ( + // is the zero state of a task resource + SSMSecretStatusNone SSMSecretStatus = iota + // represents a task resource which has been created + SSMSecretCreated + // represents a task resource which has been cleaned up + SSMSecretRemoved +) + +var ssmSecretStatusMap = map[string]SSMSecretStatus{ + "NONE": SSMSecretStatusNone, + "CREATED": SSMSecretCreated, + "REMOVED": SSMSecretRemoved, +} + +// StatusString returns a human readable string representation of this object +func (as SSMSecretStatus) String() string { + for k, v := range ssmSecretStatusMap { + if v == as { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (as *SSMSecretStatus) MarshalJSON() ([]byte, error) { + if as == nil { + return nil, errors.New("ssmsecret resource status is nil") + } + return []byte(`"` + as.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (as *SSMSecretStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *as = SSMSecretStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *as = SSMSecretStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := 
ssmSecretStatusMap[string(strStatus)] + if !ok { + *as = SSMSecretStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *as = stat + return nil +} diff --git a/agent/taskresource/ssmsecret/ssmsecretstatus_test.go b/agent/taskresource/ssmsecret/ssmsecretstatus_test.go new file mode 100644 index 00000000000..c1b54e74336 --- /dev/null +++ b/agent/taskresource/ssmsecret/ssmsecretstatus_test.go @@ -0,0 +1,157 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ssmsecret + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + cases := []struct { + Name string + InSSMSecretStatus SSMSecretStatus + OutSSMSecretStatus string + }{ + { + Name: "ToStringSSMSecretStatusNone", + InSSMSecretStatus: SSMSecretStatusNone, + OutSSMSecretStatus: "NONE", + }, + { + Name: "ToStringSSMSecretCreated", + InSSMSecretStatus: SSMSecretCreated, + OutSSMSecretStatus: "CREATED", + }, + { + Name: "ToStringSSMSecretRemoved", + InSSMSecretStatus: SSMSecretRemoved, + OutSSMSecretStatus: "REMOVED", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + assert.Equal(t, c.OutSSMSecretStatus, c.InSSMSecretStatus.String()) + }) + } +} + +func TestMarshalNilSSMSecretStatus(t *testing.T) { + var status *SSMSecretStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Error(t, err) +} + +func TestMarshalSSMSecretStatus(t *testing.T) { + cases := []struct { + Name string + InSSMSecretStatus SSMSecretStatus + OutSSMSecretStatus string + }{ + { + Name: "MarshallSSMSecretStatusNone", + InSSMSecretStatus: SSMSecretStatusNone, + OutSSMSecretStatus: "\"NONE\"", + }, + { + Name: "MarshallSSMSecretCreated", + InSSMSecretStatus: SSMSecretCreated, + OutSSMSecretStatus: "\"CREATED\"", + }, + { + Name: "MarshallSSMSecretRemoved", + InSSMSecretStatus: SSMSecretRemoved, + OutSSMSecretStatus: "\"REMOVED\"", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + bytes, err := c.InSSMSecretStatus.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, c.OutSSMSecretStatus, string(bytes[:])) + }) + } + +} + +func TestUnmarshalSSMSecretStatus(t *testing.T) { + cases := []struct { + Name string + InSSMSecretStatus string + OutSSMSecretStatus SSMSecretStatus + ShouldError bool + }{ + { + Name: "UnmarshallSSMSecretStatusNone", + InSSMSecretStatus: "\"NONE\"", + OutSSMSecretStatus: SSMSecretStatusNone, + ShouldError: false, + }, 
+ { + Name: "UnmarshallSSMSecretCreated", + InSSMSecretStatus: "\"CREATED\"", + OutSSMSecretStatus: SSMSecretCreated, + ShouldError: false, + }, + { + Name: "UnmarshallSSMSecretRemoved", + InSSMSecretStatus: "\"REMOVED\"", + OutSSMSecretStatus: SSMSecretRemoved, + ShouldError: false, + }, + { + Name: "UnmarshallSSMSecretStatusNull", + InSSMSecretStatus: "null", + OutSSMSecretStatus: SSMSecretStatusNone, + ShouldError: false, + }, + { + Name: "UnmarshallSSMSecretStatusNonString", + InSSMSecretStatus: "1", + OutSSMSecretStatus: SSMSecretStatusNone, + ShouldError: true, + }, + { + Name: "UnmarshallSSMSecretStatusUnmappedStatus", + InSSMSecretStatus: "\"LOL\"", + OutSSMSecretStatus: SSMSecretStatusNone, + ShouldError: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + + var status SSMSecretStatus + err := json.Unmarshal([]byte(c.InSSMSecretStatus), &status) + + if c.ShouldError { + assert.Error(t, err) + } else { + + assert.NoError(t, err) + assert.Equal(t, c.OutSSMSecretStatus, status) + } + }) + } +} diff --git a/agent/taskresource/status/status.go b/agent/taskresource/status/status.go new file mode 100644 index 00000000000..79c4f74ce7f --- /dev/null +++ b/agent/taskresource/status/status.go @@ -0,0 +1,26 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+// ResourceStatus is an enumeration of valid states of task resource lifecycle + +package status + +type ResourceStatus int32 + +const ( + // ResourceStatusNone is the zero state of a task resource + ResourceStatusNone ResourceStatus = iota + // ResourceCreated represents state where task resource has been created + ResourceCreated + // ResourceRemoved represents state where task resource has been cleaned up + ResourceRemoved +) diff --git a/agent/taskresource/types/types.go b/agent/taskresource/types/types.go new file mode 100644 index 00000000000..b1025d0dd39 --- /dev/null +++ b/agent/taskresource/types/types.go @@ -0,0 +1,258 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package types + +import ( + "encoding/json" + "errors" + + "github.com/aws/amazon-ecs-agent/agent/taskresource" + asmauthres "github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth" + asmsecretres "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + cgroupres "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" + "github.com/aws/amazon-ecs-agent/agent/taskresource/envFiles" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + "github.com/aws/amazon-ecs-agent/agent/taskresource/fsxwindowsfileserver" + ssmsecretres "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" +) + +const ( + // CgroupKey is the string used in resources map to represent cgroup resource + CgroupKey = "cgroup" + // DockerVolumeKey is the string used in resources map to represent docker volume + DockerVolumeKey = "dockerVolume" + // ASMAuthKey is the string used in resources map to represent asm auth + ASMAuthKey = asmauthres.ResourceName + // SSMSecretKey is the string used in resources map to represent ssm secret + SSMSecretKey = ssmsecretres.ResourceName + // ASMSecretKey is the string used in resources map to represent asm secret + ASMSecretKey = asmsecretres.ResourceName + // FirelensKey is the string used in resources map to represent firelens resource + FirelensKey = firelens.ResourceName + // CredentialSpecKey is the string used in resources map to represent credentialspec resource + CredentialSpecKey = credentialspec.ResourceName + //EnvironmentFilesKey is the string used in resources map to represent environmentfiles resource + EnvironmentFilesKey = envFiles.ResourceName + // FSxWindowsFileServerKey is the string used in resources map to represent fsxwindowsfileserver resource + FSxWindowsFileServerKey = fsxwindowsfileserver.ResourceName +) + +// ResourcesMap represents the map of resource type to the 
corresponding resource +// objects +type ResourcesMap map[string][]taskresource.TaskResource + +// UnmarshalJSON unmarshals ResourcesMap object +func (rm *ResourcesMap) UnmarshalJSON(data []byte) error { + resources := make(map[string]json.RawMessage) + err := json.Unmarshal(data, &resources) + if err != nil { + return err + } + result := make(map[string][]taskresource.TaskResource) + for key, value := range resources { + if err := unmarshalResource(key, value, result); err != nil { + return err + } + } + *rm = result + return nil +} + +func unmarshalResource(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + switch key { + case CgroupKey: + return unmarshlCgroup(key, value, result) + case DockerVolumeKey: + return unmarshalDockerVolume(key, value, result) + case ASMAuthKey: + return unmarshalASMAuthKey(key, value, result) + case SSMSecretKey: + return unmarshalSSMSecretKey(key, value, result) + case ASMSecretKey: + return unmarshalASMSecretKey(key, value, result) + case FirelensKey: + return unmarshalFirelensKey(key, value, result) + case CredentialSpecKey: + return unmarshalCredentialSpecKey(key, value, result) + case EnvironmentFilesKey: + return unmarshalEnvironmentFilesKey(key, value, result) + case FSxWindowsFileServerKey: + return unmarshalFSxWindowsFileServerKey(key, value, result) + default: + return errors.New("Unsupported resource type") + } +} + +func unmarshlCgroup(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var cgroups []json.RawMessage + err := json.Unmarshal(value, &cgroups) + if err != nil { + return err + } + for _, c := range cgroups { + cgroup := &cgroupres.CgroupResource{} + err := cgroup.UnmarshalJSON(c) + if err != nil { + return err + } + result[key] = append(result[key], cgroup) + } + return nil +} + +func unmarshalDockerVolume(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var volumes []json.RawMessage + err 
:= json.Unmarshal(value, &volumes) + if err != nil { + return err + } + for _, vol := range volumes { + dockerVolume := &volume.VolumeResource{} + err := dockerVolume.UnmarshalJSON(vol) + if err != nil { + return err + } + result[key] = append(result[key], dockerVolume) + } + return nil +} + +func unmarshalASMAuthKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var asmauths []json.RawMessage + err := json.Unmarshal(value, &asmauths) + if err != nil { + return err + } + + for _, a := range asmauths { + auth := &asmauthres.ASMAuthResource{} + err := auth.UnmarshalJSON(a) + if err != nil { + return err + } + result[key] = append(result[key], auth) + } + return nil +} + +func unmarshalSSMSecretKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var ssmsecrets []json.RawMessage + err := json.Unmarshal(value, &ssmsecrets) + if err != nil { + return err + } + + for _, secret := range ssmsecrets { + res := &ssmsecretres.SSMSecretResource{} + err := res.UnmarshalJSON(secret) + if err != nil { + return err + } + result[key] = append(result[key], res) + } + return nil +} + +func unmarshalASMSecretKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var asmsecrets []json.RawMessage + err := json.Unmarshal(value, &asmsecrets) + if err != nil { + return err + } + + for _, secret := range asmsecrets { + res := &asmsecretres.ASMSecretResource{} + err := res.UnmarshalJSON(secret) + if err != nil { + return err + } + result[key] = append(result[key], res) + } + return nil +} + +func unmarshalFirelensKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var firelensResources []json.RawMessage + err := json.Unmarshal(value, &firelensResources) + if err != nil { + return err + } + + for _, firelensResource := range firelensResources { + res := &firelens.FirelensResource{} + err := 
res.UnmarshalJSON(firelensResource) + if err != nil { + return err + } + + result[key] = append(result[key], res) + } + return nil +} + +func unmarshalCredentialSpecKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var credentialSpecs []json.RawMessage + err := json.Unmarshal(value, &credentialSpecs) + if err != nil { + return err + } + + for _, credSpec := range credentialSpecs { + res := &credentialspec.CredentialSpecResource{} + err := res.UnmarshalJSON(credSpec) + if err != nil { + return err + } + result[key] = append(result[key], res) + } + return nil +} + +func unmarshalEnvironmentFilesKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var environmentFiles []json.RawMessage + err := json.Unmarshal(value, &environmentFiles) + if err != nil { + return err + } + + for _, environmentFile := range environmentFiles { + res := &envFiles.EnvironmentFileResource{} + err := res.UnmarshalJSON(environmentFile) + if err != nil { + return err + } + result[key] = append(result[key], res) + } + return nil +} + +func unmarshalFSxWindowsFileServerKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error { + var fsxWindowsFileServers []json.RawMessage + err := json.Unmarshal(value, &fsxWindowsFileServers) + if err != nil { + return err + } + + for _, fsxWindowsFileServer := range fsxWindowsFileServers { + res := &fsxwindowsfileserver.FSxWindowsFileServerResource{} + err := res.UnmarshalJSON(fsxWindowsFileServer) + if err != nil { + return err + } + result[key] = append(result[key], res) + } + return nil +} diff --git a/agent/taskresource/types/types_linux_test.go b/agent/taskresource/types/types_linux_test.go new file mode 100644 index 00000000000..5d9ae7a289c --- /dev/null +++ b/agent/taskresource/types/types_linux_test.go @@ -0,0 +1,67 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package types + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aws/amazon-ecs-agent/agent/taskresource" + cgroupres "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup" + "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +func TestUnmarshalResourcesMap(t *testing.T) { + cgroupRoot := "/ecs/taskid" + cgroupMountPath := "/sys/fs/cgroup" + bytes := []byte(`{"cgroup":[{"CgroupRoot":"/ecs/taskid","CgroupMountPath":"/sys/fs/cgroup","CreatedAt":"0001-01-01T00:00:00Z","DesiredStatus":"REMOVED","KnownStatus":"REMOVED"}]}`) + unmarshalledMap := make(ResourcesMap) + err := unmarshalledMap.UnmarshalJSON(bytes) + assert.NoError(t, err) + var cgroupResource *cgroupres.CgroupResource + cgroupResource = unmarshalledMap["cgroup"][0].(*cgroupres.CgroupResource) + assert.Equal(t, cgroupRoot, cgroupResource.GetCgroupRoot()) + assert.Equal(t, cgroupMountPath, cgroupResource.GetCgroupMountPath()) + assert.Equal(t, time.Time{}, cgroupResource.GetCreatedAt()) + assert.Equal(t, resourcestatus.ResourceStatus(cgroupres.CgroupRemoved), cgroupResource.GetDesiredStatus()) + assert.Equal(t, resourcestatus.ResourceStatus(cgroupres.CgroupRemoved), cgroupResource.GetKnownStatus()) +} + +func TestMarshalUnmarshalFirelensResource(t *testing.T) { + resources := 
make(map[string][]taskresource.TaskResource) + firelensResources := []taskresource.TaskResource{ + &firelens.FirelensResource{}, + } + firelensResources[0].SetDesiredStatus(resourcestatus.ResourceCreated) + firelensResources[0].SetKnownStatus(resourcestatus.ResourceStatusNone) + + resources["firelens"] = firelensResources + data, err := json.Marshal(resources) + require.NoError(t, err) + + var unmarshalledResource ResourcesMap + err = json.Unmarshal(data, &unmarshalledResource) + assert.NoError(t, err) + unMarshalledFirelensResources, ok := unmarshalledResource["firelens"] + assert.True(t, ok) + assert.Equal(t, unMarshalledFirelensResources[0].GetDesiredStatus(), resourcestatus.ResourceCreated) + assert.Equal(t, unMarshalledFirelensResources[0].GetKnownStatus(), resourcestatus.ResourceStatusNone) +} diff --git a/agent/taskresource/types/types_test.go b/agent/taskresource/types/types_test.go new file mode 100644 index 00000000000..51c4b56d5dd --- /dev/null +++ b/agent/taskresource/types/types_test.go @@ -0,0 +1,108 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package types + +import ( + "encoding/json" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret" + "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/aws/amazon-ecs-agent/agent/taskresource/volume" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + secretKeyWest1 = "/test/secretName_us-west-2" + asmSecretKeyWest1 = "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName_us-west-2" +) + +func TestMarshalUnmarshalVolumeResource(t *testing.T) { + resources := make(map[string][]taskresource.TaskResource) + + volumes := []taskresource.TaskResource{ + &volume.VolumeResource{ + Name: "test-volume", + VolumeConfig: volume.DockerVolumeConfig{ + DockerVolumeName: "test-volume-docker", + Scope: "task", + Autoprovision: true, + Driver: "local", + }, + }, + } + volumes[0].SetDesiredStatus(resourcestatus.ResourceCreated) + volumes[0].SetKnownStatus(resourcestatus.ResourceStatusNone) + + resources["dockerVolume"] = volumes + data, err := json.Marshal(resources) + require.NoError(t, err) + + var unMarshalledResource ResourcesMap + err = json.Unmarshal(data, &unMarshalledResource) + assert.NoError(t, err, "unmarshal volume resource from data failed") + unMarshalledVolumes, ok := unMarshalledResource["dockerVolume"] + assert.True(t, ok, "volume resource not found in the resource map") + assert.Equal(t, unMarshalledVolumes[0].GetName(), "test-volume") + assert.Equal(t, unMarshalledVolumes[0].GetDesiredStatus(), resourcestatus.ResourceCreated) + assert.Equal(t, unMarshalledVolumes[0].GetKnownStatus(), resourcestatus.ResourceStatusNone) +} + +func TestMarshalUnmarshalSSMSecretResource(t *testing.T) { + resources := make(map[string][]taskresource.TaskResource) + ssmSecrets := []taskresource.TaskResource{ + &ssmsecret.SSMSecretResource{}, + } + + 
ssmSecrets[0].SetDesiredStatus(resourcestatus.ResourceCreated) + ssmSecrets[0].SetKnownStatus(resourcestatus.ResourceStatusNone) + + resources["ssmsecret"] = ssmSecrets + data, err := json.Marshal(resources) + require.NoError(t, err) + + var unMarshalledResource ResourcesMap + err = json.Unmarshal(data, &unMarshalledResource) + assert.NoError(t, err) + unMarshalledSSMSecret, ok := unMarshalledResource["ssmsecret"] + assert.True(t, ok) + assert.Equal(t, unMarshalledSSMSecret[0].GetDesiredStatus(), resourcestatus.ResourceCreated) + assert.Equal(t, unMarshalledSSMSecret[0].GetKnownStatus(), resourcestatus.ResourceStatusNone) +} + +func TestMarshalUnmarshalASMSecretResource(t *testing.T) { + resources := make(map[string][]taskresource.TaskResource) + asmSecrets := []taskresource.TaskResource{ + &asmsecret.ASMSecretResource{}, + } + asmSecrets[0].SetDesiredStatus(resourcestatus.ResourceCreated) + asmSecrets[0].SetKnownStatus(resourcestatus.ResourceStatusNone) + + resources["asmsecret"] = asmSecrets + data, err := json.Marshal(resources) + require.NoError(t, err) + + var unMarshalledResource ResourcesMap + err = json.Unmarshal(data, &unMarshalledResource) + assert.NoError(t, err) + unMarshalledASMSecret, ok := unMarshalledResource["asmsecret"] + assert.True(t, ok) + assert.Equal(t, unMarshalledASMSecret[0].GetDesiredStatus(), resourcestatus.ResourceCreated) + assert.Equal(t, unMarshalledASMSecret[0].GetKnownStatus(), resourcestatus.ResourceStatusNone) +} diff --git a/agent/taskresource/types_common.go b/agent/taskresource/types_common.go new file mode 100644 index 00000000000..e2fb0b8a501 --- /dev/null +++ b/agent/taskresource/types_common.go @@ -0,0 +1,31 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package taskresource + +import ( + asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + "github.com/aws/amazon-ecs-agent/agent/credentials" + fsxfactory "github.com/aws/amazon-ecs-agent/agent/fsx/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" + "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" +) + +type ResourceFieldsCommon struct { + IOUtil ioutilwrapper.IOUtil + ASMClientCreator asmfactory.ClientCreator + SSMClientCreator ssmfactory.SSMClientCreator + FSxClientCreator fsxfactory.FSxClientCreator + CredentialsManager credentials.Manager + EC2InstanceID string +} diff --git a/agent/taskresource/types_unix.go b/agent/taskresource/types_unix.go new file mode 100644 index 00000000000..8b7fc1d9426 --- /dev/null +++ b/agent/taskresource/types_unix.go @@ -0,0 +1,34 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package taskresource + +import ( + "context" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/gpu" + cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" +) + +// ResourceFields is the list of fields required for creation of task resources +// obtained from engine +type ResourceFields struct { + Control cgroup.Control + *ResourceFieldsCommon + Ctx context.Context + DockerClient dockerapi.DockerClient + NvidiaGPUManager gpu.GPUManager +} diff --git a/agent/taskresource/types_unsupported.go b/agent/taskresource/types_unsupported.go new file mode 100644 index 00000000000..20025be2f68 --- /dev/null +++ b/agent/taskresource/types_unsupported.go @@ -0,0 +1,29 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package taskresource + +import ( + "context" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" +) + +// ResourceFields is the list of fields required for creation of task resources +type ResourceFields struct { + *ResourceFieldsCommon + Ctx context.Context + DockerClient dockerapi.DockerClient +} diff --git a/agent/taskresource/types_windows.go b/agent/taskresource/types_windows.go new file mode 100644 index 00000000000..61f3eff0b81 --- /dev/null +++ b/agent/taskresource/types_windows.go @@ -0,0 +1,31 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package taskresource + +import ( + "context" + + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + s3factory "github.com/aws/amazon-ecs-agent/agent/s3/factory" +) + +// ResourceFields is the list of fields required for creation of task resources +type ResourceFields struct { + *ResourceFieldsCommon + Ctx context.Context + DockerClient dockerapi.DockerClient + S3ClientCreator s3factory.S3ClientCreator +} diff --git a/agent/taskresource/volume/dockervolume.go b/agent/taskresource/volume/dockervolume.go new file mode 100644 index 00000000000..37839c0fb7f --- /dev/null +++ b/agent/taskresource/volume/dockervolume.go @@ -0,0 +1,490 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package volume + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +const ( + // TaskScope indicates that the volume is created and deleted with task + TaskScope = "task" + // SharedScope indicates that the volume's lifecycle is outside the scope of task + SharedScope = "shared" + // DockerLocalVolumeDriver is the name of the docker default volume driver + DockerLocalVolumeDriver = "local" + resourceProvisioningError = "VolumeError: Agent could not create task's volume resources" + EFSVolumeType = "efs" + netNSFormat = "/proc/%s/ns/net" +) + +// VolumeResource represents volume resource +type VolumeResource struct { + // Name is the name of the docker volume + Name string + VolumeType string + // VolumeConfig contains docker specific volume fields + VolumeConfig DockerVolumeConfig + pauseContainerPIDUnsafe string + + createdAtUnsafe time.Time + desiredStatusUnsafe resourcestatus.ResourceStatus + knownStatusUnsafe resourcestatus.ResourceStatus + // appliedStatusUnsafe is the status that has been "applied" (e.g., we've called some + // operation such as 'Create' on the resource) but we don't yet know that the + // application was successful, which may then change the known status. 
This is + // used while progressing resource states in progressTask() of task manager + appliedStatusUnsafe resourcestatus.ResourceStatus + statusToTransitions map[resourcestatus.ResourceStatus]func() error + client dockerapi.DockerClient + ctx context.Context + + // TransitionDependenciesMap is a map of the dependent container status to other + // dependencies that must be satisfied in order for this container to transition. + transitionDependenciesMap taskresource.TransitionDependenciesMap + + // terminalReason should be set for resource creation failures. This ensures + // the resource object carries some context for why provisioning failed. + terminalReason string + terminalReasonOnce sync.Once + + // lock is used for fields that are accessed and updated concurrently + lock sync.RWMutex +} + +// DockerVolumeConfig represents docker volume configuration +// See https://tinyurl.com/zmexdw2 +type DockerVolumeConfig struct { + // Scope represents lifetime of the volume: "task" or "shared" + Scope string `json:"scope"` + // Autoprovision is true if agent needs to create the volume, + // false if it is pre-provisioned outside of ECS + Autoprovision bool `json:"autoprovision"` + // Mountpoint is a read-only field returned from docker + Mountpoint string `json:"mountPoint"` + Driver string `json:"driver"` + DriverOpts map[string]string `json:"driverOpts"` + Labels map[string]string `json:"labels"` + // DockerVolumeName is internal docker name for this volume. 
+ DockerVolumeName string `json:"dockerVolumeName"` +} + +// NewVolumeResource returns a docker volume wrapper object +func NewVolumeResource(ctx context.Context, + name string, + volumeType string, + dockerVolumeName string, + scope string, + autoprovision bool, + driver string, + driverOptions map[string]string, + labels map[string]string, + client dockerapi.DockerClient) (*VolumeResource, error) { + + if scope == TaskScope && autoprovision { + return nil, errors.Errorf("volume [%s] : task scoped volume could not be autoprovisioned", name) + } + + v := &VolumeResource{ + Name: name, + VolumeType: volumeType, + VolumeConfig: DockerVolumeConfig{ + Scope: scope, + Autoprovision: autoprovision, + Driver: driver, + DriverOpts: driverOptions, + Labels: labels, + DockerVolumeName: dockerVolumeName, + }, + client: client, + ctx: ctx, + transitionDependenciesMap: make(map[resourcestatus.ResourceStatus]taskresource.TransitionDependencySet), + } + v.initStatusToTransitions() + return v, nil +} + +func (vol *VolumeResource) Initialize(resourceFields *taskresource.ResourceFields, + taskKnownStatus status.TaskStatus, + taskDesiredStatus status.TaskStatus) { + + vol.ctx = resourceFields.Ctx + vol.client = resourceFields.DockerClient + vol.initStatusToTransitions() +} + +func (vol *VolumeResource) initStatusToTransitions() { + statusToTransitions := map[resourcestatus.ResourceStatus]func() error{ + resourcestatus.ResourceStatus(VolumeCreated): vol.Create, + } + + vol.statusToTransitions = statusToTransitions +} + +// Source returns the name of the volume resource which is used as the source of the volume mount +func (cfg *EFSVolumeConfig) Source() string { + return cfg.DockerVolumeName +} + +// Source returns the name of the volume resource which is used as the source of the volume mount +func (cfg *DockerVolumeConfig) Source() string { + return cfg.DockerVolumeName +} + +// GetName returns the name of the volume resource +func (vol *VolumeResource) GetName() string { + return 
vol.Name +} + +// DesiredTerminal returns true if the volume's desired status is REMOVED +func (vol *VolumeResource) DesiredTerminal() bool { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.desiredStatusUnsafe == resourcestatus.ResourceStatus(VolumeRemoved) +} + +// GetTerminalReason returns an error string to propagate up through to task +// state change messages +func (vol *VolumeResource) GetTerminalReason() string { + if vol.terminalReason == "" { + return resourceProvisioningError + } + return vol.terminalReason +} + +func (vol *VolumeResource) setTerminalReason(reason string) { + vol.terminalReasonOnce.Do(func() { + seelog.Infof("Volume Resource [%s]: setting terminal reason for volume resource, reason: %s", vol.Name, reason) + vol.terminalReason = reason + }) +} + +// SetDesiredStatus safely sets the desired status of the resource +func (vol *VolumeResource) SetDesiredStatus(status resourcestatus.ResourceStatus) { + vol.lock.Lock() + defer vol.lock.Unlock() + + vol.desiredStatusUnsafe = status +} + +// GetDesiredStatus safely returns the desired status of the resource +func (vol *VolumeResource) GetDesiredStatus() resourcestatus.ResourceStatus { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.desiredStatusUnsafe +} + +// SetKnownStatus safely sets the currently known status of the resource +func (vol *VolumeResource) SetKnownStatus(status resourcestatus.ResourceStatus) { + vol.lock.Lock() + defer vol.lock.Unlock() + + vol.knownStatusUnsafe = status + vol.updateAppliedStatusUnsafe(status) +} + +// GetKnownStatus safely returns the currently known status of the resource +func (vol *VolumeResource) GetKnownStatus() resourcestatus.ResourceStatus { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.knownStatusUnsafe +} + +// KnownCreated returns true if the volume's known status is CREATED +func (vol *VolumeResource) KnownCreated() bool { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.knownStatusUnsafe == 
resourcestatus.ResourceStatus(VolumeCreated) +} + +// TerminalStatus returns the last transition state of volume +func (vol *VolumeResource) TerminalStatus() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(VolumeRemoved) +} + +// NextKnownState returns the state that the resource should +// progress to based on its `KnownState`. +func (vol *VolumeResource) NextKnownState() resourcestatus.ResourceStatus { + return vol.GetKnownStatus() + 1 +} + +// SteadyState returns the transition state of the resource defined as "ready" +func (vol *VolumeResource) SteadyState() resourcestatus.ResourceStatus { + return resourcestatus.ResourceStatus(VolumeCreated) +} + +// ApplyTransition calls the function required to move to the specified status +func (vol *VolumeResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error { + transitionFunc, ok := vol.statusToTransitions[nextState] + if !ok { + errW := errors.Errorf("volume [%s]: transition to %s impossible", vol.Name, + vol.StatusString(nextState)) + vol.setTerminalReason(errW.Error()) + return errW + } + return transitionFunc() +} + +// SetAppliedStatus sets the applied status of resource and returns whether +// the resource is already in a transition +func (vol *VolumeResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool { + vol.lock.Lock() + defer vol.lock.Unlock() + + if vol.appliedStatusUnsafe != resourcestatus.ResourceStatus(VolumeStatusNone) { + // return false to indicate the set operation failed + return false + } + + vol.appliedStatusUnsafe = status + return true +} + +// StatusString returns the string of the volume resource status +func (vol *VolumeResource) StatusString(status resourcestatus.ResourceStatus) string { + return VolumeStatus(status).String() +} + +// SetCreatedAt sets the timestamp for resource's creation time +func (vol *VolumeResource) SetCreatedAt(createdAt time.Time) { + if createdAt.IsZero() { + return + } + vol.lock.Lock() + defer 
vol.lock.Unlock() + + vol.createdAtUnsafe = createdAt +} + +// GetCreatedAt returns the timestamp for resource's creation time +func (vol *VolumeResource) GetCreatedAt() time.Time { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.createdAtUnsafe +} + +// setMountPoint sets the mountpoint of the created volume. +// This is a read-only field, hence making this a private function. +func (vol *VolumeResource) setMountPoint(mountPoint string) { + vol.lock.Lock() + defer vol.lock.Unlock() + + vol.VolumeConfig.Mountpoint = mountPoint +} + +// GetMountPoint gets the mountpoint of the created volume. +func (vol *VolumeResource) GetMountPoint() string { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.VolumeConfig.Mountpoint +} + +// SourcePath is fulfilling the HostVolume interface +func (vol *VolumeResource) SourcePath() string { + return vol.GetMountPoint() +} + +// Create performs resource creation +func (vol *VolumeResource) Create() error { + seelog.Debugf("Creating volume with name %s using driver %s", vol.VolumeConfig.DockerVolumeName, vol.VolumeConfig.Driver) + volumeResponse := vol.client.CreateVolume( + vol.ctx, + vol.VolumeConfig.DockerVolumeName, + vol.VolumeConfig.Driver, + vol.getDriverOpts(), + vol.VolumeConfig.Labels, + dockerclient.CreateVolumeTimeout) + + if volumeResponse.Error != nil { + vol.setTerminalReason(volumeResponse.Error.Error()) + return volumeResponse.Error + } + + // set readonly field after creation + vol.setMountPoint(volumeResponse.DockerVolume.Name) + return nil +} + +func (vol *VolumeResource) getDriverOpts() map[string]string { + opts := vol.VolumeConfig.DriverOpts + if vol.VolumeConfig.Driver != ECSVolumePlugin { + return opts + } + // For awsvpc network mode, pause pid will be set (during the step when the pause container transitions to + // RESOURCE_PROVISIONED), and if the driver is the ecs volume plugin, we will pass the network namespace handle + // to the driver so that the mount will happen in the task 
network namespace. + pausePID := vol.GetPauseContainerPID() + if pausePID != "" { + mntOpt := NewVolumeMountOptionsFromString(opts["o"]) + mntOpt.AddOption("netns", fmt.Sprintf(netNSFormat, pausePID)) + opts["o"] = mntOpt.String() + } + return opts +} + +// Cleanup performs resource cleanup +func (vol *VolumeResource) Cleanup() error { + // Enable volume clean up if it's task scoped + if vol.VolumeConfig.Scope != TaskScope { + seelog.Debugf("Volume [%s] is shared, not removing", vol.Name) + return nil + } + + seelog.Debugf("Removing volume with name %s", vol.Name) + err := vol.client.RemoveVolume(vol.ctx, vol.VolumeConfig.DockerVolumeName, dockerclient.RemoveVolumeTimeout) + + if err != nil { + vol.setTerminalReason(err.Error()) + return err + } + return nil +} + +// volumeResourceJSON duplicates VolumeResource fields, only for marshalling and unmarshalling purposes +type volumeResourceJSON struct { + Name string `json:"name"` + VolumeConfig DockerVolumeConfig `json:"dockerVolumeConfiguration"` + PauseContainerPID string `json:"pauseContainerPID,omitempty"` + CreatedAt time.Time `json:"createdAt"` + DesiredStatus *VolumeStatus `json:"desiredStatus"` + KnownStatus *VolumeStatus `json:"knownStatus"` +} + +// MarshalJSON marshals VolumeResource object using duplicate struct VolumeResourceJSON +func (vol *VolumeResource) MarshalJSON() ([]byte, error) { + if vol == nil { + return nil, nil + } + return json.Marshal(volumeResourceJSON{ + vol.Name, + vol.VolumeConfig, + vol.GetPauseContainerPID(), + vol.GetCreatedAt(), + func() *VolumeStatus { desiredState := VolumeStatus(vol.GetDesiredStatus()); return &desiredState }(), + func() *VolumeStatus { knownState := VolumeStatus(vol.GetKnownStatus()); return &knownState }(), + }) +} + +// UnmarshalJSON unmarshals VolumeResource object using duplicate struct VolumeResourceJSON +func (vol *VolumeResource) UnmarshalJSON(b []byte) error { + temp := &volumeResourceJSON{} + + if err := json.Unmarshal(b, &temp); err != nil { + return 
err + } + + vol.Name = temp.Name + vol.VolumeConfig = temp.VolumeConfig + vol.SetPauseContainerPID(temp.PauseContainerPID) + if temp.DesiredStatus != nil { + vol.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus)) + } + if temp.KnownStatus != nil { + vol.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus)) + } + return nil +} + +// updateAppliedStatusUnsafe updates the resource transitioning status +func (vol *VolumeResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) { + if vol.appliedStatusUnsafe == resourcestatus.ResourceStatus(VolumeStatusNone) { + return + } + + // Check if the resource transition has already finished + if vol.appliedStatusUnsafe <= knownStatus { + vol.appliedStatusUnsafe = resourcestatus.ResourceStatus(VolumeStatusNone) + } +} + +func (vol *VolumeResource) GetAppliedStatus() resourcestatus.ResourceStatus { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.appliedStatusUnsafe +} + +// DependOnTaskNetwork shows whether the resource creation needs task network setup beforehand +func (vol *VolumeResource) DependOnTaskNetwork() bool { + return vol.VolumeType == EFSVolumeType +} + +// BuildContainerDependency sets the container dependencies of the resource. 
+func (vol *VolumeResource) BuildContainerDependency(containerName string, satisfied apicontainerstatus.ContainerStatus, + dependent resourcestatus.ResourceStatus) { + // No op for non-EFS volume type + if vol.VolumeType != EFSVolumeType { + return + } + + contDep := apicontainer.ContainerDependency{ + ContainerName: containerName, + SatisfiedStatus: satisfied, + } + if _, ok := vol.transitionDependenciesMap[dependent]; !ok { + vol.transitionDependenciesMap[dependent] = taskresource.TransitionDependencySet{} + } + deps := vol.transitionDependenciesMap[dependent] + deps.ContainerDependencies = append(deps.ContainerDependencies, contDep) + vol.transitionDependenciesMap[dependent] = deps +} + +// GetContainerDependencies returns the container dependencies of the resource. +func (vol *VolumeResource) GetContainerDependencies(dependent resourcestatus.ResourceStatus) []apicontainer.ContainerDependency { + // No op for non-EFS volume type + if vol.VolumeType != EFSVolumeType { + return nil + } + + if _, ok := vol.transitionDependenciesMap[dependent]; !ok { + return nil + } + return vol.transitionDependenciesMap[dependent].ContainerDependencies +} + +// SetPauseContainerPID adds pause container pid to the resource. +func (vol *VolumeResource) SetPauseContainerPID(pid string) { + vol.lock.Lock() + defer vol.lock.Unlock() + + vol.pauseContainerPIDUnsafe = pid +} + +// GetPauseContainerPID returns the pause container pid. +func (vol *VolumeResource) GetPauseContainerPID() string { + vol.lock.RLock() + defer vol.lock.RUnlock() + + return vol.pauseContainerPIDUnsafe +} diff --git a/agent/taskresource/volume/dockervolume_efs.go b/agent/taskresource/volume/dockervolume_efs.go new file mode 100644 index 00000000000..967700ea4c9 --- /dev/null +++ b/agent/taskresource/volume/dockervolume_efs.go @@ -0,0 +1,128 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "fmt" + "strconv" + + "github.com/aws/amazon-ecs-agent/agent/config" + + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/cihub/seelog" +) + +const ( + // ECSVolumePlugin is the driver name of the ECS volume plugin. + ECSVolumePlugin = "amazon-ecs-volume-plugin" + // DockerLocalDriverName is the name of Docker's local volume driver. + DockerLocalDriverName = "local" + + efsVolumePluginDriverType = "efs" + efsLocalDriverType = "nfs" + + // Capability injected by the ecs volume plugin via ECS_VOLUME_PLUGIN_CAPABILITIES. + efsVolumePluginCapability = "efsAuth" + + // Enums used by acs's api-2.json model. + efsIAMAuthEnabled = "ENABLED" + efsTransitEncryptionEnabled = "ENABLED" +) + +// EFSVolumeConfig represents efs volume configuration. +type EFSVolumeConfig struct { + AuthConfig EFSAuthConfig `json:"authorizationConfig,omitempty"` + FileSystemID string `json:"fileSystemId,omitempty"` + RootDirectory string `json:"rootDirectory,omitempty"` + TransitEncryption string `json:"transitEncryption,omitempty"` + TransitEncryptionPort int64 `json:"transitEncryptionPort,omitempty"` + // DockerVolumeName is internal docker name for this volume. + DockerVolumeName string `json:"dockerVolumeName"` +} + +// EFSAuthConfig contains auth config for an efs volume. +type EFSAuthConfig struct { + AccessPointId string `json:"accessPointId,omitempty"` + Iam string `json:"iam,omitempty"` +} + +// GetDriverOptions returns the driver options for creating an EFS volume. 
+func GetDriverOptions(cfg *config.Config, efsVolCfg *EFSVolumeConfig, credsRelativeURI string) map[string]string { + if UseECSVolumePlugin(cfg) { + return efsVolCfg.getVolumePluginDriverOptions(credsRelativeURI) + } + return efsVolCfg.getDockerLocalDriverOptions(cfg) +} + +// UseECSVolumePlugin checks whether we should use the ECS volume plugin. +func UseECSVolumePlugin(cfg *config.Config) bool { + for _, cap := range cfg.VolumePluginCapabilities { + if cap == efsVolumePluginCapability { + return true + } + } + return false +} + +// getDockerLocalDriverOptions returns the options for creating an EFS volume using Docker's local volume driver. +func (efsVolCfg *EFSVolumeConfig) getDockerLocalDriverOptions(cfg *config.Config) map[string]string { + domain := getDomainForPartition(cfg.AWSRegion) + // These are the NFS options recommended by EFS, see: + // https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-general.html + ostr := fmt.Sprintf("addr=%s.efs.%s.%s,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport", efsVolCfg.FileSystemID, cfg.AWSRegion, domain) + devstr := fmt.Sprintf(":%s", efsVolCfg.RootDirectory) + return map[string]string{ + "type": efsLocalDriverType, + "device": devstr, + "o": ostr, + } +} + +// getVolumePluginDriverOptions returns the options for creating an EFS volume using the ECS volume plugin. 
+func (efsVolCfg *EFSVolumeConfig) getVolumePluginDriverOptions(credsRelativeURI string) map[string]string { + device := efsVolCfg.FileSystemID + if efsVolCfg.RootDirectory != "" { + device = fmt.Sprintf("%s:%s", device, efsVolCfg.RootDirectory) + } + + mntOpt := NewVolumeMountOptions() + if efsVolCfg.TransitEncryption == efsTransitEncryptionEnabled { + mntOpt.AddOption("tls", "") + } + if efsVolCfg.TransitEncryptionPort != 0 { + mntOpt.AddOption("tlsport", strconv.Itoa(int(efsVolCfg.TransitEncryptionPort))) + } + if efsVolCfg.AuthConfig.Iam == efsIAMAuthEnabled { + mntOpt.AddOption("iam", "") + mntOpt.AddOption("awscredsuri", credsRelativeURI) + } + if efsVolCfg.AuthConfig.AccessPointId != "" { + mntOpt.AddOption("accesspoint", efsVolCfg.AuthConfig.AccessPointId) + } + options := map[string]string{ + "type": efsVolumePluginDriverType, + "device": device, + "o": mntOpt.String(), + } + return options +} + +func getDomainForPartition(region string) string { + partition, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), region) + if !ok { + seelog.Warnf("No partition resolved for region (%s). Using AWS default (%s)", region, endpoints.AwsPartition().DNSSuffix()) + return endpoints.AwsPartition().DNSSuffix() + } + return partition.DNSSuffix() +} diff --git a/agent/taskresource/volume/dockervolume_efs_test.go b/agent/taskresource/volume/dockervolume_efs_test.go new file mode 100644 index 00000000000..984288610d5 --- /dev/null +++ b/agent/taskresource/volume/dockervolume_efs_test.go @@ -0,0 +1,95 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func getTestEFSVolCfg() *EFSVolumeConfig { + return &EFSVolumeConfig{ + AuthConfig: EFSAuthConfig{ + AccessPointId: "test-ap", + Iam: "ENABLED", + }, + FileSystemID: "fs-123", + RootDirectory: "/test", + TransitEncryption: "ENABLED", + TransitEncryptionPort: int64(123), + DockerVolumeName: "test-vol", + } +} + +func TestGetEFSDriverOptions_UseLocalVolumeDriver(t *testing.T) { + cfg := &config.Config{ + AWSRegion: "us-west-1", + } + efsVolCfg := getTestEFSVolCfg() + credsURI := "/v2/creds-id" + options := GetDriverOptions(cfg, efsVolCfg, credsURI) + assert.Equal(t, "nfs", options["type"]) + assert.Equal(t, "addr=fs-123.efs.us-west-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport", options["o"]) + assert.Equal(t, ":/test", options["device"]) +} + +func TestGetEFSDriverOptions_UseECSVolumePlugin(t *testing.T) { + cfg := &config.Config{ + VolumePluginCapabilities: []string{"efsAuth"}, + } + efsVolCfg := getTestEFSVolCfg() + credsURI := "/v2/creds-id" + options := GetDriverOptions(cfg, efsVolCfg, credsURI) + assert.Equal(t, "efs", options["type"]) + assert.Equal(t, "tls,tlsport=123,iam,awscredsuri=/v2/creds-id,accesspoint=test-ap", options["o"]) + assert.Equal(t, "fs-123:/test", options["device"]) +} + +func TestUseVolumePlugin(t *testing.T) { + cfg := &config.Config{ + VolumePluginCapabilities: []string{"efsAuth"}, + } + assert.True(t, UseECSVolumePlugin(cfg)) + assert.False(t, UseECSVolumePlugin(&config.Config{})) +} + +func TestGetDockerLocalDriverOptions(t *testing.T) { + efsVolCfg := getTestEFSVolCfg() + cfg := 
&config.Config{ + AWSRegion: "us-west-2", + } + options := efsVolCfg.getDockerLocalDriverOptions(cfg) + require.Contains(t, options, "type") + require.Contains(t, options, "device") + require.Contains(t, options, "o") + assert.Equal(t, "nfs", options["type"]) + assert.Equal(t, ":/test", options["device"]) + assert.Equal(t, "addr=fs-123.efs.us-west-2.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport", + options["o"]) +} + +func TestGetVolumePluginDriverOptions(t *testing.T) { + efsVolCfg := getTestEFSVolCfg() + options := efsVolCfg.getVolumePluginDriverOptions("/v2/abc") + require.Contains(t, options, "type") + require.Contains(t, options, "device") + require.Contains(t, options, "o") + assert.Equal(t, "efs", options["type"]) + assert.Equal(t, "fs-123:/test", options["device"]) + assert.Equal(t, "tls,tlsport=123,iam,awscredsuri=/v2/abc,accesspoint=test-ap", options["o"]) +} diff --git a/agent/taskresource/volume/dockervolume_test.go b/agent/taskresource/volume/dockervolume_test.go new file mode 100644 index 00000000000..d616a231fc6 --- /dev/null +++ b/agent/taskresource/volume/dockervolume_test.go @@ -0,0 +1,368 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package volume + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + "github.com/aws/amazon-ecs-agent/agent/dockerclient" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + mock_dockerapi "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks" + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestCreateSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + + name := "volumeName" + scope := "shared" + autoprovision := true + mountPoint := "some/mount/point" + driver := "driver" + driverOptions := map[string]string{ + "opt1": "val1", + "opt2": "val2", + } + + mockClient.EXPECT().CreateVolume(gomock.Any(), name, driver, driverOptions, nil, dockerclient.CreateVolumeTimeout).Return( + dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{Name: name, Driver: driver, Mountpoint: mountPoint, Labels: nil}, + Error: nil, + }) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volume, _ := NewVolumeResource(ctx, name, "docker", name, scope, autoprovision, driver, driverOptions, nil, mockClient) + err := volume.Create() + assert.NoError(t, err) + assert.Equal(t, name, volume.VolumeConfig.Mountpoint) +} + +func TestCreateError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + + name := "volumeName" + scope := "shared" + autoprovision := true + driver := "driver" + labels := map[string]string{ + "label1": "val1", + "label2": "val2", + } + + mockClient.EXPECT().CreateVolume(gomock.Any(), name, driver, nil, labels, dockerclient.CreateVolumeTimeout).Return( + dockerapi.SDKVolumeResponse{ + DockerVolume: nil, + Error: 
errors.New("Test this error is propogated"), + }) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volume, _ := NewVolumeResource(ctx, name, "docker", name, scope, autoprovision, driver, nil, labels, mockClient) + err := volume.Create() + assert.NotNil(t, err) + assert.Equal(t, "Test this error is propogated", err.Error()) + assert.Equal(t, "Test this error is propogated", volume.GetTerminalReason()) +} + +func TestCleanupSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + + name := "volumeName" + scope := "task" + autoprovision := false + driver := "driver" + + mockClient.EXPECT().RemoveVolume(gomock.Any(), name, dockerclient.RemoveVolumeTimeout).Return(nil) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volume, _ := NewVolumeResource(ctx, name, "docker", name, scope, autoprovision, driver, nil, nil, mockClient) + err := volume.Cleanup() + assert.NoError(t, err) +} + +func TestCleanupError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + mockClient.EXPECT().RemoveVolume(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("Test this is propogated")) + + name := "volumeName" + scope := "task" + autoprovision := false + driver := "driver" + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volume, _ := NewVolumeResource(ctx, name, "docker", name, scope, autoprovision, driver, nil, nil, mockClient) + err := volume.Cleanup() + assert.Error(t, err) + assert.Equal(t, "Test this is propogated", err.Error()) + assert.Equal(t, "Test this is propogated", volume.GetTerminalReason()) +} + +func TestApplyTransitionForTaskScopeVolume(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + + name := "volumeName" + scope := "task" + autoprovision := false + driver 
:= "driver" + driverOptions := map[string]string{} + labels := map[string]string{} + mountPoint := "some/mount/point" + + gomock.InOrder( + mockClient.EXPECT().CreateVolume(gomock.Any(), name, driver, driverOptions, labels, dockerclient.CreateVolumeTimeout).Times(1).Return( + dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{Name: name, Driver: driver, Mountpoint: mountPoint, Labels: nil}, + Error: nil, + }), + ) + + volume, _ := NewVolumeResource(nil, name, "docker", name, scope, autoprovision, driver, driverOptions, labels, mockClient) + volume.ApplyTransition(resourcestatus.ResourceStatus(VolumeCreated)) +} + +func TestApplyTransitionForSharedScopeVolume(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mock_dockerapi.NewMockDockerClient(ctrl) + + name := "volumeName" + scope := "shared" + autoprovision := false + driver := "driver" + mountPoint := "some/mount/point" + + gomock.InOrder( + mockClient.EXPECT().CreateVolume(gomock.Any(), name, driver, nil, nil, dockerclient.CreateVolumeTimeout).Times(1).Return( + dockerapi.SDKVolumeResponse{ + DockerVolume: &types.Volume{Name: name, Driver: driver, Mountpoint: mountPoint, Labels: nil}, + Error: nil, + }), + mockClient.EXPECT().RemoveVolume(gomock.Any(), name, dockerclient.RemoveVolumeTimeout).Times(0), + ) + + volume, _ := NewVolumeResource(nil, name, "docker", name, scope, autoprovision, driver, nil, nil, mockClient) + volume.ApplyTransition(resourcestatus.ResourceStatus(VolumeCreated)) + volume.ApplyTransition(resourcestatus.ResourceStatus(VolumeRemoved)) +} + +func TestMarshall(t *testing.T) { + volumeStr := "{\"name\":\"volumeName\"," + + "\"dockerVolumeConfiguration\":{\"scope\":\"shared\",\"autoprovision\":true,\"mountPoint\":\"\",\"driver\":\"driver\",\"driverOpts\":{},\"labels\":{}," + + "\"dockerVolumeName\":\"volumeName\"}," + + "\"pauseContainerPID\":\"123\"," + + 
"\"createdAt\":\"0001-01-01T00:00:00Z\",\"desiredStatus\":\"CREATED\",\"knownStatus\":\"NONE\"}" + name := "volumeName" + scope := "shared" + autoprovision := true + driver := "driver" + driverOpts := make(map[string]string) + labels := make(map[string]string) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + volume, _ := NewVolumeResource(ctx, name, "docker", name, scope, autoprovision, driver, driverOpts, labels, nil) + volume.SetPauseContainerPID("123") + volume.SetDesiredStatus(resourcestatus.ResourceStatus(VolumeCreated)) + volume.SetKnownStatus(resourcestatus.ResourceStatus(VolumeStatusNone)) + + bytes, err := volume.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, volumeStr, string(bytes[:])) +} + +func TestUnmarshall(t *testing.T) { + name := "volumeName" + scope := "task" + autoprovision := false + mountPoint := "mountPoint" + driver := "drive" + + labels := map[string]string{ + "lab1": "label", + } + bytes := []byte("{\"name\":\"volumeName\",\"dockerVolumeName\":\"volumeName\"," + + "\"dockerVolumeConfiguration\":{\"scope\":\"task\",\"autoprovision\":false,\"mountPoint\":\"mountPoint\",\"driver\":\"drive\",\"labels\":{\"lab1\":\"label\"}}," + + "\"pauseContainerPID\":\"123\"," + + "\"createdAt\":\"0001-01-01T00:00:00Z\",\"desiredStatus\":\"CREATED\",\"knownStatus\":\"NONE\"}") + unmarshalledVolume := &VolumeResource{} + + err := unmarshalledVolume.UnmarshalJSON(bytes) + assert.NoError(t, err) + + assert.Equal(t, name, unmarshalledVolume.Name) + assert.Equal(t, scope, unmarshalledVolume.VolumeConfig.Scope) + assert.Equal(t, autoprovision, unmarshalledVolume.VolumeConfig.Autoprovision) + assert.Equal(t, mountPoint, unmarshalledVolume.VolumeConfig.Mountpoint) + assert.Equal(t, driver, unmarshalledVolume.VolumeConfig.Driver) + assert.Equal(t, labels, unmarshalledVolume.VolumeConfig.Labels) + assert.Equal(t, "123", unmarshalledVolume.GetPauseContainerPID()) + assert.Equal(t, time.Time{}, unmarshalledVolume.GetCreatedAt()) + 
assert.Equal(t, resourcestatus.ResourceStatus(VolumeCreated), unmarshalledVolume.GetDesiredStatus()) + assert.Equal(t, resourcestatus.ResourceStatus(VolumeStatusNone), unmarshalledVolume.GetKnownStatus()) +} + +func TestNewVolumeResource(t *testing.T) { + testCases := []struct { + description string + scope string + autoprovision bool + fail bool + }{ + { + "task scoped volume can not be auto provisioned", + "task", + true, + true, + }, + { + "task scoped volume should not be auto provisioned", + "task", + false, + false, + }, + { + "shared scoped volume can be auto provisioned", + "shared", + false, + false, + }, + { + "shared scoped volume can be non-auto provisioned", + "shared", + true, + false, + }, + } + + for _, testcase := range testCases { + t.Run(fmt.Sprintf("%s,scope %s, autoprovision: %v", testcase.description, + testcase.scope, testcase.autoprovision), func(t *testing.T) { + vol, err := NewVolumeResource(nil, "volume", "efs", "dockerVolume", + testcase.scope, testcase.autoprovision, "", nil, nil, nil) + if testcase.fail { + assert.Error(t, err) + assert.Nil(t, vol) + assert.Contains(t, err.Error(), "task scoped volume could not be autoprovisioned") + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestEFSVolumeBuildContainerDependency(t *testing.T) { + testRes, _ := NewVolumeResource(nil, "volume", "efs", "dockerVolume", + "", false, "", nil, nil, nil) + + testRes.BuildContainerDependency("PauseContainer", apicontainerstatus.ContainerCreated, resourcestatus.ResourceStatus(VolumeCreated)) + contDep := testRes.GetContainerDependencies(resourcestatus.ResourceStatus(VolumeCreated)) + assert.NotNil(t, contDep) + + assert.Equal(t, "PauseContainer", contDep[0].ContainerName) + assert.Equal(t, apicontainerstatus.ContainerCreated, contDep[0].SatisfiedStatus) + +} + +func TestGetDriverOpts(t *testing.T) { + volCfg := DockerVolumeConfig{ + Driver: ECSVolumePlugin, + DriverOpts: map[string]string{ + "o": "iam", + }, + } + testCases := []struct { + name 
string + volRes *VolumeResource + expectedOpts map[string]string + }{ + { + name: "netns option is appended when pause container pid is set and driver is the volume plugin", + volRes: &VolumeResource{ + VolumeConfig: volCfg, + pauseContainerPIDUnsafe: "123", + }, + expectedOpts: map[string]string{ + "o": "iam,netns=/proc/123/ns/net", + }, + }, + { + name: "netns option is not appended when pause container pid is not set", + volRes: &VolumeResource{ + VolumeConfig: volCfg, + }, + expectedOpts: map[string]string{ + "o": "iam,netns=/proc/123/ns/net", + }, + }, + { + name: "netns option is not appended when driver is not the volume plugin", + volRes: &VolumeResource{ + VolumeConfig: DockerVolumeConfig{ + Driver: "another-driver", + DriverOpts: map[string]string{ + "o": "iam", + }, + }, + pauseContainerPIDUnsafe: "123", + }, + expectedOpts: map[string]string{ + "o": "iam", + }, + }, + { + name: "netns option is added correctly when it's the only option", + volRes: &VolumeResource{ + VolumeConfig: DockerVolumeConfig{ + Driver: ECSVolumePlugin, + DriverOpts: map[string]string{ + "o": "", + }, + }, + pauseContainerPIDUnsafe: "123", + }, + expectedOpts: map[string]string{ + "o": "netns=/proc/123/ns/net", + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOpts["o"], tc.volRes.getDriverOpts()["o"]) + }) + } +} diff --git a/agent/taskresource/volume/mountoptions.go b/agent/taskresource/volume/mountoptions.go new file mode 100644 index 00000000000..0162f7653f6 --- /dev/null +++ b/agent/taskresource/volume/mountoptions.go @@ -0,0 +1,58 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "strings" +) + +const mntOptSep = "," + +// VolumeMountOptions contains a list of mount options for mounting a volume. +// This struct exists mainly to make serializing/deserializing a list of options easier. +type VolumeMountOptions struct { + opts []string +} + +// NewVolumeMountOptions returns a new VolumeMountOptions with options from argument. +func NewVolumeMountOptions(opts ...string) *VolumeMountOptions { + return &VolumeMountOptions{ + opts: opts, + } +} + +// NewVolumeMountOptionsFromString returns a new VolumeMountOptions with options from argument. +func NewVolumeMountOptionsFromString(s string) *VolumeMountOptions { + opts := []string{} + if s != "" { // s == "" needs to be dealt with specially because it ends up with an empty option string. + opts = strings.Split(s, mntOptSep) + } + return &VolumeMountOptions{ + opts: opts, + } +} + +// String serializes a list of options joined by comma. +func (mo *VolumeMountOptions) String() string { + return strings.Join(mo.opts, mntOptSep) +} + +// AddOption adds a new option to the option list. +func (mo *VolumeMountOptions) AddOption(key, val string) { + opt := key + if val != "" { + opt = opt + "=" + val + } + mo.opts = append(mo.opts, opt) +} diff --git a/agent/taskresource/volume/mountoptions_test.go b/agent/taskresource/volume/mountoptions_test.go new file mode 100644 index 00000000000..34a538cae05 --- /dev/null +++ b/agent/taskresource/volume/mountoptions_test.go @@ -0,0 +1,42 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewVolumeMountOptions(t *testing.T) { + assert.Equal(t, []string{"a", "b=c"}, NewVolumeMountOptions("a", "b=c").opts) +} + +func TestNewVolumeMountOptionsFromString(t *testing.T) { + assert.Equal(t, []string{"a", "b=c"}, NewVolumeMountOptionsFromString("a,b=c").opts) + assert.Equal(t, []string{}, NewVolumeMountOptionsFromString("").opts) +} + +func TestString(t *testing.T) { + assert.Equal(t, "a", NewVolumeMountOptions("a").String()) + assert.Equal(t, "a,b=c", NewVolumeMountOptions("a", "b=c").String()) +} + +func TestAddOption(t *testing.T) { + mntOpts := NewVolumeMountOptions() + mntOpts.AddOption("a", "") + assert.Equal(t, []string{"a"}, mntOpts.opts) + mntOpts.AddOption("b", "c") + assert.Equal(t, []string{"a", "b=c"}, mntOpts.opts) +} diff --git a/agent/taskresource/volume/volumestatus.go b/agent/taskresource/volume/volumestatus.go new file mode 100644 index 00000000000..17250b4b381 --- /dev/null +++ b/agent/taskresource/volume/volumestatus.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "errors" + "strings" + + resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" +) + +// VolumeStatus defines resource statuses for docker volume +type VolumeStatus resourcestatus.ResourceStatus + +const ( + // VolumeStatusNone is the zero state of a task resource + VolumeStatusNone VolumeStatus = iota + // VolumeCreated represents a task resource which has been created + VolumeCreated + // VolumeRemoved represents a task resource which has been Removed + VolumeRemoved +) + +var resourceStatusMap = map[string]VolumeStatus{ + "NONE": VolumeStatusNone, + "CREATED": VolumeCreated, + "REMOVED": VolumeRemoved, +} + +// StatusString returns a human readable string representation of this object +func (vs VolumeStatus) String() string { + for k, v := range resourceStatusMap { + if v == vs { + return k + } + } + return "NONE" +} + +// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type +func (vs *VolumeStatus) MarshalJSON() ([]byte, error) { + if vs == nil { + return nil, nil + } + return []byte(`"` + vs.String() + `"`), nil +} + +// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data +func (vs *VolumeStatus) UnmarshalJSON(b []byte) error { + if strings.ToLower(string(b)) == "null" { + *vs = VolumeStatusNone + return nil + } + + if b[0] != '"' || b[len(b)-1] != '"' { + *vs = VolumeStatusNone + return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b)) + } + + strStatus := b[1 : len(b)-1] + stat, ok := resourceStatusMap[string(strStatus)] + if !ok { + *vs = VolumeStatusNone + return errors.New("resource status unmarshal: unrecognized status") + } + *vs = stat + return nil +} diff --git a/agent/taskresource/volume/volumestatus_test.go b/agent/taskresource/volume/volumestatus_test.go new file mode 100644 index 00000000000..aca999f8758 
--- /dev/null +++ b/agent/taskresource/volume/volumestatus_test.go @@ -0,0 +1,88 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package volume + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusString(t *testing.T) { + var resourceStatus VolumeStatus + + resourceStatus = VolumeStatusNone + assert.Equal(t, resourceStatus.String(), "NONE") + resourceStatus = VolumeCreated + assert.Equal(t, resourceStatus.String(), "CREATED") + resourceStatus = VolumeRemoved + assert.Equal(t, resourceStatus.String(), "REMOVED") +} + +func TestMarshalVolumeStatus(t *testing.T) { + status := VolumeStatusNone + bytes, err := status.MarshalJSON() + + assert.NoError(t, err) + assert.Equal(t, `"NONE"`, string(bytes[:])) +} + +func TestMarshalNilVolumeStatus(t *testing.T) { + var status *VolumeStatus + bytes, err := status.MarshalJSON() + + assert.Nil(t, bytes) + assert.Nil(t, err) +} + +type testVolumeStatus struct { + SomeStatus VolumeStatus `json:"status"` +} + +func TestUnmarshalVolumeStatus(t *testing.T) { + status := VolumeStatusNone + + err := json.Unmarshal([]byte(`"CREATED"`), &status) + assert.NoError(t, err) + assert.Equal(t, VolumeCreated, status, "CREATED should unmarshal to CREATED, not "+status.String()) + + var testStatus testVolumeStatus + err = json.Unmarshal([]byte(`{"status":"REMOVED"}`), &testStatus) + assert.NoError(t, err) + assert.Equal(t, VolumeRemoved, 
testStatus.SomeStatus, "REMOVED should unmarshal to REMOVED, not "+testStatus.SomeStatus.String()) +} + +func TestUnmarshalNullVolumeStatus(t *testing.T) { + status := VolumeCreated + err := json.Unmarshal([]byte("null"), &status) + assert.NoError(t, err) + assert.Equal(t, VolumeStatusNone, status, "null should unmarshal to None, not "+status.String()) +} + +func TestUnmarshalNonStringVolumeStatusDefaultNone(t *testing.T) { + status := VolumeCreated + err := json.Unmarshal([]byte(`1`), &status) + assert.NotNil(t, err) + assert.Equal(t, VolumeStatusNone, status, "non-string status should unmarshal to None, not "+status.String()) +} + +func TestUnmarshalUnmappedVolumeStatusDefaultNone(t *testing.T) { + status := VolumeRemoved + err := json.Unmarshal([]byte(`"SOMEOTHER"`), &status) + assert.NotNil(t, err) + assert.Equal(t, VolumeStatusNone, status, "Unmapped status should unmarshal to None, not "+status.String()) +} diff --git a/agent/taskresource/volume/volumetypes.go b/agent/taskresource/volume/volumetypes.go new file mode 100644 index 00000000000..a158545de09 --- /dev/null +++ b/agent/taskresource/volume/volumetypes.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package volume + +// Volume is an interface for something that may be used as the host half of a +// docker volume mount +type Volume interface { + Source() string +} + +// FSHostVolume is a simple type of HostVolume which references an arbitrary +// location on the host as the Volume. +type FSHostVolume struct { + FSSourcePath string `json:"sourcePath"` +} + +// SourcePath returns the path on the host filesystem that should be mounted +func (fs *FSHostVolume) Source() string { + return fs.FSSourcePath +} + +// LocalDockerVolume represents a volume without a specified host path +// This is essentially DockerVolume with only the name specified; however, +// for backward compatibility we can't directly map to DockerVolume. +type LocalDockerVolume struct { + HostPath string `json:"hostPath"` +} + +// SourcePath returns the generated host path for the volume +func (e *LocalDockerVolume) Source() string { + return e.HostPath +} diff --git a/agent/tcs/client/client.go b/agent/tcs/client/client.go new file mode 100644 index 00000000000..ee60f3fe345 --- /dev/null +++ b/agent/tcs/client/client.go @@ -0,0 +1,347 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package tcsclient + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/amazon-ecs-agent/agent/utils" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/cihub/seelog" +) + +const ( + // tasksInMetricMessage is the maximum number of tasks that can be sent in a message to the backend + // This is a very conservative estimate assuming max allowed string lengths for all fields. + tasksInMetricMessage = 10 + // tasksInHealthMessage is the maximum number of tasks that can be sent in a message to the backend + tasksInHealthMessage = 10 +) + +// clientServer implements wsclient.ClientServer interface for metrics backend. +type clientServer struct { + statsEngine stats.Engine + publishTicker *time.Ticker + publishHealthTicker *time.Ticker + ctx context.Context + cancel context.CancelFunc + disableResourceMetrics bool + publishMetricsInterval time.Duration + wsclient.ClientServerImpl +} + +// New returns a client/server to bidirectionally communicate with the backend. +// The returned struct should have both 'Connect' and 'Serve' called upon it +// before being used. 
+func New(url string, + cfg *config.Config, + credentialProvider *credentials.Credentials, + statsEngine stats.Engine, + publishMetricsInterval time.Duration, + rwTimeout time.Duration, + disableResourceMetrics bool) wsclient.ClientServer { + cs := &clientServer{ + statsEngine: statsEngine, + publishTicker: nil, + publishHealthTicker: nil, + publishMetricsInterval: publishMetricsInterval, + } + cs.URL = url + cs.AgentConfig = cfg + cs.CredentialProvider = credentialProvider + cs.ServiceError = &tcsError{} + cs.RequestHandlers = make(map[string]wsclient.RequestHandler) + cs.MakeRequestHook = signRequestFunc(url, cs.AgentConfig.AWSRegion, credentialProvider) + cs.TypeDecoder = NewTCSDecoder() + cs.RWTimeout = rwTimeout + cs.disableResourceMetrics = disableResourceMetrics + // TODO make this context inherited from the handler + cs.ctx, cs.cancel = context.WithCancel(context.TODO()) + return cs +} + +// Serve begins serving requests using previously registered handlers (see +// AddRequestHandler). All request handlers should be added prior to making this +// call as unhandled requests will be discarded. +func (cs *clientServer) Serve() error { + seelog.Debug("TCS client starting websocket poll loop") + if !cs.IsReady() { + return fmt.Errorf("tcs client: websocket not ready for connections") + } + + if cs.statsEngine == nil { + return fmt.Errorf("tcs client: uninitialized stats engine") + } + + // Start the timer function to publish metrics to the backend. + cs.publishTicker = time.NewTicker(cs.publishMetricsInterval) + cs.publishHealthTicker = time.NewTicker(cs.publishMetricsInterval) + + if !cs.disableResourceMetrics { + go cs.publishMetrics() + } + go cs.publishHealthMetrics() + + return cs.ConsumeMessages() +} + +// Close closes the underlying connection. 
+func (cs *clientServer) Close() error { + if cs.publishTicker != nil { + cs.publishTicker.Stop() + } + if cs.publishHealthTicker != nil { + cs.publishHealthTicker.Stop() + } + + cs.cancel() + return cs.Disconnect() +} + +// signRequestFunc is a MakeRequestHookFunc that signs each generated request +func signRequestFunc(url, region string, credentialProvider *credentials.Credentials) wsclient.MakeRequestHookFunc { + return func(payload []byte) ([]byte, error) { + reqBody := bytes.NewReader(payload) + + request, err := http.NewRequest("GET", url, reqBody) + if err != nil { + return nil, err + } + + err = utils.SignHTTPRequest(request, region, "ecs", credentialProvider, reqBody) + if err != nil { + return nil, err + } + + request.Header.Add("Host", request.Host) + var dataBuffer bytes.Buffer + request.Header.Write(&dataBuffer) + io.WriteString(&dataBuffer, "\r\n") + + data := dataBuffer.Bytes() + data = append(data, payload...) + + return data, nil + } +} + +// publishMetrics invokes the PublishMetricsRequest on the clientserver object. +func (cs *clientServer) publishMetrics() { + if cs.publishTicker == nil { + seelog.Debug("Skipping publishing metrics. Publish ticker is uninitialized") + return + } + + // don't simply range over the ticker since its channel doesn't ever get closed + for { + select { + case <-cs.publishTicker.C: + err := cs.publishMetricsOnce() + if err != nil { + seelog.Warnf("Error publishing metrics: %v", err) + } + case <-cs.ctx.Done(): + return + } + } +} + +// publishMetricsOnce is invoked by the ticker to periodically publish metrics to backend. +func (cs *clientServer) publishMetricsOnce() error { + // Get the list of objects to send to backend. + requests, err := cs.metricsToPublishMetricRequests() + if err != nil { + return err + } + + // Make the publish metrics request to the backend. 
+ for _, request := range requests { + err = cs.MakeRequest(request) + if err != nil { + return err + } + } + return nil +} + +// metricsToPublishMetricRequests gets task metrics and converts them to a list of PublishMetricRequest +// objects. +func (cs *clientServer) metricsToPublishMetricRequests() ([]*ecstcs.PublishMetricsRequest, error) { + metadata, taskMetrics, err := cs.statsEngine.GetInstanceMetrics() + if err != nil { + return nil, err + } + + var requests []*ecstcs.PublishMetricsRequest + if *metadata.Idle { + metadata.Fin = aws.Bool(true) + // Idle instance, we have only one request to send to backend. + requests = append(requests, ecstcs.NewPublishMetricsRequest(metadata, taskMetrics)) + return requests, nil + } + var messageTaskMetrics []*ecstcs.TaskMetric + numTasks := len(taskMetrics) + + for i, taskMetric := range taskMetrics { + messageTaskMetrics = append(messageTaskMetrics, taskMetric) + var requestMetadata *ecstcs.MetricsMetadata + if (i + 1) == numTasks { + // If this is the last task to send, set fin to true + requestMetadata = copyMetricsMetadata(metadata, true) + } else { + requestMetadata = copyMetricsMetadata(metadata, false) + } + if (i+1)%tasksInMetricMessage == 0 { + // Construct payload with tasksInMetricMessage number of task metrics and send to backend. + requests = append(requests, ecstcs.NewPublishMetricsRequest(requestMetadata, copyTaskMetrics(messageTaskMetrics))) + messageTaskMetrics = messageTaskMetrics[:0] + } + } + + if len(messageTaskMetrics) > 0 { + // Create the new metadata object and set fin to true as this is the last message in the payload. + requestMetadata := copyMetricsMetadata(metadata, true) + // Create a request with remaining task metrics. 
+ requests = append(requests, ecstcs.NewPublishMetricsRequest(requestMetadata, messageTaskMetrics)) + } + return requests, nil +} + +// publishHealthMetrics send the container health information to backend +func (cs *clientServer) publishHealthMetrics() { + if cs.publishTicker == nil { + seelog.Debug("Skipping publishing health metrics. Publish ticker is uninitialized") + return + } + + // Publish metrics immediately after we connect and wait for ticks. This makes + // sure that there is no data loss when a scheduled metrics publishing fails + // due to a connection reset. + err := cs.publishHealthMetricsOnce() + if err != nil { + seelog.Warnf("Unable to publish health metrics: %v", err) + } + for { + select { + case <-cs.publishHealthTicker.C: + err := cs.publishHealthMetricsOnce() + if err != nil { + seelog.Warnf("Unable to publish health metrics: %v", err) + } + case <-cs.ctx.Done(): + return + } + } +} + +// publishHealthMetricsOnce is invoked by the ticker to periodically publish metrics to backend. +func (cs *clientServer) publishHealthMetricsOnce() error { + // Get the list of health request to send to backend. + requests, err := cs.createPublishHealthRequests() + if err != nil { + return err + } + // Make the publish metrics request to the backend. 
+ for _, request := range requests { + err = cs.MakeRequest(request) + if err != nil { + return err + } + } + return nil +} + +// createPublishHealthRequests creates the requests to publish container health +func (cs *clientServer) createPublishHealthRequests() ([]*ecstcs.PublishHealthRequest, error) { + metadata, taskHealthMetrics, err := cs.statsEngine.GetTaskHealthMetrics() + if err != nil { + return nil, err + } + + if metadata == nil || taskHealthMetrics == nil { + seelog.Debug("No container health metrics to report") + return nil, nil + } + + var requests []*ecstcs.PublishHealthRequest + var taskHealths []*ecstcs.TaskHealth + numOfTasks := len(taskHealthMetrics) + for i, taskHealth := range taskHealthMetrics { + taskHealths = append(taskHealths, taskHealth) + // create a request if the number of task reaches the maximum page size + if (i+1)%tasksInHealthMessage == 0 { + requestMetadata := copyHealthMetadata(metadata, (i+1) == numOfTasks) + requestTaskHealth := copyTaskHealthMetrics(taskHealths) + request := ecstcs.NewPublishHealthMetricsRequest(requestMetadata, requestTaskHealth) + requests = append(requests, request) + taskHealths = taskHealths[:0] + } + } + + // Put the rest of the metrics in another request + if len(taskHealths) != 0 { + requestMetadata := copyHealthMetadata(metadata, true) + requests = append(requests, ecstcs.NewPublishHealthMetricsRequest(requestMetadata, taskHealths)) + } + + return requests, nil +} + +// copyMetricsMetadata creates a new MetricsMetadata object from a given MetricsMetadata object. +// It copies all the fields from the source object to the new object and sets the 'Fin' field +// as specified by the argument. 
+func copyMetricsMetadata(metadata *ecstcs.MetricsMetadata, fin bool) *ecstcs.MetricsMetadata { + return &ecstcs.MetricsMetadata{ + Cluster: aws.String(*metadata.Cluster), + ContainerInstance: aws.String(*metadata.ContainerInstance), + Idle: aws.Bool(*metadata.Idle), + MessageId: aws.String(*metadata.MessageId), + Fin: aws.Bool(fin), + } +} + +// copyTaskMetrics copies a slice of TaskMetric objects to another slice. This is needed as we +// reset the source slice after creating a new PublishMetricsRequest object. +func copyTaskMetrics(from []*ecstcs.TaskMetric) []*ecstcs.TaskMetric { + to := make([]*ecstcs.TaskMetric, len(from)) + copy(to, from) + return to +} + +// copyHealthMetadata performs a deep copy of HealthMetadata object +func copyHealthMetadata(metadata *ecstcs.HealthMetadata, fin bool) *ecstcs.HealthMetadata { + return &ecstcs.HealthMetadata{ + Cluster: aws.String(aws.StringValue(metadata.Cluster)), + ContainerInstance: aws.String(aws.StringValue(metadata.ContainerInstance)), + Fin: aws.Bool(fin), + MessageId: aws.String(aws.StringValue(metadata.MessageId)), + } +} + +// copyTaskHealthMetrics copies a slice of taskHealthMetrics to another slice +func copyTaskHealthMetrics(from []*ecstcs.TaskHealth) []*ecstcs.TaskHealth { + to := make([]*ecstcs.TaskHealth, len(from)) + copy(to, from) + return to +} diff --git a/agent/tcs/client/client_test.go b/agent/tcs/client/client_test.go new file mode 100644 index 00000000000..7da2bab9d29 --- /dev/null +++ b/agent/tcs/client/client_test.go @@ -0,0 +1,446 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package tcsclient wraps the generated aws-sdk-go client to provide marshalling +// and unmarshalling of data over a websocket connection in the format expected +// by TCS. It allows for bidirectional communication and acts as both a +// client-and-server in terms of requests, but only as a client in terms of +// connecting. + +package tcsclient + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/stats" + mock_stats "github.com/aws/amazon-ecs-agent/agent/stats/mock" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + mock_wsconn "github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn/mock" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + testPublishMetricsInterval = 1 * time.Second + testMessageId = "testMessageId" + testCluster = "default" + testContainerInstance = "containerInstance" + rwTimeout = time.Second +) + +var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token") + +type mockStatsEngine struct{} + +func (*mockStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + return nil, nil, fmt.Errorf("uninitialized") +} + +func (*mockStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +func (*mockStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + return nil, nil, nil +} + +type emptyStatsEngine struct{} 
+ +func (*emptyStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + return nil, nil, fmt.Errorf("empty stats") +} + +func (*emptyStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +func (*emptyStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + return nil, nil, nil +} + +type idleStatsEngine struct{} + +func (*idleStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + metadata := &ecstcs.MetricsMetadata{ + Cluster: aws.String(testCluster), + ContainerInstance: aws.String(testContainerInstance), + Idle: aws.Bool(true), + MessageId: aws.String(testMessageId), + } + return metadata, []*ecstcs.TaskMetric{}, nil +} + +func (*idleStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +func (*idleStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + return nil, nil, nil +} + +type nonIdleStatsEngine struct { + numTasks int +} + +func (engine *nonIdleStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + metadata := &ecstcs.MetricsMetadata{ + Cluster: aws.String(testCluster), + ContainerInstance: aws.String(testContainerInstance), + Idle: aws.Bool(false), + MessageId: aws.String(testMessageId), + } + var taskMetrics []*ecstcs.TaskMetric + var i int64 + for i = 0; int(i) < engine.numTasks; i++ { + taskArn := "task/" + strconv.FormatInt(i, 10) + taskMetrics = append(taskMetrics, &ecstcs.TaskMetric{TaskArn: &taskArn}) + } + return metadata, taskMetrics, nil +} + +func (*nonIdleStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +func 
(*nonIdleStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + return nil, nil, nil +} +func newNonIdleStatsEngine(numTasks int) *nonIdleStatsEngine { + return &nonIdleStatsEngine{numTasks: numTasks} +} + +func TestPayloadHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(1, + []byte(`{"type":"AckPublishMetric","message":{}}`), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + handledPayload := make(chan *ecstcs.AckPublishMetric) + + reqHandler := func(payload *ecstcs.AckPublishMetric) { + handledPayload <- payload + } + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + defer cs.Close() + + t.Log("Waiting for handler to return payload.") + <-handledPayload +} + +func TestPublishMetricsRequest(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).Times(2) + conn.EXPECT().Close() + // TODO: should use explicit values + conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()) + + cs := testCS(conn) + defer cs.Close() + err := cs.MakeRequest(&ecstcs.PublishMetricsRequest{}) + if err != nil { + t.Fatal(err) + } +} + +func TestPublishMetricsOnceEmptyStatsError(t *testing.T) { + cs := clientServer{ + statsEngine: &emptyStatsEngine{}, + } + err := cs.publishMetricsOnce() + + assert.Error(t, err, "Failed: expecting publishMerticOnce return err ") +} + +func TestPublishOnceIdleStatsEngine(t *testing.T) { + cs := clientServer{ + statsEngine: &idleStatsEngine{}, + } + requests, err := 
cs.metricsToPublishMetricRequests() + if err != nil { + t.Fatal("Error creating publishmetricrequests: ", err) + } + if len(requests) != 1 { + t.Errorf("Expected %d requests, got %d", 1, len(requests)) + } + lastRequest := requests[0] + if !*lastRequest.Metadata.Fin { + t.Error("Fin not set to true in Last request") + } +} + +func TestPublishOnceNonIdleStatsEngine(t *testing.T) { + expectedRequests := 3 + // Creates 21 task metrics, which translate to 3 batches, + // {[Task1, Task2, ...Task10], [Task11, Task12, ...Task20], [Task21]} + numTasks := (tasksInMetricMessage * (expectedRequests - 1)) + 1 + cs := clientServer{ + statsEngine: newNonIdleStatsEngine(numTasks), + } + requests, err := cs.metricsToPublishMetricRequests() + if err != nil { + t.Fatal("Error creating publishmetricrequests: ", err) + } + taskArns := make(map[string]bool) + for _, request := range requests { + for _, taskMetric := range request.TaskMetrics { + _, exists := taskArns[*taskMetric.TaskArn] + if exists { + t.Fatal("Duplicate task arn in requests: ", *taskMetric.TaskArn) + } + taskArns[*taskMetric.TaskArn] = true + } + } + if len(requests) != expectedRequests { + t.Errorf("Expected %d requests, got %d", expectedRequests, len(requests)) + } + lastRequest := requests[expectedRequests-1] + if !*lastRequest.Metadata.Fin { + t.Error("Fin not set to true in last request") + } + requests = requests[:(expectedRequests - 1)] + for i, request := range requests { + if *request.Metadata.Fin { + t.Errorf("Fin set to true in request %d/%d", i, (expectedRequests - 1)) + } + } +} + +func testCS(conn *mock_wsconn.MockWebsocketConn) wsclient.ClientServer { + cfg := &config.Config{ + AWSRegion: "us-east-1", + AcceptInsecureCert: true, + } + cs := New("https://aws.amazon.com/ecs", cfg, testCreds, &mockStatsEngine{}, + testPublishMetricsInterval, rwTimeout, false).(*clientServer) + cs.SetConnection(conn) + return cs +} + +// TestCloseClientServer tests the ws connection will be closed by tcs client when +// 
received the deregisterInstanceStream +func TestCloseClientServer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + + gomock.InOrder( + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil), + conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()), + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil), + conn.EXPECT().Close(), + ) + + err := cs.MakeRequest(&ecstcs.PublishMetricsRequest{}) + assert.Nil(t, err) + + err = cs.Disconnect() + assert.Nil(t, err) +} + +func TestAckPublishHealthHandlerCalled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(1, + []byte(`{"type":"AckPublishHealth","message":{}}`), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + handledPayload := make(chan *ecstcs.AckPublishHealth) + + reqHandler := func(payload *ecstcs.AckPublishHealth) { + handledPayload <- payload + } + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + defer cs.Close() + + t.Log("Waiting for handler to return payload.") + <-handledPayload +} + +// TestMetricsDisabled tests that if metrics is disabled, only health metrics will be sent +func TestMetricsDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + mockStatsEngine := mock_stats.NewMockEngine(ctrl) + + cfg := config.DefaultConfig() + + cs := New("", &cfg, testCreds, mockStatsEngine, testPublishMetricsInterval, rwTimeout, true) + cs.SetConnection(conn) + + published := make(chan struct{}) + readed := make(chan struct{}) + + // stats engine should only be called for getting health 
metrics + mockStatsEngine.EXPECT().GetTaskHealthMetrics().Return(&ecstcs.HealthMetadata{ + Cluster: aws.String("TestMetricsDisabled"), + ContainerInstance: aws.String("container_instance"), + Fin: aws.Bool(true), + MessageId: aws.String("message_id"), + }, []*ecstcs.TaskHealth{{}}, nil).MinTimes(1) + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Do(func() { + readed <- struct{}{} + }).Return(1, nil, nil).MinTimes(1) + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().WriteMessage(gomock.Any(), gomock.Any()).Do(func(messageType int, data []byte) { + published <- struct{}{} + }).Return(nil).MinTimes(1) + + go cs.Serve() + <-published + <-readed +} + +func TestCreatePublishHealthRequestsEmpty(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + mockStatsEngine := mock_stats.NewMockEngine(ctrl) + cfg := config.DefaultConfig() + + cs := New("", &cfg, testCreds, mockStatsEngine, testPublishMetricsInterval, rwTimeout, true) + cs.SetConnection(conn) + + mockStatsEngine.EXPECT().GetTaskHealthMetrics().Return(nil, nil, stats.EmptyHealthMetricsError) + _, err := cs.(*clientServer).createPublishHealthRequests() + assert.Equal(t, err, stats.EmptyHealthMetricsError) + + mockStatsEngine.EXPECT().GetTaskHealthMetrics().Return(nil, nil, nil) + _, err = cs.(*clientServer).createPublishHealthRequests() + assert.NoError(t, err) +} + +func TestCreatePublishHealthRequests(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + mockStatsEngine := mock_stats.NewMockEngine(ctrl) + cfg := config.DefaultConfig() + + cs := New("", &cfg, testCreds, mockStatsEngine, testPublishMetricsInterval, rwTimeout, true) + cs.SetConnection(conn) + + testMetadata := &ecstcs.HealthMetadata{ + Cluster: aws.String("TestCreatePublishHealthRequests"), + ContainerInstance: 
aws.String("container_instance"), + Fin: aws.Bool(true), + MessageId: aws.String("message_id"), + } + + testHealthMetrics := []*ecstcs.TaskHealth{ + { + Containers: []*ecstcs.ContainerHealth{ + { + ContainerName: aws.String("container1"), + HealthStatus: aws.String("HEALTHY"), + StatusSince: aws.Time(time.Now()), + }, + }, + TaskArn: aws.String("t1"), + TaskDefinitionFamily: aws.String("tdf1"), + TaskDefinitionVersion: aws.String("1"), + }, + { + Containers: []*ecstcs.ContainerHealth{ + { + ContainerName: aws.String("container2"), + HealthStatus: aws.String("HEALTHY"), + StatusSince: aws.Time(time.Now()), + }, + }, + TaskArn: aws.String("t2"), + TaskDefinitionFamily: aws.String("tdf2"), + TaskDefinitionVersion: aws.String("2"), + }, + } + + mockStatsEngine.EXPECT().GetTaskHealthMetrics().Return(testMetadata, testHealthMetrics, nil) + request, err := cs.(*clientServer).createPublishHealthRequests() + + assert.NoError(t, err) + assert.Len(t, request, 1) + assert.Len(t, request[0].Tasks, 2) + + assert.Equal(t, request[0].Metadata, testMetadata) + assert.Equal(t, request[0].Tasks, testHealthMetrics) +} + +func TestSessionClosed(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + conn := mock_wsconn.NewMockWebsocketConn(ctrl) + cs := testCS(conn) + + // Messages should be read from the connection at least once + conn.EXPECT().SetReadDeadline(gomock.Any()).Return(nil).MinTimes(1) + conn.EXPECT().ReadMessage().Return(1, + []byte(`{"type":"AckPublishMetric","message":{}}`), nil).MinTimes(1) + // Invoked when closing the connection + conn.EXPECT().SetWriteDeadline(gomock.Any()).Return(nil) + conn.EXPECT().Close() + + handledPayload := make(chan *ecstcs.AckPublishMetric) + reqHandler := func(payload *ecstcs.AckPublishMetric) { + handledPayload <- payload + } + cs.AddRequestHandler(reqHandler) + + go cs.Serve() + // wait for the session start + <-handledPayload + cs.Close() + _, ok := <-cs.(*clientServer).ctx.Done() + assert.False(t, ok, "channel 
should be closed") +} diff --git a/agent/tcs/client/client_types.go b/agent/tcs/client/client_types.go new file mode 100644 index 00000000000..143179900f6 --- /dev/null +++ b/agent/tcs/client/client_types.go @@ -0,0 +1,54 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package tcsclient wraps the generated aws-sdk-go client to provide marshalling +// and unmarshalling of data over a websocket connection in the format expected +// by TCS. It allows for bidirectional communication and acts as both a +// client-and-server in terms of requests, but only as a client in terms of +// connecting. + +package tcsclient + +import ( + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/amazon-ecs-agent/agent/wsclient" +) + +var tcsTypes []interface{} + +func init() { + // This list is currently *manually updated* and assumes that the generated + // struct type-names within the package *exactly match* the type sent by ACS/TCS + // (true so far; careful with inflections) + // TODO, this list should be autogenerated + // I couldn't figure out how to get a list of all structs in a package via + // reflection, but that would solve this. The alternative is to either parse + // the .json model or the generated struct names. 
+ tcsTypes = []interface{}{ + ecstcs.StopTelemetrySessionMessage{}, + ecstcs.AckPublishMetric{}, + ecstcs.HeartbeatMessage{}, + ecstcs.PublishMetricsRequest{}, + ecstcs.PublishHealthRequest{}, + ecstcs.AckPublishHealth{}, + ecstcs.StartTelemetrySessionRequest{}, + ecstcs.ServerException{}, + ecstcs.BadRequestException{}, + ecstcs.ResourceValidationException{}, + ecstcs.InvalidParameterException{}, + } +} + +func NewTCSDecoder() wsclient.TypeDecoder { + return wsclient.BuildTypeDecoder(tcsTypes) +} diff --git a/agent/tcs/client/error.go b/agent/tcs/client/error.go new file mode 100644 index 00000000000..1c7e3393858 --- /dev/null +++ b/agent/tcs/client/error.go @@ -0,0 +1,45 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package tcsclient + +import "github.com/aws/amazon-ecs-agent/agent/wsclient" + +const errType = "TCSError" + +// UnretriableErrors wraps all the typed errors that TCS may return +type UnretriableErrors struct{} + +// Get gets the list of unretriable error types. +func (err *UnretriableErrors) Get() []interface{} { + return unretriableErrors +} + +// tcsError implements wsclient.ServiceError interface. +type tcsError struct{} + +// NewError returns an error corresponding to a typed error returned from +// ACS. It is expected that the passed in interface{} is really a struct which +// has a 'Message' field of type *string. In that case, the Message will be +// conveyed as part of the Error string as well as the type. 
It is safe to pass +// anything into this constructor and it will also work reasonably well with +// anything fulfilling the 'error' interface. +func (te *tcsError) NewError(err interface{}) *wsclient.WSError { + return &wsclient.WSError{ErrObj: err, Type: errType, WSUnretriableErrors: &UnretriableErrors{}} +} + +// These errors are all fatal and there's nothing we can do about them. +// AccessDeniedException is actually potentially fixable because you can change +// credentials at runtime, but still close to unretriable. +// TODO: Populate this list when the json is updated with actual exceptions. +var unretriableErrors = []interface{}{} diff --git a/agent/tcs/handler/handler.go b/agent/tcs/handler/handler.go new file mode 100644 index 00000000000..3bad054c245 --- /dev/null +++ b/agent/tcs/handler/handler.go @@ -0,0 +1,212 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package tcshandler + +import ( + "context" + "io" + "net/url" + "strings" + "time" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/stats" + tcsclient "github.com/aws/amazon-ecs-agent/agent/tcs/client" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/amazon-ecs-agent/agent/utils/retry" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/cihub/seelog" +) + +const ( + // The maximum time to wait between heartbeats without disconnecting + defaultHeartbeatTimeout = 1 * time.Minute + defaultHeartbeatJitter = 1 * time.Minute + // wsRWTimeout is the duration of read and write deadline for the + // websocket connection + wsRWTimeout = 2*defaultHeartbeatTimeout + defaultHeartbeatJitter + deregisterContainerInstanceHandler = "TCSDeregisterContainerInstanceHandler" +) + +// StartMetricsSession starts a metric session. It initializes the stats engine +// and invokes StartSession. +func StartMetricsSession(params *TelemetrySessionParams) { + ok, err := params.isContainerHealthMetricsDisabled() + if err != nil { + seelog.Warnf("Error starting metrics session: %v", err) + return + } + if ok { + seelog.Warnf("Metrics were disabled, not starting the telemetry session") + return + } + + err = params.StatsEngine.MustInit(params.Ctx, params.TaskEngine, params.Cfg.Cluster, + params.ContainerInstanceArn) + if err != nil { + seelog.Warnf("Error initializing metrics engine: %v", err) + return + } + + err = StartSession(params, params.StatsEngine) + if err != nil { + seelog.Warnf("Error starting metrics session with backend: %v", err) + } +} + +// StartSession creates a session with the backend and handles requests +// using the passed in arguments. 
+// The engine is expected to be initialized and gathering container metrics by +// the time the websocket client starts using it. +func StartSession(params *TelemetrySessionParams, statsEngine stats.Engine) error { + backoff := retry.NewExponentialBackoff(time.Second, 1*time.Minute, 0.2, 2) + for { + tcsError := startTelemetrySession(params, statsEngine) + if tcsError == nil || tcsError == io.EOF { + seelog.Info("TCS Websocket connection closed for a valid reason") + backoff.Reset() + } else { + seelog.Errorf("Error: lost websocket connection with ECS Telemetry service (TCS): %v", tcsError) + params.time().Sleep(backoff.Duration()) + } + select { + case <-params.Ctx.Done(): + seelog.Info("TCS session exited cleanly.") + return nil + default: + } + } +} + +func startTelemetrySession(params *TelemetrySessionParams, statsEngine stats.Engine) error { + tcsEndpoint, err := params.ECSClient.DiscoverTelemetryEndpoint(params.ContainerInstanceArn) + if err != nil { + seelog.Errorf("tcs: unable to discover poll endpoint: %v", err) + return err + } + url := formatURL(tcsEndpoint, params.Cfg.Cluster, params.ContainerInstanceArn, params.TaskEngine) + return startSession(params.Ctx, url, params.Cfg, params.CredentialProvider, statsEngine, + defaultHeartbeatTimeout, defaultHeartbeatJitter, config.DefaultContainerMetricsPublishInterval, + params.DeregisterInstanceEventStream) +} + +func startSession( + ctx context.Context, + url string, + cfg *config.Config, + credentialProvider *credentials.Credentials, + statsEngine stats.Engine, + heartbeatTimeout, heartbeatJitter, + publishMetricsInterval time.Duration, + deregisterInstanceEventStream *eventstream.EventStream) error { + client := tcsclient.New(url, cfg, credentialProvider, statsEngine, + publishMetricsInterval, wsRWTimeout, cfg.DisableMetrics.Enabled()) + defer client.Close() + + err := deregisterInstanceEventStream.Subscribe(deregisterContainerInstanceHandler, client.Disconnect) + if err != nil { + return err + } + defer 
deregisterInstanceEventStream.Unsubscribe(deregisterContainerInstanceHandler) + + err = client.Connect() + if err != nil { + seelog.Errorf("Error connecting to TCS: %v", err.Error()) + return err + } + seelog.Info("Connected to TCS endpoint") + // starts a timer and listens for tcs heartbeats/acks. The timer is reset when + // we receive a heartbeat from the server or when a publish metrics message + // is acked. + timer := time.NewTimer(retry.AddJitter(heartbeatTimeout, heartbeatJitter)) + defer timer.Stop() + client.AddRequestHandler(heartbeatHandler(timer)) + client.AddRequestHandler(ackPublishMetricHandler(timer)) + client.AddRequestHandler(ackPublishHealthMetricHandler(timer)) + client.SetAnyRequestHandler(anyMessageHandler(client)) + serveC := make(chan error) + go func() { + serveC <- client.Serve() + }() + select { + case <-ctx.Done(): + // outer context done, agent is exiting + client.Disconnect() + case <-timer.C: + seelog.Info("TCS Connection hasn't had any activity for too long; disconnecting") + client.Disconnect() + case err := <-serveC: + return err + } + return nil +} + +// heartbeatHandler resets the heartbeat timer when HeartbeatMessage message is received from tcs. +func heartbeatHandler(timer *time.Timer) func(*ecstcs.HeartbeatMessage) { + return func(*ecstcs.HeartbeatMessage) { + seelog.Debug("Received HeartbeatMessage from tcs") + timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter)) + } +} + +// ackPublishMetricHandler consumes the ack message from the backend. The backend sends +// the ack each time it processes a metric message. +func ackPublishMetricHandler(timer *time.Timer) func(*ecstcs.AckPublishMetric) { + return func(*ecstcs.AckPublishMetric) { + seelog.Debug("Received AckPublishMetric from tcs") + timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter)) + } +} + +// ackPublishHealthMetricHandler consumes the ack message from backend. 
The backend sends +// the ack each time it processes a health message +func ackPublishHealthMetricHandler(timer *time.Timer) func(*ecstcs.AckPublishHealth) { + return func(*ecstcs.AckPublishHealth) { + seelog.Debug("Received ACKPublishHealth from tcs") + timer.Reset(retry.AddJitter(defaultHeartbeatTimeout, defaultHeartbeatJitter)) + } +} + +// anyMessageHandler handles any server message. Any server message means the +// connection is active +func anyMessageHandler(client wsclient.ClientServer) func(interface{}) { + return func(interface{}) { + seelog.Trace("TCS activity occurred") + // Reset read deadline as there's activity on the channel + if err := client.SetReadDeadline(time.Now().Add(wsRWTimeout)); err != nil { + seelog.Warnf("Unable to extend read deadline for TCS connection: %v", err) + } + } +} + +// formatURL returns formatted url for tcs endpoint. +func formatURL(endpoint string, cluster string, containerInstance string, taskEngine engine.TaskEngine) string { + tcsURL := endpoint + if !strings.HasSuffix(tcsURL, "/") { + tcsURL += "/" + } + query := url.Values{} + query.Set("cluster", cluster) + query.Set("containerInstance", containerInstance) + query.Set("agentVersion", version.Version) + query.Set("agentHash", version.GitHashString()) + if dockerVersion, err := taskEngine.Version(); err == nil { + query.Set("dockerVersion", dockerVersion) + } + return tcsURL + "ws?" + query.Encode() +} diff --git a/agent/tcs/handler/handler_test.go b/agent/tcs/handler/handler_test.go new file mode 100644 index 00000000000..a5524aba4a9 --- /dev/null +++ b/agent/tcs/handler/handler_test.go @@ -0,0 +1,322 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package tcshandler + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net/url" + "strings" + "sync" + "testing" + "time" + + "context" + + mock_api "github.com/aws/amazon-ecs-agent/agent/api/mocks" + "github.com/aws/amazon-ecs-agent/agent/config" + mock_engine "github.com/aws/amazon-ecs-agent/agent/engine/mocks" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/stats" + tcsclient "github.com/aws/amazon-ecs-agent/agent/tcs/client" + "github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs" + "github.com/aws/amazon-ecs-agent/agent/version" + "github.com/aws/amazon-ecs-agent/agent/wsclient" + wsmock "github.com/aws/amazon-ecs-agent/agent/wsclient/mock/utils" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/docker/docker/api/types" + "github.com/golang/mock/gomock" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" +) + +const ( + testTaskArn = "arn:aws:ecs:us-east-1:123:task/def" + testTaskDefinitionFamily = "task-def" + testClusterArn = "arn:aws:ecs:us-east-1:123:cluster/default" + testInstanceArn = "arn:aws:ecs:us-east-1:123:container-instance/abc" + testMessageId = "testMessageId" + testPublishMetricsInterval = 1 * time.Millisecond +) + +type mockStatsEngine struct{} + +var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token") + +var testCfg = &config.Config{ + AcceptInsecureCert: true, + AWSRegion: "us-east-1", +} + +func (*mockStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) { + req := createPublishMetricsRequest() + return req.Metadata, req.TaskMetrics, nil +} + +func (*mockStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.StatsJSON, *stats.NetworkStatsPerSec, error) 
{ + return nil, nil, fmt.Errorf("not implemented") +} + +func (*mockStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) { + return nil, nil, nil +} + +// TestDisableMetrics tests the StartMetricsSession will return immediately if +// the metrics was disabled +func TestDisableMetrics(t *testing.T) { + params := TelemetrySessionParams{ + Cfg: &config.Config{ + DisableMetrics: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + DisableDockerHealthCheck: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, + }, + } + + StartMetricsSession(¶ms) +} + +func TestFormatURL(t *testing.T) { + endpoint := "http://127.0.0.0.1/" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + taskEngine := mock_engine.NewMockTaskEngine(ctrl) + + taskEngine.EXPECT().Version().Return("Docker version result", nil) + wsurl := formatURL(endpoint, testClusterArn, testInstanceArn, taskEngine) + parsed, err := url.Parse(wsurl) + assert.NoError(t, err, "should be able to parse URL") + assert.Equal(t, "/ws", parsed.Path, "wrong path") + assert.Equal(t, testClusterArn, parsed.Query().Get("cluster"), "wrong cluster") + assert.Equal(t, testInstanceArn, parsed.Query().Get("containerInstance"), "wrong container instance") + assert.Equal(t, version.Version, parsed.Query().Get("agentVersion"), "wrong agent version") + assert.Equal(t, version.GitHashString(), parsed.Query().Get("agentHash"), "wrong agent hash") + assert.Equal(t, "Docker version result", parsed.Query().Get("dockerVersion"), "wrong docker version") +} + +func TestStartSession(t *testing.T) { + // Start test server. 
+ closeWS := make(chan []byte) + server, serverChan, requestChan, serverErr, err := wsmock.GetMockServer(closeWS) + server.StartTLS() + defer server.Close() + if err != nil { + t.Fatal(err) + } + wait := &sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + wait.Add(1) + go func() { + select { + case sErr := <-serverErr: + t.Error(sErr) + case <-ctx.Done(): + } + wait.Done() + }() + defer func() { + closeSocket(closeWS) + close(serverChan) + }() + + deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", context.Background()) + // Start a session with the test server. + go startSession(ctx, server.URL, testCfg, testCreds, &mockStatsEngine{}, + defaultHeartbeatTimeout, defaultHeartbeatJitter, + testPublishMetricsInterval, deregisterInstanceEventStream) + + // startSession internally starts publishing metrics from the mockStatsEngine object. + time.Sleep(testPublishMetricsInterval) + + // Read request channel to get the metric data published to the server. + request := <-requestChan + cancel() + wait.Wait() + + go func() { + for { + select { + case <-requestChan: + } + } + }() + + // Decode and verify the metric data. + payload, err := getPayloadFromRequest(request) + if err != nil { + t.Fatal("Error decoding payload: ", err) + } + + // Decode and verify the metric data. + _, responseType, err := wsclient.DecodeData([]byte(payload), tcsclient.NewTCSDecoder()) + if err != nil { + t.Fatal("error decoding data: ", err) + } + if responseType != "PublishMetricsRequest" { + t.Fatal("Unexpected responseType: ", responseType) + } +} + +func TestSessionConnectionClosedByRemote(t *testing.T) { + // Start test server. 
+ closeWS := make(chan []byte) + server, serverChan, _, serverErr, err := wsmock.GetMockServer(closeWS) + server.StartTLS() + defer server.Close() + if err != nil { + t.Fatal(err) + } + go func() { + serr := <-serverErr + if !websocket.IsCloseError(serr, websocket.CloseNormalClosure) { + t.Error(serr) + } + }() + sleepBeforeClose := 10 * time.Millisecond + go func() { + time.Sleep(sleepBeforeClose) + closeSocket(closeWS) + close(serverChan) + }() + + ctx, cancel := context.WithCancel(context.Background()) + deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", ctx) + deregisterInstanceEventStream.StartListening() + defer cancel() + + // Start a session with the test server. + err = startSession(ctx, server.URL, testCfg, testCreds, &mockStatsEngine{}, + defaultHeartbeatTimeout, defaultHeartbeatJitter, + testPublishMetricsInterval, deregisterInstanceEventStream) + + if err == nil { + t.Error("Expected io.EOF on closed connection") + } + if err != io.EOF { + t.Error("Expected io.EOF on closed connection, got: ", err) + } +} + +// TestConnectionInactiveTimeout tests the tcs client reconnect when it loses network +// connection or it's inactive for too long +func TestConnectionInactiveTimeout(t *testing.T) { + // Start test server. + closeWS := make(chan []byte) + server, _, requestChan, serverErr, err := wsmock.GetMockServer(closeWS) + server.StartTLS() + defer server.Close() + if err != nil { + t.Fatal(err) + } + + go func() { + for { + select { + case <-requestChan: + } + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", ctx) + deregisterInstanceEventStream.StartListening() + defer cancel() + // Start a session with the test server. 
+ err = startSession(ctx, server.URL, testCfg, testCreds, &mockStatsEngine{}, + 50*time.Millisecond, 100*time.Millisecond, + testPublishMetricsInterval, deregisterInstanceEventStream) + // if we are not blocked here, then the test pass as it will reconnect in StartSession + assert.NoError(t, err, "Close the connection should cause the tcs client return error") + + assert.True(t, websocket.IsCloseError(<-serverErr, websocket.CloseAbnormalClosure), + "Read from closed connection should produce an io.EOF error") + + closeSocket(closeWS) +} + +func TestDiscoverEndpointAndStartSession(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockEcs := mock_api.NewMockECSClient(ctrl) + mockEcs.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return("", errors.New("error")) + + err := startTelemetrySession(&TelemetrySessionParams{ECSClient: mockEcs}, nil) + if err == nil { + t.Error("Expected error from startTelemetrySession when DiscoverTelemetryEndpoint returns error") + } +} + +func getPayloadFromRequest(request string) (string, error) { + lines := strings.Split(request, "\r\n") + if len(lines) > 0 { + return lines[len(lines)-1], nil + } + + return "", errors.New("Could not get payload") +} + +// closeSocket tells the server to send a close frame. This lets us test +// what happens if the connection is closed by the remote server. 
+func closeSocket(ws chan<- []byte) { + ws <- websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + close(ws) +} + +func createPublishMetricsRequest() *ecstcs.PublishMetricsRequest { + cluster := testClusterArn + ci := testInstanceArn + taskArn := testTaskArn + taskDefinitionFamily := testTaskDefinitionFamily + var fval float64 + fval = rand.Float64() + var ival int64 + ival = rand.Int63n(10) + ts := time.Now() + idle := false + messageId := testMessageId + return &ecstcs.PublishMetricsRequest{ + Metadata: &ecstcs.MetricsMetadata{ + Cluster: &cluster, + ContainerInstance: &ci, + Idle: &idle, + MessageId: &messageId, + }, + TaskMetrics: []*ecstcs.TaskMetric{ + { + ContainerMetrics: []*ecstcs.ContainerMetric{ + { + CpuStatsSet: &ecstcs.CWStatsSet{ + Max: &fval, + Min: &fval, + SampleCount: &ival, + Sum: &fval, + }, + MemoryStatsSet: &ecstcs.CWStatsSet{ + Max: &fval, + Min: &fval, + SampleCount: &ival, + Sum: &fval, + }, + }, + }, + TaskArn: &taskArn, + TaskDefinitionFamily: &taskDefinitionFamily, + }, + }, + Timestamp: &ts, + } +} diff --git a/agent/tcs/handler/types.go b/agent/tcs/handler/types.go new file mode 100644 index 00000000000..5de56478d41 --- /dev/null +++ b/agent/tcs/handler/types.go @@ -0,0 +1,60 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package tcshandler + +import ( + "context" + "sync" + + "github.com/aws/amazon-ecs-agent/agent/api" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/engine" + "github.com/aws/amazon-ecs-agent/agent/eventstream" + "github.com/aws/amazon-ecs-agent/agent/stats" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/pkg/errors" +) + +// TelemetrySessionParams contains all the parameters required to start a tcs +// session +type TelemetrySessionParams struct { + Ctx context.Context + ContainerInstanceArn string + CredentialProvider *credentials.Credentials + Cfg *config.Config + DeregisterInstanceEventStream *eventstream.EventStream + AcceptInvalidCert bool + ECSClient api.ECSClient + TaskEngine engine.TaskEngine + StatsEngine *stats.DockerStatsEngine + _time ttime.Time + _timeOnce sync.Once +} + +func (params *TelemetrySessionParams) time() ttime.Time { + params._timeOnce.Do(func() { + if params._time == nil { + params._time = &ttime.DefaultTime{} + } + }) + return params._time +} + +func (params *TelemetrySessionParams) isContainerHealthMetricsDisabled() (bool, error) { + if params.Cfg != nil { + return params.Cfg.DisableMetrics.Enabled() && params.Cfg.DisableDockerHealthCheck.Enabled(), nil + } + return false, errors.New("Config is empty in the tcs session parameter") +} diff --git a/agent/tcs/handler/types_test.go b/agent/tcs/handler/types_test.go new file mode 100644 index 00000000000..4f096ccb780 --- /dev/null +++ b/agent/tcs/handler/types_test.go @@ -0,0 +1,76 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package tcshandler + +import ( + "testing" + + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestIsMetricsDisabled(t *testing.T) { + tcs := []struct { + param *TelemetrySessionParams + result bool + err error + description string + }{ + { + param: &TelemetrySessionParams{}, + result: false, + err: errors.New("Config is empty in the tcs session parameter"), + description: "Config not set should cause error", + }, + { + param: &TelemetrySessionParams{Cfg: &config.Config{DisableMetrics: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, DisableDockerHealthCheck: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}}}, + result: false, + err: nil, + description: "No metrics was disable should return false", + }, + { + param: &TelemetrySessionParams{Cfg: &config.Config{DisableMetrics: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, DisableDockerHealthCheck: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}}}, + result: false, + err: nil, + description: "Only health metrics was disable should return false", + }, + { + param: &TelemetrySessionParams{Cfg: &config.Config{DisableMetrics: config.BooleanDefaultFalse{Value: config.ExplicitlyDisabled}, DisableDockerHealthCheck: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}}}, + result: false, + err: nil, + description: "Only telemetry metrics was disable should return false", + }, + { + param: &TelemetrySessionParams{Cfg: &config.Config{DisableMetrics: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}, DisableDockerHealthCheck: config.BooleanDefaultFalse{Value: config.ExplicitlyEnabled}}}, + result: true, + err: nil, + description: "both telemetry and health 
metrics were disable should return true", + }, + } + + for _, tc := range tcs { + t.Run(tc.description, func(t *testing.T) { + ok, err := tc.param.isContainerHealthMetricsDisabled() + if tc.err != nil { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tc.result, ok) + }) + } +} diff --git a/agent/tcs/model/api/api-2.json b/agent/tcs/model/api/api-2.json new file mode 100644 index 00000000000..108fabba78d --- /dev/null +++ b/agent/tcs/model/api/api-2.json @@ -0,0 +1,338 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2015-03-10", + "endpointPrefix":"ecstcs", + "jsonVersion":"1.1", + "serviceAbbreviation":"Amazon ECS TCS", + "serviceFullName":"Amazon EC2 Container Service Telemetry Communication Service", + "signatureVersion":"v4", + "targetPrefix":"AmazonEC2ContainerServiceTelemetryV20150310", + "protocol":"json" + }, + "operations":{ + "Heartbeat":{ + "name":"Heartbeat", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"HeartbeatMessage"} + }, + "PublishHealth":{ + "name":"PublishHealth", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PublishHealthRequest"}, + "output":{"shape":"AckPublishHealth"}, + "errors":[ + { + "shape":"BadRequestException", + "exception":true + }, + { + "shape":"ResourceValidationException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"ServerException", + "exception":true + } + ] + }, + "PublishMetrics":{ + "name":"PublishMetrics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PublishMetricsRequest"}, + "output":{"shape":"AckPublishMetric"}, + "errors":[ + { + "shape":"BadRequestException", + "exception":true + }, + { + "shape":"ResourceValidationException", + "exception":true + }, + { + "shape":"InvalidParameterException", + "exception":true + }, + { + "shape":"ServerException", + "exception":true + } + ] + }, + "StartTelemetrySession":{ + 
"name":"StartTelemetrySession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTelemetrySessionRequest"}, + "output":{"shape":"StopTelemetrySessionMessage"}, + "errors":[ + { + "shape":"BadRequestException", + "exception":true + }, + { + "shape":"ResourceValidationException", + "exception":true + }, + { + "shape":"ServerException", + "exception":true + } + ] + } + }, + "shapes":{ + "AckPublishHealth":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "AckPublishMetric":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "Boolean":{"type":"boolean"}, + "CWStatsSet":{ + "type":"structure", + "members":{ + "min":{"shape":"Double"}, + "max":{"shape":"Double"}, + "sampleCount":{"shape":"UInteger"}, + "sum":{"shape":"Double"} + } + }, + "ContainerHealth":{ + "type":"structure", + "members":{ + "containerName":{"shape":"String"}, + "healthStatus":{"shape":"HealthStatus"}, + "statusSince":{"shape":"Timestamp"} + } + }, + "ContainerHealths":{ + "type":"list", + "member":{"shape":"ContainerHealth"} + }, + "ContainerMetric":{ + "type":"structure", + "members":{ + "containerArn":{"shape":"String"}, + "containerName":{"shape":"String"}, + "cpuStatsSet":{"shape":"CWStatsSet"}, + "memoryStatsSet":{"shape":"CWStatsSet"}, + "networkStatsSet":{"shape":"NetworkStatsSet"}, + "storageStatsSet":{"shape":"StorageStatsSet"} + } + }, + "ContainerMetrics":{ + "type":"list", + "member":{"shape":"ContainerMetric"} + }, + "Double":{"type":"double"}, + "HealthMetadata":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "messageId":{"shape":"String"}, + "fin":{"shape":"Boolean"} + } + }, + "HealthStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ] + }, + "HeartbeatMessage":{ + 
"type":"structure", + "members":{ + "healthy":{"shape":"Boolean"} + } + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "MetricsMetadata":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"}, + "messageId":{"shape":"String"}, + "idle":{"shape":"Boolean"}, + "fin":{"shape":"Boolean"} + } + }, + "NetworkStatsSet":{ + "type":"structure", + "members":{ + "rxBytesPerSecond":{"shape":"UDoubleCWStatsSet"}, + "rxBytes":{"shape":"ULongStatsSet"}, + "rxDropped":{"shape":"ULongStatsSet"}, + "rxErrors":{"shape":"ULongStatsSet"}, + "rxPackets":{"shape":"ULongStatsSet"}, + "txBytesPerSecond":{"shape":"UDoubleCWStatsSet"}, + "txBytes":{"shape":"ULongStatsSet"}, + "txDropped":{"shape":"ULongStatsSet"}, + "txErrors":{"shape":"ULongStatsSet"}, + "txPackets":{"shape":"ULongStatsSet"} + } + }, + "PublishHealthRequest":{ + "type":"structure", + "members":{ + "metadata":{"shape":"HealthMetadata"}, + "tasks":{"shape":"TaskHealths"}, + "timestamp":{"shape":"Timestamp"} + } + }, + "PublishMetricsRequest":{ + "type":"structure", + "members":{ + "metadata":{"shape":"MetricsMetadata"}, + "taskMetrics":{"shape":"TaskMetrics"}, + "timestamp":{"shape":"Timestamp"} + } + }, + "ResourceValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "exception":true + }, + "StartTelemetrySessionRequest":{ + "type":"structure", + "members":{ + "cluster":{"shape":"String"}, + "containerInstance":{"shape":"String"} + } + }, + "StopTelemetrySessionMessage":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + } + }, + "StorageStatsSet":{ + "type":"structure", + "members":{ + "readSizeBytes":{"shape":"ULongStatsSet"}, + "writeSizeBytes":{"shape":"ULongStatsSet"} + } + }, + 
"String":{"type":"string"}, + "TaskHealth":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "taskDefinitionFamily":{"shape":"String"}, + "taskDefinitionVersion":{"shape":"String"}, + "containers":{"shape":"ContainerHealths"} + } + }, + "TaskHealths":{ + "type":"list", + "member":{"shape":"TaskHealth"} + }, + "TaskMetric":{ + "type":"structure", + "members":{ + "taskArn":{"shape":"String"}, + "taskDefinitionFamily":{"shape":"String"}, + "taskDefinitionVersion":{"shape":"String"}, + "containerMetrics":{"shape":"ContainerMetrics"} + } + }, + "TaskMetrics":{ + "type":"list", + "member":{"shape":"TaskMetric"}, + "max":10 + }, + "Timestamp":{"type":"timestamp"}, + "UDouble":{ + "type":"double", + "min":0 + }, + "UDoubleCWStatsSet":{ + "type":"structure", + "required":[ + "min", + "max", + "sampleCount", + "sum" + ], + "members":{ + "min":{"shape":"UDouble"}, + "max":{"shape":"UDouble"}, + "sampleCount":{"shape":"UInteger"}, + "sum":{"shape":"UDouble"} + } + }, + "UInteger":{ + "type":"integer", + "min":0 + }, + "ULong":{ + "type":"long", + "min":0 + }, + "ULongStatsSet":{ + "type":"structure", + "required":[ + "min", + "max", + "sampleCount", + "sum" + ], + "members":{ + "min":{"shape":"ULong"}, + "max":{"shape":"ULong"}, + "sampleCount":{"shape":"ULong"}, + "sum":{"shape":"ULong"}, + "overflowMin":{"shape":"ULong"}, + "overflowMax":{"shape":"ULong"}, + "overflowSum":{"shape":"ULong"} + } + } + } +} diff --git a/agent/tcs/model/ecstcs/api.go b/agent/tcs/model/ecstcs/api.go new file mode 100644 index 00000000000..09f662ebb81 --- /dev/null +++ b/agent/tcs/model/ecstcs/api.go @@ -0,0 +1,955 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// +// Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. + +package ecstcs + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +type AckPublishHealth struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AckPublishHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AckPublishHealth) GoString() string { + return s.String() +} + +type AckPublishMetric struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AckPublishMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AckPublishMetric) GoString() string { + return s.String() +} + +type BadRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BadRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequestException) GoString() string { + return s.String() +} + +func newErrorBadRequestException(v protocol.ResponseMetadata) error { + return &BadRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BadRequestException) Code() string { + return "BadRequestException" +} + +// Message returns the exception's message. 
+func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CWStatsSet struct { + _ struct{} `type:"structure"` + + Max *float64 `locationName:"max" type:"double"` + + Min *float64 `locationName:"min" type:"double"` + + SampleCount *int64 `locationName:"sampleCount" type:"integer"` + + Sum *float64 `locationName:"sum" type:"double"` +} + +// String returns the string representation +func (s CWStatsSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CWStatsSet) GoString() string { + return s.String() +} + +type ContainerHealth struct { + _ struct{} `type:"structure"` + + ContainerName *string `locationName:"containerName" type:"string"` + + HealthStatus *string `locationName:"healthStatus" type:"string" enum:"HealthStatus"` + + StatusSince *time.Time `locationName:"statusSince" type:"timestamp"` +} + +// String returns the string representation +func (s ContainerHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerHealth) GoString() string { + return s.String() +} + +type ContainerMetric struct { + _ struct{} `type:"structure"` + + ContainerArn *string `locationName:"containerArn" type:"string"` + + ContainerName *string `locationName:"containerName" type:"string"` + + CpuStatsSet *CWStatsSet 
`locationName:"cpuStatsSet" type:"structure"` + + MemoryStatsSet *CWStatsSet `locationName:"memoryStatsSet" type:"structure"` + + NetworkStatsSet *NetworkStatsSet `locationName:"networkStatsSet" type:"structure"` + + StorageStatsSet *StorageStatsSet `locationName:"storageStatsSet" type:"structure"` +} + +// String returns the string representation +func (s ContainerMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContainerMetric) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContainerMetric) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContainerMetric"} + if s.NetworkStatsSet != nil { + if err := s.NetworkStatsSet.Validate(); err != nil { + invalidParams.AddNested("NetworkStatsSet", err.(request.ErrInvalidParams)) + } + } + if s.StorageStatsSet != nil { + if err := s.StorageStatsSet.Validate(); err != nil { + invalidParams.AddNested("StorageStatsSet", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type HealthMetadata struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + Fin *bool `locationName:"fin" type:"boolean"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s HealthMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HealthMetadata) GoString() string { + return s.String() +} + +type HeartbeatInput struct { + _ struct{} `type:"structure"` + + Healthy *bool `locationName:"healthy" type:"boolean"` +} + +// String returns the string representation +func (s HeartbeatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string 
representation +func (s HeartbeatInput) GoString() string { + return s.String() +} + +type HeartbeatMessage struct { + _ struct{} `type:"structure"` + + Healthy *bool `locationName:"healthy" type:"boolean"` +} + +// String returns the string representation +func (s HeartbeatMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeartbeatMessage) GoString() string { + return s.String() +} + +type HeartbeatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeartbeatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeartbeatOutput) GoString() string { + return s.String() +} + +type InvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { + return &InvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidParameterException) Code() string { + return "InvalidParameterException" +} + +// Message returns the exception's message. +func (s *InvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidParameterException) OrigErr() error { + return nil +} + +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +type MetricsMetadata struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` + + Fin *bool `locationName:"fin" type:"boolean"` + + Idle *bool `locationName:"idle" type:"boolean"` + + MessageId *string `locationName:"messageId" type:"string"` +} + +// String returns the string representation +func (s MetricsMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsMetadata) GoString() string { + return s.String() +} + +type NetworkStatsSet struct { + _ struct{} `type:"structure"` + + RxBytes *ULongStatsSet `locationName:"rxBytes" type:"structure"` + + RxBytesPerSecond *UDoubleCWStatsSet `locationName:"rxBytesPerSecond" type:"structure"` + + RxDropped *ULongStatsSet `locationName:"rxDropped" type:"structure"` + + RxErrors *ULongStatsSet `locationName:"rxErrors" type:"structure"` + + RxPackets *ULongStatsSet `locationName:"rxPackets" type:"structure"` + + TxBytes *ULongStatsSet `locationName:"txBytes" type:"structure"` + + TxBytesPerSecond *UDoubleCWStatsSet `locationName:"txBytesPerSecond" type:"structure"` + + TxDropped *ULongStatsSet `locationName:"txDropped" type:"structure"` + + TxErrors *ULongStatsSet `locationName:"txErrors" type:"structure"` + + TxPackets *ULongStatsSet `locationName:"txPackets" type:"structure"` +} + +// String returns the string 
representation +func (s NetworkStatsSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkStatsSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NetworkStatsSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NetworkStatsSet"} + if s.RxBytes != nil { + if err := s.RxBytes.Validate(); err != nil { + invalidParams.AddNested("RxBytes", err.(request.ErrInvalidParams)) + } + } + if s.RxBytesPerSecond != nil { + if err := s.RxBytesPerSecond.Validate(); err != nil { + invalidParams.AddNested("RxBytesPerSecond", err.(request.ErrInvalidParams)) + } + } + if s.RxDropped != nil { + if err := s.RxDropped.Validate(); err != nil { + invalidParams.AddNested("RxDropped", err.(request.ErrInvalidParams)) + } + } + if s.RxErrors != nil { + if err := s.RxErrors.Validate(); err != nil { + invalidParams.AddNested("RxErrors", err.(request.ErrInvalidParams)) + } + } + if s.RxPackets != nil { + if err := s.RxPackets.Validate(); err != nil { + invalidParams.AddNested("RxPackets", err.(request.ErrInvalidParams)) + } + } + if s.TxBytes != nil { + if err := s.TxBytes.Validate(); err != nil { + invalidParams.AddNested("TxBytes", err.(request.ErrInvalidParams)) + } + } + if s.TxBytesPerSecond != nil { + if err := s.TxBytesPerSecond.Validate(); err != nil { + invalidParams.AddNested("TxBytesPerSecond", err.(request.ErrInvalidParams)) + } + } + if s.TxDropped != nil { + if err := s.TxDropped.Validate(); err != nil { + invalidParams.AddNested("TxDropped", err.(request.ErrInvalidParams)) + } + } + if s.TxErrors != nil { + if err := s.TxErrors.Validate(); err != nil { + invalidParams.AddNested("TxErrors", err.(request.ErrInvalidParams)) + } + } + if s.TxPackets != nil { + if err := s.TxPackets.Validate(); err != nil { + invalidParams.AddNested("TxPackets", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 
0 { + return invalidParams + } + return nil +} + +type PublishHealthInput struct { + _ struct{} `type:"structure"` + + Metadata *HealthMetadata `locationName:"metadata" type:"structure"` + + Tasks []*TaskHealth `locationName:"tasks" type:"list"` + + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s PublishHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishHealthInput) GoString() string { + return s.String() +} + +type PublishHealthOutput struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PublishHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishHealthOutput) GoString() string { + return s.String() +} + +type PublishHealthRequest struct { + _ struct{} `type:"structure"` + + Metadata *HealthMetadata `locationName:"metadata" type:"structure"` + + Tasks []*TaskHealth `locationName:"tasks" type:"list"` + + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s PublishHealthRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishHealthRequest) GoString() string { + return s.String() +} + +type PublishMetricsInput struct { + _ struct{} `type:"structure"` + + Metadata *MetricsMetadata `locationName:"metadata" type:"structure"` + + TaskMetrics []*TaskMetric `locationName:"taskMetrics" type:"list"` + + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s PublishMetricsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishMetricsInput) GoString() string { + return s.String() 
+} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PublishMetricsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PublishMetricsInput"} + if s.TaskMetrics != nil { + for i, v := range s.TaskMetrics { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TaskMetrics", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PublishMetricsOutput struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PublishMetricsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishMetricsOutput) GoString() string { + return s.String() +} + +type PublishMetricsRequest struct { + _ struct{} `type:"structure"` + + Metadata *MetricsMetadata `locationName:"metadata" type:"structure"` + + TaskMetrics []*TaskMetric `locationName:"taskMetrics" type:"list"` + + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s PublishMetricsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublishMetricsRequest) GoString() string { + return s.String() +} + +type ResourceValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceValidationException) GoString() string { + return s.String() +} + +func newErrorResourceValidationException(v protocol.ResponseMetadata) error { + 
return &ResourceValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceValidationException) Code() string { + return "ResourceValidationException" +} + +// Message returns the exception's message. +func (s *ResourceValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceValidationException) OrigErr() error { + return nil +} + +func (s *ResourceValidationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerException) GoString() string { + return s.String() +} + +func newErrorServerException(v protocol.ResponseMetadata) error { + return &ServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServerException) Code() string { + return "ServerException" +} + +// Message returns the exception's message. +func (s *ServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ServerException) OrigErr() error { + return nil +} + +func (s *ServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartTelemetrySessionInput struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s StartTelemetrySessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTelemetrySessionInput) GoString() string { + return s.String() +} + +type StartTelemetrySessionOutput struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StartTelemetrySessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTelemetrySessionOutput) GoString() string { + return s.String() +} + +type StartTelemetrySessionRequest struct { + _ struct{} `type:"structure"` + + Cluster *string `locationName:"cluster" type:"string"` + + ContainerInstance *string `locationName:"containerInstance" type:"string"` +} + +// String returns the string representation +func (s StartTelemetrySessionRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartTelemetrySessionRequest) GoString() string { + return s.String() +} + +type StopTelemetrySessionMessage struct { + _ struct{} `type:"structure"` + + Message *string `locationName:"message" type:"string"` +} + +// 
String returns the string representation +func (s StopTelemetrySessionMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopTelemetrySessionMessage) GoString() string { + return s.String() +} + +type StorageStatsSet struct { + _ struct{} `type:"structure"` + + ReadSizeBytes *ULongStatsSet `locationName:"readSizeBytes" type:"structure"` + + WriteSizeBytes *ULongStatsSet `locationName:"writeSizeBytes" type:"structure"` +} + +// String returns the string representation +func (s StorageStatsSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageStatsSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageStatsSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageStatsSet"} + if s.ReadSizeBytes != nil { + if err := s.ReadSizeBytes.Validate(); err != nil { + invalidParams.AddNested("ReadSizeBytes", err.(request.ErrInvalidParams)) + } + } + if s.WriteSizeBytes != nil { + if err := s.WriteSizeBytes.Validate(); err != nil { + invalidParams.AddNested("WriteSizeBytes", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TaskHealth struct { + _ struct{} `type:"structure"` + + Containers []*ContainerHealth `locationName:"containers" type:"list"` + + TaskArn *string `locationName:"taskArn" type:"string"` + + TaskDefinitionFamily *string `locationName:"taskDefinitionFamily" type:"string"` + + TaskDefinitionVersion *string `locationName:"taskDefinitionVersion" type:"string"` +} + +// String returns the string representation +func (s TaskHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskHealth) GoString() string { + return s.String() +} + +type TaskMetric struct { + _ struct{} `type:"structure"` 
+ + ContainerMetrics []*ContainerMetric `locationName:"containerMetrics" type:"list"` + + TaskArn *string `locationName:"taskArn" type:"string"` + + TaskDefinitionFamily *string `locationName:"taskDefinitionFamily" type:"string"` + + TaskDefinitionVersion *string `locationName:"taskDefinitionVersion" type:"string"` +} + +// String returns the string representation +func (s TaskMetric) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskMetric) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TaskMetric) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TaskMetric"} + if s.ContainerMetrics != nil { + for i, v := range s.ContainerMetrics { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContainerMetrics", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UDoubleCWStatsSet struct { + _ struct{} `type:"structure"` + + // Max is a required field + Max *float64 `locationName:"max" type:"double" required:"true"` + + // Min is a required field + Min *float64 `locationName:"min" type:"double" required:"true"` + + // SampleCount is a required field + SampleCount *int64 `locationName:"sampleCount" type:"integer" required:"true"` + + // Sum is a required field + Sum *float64 `locationName:"sum" type:"double" required:"true"` +} + +// String returns the string representation +func (s UDoubleCWStatsSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UDoubleCWStatsSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UDoubleCWStatsSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UDoubleCWStatsSet"} + if s.Max == nil { + invalidParams.Add(request.NewErrParamRequired("Max")) + } + if s.Min == nil { + invalidParams.Add(request.NewErrParamRequired("Min")) + } + if s.SampleCount == nil { + invalidParams.Add(request.NewErrParamRequired("SampleCount")) + } + if s.Sum == nil { + invalidParams.Add(request.NewErrParamRequired("Sum")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ULongStatsSet struct { + _ struct{} `type:"structure"` + + // Max is a required field + Max *int64 `locationName:"max" type:"long" required:"true"` + + // Min is a required field + Min *int64 `locationName:"min" type:"long" required:"true"` + + OverflowMax *int64 `locationName:"overflowMax" type:"long"` + + OverflowMin *int64 `locationName:"overflowMin" type:"long"` + + OverflowSum *int64 `locationName:"overflowSum" type:"long"` + + // SampleCount is a required field + SampleCount *int64 `locationName:"sampleCount" type:"long" required:"true"` + + // Sum is a required field + Sum *int64 `locationName:"sum" type:"long" required:"true"` +} + +// String returns the string representation +func (s ULongStatsSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ULongStatsSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ULongStatsSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ULongStatsSet"} + if s.Max == nil { + invalidParams.Add(request.NewErrParamRequired("Max")) + } + if s.Min == nil { + invalidParams.Add(request.NewErrParamRequired("Min")) + } + if s.SampleCount == nil { + invalidParams.Add(request.NewErrParamRequired("SampleCount")) + } + if s.Sum == nil { + invalidParams.Add(request.NewErrParamRequired("Sum")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} diff --git a/agent/tcs/model/ecstcs/types.go b/agent/tcs/model/ecstcs/types.go new file mode 100644 index 00000000000..7c20531d21d --- /dev/null +++ b/agent/tcs/model/ecstcs/types.go @@ -0,0 +1,38 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ecstcs + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +// NewPublishMetricsRequest creates a PublishMetricsRequest object. 
+func NewPublishMetricsRequest(metadata *MetricsMetadata, taskMetrics []*TaskMetric) *PublishMetricsRequest { + return &PublishMetricsRequest{ + Metadata: metadata, + TaskMetrics: taskMetrics, + Timestamp: aws.Time(time.Now()), + } +} + +// NewPublishHealthMetricsRequest creates a PublishHealthRequest +func NewPublishHealthMetricsRequest(metadata *HealthMetadata, healthMetrics []*TaskHealth) *PublishHealthRequest { + return &PublishHealthRequest{ + Metadata: metadata, + Tasks: healthMetrics, + Timestamp: aws.Time(time.Now()), + } +} diff --git a/agent/tcs/model/generate.go b/agent/tcs/model/generate.go new file mode 100644 index 00000000000..6f3c7de1401 --- /dev/null +++ b/agent/tcs/model/generate.go @@ -0,0 +1,3 @@ +package model + +//go:generate go run -tags=codegen ../../gogenerate/awssdk.go -typesOnly=true -copyright_file ../../../scripts/copyright_file diff --git a/agent/utils/bufiowrapper/bufio.go b/agent/utils/bufiowrapper/bufio.go new file mode 100644 index 00000000000..4c42d61b041 --- /dev/null +++ b/agent/utils/bufiowrapper/bufio.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package bufiowrapper + +import ( + "bufio" + + "io" +) + +// Bufio wraps method from bufio package for testing +type Bufio interface { + NewScanner(io.Reader) Scanner +} + +// Scanner wraps methods for bufio.Scanner type +type Scanner interface { + Scan() bool + Text() string + Err() error +} + +type _bufio struct { +} + +func NewBufio() Bufio { + return &_bufio{} +} + +func (*_bufio) NewScanner(reader io.Reader) Scanner { + return bufio.NewScanner(reader) +} diff --git a/agent/utils/bufiowrapper/generate_mocks.go b/agent/utils/bufiowrapper/generate_mocks.go new file mode 100644 index 00000000000..85e8e74be33 --- /dev/null +++ b/agent/utils/bufiowrapper/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package bufiowrapper + +//go:generate mockgen -copyright_file=../../../scripts/copyright_file -destination=mocks/bufiowrapper_mocks.go github.com/aws/amazon-ecs-agent/agent/utils/bufiowrapper Bufio,Scanner diff --git a/agent/utils/bufiowrapper/mocks/bufiowrapper_mocks.go b/agent/utils/bufiowrapper/mocks/bufiowrapper_mocks.go new file mode 100644 index 00000000000..cb042ba635c --- /dev/null +++ b/agent/utils/bufiowrapper/mocks/bufiowrapper_mocks.go @@ -0,0 +1,129 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/bufiowrapper (interfaces: Bufio,Scanner) + +// Package mock_bufiowrapper is a generated GoMock package. +package mock_bufiowrapper + +import ( + io "io" + reflect "reflect" + + bufiowrapper "github.com/aws/amazon-ecs-agent/agent/utils/bufiowrapper" + gomock "github.com/golang/mock/gomock" +) + +// MockBufio is a mock of Bufio interface +type MockBufio struct { + ctrl *gomock.Controller + recorder *MockBufioMockRecorder +} + +// MockBufioMockRecorder is the mock recorder for MockBufio +type MockBufioMockRecorder struct { + mock *MockBufio +} + +// NewMockBufio creates a new mock instance +func NewMockBufio(ctrl *gomock.Controller) *MockBufio { + mock := &MockBufio{ctrl: ctrl} + mock.recorder = &MockBufioMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockBufio) EXPECT() *MockBufioMockRecorder { + return m.recorder +} + +// NewScanner mocks base method +func (m *MockBufio) NewScanner(arg0 io.Reader) bufiowrapper.Scanner { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewScanner", arg0) + ret0, _ := ret[0].(bufiowrapper.Scanner) + return ret0 +} + +// NewScanner indicates an expected call of NewScanner +func (mr *MockBufioMockRecorder) NewScanner(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewScanner", reflect.TypeOf((*MockBufio)(nil).NewScanner), arg0) +} + +// MockScanner is a mock of Scanner interface +type MockScanner struct { + ctrl *gomock.Controller + 
recorder *MockScannerMockRecorder +} + +// MockScannerMockRecorder is the mock recorder for MockScanner +type MockScannerMockRecorder struct { + mock *MockScanner +} + +// NewMockScanner creates a new mock instance +func NewMockScanner(ctrl *gomock.Controller) *MockScanner { + mock := &MockScanner{ctrl: ctrl} + mock.recorder = &MockScannerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockScanner) EXPECT() *MockScannerMockRecorder { + return m.recorder +} + +// Err mocks base method +func (m *MockScanner) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err +func (mr *MockScannerMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockScanner)(nil).Err)) +} + +// Scan mocks base method +func (m *MockScanner) Scan() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scan") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Scan indicates an expected call of Scan +func (mr *MockScannerMockRecorder) Scan() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockScanner)(nil).Scan)) +} + +// Text mocks base method +func (m *MockScanner) Text() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Text") + ret0, _ := ret[0].(string) + return ret0 +} + +// Text indicates an expected call of Text +func (mr *MockScannerMockRecorder) Text() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Text", reflect.TypeOf((*MockScanner)(nil).Text)) +} diff --git a/agent/utils/cipher/cipher.go b/agent/utils/cipher/cipher.go new file mode 100644 index 00000000000..34ec0a02be5 --- /dev/null +++ b/agent/utils/cipher/cipher.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package cipher provides customized cipher configuration for agent client +package cipher + +import ( + "crypto/tls" +) + +// Only support a subset of ciphers, corresponding cipher suite names can be found here: https://golang.org/pkg/crypto/tls/#Config +var SupportedCipherSuites = []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, +} + +func WithSupportedCipherSuites(config *tls.Config) { + config.CipherSuites = SupportedCipherSuites +} diff --git a/agent/utils/compare_versions.go b/agent/utils/compare_versions.go new file mode 100644 index 00000000000..a33efd1a4bf --- /dev/null +++ b/agent/utils/compare_versions.go @@ -0,0 +1,175 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version string + +type semver struct { + major int + minor int + patch int + preReleaseVersion string + buildMetadata string +} + +// Matches returns whether or not a version matches a given selector. +// The selector can be any of the following: +// +// * x.y.z -- Matches a version exactly the same as the selector version +// * >=x.y.z -- Matches a version greater than or equal to the selector version +// * >x.y.z -- Matches a version greater than the selector version +// * <=x.y.z -- Matches a version less than or equal to the selector version +// * =") { + rhsVersion, err := parseSemver(selector[2:]) + if err != nil { + return false, err + } + return compareSemver(lhsVersion, rhsVersion) >= 0, nil + } else if strings.HasPrefix(selector, ">") { + rhsVersion, err := parseSemver(selector[1:]) + if err != nil { + return false, err + } + return compareSemver(lhsVersion, rhsVersion) > 0, nil + } else if strings.HasPrefix(selector, "<=") { + rhsVersion, err := parseSemver(selector[2:]) + if err != nil { + return false, err + } + return compareSemver(lhsVersion, rhsVersion) <= 0, nil + } else if strings.HasPrefix(selector, "<") { + rhsVersion, err := parseSemver(selector[1:]) + if err != nil { + return false, err + } + return compareSemver(lhsVersion, rhsVersion) < 0, nil + } + + rhsVersion, err := parseSemver(selector) + if err != nil { + return false, err + } + return compareSemver(lhsVersion, rhsVersion) == 0, nil +} + +func parseSemver(version string) (semver, error) { + var result semver + // 0.0.0-some-prealpha-stuff.1+12345 + versionAndMetadata := strings.SplitN(version, "+", 2) + // [0.0.0-some-prealpha-stuff.1, 12345] + if len(versionAndMetadata) == 2 { + result.buildMetadata = versionAndMetadata[1] + } + versionAndPrerelease := strings.SplitN(versionAndMetadata[0], "-", 2) + // 
[0.0.0, some-prealpha-stuff.1] + if len(versionAndPrerelease) == 2 { + result.preReleaseVersion = versionAndPrerelease[1] + } + versionStr := versionAndPrerelease[0] + versionParts := strings.SplitN(versionStr, ".", 3) + // [0, 0, 0] + if len(versionParts) != 3 { + return result, fmt.Errorf("Not enough '.' characters in the version part") + } + major, err := strconv.Atoi(versionParts[0]) + if err != nil { + return result, fmt.Errorf("Cannot parse major version as int: %v", err) + } + minor, err := strconv.Atoi(versionParts[1]) + if err != nil { + return result, fmt.Errorf("Cannot parse minor version as int: %v", err) + } + patch, err := strconv.Atoi(versionParts[2]) + if err != nil { + return result, fmt.Errorf("Cannot parse patch version as int: %v", err) + } + result.major = major + result.minor = minor + result.patch = patch + return result, nil +} + +// compareSemver compares two semvers, 'lhs' and 'rhs', and returns -1 if lhs is less +// than rhs, 0 if they are equal, and 1 lhs is greater than rhs +func compareSemver(lhs, rhs semver) int { + if lhs.major < rhs.major { + return -1 + } + if lhs.major > rhs.major { + return 1 + } + if lhs.minor < rhs.minor { + return -1 + } + if lhs.minor > rhs.minor { + return 1 + } + if lhs.patch < rhs.patch { + return -1 + } + if lhs.patch > rhs.patch { + return 1 + } + if lhs.preReleaseVersion != "" && rhs.preReleaseVersion == "" { + return -1 + } + if lhs.preReleaseVersion == "" && rhs.preReleaseVersion != "" { + return 1 + } + if lhs.preReleaseVersion < rhs.preReleaseVersion { + return -1 + } + if lhs.preReleaseVersion > rhs.preReleaseVersion { + return 1 + } + return 0 +} + +// ExtractVersion extracts a matching version from the version number string +func ExtractVersion(input string) string { + versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+(\-[\S\.\-]+)?(\+[\S\.\-]+)?)`) + versionNumberStr := versionNumberRegex.FindStringSubmatch(input) + if len(versionNumberStr) >= 2 { + return string(versionNumberStr[1]) + } 
+ return "UNKNOWN" +} diff --git a/agent/utils/compare_versions_test.go b/agent/utils/compare_versions_test.go new file mode 100644 index 00000000000..b2d8eb912af --- /dev/null +++ b/agent/utils/compare_versions_test.go @@ -0,0 +1,236 @@ +// +build unit +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseSemver(t *testing.T) { + testCases := []struct { + input string + output semver + }{ + { + input: "1.0.0", + output: semver{ + major: 1, + minor: 0, + patch: 0, + }, + }, + { + input: "5.2.3", + output: semver{ + major: 5, + minor: 2, + patch: 3, + }, + }, + { + input: "1.0.0-rc-3", + output: semver{ + major: 1, + minor: 0, + patch: 0, + preReleaseVersion: "rc-3", + }, + }, + { + input: "1.0.0+sha.xyz", + output: semver{ + major: 1, + minor: 0, + patch: 0, + buildMetadata: "sha.xyz", + }, + }, + { + input: "1.0.0-rc1+sha.xyz", + output: semver{ + major: 1, + minor: 0, + patch: 0, + preReleaseVersion: "rc1", + buildMetadata: "sha.xyz", + }, + }, + { + input: "1.14.0-1.windows.1", + output: semver{ + major: 1, + minor: 14, + patch: 0, + preReleaseVersion: "1.windows.1", + }, + }, + } + + for i, testCase := range testCases { + result, err := parseSemver(testCase.input) + if err != nil { + t.Errorf("#%v: Error parsing %v: %v", i, testCase.input, err) + } + if result != testCase.output { + t.Errorf("#%v: Expected %v, got %v", i, testCase.output, result) 
+ } + } + + invalidCases := []string{"foo", "", "bar", "1.2", "x.y.z", "1.x.y", "1.1.z"} + for i, invalidCase := range invalidCases { + _, err := parseSemver(invalidCase) + if err == nil { + t.Errorf("#%v: Expected error, didn't get one. Input: %v", i, invalidCase) + } + } +} + +func TestVersionMatches(t *testing.T) { + testCases := []struct { + version string + selector string + expectedOutput bool + }{ + { + version: "1.0.0", + selector: "1.0.0", + expectedOutput: true, + }, + { + version: "1.0.0", + selector: ">=1.0.0", + expectedOutput: true, + }, + { + version: "2.1.0", + selector: ">=1.0.0", + expectedOutput: true, + }, + { + version: "2.1.0", + selector: "<=1.0.0", + expectedOutput: false, + }, + { + version: "0.1.0", + selector: "<1.0.0", + expectedOutput: true, + }, + { + version: "0.1.0", + selector: "<=1.0.0", + expectedOutput: true, + }, + { + version: "1.0.0-dev", + selector: "<1.0.0", + expectedOutput: true, + }, + { + version: "1.0.0-alpha", + selector: "<1.0.0-beta", + expectedOutput: true, + }, + { + version: "1.0.0", + selector: "<1.1.0", + expectedOutput: true, + }, + { + version: "1.1.0", + selector: "<1.1.1", + expectedOutput: true, + }, + { + version: "1.1.1", + selector: ">1.1.0", + expectedOutput: true, + }, + { + version: "1.1.0", + selector: ">1.0.0", + expectedOutput: true, + }, + { + version: "1.1.0", + selector: "2.1.0,1.1.0", + expectedOutput: true, + }, + { + version: "2.0.0", + selector: "2.1.0,1.1.0", + expectedOutput: false, + }, + { + version: "1.9.1", + selector: ">=1.9.0,<=1.9.1", + expectedOutput: true, + }, + { + version: "1.14.0-1.windows.1", + selector: ">=1.5.0", + expectedOutput: true, + }, + } + + for i, testCase := range testCases { + result, err := Version(testCase.version).Matches(testCase.selector) + if err != nil { + t.Errorf("#%v: Unexpected error %v", i, err) + } + if result != testCase.expectedOutput { + t.Errorf("#%v: %v(%v) expected %v but got %v", i, testCase.version, testCase.selector, testCase.expectedOutput, 
result) + } + } +} + +func TestExtractVersion(t *testing.T) { + testCases := []struct { + version string + expectedOutput string + }{ + { + version: "Amazon ECS v1.0.0", + expectedOutput: "1.0.0", + }, + { + version: " v1.0.0-test", + expectedOutput: "1.0.0-test", + }, + { + version: "Amazon ECS v1.2.3-1.test.2", + expectedOutput: "1.2.3-1.test.2", + }, + { + version: " v1.2.3-1.test.2+build.123", + expectedOutput: "1.2.3-1.test.2+build.123", + }, + { + version: "", + expectedOutput: "UNKNOWN", + }, + { + version: " abcd", + expectedOutput: "UNKNOWN", + }, + } + + for i, testCase := range testCases { + result := ExtractVersion(testCase.version) + assert.Equal(t, testCase.expectedOutput, result, "Test case %d", i) + } +} diff --git a/agent/utils/cpuinfo.go b/agent/utils/cpuinfo.go new file mode 100644 index 00000000000..1e88131bb49 --- /dev/null +++ b/agent/utils/cpuinfo.go @@ -0,0 +1,95 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "bufio" + "os" + "strings" +) + +type CPUInfo struct { + Processors []Processor `json:"processors"` +} + +type Processor struct { + Flags []string `json:"flags"` +} + +var ( + OpenFile = os.Open +) + +func ReadCPUInfo(path string) (*CPUInfo, error) { + file, err := OpenFile(path) + if err != nil { + return nil, err + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + var lines []string + + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + var cpuinfo = CPUInfo{} + var processor = &Processor{} + + for i, line := range lines { + var key string + var value string + + if len(line) == 0 && i != len(lines)-1 { + // end of processor + cpuinfo.Processors = append(cpuinfo.Processors, *processor) + processor = &Processor{} + continue + } else if i == len(lines)-1 { + cpuinfo.Processors = append(cpuinfo.Processors, *processor) + continue + } + + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key = strings.TrimSpace(fields[0]) + value = strings.TrimSpace(fields[1]) + + switch key { + case "flags", "Features": + processor.Flags = strings.FieldsFunc(value, func(r rune) bool { + return r == ',' || r == ' ' + }) + } + } + return &cpuinfo, nil +} + +// GetCPUFlags merges all processors' flags and return as a map. Returning map makes it +// easy to check whether a flag exists or not. +func GetCPUFlags(cpuInfo *CPUInfo) map[string]bool { + flagMap := map[string]bool{} + for _, proc := range cpuInfo.Processors { + for _, flag := range proc.Flags { + flagMap[flag] = true + } + } + return flagMap +} diff --git a/agent/utils/cpuinfo_test.go b/agent/utils/cpuinfo_test.go new file mode 100644 index 00000000000..24212ca7adf --- /dev/null +++ b/agent/utils/cpuinfo_test.go @@ -0,0 +1,71 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReadCPUInfoWithFlags(t *testing.T) { + path := filepath.Join(".", "testdata", "test_cpu_info") + cpuInfo, err := ReadCPUInfo(path) + assert.Nil(t, err) + assert.Equal(t, 2, len(cpuInfo.Processors)) + assert.Equal(t, 8, len(cpuInfo.Processors[0].Flags)) + assert.Equal(t, 10, len(cpuInfo.Processors[1].Flags)) +} + +func TestReadCPUInfoWithFlagsARM(t *testing.T) { + path := filepath.Join(".", "testdata", "test_cpu_info_arm") + cpuInfo, err := ReadCPUInfo(path) + assert.Nil(t, err) + assert.Equal(t, 2, len(cpuInfo.Processors)) + assert.Equal(t, 8, len(cpuInfo.Processors[0].Flags)) + assert.Equal(t, 9, len(cpuInfo.Processors[1].Flags)) +} + +func TestReadCPUInfoNoFlags(t *testing.T) { + path := filepath.Join(".", "testdata", "test_cpu_info_no_flag") + cpuInfo, err := ReadCPUInfo(path) + assert.Nil(t, err) + proc := cpuInfo.Processors[0] + assert.Nil(t, proc.Flags) +} + +func TestReadCPUInfoError(t *testing.T) { + path := filepath.Join(".", "testdata", "test_cpu_info_error") + cpu, err := ReadCPUInfo(path) + assert.Nil(t, cpu) + assert.Error(t, err) +} + +func TestGetCPUFlags(t *testing.T) { + proc1 := Processor{ + Flags: []string{"flag1", "flag2"}, + } + proc2 := Processor{ + Flags: []string{"flag1", "flag3"}, + } + cpu := &CPUInfo{ + []Processor{proc1, proc2}, + } + + flagMap := GetCPUFlags(cpu) + assert.Equal(t, 3, len(flagMap)) +} diff --git a/agent/utils/generate_mocks.go b/agent/utils/generate_mocks.go new file mode 100644 
index 00000000000..32314b95e47 --- /dev/null +++ b/agent/utils/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +//go:generate mockgen -destination=mocks/utils_mocks.go -copyright_file=../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/utils LicenseProvider diff --git a/agent/utils/ioutilwrapper/generate_mocks.go b/agent/utils/ioutilwrapper/generate_mocks.go new file mode 100644 index 00000000000..038f9fef9aa --- /dev/null +++ b/agent/utils/ioutilwrapper/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ioutilwrapper + +//go:generate mockgen -copyright_file=../../../scripts/copyright_file -destination=mocks/ioutilwrapper_mocks.go github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper IOUtil diff --git a/agent/utils/ioutilwrapper/ioutil.go b/agent/utils/ioutilwrapper/ioutil.go new file mode 100644 index 00000000000..02a4c860610 --- /dev/null +++ b/agent/utils/ioutilwrapper/ioutil.go @@ -0,0 +1,43 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ioutilwrapper + +import ( + "io/ioutil" + "os" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" +) + +// IOUtil wraps 'io/util' methods for testing +type IOUtil interface { + TempFile(string, string) (oswrapper.File, error) + WriteFile(string, []byte, os.FileMode) error +} + +type ioUtil struct { +} + +// NewIOUtil creates a new IOUtil object +func NewIOUtil() IOUtil { + return &ioUtil{} +} + +func (*ioUtil) TempFile(dir string, prefix string) (oswrapper.File, error) { + return ioutil.TempFile(dir, prefix) +} + +func (*ioUtil) WriteFile(filename string, data []byte, perm os.FileMode) error { + return ioutil.WriteFile(filename, data, perm) +} diff --git a/agent/utils/ioutilwrapper/mocks/ioutilwrapper_mocks.go b/agent/utils/ioutilwrapper/mocks/ioutilwrapper_mocks.go new file mode 100644 index 00000000000..6b0c88639e8 --- /dev/null +++ b/agent/utils/ioutilwrapper/mocks/ioutilwrapper_mocks.go @@ -0,0 +1,79 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper (interfaces: IOUtil) + +// Package mock_ioutilwrapper is a generated GoMock package. +package mock_ioutilwrapper + +import ( + os "os" + reflect "reflect" + + oswrapper "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" + gomock "github.com/golang/mock/gomock" +) + +// MockIOUtil is a mock of IOUtil interface +type MockIOUtil struct { + ctrl *gomock.Controller + recorder *MockIOUtilMockRecorder +} + +// MockIOUtilMockRecorder is the mock recorder for MockIOUtil +type MockIOUtilMockRecorder struct { + mock *MockIOUtil +} + +// NewMockIOUtil creates a new mock instance +func NewMockIOUtil(ctrl *gomock.Controller) *MockIOUtil { + mock := &MockIOUtil{ctrl: ctrl} + mock.recorder = &MockIOUtilMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIOUtil) EXPECT() *MockIOUtilMockRecorder { + return m.recorder +} + +// TempFile mocks base method +func (m *MockIOUtil) TempFile(arg0, arg1 string) (oswrapper.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TempFile", arg0, arg1) + ret0, _ := ret[0].(oswrapper.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TempFile indicates an expected call of TempFile +func (mr *MockIOUtilMockRecorder) TempFile(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TempFile", reflect.TypeOf((*MockIOUtil)(nil).TempFile), arg0, arg1) +} + +// WriteFile mocks base method +func (m *MockIOUtil) WriteFile(arg0 string, arg1 []byte, arg2 os.FileMode) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteFile", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteFile indicates an expected call of WriteFile +func (mr *MockIOUtilMockRecorder) WriteFile(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteFile", reflect.TypeOf((*MockIOUtil)(nil).WriteFile), arg0, arg1, arg2) +} diff --git a/agent/utils/json.go b/agent/utils/json.go new file mode 100644 index 00000000000..aee4eb76510 --- /dev/null +++ b/agent/utils/json.go @@ -0,0 +1,75 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "encoding/json" + "errors" + "reflect" + "strings" +) + +// JsonKeys takes an arbitrary byte array representing a json stringified object +// and returns all the keys of that object. 
+func JsonKeys(b []byte) ([]string, error) { + var keyMap map[string]interface{} + + err := json.Unmarshal(b, &keyMap) + if err != nil { + return []string{}, err + } + + keys := make([]string, len(keyMap)) + ndx := 0 + for k := range keyMap { + keys[ndx] = k + ndx++ + } + return keys, nil +} + +// CompleteJsonUnmarshal determines if a given struct has members corresponding +// to every key of a json object (passed as the json string). +// By default, Go ignores fields in an object which have no corresponding struct +// member and this can be used to determine if this ignoring has occurred +// A non-nil error is returned if parsing fails or a key has no matching field +func CompleteJsonUnmarshal(b []byte, iface interface{}) error { + keys, err := JsonKeys(b) + if err != nil { + return err + } + + structType := reflect.ValueOf(iface).Type() + + for _, key := range keys { + _, found := structType.FieldByNameFunc(func(name string) bool { + structField, _ := structType.FieldByName(name) + jsonTag := structField.Tag.Get("json") + jsonName := strings.Split(jsonTag, ",")[0] + if jsonName == key { + return true + } + if strings.EqualFold(key, structField.Name) { + return true + } + return false + }) + if !found { + // We encountered a json key that there was no corresponding struct + // field for; this is not complete + return errors.New("Unrecognized key: " + key) + } + } + return nil +} diff --git a/agent/utils/json_test.go b/agent/utils/json_test.go new file mode 100644 index 00000000000..9d96a285f20 --- /dev/null +++ b/agent/utils/json_test.go @@ -0,0 +1,104 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file.
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "encoding/json" + "testing" +) + +func TestJsonKeys(t *testing.T) { + keys, err := JsonKeys([]byte(`{"key1": 1, "key2": "val", "key3": {}}`)) + if err != nil { + t.Error(err) + } + if !SlicesDeepEqual(keys, []string{"key1", "key2", "key3"}) { + t.Errorf("Incorrect json keys: Got %v", keys) + } + + _, err = JsonKeys([]byte(`Invalid json }{`)) + if err == nil { + t.Error("Could find keys for invalid json") + } + + keys, err = JsonKeys([]byte(`{}`)) + if err != nil { + t.Error(err) + } + if len(keys) != 0 { + t.Error("Keys for empty object should be empty") + } + + _, err = JsonKeys([]byte(`[]`)) + if err == nil { + t.Error("Trying to get keys of a non-object wasn't an error") + } +} + +type testStruct struct { + Field1 string + Field2 int + Field3 int `json:"f3"` + Field4 int `json:"f4,omitempty,string"` +} + +type futureTestStruct struct { + Field1 string + Field2 int + Field3 int `json:"f3"` + Field4 int `json:"f4,omitempty,string"` + Field5 int `json:"f5,omitempty"` +} + +func TestCompleteJsonUnmarshal(t *testing.T) { + test := testStruct{Field1: "str", Field2: 1, Field3: 1, Field4: 1} + jsonString, _ := json.Marshal(test) + + var unmarshaledTest testStruct + err := json.Unmarshal(jsonString, &unmarshaledTest) + if err != nil { + t.Error(err) + } + + if err = CompleteJsonUnmarshal(jsonString, unmarshaledTest); err != nil { + t.Errorf("Unmarshal should have been complete: %v", err) + } + + future := futureTestStruct{Field1: "str", Field2: 1, Field5: 1} + futureStr, _ := json.Marshal(future) + + var outOfDate testStruct + err = json.Unmarshal(futureStr, &outOfDate) + if err != nil { + t.Error(err) + } + + if err = CompleteJsonUnmarshal(futureStr, outOfDate); err == nil { + t.Error("Shouldn't be complete with 
an unrecognized field5") + } + + var empty struct{} + if err = CompleteJsonUnmarshal([]byte(`{}`), empty); err != nil { + t.Error("The empty object can losslessly unmarshal into anything") + } + if err := CompleteJsonUnmarshal([]byte(`{}`), test); err != nil { + t.Error("The empty object can losslessly unmarshal into anything") + } + + if err = CompleteJsonUnmarshal([]byte(`{"key":"val"}`), empty); err == nil { + t.Error("The non-empty object can't unmarshal into the empty struct losslessly") + } +} diff --git a/agent/utils/license.go b/agent/utils/license.go new file mode 100644 index 00000000000..6eccff6ac9e --- /dev/null +++ b/agent/utils/license.go @@ -0,0 +1,57 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package utils + +import ( + "io/ioutil" + "strings" +) + +type LicenseProvider interface { + GetText() (string, error) +} + +type licenseProvider struct{} + +const ( + licenseFile = "LICENSE" + noticeFile = "NOTICE" + thirdPartyFile = "THIRD-PARTY" +) + +var readFile = ioutil.ReadFile + +func NewLicenseProvider() LicenseProvider { + return &licenseProvider{} +} + +func (l *licenseProvider) GetText() (string, error) { + licenseText, err := readFile(licenseFile) + if err != nil { + return "", err + } + + noticeText, err := readFile(noticeFile) + if err != nil { + return "", err + } + + thirdPartyText, err := readFile(thirdPartyFile) + if err != nil { + return "", err + } + + separator := "\n" + strings.Repeat("=", 80) + "\n" + + return string(licenseText) + separator + string(noticeText) + separator + string(thirdPartyText), nil +} diff --git a/agent/utils/license_test.go b/agent/utils/license_test.go new file mode 100644 index 00000000000..115ebe38be6 --- /dev/null +++ b/agent/utils/license_test.go @@ -0,0 +1,69 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "errors" + "strings" + "testing" +) + +func TestGetTextSuccess(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return []byte(name), nil + } + + provider := licenseProvider{} + text, err := provider.GetText() + + if err != nil { + t.Error("Did not expect error", err) + } + + expectedText := "LICENSE\n" + strings.Repeat("=", 80) + "\nNOTICE\n" + strings.Repeat("=", 80) + "\nTHIRD-PARTY" + if text != expectedText { + t.Errorf("Expected %s but was %s", expectedText, text) + } +} + +func TestGetTextErrorLicense(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, errors.New("test error") + } + + provider := licenseProvider{} + _, err := provider.GetText() + + if err == nil { + t.Error("Expected error but was nil") + } +} + +func TestGetTextErrorNotice(t *testing.T) { + readFile = func(name string) ([]byte, error) { + if name == "LICENSE" { + return nil, nil + } + return nil, errors.New("test error") + } + + provider := licenseProvider{} + _, err := provider.GetText() + + if err == nil { + t.Error("Expected error but was nil") + } +} diff --git a/agent/utils/mobypkgwrapper/generate_mocks.go b/agent/utils/mobypkgwrapper/generate_mocks.go new file mode 100644 index 00000000000..179698c24e8 --- /dev/null +++ b/agent/utils/mobypkgwrapper/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package mobypkgwrapper + +//go:generate mockgen -destination=mocks/pluginswrapper_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper Plugins diff --git a/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go b/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go new file mode 100644 index 00000000000..76932641ead --- /dev/null +++ b/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go @@ -0,0 +1,63 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper (interfaces: Plugins) + +// Package mock_mobypkgwrapper is a generated GoMock package. 
+package mock_mobypkgwrapper + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockPlugins is a mock of Plugins interface +type MockPlugins struct { + ctrl *gomock.Controller + recorder *MockPluginsMockRecorder +} + +// MockPluginsMockRecorder is the mock recorder for MockPlugins +type MockPluginsMockRecorder struct { + mock *MockPlugins +} + +// NewMockPlugins creates a new mock instance +func NewMockPlugins(ctrl *gomock.Controller) *MockPlugins { + mock := &MockPlugins{ctrl: ctrl} + mock.recorder = &MockPluginsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockPlugins) EXPECT() *MockPluginsMockRecorder { + return m.recorder +} + +// Scan mocks base method +func (m *MockPlugins) Scan() ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scan") + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Scan indicates an expected call of Scan +func (mr *MockPluginsMockRecorder) Scan() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockPlugins)(nil).Scan)) +} diff --git a/agent/utils/mobypkgwrapper/plugins.go b/agent/utils/mobypkgwrapper/plugins.go new file mode 100644 index 00000000000..86858f5bcbb --- /dev/null +++ b/agent/utils/mobypkgwrapper/plugins.go @@ -0,0 +1,35 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package mobypkgwrapper + +import ( + mobyplugins "github.com/docker/docker/pkg/plugins" +) + +// Plugins wraps moby/pkg/plugins methods for testing +type Plugins interface { + Scan() ([]string, error) +} + +type plugins struct { +} + +// NewPlugins creates a new Plugins object +func NewPlugins() Plugins { + return &plugins{} +} + +func (*plugins) Scan() ([]string, error) { + return mobyplugins.Scan() +} diff --git a/agent/utils/mocks/utils_mocks.go b/agent/utils/mocks/utils_mocks.go new file mode 100644 index 00000000000..94758c9f316 --- /dev/null +++ b/agent/utils/mocks/utils_mocks.go @@ -0,0 +1,63 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils (interfaces: LicenseProvider) + +// Package mock_utils is a generated GoMock package. 
+package mock_utils + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockLicenseProvider is a mock of LicenseProvider interface +type MockLicenseProvider struct { + ctrl *gomock.Controller + recorder *MockLicenseProviderMockRecorder +} + +// MockLicenseProviderMockRecorder is the mock recorder for MockLicenseProvider +type MockLicenseProviderMockRecorder struct { + mock *MockLicenseProvider +} + +// NewMockLicenseProvider creates a new mock instance +func NewMockLicenseProvider(ctrl *gomock.Controller) *MockLicenseProvider { + mock := &MockLicenseProvider{ctrl: ctrl} + mock.recorder = &MockLicenseProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockLicenseProvider) EXPECT() *MockLicenseProviderMockRecorder { + return m.recorder +} + +// GetText mocks base method +func (m *MockLicenseProvider) GetText() (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetText") + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetText indicates an expected call of GetText +func (mr *MockLicenseProviderMockRecorder) GetText() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetText", reflect.TypeOf((*MockLicenseProvider)(nil).GetText)) +} diff --git a/agent/utils/nswrapper/generate_mocks.go b/agent/utils/nswrapper/generate_mocks.go new file mode 100644 index 00000000000..a80575efc25 --- /dev/null +++ b/agent/utils/nswrapper/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package nswrapper + +//go:generate mockgen -destination=mocks/nswrapper_mocks_linux.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/utils/nswrapper NS diff --git a/agent/utils/nswrapper/mocks/nswrapper_mocks_linux.go b/agent/utils/nswrapper/mocks/nswrapper_mocks_linux.go new file mode 100644 index 00000000000..7a3de355ae0 --- /dev/null +++ b/agent/utils/nswrapper/mocks/nswrapper_mocks_linux.go @@ -0,0 +1,78 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/nswrapper (interfaces: NS) + +// Package mock_nswrapper is a generated GoMock package. 
+package mock_nswrapper + +import ( + reflect "reflect" + + ns "github.com/containernetworking/plugins/pkg/ns" + gomock "github.com/golang/mock/gomock" +) + +// MockNS is a mock of NS interface +type MockNS struct { + ctrl *gomock.Controller + recorder *MockNSMockRecorder +} + +// MockNSMockRecorder is the mock recorder for MockNS +type MockNSMockRecorder struct { + mock *MockNS +} + +// NewMockNS creates a new mock instance +func NewMockNS(ctrl *gomock.Controller) *MockNS { + mock := &MockNS{ctrl: ctrl} + mock.recorder = &MockNSMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNS) EXPECT() *MockNSMockRecorder { + return m.recorder +} + +// GetNS mocks base method +func (m *MockNS) GetNS(arg0 string) (ns.NetNS, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNS", arg0) + ret0, _ := ret[0].(ns.NetNS) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNS indicates an expected call of GetNS +func (mr *MockNSMockRecorder) GetNS(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNS", reflect.TypeOf((*MockNS)(nil).GetNS), arg0) +} + +// WithNetNSPath mocks base method +func (m *MockNS) WithNetNSPath(arg0 string, arg1 func(ns.NetNS) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WithNetNSPath", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WithNetNSPath indicates an expected call of WithNetNSPath +func (mr *MockNSMockRecorder) WithNetNSPath(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithNetNSPath", reflect.TypeOf((*MockNS)(nil).WithNetNSPath), arg0, arg1) +} diff --git a/agent/utils/nswrapper/ns_linux.go b/agent/utils/nswrapper/ns_linux.go new file mode 100644 index 00000000000..69947596cab --- /dev/null +++ b/agent/utils/nswrapper/ns_linux.go @@ -0,0 +1,40 @@ +// +build linux + +// Copyright Amazon.com 
Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package nswrapper + +import "github.com/containernetworking/plugins/pkg/ns" + +// NS wraps methods used from the cni/pkg/ns package +type NS interface { + GetNS(nspath string) (ns.NetNS, error) + WithNetNSPath(nspath string, toRun func(ns.NetNS) error) error +} + +type agentNS struct { +} + +// NewNS creates a new NS object +func NewNS() NS { + return &agentNS{} +} + +func (*agentNS) GetNS(nspath string) (ns.NetNS, error) { + return ns.GetNS(nspath) +} + +func (*agentNS) WithNetNSPath(nspath string, toRun func(ns.NetNS) error) error { + return ns.WithNetNSPath(nspath, toRun) +} diff --git a/agent/utils/oswrapper/mocks/oswrapper_mocks.go b/agent/utils/oswrapper/mocks/oswrapper_mocks.go new file mode 100644 index 00000000000..f4351dc7ea1 --- /dev/null +++ b/agent/utils/oswrapper/mocks/oswrapper_mocks.go @@ -0,0 +1,59 @@ +package mocks + +import ( + "os" + + "github.com/aws/amazon-ecs-agent/agent/utils/oswrapper" +) + +type MockFile struct { + ChmodImpl func(os.FileMode) error + NameImpl func() string + SyncImpl func() error + WriteImpl func([]byte) (int, error) +} + +func NewMockFile() oswrapper.File { + return &MockFile{ + ChmodImpl: func(os.FileMode) error { + return nil + }, + NameImpl: func() string { + return "" + }, + SyncImpl: func() error { + return nil + }, + WriteImpl: func(bytes []byte) (i int, e error) { + return 0, nil + }, + } +} + +func (f *MockFile) Name() string { + return f.NameImpl() +} + +func (f 
*MockFile) Close() error { + return nil +} + +func (f *MockFile) Chmod(fileMode os.FileMode) error { + return f.ChmodImpl(fileMode) +} + +func (f *MockFile) Write(content []byte) (int, error) { + return f.WriteImpl(content) +} + +func (f *MockFile) WriteAt(b []byte, off int64) (n int, err error) { + return 0, nil +} + +func (f *MockFile) Sync() error { + return f.SyncImpl() +} + +func (f *MockFile) Read([]byte) (int, error) { + return 0, nil +} diff --git a/agent/utils/oswrapper/os.go b/agent/utils/oswrapper/os.go new file mode 100644 index 00000000000..04f640ebc45 --- /dev/null +++ b/agent/utils/oswrapper/os.go @@ -0,0 +1,27 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package oswrapper + +import "os" + +// File wraps methods for os.File type +type File interface { + Name() string + Close() error + Chmod(os.FileMode) error + Write([]byte) (int, error) + WriteAt(b []byte, off int64) (n int, err error) + Sync() error + Read([]byte) (int, error) +} diff --git a/agent/utils/repotag.go b/agent/utils/repotag.go new file mode 100644 index 00000000000..f2a4159a760 --- /dev/null +++ b/agent/utils/repotag.go @@ -0,0 +1,26 @@ +// This file is derived from the fsouza/go-dockerclient Project, Copyright (c) 2013-2016, go-dockerclient authors +// The original code may be found: +// https://github.com/fsouza/go-dockerclient/blob/master/misc.go +// +// Copyright 2013 go-dockerclient authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Modifications are Copyright 2016 Amazon.com, Inc. or its affiliates. Licensed under the Apache License 2.0 + +package utils + +import "strings" + +// ParseRepositoryTag mimics the go-dockerclient's ParseReposirotyData. The only difference +// is that it doesn't ignore the sha when present. +func ParseRepositoryTag(repoTag string) (repository string, tag string) { + n := strings.LastIndex(repoTag, ":") + if n < 0 { + return repoTag, "" + } + if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { + return repoTag[:n], tag + } + return repoTag, "" +} diff --git a/agent/utils/retry/backoff.go b/agent/utils/retry/backoff.go new file mode 100644 index 00000000000..4fae160f8a5 --- /dev/null +++ b/agent/utils/retry/backoff.go @@ -0,0 +1,36 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package retry + +import ( + "math/rand" + "time" +) + +type Backoff interface { + Reset() + Duration() time.Duration +} + +// AddJitter adds an amount of jitter between 0 and the given jitter to the +// given duration +func AddJitter(duration time.Duration, jitter time.Duration) time.Duration { + var randJitter int64 + if jitter.Nanoseconds() == 0 { + randJitter = 0 + } else { + randJitter = rand.Int63n(jitter.Nanoseconds()) + } + return time.Duration(duration.Nanoseconds() + randJitter) +} diff --git a/agent/utils/retry/backoff_test.go b/agent/utils/retry/backoff_test.go new file mode 100644 index 00000000000..09e3bf57d51 --- /dev/null +++ b/agent/utils/retry/backoff_test.go @@ -0,0 +1,30 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package retry + +import ( + "testing" + "time" +) + +func TestJitter(t *testing.T) { + for i := 0; i < 10; i++ { + duration := AddJitter(10*time.Second, 3*time.Second) + if duration < 10*time.Second || duration > 13*time.Second { + t.Error("Excessive amount of jitter", duration) + } + } +} diff --git a/agent/utils/retry/exponential_backoff.go b/agent/utils/retry/exponential_backoff.go new file mode 100644 index 00000000000..15d36e8d968 --- /dev/null +++ b/agent/utils/retry/exponential_backoff.go @@ -0,0 +1,61 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package retry + +import ( + "math" + "sync" + "time" +) + +type ExponentialBackoff struct { + current time.Duration + start time.Duration + max time.Duration + jitterMultiple float64 + multiple float64 + mu sync.Mutex +} + +// NewExponentialBackoff creates a Backoff which ranges from min to max increasing by +// multiple each time. (t = start * multiple * n for the nth iteration) +// It also adds (and yes, the jitter is always added, never +// subtracted) a random amount of jitter up to jitterMultiple percent (that is, +// jitterMultiple = 0.0 is no jitter, 0.15 is 15% added jitter). 
The total time +// may exceed "max" when accounting for jitter, such that the absolute max is +// max + max * jiterMultiple + +func NewExponentialBackoff(min, max time.Duration, jitterMultiple, multiple float64) *ExponentialBackoff { + return &ExponentialBackoff{ + start: min, + current: min, + max: max, + jitterMultiple: jitterMultiple, + multiple: multiple, + } +} + +func (sb *ExponentialBackoff) Duration() time.Duration { + sb.mu.Lock() + defer sb.mu.Unlock() + ret := sb.current + sb.current = time.Duration(math.Min(float64(sb.max.Nanoseconds()), float64(sb.current.Nanoseconds())*sb.multiple)) + return AddJitter(ret, time.Duration(int64(float64(ret)*sb.jitterMultiple))) +} + +func (sb *ExponentialBackoff) Reset() { + sb.mu.Lock() + defer sb.mu.Unlock() + sb.current = sb.start +} diff --git a/agent/utils/retry/exponential_backoff_test.go b/agent/utils/retry/exponential_backoff_test.go new file mode 100644 index 00000000000..5c43702e7e6 --- /dev/null +++ b/agent/utils/retry/exponential_backoff_test.go @@ -0,0 +1,46 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package retry + +import ( + "testing" + "time" +) + +func TestExponentialBackoff(t *testing.T) { + sb := NewExponentialBackoff(10*time.Second, time.Minute, 0, 2) + + for i := 0; i < 2; i++ { + duration := sb.Duration() + if duration.Nanoseconds() != 10*time.Second.Nanoseconds() { + t.Error("Initial duration incorrect. 
Got ", duration.Nanoseconds()) + } + + duration = sb.Duration() + if duration.Nanoseconds() != 20*time.Second.Nanoseconds() { + t.Error("Increase incorrect") + } + duration = sb.Duration() // 40s + if duration.Nanoseconds() != 40*time.Second.Nanoseconds() { + t.Error("Increase incorrect") + } + duration = sb.Duration() + if duration.Nanoseconds() != 60*time.Second.Nanoseconds() { + t.Error("Didn't stop at maximum") + } + sb.Reset() + // loop to redo the above tests after resetting, they should be the same + } +} diff --git a/agent/utils/retry/generate_mocks.go b/agent/utils/retry/generate_mocks.go new file mode 100644 index 00000000000..784ac393ff8 --- /dev/null +++ b/agent/utils/retry/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package retry + +//go:generate mockgen -destination=mock/retry_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/utils/retry Backoff diff --git a/agent/utils/retry/mock/retry_mocks.go b/agent/utils/retry/mock/retry_mocks.go new file mode 100644 index 00000000000..4acf1a4adc5 --- /dev/null +++ b/agent/utils/retry/mock/retry_mocks.go @@ -0,0 +1,75 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/retry (interfaces: Backoff) + +// Package mock_retry is a generated GoMock package. +package mock_retry + +import ( + reflect "reflect" + time "time" + + gomock "github.com/golang/mock/gomock" +) + +// MockBackoff is a mock of Backoff interface +type MockBackoff struct { + ctrl *gomock.Controller + recorder *MockBackoffMockRecorder +} + +// MockBackoffMockRecorder is the mock recorder for MockBackoff +type MockBackoffMockRecorder struct { + mock *MockBackoff +} + +// NewMockBackoff creates a new mock instance +func NewMockBackoff(ctrl *gomock.Controller) *MockBackoff { + mock := &MockBackoff{ctrl: ctrl} + mock.recorder = &MockBackoffMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockBackoff) EXPECT() *MockBackoffMockRecorder { + return m.recorder +} + +// Duration mocks base method +func (m *MockBackoff) Duration() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Duration") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// Duration indicates an expected call of Duration +func (mr *MockBackoffMockRecorder) Duration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Duration", reflect.TypeOf((*MockBackoff)(nil).Duration)) +} + +// Reset mocks base method +func (m *MockBackoff) Reset() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset") +} + +// Reset indicates an expected call of Reset +func (mr *MockBackoffMockRecorder) Reset() *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockBackoff)(nil).Reset)) +} diff --git a/agent/utils/retry/retry.go b/agent/utils/retry/retry.go new file mode 100644 index 00000000000..f8acac21a17 --- /dev/null +++ b/agent/utils/retry/retry.go @@ -0,0 +1,84 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package retry + +import ( + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "golang.org/x/net/context" +) + +var _time ttime.Time = &ttime.DefaultTime{} + +// RetryWithBackoff takes a Backoff and a function to call that returns an error +// If the error is nil then the function will no longer be called +// If the error is Retriable then that will be used to determine if it should be +// retried +func RetryWithBackoff(backoff Backoff, fn func() error) error { + return RetryWithBackoffCtx(context.Background(), backoff, fn) +} + +// RetryWithBackoffCtx takes a context, a Backoff, and a function to call that returns an error +// If the context is done, nil will be returned +// If the error is nil then the function will no longer be called +// If the error is Retriable then that will be used to determine if it should be +// retried +func RetryWithBackoffCtx(ctx context.Context, backoff Backoff, fn func() error) error { + var err error + for { + select { + case <-ctx.Done(): + return nil + default: + } + + err = fn() + 
+ retriableErr, isRetriableErr := err.(apierrors.Retriable) + + if err == nil || (isRetriableErr && !retriableErr.Retry()) { + return err + } + + _time.Sleep(backoff.Duration()) + } +} + +// RetryNWithBackoff takes a Backoff, a maximum number of tries 'n', and a +// function that returns an error. The function is called until either it does +// not return an error or the maximum tries have been reached. +// If the error returned is Retriable, the Retriability of it will be respected. +// If the number of tries is exhausted, the last error will be returned. +func RetryNWithBackoff(backoff Backoff, n int, fn func() error) error { + return RetryNWithBackoffCtx(context.Background(), backoff, n, fn) +} + +// RetryNWithBackoffCtx takes a context, a Backoff, a maximum number of tries 'n', and a function that returns an error. +// The function is called until it does not return an error, the context is done, or the maximum tries have been +// reached. +// If the error returned is Retriable, the Retriability of it will be respected. +// If the number of tries is exhausted, the last error will be returned. +func RetryNWithBackoffCtx(ctx context.Context, backoff Backoff, n int, fn func() error) error { + var err error + RetryWithBackoffCtx(ctx, backoff, func() error { + err = fn() + n-- + if n == 0 { + // Break out after n tries + return nil + } + return err + }) + return err +} diff --git a/agent/utils/retry/retry_test.go b/agent/utils/retry/retry_test.go new file mode 100644 index 00000000000..abf7ac1b394 --- /dev/null +++ b/agent/utils/retry/retry_test.go @@ -0,0 +1,186 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package retry + +import ( + "errors" + "testing" + "time" + + apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + mock_ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestRetryWithBackoff(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mocktime := mock_ttime.NewMockTime(ctrl) + _time = mocktime + defer func() { _time = &ttime.DefaultTime{} }() + + t.Run("retries", func(t *testing.T) { + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(3) + counter := 3 + RetryWithBackoff(NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), func() error { + if counter == 0 { + return nil + } + counter-- + return errors.New("err") + }) + assert.Equal(t, 0, counter, "Counter didn't go to 0; didn't get retried enough") + }) + + t.Run("no retries", func(t *testing.T) { + // no sleeps + RetryWithBackoff(NewExponentialBackoff(10*time.Second, 20*time.Second, 0, 2), func() error { + return apierrors.NewRetriableError(apierrors.NewRetriable(false), errors.New("can't retry")) + }) + }) +} + +func TestRetryWithBackoffCtx(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mocktime := mock_ttime.NewMockTime(ctrl) + _time = mocktime + defer func() { _time = &ttime.DefaultTime{} }() + + t.Run("retries", func(t *testing.T) { + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(3) + counter := 3 + RetryWithBackoffCtx(context.TODO(), NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), func() error { + if counter == 0 { + return nil + } + counter-- + return errors.New("err") + }) + 
assert.Equal(t, 0, counter, "Counter didn't go to 0; didn't get retried enough") + }) + + t.Run("no retries", func(t *testing.T) { + // no sleeps + RetryWithBackoffCtx(context.TODO(), NewExponentialBackoff(10*time.Second, 20*time.Second, 0, 2), func() error { + return apierrors.NewRetriableError(apierrors.NewRetriable(false), errors.New("can't retry")) + }) + }) + + t.Run("cancel context", func(t *testing.T) { + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(2) + counter := 2 + ctx, cancel := context.WithCancel(context.TODO()) + RetryWithBackoffCtx(ctx, NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), func() error { + counter-- + if counter == 0 { + cancel() + } + return errors.New("err") + }) + assert.Equal(t, 0, counter, "Counter not 0; went the wrong number of times") + }) + +} + +func TestRetryNWithBackoff(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mocktime := mock_ttime.NewMockTime(ctrl) + _time = mocktime + defer func() { _time = &ttime.DefaultTime{} }() + + t.Run("count exceeded", func(t *testing.T) { + // 2 tries, 1 sleep + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(1) + counter := 3 + err := RetryNWithBackoff(NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 2, func() error { + counter-- + return errors.New("err") + }) + assert.Equal(t, 1, counter, "Should have stopped after two tries") + assert.Error(t, err) + }) + + t.Run("retry succeeded", func(t *testing.T) { + // 3 tries, 2 sleeps + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(2) + counter := 3 + err := RetryNWithBackoff(NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 5, func() error { + counter-- + if counter == 0 { + return nil + } + return errors.New("err") + }) + assert.Equal(t, 0, counter) + assert.NoError(t, err) + }) +} + +func TestRetryNWithBackoffCtx(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mocktime := mock_ttime.NewMockTime(ctrl) + 
_time = mocktime + defer func() { _time = &ttime.DefaultTime{} }() + + t.Run("count exceeded", func(t *testing.T) { + // 2 tries, 1 sleep + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(1) + counter := 3 + err := RetryNWithBackoffCtx(context.TODO(), NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 2, func() error { + counter-- + return errors.New("err") + }) + assert.Equal(t, 1, counter, "Should have stopped after two tries") + assert.Error(t, err) + }) + + t.Run("retry succeeded", func(t *testing.T) { + // 3 tries, 2 sleeps + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(2) + counter := 3 + err := RetryNWithBackoffCtx(context.TODO(), NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 5, func() error { + counter-- + if counter == 0 { + return nil + } + return errors.New("err") + }) + assert.Equal(t, 0, counter) + assert.NoError(t, err) + }) + + t.Run("cancel context", func(t *testing.T) { + // 2 tries, 2 sleeps + mocktime.EXPECT().Sleep(100 * time.Millisecond).Times(2) + counter := 3 + ctx, cancel := context.WithCancel(context.TODO()) + err := RetryNWithBackoffCtx(ctx, NewExponentialBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 5, func() error { + counter-- + if counter == 1 { + cancel() + } + return errors.New("err") + }) + assert.Equal(t, 1, counter, "Should have stopped after two tries") + assert.Error(t, err) + }) +} diff --git a/agent/utils/semaphore.go b/agent/utils/semaphore.go new file mode 100644 index 00000000000..215a063d066 --- /dev/null +++ b/agent/utils/semaphore.go @@ -0,0 +1,48 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +// Implements a simple counting sempahore on top of channels + +type empty struct{} +type Semaphore interface { + Post() + Wait() +} + +// Implements semaphore +type ChanSemaphore struct { + semaphore chan empty + Count int // Public for introspection; should not be written to +} + +func NewSemaphore(count int) Semaphore { + sem := make(chan empty, count) + // Init to all resources available + for i := 0; i < count; i++ { + sem <- empty{} + } + return &ChanSemaphore{ + semaphore: sem, + Count: count, + } +} + +func (s *ChanSemaphore) Post() { + s.semaphore <- empty{} +} + +func (s *ChanSemaphore) Wait() { + <-s.semaphore +} diff --git a/agent/utils/sign_http_request.go b/agent/utils/sign_http_request.go new file mode 100644 index 00000000000..adb9a7d2f8f --- /dev/null +++ b/agent/utils/sign_http_request.go @@ -0,0 +1,36 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "io" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/cihub/seelog" + "github.com/pkg/errors" +) + +// SignHTTPRequest signs an http.Request struct with authv4 using the given region, service, and credentials. 
+func SignHTTPRequest(req *http.Request, region, service string, creds *credentials.Credentials, body io.ReadSeeker) error { + signer := v4.NewSigner(creds) + _, err := signer.Sign(req, body, service, region, time.Now()) + if err != nil { + seelog.Warnf("Signing HTTP request failed: %v", err) + return errors.Wrap(err, "aws sdk http signer: failed to sign http request") + } + return nil +} diff --git a/agent/utils/sync/sequential_waitgroup.go b/agent/utils/sync/sequential_waitgroup.go new file mode 100644 index 00000000000..78b0e266196 --- /dev/null +++ b/agent/utils/sync/sequential_waitgroup.go @@ -0,0 +1,91 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Package sync is an analogue to the stdlib sync package. +// It contains lowlevel synchonization primitives, but not quite as low level as 'sync' does +package sync + +import stdsync "sync" + +// A SequentialWaitGroup waits for a collection of goroutines to finish. Each +// goroutine may add itself to the waitgroup with 'Add', providing a sequence +// number. Each goroutine should then call 'Done' with its sequence number when finished. +// Elsewhere, 'Wait' can be used to wait for all groups at or below the +// provided sequence number to complete. 
+type SequentialWaitGroup struct { + mutex stdsync.Mutex + // Implement our own semaphore over using sync.WaitGroup so that we can safely GC our map + semaphores map[int64]int + change *stdsync.Cond +} + +func NewSequentialWaitGroup() *SequentialWaitGroup { + return &SequentialWaitGroup{ + semaphores: make(map[int64]int), + change: stdsync.NewCond(&stdsync.Mutex{}), + } +} + +// Add adds the given delta to the waitgroup at the given sequence +func (s *SequentialWaitGroup) Add(sequence int64, delta int) { + s.mutex.Lock() + defer s.mutex.Unlock() + + _, ok := s.semaphores[sequence] + if ok { + s.semaphores[sequence] += delta + } else { + s.semaphores[sequence] = delta + } + if s.semaphores[sequence] <= 0 { + delete(s.semaphores, sequence) + s.change.Broadcast() + } +} + +// Done decrements the waitgroup at the given sequence by one +func (s *SequentialWaitGroup) Done(sequence int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + _, ok := s.semaphores[sequence] + if ok { + s.semaphores[sequence]-- + if s.semaphores[sequence] == 0 { + delete(s.semaphores, sequence) + s.change.Broadcast() + } + } +} + +// Wait waits for all waitgroups at or below the given sequence to complete. 
+// Please note that this is *INCLUSIVE* of the sequence +func (s *SequentialWaitGroup) Wait(sequence int64) { + waitOver := func() bool { + s.mutex.Lock() + defer s.mutex.Unlock() + for storedSequence := range s.semaphores { + if storedSequence <= sequence { + // At least one non-empty seqnum greater than ours; wait more + return false + } + } + return true + } + + s.change.L.Lock() + defer s.change.L.Unlock() + // Wake up to check if our wait is over after each element being deleted from the map + for !waitOver() { + s.change.Wait() + } +} diff --git a/agent/utils/sync/sequential_waitgroup_test.go b/agent/utils/sync/sequential_waitgroup_test.go new file mode 100644 index 00000000000..780f9b873a1 --- /dev/null +++ b/agent/utils/sync/sequential_waitgroup_test.go @@ -0,0 +1,69 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package sync + +import "testing" + +func TestSequentialWaitgroup(t *testing.T) { + wg := NewSequentialWaitGroup() + wg.Add(1, 1) + wg.Add(2, 1) + wg.Add(1, 1) + + // Wait for '0' should not fail, nothing for sequence numbers below it + wg.Wait(0) + + done := make(chan bool) + go func() { + wg.Done(1) + wg.Done(1) + wg.Wait(1) + wg.Done(2) + wg.Wait(2) + done <- true + }() + <-done +} + +func TestManyDones(t *testing.T) { + wg := NewSequentialWaitGroup() + + waitGroupCount := 10 + for i := 1; i < waitGroupCount; i++ { + wg.Add(int64(i), i) + } + + for i := 1; i < waitGroupCount; i++ { + wg.Wait(int64(i - 1)) + + isAwake := make(chan bool) + go func(i int64) { + wg.Wait(i) + isAwake <- true + }(int64(i)) + + for j := 0; j < i; j++ { + if j < i-1 { + select { + case <-isAwake: + t.Fatal("Should not be awake before all dones called") + default: + } + } + wg.Done(int64(i)) + } + } +} diff --git a/agent/utils/testdata/test_cpu_info b/agent/utils/testdata/test_cpu_info new file mode 100644 index 00000000000..6ee8e1eea06 --- /dev/null +++ b/agent/utils/testdata/test_cpu_info @@ -0,0 +1,24 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 avx + diff --git a/agent/utils/testdata/test_cpu_info_arm b/agent/utils/testdata/test_cpu_info_arm new file mode 100644 index 00000000000..11cc1afb050 --- /dev/null +++ b/agent/utils/testdata/test_cpu_info_arm @@ -0,0 +1,18 @@ +processor : 0 +BogoMIPS : 166.66 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd08 +CPU 
revision : 3 + +processor : 1 +BogoMIPS : 166.66 +Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid +CPU implementer : 0x41 +CPU architecture: 8 +CPU variant : 0x0 +CPU part : 0xd08 +CPU revision : 3 + diff --git a/agent/utils/testdata/test_cpu_info_no_flag b/agent/utils/testdata/test_cpu_info_no_flag new file mode 100644 index 00000000000..ad70645aff8 --- /dev/null +++ b/agent/utils/testdata/test_cpu_info_no_flag @@ -0,0 +1,11 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz +stepping : 4 +microcode : 0x200005e +cpu MHz : 2500.000 +cache size : 33792 KB +physical id : 0 + diff --git a/agent/utils/ticker.go b/agent/utils/ticker.go new file mode 100644 index 00000000000..5d040920197 --- /dev/null +++ b/agent/utils/ticker.go @@ -0,0 +1,52 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +package utils + +import ( + "context" + "math/rand" + "time" +) + +// NewJitteredTicker works like a time.Ticker except with randomly distributed ticks +// between start and end duration. 
+func NewJitteredTicker(ctx context.Context, start, end time.Duration) <-chan time.Time { + ticker := make(chan time.Time, 1) + + go func() { + defer close(ticker) + + for { + select { + case <-ctx.Done(): + return + default: + time.Sleep(randomDuration(start, end)) + sendNow(ticker) + } + } + }() + + return ticker +} + +func randomDuration(start, end time.Duration) time.Duration { + return time.Duration(start.Nanoseconds()+rand.Int63n(end.Nanoseconds()-start.Nanoseconds())) * time.Nanosecond +} + +func sendNow(ticker chan<- time.Time) { + select { + case ticker <- time.Now(): + default: + } +} diff --git a/agent/utils/ticker_test.go b/agent/utils/ticker_test.go new file mode 100644 index 00000000000..868b6749dc3 --- /dev/null +++ b/agent/utils/ticker_test.go @@ -0,0 +1,85 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+package utils + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + TheBestNumber = 28 +) + +func init() { + // The best randomness is deterministic + rand.Seed(TheBestNumber) +} + +func TestTickerHappyCase(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1000*time.Millisecond) + defer cancel() + mTicker := NewJitteredTicker(ctx, 10*time.Millisecond, 100*time.Millisecond) + + lowerTickLimit := 7 + upperTickLimit := 130 + + times := 0 + for { + _, ok := <-mTicker + times++ + if !ok { + break + } + } + if times < lowerTickLimit || times > upperTickLimit { + t.Errorf("Should tick more than %d but less than %d times, got: %d", lowerTickLimit, upperTickLimit, times) + } +} + +func TestRandomDuration(t *testing.T) { + start := 10 * time.Second + end := 20 * time.Second + + for i := 0; i < 10000; i++ { + c := randomDuration(start, end) + if c < start || c > end { + t.Errorf("Generated a bad time! seconds: %v nanoseconds: %d", c, c.Nanoseconds()) + } + } +} + +func TestSendNowDoesNotBlock(t *testing.T) { + c := make(chan time.Time, 1) + sendNow(c) + sendNow(c) + + times := 0 +loop: + for { + select { + case <-c: + times++ + default: + break loop + } + } + + assert.Equal(t, 1, times, "Channel didn't have exactly one message on the queue") +} diff --git a/agent/utils/ttime/generate_mocks.go b/agent/utils/ttime/generate_mocks.go new file mode 100644 index 00000000000..a89f0acd682 --- /dev/null +++ b/agent/utils/ttime/generate_mocks.go @@ -0,0 +1,16 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package ttime + +//go:generate mockgen -destination=mocks/time_mocks.go -copyright_file=../../../scripts/copyright_file github.com/aws/amazon-ecs-agent/agent/utils/ttime Time,Timer diff --git a/agent/utils/ttime/mocks/time_mocks.go b/agent/utils/ttime/mocks/time_mocks.go new file mode 100644 index 00000000000..f59f1c8b409 --- /dev/null +++ b/agent/utils/ttime/mocks/time_mocks.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/amazon-ecs-agent/agent/utils/ttime (interfaces: Time,Timer) + +// Package mock_ttime is a generated GoMock package. 
+package mock_ttime + +import ( + reflect "reflect" + time "time" + + ttime "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + gomock "github.com/golang/mock/gomock" +) + +// MockTime is a mock of Time interface +type MockTime struct { + ctrl *gomock.Controller + recorder *MockTimeMockRecorder +} + +// MockTimeMockRecorder is the mock recorder for MockTime +type MockTimeMockRecorder struct { + mock *MockTime +} + +// NewMockTime creates a new mock instance +func NewMockTime(ctrl *gomock.Controller) *MockTime { + mock := &MockTime{ctrl: ctrl} + mock.recorder = &MockTimeMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTime) EXPECT() *MockTimeMockRecorder { + return m.recorder +} + +// After mocks base method +func (m *MockTime) After(arg0 time.Duration) <-chan time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "After", arg0) + ret0, _ := ret[0].(<-chan time.Time) + return ret0 +} + +// After indicates an expected call of After +func (mr *MockTimeMockRecorder) After(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "After", reflect.TypeOf((*MockTime)(nil).After), arg0) +} + +// AfterFunc mocks base method +func (m *MockTime) AfterFunc(arg0 time.Duration, arg1 func()) ttime.Timer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AfterFunc", arg0, arg1) + ret0, _ := ret[0].(ttime.Timer) + return ret0 +} + +// AfterFunc indicates an expected call of AfterFunc +func (mr *MockTimeMockRecorder) AfterFunc(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterFunc", reflect.TypeOf((*MockTime)(nil).AfterFunc), arg0, arg1) +} + +// Now mocks base method +func (m *MockTime) Now() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Now") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// Now indicates an expected call of Now +func (mr 
*MockTimeMockRecorder) Now() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Now", reflect.TypeOf((*MockTime)(nil).Now)) +} + +// Sleep mocks base method +func (m *MockTime) Sleep(arg0 time.Duration) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Sleep", arg0) +} + +// Sleep indicates an expected call of Sleep +func (mr *MockTimeMockRecorder) Sleep(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sleep", reflect.TypeOf((*MockTime)(nil).Sleep), arg0) +} + +// MockTimer is a mock of Timer interface +type MockTimer struct { + ctrl *gomock.Controller + recorder *MockTimerMockRecorder +} + +// MockTimerMockRecorder is the mock recorder for MockTimer +type MockTimerMockRecorder struct { + mock *MockTimer +} + +// NewMockTimer creates a new mock instance +func NewMockTimer(ctrl *gomock.Controller) *MockTimer { + mock := &MockTimer{ctrl: ctrl} + mock.recorder = &MockTimerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimer) EXPECT() *MockTimerMockRecorder { + return m.recorder +} + +// Reset mocks base method +func (m *MockTimer) Reset(arg0 time.Duration) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reset", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Reset indicates an expected call of Reset +func (mr *MockTimerMockRecorder) Reset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockTimer)(nil).Reset), arg0) +} + +// Stop mocks base method +func (m *MockTimer) Stop() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Stop indicates an expected call of Stop +func (mr *MockTimerMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", 
reflect.TypeOf((*MockTimer)(nil).Stop)) +} diff --git a/agent/utils/ttime/ttime.go b/agent/utils/ttime/ttime.go new file mode 100644 index 00000000000..485aed7134f --- /dev/null +++ b/agent/utils/ttime/ttime.go @@ -0,0 +1,60 @@ +// Package ttime implements a testable alternative to the Go "time" package. +package ttime + +import "time" + +// Time represents an implementation for this package's methods +type Time interface { + Now() time.Time + Sleep(d time.Duration) + After(d time.Duration) <-chan time.Time + AfterFunc(d time.Duration, f func()) Timer +} + +type Timer interface { + Reset(d time.Duration) bool + Stop() bool +} + +// DefaultTime is a Time that behaves normally +type DefaultTime struct{} + +var _time Time = &DefaultTime{} + +// Now returns the current time +func (*DefaultTime) Now() time.Time { + return time.Now() +} + +// Sleep sleeps for the given duration +func (*DefaultTime) Sleep(d time.Duration) { + time.Sleep(d) +} + +// After sleeps for the given duration and then writes to to the returned channel +func (*DefaultTime) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// AfterFunc waits for the duration to elapse and then calls f in its own +// goroutine. It returns a Timer that can be used to cancel the call using its +// Stop method. +func (*DefaultTime) AfterFunc(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) +} + +// SetTime configures what 'Time' implementation to use for each of the +// package-level methods. +func SetTime(t Time) { + _time = t +} + +// Now returns the implementation's current time +func Now() time.Time { + return _time.Now() +} + +// Since returns the time different from Now and the given time t +func Since(t time.Time) time.Duration { + return _time.Now().Sub(t) +} diff --git a/agent/utils/utils.go b/agent/utils/utils.go new file mode 100644 index 00000000000..3cbb8ccf2a4 --- /dev/null +++ b/agent/utils/utils.go @@ -0,0 +1,236 @@ +// Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "crypto/rand" + "encoding/binary" + "encoding/hex" + "fmt" + "io/ioutil" + "math" + "math/big" + "path/filepath" + "reflect" + "strconv" + "strings" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + + "github.com/pkg/errors" +) + +func DefaultIfBlank(str string, default_value string) string { + if len(str) == 0 { + return default_value + } + return str +} + +func ZeroOrNil(obj interface{}) bool { + value := reflect.ValueOf(obj) + if !value.IsValid() { + return true + } + if obj == nil { + return true + } + switch value.Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return value.Len() == 0 + } + zero := reflect.Zero(reflect.TypeOf(obj)) + if !value.Type().Comparable() { + return false + } + if obj == zero.Interface() { + return true + } + return false +} + +// SlicesDeepEqual checks if slice1 and slice2 are equal, disregarding order. 
+func SlicesDeepEqual(slice1, slice2 interface{}) bool { + s1 := reflect.ValueOf(slice1) + s2 := reflect.ValueOf(slice2) + + if s1.Len() != s2.Len() { + return false + } + if s1.Len() == 0 { + return true + } + + s2found := make([]int, s2.Len()) +OuterLoop: + for i := 0; i < s1.Len(); i++ { + s1el := s1.Slice(i, i+1) + for j := 0; j < s2.Len(); j++ { + if s2found[j] == 1 { + // We already counted this s2 element + continue + } + s2el := s2.Slice(j, j+1) + if reflect.DeepEqual(s1el.Interface(), s2el.Interface()) { + s2found[j] = 1 + continue OuterLoop + } + } + // Couldn't find something unused equal to s1 + return false + } + return true +} + +func RandHex() string { + randInt, _ := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + out := make([]byte, 10) + binary.PutVarint(out, randInt.Int64()) + return hex.EncodeToString(out) +} + +func Strptr(s string) *string { + return &s +} + +// Uint16SliceToStringSlice converts a slice of type uint16 to a slice of type +// *string. It uses strconv.Itoa on each element +func Uint16SliceToStringSlice(slice []uint16) []*string { + stringSlice := make([]*string, len(slice)) + for i, el := range slice { + str := strconv.Itoa(int(el)) + stringSlice[i] = &str + } + return stringSlice +} + +func StrSliceEqual(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + + for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + return false + } + } + return true +} + +func StrSliceContains(strs []string, s string) bool { + for _, a := range strs { + if a == s { + return true + } + } + return false +} + +func ParseBool(str string, default_ bool) bool { + res, err := strconv.ParseBool(strings.TrimSpace(str)) + if err != nil { + return default_ + } + return res +} + +// IsAWSErrorCodeEqual returns true if the err implements Error +// interface of awserr and it has the same error code as +// the passed in error code. 
+func IsAWSErrorCodeEqual(err error, code string) bool { + awsErr, ok := err.(awserr.Error) + return ok && awsErr.Code() == code +} + +// MapToTags converts a map to a slice of tags. +func MapToTags(tagsMap map[string]string) []*ecs.Tag { + tags := make([]*ecs.Tag, 0) + if tagsMap == nil { + return tags + } + + for key, value := range tagsMap { + tags = append(tags, &ecs.Tag{ + Key: aws.String(key), + Value: aws.String(value), + }) + } + + return tags +} + +// SearchStrInDir searches the files in directory for specific content +func SearchStrInDir(dir, filePrefix, content string) error { + logfiles, err := ioutil.ReadDir(dir) + if err != nil { + return fmt.Errorf("Error reading the directory, err %v", err) + } + + var desiredFile string + found := false + + for _, file := range logfiles { + if strings.HasPrefix(file.Name(), filePrefix) { + desiredFile = file.Name() + if ZeroOrNil(desiredFile) { + return fmt.Errorf("File with prefix: %v does not exist", filePrefix) + } + + data, err := ioutil.ReadFile(filepath.Join(dir, desiredFile)) + if err != nil { + return fmt.Errorf("Failed to read file, err: %v", err) + } + + if strings.Contains(string(data), content) { + found = true + break + } + } + } + + if !found { + return fmt.Errorf("Could not find the content: %v in the file: %v", content, desiredFile) + } + + return nil +} + +// GetTaskID retrieves the task ID from task ARN. +func GetTaskID(taskARN string) (string, error) { + _, err := arn.Parse(taskARN) + if err != nil { + return "", errors.Errorf("failed to get task id: task arn format invalid: %s", taskARN) + } + fields := strings.Split(taskARN, "/") + if len(fields) < 2 { + return "", errors.Errorf("failed to get task id: task arn format invalid: %s", taskARN) + } + return fields[len(fields)-1], nil +} + +// GetENIAttachmentId retrieves the attachment ID from eni attachment ARN. 
+func GetENIAttachmentId(eniAttachmentArn string) (string, error) { + _, err := arn.Parse(eniAttachmentArn) + if err != nil { + return "", errors.Errorf("failed to get eni attachment id: eni attachment arn format invalid: %s", eniAttachmentArn) + } + fields := strings.Split(eniAttachmentArn, "/") + if len(fields) < 2 { + return "", errors.Errorf("failed to get eni attachment id: eni attachment arn invalid: %s", eniAttachmentArn) + } + return fields[len(fields)-1], nil +} diff --git a/agent/utils/utils_linux.go b/agent/utils/utils_linux.go new file mode 100644 index 00000000000..1b3f590623c --- /dev/null +++ b/agent/utils/utils_linux.go @@ -0,0 +1,18 @@ +// +build linux + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +func GetCanonicalPath(path string) string { return path } diff --git a/agent/utils/utils_linux_test.go b/agent/utils/utils_linux_test.go new file mode 100644 index 00000000000..088c3cb8537 --- /dev/null +++ b/agent/utils/utils_linux_test.go @@ -0,0 +1,26 @@ +// +build linux,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetCanonicalPath(t *testing.T) { + assert.Equal(t, "test", GetCanonicalPath("test")) +} diff --git a/agent/utils/utils_test.go b/agent/utils/utils_test.go new file mode 100644 index 00000000000..ce2057cdfa6 --- /dev/null +++ b/agent/utils/utils_test.go @@ -0,0 +1,208 @@ +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "encoding/json" + "errors" + "sort" + "testing" + + "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDefaultIfBlank(t *testing.T) { + const defaultValue = "a boring default" + const specifiedValue = "new value" + result := DefaultIfBlank(specifiedValue, defaultValue) + assert.Equal(t, specifiedValue, result) + + result = DefaultIfBlank("", defaultValue) + assert.Equal(t, defaultValue, result) +} + +type dummyStruct struct { + // no contents +} + +func (d dummyStruct) MarshalJSON([]byte, error) { + json.Marshal(nil) +} + +func TestZeroOrNil(t *testing.T) { + type ZeroTest struct { + testInt int + TestStr string + testNilJson dummyStruct + } + + var strMap map[string]string + + testCases := []struct { + param interface{} + expected bool + name string + }{ + {nil, true, "Nil is nil"}, + {0, true, "0 is 0"}, + {"", true, "\"\" is the string zerovalue"}, + {ZeroTest{}, true, "ZeroTest zero-value should be zero"}, + {ZeroTest{TestStr: "asdf"}, false, "ZeroTest with a field populated isn't zero"}, + {ZeroTest{testNilJson: dummyStruct{}}, true, "nil is nil"}, + {1, false, "1 is not 0"}, + {[]uint16{1, 2, 3}, false, "[1,2,3] is not zero"}, + {[]uint16{}, true, "[] is zero"}, + {struct{ uncomparable []uint16 }{uncomparable: []uint16{1, 2, 3}}, false, "Uncomparable structs are never zero"}, + {struct{ uncomparable []uint16 }{uncomparable: nil}, false, "Uncomparable structs are never zero"}, + {strMap, true, "map[string]string is zero or nil"}, + {make(map[string]string), true, "empty map[string]string is zero or nil"}, + {map[string]string{"foo": "bar"}, false, "map[string]string{foo:bar} is not zero or nil"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, ZeroOrNil(tc.param), tc.name) + }) + } + +} + +func 
TestSlicesDeepEqual(t *testing.T) { + testCases := []struct { + left []string + right []string + expected bool + name string + }{ + {[]string{}, []string{}, true, "Two empty slices"}, + {[]string{"cat"}, []string{}, false, "One empty slice"}, + {[]string{}, []string{"cat"}, false, "Another empty slice"}, + {[]string{"cat"}, []string{"cat"}, true, "Two slices with one element each"}, + {[]string{"cat", "dog", "cat"}, []string{"dog", "cat", "cat"}, true, "Two slices with multiple elements"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, SlicesDeepEqual(tc.left, tc.right)) + }) + } +} + +func TestParseBool(t *testing.T) { + truthyStrings := []string{"true", "1", "t", "true\r", "true ", "true \r"} + falsyStrings := []string{"false", "0", "f", "false\r", "false ", "false \r"} + neitherStrings := []string{"apple", " ", "\r", "orange", "maybe"} + + for _, str := range truthyStrings { + t.Run("truthy", func(t *testing.T) { + assert.True(t, ParseBool(str, false), "Truthy string should be truthy") + assert.True(t, ParseBool(str, true), "Truthy string should be truthy (regardless of default)") + }) + } + + for _, str := range falsyStrings { + t.Run("falsy", func(t *testing.T) { + assert.False(t, ParseBool(str, false), "Falsy string should be falsy") + assert.False(t, ParseBool(str, true), "Falsy string should be falsy (regardless of default)") + }) + } + + for _, str := range neitherStrings { + t.Run("defaults", func(t *testing.T) { + assert.False(t, ParseBool(str, false), "Should default to false") + assert.True(t, ParseBool(str, true), "Should default to true") + }) + } +} + +func TestIsAWSErrorCodeEqual(t *testing.T) { + testcases := []struct { + name string + err error + res bool + }{ + { + name: "Happy Path", + err: awserr.New(ecs.ErrCodeInvalidParameterException, "errMsg", errors.New("err")), + res: true, + }, + { + name: "Wrong Error Code", + err: awserr.New("errCode", "errMsg", errors.New("err")), + res: 
false, + }, + { + name: "Wrong Error Type", + err: errors.New("err"), + res: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.res, IsAWSErrorCodeEqual(tc.err, ecs.ErrCodeInvalidParameterException)) + }) + } +} + +func TestMapToTags(t *testing.T) { + tagKey1 := "tagKey1" + tagKey2 := "tagKey2" + tagValue1 := "tagValue1" + tagValue2 := "tagValue2" + tagsMap := map[string]string{ + tagKey1: tagValue1, + tagKey2: tagValue2, + } + tags := MapToTags(tagsMap) + assert.Equal(t, 2, len(tags)) + sort.Slice(tags, func(i, j int) bool { + return aws.StringValue(tags[i].Key) < aws.StringValue(tags[j].Key) + }) + + assert.Equal(t, aws.StringValue(tags[0].Key), tagKey1) + assert.Equal(t, aws.StringValue(tags[0].Value), tagValue1) + assert.Equal(t, aws.StringValue(tags[1].Key), tagKey2) + assert.Equal(t, aws.StringValue(tags[1].Value), tagValue2) +} + +func TestNilMapToTags(t *testing.T) { + assert.Zero(t, len(MapToTags(nil))) +} + +func TestGetTaskID(t *testing.T) { + taskARN := "arn:aws:ecs:us-west-2:1234567890:task/test-cluster/abc" + id, err := GetTaskID(taskARN) + require.NoError(t, err) + assert.Equal(t, "abc", id) + + _, err = GetTaskID("invalid") + assert.Error(t, err) +} + +func TestGetENIAttachmentId(t *testing.T) { + attachmentArn := "arn:aws:ecs:us-west-2:1234567890:attachment/abc" + id, err := GetENIAttachmentId(attachmentArn) + require.NoError(t, err) + assert.Equal(t, "abc", id) + + _, err = GetENIAttachmentId("invalid") + assert.Error(t, err) +} diff --git a/agent/utils/utils_unsupported.go b/agent/utils/utils_unsupported.go new file mode 100644 index 00000000000..fda26f31697 --- /dev/null +++ b/agent/utils/utils_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +func GetCanonicalPath(path string) string { return path } diff --git a/agent/utils/utils_windows.go b/agent/utils/utils_windows.go new file mode 100644 index 00000000000..d63f671d15d --- /dev/null +++ b/agent/utils/utils_windows.go @@ -0,0 +1,81 @@ +// +build windows + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "errors" + "os" + "path/filepath" + "regexp" + "strings" +) + +func GetCanonicalPath(path string) string { + lowercasedPath := strings.ToLower(path) + // if the path is a bare drive like "d:", don't filepath.Clean it because it will add a '.'. 
+ // this is to fix the case where mounting from D:\ to D: is supported by docker but not ecs + if isBareDrive(lowercasedPath) { + return lowercasedPath + } + + if isNamedPipesPath(lowercasedPath) { + return lowercasedPath + } + + return filepath.Clean(lowercasedPath) +} + +func isBareDrive(path string) bool { + if filepath.VolumeName(path) == path { + return true + } + + return false +} + +func isNamedPipesPath(path string) bool { + matched, err := regexp.MatchString(`\\{2}\.[\\]pipe[\\].+`, path) + + if err != nil { + return false + } + + return matched +} + +// FindUnusedDriveLetter is used to search for an available drive letter on the container instance. +// Reference: https://golang.org/src/os/os_windows_test.go +func FindUnusedDriveLetter() (string, error) { + // Do not use A: and B:, because they are reserved for floppy drive. + // Do not use C:, because it is normally used for main drive. + for l := 'Z'; l >= 'D'; l-- { + p := string(l) + `:\` + if IsAvailableDriveLetter(p) { + return p, nil + } + } + return "", errors.New("could not find an available drive letter to mount fsxwindowsfileserver resource on the container instance") +} + +var DriveLetterAvailable = IsAvailableDriveLetter + +func IsAvailableDriveLetter(hostPath string) bool { + _, err := os.Stat(hostPath) + if os.IsNotExist(err) { + return true + } + return false +} diff --git a/agent/utils/utils_windows_test.go b/agent/utils/utils_windows_test.go new file mode 100644 index 00000000000..bdb574b43ef --- /dev/null +++ b/agent/utils/utils_windows_test.go @@ -0,0 +1,35 @@ +// +build windows,unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. 
This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFindUnusedDriveLetter(t *testing.T) { + DriveLetterAvailable = func(string) bool { + return true + } + + defer func() { + DriveLetterAvailable = IsAvailableDriveLetter + }() + driveLetter, err := FindUnusedDriveLetter() + assert.NoError(t, err) + assert.Equal(t, `Z:\`, driveLetter) +} diff --git a/agent/utils/uuid.go b/agent/utils/uuid.go new file mode 100644 index 00000000000..10a2a6287c8 --- /dev/null +++ b/agent/utils/uuid.go @@ -0,0 +1,54 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package utils + +import ( + "github.com/pborman/uuid" +) + +// UUIDProvider wraps 'uuid' methods for testing +type UUIDProvider interface { + New() string +} + +// dynamicUUIDProvider is a uuid provider used in run time +type dynamicUUIDProvider struct { +} + +// NewDynamicUUIDProvider returns a new dynamicUUIDProvider +func NewDynamicUUIDProvider() UUIDProvider { + return &dynamicUUIDProvider{} +} + +// New returns a new random uuid +func (*dynamicUUIDProvider) New() string { + return uuid.New() +} + +// staticUUIDProvider is a uuid provider used in testing +type staticUUIDProvider struct { + staticID string +} + +// NewStaticUUIDProvider returns a new staticUUIDProvider +func NewStaticUUIDProvider(staticID string) UUIDProvider { + return &staticUUIDProvider{ + staticID: staticID, + } +} + +// New returns a static uuid +func (s *staticUUIDProvider) New() string { + return s.staticID +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/.gitignore b/agent/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 00000000000..b883f1fdc6d --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/agent/vendor/github.com/Microsoft/go-winio/LICENSE b/agent/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 00000000000..b8b569d7746 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/agent/vendor/github.com/Microsoft/go-winio/README.md b/agent/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000000..5680010575b --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. 
diff --git a/agent/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/agent/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/agent/vendor/github.com/Microsoft/go-winio/backup.go b/agent/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000000..2be34af4310 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. 
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. 
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
+func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. 
+func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/ea.go b/agent/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 00000000000..b37e930d6af --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) 
+ if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/file.go b/agent/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000000..4334ff1cbee --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,307 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + 
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
+func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. 
+func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. 
+ timeoutIO() + } + return nil +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go b/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000000..b1d60abb836 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,60 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. 
+type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/pipe.go b/agent/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000000..82cbe7af45e --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,424 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) 
(ptr uintptr) = LocalAlloc + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). 
+func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. 
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). 
+ if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + sa := &syscall.SecurityAttributes{} + sa.Length = uint32(unsafe.Sizeof(*sa)) + if securityDescriptor != nil { + len := uint32(len(securityDescriptor)) + sa.SecurityDescriptor = localAlloc(0, len) + defer localFree(sa.SecurityDescriptor) + copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. 
+ ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size the input buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. 
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + // Immediately open and then close a client handle so that the named pipe is + // created but not currently accepting connections. + h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/privilege.go 
b/agent/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000000..9c83d36fe53 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. 
+type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
+func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p, _ := windows.GetCurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + binary.Write(&b, binary.LittleEndian, p) + binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := impersonateSelf(securityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), 
syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/reparse.go b/agent/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 00000000000..fc1ee4d3a3e --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. 
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/sd.go b/agent/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000000..db1b370a1b5 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem 
uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := 
make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/syscall.go b/agent/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000000..20d64cf41d0 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 00000000000..3f527639a47 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,520 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf 
= modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = 
syscall.EINVAL + } + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, 
uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 
2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), 
uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + 
} + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), 
uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/agent/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/agent/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000000..1c496742903 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". 
If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. 
+// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000000..99849c0e19c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. 
+// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. 
+ OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. 
This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000000..9cf7eaf4007 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. 
It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. 
+func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
+func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. 
We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 00000000000..1a3d106d5c1 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000000..142a7a01c52 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 00000000000..a4eb6a7f43a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 00000000000..710eb432f85 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
+func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, "<binary> len %d", v.Len()) + break + } + + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + 
buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "<invalid value>") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 00000000000..645df2450fc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + for i := 0; i < v.Type().NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue // ignore unexported fields + } + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + continue // ignore unset fields + } + + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(ft.Name + ": ") + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("<sensitive>") + } else { + stringValue(fv, indent+2, buf) + } + + buf.WriteString(",\n") + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, 
buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 00000000000..03334d69207 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,97 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + PartitionID string + Endpoint string + SigningRegion string + SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overridden based on metadata the + // service has. + SigningNameDerived bool +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. 
+type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. 
+func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 00000000000..9f6af19dd45 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+ MaxThrottleDelay time.Duration +} + +const ( + // DefaultRetryerMaxNumRetries sets maximum number of retries + DefaultRetryerMaxNumRetries = 3 + + // DefaultRetryerMinRetryDelay sets minimum retry delay + DefaultRetryerMinRetryDelay = 30 * time.Millisecond + + // DefaultRetryerMinThrottleDelay sets minimum delay when throttled + DefaultRetryerMinThrottleDelay = 500 * time.Millisecond + + // DefaultRetryerMaxRetryDelay sets maximum retry delay + DefaultRetryerMaxRetryDelay = 300 * time.Second + + // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled + DefaultRetryerMaxThrottleDelay = 300 * time.Second +) + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// setRetryerDefaults sets the default values of the retryer if not set +func (d *DefaultRetryer) setRetryerDefaults() { + if d.MinRetryDelay == 0 { + d.MinRetryDelay = DefaultRetryerMinRetryDelay + } + if d.MaxRetryDelay == 0 { + d.MaxRetryDelay = DefaultRetryerMaxRetryDelay + } + if d.MinThrottleDelay == 0 { + d.MinThrottleDelay = DefaultRetryerMinThrottleDelay + } + if d.MaxThrottleDelay == 0 { + d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay + } +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + + // if number of max retries is zero, no retries will be performed. 
+ if d.NumMaxRetries == 0 { + return 0 + } + + // Sets default value for retryer members + d.setRetryerDefaults() + + // minDelay is the minimum retryer delay + minDelay := d.MinRetryDelay + + var initialDelay time.Duration + + isThrottle := r.IsErrorThrottle() + if isThrottle { + if delay, ok := getRetryAfterDelay(r); ok { + initialDelay = delay + } + minDelay = d.MinThrottleDelay + } + + retryCount := r.RetryCount + + // maxDelay the maximum retryer delay + maxDelay := d.MaxRetryDelay + + if isThrottle { + maxDelay = d.MaxThrottleDelay + } + + var delay time.Duration + + // Logic to cap the retry count based on the minDelay provided + actualRetryCount := int(math.Log2(float64(minDelay))) + 1 + if actualRetryCount < 63-retryCount { + delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay) + if delay > maxDelay { + delay = getJitterDelay(maxDelay / 2) + } + } else { + delay = getJitterDelay(maxDelay / 2) + } + return delay + initialDelay +} + +// getJitterDelay returns a jittered delay for retry +func getJitterDelay(duration time.Duration) time.Duration { + return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + + // ShouldRetry returns false if number of max retries is 0. 
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000000..8958c32d4e9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,194 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. 
+ Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 00000000000..0c48f72e08e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,14 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000000..881d575f010 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. 
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/config.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 00000000000..3b809e8478c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,587 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. 
+ Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `nil` or the value to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. 
+ // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. 
See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disable 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. 
This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDisableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). 
+// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. 
+func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. 
+func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if 
other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 00000000000..2866f9a7fb9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. 
+ Value(key interface{}) interface{} +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 00000000000..3718b26e101 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 00000000000..2f9446333a6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package aws + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
+func BackgroundContext() Context { + return context.BackgroundCtx +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 00000000000..9c29f29af17 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 00000000000..304fd156120 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. 
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 00000000000..4e076c1837a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. 
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. 
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. 
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. 
+func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. +func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 
00000000000..d95a5eb5408 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,232 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. 
Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. 
+ // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. 
+ r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. 
+ if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000000..7d50b1557cc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. 
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000000..ab69c7a6f38 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. 
+var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 00000000000..3ad1e798df8 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. 
If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. 
+func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 00000000000..5852b264870 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 00000000000..388b2154182 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. 
+// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 00000000000..8152a864add --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. 
Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 00000000000..4356edb3d5d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 00000000000..a880a3de8fe --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,383 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. 
+// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. 
+var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. 
+type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + // Passed in expirations should have the monotonic clock values stripped. + // This ensures time comparisons will be based on wall-time. + e.expiration = expiration.Round(0) + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. 
+func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sf singleflight.Group + + m sync.RWMutex + creds Value + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. 
+ select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds = creds + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). 
+// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.creds = Value{} +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpiredLocked(c.creds) +} + +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), + nil) + } + if c.creds == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000000..92af5b7250a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,188 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. 
+// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(ctx, m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request, an error will be returned.
+func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credential from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New(request.ErrCodeSerialization,
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 00000000000..785f30d8e6c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. 
+const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. 
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + 
req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 00000000000..54c5cf7333f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. 
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 00000000000..7fc91d9d204 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 00000000000..e6248360029 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. 
+NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. 
+ + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong 
version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time in minutes that the
+	// credentials will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = int(8 * sdkio.KibiByte)
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. +func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. 
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return 
output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 00000000000..22b5c5d9f32 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. 
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. 
+func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. 
+func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 00000000000..cbba1e3d560 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. 
Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 00000000000..6846ef6f808 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,363 @@ +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. 
But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with a MFA token you can either specify a MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will be not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. 
+ svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfing the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. 
+// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. 
+ RoleSessionName string + + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. 
You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. 
+ TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // MaxJitterFrac reduces the effective Duration of each credential requested + // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must + // have a value between 0 and 1. Any other value may lead to expected behavior. + // With a MaxJitterFrac value of 0, default) will no jitter will be used. + // + // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the + // AssumeRole call will be made with an arbitrary Duration between 27m and + // 30m. + // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. 
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. 
+ p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000000..cefe2a76d4d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,154 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. 
+var now = time.Now + +// TokenFetcher shuold return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType + + // Duration the STS credentials will be valid for. Truncated to seconds. + // If unset, the assumed role will use AssumeRoleWithWebIdentity's default + // expiry duration. See + // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity + // for more information. + Duration time.Duration + + // The amount of time the credentials will be refreshed before they expire. + // This is useful refresh credentials before they expire to reduce risk of + // using credentials as they expire. If unset, will default to no expiry + // window. + ExpiryWindow time.Duration + + client stsiface.STSAPI + + tokenFetcher TokenFetcher + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path. 
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFetcher: tokenFetcher, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) + if err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. 
This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + + var duration *int64 + if p.Duration != 0 { + duration = aws.Int64(int64(p.Duration / time.Second)) + } + + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + DurationSeconds: duration, + }) + + req.SetContext(ctx) + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 00000000000..25a66d1dda2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. 
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) +// +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. 
+// r := csm.Get() +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +package csm diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 00000000000..4b19e2800e3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,89 @@ +package csm + +import ( + "fmt" + "strings" + "sync" +) + +var ( + lock sync.Mutex +) + +const ( + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" +) + +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. 
+// +// r, err := csm.Start("clientID", "127.0.0.1:31000") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000000..5bacc791a1e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string 
`json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + 
m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000000..82a3e345e93 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 00000000000..54a99280ce9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + 
requestException +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000000..835bcd49cba --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + 
m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + 
rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but if +// called concurrently with Continue can lead to unexpected state. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurrently with Pause can lead to unexpected state. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// InjectHandlers will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). 
+// +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) + + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue return 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 00000000000..23bb639e018 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,207 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
+package defaults + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. 
+func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: CredProviders(cfg, handlers), + }) +} + +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. 
This allows that default chain to be +// automatically updated +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + +const ( + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = 
fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000000..ca0ee1dcc78 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. 
+// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 00000000000..4fcb6161848 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. 
+// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 00000000000..69fa63dc08f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,250 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. 
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/latest/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. 
If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. 
+func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 00000000000..8f35b3464ba --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,245 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). 
+// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. 
Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS +// client is able to communicate with the EC2 IMDS API. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 1 * time.Second, + } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) + } + + if u, err := url.Parse(endpoint); err == nil { + // Remove path from the endpoint since it will be added by requests. + // This is an artifact of the SDK adding `/latest` to the endpoint for + // EC2 IMDS, but this is now moved to the operation definition. 
+ u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. 
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error 
= awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 00000000000..4b29f190bf9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,93 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. 
+type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 00000000000..654fb1ad52d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,216 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... 
inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRegionalS3(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. 
+ if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, 
got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 00000000000..fc8c1f92921 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,10081 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). 
+ EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": 
endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "airflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: 
"api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-dkr-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-dkr-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-dkr-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "fips-dkr-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + 
}, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "app-integrations": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + 
"sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + 
"af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": 
service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "chime.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + 
"fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar-connections": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, 
+ "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "contact-lens": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": 
endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + 
Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": 
endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": 
endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": 
endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: 
"fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", 
+ }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "fips-us-east-2": endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + }, + }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "identitystore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ 
+ Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, 
+ "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lookoutvision": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: 
endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + 
"ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ 
+ + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "rekognition-fips.ca-central-1": endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + 
Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: 
[]string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "ca-central-1-fips": endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: 
"s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: 
"secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: 
[]string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: 
"servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, 
+ }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: 
"snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": 
endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: 
credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": 
endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: 
"waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-af-south-1": endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", 
+ }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"fips-us-east-1": endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", 
"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + 
Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + 
Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": 
endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: 
"cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: 
"cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + 
"acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "fips-dkr-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-dkr-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: 
"batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + 
"fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ebs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: 
endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ 
+ Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "dataplane-us-gov-east-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "dataplane-us-gov-west-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": 
service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + 
"us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: 
credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"fips-us-gov-west-1": endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + 
+ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": 
endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + 
"support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + 
"us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + 
}, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000000..ca8fc828e15 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. 
+ ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. 
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. 
+ PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. 
+) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 00000000000..84316b92c05 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. 
To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 00000000000..ca956e5f12a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,564 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. 
+ UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unknown and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. 
+type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. 
+ LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. 
+func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. 
+// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id, dnsSuffix string + p *partition +} + +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. 
+// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned may look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := make(map[string]Region, len(p.p.Regions)) + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := make(map[string]Service, len(p.p.Services)) + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. 
+type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. 
+func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The endpoint partition + PartitionID string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. 
+ SigningName string + + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. 
Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 00000000000..df75e899adb --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 00000000000..773613722f4 --- /dev/null +++ 
b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,351 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. 
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. 
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + + e, hasEndpoint := s.endpointForRegion(region) + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + 
return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + u := 
strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 00000000000..0fdfcc56e05 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) 
+ + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, 
",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. 
+ {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . 
-}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 00000000000..fa06f7a8f8b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000000..91a6f277a7e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. 
+// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 00000000000..6ed15b2ecc2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. 
+ LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) 
+} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 00000000000..2ba3c56c11f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "use of closed network connection") || + strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 00000000000..e819ab6c0e8 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,343 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. 
+type Handlers struct { + Validate HandlerList + Build HandlerList + BuildStream HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns a copy of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.BuildStream.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. 
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) 
+ return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. 
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. 
+func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. 
+func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 00000000000..79f79602b03 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000000..9370fa50c38 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. 
+func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 00000000000..d597c6ead55 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,698 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
+ ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. 
+ ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. 
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + if retryer == nil { + retryer = noOpRetryer{} + } + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. 
The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// they were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always return a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// an in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. 
+func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Body's current offset so retries will start from the same + // initial position. 
+ r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. 
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. 
+func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. 
+ // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
+ r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. 
IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 00000000000..e36e468b7c6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. 
+// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 00000000000..de1292f45a2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000000..a7365cd1e46 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000000..307fa0705be --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. 
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000000..64784e16f3d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. 
+ EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. 
+// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 00000000000..752ae47f845 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,309 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. +type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. 
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } + cfg.Retryer = retryer + return cfg + +} + +// noOpRetryer is a internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. 
+var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its 
Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. 
+func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000000..09a44eb987a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. 
+func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. 
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 00000000000..8630683f317 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. 
The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. 
+ Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. 
+func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents an invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 00000000000..4601f883cc5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. 
+type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. 
+type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. 
+// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. 
+type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 00000000000..ea9ebb6f6a2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 00000000000..fec39dfc126 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 00000000000..1c5a5391e65 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..fe6dac1f476 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,267 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. 
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. +var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. 
+ creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. + creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch 
sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. 
+type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 00000000000..cc461bd3230 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,262 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). 
If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. 
Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. 
+ +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. 
All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. 
+ + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. + +The endpoint of the EC2 IMDS client can be configured via the environment +variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +Session. See Options.EC2IMDSEndpoint for more details. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + +If using an URL with an IPv6 address literal, the IPv6 address +component must be enclosed in square brackets. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + +The custom EC2 IMDS endpoint can also be specified via the Session options. 
+ + sess, err := session.NewSessionWithOptions(session.Options{ + EC2IMDSEndpoint: "http://[::1]", + }) +*/ +package session diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 00000000000..d67c261d74f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,355 @@ +package session + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. 
+ // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. 
If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled *bool + CSMPort string + CSMHost string + CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool + + // Specifies the alternative endpoint to use for EC2 IMDS. 
+ // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } + ec2IMDSEndpointEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. 
+func loadEnvConfig() (envConfig, error) { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being 
enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/session.go 
b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 00000000000..6430a7f1526 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,791 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. 
+// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers + + options Options +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg, envErr := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) 
+ s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(envCfg, cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. 
Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). 
+ // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. 
+ AssumeRoleTokenProvider func() (string, error) + + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers + + // Allows specifying a custom endpoint to be used by the EC2 IMDS client + // when making requests to the EC2 IMDS API. The must endpoint value must + // include protocol prefix. + // + // If unset, will the EC2 IMDS client will use its default endpoint. + // + // Can also be specified via the environment variable, + // AWS_EC2_METADATA_SERVICE_ENDPOINT. 
+ // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + // + // If using an URL with an IPv6 address literal, the IPv6 address + // component must be enclosed in square brackets. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, 
fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } + return resolver.EndpointFor(service, region) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) 
+ if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + + if len(envCfg.EC2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint) + } + + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + options: Options{ + EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint, + }, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). 
+ cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + options: opts, + } + + initHandlers(s) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err 
!= nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. + t = getCABundleTransport() + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if 
cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint + if len(ec2IMDSEndpoint) == 0 { + ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint + } + if len(ec2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint) + } + + // Configure credentials if not already set by the user when creating the + // Session. 
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. 
Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. 
+ opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) 
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 00000000000..680805a38ad --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,555 @@ +package session + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + 
s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. 
+ // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. 
The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() + } else { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + } + profiles[profile] = struct{}{} + + if err := cfg.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg + } + + return nil +} + +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: 
fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for the shared config file failed to load. 
+type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. 
+func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
+func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 00000000000..07ea799fbd3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,81 @@ +package v4 + +import ( + "github.com/aws/aws-sdk-go/internal/strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefixFold(value, pattern) { + return true + } + } 
+ return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 00000000000..6aa2ed241bb --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 00000000000..f35fc860b3b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 00000000000..fed5c859ca6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 
00000000000..02cbd97e234 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 00000000000..bd082e9d1f7 --- 
/dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 00000000000..d71f7b3f4fa --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,846 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. 
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. +// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. 
+package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + awsV4Request = "aws4_request" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. 
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. 
+var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. 
Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. 
+// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. 
This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. 
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. 
To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. 
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + 
ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if 
ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := 
ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func hmacSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func hashSHA256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. 
+ _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. 
+ spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/types.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 00000000000..98751ee84f2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,264 @@ +package aws + +import ( + "io" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. 
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. 
+func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. 
+type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. 
+func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/url.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 00000000000..6192b2455b6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 00000000000..0210d2720e7 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. 
+func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/aws/version.go b/agent/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 00000000000..c0e056ea83a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.36.0" diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 00000000000..876dcb3fde2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. +var BackgroundCtx = new(emptyCtx) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 00000000000..e83a99886bc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may 
not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 00000000000..0895d53cbe6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 00000000000..0b76999ba1f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 00000000000..25ce0fe134d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. 
+// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 00000000000..04345a54c20 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 00000000000..91ba2a59dd5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 00000000000..8d462f77e24 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 00000000000..3b0ca7afe3b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. 
+func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 00000000000..582c024ad15 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 00000000000..55fa73ebcf2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,357 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an 
io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. 
+ stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
+func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 00000000000..24df543d38c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err 
= strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) + } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err 
= getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 00000000000..e52ac399f17 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, 
NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 00000000000..a45c0bc5662 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case 
b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000000..8a84c7cbe08 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", 
b[0])) + } + return tok, 1, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 00000000000..45728701931 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 00000000000..7f01cf7c703 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. 
+type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. +func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 00000000000..f82095ba259 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 00000000000..da7a4049cfa --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 00000000000..18f3fe89317 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000000..305999d29be --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if 
!(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. 
+func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 00000000000..94841c32443 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. 
+func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 00000000000..99915f7f777 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. 
+func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 00000000000..7ffb4ae06ff --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. +func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go new file mode 100644 index 00000000000..bf18031a38e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go @@ -0,0 +1,50 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. 
+// +// Supported Access point resource format: +// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} +// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint +// +func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if len(a.Region) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go new file mode 100644 index 00000000000..7a8e46fbdae --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -0,0 +1,74 @@ +package arn + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. 
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if a.Service != "s3" && a.Service != "s3-outposts" { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. 
+type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 00000000000..1e10f8de00b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. 
+// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. 
+type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. +// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 00000000000..e756b2f8733 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,189 @@ +package s3shared + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError struct { + message string + resource arn.Resource + origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, 
e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + 
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewFailedToResolveEndpointError denotes endpoint resolving error +func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access +func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for 
fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate +func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Accelerate but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request +func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack +func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Dual-stack but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go new file mode 100644 index 00000000000..9f70a64ecff --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -0,0 +1,62 @@ +package s3shared + +import ( + 
"strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +// ResourceRequest represents the request and arn resource +type ResourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +// ARN returns the resource ARN +func (r ResourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set +func (r ResourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +// UseFIPS returns true if request config region is FIPS +func (r ResourceRequest) UseFIPS() bool { + return IsFIPS(aws.StringValue(r.Request.Config.Region)) +} + +// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS +func (r ResourceRequest) ResourceConfiguredForFIPS() bool { + return IsFIPS(r.ARN().Region) +} + +// IsCrossPartition returns true if client is configured for another partition, than +// the partition that resource ARN region resolves to. 
+func (r ResourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +// IsCrossRegion returns true if ARN region is different than client configured region +func (r ResourceRequest) IsCrossRegion() bool { + return IsCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +// HasCustomEndpoint returns true if custom client endpoint is provided +func (r ResourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +// IsFIPS returns true if region is a fips region +func IsFIPS(clientRegion string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} + +// IsCrossRegion returns true if request signing region is not same as configured region +func IsCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go new file mode 100644 index 00000000000..0b9b0dfce04 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go @@ -0,0 +1,57 @@ +package s3err + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequestFailure provides additional S3 specific metadata for the request +// failure. +type RequestFailure struct { + awserr.RequestFailure + + hostID string +} + +// NewRequestFailure returns a request failure error decorated with S3 +// specific metadata. 
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure { + return &RequestFailure{RequestFailure: err, hostID: hostID} +} + +func (r RequestFailure) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", + r.StatusCode(), r.RequestID(), r.hostID) + return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} +func (r RequestFailure) String() string { + return r.Error() +} + +// HostID returns the HostID request response value. +func (r RequestFailure) HostID() string { + return r.hostID +} + +// RequestFailureWrapperHandler returns a handler to wrap an +// awserr.RequestFailure with the S3 request ID 2 from the response. +func RequestFailureWrapperHandler() request.NamedHandler { + return request.NamedHandler{ + Name: "awssdk.s3.errorHandler", + Fn: func(req *request.Request) { + reqErr, ok := req.Error.(awserr.RequestFailure) + if !ok || reqErr == nil { + return + } + + hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2") + if req.Error == nil { + return + } + + req.Error = NewRequestFailure(reqErr, hostID) + }, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 00000000000..6c443988bbc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 00000000000..5aa9137e0f9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 
io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 00000000000..e5f005613b7 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000000..44898eed0fd --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000000..810ec7f08b0 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. 
+const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 00000000000..0c9802d8770 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000000..f4651da2da5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read 
provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000000..b1d93a33d48 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 00000000000..38ea61afeaa --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 00000000000..7da8a49ce52 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..ebcbc2b40a3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..14ad0c58911 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. 
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/agent/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 00000000000..e045f38d837 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. 
+func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go new file mode 100644 index 00000000000..e44aaa9f132 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/api.go @@ -0,0 +1,1028 @@ +// +build codegen + +// Package api represents API abstractions for rendering service generated files. +package api + +import ( + "bytes" + "fmt" + "path" + "regexp" + "sort" + "strings" + "text/template" + "unicode" +) + +// SDKImportRoot is the root import path of the SDK. +const SDKImportRoot = "github.com/aws/aws-sdk-go" + +// An API defines a service API's definition. and logic to serialize the definition. 
+type API struct { + Metadata Metadata + Operations map[string]*Operation + Shapes map[string]*Shape + Waiters []Waiter + Documentation string + Examples Examples + SmokeTests SmokeTestSuite + + IgnoreUnsupportedAPIs bool + + // Set to true to avoid removing unused shapes + NoRemoveUnusedShapes bool + + // Set to true to avoid renaming to 'Input/Output' postfixed shapes + NoRenameToplevelShapes bool + + // Set to true to ignore service/request init methods (for testing) + NoInitMethods bool + + // Set to true to ignore String() and GoString methods (for generated tests) + NoStringerMethods bool + + // Set to true to not generate API service name constants + NoConstServiceNames bool + + // Set to true to not generate validation shapes + NoValidataShapeMethods bool + + // Set to true to not generate struct field accessors + NoGenStructFieldAccessors bool + + BaseImportPath string + + initialized bool + imports map[string]bool + name string + path string + + BaseCrosslinkURL string + + HasEventStream bool `json:"-"` + + EndpointDiscoveryOp *Operation + + HasEndpointARN bool `json:"-"` + + HasOutpostID bool `json:"-"` + + HasAccountIdWithARN bool `json:"-"` + + WithGeneratedTypedErrors bool +} + +// A Metadata is the metadata about an API's definition. +type Metadata struct { + APIVersion string + EndpointPrefix string + SigningName string + ServiceAbbreviation string + ServiceFullName string + SignatureVersion string + JSONVersion string + TargetPrefix string + Protocol string + ProtocolSettings ProtocolSettings + UID string + EndpointsID string + ServiceID string + + NoResolveEndpoint bool +} + +// ProtocolSettings define how the SDK should handle requests in the context +// of of a protocol. 
+type ProtocolSettings struct { + HTTP2 string `json:"h2,omitempty"` +} + +// PackageName name of the API package +func (a *API) PackageName() string { + return strings.ToLower(a.StructName()) +} + +// ImportPath returns the client's full import path +func (a *API) ImportPath() string { + return path.Join(a.BaseImportPath, a.PackageName()) +} + +// InterfacePackageName returns the package name for the interface. +func (a *API) InterfacePackageName() string { + return a.PackageName() + "iface" +} + +var stripServiceNamePrefixes = []string{ + "Amazon", + "AWS", +} + +// StructName returns the struct name for a given API. +func (a *API) StructName() string { + if len(a.name) != 0 { + return a.name + } + + name := a.Metadata.ServiceAbbreviation + if len(name) == 0 { + name = a.Metadata.ServiceFullName + } + + name = strings.TrimSpace(name) + + // Strip out prefix names not reflected in service client symbol names. + for _, prefix := range stripServiceNamePrefixes { + if strings.HasPrefix(name, prefix) { + name = name[len(prefix):] + break + } + } + + // Replace all Non-letter/number values with space + runes := []rune(name) + for i := 0; i < len(runes); i++ { + if r := runes[i]; !(unicode.IsNumber(r) || unicode.IsLetter(r)) { + runes[i] = ' ' + } + } + name = string(runes) + + // Title case name so its readable as a symbol. + name = strings.Title(name) + + // Strip out spaces. + name = strings.Replace(name, " ", "", -1) + + a.name = name + return a.name +} + +// UseInitMethods returns if the service's init method should be rendered. +func (a *API) UseInitMethods() bool { + return !a.NoInitMethods +} + +// NiceName returns the human friendly API name. +func (a *API) NiceName() string { + if a.Metadata.ServiceAbbreviation != "" { + return a.Metadata.ServiceAbbreviation + } + return a.Metadata.ServiceFullName +} + +// ProtocolPackage returns the package name of the protocol this API uses. 
+func (a *API) ProtocolPackage() string { + switch a.Metadata.Protocol { + case "json": + return "jsonrpc" + case "ec2": + return "ec2query" + default: + return strings.Replace(a.Metadata.Protocol, "-", "", -1) + } +} + +// OperationNames returns a slice of API operations supported. +func (a *API) OperationNames() []string { + i, names := 0, make([]string, len(a.Operations)) + for n := range a.Operations { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// OperationList returns a slice of API operation pointers +func (a *API) OperationList() []*Operation { + list := make([]*Operation, len(a.Operations)) + for i, n := range a.OperationNames() { + list[i] = a.Operations[n] + } + return list +} + +// OperationHasOutputPlaceholder returns if any of the API operation input +// or output shapes are place holders. +func (a *API) OperationHasOutputPlaceholder() bool { + for _, op := range a.Operations { + if op.OutputRef.Shape.Placeholder { + return true + } + } + return false +} + +// ShapeNames returns a slice of names for each shape used by the API. +func (a *API) ShapeNames() []string { + i, names := 0, make([]string, len(a.Shapes)) + for n := range a.Shapes { + names[i] = n + i++ + } + sort.Strings(names) + return names +} + +// ShapeList returns a slice of shape pointers used by the API. +// +// Will exclude error shapes from the list of shapes returned. +func (a *API) ShapeList() []*Shape { + list := make([]*Shape, 0, len(a.Shapes)) + for _, n := range a.ShapeNames() { + // Ignore non-eventstream exception shapes in list. 
+ if s := a.Shapes[n]; !(s.Exception && len(s.EventFor) == 0) { + list = append(list, s) + } + } + return list +} + +// ShapeListErrors returns a list of the errors defined by the API model +func (a *API) ShapeListErrors() []*Shape { + list := []*Shape{} + for _, n := range a.ShapeNames() { + // Ignore error shapes in list + if s := a.Shapes[n]; s.Exception { + list = append(list, s) + } + } + return list +} + +// resetImports resets the import map to default values. +func (a *API) resetImports() { + a.imports = map[string]bool{} +} + +// importsGoCode returns the generated Go import code. +func (a *API) importsGoCode() string { + if len(a.imports) == 0 { + return "" + } + + corePkgs, extPkgs := []string{}, []string{} + for i := range a.imports { + if strings.Contains(i, ".") { + extPkgs = append(extPkgs, i) + } else { + corePkgs = append(corePkgs, i) + } + } + sort.Strings(corePkgs) + sort.Strings(extPkgs) + + code := "import (\n" + for _, i := range corePkgs { + code += fmt.Sprintf("\t%q\n", i) + } + if len(corePkgs) > 0 { + code += "\n" + } + for _, i := range extPkgs { + code += fmt.Sprintf("\t%q\n", i) + } + code += ")\n\n" + return code +} + +// A tplAPI is the top level template for the API +var tplAPI = template.Must(template.New("api").Parse(` +{{- range $_, $o := .OperationList }} + + {{ $o.GoCode }} +{{- end }} + +{{- range $_, $s := $.Shapes }} + {{- if and $s.IsInternal (eq $s.Type "structure") (not $s.Exception) }} + + {{ $s.GoCode }} + {{- else if and $s.Exception (or $.WithGeneratedTypedErrors $s.EventFor) }} + + {{ $s.GoCode }} + {{- end }} +{{- end }} + +{{- range $_, $s := $.Shapes }} + {{- if $s.IsEnum }} + + {{ $s.GoCode }} + {{- end }} +{{- end }} +`)) + +// AddImport adds the import path to the generated file's import. +func (a *API) AddImport(v string) error { + a.imports[v] = true + return nil +} + +// AddSDKImport adds a SDK package import to the generated file's import. 
+func (a *API) AddSDKImport(v ...string) error { + e := make([]string, 0, 5) + e = append(e, SDKImportRoot) + e = append(e, v...) + + a.imports[path.Join(e...)] = true + return nil +} + +// APIGoCode renders the API in Go code. Returning it as a string +func (a *API) APIGoCode() string { + a.resetImports() + a.AddSDKImport("aws") + a.AddSDKImport("aws/awsutil") + a.AddSDKImport("aws/request") + + if a.HasEndpointARN { + a.AddImport("fmt") + if a.PackageName() == "s3" || a.PackageName() == "s3control" { + a.AddSDKImport("internal/s3shared/arn") + } else { + a.AddSDKImport("service", a.PackageName(), "internal", "arn") + } + } + + var buf bytes.Buffer + err := tplAPI.Execute(&buf, a) + if err != nil { + panic(err) + } + + code := a.importsGoCode() + strings.TrimSpace(buf.String()) + return code +} + +var noCrossLinkServices = map[string]struct{}{ + "apigateway": {}, + "budgets": {}, + "cloudsearch": {}, + "cloudsearchdomain": {}, + "elastictranscoder": {}, + "elasticsearchservice": {}, + "glacier": {}, + "importexport": {}, + "iot": {}, + "iotdataplane": {}, + "machinelearning": {}, + "rekognition": {}, + "sdb": {}, + "swf": {}, +} + +// HasCrosslinks will return whether or not a service has crosslinking . +func HasCrosslinks(service string) bool { + _, ok := noCrossLinkServices[service] + return !ok +} + +// GetCrosslinkURL returns the crosslinking URL for the shape based on the name and +// uid provided. Empty string is returned if no crosslink link could be determined. +func (a *API) GetCrosslinkURL(params ...string) string { + baseURL := a.BaseCrosslinkURL + uid := a.Metadata.UID + + if a.Metadata.UID == "" || a.BaseCrosslinkURL == "" { + return "" + } + + if !HasCrosslinks(strings.ToLower(a.PackageName())) { + return "" + } + + return strings.Join(append([]string{baseURL, "goto", "WebAPI", uid}, params...), "/") +} + +// ServiceIDFromUID will parse the service id from the uid and return +// the service id that was found. 
+func ServiceIDFromUID(uid string) string { + found := 0 + i := len(uid) - 1 + for ; i >= 0; i-- { + if uid[i] == '-' { + found++ + } + // Terminate after the date component is found, e.g. es-2017-11-11 + if found == 3 { + break + } + } + + return uid[0:i] +} + +// APIName returns the API's service name. +func (a *API) APIName() string { + return a.name +} + +var tplServiceDoc = template.Must(template.New("service docs"). + Parse(` +// Package {{ .PackageName }} provides the client and types for making API +// requests to {{ .Metadata.ServiceFullName }}. +{{ if .Documentation -}} +// +{{ .Documentation }} +{{ end -}} +{{ $crosslinkURL := $.GetCrosslinkURL -}} +{{ if $crosslinkURL -}} +// +// See {{ $crosslinkURL }} for more information on this service. +{{ end -}} +// +// See {{ .PackageName }} package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/{{ .PackageName }}/ +// +// Using the Client +// +// To contact {{ .Metadata.ServiceFullName }} with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the {{ .Metadata.ServiceFullName }} client {{ .StructName }} for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/{{ .PackageName }}/#New +`)) + +var serviceIDRegex = regexp.MustCompile("[^a-zA-Z0-9 ]+") +var prefixDigitRegex = regexp.MustCompile("^[0-9]+") + +// ServiceID will return a unique identifier specific to a service. 
// ServiceID derives a unique identifier for a service. The modeled
// Metadata.ServiceID wins when present; otherwise the abbreviation (or full
// name) is normalized by stripping "Amazon"/"AWS", punctuation, and any
// leading digits.
func ServiceID(a *API) string {
	if len(a.Metadata.ServiceID) > 0 {
		return a.Metadata.ServiceID
	}

	name := a.Metadata.ServiceAbbreviation
	if len(name) == 0 {
		name = a.Metadata.ServiceFullName
	}

	name = strings.Replace(name, "Amazon", "", -1)
	name = strings.Replace(name, "AWS", "", -1)
	name = serviceIDRegex.ReplaceAllString(name, "")
	name = prefixDigitRegex.ReplaceAllString(name, "")
	name = strings.TrimSpace(name)
	return name
}

// A tplService defines the template for the service generated code.
// The Funcs map chooses between emitting the generated package's const
// identifiers (ServiceName/EndpointsID/ServiceID) and inlining quoted
// literals when NoConstServiceNames is set.
var tplService = template.Must(template.New("service").Funcs(template.FuncMap{
	"ServiceNameConstValue": ServiceName,
	"ServiceNameValue": func(a *API) string {
		if !a.NoConstServiceNames {
			return "ServiceName"
		}
		return fmt.Sprintf("%q", ServiceName(a))
	},
	"EndpointsIDConstValue": func(a *API) string {
		if a.NoConstServiceNames {
			return fmt.Sprintf("%q", a.Metadata.EndpointsID)
		}
		if a.Metadata.EndpointsID == ServiceName(a) {
			return "ServiceName"
		}

		return fmt.Sprintf("%q", a.Metadata.EndpointsID)
	},
	"EndpointsIDValue": func(a *API) string {
		if a.NoConstServiceNames {
			return fmt.Sprintf("%q", a.Metadata.EndpointsID)
		}

		return "EndpointsID"
	},
	"ServiceIDVar": func(a *API) string {
		if a.NoConstServiceNames {
			return fmt.Sprintf("%q", ServiceID(a))
		}

		return "ServiceID"
	},
	"ServiceID": ServiceID,
}).Parse(`
// {{ .StructName }} provides the API operation methods for making requests to
// {{ .Metadata.ServiceFullName }}. See this package's package overview docs
// for details on the service.
//
// {{ .StructName }} methods are safe to use concurrently. It is not safe to
// modify mutate any of the struct's properties though.
type {{ .StructName }} struct {
	*client.Client
	{{- if .EndpointDiscoveryOp }}
	endpointCache *crr.EndpointCache
	{{ end -}}
}

{{ if .UseInitMethods }}// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)
{{ end }}


{{ if not .NoConstServiceNames -}}
// Service information constants
const (
	ServiceName = "{{ ServiceNameConstValue . }}" // Name of service.
	EndpointsID = {{ EndpointsIDConstValue . }} // ID to lookup a service endpoint with.
	ServiceID = "{{ ServiceID . }}" // ServiceID is a unique identifier of a specific service.
)
{{- end }}

// New creates a new instance of the {{ .StructName }} client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create a {{ .StructName }} client from just a session.
//     svc := {{ .PackageName }}.New(mySession)
//
//     // Create a {{ .StructName }} client with additional configuration
//     svc := {{ .PackageName }}.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *{{ .StructName }} {
	{{ if .Metadata.NoResolveEndpoint -}}
		var c client.Config
		if v, ok := p.(client.ConfigNoResolveEndpointProvider); ok {
			c = v.ClientConfigNoResolveEndpoint(cfgs...)
		} else {
			c = p.ClientConfig({{ EndpointsIDValue . }}, cfgs...)
		}
	{{- else -}}
		c := p.ClientConfig({{ EndpointsIDValue . }}, cfgs...)
	{{- end }}

	{{- if .Metadata.SigningName }}
	if c.SigningNameDerived || len(c.SigningName) == 0{
		c.SigningName = "{{ .Metadata.SigningName }}"
	}
	{{- end }}
	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *{{ .StructName }} {
	svc := &{{ .StructName }}{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: {{ ServiceNameValue . }},
				ServiceID : {{ ServiceIDVar . }},
				SigningName: signingName,
				SigningRegion: signingRegion,
				PartitionID: partitionID,
				Endpoint: endpoint,
				APIVersion: "{{ .Metadata.APIVersion }}",
				{{ if and (.Metadata.JSONVersion) (eq .Metadata.Protocol "json") -}}
					JSONVersion: "{{ .Metadata.JSONVersion }}",
				{{- end }}
				{{ if and (.Metadata.TargetPrefix) (eq .Metadata.Protocol "json") -}}
					TargetPrefix: "{{ .Metadata.TargetPrefix }}",
				{{- end }}
			},
			handlers,
		),
	}

	{{- if .EndpointDiscoveryOp }}
	svc.endpointCache = crr.NewEndpointCache(10)
	{{- end }}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(
		{{- if eq .Metadata.SignatureVersion "v2" -}}
			v2.SignRequestHandler
		{{- else if or (eq .Metadata.SignatureVersion "s3") (eq .Metadata.SignatureVersion "s3v4") -}}
			v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
				s.DisableURIPathEscaping = true
			})
		{{- else -}}
			v4.SignRequestHandler
		{{- end -}}
	)
	{{- if eq .Metadata.SignatureVersion "v2" }}
	svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	{{- end }}
	svc.Handlers.Build.PushBackNamed({{ .ProtocolPackage }}.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed({{ .ProtocolPackage }}.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed({{ .ProtocolPackage }}.UnmarshalMetaHandler)

	{{- if and $.WithGeneratedTypedErrors (gt (len $.ShapeListErrors) 0) }}
		{{- $_ := $.AddSDKImport "private/protocol" }}
		svc.Handlers.UnmarshalError.PushBackNamed(
			protocol.NewUnmarshalErrorHandler({{ .ProtocolPackage }}.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
		)
	{{- else }}
		svc.Handlers.UnmarshalError.PushBackNamed({{ .ProtocolPackage }}.UnmarshalErrorHandler)
	{{- end }}

	{{- if .HasEventStream }}

	svc.Handlers.BuildStream.PushBackNamed({{ .ProtocolPackage }}.BuildHandler)
	svc.Handlers.UnmarshalStream.PushBackNamed({{ .ProtocolPackage }}.UnmarshalHandler)
	{{- end }}

	{{- if .UseInitMethods }}

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}
	{{- end }}

	return svc
}

// newRequest creates a new request for a {{ .StructName }} operation and runs any
// custom request initialization.
func (c *{{ .StructName }}) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	{{- if .UseInitMethods }}

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}
	{{- end }}

	return req
}
`))

// ServicePackageDoc generates the contents of the doc file for the service.
//
// Will also read in the custom doc templates for the service if found.
func (a *API) ServicePackageDoc() string {
	a.imports = map[string]bool{}

	var buf bytes.Buffer
	if err := tplServiceDoc.Execute(&buf, a); err != nil {
		panic(err)
	}

	return buf.String()
}

// ServiceGoCode renders service go code. Returning it as a string.
// Imports are chosen to match the signing scheme and protocol the
// rendered client wires up.
func (a *API) ServiceGoCode() string {
	a.resetImports()
	a.AddSDKImport("aws")
	a.AddSDKImport("aws/client")
	a.AddSDKImport("aws/client/metadata")
	a.AddSDKImport("aws/request")
	if a.Metadata.SignatureVersion == "v2" {
		a.AddSDKImport("private/signer/v2")
		a.AddSDKImport("aws/corehandlers")
	} else {
		a.AddSDKImport("aws/signer/v4")
	}
	a.AddSDKImport("private/protocol", a.ProtocolPackage())
	if a.EndpointDiscoveryOp != nil {
		a.AddSDKImport("aws/crr")
	}

	var buf bytes.Buffer
	err := tplService.Execute(&buf, a)
	if err != nil {
		panic(err)
	}

	code := a.importsGoCode() + buf.String()
	return code
}

// ExampleGoCode renders service example code. Returning it as a string.
func (a *API) ExampleGoCode() string {
	exs := []string{}
	imports := map[string]bool{}
	for _, o := range a.OperationList() {
		// Each operation records the imports its example needs;
		// collect them into a single deduplicated set.
		o.imports = map[string]bool{}
		exs = append(exs, o.Example())
		for k, v := range o.imports {
			imports[k] = v
		}
	}

	code := fmt.Sprintf("import (\n%q\n%q\n%q\n\n%q\n%q\n%q\n",
		"bytes",
		"fmt",
		"time",
		SDKImportRoot+"/aws",
		SDKImportRoot+"/aws/session",
		a.ImportPath(),
	)
	for k := range imports {
		code += fmt.Sprintf("%q\n", k)
	}
	code += ")\n\n"
	// Blank identifiers keep the always-emitted imports from
	// tripping "imported and not used" in generated examples.
	code += "var _ time.Duration\nvar _ bytes.Buffer\n\n"
	code += strings.Join(exs, "\n\n")
	return code
}

// A tplInterface defines the template for the service interface type.
var tplInterface = template.Must(template.New("interface").Parse(`
// {{ .StructName }}API provides an interface to enable mocking the
// {{ .PackageName }}.{{ .StructName }} service client's API operation,
// paginators, and waiters. This make unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
//	// myFunc uses an SDK service client to make a request to
//	// {{.Metadata.ServiceFullName}}. {{ $opts := .OperationList }}{{ $opt := index $opts 0 }}
//	func myFunc(svc {{ .InterfacePackageName }}.{{ .StructName }}API) bool {
//	    // Make svc.{{ $opt.ExportedName }} request
//	}
//
//	func main() {
//	    sess := session.New()
//	    svc := {{ .PackageName }}.New(sess)
//
//	    myFunc(svc)
//	}
//
// In your _test.go file:
//
//	// Define a mock struct to be used in your unit tests of myFunc.
//	type mock{{ .StructName }}Client struct {
//	    {{ .InterfacePackageName }}.{{ .StructName }}API
//	}
//	func (m *mock{{ .StructName }}Client) {{ $opt.ExportedName }}(input {{ $opt.InputRef.GoTypeWithPkgName }}) ({{ $opt.OutputRef.GoTypeWithPkgName }}, error) {
//	    // mock response/functionality
//	}
//
//	func TestMyFunc(t *testing.T) {
//	    // Setup Test
//	    mockSvc := &mock{{ .StructName }}Client{}
//
//	    myfunc(mockSvc)
//
//	    // Verify myFunc's functionality
//	}
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. Its suggested to use the pattern above for testing, or using
// tooling to generate mocks to satisfy the interfaces.
type {{ .StructName }}API interface {
	{{ range $_, $o := .OperationList }}
		{{ $o.InterfaceSignature }}
	{{ end }}
	{{ range $_, $w := .Waiters }}
		{{ $w.InterfaceSignature }}
	{{ end }}
}

var _ {{ .StructName }}API = (*{{ .PackageName }}.{{ .StructName }})(nil)
`))

// InterfaceGoCode returns the go code for the service's API operations as an
// interface{}. Assumes that the interface is being created in a different
// package than the service API's package.
func (a *API) InterfaceGoCode() string {
	a.resetImports()
	a.AddSDKImport("aws")
	a.AddSDKImport("aws/request")
	a.AddImport(a.ImportPath())

	var buf bytes.Buffer
	err := tplInterface.Execute(&buf, a)

	if err != nil {
		panic(err)
	}

	code := a.importsGoCode() + strings.TrimSpace(buf.String())
	return code
}

// NewAPIGoCodeWithPkgName returns a string of instantiating the API prefixed
// with its package name. Takes a string depicting the Config.
func (a *API) NewAPIGoCodeWithPkgName(cfg string) string {
	return fmt.Sprintf("%s.New(%s)", a.PackageName(), cfg)
}

// computes the validation chain for all input shapes
func (a *API) addShapeValidations() {
	for _, o := range a.Operations {
		resolveShapeValidations(o.InputRef.Shape)
	}
}

// Updates the source shape and all nested shapes with the validations that
// could possibly be needed. The ancestry slice tracks the recursion path so
// cyclic shape references terminate instead of recursing forever.
func resolveShapeValidations(s *Shape, ancestry ...*Shape) {
	// Already on the current recursion path: cycle, stop here.
	for _, a := range ancestry {
		if a == s {
			return
		}
	}

	children := []string{}
	for _, name := range s.MemberNames() {
		ref := s.MemberRefs[name]
		if s.IsRequired(name) && !s.Validations.Has(ref, ShapeValidationRequired) {
			s.Validations = append(s.Validations, ShapeValidation{
				Name: name, Ref: ref, Type: ShapeValidationRequired,
			})
		}

		if ref.Shape.Min != 0 && !s.Validations.Has(ref, ShapeValidationMinVal) {
			s.Validations = append(s.Validations, ShapeValidation{
				Name: name, Ref: ref, Type: ShapeValidationMinVal,
			})
		}

		// Has() above prevents a duplicate MinVal entry when both the
		// modeled Min and CanBeEmpty conditions apply to the same ref.
		if !ref.CanBeEmpty() && !s.Validations.Has(ref, ShapeValidationMinVal) {
			s.Validations = append(s.Validations, ShapeValidation{
				Name: name, Ref: ref, Type: ShapeValidationMinVal,
			})
		}

		switch ref.Shape.Type {
		case "map", "list", "structure":
			children = append(children, name)
		}
	}

	ancestry = append(ancestry, s)
	for _, name := range children {
		ref := s.MemberRefs[name]
		// Since this is a grab bag we will just continue since
		// we can't validate because we don't know the valued shape.
		if ref.JSONValue || (s.UsedAsInput && ref.Shape.IsEventStream) {
			continue
		}

		nestedShape := ref.Shape.NestedShape()

		var v *ShapeValidation
		if len(nestedShape.Validations) > 0 {
			v = &ShapeValidation{
				Name: name, Ref: ref, Type: ShapeValidationNested,
			}
		} else {
			resolveShapeValidations(nestedShape, ancestry...)
			if len(nestedShape.Validations) > 0 {
				v = &ShapeValidation{
					Name: name, Ref: ref, Type: ShapeValidationNested,
				}
			}
		}

		if v != nil && !s.Validations.Has(v.Ref, v.Type) {
			s.Validations = append(s.Validations, *v)
		}
	}
	ancestry = ancestry[:len(ancestry)-1]
}

// A tplAPIErrors is the top level template for the API error constants and,
// when typed errors are generated, the code-to-constructor map.
var tplAPIErrors = template.Must(template.New("api").Parse(`
const (
	{{- range $_, $s := $.ShapeListErrors }}

	// {{ $s.ErrorCodeName }} for service response error code
	// {{ printf "%q" $s.ErrorName }}.
	{{ if $s.Docstring -}}
	//
	{{ $s.Docstring }}
	{{ end -}}
	{{ $s.ErrorCodeName }} = {{ printf "%q" $s.ErrorName }}
	{{- end }}
)

{{- if $.WithGeneratedTypedErrors }}
	{{- $_ := $.AddSDKImport "private/protocol" }}

	var exceptionFromCode = map[string]func(protocol.ResponseMetadata)error {
		{{- range $_, $s := $.ShapeListErrors }}
			"{{ $s.ErrorName }}": newError{{ $s.ShapeName }},
		{{- end }}
	}
{{- end }}
`))

// APIErrorsGoCode returns the Go code for the errors.go file.
func (a *API) APIErrorsGoCode() string {
	a.resetImports()

	var buf bytes.Buffer
	err := tplAPIErrors.Execute(&buf, a)

	if err != nil {
		panic(err)
	}

	return a.importsGoCode() + strings.TrimSpace(buf.String())
}

// removeOperation removes an operation, its input/output shapes, as well as
// any references/shapes that are unique to this operation.
func (a *API) removeOperation(name string) {
	debugLogger.Logln("removing operation,", name)
	op := a.Operations[name]

	delete(a.Operations, name)
	delete(a.Examples, name)

	a.removeShape(op.InputRef.Shape)
	a.removeShape(op.OutputRef.Shape)
}

// removeShape removes the given shape, and all form member's reference target
// shapes. Will also remove member reference targeted shapes if those shapes do
// not have any additional references.
+func (a *API) removeShape(s *Shape) { + debugLogger.Logln("removing shape,", s.ShapeName) + + delete(a.Shapes, s.ShapeName) + + for name, ref := range s.MemberRefs { + a.removeShapeRef(ref) + delete(s.MemberRefs, name) + } + + for _, ref := range []*ShapeRef{&s.MemberRef, &s.KeyRef, &s.ValueRef} { + if ref.Shape == nil { + continue + } + a.removeShapeRef(ref) + *ref = ShapeRef{} + } +} + +// removeShapeRef removes the shape reference from its target shape. If the +// reference was the last reference to the target shape, the shape will also be +// removed. +func (a *API) removeShapeRef(ref *ShapeRef) { + if ref.Shape == nil { + return + } + + ref.Shape.removeRef(ref) + if len(ref.Shape.refs) == 0 { + a.removeShape(ref.Shape) + } +} + +// writeInputOutputLocationName writes the ShapeName to the +// shapes LocationName in the event that there is no LocationName +// specified. +func (a *API) writeInputOutputLocationName() { + for _, o := range a.Operations { + setInput := len(o.InputRef.LocationName) == 0 && a.Metadata.Protocol == "rest-xml" + setOutput := len(o.OutputRef.LocationName) == 0 && (a.Metadata.Protocol == "rest-xml" || a.Metadata.Protocol == "ec2") + + if setInput { + o.InputRef.LocationName = o.InputRef.Shape.OrigShapeName + } + if setOutput { + o.OutputRef.LocationName = o.OutputRef.Shape.OrigShapeName + } + } +} + +func (a *API) addHeaderMapDocumentation() { + for _, shape := range a.Shapes { + if !shape.UsedAsOutput { + continue + } + for _, shapeRef := range shape.MemberRefs { + if shapeRef.Location == "headers" { + if dLen := len(shapeRef.Documentation); dLen > 0 { + if shapeRef.Documentation[dLen-1] != '\n' { + shapeRef.Documentation += "\n" + } + shapeRef.Documentation += "//" + } + shapeRef.Documentation += ` +// By default unmarshaled keys are written as a map keys in following canonicalized format: +// the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. 
+// Set ` + "`aws.Config.LowerCaseHeaderMaps`" + ` to ` + "`true`" + ` to write unmarshaled keys to the map as lowercase. +` + } + } + } +} + +func getDeprecatedMessage(msg string, name string) string { + if len(msg) == 0 { + return name + " has been deprecated" + } + + return msg +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go new file mode 100644 index 00000000000..8f7c3308845 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go @@ -0,0 +1,365 @@ +// +build codegen + +package api + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +type service struct { + srcName string + dstName string + + serviceVersion string +} + +var mergeServices = map[string]service{ + "dynamodbstreams": { + dstName: "dynamodb", + srcName: "streams.dynamodb", + }, + "wafregional": { + dstName: "waf", + srcName: "waf-regional", + serviceVersion: "2015-08-24", + }, +} + +var serviceAliaseNames = map[string]string{ + "costandusagereportservice": "CostandUsageReportService", + "elasticloadbalancing": "ELB", + "elasticloadbalancingv2": "ELBV2", + "config": "ConfigService", +} + +func (a *API) setServiceAliaseName() { + if newName, ok := serviceAliaseNames[a.PackageName()]; ok { + a.name = newName + } +} + +// customizationPasses Executes customization logic for the API by package name. +func (a *API) customizationPasses() error { + var svcCustomizations = map[string]func(*API) error{ + "s3": s3Customizations, + "s3control": s3ControlCustomizations, + "cloudfront": cloudfrontCustomizations, + "rds": rdsCustomizations, + + // Disable endpoint resolving for services that require customer + // to provide endpoint them selves. + "cloudsearchdomain": disableEndpointResolving, + "iotdataplane": disableEndpointResolving, + + // MTurk smoke test is invalid. 
The service requires AWS account to be + // linked to Amazon Mechanical Turk Account. + "mturk": supressSmokeTest, + + // Backfill the authentication type for cognito identity and sts. + // Removes the need for the customizations in these services. + "cognitoidentity": backfillAuthType(NoneAuthType, + "GetId", + "GetOpenIdToken", + "UnlinkIdentity", + "GetCredentialsForIdentity", + ), + "sts": backfillAuthType(NoneAuthType, + "AssumeRoleWithSAML", + "AssumeRoleWithWebIdentity", + ), + } + + for k := range mergeServices { + svcCustomizations[k] = mergeServicesCustomizations + } + + if fn := svcCustomizations[a.PackageName()]; fn != nil { + err := fn(a) + if err != nil { + return fmt.Errorf("service customization pass failure for %s: %v", a.PackageName(), err) + } + } + + return nil +} + +func supressSmokeTest(a *API) error { + a.SmokeTests.TestCases = []SmokeTestCase{} + return nil +} + +// Customizes the API generation to replace values specific to S3. +func s3Customizations(a *API) error { + var strExpires *Shape + + var keepContentMD5Ref = map[string]struct{}{ + "PutObjectInput": {}, + "UploadPartInput": {}, + } + + for name, s := range a.Shapes { + // Remove ContentMD5 members unless specified otherwise. + if _, keep := keepContentMD5Ref[name]; !keep { + if _, have := s.MemberRefs["ContentMD5"]; have { + delete(s.MemberRefs, "ContentMD5") + } + } + + // Generate getter methods for API operation fields used by customizations. + for _, refName := range []string{"Bucket", "SSECustomerKey", "CopySourceSSECustomerKey"} { + if ref, ok := s.MemberRefs[refName]; ok { + ref.GenerateGetter = true + } + } + + // Generate a endpointARN method for the BucketName shape if this is used as an operation input + if s.UsedAsInput { + if s.ShapeName == "CreateBucketInput" { + // For all operations but CreateBucket the BucketName shape + // needs to be decorated. 
+ continue + } + var endpointARNShape *ShapeRef + for _, ref := range s.MemberRefs { + if ref.OrigShapeName != "BucketName" || ref.Shape.Type != "string" { + continue + } + if endpointARNShape != nil { + return fmt.Errorf("more then one BucketName shape present on shape") + } + ref.EndpointARN = true + endpointARNShape = ref + } + if endpointARNShape != nil { + s.HasEndpointARNMember = true + a.HasEndpointARN = true + } + } + + // Decorate member references that are modeled with the wrong type. + // Specifically the case where a member was modeled as a string, but is + // expected to sent across the wire as a base64 value. + // + // e.g. S3's SSECustomerKey and CopySourceSSECustomerKey + for _, refName := range []string{ + "SSECustomerKey", + "CopySourceSSECustomerKey", + } { + if ref, ok := s.MemberRefs[refName]; ok { + ref.CustomTags = append(ref.CustomTags, ShapeTag{ + "marshal-as", "blob", + }) + } + } + + // Expires should be a string not time.Time since the format is not + // enforced by S3, and any value can be set to this field outside of the SDK. + if strings.HasSuffix(name, "Output") { + if ref, ok := s.MemberRefs["Expires"]; ok { + if strExpires == nil { + newShape := *ref.Shape + strExpires = &newShape + strExpires.Type = "string" + strExpires.refs = []*ShapeRef{} + } + ref.Shape.removeRef(ref) + ref.Shape = strExpires + ref.Shape.refs = append(ref.Shape.refs, &s.MemberRef) + } + } + } + s3CustRemoveHeadObjectModeledErrors(a) + + return nil +} + +// S3 HeadObject API call incorrect models NoSuchKey as valid +// error code that can be returned. This operation does not +// return error codes, all error codes are derived from HTTP +// status codes. 
+// +// aws/aws-sdk-go#1208 +func s3CustRemoveHeadObjectModeledErrors(a *API) { + op, ok := a.Operations["HeadObject"] + if !ok { + return + } + op.Documentation += ` +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors.` + op.ErrorRefs = []ShapeRef{} +} + +// S3 service operations with an AccountId need accessors to be generated for +// them so the fields can be dynamically accessed without reflection. +func s3ControlCustomizations(a *API) error { + for _, s := range a.Shapes { + // Generate a endpointARN method for the BucketName shape if this is used as an operation input + if s.UsedAsInput { + if s.ShapeName == "CreateBucketInput" || s.ShapeName == "ListRegionalBucketsInput" { + // For operations CreateBucketInput and ListRegionalBuckets the OutpostID shape + // needs to be decorated + var outpostIDMemberShape *ShapeRef + for memberName, ref := range s.MemberRefs { + if memberName != "OutpostId" || ref.Shape.Type != "string" { + continue + } + if outpostIDMemberShape != nil { + return fmt.Errorf("more then one OutpostID shape present on shape") + } + ref.OutpostIDMember = true + outpostIDMemberShape = ref + } + if outpostIDMemberShape != nil { + s.HasOutpostIDMember = true + a.HasOutpostID = true + } + continue + } + + // List of input shapes that use accesspoint names as arnable fields + accessPointNameArnables := map[string]struct{}{ + "GetAccessPointInput": {}, + "DeleteAccessPointInput": {}, + "PutAccessPointPolicyInput": {}, + "GetAccessPointPolicyInput": {}, + "DeleteAccessPointPolicyInput": {}, + } + + var endpointARNShape *ShapeRef + for _, ref := range s.MemberRefs { + // Operations that have AccessPointName field that takes in an ARN as input + if _, ok := accessPointNameArnables[s.ShapeName]; ok { + if ref.OrigShapeName != "AccessPointName" || ref.Shape.Type != "string" { + continue + } + } else if ref.OrigShapeName != "BucketName" || ref.Shape.Type != "string" { 
+ // All other operations currently allow BucketName field to take in ARN. + // Exceptions for these are CreateBucket and ListRegionalBucket which use + // Outpost id and are handled above separately. + continue + } + + if endpointARNShape != nil { + return fmt.Errorf("more then one member present on shape takes arn as input") + } + ref.EndpointARN = true + endpointARNShape = ref + } + if endpointARNShape != nil { + s.HasEndpointARNMember = true + a.HasEndpointARN = true + + for _, ref := range s.MemberRefs { + // check for account id customization + if ref.OrigShapeName == "AccountId" && ref.Shape.Type == "string" { + ref.AccountIDMemberWithARN = true + s.HasAccountIdMemberWithARN = true + a.HasAccountIdWithARN = true + } + } + } + } + } + + return nil +} + +// cloudfrontCustomizations customized the API generation to replace values +// specific to CloudFront. +func cloudfrontCustomizations(a *API) error { + // MaxItems members should always be integers + for _, s := range a.Shapes { + if ref, ok := s.MemberRefs["MaxItems"]; ok { + ref.ShapeName = "Integer" + ref.Shape = a.Shapes["Integer"] + } + } + return nil +} + +// mergeServicesCustomizations references any duplicate shapes from DynamoDB +func mergeServicesCustomizations(a *API) error { + info := mergeServices[a.PackageName()] + + p := strings.Replace(a.path, info.srcName, info.dstName, -1) + + if info.serviceVersion != "" { + index := strings.LastIndex(p, string(filepath.Separator)) + files, _ := ioutil.ReadDir(p[:index]) + if len(files) > 1 { + panic("New version was introduced") + } + p = p[:index] + "/" + info.serviceVersion + } + + file := filepath.Join(p, "api-2.json") + + serviceAPI := API{} + serviceAPI.Attach(file) + serviceAPI.Setup() + + for n := range a.Shapes { + if _, ok := serviceAPI.Shapes[n]; ok { + a.Shapes[n].resolvePkg = SDKImportRoot + "/service/" + info.dstName + } + } + + return nil +} + +// rdsCustomizations are customization for the service/rds. 
This adds non-modeled fields used for presigning. +func rdsCustomizations(a *API) error { + inputs := []string{ + "CopyDBSnapshotInput", + "CreateDBInstanceReadReplicaInput", + "CopyDBClusterSnapshotInput", + "CreateDBClusterInput", + } + for _, input := range inputs { + if ref, ok := a.Shapes[input]; ok { + ref.MemberRefs["SourceRegion"] = &ShapeRef{ + Documentation: docstring(`SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN.`), + ShapeName: "String", + Shape: a.Shapes["String"], + Ignore: true, + } + ref.MemberRefs["DestinationRegion"] = &ShapeRef{ + Documentation: docstring(`DestinationRegion is used for presigning the request to a given region.`), + ShapeName: "String", + Shape: a.Shapes["String"], + } + } + } + + return nil +} + +func disableEndpointResolving(a *API) error { + a.Metadata.NoResolveEndpoint = true + return nil +} + +func backfillAuthType(typ AuthType, opNames ...string) func(*API) error { + return func(a *API) error { + for _, opName := range opNames { + op, ok := a.Operations[opName] + if !ok { + panic("unable to backfill auth-type for unknown operation " + opName) + } + if v := op.AuthType; len(v) != 0 { + fmt.Fprintf(os.Stderr, "unable to backfill auth-type for %s, already set, %s", opName, v) + continue + } + + op.AuthType = typ + } + + return nil + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go new file mode 100644 index 00000000000..43f94084742 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/docstring.go @@ -0,0 +1,546 @@ +// +build codegen + +package api + +import ( + "bufio" + "encoding/json" + "fmt" + "html" + "io" + "os" + "regexp" + "strings" + + xhtml "golang.org/x/net/html" + "golang.org/x/net/html/atom" +) + +type apiDocumentation struct { + Operations 
map[string]string + Service string + Shapes map[string]shapeDocumentation +} + +type shapeDocumentation struct { + Base string + Refs map[string]string +} + +// AttachDocs attaches documentation from a JSON filename. +func (a *API) AttachDocs(filename string) error { + var d apiDocumentation + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + return err + } + err = json.NewDecoder(f).Decode(&d) + if err != nil { + return fmt.Errorf("failed to decode %s, err: %v", filename, err) + } + + return d.setup(a) +} + +func (d *apiDocumentation) setup(a *API) error { + a.Documentation = docstring(d.Service) + + for opName, doc := range d.Operations { + if _, ok := a.Operations[opName]; !ok { + return fmt.Errorf("%s, doc op %q not found in API op set", + a.name, opName) + } + a.Operations[opName].Documentation = docstring(doc) + } + + for shapeName, docShape := range d.Shapes { + if s, ok := a.Shapes[shapeName]; ok { + s.Documentation = docstring(docShape.Base) + } + + for ref, doc := range docShape.Refs { + if doc == "" { + continue + } + + parts := strings.Split(ref, "$") + if len(parts) != 2 { + fmt.Fprintf(os.Stderr, + "Shape Doc %s has unexpected reference format, %q\n", + shapeName, ref) + continue + } + + if s, ok := a.Shapes[parts[0]]; ok && len(s.MemberRefs) != 0 { + if m, ok := s.MemberRefs[parts[1]]; ok && m.ShapeName == shapeName { + m.Documentation = docstring(doc) + } + } + } + } + + return nil +} + +var reNewline = regexp.MustCompile(`\r?\n`) +var reMultiSpace = regexp.MustCompile(`\s+`) +var reComments = regexp.MustCompile(``) +var reFullnameBlock = regexp.MustCompile(`(.+?)<\/fullname>`) +var reFullname = regexp.MustCompile(`(.*?)`) +var reExamples = regexp.MustCompile(`.+?<\/examples?>`) +var reEndNL = regexp.MustCompile(`\n+$`) + +// docstring rewrites a string to insert godocs formatting. 
+func docstring(doc string) string { + doc = strings.TrimSpace(doc) + if doc == "" { + return "" + } + + doc = reNewline.ReplaceAllString(doc, "") + doc = reMultiSpace.ReplaceAllString(doc, " ") + doc = reComments.ReplaceAllString(doc, "") + + var fullname string + parts := reFullnameBlock.FindStringSubmatch(doc) + if len(parts) > 1 { + fullname = parts[1] + } + // Remove full name block from doc string + doc = reFullname.ReplaceAllString(doc, "") + + doc = reExamples.ReplaceAllString(doc, "") + doc = generateDoc(doc) + doc = reEndNL.ReplaceAllString(doc, "") + doc = html.UnescapeString(doc) + + // Replace doc with full name if doc is empty. + if len(doc) == 0 { + doc = fullname + } + + return commentify(doc) +} + +const ( + indent = " " +) + +// commentify converts a string to a Go comment +func commentify(doc string) string { + if len(doc) == 0 { + return "" + } + + lines := strings.Split(doc, "\n") + out := make([]string, 0, len(lines)) + for i := 0; i < len(lines); i++ { + line := lines[i] + + if i > 0 && line == "" && lines[i-1] == "" { + continue + } + out = append(out, line) + } + + if len(out) > 0 { + out[0] = "// " + out[0] + return strings.Join(out, "\n// ") + } + return "" +} + +func wrap(text string, length int) string { + var b strings.Builder + + s := bufio.NewScanner(strings.NewReader(text)) + for s.Scan() { + line := s.Text() + + // cleanup the line's spaces + var i int + for i = 0; i < len(line); i++ { + c := line[i] + // Ignore leading spaces, e.g indents. 
+ if !(c == ' ' || c == '\t') { + break + } + } + line = line[:i] + strings.Join(strings.Fields(line[i:]), " ") + splitLine(&b, line, length) + } + + return strings.TrimRight(b.String(), "\n") +} + +func splitLine(w stringWriter, line string, length int) { + leading := getLeadingWhitespace(line) + + line = line[len(leading):] + length -= len(leading) + + const splitOn = " " + for len(line) > length { + // Find the next whitespace to the length + idx := strings.Index(line[length:], splitOn) + if idx == -1 { + break + } + offset := length + idx + + if v := line[offset+len(splitOn):]; len(v) == 1 && strings.ContainsAny(v, `,.!?'"`) { + // Workaround for long lines with space before the punctuation mark. + break + } + + w.WriteString(leading) + w.WriteString(line[:offset]) + w.WriteByte('\n') + line = strings.TrimLeft(line[offset+len(splitOn):], " \t") + } + + if len(line) > 0 { + w.WriteString(leading) + w.WriteString(line) + } + // Add the newline back in that was stripped out by scanner. + w.WriteByte('\n') +} + +func getLeadingWhitespace(v string) string { + var o strings.Builder + for _, c := range v { + if c == ' ' || c == '\t' { + o.WriteRune(c) + } else { + break + } + } + + return o.String() +} + +// generateDoc will generate the proper doc string for html encoded or plain text doc entries. 
+func generateDoc(htmlSrc string) string { + tokenizer := xhtml.NewTokenizer(strings.NewReader(htmlSrc)) + var builder strings.Builder + if err := encodeHTMLToText(&builder, tokenizer); err != nil { + panic(fmt.Sprintf("failed to generated docs, %v", err)) + } + + return wrap(strings.Trim(builder.String(), "\n"), 72) +} + +type stringWriter interface { + Write([]byte) (int, error) + WriteByte(byte) error + WriteRune(rune) (int, error) + WriteString(string) (int, error) +} + +func encodeHTMLToText(w stringWriter, z *xhtml.Tokenizer) error { + encoder := newHTMLTokenEncoder(w) + defer encoder.Flush() + + for { + tt := z.Next() + if tt == xhtml.ErrorToken { + if err := z.Err(); err == io.EOF { + return nil + } else if err != nil { + return err + } + } + + if err := encoder.Encode(z.Token()); err != nil { + return err + } + } +} + +type htmlTokenHandler interface { + OnStartTagToken(xhtml.Token) htmlTokenHandler + OnEndTagToken(xhtml.Token, bool) + OnSelfClosingTagToken(xhtml.Token) + OnTextTagToken(xhtml.Token) +} + +type htmlTokenEncoder struct { + w stringWriter + depth int + handlers []tokenHandlerItem + baseHandler tokenHandlerItem +} + +type tokenHandlerItem struct { + handler htmlTokenHandler + depth int +} + +func newHTMLTokenEncoder(w stringWriter) *htmlTokenEncoder { + baseHandler := newBlockTokenHandler(w) + baseHandler.rootBlock = true + + return &htmlTokenEncoder{ + w: w, + baseHandler: tokenHandlerItem{ + handler: baseHandler, + }, + } +} + +func (e *htmlTokenEncoder) Flush() error { + e.baseHandler.handler.OnEndTagToken(xhtml.Token{Type: xhtml.TextToken}, true) + return nil +} + +func (e *htmlTokenEncoder) Encode(token xhtml.Token) error { + h := e.baseHandler + if len(e.handlers) != 0 { + h = e.handlers[len(e.handlers)-1] + } + + switch token.Type { + case xhtml.StartTagToken: + e.depth++ + + next := h.handler.OnStartTagToken(token) + if next != nil { + e.handlers = append(e.handlers, tokenHandlerItem{ + handler: next, + depth: e.depth, + }) + } + + 
case xhtml.EndTagToken: + handlerBlockClosing := e.depth == h.depth + + h.handler.OnEndTagToken(token, handlerBlockClosing) + + // Remove all but the root handler as the handler is no longer needed. + if handlerBlockClosing { + e.handlers = e.handlers[:len(e.handlers)-1] + } + e.depth-- + + case xhtml.SelfClosingTagToken: + h.handler.OnSelfClosingTagToken(token) + + case xhtml.TextToken: + h.handler.OnTextTagToken(token) + } + + return nil +} + +type baseTokenHandler struct { + w stringWriter +} + +func (e *baseTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { return nil } +func (e *baseTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) {} +func (e *baseTokenHandler) OnSelfClosingTagToken(token xhtml.Token) {} +func (e *baseTokenHandler) OnTextTagToken(token xhtml.Token) { + e.w.WriteString(token.Data) +} + +type blockTokenHandler struct { + baseTokenHandler + + rootBlock bool + origWriter stringWriter + strBuilder *strings.Builder + + started bool + newlineBeforeNextBlock bool +} + +func newBlockTokenHandler(w stringWriter) *blockTokenHandler { + strBuilder := &strings.Builder{} + return &blockTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *blockTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + e.started = true + if e.newlineBeforeNextBlock { + e.w.WriteString("\n") + e.newlineBeforeNextBlock = false + } + + switch token.DataAtom { + case atom.A: + return newLinkTokenHandler(e.w, token) + case atom.Ul: + e.w.WriteString("\n") + e.newlineBeforeNextBlock = true + return newListTokenHandler(e.w) + + case atom.Div, atom.Dt, atom.P, atom.H1, atom.H2, atom.H3, atom.H4, atom.H5, atom.H6: + e.w.WriteString("\n") + e.newlineBeforeNextBlock = true + return newBlockTokenHandler(e.w) + + case atom.Pre, atom.Code: + if e.rootBlock { + e.w.WriteString("\n") + e.w.WriteString(indent) + e.newlineBeforeNextBlock = true + } + return 
newBlockTokenHandler(e.w) + } + + return nil +} +func (e *blockTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + e.origWriter.WriteString(e.strBuilder.String()) + if e.newlineBeforeNextBlock { + e.origWriter.WriteString("\n") + e.newlineBeforeNextBlock = false + } + + e.strBuilder.Reset() +} + +func (e *blockTokenHandler) OnTextTagToken(token xhtml.Token) { + if e.newlineBeforeNextBlock { + e.w.WriteString("\n") + e.newlineBeforeNextBlock = false + } + if !e.started { + token.Data = strings.TrimLeft(token.Data, " \t\n") + } + if len(token.Data) != 0 { + e.started = true + } + e.baseTokenHandler.OnTextTagToken(token) +} + +type linkTokenHandler struct { + baseTokenHandler + linkToken xhtml.Token +} + +func newLinkTokenHandler(w stringWriter, token xhtml.Token) *linkTokenHandler { + return &linkTokenHandler{ + baseTokenHandler: baseTokenHandler{ + w: w, + }, + linkToken: token, + } +} +func (e *linkTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + if href, ok := getHTMLTokenAttr(e.linkToken.Attr, "href"); ok && len(href) != 0 { + fmt.Fprintf(e.w, " (%s)", strings.TrimSpace(href)) + } +} + +type listTokenHandler struct { + baseTokenHandler + + items int +} + +func newListTokenHandler(w stringWriter) *listTokenHandler { + return &listTokenHandler{ + baseTokenHandler: baseTokenHandler{ + w: w, + }, + } +} +func (e *listTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + switch token.DataAtom { + case atom.Li: + if e.items >= 1 { + e.w.WriteString("\n\n") + } + e.items++ + return newListItemTokenHandler(e.w) + } + return nil +} + +func (e *listTokenHandler) OnTextTagToken(token xhtml.Token) { + // Squash whitespace between list and items +} + +type listItemTokenHandler struct { + baseTokenHandler + + origWriter stringWriter + strBuilder *strings.Builder +} + +func newListItemTokenHandler(w stringWriter) *listItemTokenHandler { + strBuilder := 
&strings.Builder{} + return &listItemTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *listItemTokenHandler) OnStartTagToken(token xhtml.Token) htmlTokenHandler { + switch token.DataAtom { + case atom.P: + return newBlockTokenHandler(e.w) + } + return nil +} +func (e *listItemTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + e.origWriter.WriteString(indent + "* ") + e.origWriter.WriteString(strings.TrimSpace(e.strBuilder.String())) +} + +type trimSpaceTokenHandler struct { + baseTokenHandler + + origWriter stringWriter + strBuilder *strings.Builder +} + +func newTrimSpaceTokenHandler(w stringWriter) *trimSpaceTokenHandler { + strBuilder := &strings.Builder{} + return &trimSpaceTokenHandler{ + origWriter: w, + strBuilder: strBuilder, + baseTokenHandler: baseTokenHandler{ + w: strBuilder, + }, + } +} +func (e *trimSpaceTokenHandler) OnEndTagToken(token xhtml.Token, blockClosing bool) { + if !blockClosing { + return + } + + e.origWriter.WriteString(strings.TrimSpace(e.strBuilder.String())) +} + +func getHTMLTokenAttr(attr []xhtml.Attribute, name string) (string, bool) { + for _, a := range attr { + if strings.EqualFold(a.Key, name) { + return a.Val, true + } + } + return "", false +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_arn.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_arn.go new file mode 100644 index 00000000000..d1c2bd76bb6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_arn.go @@ -0,0 +1,100 @@ +package api + +import "text/template" + +const endpointARNShapeTmplDef = ` +{{- define "endpointARNShapeTmpl" }} +{{ range $_, $name := $.MemberNames -}} + {{ $elem := index $.MemberRefs $name -}} + {{ if $elem.EndpointARN -}} + func (s *{{ $.ShapeName }}) getEndpointARN() (arn.Resource, error) { + if s.{{ $name }} == nil { + return 
nil, fmt.Errorf("member {{ $name }} is nil") + } + return parseEndpointARN(*s.{{ $name }}) + } + + func (s *{{ $.ShapeName }}) hasEndpointARN() bool { + if s.{{ $name }} == nil { + return false + } + return arn.IsARN(*s.{{ $name }}) + } + + // updateArnableField updates the value of the input field that + // takes an ARN as an input. This method is useful to backfill + // the parsed resource name from ARN into the input member. + // It returns a pointer to a modified copy of input and an error. + // Note that original input is not modified. + func (s {{ $.ShapeName }}) updateArnableField(v string) (interface{}, error) { + if s.{{ $name }} == nil { + return nil, fmt.Errorf("member {{ $name }} is nil") + } + s.{{ $name }} = aws.String(v) + return &s, nil + } + {{ end -}} +{{ end }} +{{ end }} +` + +var endpointARNShapeTmpl = template.Must( + template.New("endpointARNShapeTmpl"). + Parse(endpointARNShapeTmplDef), +) + +const outpostIDShapeTmplDef = ` +{{- define "outpostIDShapeTmpl" }} +{{ range $_, $name := $.MemberNames -}} + {{ $elem := index $.MemberRefs $name -}} + {{ if $elem.OutpostIDMember -}} + func (s *{{ $.ShapeName }}) getOutpostID() (string, error) { + if s.{{ $name }} == nil { + return "", fmt.Errorf("member {{ $name }} is nil") + } + return *s.{{ $name }}, nil + } + + func (s *{{ $.ShapeName }}) hasOutpostID() bool { + if s.{{ $name }} == nil { + return false + } + return true + } + {{ end -}} +{{ end }} +{{ end }} +` + +var outpostIDShapeTmpl = template.Must( + template.New("outpostIDShapeTmpl"). 
+ Parse(outpostIDShapeTmplDef), +) + +const accountIDWithARNShapeTmplDef = ` +{{- define "accountIDWithARNShapeTmpl" }} +{{ range $_, $name := $.MemberNames -}} + {{ $elem := index $.MemberRefs $name -}} + {{ if $elem.AccountIDMemberWithARN -}} + // updateAccountID returns a pointer to a modified copy of input, + // if account id is not provided, we update the account id in modified input + // if account id is provided, but doesn't match with the one in ARN, we throw an error + // if account id is not updated, we return nil. Note that original input is not modified. + func (s {{ $.ShapeName }}) updateAccountID(accountId string) (interface{}, error) { + if s.{{ $name }} == nil { + s.{{ $name }} = aws.String(accountId) + return &s, nil + } else if *s.{{ $name }} != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil + } + {{ end -}} +{{ end }} +{{ end }} +` + +var accountIDWithARNShapeTmpl = template.Must( + template.New("accountIDWithARNShapeTmpl"). 
+ Parse(accountIDWithARNShapeTmplDef), +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_trait.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_trait.go new file mode 100644 index 00000000000..08eba2da820 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/endpoint_trait.go @@ -0,0 +1,59 @@ +// +build codegen + +package api + +import ( + "fmt" + "text/template" +) + +func setupEndpointHostPrefix(op *Operation) { + op.API.AddSDKImport("private/protocol") + + buildHandler := fmt.Sprintf("protocol.NewHostPrefixHandler(%q, ", + op.Endpoint.HostPrefix) + + if op.InputRef.Shape.HasHostLabelMembers() { + buildHandler += "input.hostLabels" + } else { + buildHandler += "nil" + } + + buildHandler += ")" + + op.CustomBuildHandlers = append(op.CustomBuildHandlers, + buildHandler, + "protocol.ValidateEndpointHostHandler", + ) +} + +// HasHostLabelMembers returns true if the shape contains any members which are +// decorated with the hostLabel trait. +func (s *Shape) HasHostLabelMembers() bool { + for _, ref := range s.MemberRefs { + if ref.HostLabel { + return true + } + } + + return false +} + +var hostLabelsShapeTmpl = template.Must( + template.New("hostLabelsShapeTmpl"). 
+ Parse(hostLabelsShapeTmplDef), +) + +const hostLabelsShapeTmplDef = ` +{{- define "hostLabelsShapeTmpl" }} +func (s *{{ $.ShapeName }}) hostLabels() map[string]string { + return map[string]string{ + {{- range $name, $ref := $.MemberRefs }} + {{- if $ref.HostLabel }} + "{{ $name }}": aws.StringValue(s.{{ $name }}), + {{- end }} + {{- end }} + } +} +{{- end }} +` diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream.go new file mode 100644 index 00000000000..9a883ba1119 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream.go @@ -0,0 +1,314 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "text/template" +) + +// EventStreamAPI provides details about the event stream async API and +// associated EventStream shapes. +type EventStreamAPI struct { + API *API + Operation *Operation + Name string + InputStream *EventStream + OutputStream *EventStream + RequireHTTP2 bool + + // The eventstream generated code was generated with an older model that + // does not scale with bi-directional models. This drives the need to + // expose the output shape's event stream member as an exported member. + Legacy bool +} + +func (es *EventStreamAPI) StreamInputEventTypeGetterName() string { + return "eventTypeFor" + es.Name + "InputEvent" +} +func (es *EventStreamAPI) StreamOutputUnmarshalerForEventName() string { + return "eventTypeFor" + es.Name + "OutputEvent" +} + +// EventStream represents a single eventstream group (input/output) and the +// modeled events that are known for the stream. 
+type EventStream struct { + Name string + Shape *Shape + Events []*Event + Exceptions []*Event +} + +func (es *EventStream) EventGroupName() string { + return es.Name + "Event" +} + +func (es *EventStream) StreamWriterAPIName() string { + return es.Name + "Writer" +} + +func (es *EventStream) StreamWriterImplName() string { + return "write" + es.Name +} + +func (es *EventStream) StreamEventTypeGetterName() string { + return "eventTypeFor" + es.Name + "Event" +} + +func (es *EventStream) StreamReaderAPIName() string { + return es.Name + "Reader" +} + +func (es *EventStream) StreamReaderImplName() string { + return "read" + es.Name +} +func (es *EventStream) StreamReaderImplConstructorName() string { + return "newRead" + es.Name +} + +func (es *EventStream) StreamUnmarshalerForEventName() string { + return "unmarshalerFor" + es.Name + "Event" +} + +func (es *EventStream) StreamUnknownEventName() string { + return es.Name + "UnknownEvent" +} + +// Event is a single EventStream event that can be sent or received in an +// EventStream. +type Event struct { + Name string + Shape *Shape + For *EventStream + Private bool +} + +// ShapeDoc returns the docstring for the EventStream API. +func (esAPI *EventStreamAPI) ShapeDoc() string { + tmpl := template.Must(template.New("eventStreamShapeDoc").Parse(` +{{- $.Name }} provides handling of EventStreams for +the {{ $.Operation.ExportedName }} API. + +{{- if $.OutputStream }} + +Use this type to receive {{ $.OutputStream.Name }} events. The events +can be read from the stream. + +The events that can be received are: +{{- range $_, $event := $.OutputStream.Events }} + * {{ $event.Shape.ShapeName }} +{{- end }} + +{{- end }} + +{{- if $.InputStream }} + +Use this type to send {{ $.InputStream.Name }} events. The events +can be written to the stream. 
+ +The events that can be sent are: +{{ range $_, $event := $.InputStream.Events -}} + * {{ $event.Shape.ShapeName }} +{{- end }} + +{{- end }}`)) + + var w bytes.Buffer + if err := tmpl.Execute(&w, esAPI); err != nil { + panic(fmt.Sprintf("failed to generate eventstream shape template for %v, %v", + esAPI.Operation.ExportedName, err)) + } + + return commentify(w.String()) +} + +func hasEventStream(topShape *Shape) bool { + for _, ref := range topShape.MemberRefs { + if ref.Shape.IsEventStream { + return true + } + } + + return false +} + +func eventStreamAPIShapeRefDoc(refName string) string { + return commentify(fmt.Sprintf("Use %s to use the API's stream.", refName)) +} + +func (a *API) setupEventStreams() error { + streams := EventStreams{} + + for opName, op := range a.Operations { + inputRef := getEventStreamMember(op.InputRef.Shape) + outputRef := getEventStreamMember(op.OutputRef.Shape) + + if inputRef == nil && outputRef == nil { + continue + } + if inputRef != nil && outputRef == nil { + return fmt.Errorf("event stream input only stream not supported for protocol %s, %s, %v", + a.NiceName(), opName, a.Metadata.Protocol) + } + switch a.Metadata.Protocol { + case `rest-json`, `rest-xml`, `json`: + default: + return UnsupportedAPIModelError{ + Err: fmt.Errorf("EventStream not supported for protocol %s, %s, %v", + a.NiceName(), opName, a.Metadata.Protocol), + } + } + + var inputStream *EventStream + if inputRef != nil { + inputStream = streams.GetStream(op.InputRef.Shape, inputRef.Shape) + inputStream.Shape.IsInputEventStream = true + } + + var outputStream *EventStream + if outputRef != nil { + outputStream = streams.GetStream(op.OutputRef.Shape, outputRef.Shape) + outputStream.Shape.IsOutputEventStream = true + } + + requireHTTP2 := op.API.Metadata.ProtocolSettings.HTTP2 == "eventstream" && + inputStream != nil && outputStream != nil + + a.HasEventStream = true + op.EventStreamAPI = &EventStreamAPI{ + API: a, + Operation: op, + Name: op.ExportedName + 
"EventStream", + InputStream: inputStream, + OutputStream: outputStream, + Legacy: isLegacyEventStream(op), + RequireHTTP2: requireHTTP2, + } + op.OutputRef.Shape.OutputEventStreamAPI = op.EventStreamAPI + + if s, ok := a.Shapes[op.EventStreamAPI.Name]; ok { + newName := op.EventStreamAPI.Name + "Data" + if _, ok := a.Shapes[newName]; ok { + panic(fmt.Sprintf( + "%s: attempting to rename %s to %s, but shape with that name already exists", + a.NiceName(), op.EventStreamAPI.Name, newName)) + } + s.Rename(newName) + } + } + + return nil +} + +// EventStreams is a map of streams for the API shared across all operations. +// Ensurs that no stream is duplicated. +type EventStreams map[*Shape]*EventStream + +// GetStream returns an EventStream for the operations top level shape, and +// member reference to the stream shape. +func (es *EventStreams) GetStream(topShape *Shape, streamShape *Shape) *EventStream { + var stream *EventStream + if v, ok := (*es)[streamShape]; ok { + stream = v + } else { + stream = setupEventStream(streamShape) + (*es)[streamShape] = stream + } + + if topShape.API.Metadata.Protocol == "json" { + topShape.EventFor = append(topShape.EventFor, stream) + } + + return stream +} + +var legacyEventStream = map[string]map[string]struct{}{ + "s3": { + "SelectObjectContent": struct{}{}, + }, + "kinesis": { + "SubscribeToShard": struct{}{}, + }, +} + +func isLegacyEventStream(op *Operation) bool { + if s, ok := legacyEventStream[op.API.PackageName()]; ok { + if _, ok = s[op.ExportedName]; ok { + return true + } + } + return false +} + +func (e EventStreamAPI) OutputMemberName() string { + if e.Legacy { + return "EventStream" + } + + return "eventStream" +} + +func getEventStreamMember(topShape *Shape) *ShapeRef { + for _, ref := range topShape.MemberRefs { + if !ref.Shape.IsEventStream { + continue + } + return ref + } + + return nil +} + +func setupEventStream(s *Shape) *EventStream { + eventStream := &EventStream{ + Name: s.ShapeName, + Shape: s, + } + 
s.EventStream = eventStream + + for _, eventRefName := range s.MemberNames() { + eventRef := s.MemberRefs[eventRefName] + if !(eventRef.Shape.IsEvent || eventRef.Shape.Exception) { + panic(fmt.Sprintf("unexpected non-event member reference %s.%s", + s.ShapeName, eventRefName)) + } + + updateEventPayloadRef(eventRef.Shape) + + eventRef.Shape.EventFor = append(eventRef.Shape.EventFor, eventStream) + + // Exceptions and events are two different lists to allow the SDK + // to easily generate code with the two handled differently. + event := &Event{ + Name: eventRefName, + Shape: eventRef.Shape, + For: eventStream, + } + if eventRef.Shape.Exception { + eventStream.Exceptions = append(eventStream.Exceptions, event) + } else { + eventStream.Events = append(eventStream.Events, event) + } + } + + return eventStream +} + +func updateEventPayloadRef(parent *Shape) { + refName := parent.PayloadRefName() + if len(refName) == 0 { + return + } + + payloadRef := parent.MemberRefs[refName] + if payloadRef.Shape.Type == "blob" { + return + } + + if len(payloadRef.LocationName) != 0 { + return + } + + payloadRef.LocationName = refName +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl.go new file mode 100644 index 00000000000..ef72b225f4a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl.go @@ -0,0 +1,657 @@ +// +build codegen + +package api + +import ( + "fmt" + "io" + "strings" + "text/template" +) + +func renderEventStreamAPI(w io.Writer, op *Operation) error { + // Imports needed by the EventStream APIs. 
+ op.API.AddImport("fmt") + op.API.AddImport("bytes") + op.API.AddImport("io") + op.API.AddImport("time") + op.API.AddSDKImport("aws") + op.API.AddSDKImport("aws/awserr") + op.API.AddSDKImport("aws/request") + op.API.AddSDKImport("private/protocol/eventstream") + op.API.AddSDKImport("private/protocol/eventstream/eventstreamapi") + + w.Write([]byte(` +var _ awserr.Error +`)) + + return eventStreamAPITmpl.Execute(w, op) +} + +// Template for an EventStream API Shape that will provide read/writing events +// across the EventStream. This is a special shape that's only public members +// are the Events channel and a Close and Err method. +// +// Executed in the context of a Shape. +var eventStreamAPITmpl = template.Must( + template.New("eventStreamAPITmplDef"). + Funcs(template.FuncMap{ + "unexported": func(v string) string { + return strings.ToLower(string(v[0])) + v[1:] + }, + }). + Parse(eventStreamAPITmplDef), +) + +const eventStreamAPITmplDef = ` +{{- $esapi := $.EventStreamAPI }} +{{- $outputStream := $esapi.OutputStream }} +{{- $inputStream := $esapi.InputStream }} + +// {{ $esapi.Name }} provides the event stream handling for the {{ $.ExportedName }}. +// +// For testing and mocking the event stream this type should be initialized via +// the New{{ $esapi.Name }} constructor function. Using the functional options +// to pass in nested mock behavior. +type {{ $esapi.Name }} struct { + {{- if $inputStream }} + + // Writer is the EventStream writer for the {{ $inputStream.Name }} + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Writer. + // + // Must not be nil. 
+ Writer {{ $inputStream.StreamWriterAPIName }} + + inputWriter io.WriteCloser + {{- if eq .API.Metadata.Protocol "json" }} + input {{ $.InputRef.GoType }} + {{- end }} + {{- end }} + + {{- if $outputStream }} + + // Reader is the EventStream reader for the {{ $outputStream.Name }} + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader {{ $outputStream.StreamReaderAPIName }} + + outputReader io.ReadCloser + {{- if eq .API.Metadata.Protocol "json" }} + output {{ $.OutputRef.GoType }} + {{- end }} + {{- end }} + + {{- if $esapi.Legacy }} + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + {{- end }} + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// New{{ $esapi.Name }} initializes an {{ $esapi.Name }}. +// This function should only be used for testing and mocking the {{ $esapi.Name }} +// stream within your application. +{{- if $inputStream }} +// +// The Writer member must be set before writing events to the stream. +{{- end }} +{{- if $outputStream }} +// +// The Reader member must be set before reading events from the stream. +{{- end }} +{{- if $esapi.Legacy }} +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. http.Response.Body), that will be closed when the stream Close method +// is called. 
+{{- end }} +// +// es := New{{ $esapi.Name }}(func(o *{{ $esapi.Name}}{ +{{- if $inputStream }} +// es.Writer = myMockStreamWriter +{{- end }} +{{- if $outputStream }} +// es.Reader = myMockStreamReader +{{- end }} +{{- if $esapi.Legacy }} +// es.StreamCloser = myMockStreamCloser +{{- end }} +// }) +func New{{ $esapi.Name }}(opts ...func(*{{ $esapi.Name}})) *{{ $esapi.Name }} { + es := &{{ $esapi.Name }} { + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +{{- if $esapi.Legacy }} + + func (es *{{ $esapi.Name }}) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body + } +{{- end }} + +func (es *{{ $esapi.Name }}) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *{{ $esapi.Name }}) waitStreamPartClose() { + {{- if $inputStream }} + var inputErrCh <-chan struct{} + if v, ok := es.Writer.(interface{ErrorSet() <-chan struct{}}); ok { + inputErrCh = v.ErrorSet() + } + {{- end }} + {{- if $outputStream }} + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ErrorSet() <-chan struct{}}); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <- chan struct{} + if v, ok := es.Reader.(interface{Closed() <-chan struct{}}); ok { + outputClosedCh = v.Closed() + } + {{- end }} + + select { + case <-es.done: + + {{- if $inputStream }} + case <-inputErrCh: + es.err.SetError(es.Writer.Err()) + es.Close() + {{- end }} + + {{- if $outputStream }} + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + {{- end }} + } +} + +{{- if $inputStream }} + + {{- if eq .API.Metadata.Protocol "json" }} + + func {{ $esapi.StreamInputEventTypeGetterName }}(event {{ $inputStream.EventGroupName }}) (string, error) { + if _, ok := event.({{ $.InputRef.GoType }}); ok { 
+ return "initial-request", nil + } + return {{ $inputStream.StreamEventTypeGetterName }}(event) + } + {{- end }} + + func (es *{{ $esapi.Name }}) setupInputPipe(r *request.Request) { + inputReader, inputWriter := io.Pipe() + r.SetStreamingBody(inputReader) + es.inputWriter = inputWriter + } + + // Send writes the event to the stream blocking until the event is written. + // Returns an error if the event was not written. + // + // These events are: + // {{ range $_, $event := $inputStream.Events }} + // * {{ $event.Shape.ShapeName }} + {{- end }} + func (es *{{ $esapi.Name }}) Send(ctx aws.Context, event {{ $inputStream.EventGroupName }}) error { + return es.Writer.Send(ctx, event) + } + + func (es *{{ $esapi.Name }}) runInputStream(r *request.Request) { + var opts []func(*eventstream.Encoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.EncodeWithLogger(r.Config.Logger)) + } + var encoder eventstreamapi.Encoder = eventstream.NewEncoder(es.inputWriter, opts...) 
+ + var closer aws.MultiCloser + {{- if $.ShouldSignRequestBody }} + {{- $_ := $.API.AddSDKImport "aws/signer/v4" }} + sigSeed, err := v4.GetSignedRequestSignature(r.HTTPRequest) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "unable to get initial request's signature", err) + return + } + signer := eventstreamapi.NewSignEncoder( + v4.NewStreamSigner(r.ClientInfo.SigningRegion, r.ClientInfo.SigningName, + sigSeed, r.Config.Credentials), + encoder, + ) + encoder = signer + closer = append(closer, signer) + {{- end }} + closer = append(closer, es.inputWriter) + + eventWriter := eventstreamapi.NewEventWriter(encoder, + protocol.HandlerPayloadMarshal{ + Marshalers: r.Handlers.BuildStream, + }, + {{- if eq .API.Metadata.Protocol "json" }} + {{ $esapi.StreamInputEventTypeGetterName }}, + {{- else }} + {{ $inputStream.StreamEventTypeGetterName }}, + {{- end }} + ) + + es.Writer = &{{ $inputStream.StreamWriterImplName }}{ + StreamWriter: eventstreamapi.NewStreamWriter(eventWriter, closer), + } + } + + {{- if eq .API.Metadata.Protocol "json" }} + func (es *{{ $esapi.Name }}) sendInitialEvent(r *request.Request) { + if err := es.Send(es.input); err != nil { + r.Error = err + } + } + {{- end }} +{{- end }} + +{{- if $outputStream }} + {{- if eq .API.Metadata.Protocol "json" }} + + type {{ $esapi.StreamOutputUnmarshalerForEventName }} struct { + unmarshalerForEvent func(string) (eventstreamapi.Unmarshaler, error) + output {{ $.OutputRef.GoType }} + } + func (e {{ $esapi.StreamOutputUnmarshalerForEventName }}) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + if eventType == "initial-response" { + return e.output, nil + } + return e.unmarshalerForEvent(eventType) + } + {{- end }} + + // Events returns a channel to read events from. 
+ // + // These events are: + // {{ range $_, $event := $outputStream.Events }} + // * {{ $event.Shape.ShapeName }} + {{- end }} + // * {{ $outputStream.StreamUnknownEventName }} + func (es *{{ $esapi.Name }}) Events() <-chan {{ $outputStream.EventGroupName }} { + return es.Reader.Events() + } + + func (es *{{ $esapi.Name }}) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := {{ $outputStream.StreamUnmarshalerForEventName }}{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + {{- if eq .API.Metadata.Protocol "json" }} + unmarshalerForEvent = {{ $esapi.StreamOutputUnmarshalerForEventName }}{ + unmarshalerForEvent: unmarshalerForEvent, + output: es.output, + }.UnmarshalerForEventName + {{- end }} + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = {{ $outputStream.StreamReaderImplConstructorName }}(eventReader) + } + + {{- if eq .API.Metadata.Protocol "json" }} + func (es *{{ $esapi.Name }}) recvInitialEvent(r *request.Request) { + // Wait for the initial response event, which must be the first + // event to be received from the API. 
+ select { + case event, ok := <- es.Events(): + if !ok { + return + } + + v, ok := event.({{ $.OutputRef.GoType }}) + if !ok || v == nil { + r.Error = awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("invalid event, %T, expect %T, %v", + event, ({{ $.OutputRef.GoType }})(nil), v), + nil, + ) + return + } + + *es.output = *v + es.output.{{ $.EventStreamAPI.OutputMemberName }} = es + } + } + {{- end }} +{{- end }} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +{{- if $inputStream }} +// +// Will close the underlying EventStream writer, and no more events can be +// sent. +{{- end }} +{{- if $outputStream }} +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +{{- end }} +// +func (es *{{ $esapi.Name }}) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *{{ $esapi.Name }}) safeClose() { + if es.done != nil { + close(es.done) + } + + {{- if $inputStream }} + + t := time.NewTicker(time.Second) + defer t.Stop() + writeCloseDone := make(chan error) + go func() { + if err := es.Writer.Close(); err != nil { + es.err.SetError(err) + } + close(writeCloseDone) + }() + select { + case <-t.C: + case <-writeCloseDone: + } + if es.inputWriter != nil { + es.inputWriter.Close() + } + {{- end }} + + {{- if $outputStream }} + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + {{- end }} + + {{- if $esapi.Legacy }} + + es.StreamCloser.Close() + {{- end }} +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. 
+func (es *{{ $esapi.Name }}) Err() error { + if err := es.err.Err(); err != nil { + return err + } + + {{- if $inputStream }} + if err := es.Writer.Err(); err != nil { + return err + } + {{- end }} + + {{- if $outputStream }} + if err := es.Reader.Err(); err != nil { + return err + } + {{- end }} + + return nil +} +` + +func renderEventStreamShape(w io.Writer, s *Shape) error { + // Imports needed by the EventStream APIs. + s.API.AddImport("fmt") + s.API.AddImport("bytes") + s.API.AddImport("io") + s.API.AddImport("sync") + s.API.AddSDKImport("aws") + s.API.AddSDKImport("aws/awserr") + s.API.AddSDKImport("private/protocol/eventstream") + s.API.AddSDKImport("private/protocol/eventstream/eventstreamapi") + + return eventStreamShapeTmpl.Execute(w, s) +} + +var eventStreamShapeTmpl = func() *template.Template { + t := template.Must( + template.New("eventStreamShapeTmplDef"). + Parse(eventStreamShapeTmplDef), + ) + template.Must( + t.AddParseTree( + "eventStreamShapeWriterTmpl", eventStreamShapeWriterTmpl.Tree), + ) + template.Must( + t.AddParseTree( + "eventStreamShapeReaderTmpl", eventStreamShapeReaderTmpl.Tree), + ) + + return t +}() + +const eventStreamShapeTmplDef = ` +{{- $eventStream := $.EventStream }} +{{- $eventStreamEventGroup := printf "%sEvent" $eventStream.Name }} + +// {{ $eventStreamEventGroup }} groups together all EventStream +// events writes for {{ $eventStream.Name }}. +// +// These events are: +// {{ range $_, $event := $eventStream.Events }} +// * {{ $event.Shape.ShapeName }} +{{- end }} +type {{ $eventStreamEventGroup }} interface { + event{{ $eventStream.Name }}() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +{{- if $.IsInputEventStream }} + {{- template "eventStreamShapeWriterTmpl" $ }} +{{- end }} + +{{- if $.IsOutputEventStream }} + {{- template "eventStreamShapeReaderTmpl" $ }} +{{- end }} +` + +// EventStreamHeaderTypeMap provides the mapping of a EventStream Header's +// Value type to the shape reference's member type. 
+type EventStreamHeaderTypeMap struct { + Header string + Member string +} + +// Returns if the event has any members which are not the event's blob payload, +// nor a header. +func eventHasNonBlobPayloadMembers(s *Shape) bool { + num := len(s.MemberRefs) + for _, ref := range s.MemberRefs { + if ref.IsEventHeader || (ref.IsEventPayload && (ref.Shape.Type == "blob" || ref.Shape.Type == "string")) { + num-- + } + } + return num > 0 +} + +func setEventHeaderValueForType(s *Shape, memVar string) string { + switch s.Type { + case "blob": + return fmt.Sprintf("eventstream.BytesValue(%s)", memVar) + case "string": + return fmt.Sprintf("eventstream.StringValue(*%s)", memVar) + case "boolean": + return fmt.Sprintf("eventstream.BoolValue(*%s)", memVar) + case "byte": + return fmt.Sprintf("eventstream.Int8Value(int8(*%s))", memVar) + case "short": + return fmt.Sprintf("eventstream.Int16Value(int16(*%s))", memVar) + case "integer": + return fmt.Sprintf("eventstream.Int32Value(int32(*%s))", memVar) + case "long": + return fmt.Sprintf("eventstream.Int64Value(*%s)", memVar) + case "float": + return fmt.Sprintf("eventstream.Float32Value(float32(*%s))", memVar) + case "double": + return fmt.Sprintf("eventstream.Float64Value(*%s)", memVar) + case "timestamp": + return fmt.Sprintf("eventstream.TimestampValue(*%s)", memVar) + default: + panic(fmt.Sprintf("value type %s not supported for event headers, %s", s.Type, s.ShapeName)) + } +} + +func shapeMessageType(s *Shape) string { + if s.Exception { + return "eventstreamapi.ExceptionMessageType" + } + return "eventstreamapi.EventMessageType" +} + +var eventStreamEventShapeTmplFuncs = template.FuncMap{ + "EventStreamHeaderTypeMap": func(ref *ShapeRef) EventStreamHeaderTypeMap { + switch ref.Shape.Type { + case "boolean": + return EventStreamHeaderTypeMap{Header: "bool", Member: "bool"} + case "byte": + return EventStreamHeaderTypeMap{Header: "int8", Member: "int64"} + case "short": + return EventStreamHeaderTypeMap{Header: "int16", 
Member: "int64"} + case "integer": + return EventStreamHeaderTypeMap{Header: "int32", Member: "int64"} + case "long": + return EventStreamHeaderTypeMap{Header: "int64", Member: "int64"} + case "timestamp": + return EventStreamHeaderTypeMap{Header: "time.Time", Member: "time.Time"} + case "blob": + return EventStreamHeaderTypeMap{Header: "[]byte", Member: "[]byte"} + case "string": + return EventStreamHeaderTypeMap{Header: "string", Member: "string"} + case "uuid": + return EventStreamHeaderTypeMap{Header: "[]byte", Member: "[]byte"} + default: + panic("unsupported EventStream header type, " + ref.Shape.Type) + } + }, + "EventHeaderValueForType": setEventHeaderValueForType, + "ShapeMessageType": shapeMessageType, + "HasNonBlobPayloadMembers": eventHasNonBlobPayloadMembers, +} + +// Template for an EventStream Event shape. This is a normal API shape that is +// decorated as an EventStream Event. +// +// Executed in the context of a Shape. +var eventStreamEventShapeTmpl = template.Must(template.New("eventStreamEventShapeTmpl"). + Funcs(eventStreamEventShapeTmplFuncs).Parse(` +{{ range $_, $eventStream := $.EventFor }} + // The {{ $.ShapeName }} is and event in the {{ $eventStream.Name }} group of events. + func (s *{{ $.ShapeName }}) event{{ $eventStream.Name }}() {} +{{ end }} + +// UnmarshalEvent unmarshals the EventStream Message into the {{ $.ShapeName }} value. +// This method is only used internally within the SDK's EventStream handling. 
+func (s *{{ $.ShapeName }}) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + {{- range $memName, $memRef := $.MemberRefs }} + {{- if $memRef.IsEventHeader }} + if hv := msg.Headers.Get("{{ $memName }}"); hv != nil { + {{ $types := EventStreamHeaderTypeMap $memRef -}} + v := hv.Get().({{ $types.Header }}) + {{- if ne $types.Header $types.Member }} + m := {{ $types.Member }}(v) + s.{{ $memName }} = {{ if $memRef.UseIndirection }}&{{ end }}m + {{- else }} + s.{{ $memName }} = {{ if $memRef.UseIndirection }}&{{ end }}v + {{- end }} + } + {{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "blob")) }} + s.{{ $memName }} = make([]byte, len(msg.Payload)) + copy(s.{{ $memName }}, msg.Payload) + {{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "string")) }} + s.{{ $memName }} = aws.String(string(msg.Payload)) + {{- end }} + {{- end }} + {{- if HasNonBlobPayloadMembers $ }} + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + {{- end }} + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
+func (s *{{ $.ShapeName}}) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue({{ ShapeMessageType $ }})) + + {{- range $memName, $memRef := $.MemberRefs }} + {{- if $memRef.IsEventHeader }} + {{ $memVar := printf "s.%s" $memName -}} + {{ $typedMem := EventHeaderValueForType $memRef.Shape $memVar -}} + msg.Headers.Set("{{ $memName }}", {{ $typedMem }}) + {{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "blob")) }} + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.{{ $memName }} + {{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "string")) }} + msg.Payload = []byte(aws.StringValue(s.{{ $memName }})) + {{- end }} + {{- end }} + {{- if HasNonBlobPayloadMembers $ }} + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + {{- end }} + return msg, err +} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_reader.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_reader.go new file mode 100644 index 00000000000..3e5caf34dad --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_reader.go @@ -0,0 +1,158 @@ +// +build codegen + +package api + +import "text/template" + +var eventStreamShapeReaderTmpl = template.Must(template.New("eventStreamShapeReaderTmpl"). + Funcs(template.FuncMap{}). + Parse(` +{{- $es := $.EventStream }} + +// {{ $es.StreamReaderAPIName }} provides the interface for reading to the stream. The +// default implementation for this interface will be {{ $.ShapeName }}. +// +// The reader's Close method must allow multiple concurrent calls. 
+// +// These events are: +// {{ range $_, $event := $es.Events }} +// * {{ $event.Shape.ShapeName }} +{{- end }} +// * {{ $es.StreamUnknownEventName }} +type {{ $es.StreamReaderAPIName }} interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan {{ $es.EventGroupName }} + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type {{ $es.StreamReaderImplName }} struct { + eventReader *eventstreamapi.EventReader + stream chan {{ $es.EventGroupName }} + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func {{ $es.StreamReaderImplConstructorName }}(eventReader *eventstreamapi.EventReader) *{{ $es.StreamReaderImplName }} { + r := &{{ $es.StreamReaderImplName }}{ + eventReader: eventReader, + stream: make(chan {{ $es.EventGroupName }}), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + go r.readEventStream() + + return r +} + +// Close will close the underlying event stream reader. 
+func (r *{{ $es.StreamReaderImplName }}) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *{{ $es.StreamReaderImplName }}) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *{{ $es.StreamReaderImplName }}) Closed() <-chan struct{} { + return r.done +} + +func (r *{{ $es.StreamReaderImplName }}) safeClose() { + close(r.done) +} + +func (r *{{ $es.StreamReaderImplName }}) Err() error { + return r.err.Err() +} + +func (r *{{ $es.StreamReaderImplName }}) Events() <-chan {{ $es.EventGroupName }} { + return r.stream +} + +func (r *{{ $es.StreamReaderImplName }}) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.({{ $es.EventGroupName }}): + case <-r.done: + return + } + } +} + +type {{ $es.StreamUnmarshalerForEventName }} struct { + metadata protocol.ResponseMetadata +} + +func (u {{ $es.StreamUnmarshalerForEventName }}) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + switch eventType { + {{- range $_, $event := $es.Events }} + case {{ printf "%q" $event.Name }}: + return &{{ $event.Shape.ShapeName }}{}, nil + {{- end }} + {{- range $_, $event := $es.Exceptions }} + case {{ printf "%q" $event.Name }}: + return newError{{ $event.Shape.ShapeName }}(u.metadata).(eventstreamapi.Unmarshaler), nil + {{- end }} + default: + return &{{ $es.StreamUnknownEventName }}{Type: eventType}, nil + } +} + +// {{ $es.StreamUnknownEventName }} provides a failsafe event for the +// {{ $es.Name }} group of events when an unknown event is received. 
+type {{ $es.StreamUnknownEventName }} struct { + Type string + Message eventstream.Message +} + +// The {{ $es.StreamUnknownEventName }} is and event in the {{ $es.Name }} +// group of events. +func (s *{{ $es.StreamUnknownEventName }}) event{{ $es.Name }}() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *{{ $es.StreamUnknownEventName }}) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the {{ $.ShapeName }} value. +// This method is only used internally within the SDK's EventStream handling. +func (e *{{ $es.StreamUnknownEventName }}) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_readertests.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_readertests.go new file mode 100644 index 00000000000..cb79741e1b2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_readertests.go @@ -0,0 +1,439 @@ +// +build codegen + +package api + +import ( + "text/template" +) + +var eventStreamReaderTestTmpl = template.Must( + template.New("eventStreamReaderTestTmpl").Funcs(template.FuncMap{ + "ValueForType": valueForType, + "HasNonBlobPayloadMembers": eventHasNonBlobPayloadMembers, + "EventHeaderValueForType": setEventHeaderValueForType, + "Map": templateMap, + "OptionalAddInt": func(do bool, a, b int) int { + if !do { + return a + } + return a + b + }, + "HasNonEventStreamMember": func(s *Shape) bool { + for _, ref := range s.MemberRefs { + if !ref.Shape.IsEventStream { + return true + } + } + return false + }, + }).Parse(` +{{ range $opName, $op := $.Operations }} + {{ if 
$op.EventStreamAPI }} + {{ if $op.EventStreamAPI.OutputStream }} + {{ template "event stream outputStream tests" $op.EventStreamAPI }} + {{ end }} + {{ end }} +{{ end }} + +type loopReader struct { + source *bytes.Reader +} + +func (c *loopReader) Read(p []byte) (int, error) { + if c.source.Len() == 0 { + c.source.Seek(0, 0) + } + + return c.source.Read(p) +} + +{{ define "event stream outputStream tests" }} + func Test{{ $.Operation.ExportedName }}_Read(t *testing.T) { + expectEvents, eventMsgs := mock{{ $.Operation.ExportedName }}ReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- if HasNonEventStreamMember $.Operation.OutputRef.Shape }} + expectResp := expectEvents[0].(*{{ $.Operation.OutputRef.Shape.ShapeName }}) + {{- range $name, $ref := $.Operation.OutputRef.Shape.MemberRefs }} + {{- if not $ref.Shape.IsEventStream }} + if e, a := expectResp.{{ $name }}, resp.{{ $name }}; !reflect.DeepEqual(e,a) { + t.Errorf("expect %v, got %v", e, a) + } + {{- end }} + {{- end }} + {{- end }} + // Trim off response output type pseudo event so only event messages remain. 
+ expectEvents = expectEvents[1:] + {{ end }} + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } + } + + func Test{{ $.Operation.ExportedName }}_ReadClose(t *testing.T) { + _, eventMsgs := mock{{ $.Operation.ExportedName }}ReadEvents() + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + {{ if gt (len $.OutputStream.Events) 0 -}} + // Assert calling Err before close does not close the stream. 
+ resp.GetStream().Err() + select { + case _, ok := <-resp.GetStream().Events(): + if !ok { + t.Fatalf("expect stream not to be closed, but was") + } + default: + } + {{- end }} + + resp.GetStream().Close() + <-resp.GetStream().Events() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } + } + + func Test{{ $.Operation.ExportedName }}_ReadUnknownEvent(t *testing.T) { + expectEvents, eventMsgs := mock{{ $.Operation.ExportedName }}ReadEvents() + + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + eventOffset := 1 + {{- else }} + var eventOffset int + {{- end }} + + unknownEvent := eventstream.Message{ + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("UnknownEventName"), + }, + }, + Payload: []byte("some unknown event"), + } + + eventMsgs = append(eventMsgs[:eventOffset], + append([]eventstream.Message{unknownEvent}, eventMsgs[eventOffset:]...)...) + + expectEvents = append(expectEvents[:eventOffset], + append([]{{ $.OutputStream.Name }}Event{ + &{{ $.OutputStream.StreamUnknownEventName }}{ + Type: "UnknownEventName", + Message: unknownEvent, + }, + }, + expectEvents[eventOffset:]...)...) + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + defer resp.GetStream().Close() + + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + // Trim off response output type pseudo event so only event messages remain. 
+ expectEvents = expectEvents[1:] + {{ end }} + + var i int + for event := range resp.GetStream().Events() { + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } + } + + func Benchmark{{ $.Operation.ExportedName }}_Read(b *testing.B) { + _, eventMsgs := mock{{ $.Operation.ExportedName }}ReadEvents() + var buf bytes.Buffer + encoder := eventstream.NewEncoder(&buf) + for _, msg := range eventMsgs { + if err := encoder.Encode(msg); err != nil { + b.Fatalf("failed to encode message, %v", err) + } + } + stream := &loopReader{source: bytes.NewReader(buf.Bytes())} + + sess := unit.Session + svc := New(sess, &aws.Config{ + Endpoint: aws.String("https://example.com"), + DisableParamValidation: aws.Bool(true), + }) + svc.Handlers.Send.Swap(corehandlers.SendHandler.Name, + request.NamedHandler{Name: "mockSend", + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Status: "200 OK", + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(stream), + } + }, + }, + ) + + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + b.Fatalf("failed to create request, %v", err) + } + defer resp.GetStream().Close() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err = resp.GetStream().Err(); err != nil { + b.Fatalf("expect no error, got %v", err) + } + event := <-resp.GetStream().Events() + if event == nil { + b.Fatalf("expect event, got nil, %v, %d", resp.GetStream().Err(), i) + } + } + } + + func mock{{ $.Operation.ExportedName }}ReadEvents() ( + []{{ $.OutputStream.Name }}Event, + []eventstream.Message, + ) { + expectEvents := []{{ $.OutputStream.Name }}Event { + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- template "set event type" $.Operation.OutputRef.Shape }} + {{- end }} + {{- 
range $_, $event := $.OutputStream.Events }} + {{- template "set event type" $event.Shape }} + {{- end }} + } + + var marshalers request.HandlerList + marshalers.PushBackNamed({{ $.API.ProtocolPackage }}.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + _ = payloadMarshaler + + eventMsgs := []eventstream.Message{ + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- template "set event message" Map "idx" 0 "parentShape" $.Operation.OutputRef.Shape "eventName" "initial-response" }} + {{- end }} + {{- range $idx, $event := $.OutputStream.Events }} + {{- $offsetIdx := OptionalAddInt (eq $.Operation.API.Metadata.Protocol "json") $idx 1 }} + {{- template "set event message" Map "idx" $offsetIdx "parentShape" $event.Shape "eventName" $event.Name }} + {{- end }} + } + + return expectEvents, eventMsgs + } + + {{- if $.OutputStream.Exceptions }} + func Test{{ $.Operation.ExportedName }}_ReadException(t *testing.T) { + expectEvents := []{{ $.OutputStream.Name }}Event { + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- template "set event type" $.Operation.OutputRef.Shape }} + {{- end }} + + {{- $exception := index $.OutputStream.Exceptions 0 }} + {{- template "set event type" $exception.Shape }} + } + + var marshalers request.HandlerList + marshalers.PushBackNamed({{ $.API.ProtocolPackage }}.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + + eventMsgs := []eventstream.Message{ + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- template "set event message" Map "idx" 0 "parentShape" $.Operation.OutputRef.Shape "eventName" "initial-response" }} + {{- end }} + + {{- $offsetIdx := OptionalAddInt (eq $.Operation.API.Metadata.Protocol "json") 0 1 }} + {{- $exception := index $.OutputStream.Exceptions 0 }} + {{- template "set event message" Map "idx" $offsetIdx "parentShape" $exception.Shape "eventName" $exception.Name }} + } + + sess, cleanupFn, err := 
eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + Events: eventMsgs, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + defer resp.GetStream().Close() + + <-resp.GetStream().Events() + + err = resp.GetStream().Err() + if err == nil { + t.Fatalf("expect err, got none") + } + + expectErr := {{ ValueForType $exception.Shape nil }} + aerr, ok := err.(awserr.Error) + if !ok { + t.Errorf("expect exception, got %T, %#v", err, err) + } + if e, a := expectErr.Code(), aerr.Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := expectErr.Message(), aerr.Message(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + + if e, a := expectErr, aerr; !reflect.DeepEqual(e, a) { + t.Errorf("expect error %+#v, got %+#v", e, a) + } + } + + {{- range $_, $exception := $.OutputStream.Exceptions }} + var _ awserr.Error = (*{{ $exception.Shape.ShapeName }})(nil) + {{- end }} + + {{ end }} +{{ end }} + +{{/* Params: *Shape */}} +{{ define "set event type" }} + &{{ $.ShapeName }}{ + {{- if $.Exception }} + RespMetadata: protocol.ResponseMetadata{ + StatusCode: 200, + }, + {{- end }} + {{- range $memName, $memRef := $.MemberRefs }} + {{- if not $memRef.Shape.IsEventStream }} + {{ $memName }}: {{ ValueForType $memRef.Shape nil }}, + {{- end }} + {{- end }} + }, +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, eventName:string */}} +{{ define "set event message" }} + { + Headers: eventstream.Headers{ + {{- if $.parentShape.Exception }} + eventstreamtest.EventExceptionTypeHeader, + { + Name: eventstreamapi.ExceptionTypeHeader, + Value: eventstream.StringValue("{{ $.eventName }}"), + }, + {{- else }} + eventstreamtest.EventMessageTypeHeader, + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("{{ $.eventName }}"), + }, 
+ {{- end }} + {{- range $memName, $memRef := $.parentShape.MemberRefs }} + {{- template "set event message header" Map "idx" $.idx "parentShape" $.parentShape "memName" $memName "memRef" $memRef }} + {{- end }} + }, + {{- template "set event message payload" Map "idx" $.idx "parentShape" $.parentShape }} + }, +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}} +{{ define "set event message header" }} + {{- if $.memRef.IsEventHeader }} + { + Name: "{{ $.memName }}", + {{- $shapeValueVar := printf "expectEvents[%d].(%s).%s" $.idx $.parentShape.GoType $.memName }} + Value: {{ EventHeaderValueForType $.memRef.Shape $shapeValueVar }}, + }, + {{- end }} +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}} +{{ define "set event message payload" }} + {{- $payloadMemName := $.parentShape.PayloadRefName }} + {{- if HasNonBlobPayloadMembers $.parentShape }} + Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, expectEvents[{{ $.idx }}]), + {{- else if $payloadMemName }} + {{- $shapeType := (index $.parentShape.MemberRefs $payloadMemName).Shape.Type }} + {{- if eq $shapeType "blob" }} + Payload: expectEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }}, + {{- else if eq $shapeType "string" }} + Payload: []byte(*expectEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }}), + {{- end }} + {{- end }} +{{- end }} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_tests.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_tests.go new file mode 100644 index 00000000000..73345f39df3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_tests.go @@ -0,0 +1,137 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "strings" +) + +// APIEventStreamTestGoCode generates Go code for EventStream operation tests. 
+func (a *API) APIEventStreamTestGoCode() string { + var buf bytes.Buffer + + a.resetImports() + a.AddImport("bytes") + a.AddImport("io/ioutil") + a.AddImport("net/http") + a.AddImport("reflect") + a.AddImport("testing") + a.AddImport("time") + a.AddImport("context") + a.AddImport("strings") + a.AddImport("sync") + a.AddSDKImport("aws") + a.AddSDKImport("aws/corehandlers") + a.AddSDKImport("aws/request") + a.AddSDKImport("aws/awserr") + a.AddSDKImport("awstesting/unit") + a.AddSDKImport("private/protocol") + a.AddSDKImport("private/protocol/", a.ProtocolPackage()) + a.AddSDKImport("private/protocol/eventstream") + a.AddSDKImport("private/protocol/eventstream/eventstreamapi") + a.AddSDKImport("private/protocol/eventstream/eventstreamtest") + + unused := ` + var _ time.Time + var _ awserr.Error + var _ context.Context + var _ sync.WaitGroup + var _ strings.Reader + ` + + if err := eventStreamReaderTestTmpl.Execute(&buf, a); err != nil { + panic(err) + } + + if err := eventStreamWriterTestTmpl.Execute(&buf, a); err != nil { + panic(err) + } + + return a.importsGoCode() + unused + strings.TrimSpace(buf.String()) +} + +func templateMap(args ...interface{}) map[string]interface{} { + if len(args)%2 != 0 { + panic(fmt.Sprintf("invalid map call, non-even args %v", args)) + } + + m := map[string]interface{}{} + for i := 0; i < len(args); i += 2 { + k, ok := args[i].(string) + if !ok { + panic(fmt.Sprintf("invalid map call, arg is not string, %T, %v", args[i], args[i])) + } + m[k] = args[i+1] + } + + return m +} + +func valueForType(s *Shape, visited []string) string { + for _, v := range visited { + if v == s.ShapeName { + return "nil" + } + } + + visited = append(visited, s.ShapeName) + + switch s.Type { + case "blob": + return `[]byte("blob value goes here")` + case "string": + return `aws.String("string value goes here")` + case "boolean": + return `aws.Bool(true)` + case "byte": + return `aws.Int64(1)` + case "short": + return `aws.Int64(12)` + case "integer": + return 
`aws.Int64(123)` + case "long": + return `aws.Int64(1234)` + case "float": + return `aws.Float64(123.4)` + case "double": + return `aws.Float64(123.45)` + case "timestamp": + return `aws.Time(time.Unix(1396594860, 0).UTC())` + case "structure": + w := bytes.NewBuffer(nil) + fmt.Fprintf(w, "&%s{\n", s.ShapeName) + if s.Exception { + fmt.Fprintf(w, `RespMetadata: protocol.ResponseMetadata{ + StatusCode: 200, +}, +`) + } + for _, refName := range s.MemberNames() { + fmt.Fprintf(w, "%s: %s,\n", refName, valueForType(s.MemberRefs[refName].Shape, visited)) + } + fmt.Fprintf(w, "}") + return w.String() + case "list": + w := bytes.NewBuffer(nil) + fmt.Fprintf(w, "%s{\n", s.GoType()) + for i := 0; i < 3; i++ { + fmt.Fprintf(w, "%s,\n", valueForType(s.MemberRef.Shape, visited)) + } + fmt.Fprintf(w, "}") + return w.String() + + case "map": + w := bytes.NewBuffer(nil) + fmt.Fprintf(w, "%s{\n", s.GoType()) + for _, k := range []string{"a", "b", "c"} { + fmt.Fprintf(w, "%q: %s,\n", k, valueForType(s.ValueRef.Shape, visited)) + } + fmt.Fprintf(w, "}") + return w.String() + + default: + panic(fmt.Sprintf("valueForType does not support %s, %s", s.ShapeName, s.Type)) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writer.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writer.go new file mode 100644 index 00000000000..bb043cbf207 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writer.go @@ -0,0 +1,59 @@ +// +build codegen + +package api + +import "text/template" + +var eventStreamShapeWriterTmpl = template.Must(template.New("eventStreamShapeWriterTmpl"). + Funcs(template.FuncMap{}). + Parse(` +{{- $es := $.EventStream }} + +// {{ $es.StreamWriterAPIName }} provides the interface for writing events to the stream. +// The default implementation for this interface will be {{ $.ShapeName }}. +// +// The writer's Close method must allow multiple concurrent calls. 
+// +// These events are: +// {{ range $_, $event := $es.Events }} +// * {{ $event.Shape.ShapeName }} +{{- end }} +type {{ $es.StreamWriterAPIName }} interface { + // Sends writes events to the stream blocking until the event has been + // written. An error is returned if the write fails. + Send(aws.Context, {{ $es.EventGroupName }}) error + + // Close will stop the writer writing to the event stream. + Close() error + + // Returns any error that has occurred while writing to the event stream. + Err() error +} + +type {{ $es.StreamWriterImplName }} struct { + *eventstreamapi.StreamWriter +} + +func (w *{{ $es.StreamWriterImplName }}) Send(ctx aws.Context, event {{ $es.EventGroupName }}) error { + return w.StreamWriter.Send(ctx, event) +} + +func {{ $es.StreamEventTypeGetterName }}(event eventstreamapi.Marshaler) (string, error) { + switch event.(type) { + {{- range $_, $event := $es.Events }} + case *{{ $event.Shape.ShapeName }}: + return {{ printf "%q" $event.Name }}, nil + {{- end }} + {{- range $_, $event := $es.Exceptions }} + case *{{ $event.Shape.ShapeName }}: + return {{ printf "%q" $event.Name }}, nil + {{- end }} + default: + return "", awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type, %T, for {{ $es.Name }}", event), + nil, + ) + } +} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writertests.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writertests.go new file mode 100644 index 00000000000..b126dbdf58a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/eventstream_tmpl_writertests.go @@ -0,0 +1,279 @@ +// +build codegen + +package api + +import ( + "text/template" +) + +var eventStreamWriterTestTmpl = template.Must( + template.New("eventStreamWriterTestTmpl").Funcs(template.FuncMap{ + "ValueForType": valueForType, + "HasNonBlobPayloadMembers": eventHasNonBlobPayloadMembers, + "EventHeaderValueForType": 
setEventHeaderValueForType, + "Map": templateMap, + "OptionalAddInt": func(do bool, a, b int) int { + if !do { + return a + } + return a + b + }, + "HasNonEventStreamMember": func(s *Shape) bool { + for _, ref := range s.MemberRefs { + if !ref.Shape.IsEventStream { + return true + } + } + return false + }, + }).Parse(` +{{ range $opName, $op := $.Operations }} + {{ if $op.EventStreamAPI }} + {{ if $op.EventStreamAPI.InputStream }} + {{ template "event stream inputStream tests" $op.EventStreamAPI }} + {{ end }} + {{ end }} +{{ end }} + +{{ define "event stream inputStream tests" }} + func Test{{ $.Operation.ExportedName }}_Write(t *testing.T) { + clientEvents, expectedClientEvents := mock{{ $.Operation.ExportedName }}WriteEvents() + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + &eventstreamtest.ServeEventStream{ + T: t, + ClientEvents: expectedClientEvents, + BiDirectional: true, + }, + true) + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + stream := resp.GetStream() + + for _, event := range clientEvents { + err = stream.Send(context.Background(), event) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + } + + if err := stream.Close(); err != nil { + t.Errorf("expect no error, got %v", err) + } + } + + func Test{{ $.Operation.ExportedName }}_WriteClose(t *testing.T) { + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{T: t, BiDirectional: true}, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + // Assert calling Err before close does not close the stream. 
+ resp.GetStream().Err() + {{ $eventShape := index $.InputStream.Events 0 }} + err = resp.GetStream().Send(context.Background(), &{{ $eventShape.Shape.ShapeName }}{}) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + resp.GetStream().Close() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } + } + + func Test{{ $.Operation.ExportedName }}_WriteError(t *testing.T) { + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + eventstreamtest.ServeEventStream{ + T: t, + BiDirectional: true, + ForceCloseAfter: time.Millisecond * 500, + }, + true, + ) + if err != nil { + t.Fatalf("expect no error, %v", err) + } + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error got, %v", err) + } + + defer resp.GetStream().Close() + {{ $eventShape := index $.InputStream.Events 0 }} + for { + err = resp.GetStream().Send(context.Background(), &{{ $eventShape.Shape.ShapeName }}{}) + if err != nil { + if strings.Contains("unable to send event", err.Error()) { + t.Errorf("expected stream closed error, got %v", err) + } + break + } + } + } + + func Test{{ $.Operation.ExportedName }}_ReadWrite(t *testing.T) { + expectedServiceEvents, serviceEvents := mock{{ $.Operation.ExportedName }}ReadEvents() + clientEvents, expectedClientEvents := mock{{ $.Operation.ExportedName }}WriteEvents() + + sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t, + &eventstreamtest.ServeEventStream{ + T: t, + ClientEvents: expectedClientEvents, + Events: serviceEvents, + BiDirectional: true, + }, + true) + defer cleanupFn() + + svc := New(sess) + resp, err := svc.{{ $.Operation.ExportedName }}(nil) + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + stream := resp.GetStream() + defer stream.Close() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + var i int + for event := range resp.GetStream().Events() 
{ + if event == nil { + t.Errorf("%d, expect event, got nil", i) + } + if e, a := expectedServiceEvents[i], event; !reflect.DeepEqual(e, a) { + t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a) + } + i++ + } + }() + + for _, event := range clientEvents { + err = stream.Send(context.Background(), event) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + } + + resp.GetStream().Close() + + wg.Wait() + + if err := resp.GetStream().Err(); err != nil { + t.Errorf("expect no error, %v", err) + } + } + + func mock{{ $.Operation.ExportedName }}WriteEvents() ( + []{{ $.InputStream.Name }}Event, + []eventstream.Message, + ) { + inputEvents := []{{ $.InputStream.Name }}Event { + {{- if eq $.Operation.API.Metadata.Protocol "json" }} + {{- template "set event type" $.Operation.InputRef.Shape }} + {{- end }} + {{- range $_, $event := $.InputStream.Events }} + {{- template "set event type" $event.Shape }} + {{- end }} + } + + var marshalers request.HandlerList + marshalers.PushBackNamed({{ $.API.ProtocolPackage }}.BuildHandler) + payloadMarshaler := protocol.HandlerPayloadMarshal{ + Marshalers: marshalers, + } + _ = payloadMarshaler + + eventMsgs := []eventstream.Message{ + {{- range $idx, $event := $.InputStream.Events }} + {{- template "set event message" Map "idx" $idx "parentShape" $event.Shape "eventName" $event.Name }} + {{- end }} + } + + return inputEvents, eventMsgs + } +{{ end }} + +{{/* Params: *Shape */}} +{{ define "set event type" }} + &{{ $.ShapeName }}{ + {{- range $memName, $memRef := $.MemberRefs }} + {{- if not $memRef.Shape.IsEventStream }} + {{ $memName }}: {{ ValueForType $memRef.Shape nil }}, + {{- end }} + {{- end }} + }, +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, eventName:string */}} +{{ define "set event message" }} + { + Headers: eventstream.Headers{ + eventstreamtest.EventMessageTypeHeader, + {{- range $memName, $memRef := $.parentShape.MemberRefs }} + {{- template "set event message header" Map "idx" $.idx 
"parentShape" $.parentShape "memName" $memName "memRef" $memRef }} + {{- end }} + { + Name: eventstreamapi.EventTypeHeader, + Value: eventstream.StringValue("{{ $.eventName }}"), + }, + }, + {{- template "set event message payload" Map "idx" $.idx "parentShape" $.parentShape }} + }, +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}} +{{ define "set event message header" }} + {{- if (and ($.memRef.IsEventPayload) (eq $.memRef.Shape.Type "blob")) }} + { + Name: ":content-type", + Value: eventstream.StringValue("application/octet-stream"), + }, + {{- else if $.memRef.IsEventHeader }} + { + Name: "{{ $.memName }}", + {{- $shapeValueVar := printf "inputEvents[%d].(%s).%s" $.idx $.parentShape.GoType $.memName }} + Value: {{ EventHeaderValueForType $.memRef.Shape $shapeValueVar }}, + }, + {{- end }} +{{- end }} + +{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}} +{{ define "set event message payload" }} + {{- $payloadMemName := $.parentShape.PayloadRefName }} + {{- if HasNonBlobPayloadMembers $.parentShape }} + Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, inputEvents[{{ $.idx }}]), + {{- else if $payloadMemName }} + {{- $shapeType := (index $.parentShape.MemberRefs $payloadMemName).Shape.Type }} + {{- if eq $shapeType "blob" }} + Payload: inputEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }}, + {{- else if eq $shapeType "string" }} + Payload: []byte(*inputEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }}), + {{- end }} + {{- end }} +{{- end }} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/example.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/example.go new file mode 100644 index 00000000000..790920ed345 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/example.go @@ -0,0 +1,306 @@ +// +build codegen + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "os" 
+ "sort" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/util" +) + +type Examples map[string][]Example + +// ExamplesDefinition is the structural representation of the examples-1.json file +type ExamplesDefinition struct { + *API `json:"-"` + Examples Examples `json:"examples"` +} + +// Example is a single entry within the examples-1.json file. +type Example struct { + API *API `json:"-"` + Operation *Operation `json:"-"` + OperationName string `json:"-"` + Index string `json:"-"` + Builder examplesBuilder `json:"-"` + VisitedErrors map[string]struct{} `json:"-"` + Title string `json:"title"` + Description string `json:"description"` + ID string `json:"id"` + Comments Comments `json:"comments"` + Input map[string]interface{} `json:"input"` + Output map[string]interface{} `json:"output"` +} + +type Comments struct { + Input map[string]interface{} `json:"input"` + Output map[string]interface{} `json:"output"` +} + +var exampleFuncMap = template.FuncMap{ + "commentify": commentify, + "wrap": wrap, + "generateExampleInput": generateExampleInput, + "generateTypes": generateTypes, +} + +var exampleCustomizations = map[string]template.FuncMap{} + +var exampleTmpls = template.Must(template.New("example").Funcs(exampleFuncMap).Parse(` +{{ generateTypes . }} +{{ commentify (wrap .Title 80) }} +// +{{ commentify (wrap .Description 80) }} +func Example{{ .API.StructName }}_{{ .MethodName }}() { + svc := {{ .API.PackageName }}.New(session.New()) + input := {{ generateExampleInput . 
}} + + result, err := svc.{{ .OperationName }}(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + {{ range $_, $ref := .Operation.ErrorRefs -}} + {{ if not ($.HasVisitedError $ref) -}} + case {{ .API.PackageName }}.{{ $ref.Shape.ErrorCodeName }}: + fmt.Println({{ .API.PackageName }}.{{ $ref.Shape.ErrorCodeName }}, aerr.Error()) + {{ end -}} + {{ end -}} + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} +`)) + +// Names will return the name of the example. This will also be the name of the operation +// that is to be tested. +func (exs Examples) Names() []string { + names := make([]string, 0, len(exs)) + for k := range exs { + names = append(names, k) + } + + sort.Strings(names) + return names +} + +func (exs Examples) GoCode() string { + buf := bytes.NewBuffer(nil) + for _, opName := range exs.Names() { + examples := exs[opName] + for _, ex := range examples { + buf.WriteString(util.GoFmt(ex.GoCode())) + buf.WriteString("\n") + } + } + return buf.String() +} + +// ExampleCode will generate the example code for the given Example shape. +// TODO: Can delete +func (ex Example) GoCode() string { + var buf bytes.Buffer + m := exampleFuncMap + if fMap, ok := exampleCustomizations[ex.API.PackageName()]; ok { + m = fMap + } + tmpl := exampleTmpls.Funcs(m) + if err := tmpl.ExecuteTemplate(&buf, "example", &ex); err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +func generateExampleInput(ex Example) string { + if ex.Operation.HasInput() { + return fmt.Sprintf("&%s{\n%s\n}", + ex.Builder.GoType(&ex.Operation.InputRef, true), + ex.Builder.BuildShape(&ex.Operation.InputRef, ex.Input, false), + ) + } + return "" +} + +// generateTypes will generate no types for default examples, but customizations may +// require their own defined types. 
+func generateTypes(ex Example) string { + return "" +} + +// correctType will cast the value to the correct type when printing the string. +// This is due to the json decoder choosing numbers to be floats, but the shape may +// actually be an int. To counter this, we pass the shape's type and properly do the +// casting here. +func correctType(memName string, t string, value interface{}) string { + if value == nil { + return "" + } + + v := "" + switch value.(type) { + case string: + v = value.(string) + case int: + v = fmt.Sprintf("%d", value.(int)) + case float64: + if t == "integer" || t == "long" || t == "int64" { + v = fmt.Sprintf("%d", int(value.(float64))) + } else { + v = fmt.Sprintf("%f", value.(float64)) + } + case bool: + v = fmt.Sprintf("%t", value.(bool)) + } + + return convertToCorrectType(memName, t, v) +} + +func convertToCorrectType(memName, t, v string) string { + return fmt.Sprintf("%s: %s,\n", memName, getValue(t, v)) +} + +func getValue(t, v string) string { + if t[0] == '*' { + t = t[1:] + } + switch t { + case "string": + return fmt.Sprintf("aws.String(%q)", v) + case "integer", "long", "int64": + return fmt.Sprintf("aws.Int64(%s)", v) + case "float", "float64", "double": + return fmt.Sprintf("aws.Float64(%s)", v) + case "boolean": + return fmt.Sprintf("aws.Bool(%s)", v) + default: + panic("Unsupported type: " + t) + } +} + +// AttachExamples will create a new ExamplesDefinition from the examples file +// and reference the API object. 
+func (a *API) AttachExamples(filename string) error { + p := ExamplesDefinition{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + return err + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + return fmt.Errorf("failed to decode %s, err: %v", filename, err) + } + + return p.setup() +} + +var examplesBuilderCustomizations = map[string]examplesBuilder{ + "wafregional": NewWAFregionalExamplesBuilder(), +} + +func (p *ExamplesDefinition) setup() error { + var builder examplesBuilder + ok := false + if builder, ok = examplesBuilderCustomizations[p.API.PackageName()]; !ok { + builder = NewExamplesBuilder() + } + + keys := p.Examples.Names() + for _, n := range keys { + examples := p.Examples[n] + for i, e := range examples { + n = p.ExportableName(n) + e.OperationName = n + e.API = p.API + e.Index = fmt.Sprintf("shared%02d", i) + + e.Builder = builder + + e.VisitedErrors = map[string]struct{}{} + op := p.API.Operations[e.OperationName] + e.OperationName = p.ExportableName(e.OperationName) + e.Operation = op + p.Examples[n][i] = e + } + } + + p.API.Examples = p.Examples + + return nil +} + +var exampleHeader = template.Must(template.New("exampleHeader").Parse(` +import ( + {{ .Builder.Imports .API }} +) + +var _ time.Duration +var _ strings.Reader +var _ aws.Config + +func parseTime(layout, value string) *time.Time { + t, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return &t +} + +`)) + +type exHeader struct { + Builder examplesBuilder + API *API +} + +// ExamplesGoCode will return a code representation of the entry within the +// examples.json file. 
+func (a *API) ExamplesGoCode() string { + var buf bytes.Buffer + var builder examplesBuilder + ok := false + if builder, ok = examplesBuilderCustomizations[a.PackageName()]; !ok { + builder = NewExamplesBuilder() + } + + if err := exampleHeader.ExecuteTemplate(&buf, "exampleHeader", &exHeader{builder, a}); err != nil { + panic(err) + } + + code := a.Examples.GoCode() + if len(code) == 0 { + return "" + } + + buf.WriteString(code) + return buf.String() +} + +// TODO: In the operation docuentation where we list errors, this needs to be done +// there as well. +func (ex *Example) HasVisitedError(errRef *ShapeRef) bool { + errName := errRef.Shape.ErrorCodeName() + _, ok := ex.VisitedErrors[errName] + ex.VisitedErrors[errName] = struct{}{} + return ok +} + +func (ex *Example) MethodName() string { + return fmt.Sprintf("%s_%s", ex.OperationName, ex.Index) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder.go new file mode 100644 index 00000000000..ba1447ee2fa --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder.go @@ -0,0 +1,58 @@ +// +build codegen + +package api + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +type examplesBuilder interface { + BuildShape(*ShapeRef, map[string]interface{}, bool) string + BuildList(string, string, *ShapeRef, []interface{}) string + BuildComplex(string, string, *ShapeRef, *Shape, map[string]interface{}) string + GoType(*ShapeRef, bool) string + Imports(*API) string +} + +type defaultExamplesBuilder struct { + ShapeValueBuilder +} + +// NewExamplesBuilder returns an initialized example builder for generating +// example input API shapes from a model. 
+func NewExamplesBuilder() defaultExamplesBuilder { + b := defaultExamplesBuilder{ + ShapeValueBuilder: NewShapeValueBuilder(), + } + b.ParseTimeString = parseExampleTimeString + return b +} + +func (builder defaultExamplesBuilder) Imports(a *API) string { + return `"fmt" + "strings" + "time" + + "` + SDKImportRoot + `/aws" + "` + SDKImportRoot + `/aws/awserr" + "` + SDKImportRoot + `/aws/session" + "` + a.ImportPath() + `" + ` +} + +// Returns a string which assigns the value of a time member by calling +// parseTime function defined in the file +func parseExampleTimeString(ref *ShapeRef, memName, v string) string { + if ref.Location == "header" { + return fmt.Sprintf("%s: parseTime(%q, %q),\n", memName, protocol.RFC822TimeFormat, v) + } + + switch ref.API.Metadata.Protocol { + case "json", "rest-json", "rest-xml", "ec2", "query": + return fmt.Sprintf("%s: parseTime(%q, %q),\n", memName, protocol.ISO8601TimeFormat, v) + default: + panic("Unsupported time type: " + ref.API.Metadata.Protocol) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder_customizations.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder_customizations.go new file mode 100644 index 00000000000..aac099e1aae --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/examples_builder_customizations.go @@ -0,0 +1,23 @@ +// +build codegen + +package api + +type wafregionalExamplesBuilder struct { + defaultExamplesBuilder +} + +func NewWAFregionalExamplesBuilder() wafregionalExamplesBuilder { + return wafregionalExamplesBuilder{defaultExamplesBuilder: NewExamplesBuilder()} +} +func (builder wafregionalExamplesBuilder) Imports(a *API) string { + return `"fmt" + "strings" + "time" + + "` + SDKImportRoot + `/aws" + "` + SDKImportRoot + `/aws/awserr" + "` + SDKImportRoot + `/aws/session" + "` + SDKImportRoot + `/service/waf" + "` + SDKImportRoot + `/service/` + a.PackageName() + `" + ` +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go new file mode 100644 index 00000000000..124e9008ea7 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/exportable_name.go @@ -0,0 +1,14 @@ +// +build codegen + +package api + +import "strings" + +// ExportableName a name which is exportable as a value or name in Go code +func (a *API) ExportableName(name string) string { + if name == "" { + return name + } + + return strings.ToUpper(name[0:1]) + name[1:] +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/legacy_stutter.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/legacy_stutter.go new file mode 100644 index 00000000000..f2a323373ea --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/legacy_stutter.go @@ -0,0 +1,252 @@ +package api + +type stutterNames struct { + Operations map[string]string + Shapes map[string]string + ShapeOrder []string +} + +var legacyStutterNames = map[string]stutterNames{ + "WorkSpaces": { + Shapes: map[string]string{ + "WorkspacesIpGroup": "IpGroup", + "WorkspacesIpGroupsList": "IpGroupsList", + }, + }, + "WorkMail": { + Shapes: map[string]string{ + "WorkMailIdentifier": "Identifier", + }, + }, + "WAF": { + Shapes: map[string]string{ + "WAFInvalidPermissionPolicyException": "InvalidPermissionPolicyException", + "WAFInvalidOperationException": "InvalidOperationException", + "WAFInternalErrorException": "InternalErrorException", + "WAFDisallowedNameException": "DisallowedNameException", + "WAFReferencedItemException": "ReferencedItemException", + "WAFInvalidParameterException": "InvalidParameterException", + "WAFLimitsExceededException": "LimitsExceededException", + "WAFNonexistentContainerException": "NonexistentContainerException", + "WAFInvalidAccountException": "InvalidAccountException", + "WAFSubscriptionNotFoundException": 
"SubscriptionNotFoundException", + "WAFBadRequestException": "BadRequestException", + "WAFNonexistentItemException": "NonexistentItemException", + "WAFServiceLinkedRoleErrorException": "ServiceLinkedRoleErrorException", + "WAFNonEmptyEntityException": "NonEmptyEntityException", + "WAFTagOperationInternalErrorException": "TagOperationInternalErrorException", + "WAFStaleDataException": "StaleDataException", + "WAFTagOperationException": "TagOperationException", + "WAFInvalidRegexPatternException": "InvalidRegexPatternException", + }, + }, + "Translate": { + Operations: map[string]string{ + "TranslateText": "Text", + }, + Shapes: map[string]string{ + "TranslateTextRequest": "TextRequest", + "TranslateTextResponse": "TextResponse", + }, + }, + "Storage Gateway": { + Shapes: map[string]string{ + "StorageGatewayError": "Error", + }, + }, + "Snowball": { + Shapes: map[string]string{ + "SnowballType": "Type", + "SnowballCapacity": "Capacity", + }, + }, + "S3": { + Shapes: map[string]string{ + "S3KeyFilter": "KeyFilter", + "S3Location": "Location", + }, + }, + "Rekognition": { + Shapes: map[string]string{ + "RekognitionUniqueId": "UniqueId", + }, + }, + "QuickSight": { + Shapes: map[string]string{ + "QuickSightUserNotFoundException": "UserNotFoundException", + }, + }, + "Marketplace Commerce Analytics": { + Shapes: map[string]string{ + "MarketplaceCommerceAnalyticsException": "Exception", + }, + }, + "KMS": { + Shapes: map[string]string{ + "KMSInternalException": "InternalException", + "KMSInvalidStateException": "InvalidStateException", + }, + }, + "Kinesis Analytics": { + Shapes: map[string]string{ + "KinesisAnalyticsARN": "ARN", + }, + }, + "IoT Events": { + ShapeOrder: []string{ + "Action", + "IotEventsAction", + }, + Shapes: map[string]string{ + "Action": "ActionData", + "IotEventsAction": "Action", + }, + }, + "Inspector": { + Shapes: map[string]string{ + "InspectorServiceAttributes": "ServiceAttributes", + "InspectorEvent": "Event", + }, + }, + "GuardDuty": { + 
Shapes: map[string]string{ + "GuardDutyArn": "Arn", + }, + }, + "GroundStation": { + Shapes: map[string]string{ + "GroundStationList": "List", + "GroundStationData": "Data", + }, + }, + "Glue": { + ShapeOrder: []string{ + "Table", + "GlueTable", + "GlueTables", + "GlueResourceArn", + "GlueEncryptionException", + }, + Shapes: map[string]string{ + "Table": "TableData", + "GlueTable": "Table", + "GlueTables": "Tables", + "GlueResourceArn": "ResourceArn", + "GlueEncryptionException": "EncryptionException", + }, + }, + "Glacier": { + Shapes: map[string]string{ + "GlacierJobDescription": "JobDescription", + }, + }, + "Elastic Beanstalk": { + Shapes: map[string]string{ + "ElasticBeanstalkServiceException": "ServiceException", + }, + }, + "Direct Connect": { + Shapes: map[string]string{ + "DirectConnectClientException": "ClientException", + "DirectConnectGatewayAssociationProposalId": "GatewayAssociationProposalId", + "DirectConnectGatewayAssociationProposalState": "GatewayAssociationProposalState", + "DirectConnectGatewayAttachmentType": "GatewayAttachmentType", + "DirectConnectGatewayAttachmentList": "GatewayAttachmentList", + "DirectConnectGatewayAssociationProposalList": "GatewayAssociationProposalList", + "DirectConnectGatewayAssociationId": "GatewayAssociationId", + "DirectConnectGatewayList": "GatewayList", + "DirectConnectGatewayName": "GatewayName", + "DirectConnectGatewayAttachment": "GatewayAttachment", + "DirectConnectServerException": "ServerException", + "DirectConnectGatewayState": "GatewayState", + "DirectConnectGateway": "Gateway", + "DirectConnectGatewayId": "GatewayId", + "DirectConnectGatewayAttachmentState": "GatewayAttachmentState", + "DirectConnectGatewayAssociation": "GatewayAssociation", + "DirectConnectGatewayAssociationProposal": "GatewayAssociationProposal", + "DirectConnectGatewayAssociationState": "GatewayAssociationState", + "DirectConnectGatewayAssociationList": "GatewayAssociationList", + }, + }, + "Comprehend": { + Shapes: 
map[string]string{ + "ComprehendArnName": "ArnName", + "ComprehendArn": "Arn", + }, + }, + "Cognito Identity": { + Shapes: map[string]string{ + "CognitoIdentityProviderList": "ProviderList", + "CognitoIdentityProviderName": "ProviderName", + "CognitoIdentityProviderClientId": "ProviderClientId", + "CognitoIdentityProviderTokenCheck": "ProviderTokenCheck", + "CognitoIdentityProvider": "Provider", + }, + }, + "CloudTrail": { + Shapes: map[string]string{ + "CloudTrailAccessNotEnabledException": "AccessNotEnabledException", + "CloudTrailARNInvalidException": "ARNInvalidException", + }, + }, + "CloudFront": { + Shapes: map[string]string{ + "CloudFrontOriginAccessIdentitySummaryList": "OriginAccessIdentitySummaryList", + "CloudFrontOriginAccessIdentity": "OriginAccessIdentity", + "CloudFrontOriginAccessIdentityAlreadyExists": "OriginAccessIdentityAlreadyExists", + "CloudFrontOriginAccessIdentityConfig": "OriginAccessIdentityConfig", + "CloudFrontOriginAccessIdentitySummary": "OriginAccessIdentitySummary", + "CloudFrontOriginAccessIdentityList": "OriginAccessIdentityList", + "CloudFrontOriginAccessIdentityInUse": "OriginAccessIdentityInUse", + }, + }, + "Backup": { + Shapes: map[string]string{ + "BackupPlan": "Plan", + "BackupRule": "Rule", + "BackupSelectionName": "SelectionName", + "BackupSelectionsList": "SelectionsList", + "BackupVaultEvents": "VaultEvents", + "BackupRuleName": "RuleName", + "BackupVaultName": "VaultName", + "BackupJob": "Job", + "BackupJobState": "JobState", + "BackupJobsList": "JobsList", + "BackupVaultEvent": "VaultEvent", + "BackupPlanVersionsList": "PlanVersionsList", + "BackupPlansListMember": "PlansListMember", + "BackupSelection": "Selection", + "BackupVaultList": "VaultList", + "BackupVaultListMember": "VaultListMember", + "BackupPlanInput": "PlanInput", + "BackupRules": "Rules", + "BackupPlansList": "PlansList", + "BackupPlanTemplatesList": "PlanTemplatesList", + "BackupRuleInput": "RuleInput", + "BackupPlanTemplatesListMember": 
"PlanTemplatesListMember", + "BackupRulesInput": "RulesInput", + "BackupSelectionsListMember": "SelectionsListMember", + "BackupPlanName": "PlanName", + }, + }, + "Auto Scaling": { + Shapes: map[string]string{ + "AutoScalingGroupDesiredCapacity": "GroupDesiredCapacity", + "AutoScalingGroupNames": "GroupNames", + "AutoScalingGroupsType": "GroupsType", + "AutoScalingNotificationTypes": "NotificationTypes", + "AutoScalingGroupNamesType": "GroupNamesType", + "AutoScalingInstancesType": "InstancesType", + "AutoScalingInstanceDetails": "InstanceDetails", + "AutoScalingGroupMaxSize": "GroupMaxSize", + "AutoScalingGroups": "Groups", + "AutoScalingGroupMinSize": "GroupMinSize", + "AutoScalingGroup": "Group", + }, + }, + "AppStream": { + Shapes: map[string]string{ + "AppstreamAgentVersion": "AgentVersion", + }, + }, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/list_of_shame.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/list_of_shame.go new file mode 100644 index 00000000000..cfb07dd860c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/list_of_shame.go @@ -0,0 +1,583 @@ +package api + +type persistAPIType struct { + output bool + input bool +} + +type persistAPITypes map[string]map[string]persistAPIType + +func (ts persistAPITypes) Lookup(serviceName, opName string) persistAPIType { + service, ok := shamelist[serviceName] + if !ok { + return persistAPIType{} + } + + return service[opName] +} + +func (ts persistAPITypes) Input(serviceName, opName string) bool { + return ts.Lookup(serviceName, opName).input +} + +func (ts persistAPITypes) Output(serviceName, opName string) bool { + return ts.Lookup(serviceName, opName).output +} + +// shamelist is used to not rename certain operation's input and output shapes. +// We need to maintain backwards compatibility with pre-existing services. 
Since +// not generating unique input/output shapes is not desired, we will generate +// unique input/output shapes for new operations. +var shamelist = persistAPITypes{ + "APIGateway": { + "CreateApiKey": { + output: true, + }, + "CreateAuthorizer": { + output: true, + }, + "CreateBasePathMapping": { + output: true, + }, + "CreateDeployment": { + output: true, + }, + "CreateDocumentationPart": { + output: true, + }, + "CreateDocumentationVersion": { + output: true, + }, + "CreateDomainName": { + output: true, + }, + "CreateModel": { + output: true, + }, + "CreateResource": { + output: true, + }, + "CreateRestApi": { + output: true, + }, + "CreateStage": { + output: true, + }, + "CreateUsagePlan": { + output: true, + }, + "CreateUsagePlanKey": { + output: true, + }, + "GenerateClientCertificate": { + output: true, + }, + "GetAccount": { + output: true, + }, + "GetApiKey": { + output: true, + }, + "GetAuthorizer": { + output: true, + }, + "GetBasePathMapping": { + output: true, + }, + "GetClientCertificate": { + output: true, + }, + "GetDeployment": { + output: true, + }, + "GetDocumentationPart": { + output: true, + }, + "GetDocumentationVersion": { + output: true, + }, + "GetDomainName": { + output: true, + }, + "GetIntegration": { + output: true, + }, + "GetIntegrationResponse": { + output: true, + }, + "GetMethod": { + output: true, + }, + "GetMethodResponse": { + output: true, + }, + "GetModel": { + output: true, + }, + "GetResource": { + output: true, + }, + "GetRestApi": { + output: true, + }, + "GetSdkType": { + output: true, + }, + "GetStage": { + output: true, + }, + "GetUsage": { + output: true, + }, + "GetUsagePlan": { + output: true, + }, + "GetUsagePlanKey": { + output: true, + }, + "ImportRestApi": { + output: true, + }, + "PutIntegration": { + output: true, + }, + "PutIntegrationResponse": { + output: true, + }, + "PutMethod": { + output: true, + }, + "PutMethodResponse": { + output: true, + }, + "PutRestApi": { + output: true, + }, + 
"UpdateAccount": { + output: true, + }, + "UpdateApiKey": { + output: true, + }, + "UpdateAuthorizer": { + output: true, + }, + "UpdateBasePathMapping": { + output: true, + }, + "UpdateClientCertificate": { + output: true, + }, + "UpdateDeployment": { + output: true, + }, + "UpdateDocumentationPart": { + output: true, + }, + "UpdateDocumentationVersion": { + output: true, + }, + "UpdateDomainName": { + output: true, + }, + "UpdateIntegration": { + output: true, + }, + "UpdateIntegrationResponse": { + output: true, + }, + "UpdateMethod": { + output: true, + }, + "UpdateMethodResponse": { + output: true, + }, + "UpdateModel": { + output: true, + }, + "UpdateResource": { + output: true, + }, + "UpdateRestApi": { + output: true, + }, + "UpdateStage": { + output: true, + }, + "UpdateUsage": { + output: true, + }, + "UpdateUsagePlan": { + output: true, + }, + }, + "AutoScaling": { + "ResumeProcesses": { + input: true, + }, + "SuspendProcesses": { + input: true, + }, + }, + "CognitoIdentity": { + "CreateIdentityPool": { + output: true, + }, + "DescribeIdentity": { + output: true, + }, + "DescribeIdentityPool": { + output: true, + }, + "UpdateIdentityPool": { + input: true, + output: true, + }, + }, + "DirectConnect": { + "AllocateConnectionOnInterconnect": { + output: true, + }, + "AllocateHostedConnection": { + output: true, + }, + "AllocatePrivateVirtualInterface": { + output: true, + }, + "AllocatePublicVirtualInterface": { + output: true, + }, + "AssociateConnectionWithLag": { + output: true, + }, + "AssociateHostedConnection": { + output: true, + }, + "AssociateVirtualInterface": { + output: true, + }, + "CreateConnection": { + output: true, + }, + "CreateInterconnect": { + output: true, + }, + "CreateLag": { + output: true, + }, + "CreatePrivateVirtualInterface": { + output: true, + }, + "CreatePublicVirtualInterface": { + output: true, + }, + "DeleteConnection": { + output: true, + }, + "DeleteLag": { + output: true, + }, + "DescribeConnections": { + output: true, 
+ }, + "DescribeConnectionsOnInterconnect": { + output: true, + }, + "DescribeHostedConnections": { + output: true, + }, + "DescribeLoa": { + output: true, + }, + "DisassociateConnectionFromLag": { + output: true, + }, + "UpdateLag": { + output: true, + }, + }, + "EC2": { + "AttachVolume": { + output: true, + }, + "CreateSnapshot": { + output: true, + }, + "CreateVolume": { + output: true, + }, + "DetachVolume": { + output: true, + }, + "RunInstances": { + output: true, + }, + }, + "EFS": { + "CreateFileSystem": { + output: true, + }, + "CreateMountTarget": { + output: true, + }, + }, + "ElastiCache": { + "AddTagsToResource": { + output: true, + }, + "ListTagsForResource": { + output: true, + }, + "ModifyCacheParameterGroup": { + output: true, + }, + "RemoveTagsFromResource": { + output: true, + }, + "ResetCacheParameterGroup": { + output: true, + }, + }, + "ElasticBeanstalk": { + "ComposeEnvironments": { + output: true, + }, + "CreateApplication": { + output: true, + }, + "CreateApplicationVersion": { + output: true, + }, + "CreateConfigurationTemplate": { + output: true, + }, + "CreateEnvironment": { + output: true, + }, + "DescribeEnvironments": { + output: true, + }, + "TerminateEnvironment": { + output: true, + }, + "UpdateApplication": { + output: true, + }, + "UpdateApplicationVersion": { + output: true, + }, + "UpdateConfigurationTemplate": { + output: true, + }, + "UpdateEnvironment": { + output: true, + }, + }, + "ElasticTranscoder": { + "CreateJob": { + output: true, + }, + }, + "Glacier": { + "DescribeJob": { + output: true, + }, + "UploadArchive": { + output: true, + }, + "CompleteMultipartUpload": { + output: true, + }, + }, + "IAM": { + "GetContextKeysForCustomPolicy": { + output: true, + }, + "GetContextKeysForPrincipalPolicy": { + output: true, + }, + "SimulateCustomPolicy": { + output: true, + }, + "SimulatePrincipalPolicy": { + output: true, + }, + }, + "Kinesis": { + "DisableEnhancedMonitoring": { + output: true, + }, + 
"EnableEnhancedMonitoring": { + output: true, + }, + }, + "KMS": { + "ListGrants": { + output: true, + }, + "ListRetirableGrants": { + output: true, + }, + }, + "Lambda": { + "CreateAlias": { + output: true, + }, + "CreateEventSourceMapping": { + output: true, + }, + "CreateFunction": { + output: true, + }, + "DeleteEventSourceMapping": { + output: true, + }, + "GetAlias": { + output: true, + }, + "GetEventSourceMapping": { + output: true, + }, + "GetFunctionConfiguration": { + output: true, + }, + "PublishVersion": { + output: true, + }, + "UpdateAlias": { + output: true, + }, + "UpdateEventSourceMapping": { + output: true, + }, + "UpdateFunctionCode": { + output: true, + }, + "UpdateFunctionConfiguration": { + output: true, + }, + }, + "MQ": { + "CreateBroker": { + input: true, + output: true, + }, + "CreateConfiguration": { + input: true, + output: true, + }, + "CreateUser": { + input: true, + }, + "DeleteBroker": { + output: true, + }, + "DescribeBroker": { + output: true, + }, + "DescribeUser": { + output: true, + }, + "DescribeConfigurationRevision": { + output: true, + }, + "ListBrokers": { + output: true, + }, + "ListConfigurations": { + output: true, + }, + "ListConfigurationRevisions": { + output: true, + }, + "ListUsers": { + output: true, + }, + "UpdateBroker": { + input: true, + output: true, + }, + "UpdateConfiguration": { + input: true, + output: true, + }, + "UpdateUser": { + input: true, + }, + }, + "RDS": { + "ModifyDBClusterParameterGroup": { + output: true, + }, + "ModifyDBParameterGroup": { + output: true, + }, + "ResetDBClusterParameterGroup": { + output: true, + }, + "ResetDBParameterGroup": { + output: true, + }, + }, + "Redshift": { + "DescribeLoggingStatus": { + output: true, + }, + "DisableLogging": { + output: true, + }, + "EnableLogging": { + output: true, + }, + "ModifyClusterParameterGroup": { + output: true, + }, + "ResetClusterParameterGroup": { + output: true, + }, + }, + "S3": { + "GetBucketNotification": { + input: true, + 
output: true, + }, + "GetBucketNotificationConfiguration": { + input: true, + output: true, + }, + }, + "ServerlessApplicationRepository": { + "CreateApplication": { + input: true, + }, + "CreateApplicationVersion": { + input: true, + }, + "CreateCloudFormationChangeSet": { + input: true, + }, + "UpdateApplication": { + input: true, + }, + }, + "SWF": { + "CountClosedWorkflowExecutions": { + output: true, + }, + "CountOpenWorkflowExecutions": { + output: true, + }, + "CountPendingActivityTasks": { + output: true, + }, + "CountPendingDecisionTasks": { + output: true, + }, + "ListClosedWorkflowExecutions": { + output: true, + }, + "ListOpenWorkflowExecutions": { + output: true, + }, + }, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go new file mode 100644 index 00000000000..7e1d10d7140 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/load.go @@ -0,0 +1,261 @@ +// +build codegen + +package api + +import ( + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strings" +) + +// APIs provides a set of API models loaded by API package name. +type APIs map[string]*API + +// Loader provides the loading of APIs from files. +type Loader struct { + // The base Go import path the loaded models will be appended to. + BaseImport string + + // Allows ignoring API models that are unsupported by the SDK without + // failing the load of other supported APIs. + IgnoreUnsupportedAPIs bool +} + +// Load loads the API model files from disk returning the map of API package. +// Returns error if multiple API model resolve to the same package name. 
+func (l Loader) Load(modelPaths []string) (APIs, error) { + apis := APIs{} + for _, modelPath := range modelPaths { + a, err := loadAPI(modelPath, l.BaseImport, func(a *API) { + a.IgnoreUnsupportedAPIs = l.IgnoreUnsupportedAPIs + }) + if err != nil { + return nil, fmt.Errorf("failed to load API, %v, %v", modelPath, err) + } + + if len(a.Operations) == 0 { + if l.IgnoreUnsupportedAPIs { + fmt.Fprintf(os.Stderr, "API has no operations, ignoring model %s, %v\n", + modelPath, a.ImportPath()) + continue + } + } + + importPath := a.ImportPath() + if _, ok := apis[importPath]; ok { + return nil, fmt.Errorf( + "package names must be unique attempted to load %v twice. Second model file: %v", + importPath, modelPath) + } + apis[importPath] = a + } + + return apis, nil +} + +// attempts to load a model from disk into the import specified. Additional API +// options are invoked before to the API's Setup being called. +func loadAPI(modelPath, baseImport string, opts ...func(*API)) (*API, error) { + a := &API{ + BaseImportPath: baseImport, + BaseCrosslinkURL: "https://docs.aws.amazon.com", + } + + modelFile := filepath.Base(modelPath) + modelDir := filepath.Dir(modelPath) + err := attachModelFiles(modelDir, + modelLoader{modelFile, a.Attach, true}, + modelLoader{"docs-2.json", a.AttachDocs, false}, + modelLoader{"paginators-1.json", a.AttachPaginators, false}, + modelLoader{"waiters-2.json", a.AttachWaiters, false}, + modelLoader{"examples-1.json", a.AttachExamples, false}, + modelLoader{"smoke.json", a.AttachSmokeTests, false}, + ) + if err != nil { + return nil, err + } + + for _, opt := range opts { + opt(a) + } + + if err = a.Setup(); err != nil { + return nil, err + } + + return a, nil +} + +type modelLoader struct { + Filename string + Loader func(string) error + Required bool +} + +func attachModelFiles(modelPath string, modelFiles ...modelLoader) error { + for _, m := range modelFiles { + filepath := filepath.Join(modelPath, m.Filename) + _, err := os.Stat(filepath) + 
if os.IsNotExist(err) && !m.Required { + continue + } else if err != nil { + return fmt.Errorf("failed to load model file %v, %v", m.Filename, err) + } + + if err = m.Loader(filepath); err != nil { + return fmt.Errorf("model load failed, %s, %v", modelPath, err) + } + } + + return nil +} + +// ExpandModelGlobPath returns a slice of model paths expanded from the glob +// pattern passed in. Returns the path of the model file to be loaded. Includes +// all versions of a service model. +// +// e.g: +// models/apis/*/*/api-2.json +// +// Or with specific model file: +// models/apis/service/version/api-2.json +func ExpandModelGlobPath(globs ...string) ([]string, error) { + modelPaths := []string{} + + for _, g := range globs { + filepaths, err := filepath.Glob(g) + if err != nil { + return nil, err + } + for _, p := range filepaths { + modelPaths = append(modelPaths, p) + } + } + + return modelPaths, nil +} + +// TrimModelServiceVersions sorts the model paths by service version then +// returns recent model versions, and model version excluded. +// +// Uses the third from last path element to determine unique service. Only one +// service version will be included. +// +// models/apis/service/version/api-2.json +func TrimModelServiceVersions(modelPaths []string) (include, exclude []string) { + sort.Strings(modelPaths) + + // Remove old API versions from list + m := map[string]struct{}{} + for i := len(modelPaths) - 1; i >= 0; i-- { + // service name is 2nd-to-last component + parts := strings.Split(modelPaths[i], string(filepath.Separator)) + svc := parts[len(parts)-3] + + if _, ok := m[svc]; ok { + // Removed unused service version + exclude = append(exclude, modelPaths[i]) + continue + } + include = append(include, modelPaths[i]) + m[svc] = struct{}{} + } + + return include, exclude +} + +// Attach opens a file by name, and unmarshal its JSON data. +// Will proceed to setup the API if not already done so. 
+func (a *API) Attach(filename string) error { + a.path = filepath.Dir(filename) + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(a); err != nil { + return fmt.Errorf("failed to decode %s, err: %v", filename, err) + } + + return nil +} + +// AttachString will unmarshal a raw JSON string, and setup the +// API if not already done so. +func (a *API) AttachString(str string) error { + json.Unmarshal([]byte(str), a) + + if a.initialized { + return nil + } + + return a.Setup() +} + +// Setup initializes the API. +func (a *API) Setup() error { + a.setServiceAliaseName() + a.setMetadataEndpointsKey() + a.writeShapeNames() + a.resolveReferences() + a.backfillErrorMembers() + + if !a.NoRemoveUnusedShapes { + a.removeUnusedShapes() + } + + a.fixStutterNames() + if err := a.validateShapeNames(); err != nil { + log.Fatalf(err.Error()) + } + a.renameExportable() + a.applyShapeNameAliases() + a.createInputOutputShapes() + a.writeInputOutputLocationName() + a.renameAPIPayloadShapes() + a.renameCollidingFields() + a.updateTopLevelShapeReferences() + if err := a.setupEventStreams(); err != nil { + return err + } + + a.findEndpointDiscoveryOp() + a.injectUnboundedOutputStreaming() + + // Enables generated types for APIs using REST-JSON and JSONRPC protocols. + // Other protocols will be added as supported. + a.enableGeneratedTypedErrors() + + if err := a.customizationPasses(); err != nil { + return err + } + + a.addHeaderMapDocumentation() + + if !a.NoRemoveUnusedShapes { + a.removeUnusedShapes() + } + + if !a.NoValidataShapeMethods { + a.addShapeValidations() + } + + a.initialized = true + + return nil +} + +// UnsupportedAPIModelError provides wrapping of an error causing the API to +// fail to load because the SDK does not support the API service defined. 
+type UnsupportedAPIModelError struct { + Err error +} + +func (e UnsupportedAPIModelError) Error() string { + return fmt.Sprintf("service API is not supported, %v", e.Err) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/logger.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/logger.go new file mode 100644 index 00000000000..7a618513973 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/logger.go @@ -0,0 +1,54 @@ +// +build codegen + +package api + +import ( + "fmt" + "io" + "sync" +) + +var debugLogger *logger +var initDebugLoggerOnce sync.Once + +// logger provides a basic logging +type logger struct { + w io.Writer +} + +// LogDebug initialize's the debug logger for the components in the api +// package to log debug lines to. +// +// Panics if called multiple times. +// +// Must be used prior to any model loading or code gen. +func LogDebug(w io.Writer) { + var initialized bool + initDebugLoggerOnce.Do(func() { + debugLogger = &logger{ + w: w, + } + initialized = true + }) + + if !initialized && debugLogger != nil { + panic("LogDebug called multiple times. Can only be called once") + } +} + +// Logf logs using the fmt printf pattern. Appends a new line to the end of the +// logged statement. +func (l *logger) Logf(format string, args ...interface{}) { + if l == nil { + return + } + fmt.Fprintf(l.w, format+"\n", args...) +} + +// Logln logs using the fmt println pattern. +func (l *logger) Logln(args ...interface{}) { + if l == nil { + return + } + fmt.Fprintln(l.w, args...) 
+} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go new file mode 100644 index 00000000000..0df3f52b932 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/operation.go @@ -0,0 +1,828 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "regexp" + "sort" + "strings" + "text/template" +) + +// An Operation defines a specific API Operation. +type Operation struct { + API *API `json:"-"` + ExportedName string + Name string + Documentation string + HTTP HTTPInfo + Host string `json:"host"` + InputRef ShapeRef `json:"input"` + OutputRef ShapeRef `json:"output"` + ErrorRefs []ShapeRef `json:"errors"` + Paginator *Paginator + Deprecated bool `json:"deprecated"` + DeprecatedMsg string `json:"deprecatedMessage"` + AuthType AuthType `json:"authtype"` + imports map[string]bool + CustomBuildHandlers []string + + EventStreamAPI *EventStreamAPI + + IsEndpointDiscoveryOp bool `json:"endpointoperation"` + EndpointDiscovery *EndpointDiscovery `json:"endpointdiscovery"` + Endpoint *EndpointTrait `json:"endpoint"` + IsHttpChecksumRequired bool `json:"httpChecksumRequired"` +} + +// EndpointTrait provides the structure of the modeled endpoint trait, and its +// properties. +type EndpointTrait struct { + // Specifies the hostPrefix template to prepend to the operation's request + // endpoint host. + HostPrefix string `json:"hostPrefix"` +} + +// EndpointDiscovery represents a map of key values pairs that represents +// metadata about how a given API will make a call to the discovery endpoint. +type EndpointDiscovery struct { + // Required indicates that for a given operation that endpoint is required. + // Any required endpoint discovery operation cannot have endpoint discovery + // turned off. + Required bool `json:"required"` +} + +// OperationForMethod returns the API operation name that corresponds to the +// client method name provided. 
+func (a *API) OperationForMethod(name string) *Operation { + for _, op := range a.Operations { + for _, m := range op.Methods() { + if m == name { + return op + } + } + } + + return nil +} + +// A HTTPInfo defines the method of HTTP request for the Operation. +type HTTPInfo struct { + Method string + RequestURI string + ResponseCode uint +} + +// Methods Returns a list of method names that will be generated. +func (o *Operation) Methods() []string { + methods := []string{ + o.ExportedName, + o.ExportedName + "Request", + o.ExportedName + "WithContext", + } + + if o.Paginator != nil { + methods = append(methods, []string{ + o.ExportedName + "Pages", + o.ExportedName + "PagesWithContext", + }...) + } + + return methods +} + +// HasInput returns if the Operation accepts an input parameter +func (o *Operation) HasInput() bool { + return o.InputRef.ShapeName != "" +} + +// HasOutput returns if the Operation accepts an output parameter +func (o *Operation) HasOutput() bool { + return o.OutputRef.ShapeName != "" +} + +// AuthType provides the enumeration of AuthType trait. +type AuthType string + +// Enumeration values for AuthType trait +const ( + NoneAuthType AuthType = "none" + V4UnsignedBodyAuthType AuthType = "v4-unsigned-body" +) + +// ShouldSignRequestBody returns if the operation request body should be signed +// or not. +func (o *Operation) ShouldSignRequestBody() bool { + switch o.AuthType { + case NoneAuthType, V4UnsignedBodyAuthType: + return false + default: + return true + } +} + +// GetSigner returns the signer that should be used for a API request. 
+func (o *Operation) GetSigner() string { + buf := bytes.NewBuffer(nil) + + switch o.AuthType { + case NoneAuthType: + o.API.AddSDKImport("aws/credentials") + + buf.WriteString("req.Config.Credentials = credentials.AnonymousCredentials") + case V4UnsignedBodyAuthType: + o.API.AddSDKImport("aws/signer/v4") + + buf.WriteString("req.Handlers.Sign.Remove(v4.SignRequestHandler)\n") + buf.WriteString("handler := v4.BuildNamedHandler(\"v4.CustomSignerHandler\", v4.WithUnsignedPayload)\n") + buf.WriteString("req.Handlers.Sign.PushFrontNamed(handler)") + } + + return buf.String() +} + +// HasAccountIDMemberWithARN returns true if an account id member exists for an input shape that may take in an ARN. +func (o *Operation) HasAccountIDMemberWithARN() bool { + return o.InputRef.Shape.HasAccountIdMemberWithARN +} + +// operationTmpl defines a template for rendering an API Operation +var operationTmpl = template.Must(template.New("operation").Funcs(template.FuncMap{ + "EnableStopOnSameToken": enableStopOnSameToken, + "GetDeprecatedMsg": getDeprecatedMessage, +}).Parse(` +const op{{ .ExportedName }} = "{{ .Name }}" + +// {{ .ExportedName }}Request generates a "aws/request.Request" representing the +// client's request for the {{ .ExportedName }} operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See {{ .ExportedName }} for more information on using the {{ .ExportedName }} +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the {{ .ExportedName }}Request method. 
+// req, resp := client.{{ .ExportedName }}Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +{{ $crosslinkURL := $.API.GetCrosslinkURL $.ExportedName -}} +{{ if ne $crosslinkURL "" -}} +// +// See also, {{ $crosslinkURL }} +{{ end -}} +{{- if .Deprecated }}// +// Deprecated: {{ GetDeprecatedMsg .DeprecatedMsg .ExportedName }} +{{ end -}} +func (c *{{ .API.StructName }}) {{ .ExportedName }}Request(` + + `input {{ .InputRef.GoType }}) (req *request.Request, output {{ .OutputRef.GoType }}) { + {{ if (or .Deprecated (or .InputRef.Deprecated .OutputRef.Deprecated)) }}if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, {{ .ExportedName }}, has been deprecated") + } + op := &request.Operation{ {{ else }} op := &request.Operation{ {{ end }} + Name: op{{ .ExportedName }}, + {{ if ne .HTTP.Method "" }}HTTPMethod: "{{ .HTTP.Method }}", + {{ end }}HTTPPath: {{ if ne .HTTP.RequestURI "" }}"{{ .HTTP.RequestURI }}"{{ else }}"/"{{ end }}, + {{ if .Paginator }}Paginator: &request.Paginator{ + InputTokens: {{ .Paginator.InputTokensString }}, + OutputTokens: {{ .Paginator.OutputTokensString }}, + LimitToken: "{{ .Paginator.LimitKey }}", + TruncationToken: "{{ .Paginator.MoreResults }}", + }, + {{ end }} + } + + if input == nil { + input = &{{ .InputRef.GoTypeElem }}{} + } + + output = &{{ .OutputRef.GoTypeElem }}{} + req = c.newRequest(op, input, output) + {{- if ne .AuthType "" }} + {{ .GetSigner }} + {{- end }} + + {{- if .HasAccountIDMemberWithARN }} + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + {{- end }} + + {{- if .ShouldDiscardResponse -}} + {{- $_ := .API.AddSDKImport "private/protocol" }} + {{- $_ := .API.AddSDKImport "private/protocol" .API.ProtocolPackage }} + req.Handlers.Unmarshal.Swap({{ .API.ProtocolPackage }}.UnmarshalHandler.Name, 
protocol.UnmarshalDiscardBodyHandler) + {{- else }} + {{- if $.EventStreamAPI }} + {{- $esapi := $.EventStreamAPI }} + + {{- if $esapi.RequireHTTP2 }} + req.Handlers.UnmarshalMeta.PushBack( + protocol.RequireHTTPMinProtocol{Major:2}.Handler, + ) + {{- end }} + + es := New{{ $esapi.Name }}() + {{- if $esapi.Legacy }} + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + {{- end }} + output.{{ $esapi.OutputMemberName }} = es + + {{- $inputStream := $esapi.InputStream }} + {{- $outputStream := $esapi.OutputStream }} + + {{- $_ := .API.AddSDKImport "private/protocol" .API.ProtocolPackage }} + {{- $_ := .API.AddSDKImport "private/protocol/rest" }} + + {{- if $inputStream }} + + req.Handlers.Sign.PushFront(es.setupInputPipe) + req.Handlers.Build.PushBack(request.WithSetRequestHeaders(map[string]string{ + "Content-Type": "application/vnd.amazon.eventstream", + "X-Amz-Content-Sha256": "STREAMING-AWS4-HMAC-SHA256-EVENTS", + })) + req.Handlers.Build.Swap({{ .API.ProtocolPackage }}.BuildHandler.Name, rest.BuildHandler) + req.Handlers.Send.Swap(client.LogHTTPRequestHandler.Name, client.LogHTTPRequestHeaderHandler) + req.Handlers.Unmarshal.PushBack(es.runInputStream) + + {{- if eq .API.Metadata.Protocol "json" }} + es.input = input + req.Handlers.Unmarshal.PushBack(es.sendInitialEvent) + {{- end }} + {{- end }} + + {{- if $outputStream }} + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap({{ .API.ProtocolPackage }}.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + + {{- if eq .API.Metadata.Protocol "json" }} + es.output = output + req.Handlers.Unmarshal.PushBack(es.recvInitialEvent) + {{- end }} + {{- end }} + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + + {{- end }} + {{- end }} + + {{- if .EndpointDiscovery }} + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + {{- if not .EndpointDiscovery.Required }} + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + {{- end }} + de := discoverer{{ .API.EndpointDiscoveryOp.Name }}{ + Required: {{ .EndpointDiscovery.Required }}, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + {{- range $key, $ref := .InputRef.Shape.MemberRefs -}} + {{- if $ref.EndpointDiscoveryID -}} + {{- if ne (len $ref.LocationName) 0 -}} + "{{ $ref.LocationName }}": input.{{ $key }}, + {{- else }} + "{{ $key }}": input.{{ $key }}, + {{- end }} + {{- end }} + {{- end }} + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + {{- if not .EndpointDiscovery.Required }} + } + {{- end }} + } + {{- end }} + + {{- range $_, $handler := $.CustomBuildHandlers }} + req.Handlers.Build.PushBackNamed({{ $handler }}) + {{- end }} + + {{- if .IsHttpChecksumRequired }} + {{- $_ := .API.AddSDKImport "private/checksum" }} + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + {{- end }} + return +} + +// {{ .ExportedName }} API operation for {{ .API.Metadata.ServiceFullName }}. +{{- if .Documentation }} +// +{{ .Documentation }} +{{- end }} +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for {{ .API.Metadata.ServiceFullName }}'s +// API operation {{ .ExportedName }} for usage and error information. 
+{{- if .ErrorRefs }} +// +// Returned Error {{ if $.API.WithGeneratedTypedErrors }}Types{{ else }}Codes{{ end }}: +{{- range $_, $err := .ErrorRefs -}} +{{- if $.API.WithGeneratedTypedErrors }} +// * {{ $err.ShapeName }} +{{- else }} +// * {{ $err.Shape.ErrorCodeName }} "{{ $err.Shape.ErrorName}}" +{{- end }} +{{- if $err.Docstring }} +{{ $err.IndentedDocstring }} +{{- end }} +// +{{- end }} +{{- end }} +{{ $crosslinkURL := $.API.GetCrosslinkURL $.ExportedName -}} +{{ if ne $crosslinkURL "" -}} +// See also, {{ $crosslinkURL }} +{{ end -}} +{{- if .Deprecated }}// +// Deprecated: {{ GetDeprecatedMsg .DeprecatedMsg .ExportedName }} +{{ end -}} +func (c *{{ .API.StructName }}) {{ .ExportedName }}(` + + `input {{ .InputRef.GoType }}) ({{ .OutputRef.GoType }}, error) { + req, out := c.{{ .ExportedName }}Request(input) + return out, req.Send() +} + +// {{ .ExportedName }}WithContext is the same as {{ .ExportedName }} with the addition of +// the ability to pass a context and additional request options. +// +// See {{ .ExportedName }} for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +{{ if .Deprecated }}// +// Deprecated: {{ GetDeprecatedMsg .DeprecatedMsg (printf "%s%s" .ExportedName "WithContext") }} +{{ end -}} +func (c *{{ .API.StructName }}) {{ .ExportedName }}WithContext(` + + `ctx aws.Context, input {{ .InputRef.GoType }}, opts ...request.Option) ` + + `({{ .OutputRef.GoType }}, error) { + req, out := c.{{ .ExportedName }}Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +{{ if .Paginator }} +// {{ .ExportedName }}Pages iterates over the pages of a {{ .ExportedName }} operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See {{ .ExportedName }} method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a {{ .ExportedName }} operation. +// pageNum := 0 +// err := client.{{ .ExportedName }}Pages(params, +// func(page {{ .OutputRef.Shape.GoTypeWithPkgName }}, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +{{ if .Deprecated }}// +// Deprecated: {{ GetDeprecatedMsg .DeprecatedMsg (printf "%s%s" .ExportedName "Pages") }} +{{ end -}} +func (c *{{ .API.StructName }}) {{ .ExportedName }}Pages(` + + `input {{ .InputRef.GoType }}, fn func({{ .OutputRef.GoType }}, bool) bool) error { + return c.{{ .ExportedName }}PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// {{ .ExportedName }}PagesWithContext same as {{ .ExportedName }}Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+{{ if .Deprecated }}// +// Deprecated: {{ GetDeprecatedMsg .DeprecatedMsg (printf "%s%s" .ExportedName "PagesWithContext") }} +{{ end -}} +func (c *{{ .API.StructName }}) {{ .ExportedName }}PagesWithContext(` + + `ctx aws.Context, ` + + `input {{ .InputRef.GoType }}, ` + + `fn func({{ .OutputRef.GoType }}, bool) bool, ` + + `opts ...request.Option) error { + p := request.Pagination { + {{ if EnableStopOnSameToken .API.PackageName -}}EndPageOnSameToken: true, + {{ end -}} + NewRequest: func() (*request.Request, error) { + var inCpy {{ .InputRef.GoType }} + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.{{ .ExportedName }}Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().({{ .OutputRef.GoType }}), !p.HasNextPage()) { + break + } + } + + return p.Err() +} +{{ end }} + +{{- if .IsEndpointDiscoveryOp }} +type discoverer{{ .ExportedName }} struct { + Client *{{ .API.StructName }} + Required bool + EndpointCache *crr.EndpointCache + Params map[string]*string + Key string + req *request.Request +} + +func (d *discoverer{{ .ExportedName }}) Discover() (crr.Endpoint, error) { + input := &{{ .API.EndpointDiscoveryOp.InputRef.ShapeName }}{ + {{ if .API.EndpointDiscoveryOp.InputRef.Shape.HasMember "Operation" -}} + Operation: d.Params["op"], + {{ end -}} + {{ if .API.EndpointDiscoveryOp.InputRef.Shape.HasMember "Identifiers" -}} + Identifiers: d.Params, + {{ end -}} + } + + resp, err := d.Client.{{ .API.EndpointDiscoveryOp.Name }}(input) + if err != nil { + return crr.Endpoint{}, err + } + + endpoint := crr.Endpoint{ + Key: d.Key, + } + + for _, e := range resp.Endpoints { + if e.Address == nil { + continue + } + + address := *e.Address + + var scheme string + if idx := strings.Index(address, "://"); idx != -1 { + scheme = address[:idx] + } + + if len(scheme) == 0 { + address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address) + } + + cachedInMinutes := 
aws.Int64Value(e.CachePeriodInMinutes) + u, err := url.Parse(address) + if err != nil { + continue + } + + addr := crr.WeightedAddress{ + URL: u, + Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute), + } + + endpoint.Add(addr) + } + + d.EndpointCache.Add(endpoint) + + return endpoint, nil +} + +func (d *discoverer{{ .ExportedName }}) Handler(r *request.Request) { + endpointKey := crr.BuildEndpointKey(d.Params) + d.Key = endpointKey + d.req = r + + endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required) + if err != nil { + r.Error = err + return + } + + if endpoint.URL != nil && len(endpoint.URL.String()) > 0 { + r.HTTPRequest.URL = endpoint.URL + } +} +{{- end }} +`)) + +// GoCode returns a string of rendered GoCode for this Operation +func (o *Operation) GoCode() string { + var buf bytes.Buffer + + if o.API.EndpointDiscoveryOp != nil { + o.API.AddSDKImport("aws/crr") + o.API.AddImport("time") + o.API.AddImport("net/url") + o.API.AddImport("fmt") + o.API.AddImport("strings") + } + + if o.Endpoint != nil && len(o.Endpoint.HostPrefix) != 0 { + setupEndpointHostPrefix(o) + } + + if err := operationTmpl.Execute(&buf, o); err != nil { + panic(fmt.Sprintf("failed to render operation, %v, %v", o.ExportedName, err)) + } + + if o.EventStreamAPI != nil { + o.API.AddSDKImport("aws/client") + o.API.AddSDKImport("private/protocol") + o.API.AddSDKImport("private/protocol/rest") + o.API.AddSDKImport("private/protocol", o.API.ProtocolPackage()) + if err := renderEventStreamAPI(&buf, o); err != nil { + panic(fmt.Sprintf("failed to render EventStreamAPI for %v, %v", o.ExportedName, err)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// tplInfSig defines the template for rendering an Operation's signature within an Interface definition. 
+var tplInfSig = template.Must(template.New("opsig").Parse(` +{{ .ExportedName }}({{ .InputRef.GoTypeWithPkgName }}) ({{ .OutputRef.GoTypeWithPkgName }}, error) +{{ .ExportedName }}WithContext(aws.Context, {{ .InputRef.GoTypeWithPkgName }}, ...request.Option) ({{ .OutputRef.GoTypeWithPkgName }}, error) +{{ .ExportedName }}Request({{ .InputRef.GoTypeWithPkgName }}) (*request.Request, {{ .OutputRef.GoTypeWithPkgName }}) + +{{ if .Paginator -}} +{{ .ExportedName }}Pages({{ .InputRef.GoTypeWithPkgName }}, func({{ .OutputRef.GoTypeWithPkgName }}, bool) bool) error +{{ .ExportedName }}PagesWithContext(aws.Context, {{ .InputRef.GoTypeWithPkgName }}, func({{ .OutputRef.GoTypeWithPkgName }}, bool) bool, ...request.Option) error +{{- end }} +`)) + +// InterfaceSignature returns a string representing the Operation's interface{} +// functional signature. +func (o *Operation) InterfaceSignature() string { + var buf bytes.Buffer + err := tplInfSig.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// tplExample defines the template for rendering an Operation example +var tplExample = template.Must(template.New("operationExample").Parse(` +func Example{{ .API.StructName }}_{{ .ExportedName }}() { + sess := session.Must(session.NewSession()) + + svc := {{ .API.PackageName }}.New(sess) + + {{ .ExampleInput }} + resp, err := svc.{{ .ExportedName }}(params) + + if err != nil { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + return + } + + // Pretty-print the response data. 
+ fmt.Println(resp) +} +`)) + +// Example returns a string of the rendered Go code for the Operation +func (o *Operation) Example() string { + var buf bytes.Buffer + err := tplExample.Execute(&buf, o) + if err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// ExampleInput return a string of the rendered Go code for an example's input parameters +func (o *Operation) ExampleInput() string { + if len(o.InputRef.Shape.MemberRefs) == 0 { + if strings.Contains(o.InputRef.GoTypeElem(), ".") { + o.imports[SDKImportRoot+"service/"+strings.Split(o.InputRef.GoTypeElem(), ".")[0]] = true + return fmt.Sprintf("var params *%s", o.InputRef.GoTypeElem()) + } + return fmt.Sprintf("var params *%s.%s", + o.API.PackageName(), o.InputRef.GoTypeElem()) + } + e := example{o, map[string]int{}} + return "params := " + e.traverseAny(o.InputRef.Shape, false, false) +} + +// ShouldDiscardResponse returns if the operation should discard the response +// returned by the service. +func (o *Operation) ShouldDiscardResponse() bool { + s := o.OutputRef.Shape + return s.Placeholder || len(s.MemberRefs) == 0 +} + +// A example provides +type example struct { + *Operation + visited map[string]int +} + +// traverseAny returns rendered Go code for the shape. +func (e *example) traverseAny(s *Shape, required, payload bool) string { + str := "" + e.visited[s.ShapeName]++ + + switch s.Type { + case "structure": + str = e.traverseStruct(s, required, payload) + case "list": + str = e.traverseList(s, required, payload) + case "map": + str = e.traverseMap(s, required, payload) + case "jsonvalue": + str = "aws.JSONValue{\"key\": \"value\"}" + if required { + str += " // Required" + } + default: + str = e.traverseScalar(s, required, payload) + } + + e.visited[s.ShapeName]-- + + return str +} + +var reType = regexp.MustCompile(`\b([A-Z])`) + +// traverseStruct returns rendered Go code for a structure type shape. 
+func (e *example) traverseStruct(s *Shape, required, payload bool) string { + var buf bytes.Buffer + + if s.resolvePkg != "" { + e.imports[s.resolvePkg] = true + buf.WriteString("&" + s.GoTypeElem() + "{") + } else { + buf.WriteString("&" + s.API.PackageName() + "." + s.GoTypeElem() + "{") + } + + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + req := make([]string, len(s.Required)) + copy(req, s.Required) + sort.Strings(req) + + if e.visited[s.ShapeName] < 2 { + for _, n := range req { + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, true, p) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + } + + for _, n := range s.MemberNames() { + if s.IsRequired(n) { + continue + } + m := s.MemberRefs[n].Shape + p := n == s.Payload && (s.MemberRefs[n].Streaming || m.Streaming) + buf.WriteString(n + ": " + e.traverseAny(m, false, p) + ",\n") + } + } else { + buf.WriteString("// Recursive values...\n") + } + + buf.WriteString("}") + return buf.String() +} + +// traverseMap returns rendered Go code for a map type shape. 
+func (e *example) traverseMap(s *Shape, required, payload bool) string { + var buf bytes.Buffer + + t := "" + if s.resolvePkg != "" { + e.imports[s.resolvePkg] = true + t = s.GoTypeElem() + } else { + t = reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + } + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.ValueRef.Shape + buf.WriteString("\"Key\": " + e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseList returns rendered Go code for a list type shape. +func (e *example) traverseList(s *Shape, required, payload bool) string { + var buf bytes.Buffer + t := "" + if s.resolvePkg != "" { + e.imports[s.resolvePkg] = true + t = s.GoTypeElem() + } else { + t = reType.ReplaceAllString(s.GoTypeElem(), s.API.PackageName()+".$1") + } + + buf.WriteString(t + "{") + if required { + buf.WriteString(" // Required") + } + buf.WriteString("\n") + + if e.visited[s.ShapeName] < 2 { + m := s.MemberRef.Shape + buf.WriteString(e.traverseAny(m, true, false) + ",") + if m.Type != "list" && m.Type != "structure" && m.Type != "map" { + buf.WriteString(" // Required") + } + buf.WriteString("\n// More values...\n") + } else { + buf.WriteString("// Recursive values...\n") + } + buf.WriteString("}") + + return buf.String() +} + +// traverseScalar returns an AWS Type string representation initialized to a value. +// Will panic if s is an unsupported shape type. 
+func (e *example) traverseScalar(s *Shape, required, payload bool) string { + str := "" + switch s.Type { + case "integer", "long": + str = `aws.Int64(1)` + case "float", "double": + str = `aws.Float64(1.0)` + case "string", "character": + str = `aws.String("` + s.ShapeName + `")` + case "blob": + if payload { + str = `bytes.NewReader([]byte("PAYLOAD"))` + } else { + str = `[]byte("PAYLOAD")` + } + case "boolean": + str = `aws.Bool(true)` + case "timestamp": + str = `aws.Time(time.Now())` + default: + panic("unsupported shape " + s.Type) + } + + return str +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go new file mode 100644 index 00000000000..65c3b9dcf47 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/pagination.go @@ -0,0 +1,102 @@ +// +build codegen + +package api + +import ( + "encoding/json" + "fmt" + "os" +) + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens interface{} `json:"input_token"` + OutputTokens interface{} `json:"output_token"` + LimitKey string `json:"limit_key"` + MoreResults string `json:"more_results"` +} + +// InputTokensString returns output tokens formatted as a list +func (p *Paginator) InputTokensString() string { + str := p.InputTokens.([]string) + return fmt.Sprintf("%#v", str) +} + +// OutputTokensString returns output tokens formatted as a list +func (p *Paginator) OutputTokensString() string { + str := p.OutputTokens.([]string) + return fmt.Sprintf("%#v", str) +} + +// used for unmarshaling from the paginators JSON file +type paginationDefinitions struct { + *API + Pagination map[string]Paginator +} + +// AttachPaginators attaches pagination configuration from filename to the API. 
+func (a *API) AttachPaginators(filename string) error { + p := paginationDefinitions{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + return err + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + return fmt.Errorf("failed to decode %s, err: %v", filename, err) + } + + return p.setup() +} + +// setup runs post-processing on the paginator configuration. +func (p *paginationDefinitions) setup() error { + for n, e := range p.Pagination { + if e.InputTokens == nil || e.OutputTokens == nil { + continue + } + paginator := e + + switch t := paginator.InputTokens.(type) { + case string: + paginator.InputTokens = []string{t} + case []interface{}: + toks := []string{} + for _, e := range t { + s := e.(string) + toks = append(toks, s) + } + paginator.InputTokens = toks + } + switch t := paginator.OutputTokens.(type) { + case string: + paginator.OutputTokens = []string{t} + case []interface{}: + toks := []string{} + for _, e := range t { + s := e.(string) + toks = append(toks, s) + } + paginator.OutputTokens = toks + } + + if o, ok := p.Operations[n]; ok { + o.Paginator = &paginator + } else { + return fmt.Errorf("unknown operation for paginator, %s", n) + } + } + + return nil +} + +func enableStopOnSameToken(service string) bool { + switch service { + case "cloudwatchlogs": + return true + default: + return false + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go new file mode 100644 index 00000000000..5a9138f2df6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/param_filler.go @@ -0,0 +1,147 @@ +// +build codegen + +package api + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/aws/aws-sdk-go/private/util" +) + +// A paramFiller provides string formatting for a shape and its types. 
+type paramFiller struct { + prefixPackageName bool +} + +// typeName returns the type name of a shape. +func (f paramFiller) typeName(shape *Shape) string { + if f.prefixPackageName && shape.Type == "structure" { + return "*" + shape.API.PackageName() + "." + shape.GoTypeElem() + } + return shape.GoType() +} + +// ParamsStructFromJSON returns a JSON string representation of a structure. +func ParamsStructFromJSON(value interface{}, shape *Shape, prefixPackageName bool) string { + f := paramFiller{prefixPackageName: prefixPackageName} + return util.GoFmt(f.paramsStructAny(value, shape)) +} + +// paramsStructAny returns the string representation of any value. +func (f paramFiller) paramsStructAny(value interface{}, shape *Shape) string { + if value == nil { + return "" + } + + switch shape.Type { + case "structure": + if value != nil { + vmap := value.(map[string]interface{}) + return f.paramsStructStruct(vmap, shape) + } + case "list": + vlist := value.([]interface{}) + return f.paramsStructList(vlist, shape) + case "map": + vmap := value.(map[string]interface{}) + return f.paramsStructMap(vmap, shape) + case "string", "character": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.String(%#v)", v.Interface()) + } + case "blob": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() && shape.Streaming { + return fmt.Sprintf("bytes.NewReader([]byte(%#v))", v.Interface()) + } else if v.IsValid() { + return fmt.Sprintf("[]byte(%#v)", v.Interface()) + } + case "boolean": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Bool(%#v)", v.Interface()) + } + case "integer", "long": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Int64(%v)", v.Interface()) + } + case "float", "double": + v := reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Float64(%v)", v.Interface()) + } + case "timestamp": + v := 
reflect.Indirect(reflect.ValueOf(value)) + if v.IsValid() { + return fmt.Sprintf("aws.Time(time.Unix(%d, 0))", int(v.Float())) + } + case "jsonvalue": + v, err := json.Marshal(value) + if err != nil { + panic("failed to marshal JSONValue, " + err.Error()) + } + const tmpl = `func() aws.JSONValue { + var m aws.JSONValue + if err := json.Unmarshal([]byte(%q), &m); err != nil { + panic("failed to unmarshal JSONValue, "+err.Error()) + } + return m + }()` + return fmt.Sprintf(tmpl, string(v)) + default: + panic("Unhandled type " + shape.Type) + } + return "" +} + +// paramsStructStruct returns the string representation of a structure +func (f paramFiller) paramsStructStruct(value map[string]interface{}, shape *Shape) string { + out := "&" + f.typeName(shape)[1:] + "{\n" + for _, n := range shape.MemberNames() { + ref := shape.MemberRefs[n] + name := findParamMember(value, n) + + if val := f.paramsStructAny(value[name], ref.Shape); val != "" { + out += fmt.Sprintf("%s: %s,\n", n, val) + } + } + out += "}" + return out +} + +// paramsStructMap returns the string representation of a map of values +func (f paramFiller) paramsStructMap(value map[string]interface{}, shape *Shape) string { + out := f.typeName(shape) + "{\n" + keys := util.SortedKeys(value) + for _, k := range keys { + v := value[k] + out += fmt.Sprintf("%q: %s,\n", k, f.paramsStructAny(v, shape.ValueRef.Shape)) + } + out += "}" + return out +} + +// paramsStructList returns the string representation of slice of values +func (f paramFiller) paramsStructList(value []interface{}, shape *Shape) string { + out := f.typeName(shape) + "{\n" + for _, v := range value { + out += fmt.Sprintf("%s,\n", f.paramsStructAny(v, shape.MemberRef.Shape)) + } + out += "}" + return out +} + +// findParamMember searches a map for a key ignoring case. Returns the map key if found. 
+func findParamMember(value map[string]interface{}, key string) string { + for actualKey := range value { + if strings.EqualFold(key, actualKey) { + return actualKey + } + } + return "" +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go new file mode 100644 index 00000000000..fc2149d3c2e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/passes.go @@ -0,0 +1,557 @@ +// +build codegen + +package api + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +func (a *API) enableGeneratedTypedErrors() { + switch a.Metadata.Protocol { + case "json": + case "rest-json": + default: + return + } + + a.WithGeneratedTypedErrors = true +} + +// updateTopLevelShapeReferences moves resultWrapper, locationName, and +// xmlNamespace traits from toplevel shape references to the toplevel +// shapes for easier code generation +func (a *API) updateTopLevelShapeReferences() { + for _, o := range a.Operations { + // these are for REST-XML services + if o.InputRef.LocationName != "" { + o.InputRef.Shape.LocationName = o.InputRef.LocationName + } + if o.InputRef.Location != "" { + o.InputRef.Shape.Location = o.InputRef.Location + } + if o.InputRef.Payload != "" { + o.InputRef.Shape.Payload = o.InputRef.Payload + } + if o.InputRef.XMLNamespace.Prefix != "" { + o.InputRef.Shape.XMLNamespace.Prefix = o.InputRef.XMLNamespace.Prefix + } + if o.InputRef.XMLNamespace.URI != "" { + o.InputRef.Shape.XMLNamespace.URI = o.InputRef.XMLNamespace.URI + } + } + +} + +// writeShapeNames sets each shape's API and shape name values. Binding the +// shape to its parent API. This will set OrigShapeName on each Shape and ShapeRef +// to allow access to the original shape name for code generation. 
+func (a *API) writeShapeNames() { + writeOrigShapeName := func(s *ShapeRef) { + if len(s.ShapeName) > 0 { + s.OrigShapeName = s.ShapeName + } + } + + for n, s := range a.Shapes { + s.API = a + s.ShapeName, s.OrigShapeName = n, n + for _, ref := range s.MemberRefs { + writeOrigShapeName(ref) + } + writeOrigShapeName(&s.MemberRef) + writeOrigShapeName(&s.KeyRef) + writeOrigShapeName(&s.ValueRef) + } +} + +func (a *API) resolveReferences() { + resolver := referenceResolver{API: a, visited: map[*ShapeRef]bool{}} + + for _, s := range a.Shapes { + resolver.resolveShape(s) + } + + for _, o := range a.Operations { + o.API = a // resolve parent reference + + resolver.resolveReference(&o.InputRef) + resolver.resolveReference(&o.OutputRef) + + // Resolve references for errors also + for i := range o.ErrorRefs { + resolver.resolveReference(&o.ErrorRefs[i]) + o.ErrorRefs[i].Shape.Exception = true + o.ErrorRefs[i].Shape.ErrorInfo.Type = o.ErrorRefs[i].Shape.ShapeName + } + } +} + +func (a *API) backfillErrorMembers() { + stubShape := &Shape{ + ShapeName: "string", + Type: "string", + } + var locName string + switch a.Metadata.Protocol { + case "ec2", "query", "rest-xml": + locName = "Message" + case "json", "rest-json": + locName = "message" + } + + for _, s := range a.Shapes { + if !s.Exception { + continue + } + + var haveMessage bool + for name := range s.MemberRefs { + if strings.EqualFold(name, "Message") { + haveMessage = true + break + } + } + if !haveMessage { + ref := &ShapeRef{ + ShapeName: stubShape.ShapeName, + Shape: stubShape, + LocationName: locName, + } + s.MemberRefs["Message"] = ref + stubShape.refs = append(stubShape.refs, ref) + } + } + + if len(stubShape.refs) != 0 { + a.Shapes["SDKStubErrorMessageShape"] = stubShape + } +} + +// A referenceResolver provides a way to resolve shape references to +// shape definitions. 
+type referenceResolver struct { + *API + visited map[*ShapeRef]bool +} + +// resolveReference updates a shape reference to reference the API and +// its shape definition. All other nested references are also resolved. +func (r *referenceResolver) resolveReference(ref *ShapeRef) { + if ref.ShapeName == "" { + return + } + + shape, ok := r.API.Shapes[ref.ShapeName] + if !ok { + panic(fmt.Sprintf("unable resolve reference, %s", ref.ShapeName)) + } + + if ref.JSONValue { + ref.ShapeName = "JSONValue" + if _, ok := r.API.Shapes[ref.ShapeName]; !ok { + r.API.Shapes[ref.ShapeName] = &Shape{ + API: r.API, + ShapeName: "JSONValue", + Type: "jsonvalue", + ValueRef: ShapeRef{ + JSONValue: true, + }, + } + } + } + + ref.API = r.API // resolve reference back to API + ref.Shape = shape // resolve shape reference + + if r.visited[ref] { + return + } + r.visited[ref] = true + + shape.refs = append(shape.refs, ref) // register the ref + + // resolve shape's references, if it has any + r.resolveShape(shape) +} + +// resolveShape resolves a shape's Member Key Value, and nested member +// shape references. +func (r *referenceResolver) resolveShape(shape *Shape) { + r.resolveReference(&shape.MemberRef) + r.resolveReference(&shape.KeyRef) + r.resolveReference(&shape.ValueRef) + for _, m := range shape.MemberRefs { + r.resolveReference(m) + } +} + +// fixStutterNames fixes all name stuttering based on Go naming conventions. +// "Stuttering" is when the prefix of a structure or function matches the +// package name (case insensitive). 
+func (a *API) fixStutterNames() { + names, ok := legacyStutterNames[ServiceID(a)] + if !ok { + return + } + + shapeNames := names.ShapeOrder + if len(shapeNames) == 0 { + shapeNames = make([]string, 0, len(names.Shapes)) + for k := range names.Shapes { + shapeNames = append(shapeNames, k) + } + } + + for _, shapeName := range shapeNames { + s := a.Shapes[shapeName] + newName := names.Shapes[shapeName] + if other, ok := a.Shapes[newName]; ok && (other.Type == "structure" || other.Type == "enum") { + panic(fmt.Sprintf( + "shape name already exists, renaming %v to %v\n", + s.ShapeName, newName)) + } + s.Rename(newName) + } + + for opName, newName := range names.Operations { + if _, ok := a.Operations[newName]; ok { + panic(fmt.Sprintf( + "operation name already exists, renaming %v to %v\n", + opName, newName)) + } + op := a.Operations[opName] + delete(a.Operations, opName) + a.Operations[newName] = op + op.ExportedName = newName + } +} + +// regexpForValidatingShapeName is used by validateShapeName to filter acceptable shape names +// that may be renamed to a new valid shape name, if not already. +// The regex allows underscores(_) at the beginning of the shape name +// There may be 0 or more underscores(_). The next character would be the leading character +// in the renamed shape name and thus, must be an alphabetic character. +// The regex allows alphanumeric characters along with underscores(_) in rest of the string. 
+var regexForValidatingShapeName = regexp.MustCompile("^[_]*[a-zA-Z][a-zA-Z0-9_]*$") + +// legacyShapeNames is a map of shape names that are supported and bypass the validateShapeNames util +var legacyShapeNames = map[string][]string{ + "mediapackage": { + "__AdTriggersElement", + "__PeriodTriggersElement", + }, +} + +// validateShapeNames is valid only for shapes of type structure or enums +// We validate a shape name to check if its a valid shape name +// A valid shape name would only contain alphanumeric characters and have an alphabet as leading character. +// +// If we encounter a shape name with underscores(_), we remove the underscores, and +// follow a canonical upper camel case naming scheme to create a new shape name. +// If the shape name collides with an existing shape name we return an error. +// The resulting shape name must be a valid shape name or throw an error. +func (a *API) validateShapeNames() error { +loop: + for _, s := range a.Shapes { + if s.Type == "structure" || s.IsEnum() { + for _, legacyname := range legacyShapeNames[a.PackageName()] { + if s.ShapeName == legacyname { + continue loop + } + } + name := s.ShapeName + if b := regexForValidatingShapeName.MatchString(name); !b { + return fmt.Errorf("invalid shape name found: %v", s.ShapeName) + } + + // Slice of strings returned after we split a string + // with a non alphanumeric character as delimiter. 
+ slice := strings.FieldsFunc(name, func(r rune) bool { + return !unicode.IsLetter(r) && !unicode.IsNumber(r) + }) + + // Build a string that follows canonical upper camel casing + var b strings.Builder + for _, word := range slice { + b.WriteString(strings.Title(word)) + } + + name = b.String() + if s.ShapeName != name { + if a.Shapes[name] != nil { + // throw an error if shape with a new shape name already exists + return fmt.Errorf("attempt to rename shape %v to %v for package %v failed, as this rename would result in shape name collision", + s.ShapeName, name, a.PackageName()) + } + debugLogger.Logf("Renaming shape %v to %v for package %v \n", s.ShapeName, name, a.PackageName()) + s.Rename(name) + } + } + } + return nil +} + +// renameExportable renames all operation names to be exportable names. +// All nested Shape names are also updated to the exportable variant. +func (a *API) renameExportable() { + for name, op := range a.Operations { + newName := a.ExportableName(name) + if newName != name { + delete(a.Operations, name) + a.Operations[newName] = op + } + op.ExportedName = newName + } + + for k, s := range a.Shapes { + // FIXME SNS has lower and uppercased shape names with the same name, + // except the lowercased variant is used exclusively for string and + // other primitive types. Renaming both would cause a collision. + // We work around this by only renaming the structure shapes. 
+ if s.Type == "string" { + continue + } + + for mName, member := range s.MemberRefs { + newName := a.ExportableName(mName) + if newName != mName { + delete(s.MemberRefs, mName) + s.MemberRefs[newName] = member + + // also apply locationName trait so we keep the old one + // but only if there's no locationName trait on ref or shape + if member.LocationName == "" && member.Shape.LocationName == "" { + member.LocationName = mName + } + } + + if newName == "_" { + panic("Shape " + s.ShapeName + " uses reserved member name '_'") + } + } + + newName := a.ExportableName(k) + if s.Type == "structure" && newName == a.StructName() { + // If struct collides client's struct type name the shape needs to + // be renamed with a trailing `_` to prevent collision. + newName += "_" + } + + if newName != s.ShapeName { + s.Rename(newName) + } + + s.Payload = a.ExportableName(s.Payload) + + // fix required trait names + for i, n := range s.Required { + s.Required[i] = a.ExportableName(n) + } + } + + for _, s := range a.Shapes { + // fix enum names + if s.IsEnum() { + s.EnumConsts = make([]string, len(s.Enum)) + for i := range s.Enum { + shape := s.ShapeName + shape = strings.ToUpper(shape[0:1]) + shape[1:] + s.EnumConsts[i] = shape + s.EnumName(i) + } + } + } +} + +// renameCollidingFields will rename any fields that uses an SDK or Golang +// specific name. +func (a *API) renameCollidingFields() { + for _, v := range a.Shapes { + namesWithSet := map[string]struct{}{} + for k, field := range v.MemberRefs { + if _, ok := v.MemberRefs["Set"+k]; ok { + namesWithSet["Set"+k] = struct{}{} + } + + if collides(k) || (v.Exception && exceptionCollides(k)) { + renameCollidingField(k, v, field) + } + } + + // checks if any field names collide with setters. 
+ for name := range namesWithSet { + field := v.MemberRefs[name] + renameCollidingField(name, v, field) + } + } +} + +func renameCollidingField(name string, v *Shape, field *ShapeRef) { + newName := name + "_" + debugLogger.Logf("Shape %s's field %q renamed to %q", v.ShapeName, name, newName) + delete(v.MemberRefs, name) + v.MemberRefs[newName] = field + // Set LocationName to the original field name if it is not already set. + // This is to ensure we correctly serialize to the proper member name + if len(field.LocationName) == 0 { + field.LocationName = name + } +} + +// collides will return true if it is a name used by the SDK or Golang. +func collides(name string) bool { + switch name { + case "String", + "GoString", + "Validate": + return true + } + return false +} + +func exceptionCollides(name string) bool { + switch name { + case "Code", + "Message", + "OrigErr", + "Error", + "String", + "GoString", + "RequestID", + "StatusCode", + "RespMetadata": + return true + } + return false +} + +func (a *API) applyShapeNameAliases() { + service, ok := shapeNameAliases[a.name] + if !ok { + return + } + + // Generic Shape Aliases + for name, s := range a.Shapes { + if alias, ok := service[name]; ok { + s.Rename(alias) + s.AliasedShapeName = true + } + } +} + +// createInputOutputShapes creates toplevel input/output shapes if they +// have not been defined in the API. This normalizes all APIs to always +// have an input and output structure in the signature. 
+func (a *API) createInputOutputShapes() { + for _, op := range a.Operations { + createAPIParamShape(a, op.Name, &op.InputRef, op.ExportedName+"Input", + shamelist.Input, + ) + op.InputRef.Shape.UsedAsInput = true + + createAPIParamShape(a, op.Name, &op.OutputRef, op.ExportedName+"Output", + shamelist.Output, + ) + op.OutputRef.Shape.UsedAsOutput = true + } +} + +func (a *API) renameAPIPayloadShapes() { + for _, op := range a.Operations { + op.InputRef.Payload = a.ExportableName(op.InputRef.Payload) + op.OutputRef.Payload = a.ExportableName(op.OutputRef.Payload) + } +} + +func createAPIParamShape(a *API, opName string, ref *ShapeRef, shapeName string, shamelistLookup func(string, string) bool) { + if len(ref.ShapeName) == 0 { + setAsPlacholderShape(ref, shapeName, a) + return + } + + // nothing to do if already the correct name. + if s := ref.Shape; s.AliasedShapeName || s.ShapeName == shapeName || shamelistLookup(a.name, opName) { + return + } + + if s, ok := a.Shapes[shapeName]; ok { + panic(fmt.Sprintf( + "attempting to create duplicate API parameter shape, %v, %v, %v, %v\n", + shapeName, opName, ref.ShapeName, s.OrigShapeName, + )) + } + + ref.Shape.removeRef(ref) + ref.ShapeName = shapeName + ref.Shape = ref.Shape.Clone(shapeName) + ref.Shape.refs = append(ref.Shape.refs, ref) +} + +func setAsPlacholderShape(tgtShapeRef *ShapeRef, name string, a *API) { + shape := a.makeIOShape(name) + shape.Placeholder = true + *tgtShapeRef = ShapeRef{API: a, ShapeName: shape.ShapeName, Shape: shape} + shape.refs = append(shape.refs, tgtShapeRef) +} + +// makeIOShape returns a pointer to a new Shape initialized by the name provided. +func (a *API) makeIOShape(name string) *Shape { + shape := &Shape{ + API: a, ShapeName: name, Type: "structure", + MemberRefs: map[string]*ShapeRef{}, + } + a.Shapes[name] = shape + return shape +} + +// removeUnusedShapes removes shapes from the API which are not referenced by +// any other shape in the API. 
+func (a *API) removeUnusedShapes() { + for _, s := range a.Shapes { + if len(s.refs) == 0 { + a.removeShape(s) + } + } +} + +// Sets the EndpointsID field of Metadata with the value of the +// EndpointPrefix if EndpointsID is not set. +func (a *API) setMetadataEndpointsKey() { + if len(a.Metadata.EndpointsID) != 0 { + return + } + a.Metadata.EndpointsID = a.Metadata.EndpointPrefix +} + +func (a *API) findEndpointDiscoveryOp() { + for _, op := range a.Operations { + if op.IsEndpointDiscoveryOp { + a.EndpointDiscoveryOp = op + return + } + } +} + +func (a *API) injectUnboundedOutputStreaming() { + for _, op := range a.Operations { + if op.AuthType != V4UnsignedBodyAuthType { + continue + } + for _, ref := range op.InputRef.Shape.MemberRefs { + if ref.Streaming || ref.Shape.Streaming { + if len(ref.Documentation) != 0 { + ref.Documentation += ` +//` + } + ref.Documentation += ` +// To use an non-seekable io.Reader for this request wrap the io.Reader with +// "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable +// readers. This will allow the SDK to send the reader's payload as chunked +// transfer encoding.` + } + } + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/s3manger_input.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/s3manger_input.go new file mode 100644 index 00000000000..cba08f29afe --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/s3manger_input.go @@ -0,0 +1,86 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "text/template" +) + +// S3ManagerUploadInputGoCode returns the Go code for the S3 Upload Manager's +// input structure. 
+func S3ManagerUploadInputGoCode(a *API) string { + if v := a.PackageName(); v != "s3" { + panic(fmt.Sprintf("unexpected API model %s", v)) + } + + s, ok := a.Shapes["PutObjectInput"] + if !ok { + panic(fmt.Sprintf("unable to find PutObjectInput shape in S3 model")) + } + + a.resetImports() + a.AddImport("io") + a.AddImport("time") + + var w bytes.Buffer + if err := s3managerUploadInputTmpl.Execute(&w, s); err != nil { + panic(fmt.Sprintf("failed to execute %s template, %v", + s3managerUploadInputTmpl.Name(), err)) + } + + return a.importsGoCode() + w.String() +} + +var s3managerUploadInputTmpl = template.Must( + template.New("s3managerUploadInputTmpl"). + Funcs(template.FuncMap{ + "GetDeprecatedMsg": getDeprecatedMessage, + }). + Parse(s3managerUploadInputTmplDef), +) + +const s3managerUploadInputTmplDef = ` +// UploadInput provides the input parameters for uploading a stream or buffer +// to an object in an Amazon S3 bucket. This type is similar to the s3 +// package's PutObjectInput with the exception that the Body member is an +// io.Reader instead of an io.ReadSeeker. +type UploadInput struct { + _ struct{} {{ .GoTags true false }} + + {{ range $name, $ref := $.MemberRefs -}} + {{ if eq $name "Body" }} + // The readable body payload to send to S3. + Body io.Reader + {{ else if eq $name "ContentLength" }} + {{/* S3 Upload Manager does not use modeled content length */}} + {{ else }} + {{ $isBlob := $.WillRefBeBase64Encoded $name -}} + {{ $isRequired := $.IsRequired $name -}} + {{ $doc := $ref.Docstring -}} + + {{ if $doc -}} + {{ $doc }} + {{ if $ref.Deprecated -}} + // + // Deprecated: {{ GetDeprecatedMsg $ref.DeprecatedMsg $name }} + {{ end -}} + {{ end -}} + {{ if $isBlob -}} + {{ if $doc -}} + // + {{ end -}} + // {{ $name }} is automatically base64 encoded/decoded by the SDK. 
+ {{ end -}} + {{ if $isRequired -}} + {{ if or $doc $isBlob -}} + // + {{ end -}} + // {{ $name }} is a required field + {{ end -}} + {{ $name }} {{ $.GoStructType $name $ref }} {{ $ref.GoTags false $isRequired }} + {{ end }} + {{ end }} +} +` diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/service_name.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/service_name.go new file mode 100644 index 00000000000..9d9243a9867 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/service_name.go @@ -0,0 +1,159 @@ +// +build codegen + +package api + +// ServiceName returns the SDK's naming of the service. Has +// backwards compatibility built in for services that were +// incorrectly named with the service's endpoint prefix. +func ServiceName(a *API) string { + if oldName, ok := oldServiceNames[a.PackageName()]; ok { + return oldName + } + + return ServiceID(a) +} + +var oldServiceNames = map[string]string{ + "migrationhub": "mgh", + "acmpca": "acm-pca", + "acm": "acm", + "alexaforbusiness": "a4b", + "apigateway": "apigateway", + "applicationautoscaling": "autoscaling", + "appstream": "appstream2", + "appsync": "appsync", + "athena": "athena", + "autoscalingplans": "autoscaling", + "autoscaling": "autoscaling", + "batch": "batch", + "budgets": "budgets", + "costexplorer": "ce", + "cloud9": "cloud9", + "clouddirectory": "clouddirectory", + "cloudformation": "cloudformation", + "cloudfront": "cloudfront", + "cloudhsm": "cloudhsm", + "cloudhsmv2": "cloudhsmv2", + "cloudsearch": "cloudsearch", + "cloudsearchdomain": "cloudsearchdomain", + "cloudtrail": "cloudtrail", + "codebuild": "codebuild", + "codecommit": "codecommit", + "codedeploy": "codedeploy", + "codepipeline": "codepipeline", + "codestar": "codestar", + "cognitoidentity": "cognito-identity", + "cognitoidentityprovider": "cognito-idp", + "cognitosync": "cognito-sync", + "comprehend": "comprehend", + "configservice": "config", + "connect": "connect", + 
"costandusagereportservice": "cur", + "datapipeline": "datapipeline", + "dax": "dax", + "devicefarm": "devicefarm", + "directconnect": "directconnect", + "applicationdiscoveryservice": "discovery", + "databasemigrationservice": "dms", + "directoryservice": "ds", + "dynamodb": "dynamodb", + "ec2": "ec2", + "ecr": "ecr", + "ecs": "ecs", + "eks": "eks", + "elasticache": "elasticache", + "elasticbeanstalk": "elasticbeanstalk", + "efs": "elasticfilesystem", + "elb": "elasticloadbalancing", + "elbv2": "elasticloadbalancing", + "emr": "elasticmapreduce", + "elastictranscoder": "elastictranscoder", + "ses": "email", + "marketplaceentitlementservice": "entitlement.marketplace", + "elasticsearchservice": "es", + "cloudwatchevents": "events", + "firehose": "firehose", + "fms": "fms", + "gamelift": "gamelift", + "glacier": "glacier", + "glue": "glue", + "greengrass": "greengrass", + "guardduty": "guardduty", + "health": "health", + "iam": "iam", + "inspector": "inspector", + "iotdataplane": "data.iot", + "iotjobsdataplane": "data.jobs.iot", + "iot": "iot", + "iot1clickdevicesservice": "devices.iot1click", + "iot1clickprojects": "projects.iot1click", + "iotanalytics": "iotanalytics", + "kinesisvideoarchivedmedia": "kinesisvideo", + "kinesisvideomedia": "kinesisvideo", + "kinesis": "kinesis", + "kinesisanalytics": "kinesisanalytics", + "kinesisvideo": "kinesisvideo", + "kms": "kms", + "lambda": "lambda", + "lexmodelbuildingservice": "models.lex", + "lightsail": "lightsail", + "cloudwatchlogs": "logs", + "machinelearning": "machinelearning", + "marketplacecommerceanalytics": "marketplacecommerceanalytics", + "mediaconvert": "mediaconvert", + "medialive": "medialive", + "mediapackage": "mediapackage", + "mediastoredata": "data.mediastore", + "mediastore": "mediastore", + "mediatailor": "api.mediatailor", + "marketplacemetering": "metering.marketplace", + "mobile": "mobile", + "mobileanalytics": "mobileanalytics", + "cloudwatch": "monitoring", + "mq": "mq", + "mturk": 
"mturk-requester", + "neptune": "rds", + "opsworks": "opsworks", + "opsworkscm": "opsworks-cm", + "organizations": "organizations", + "pi": "pi", + "pinpoint": "pinpoint", + "polly": "polly", + "pricing": "api.pricing", + "rds": "rds", + "redshift": "redshift", + "rekognition": "rekognition", + "resourcegroups": "resource-groups", + "resourcegroupstaggingapi": "tagging", + "route53": "route53", + "route53domains": "route53domains", + "lexruntimeservice": "runtime.lex", + "sagemakerruntime": "runtime.sagemaker", + "s3": "s3", + "sagemaker": "sagemaker", + "simpledb": "sdb", + "secretsmanager": "secretsmanager", + "serverlessapplicationrepository": "serverlessrepo", + "servicecatalog": "servicecatalog", + "servicediscovery": "servicediscovery", + "shield": "shield", + "sms": "sms", + "snowball": "snowball", + "sns": "sns", + "sqs": "sqs", + "ssm": "ssm", + "sfn": "states", + "storagegateway": "storagegateway", + "dynamodbstreams": "streams.dynamodb", + "sts": "sts", + "support": "support", + "swf": "swf", + "transcribeservice": "transcribe", + "translate": "translate", + "wafregional": "waf-regional", + "waf": "waf", + "workdocs": "workdocs", + "workmail": "workmail", + "workspaces": "workspaces", + "xray": "xray", +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go new file mode 100644 index 00000000000..5871643d8c1 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape.go @@ -0,0 +1,1057 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "path" + "regexp" + "sort" + "strings" + "text/template" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// ErrorInfo represents the error block of a shape's structure +type ErrorInfo struct { + Type string + Code string + HTTPStatusCode int +} + +// A XMLInfo defines URL and prefix for Shapes when rendered as XML +type XMLInfo struct { + Prefix string + URI string +} + +// A 
ShapeRef defines the usage of a shape within the API. +type ShapeRef struct { + API *API `json:"-"` + Shape *Shape `json:"-"` + Documentation string + ShapeName string `json:"shape"` + Location string + LocationName string + QueryName string + Flattened bool + Streaming bool + XMLAttribute bool + // Ignore, if set, will not be sent over the wire + Ignore bool + XMLNamespace XMLInfo + Payload string + IdempotencyToken bool `json:"idempotencyToken"` + TimestampFormat string `json:"timestampFormat"` + JSONValue bool `json:"jsonvalue"` + Deprecated bool `json:"deprecated"` + DeprecatedMsg string `json:"deprecatedMessage"` + EndpointDiscoveryID bool `json:"endpointdiscoveryid"` + HostLabel bool `json:"hostLabel"` + + OrigShapeName string `json:"-"` + + GenerateGetter bool + + IsEventPayload bool `json:"eventpayload"` + IsEventHeader bool `json:"eventheader"` + + // Collection of custom tags the shape reference includes. + CustomTags ShapeTags + + // Flags whether the member reference is a endpoint ARN + EndpointARN bool + + // Flags whether the member reference is a Outpost ID + OutpostIDMember bool + + // Flag whether the member reference is a Account ID when endpoint shape ARN is present + AccountIDMemberWithARN bool +} + +// A Shape defines the definition of a shape type +type Shape struct { + API *API `json:"-"` + ShapeName string + Documentation string + MemberRefs map[string]*ShapeRef `json:"members"` + MemberRef ShapeRef `json:"member"` // List ref + KeyRef ShapeRef `json:"key"` // map key ref + ValueRef ShapeRef `json:"value"` // map value ref + Required []string + Payload string + Type string + Exception bool + Enum []string + EnumConsts []string + Flattened bool + Streaming bool + Location string + LocationName string + IdempotencyToken bool `json:"idempotencyToken"` + TimestampFormat string `json:"timestampFormat"` + XMLNamespace XMLInfo + Min float64 // optional Minimum length (string, list) or value (number) + + OutputEventStreamAPI *EventStreamAPI + 
EventStream *EventStream + EventFor []*EventStream `json:"-"` + + IsInputEventStream bool `json:"-"` + IsOutputEventStream bool `json:"-"` + + IsEventStream bool `json:"eventstream"` + IsEvent bool `json:"event"` + + refs []*ShapeRef // References to this shape + resolvePkg string // use this package in the goType() if present + + OrigShapeName string `json:"-"` + + // Defines if the shape is a placeholder and should not be used directly + Placeholder bool + + Deprecated bool `json:"deprecated"` + DeprecatedMsg string `json:"deprecatedMessage"` + + Validations ShapeValidations + + // Error information that is set if the shape is an error shape. + ErrorInfo ErrorInfo `json:"error"` + + // Flags that the shape cannot be rename. Prevents the shape from being + // renamed further by the Input/Output. + AliasedShapeName bool + + // Sensitive types should not be logged by SDK type loggers. + Sensitive bool `json:"sensitive"` + + // Flags that a member of the shape is an EndpointARN + HasEndpointARNMember bool + + // Flags that a member of the shape is an OutpostIDMember + HasOutpostIDMember bool + + // Flags that the shape has an account id member along with EndpointARN member + HasAccountIdMemberWithARN bool + + // Indicates the Shape is used as an operation input + UsedAsInput bool + + // Indicates the Shape is used as an operation output + UsedAsOutput bool +} + +// CanBeEmpty returns if the shape value can sent request as an empty value. +// String, blob, list, and map are types must not be empty when the member is +// serialized to the URI path, or decorated with HostLabel. +func (ref *ShapeRef) CanBeEmpty() bool { + switch ref.Shape.Type { + case "string": + return !(ref.Location == "uri" || ref.HostLabel) + case "blob", "map", "list": + return !(ref.Location == "uri") + default: + return true + } +} + +// ErrorCodeName will return the error shape's name formated for +// error code const. 
+func (s *Shape) ErrorCodeName() string {
+	return "ErrCode" + s.ShapeName
+}
+
+// ErrorName will return the shape's name or error code if available based
+// on the API's protocol. This is the error code string returned by the service.
+func (s *Shape) ErrorName() string {
+	name := s.ErrorInfo.Type
+	// Query/XML protocols prefer the modeled error code over the error type.
+	switch s.API.Metadata.Protocol {
+	case "query", "ec2query", "rest-xml":
+		if len(s.ErrorInfo.Code) > 0 {
+			name = s.ErrorInfo.Code
+		}
+	}
+
+	// Fall back to the original, then current, shape name when the error
+	// metadata provides nothing.
+	if len(name) == 0 {
+		name = s.OrigShapeName
+	}
+	if len(name) == 0 {
+		name = s.ShapeName
+	}
+
+	return name
+}
+
+// PayloadRefName returns the payload member of the shape if there is one
+// modeled. If no payload is modeled, empty string will be returned.
+func (s *Shape) PayloadRefName() string {
+	if name := s.Payload; len(name) != 0 {
+		// Root shape
+		return name
+	}
+
+	// Otherwise an event shape may flag one member as the event payload.
+	for name, ref := range s.MemberRefs {
+		if ref.IsEventPayload {
+			return name
+		}
+	}
+
+	return ""
+}
+
+// GoTags returns the struct tags for a shape.
+func (s *Shape) GoTags(root, required bool) string {
+	// Wrap the shape in a synthetic self-reference so ShapeRef.GoTags can be
+	// reused for shape-level tags.
+	ref := &ShapeRef{ShapeName: s.ShapeName, API: s.API, Shape: s}
+	return ref.GoTags(root, required)
+}
+
+// Rename changes the name of the Shape to newName. Also updates
+// the associated API's reference to use newName.
+// Panics if the shape is flagged as aliased (AliasedShapeName), which
+// prevents further renaming.
+func (s *Shape) Rename(newName string) {
+	if s.AliasedShapeName {
+		panic(fmt.Sprintf("attempted to rename %s, but flagged as aliased",
+			s.ShapeName))
+	}
+
+	// Point every reference to this shape at the new name.
+	for _, r := range s.refs {
+		r.ShapeName = newName
+	}
+
+	// Re-key the shape in the API's shape table.
+	delete(s.API.Shapes, s.ShapeName)
+	s.API.Shapes[newName] = s
+	s.ShapeName = newName
+}
+
+// MemberNames returns a slice of struct member names, sorted alphabetically.
+func (s *Shape) MemberNames() []string {
+	i, names := 0, make([]string, len(s.MemberRefs))
+	for n := range s.MemberRefs {
+		names[i] = n
+		i++
+	}
+	sort.Strings(names)
+	return names
+}
+
+// HasMember will return whether or not the shape has a given
+// member by name.
+func (s *Shape) HasMember(name string) bool {
+	_, ok := s.MemberRefs[name]
+	return ok
+}
+
+// GoTypeWithPkgName returns a shape's type as a string with the package name in
+// <packageName>.<type> format. Package naming only applies to structures.
+func (s *Shape) GoTypeWithPkgName() string {
+	return goType(s, true)
+}
+
+// GoTypeWithPkgNameElem returns the shapes type as a string with the "*"
+// removed if there was one preset.
+func (s *Shape) GoTypeWithPkgNameElem() string {
+	t := goType(s, true)
+	if strings.HasPrefix(t, "*") {
+		return t[1:]
+	}
+	return t
+}
+
+// UseIndirection returns if the shape's reference should use indirection or not.
+// Container types, streaming payloads, and JSON values are used by value;
+// everything else is generated as a pointer.
+func (s *ShapeRef) UseIndirection() bool {
+	switch s.Shape.Type {
+	case "map", "list", "blob", "structure", "jsonvalue":
+		return false
+	}
+
+	if s.Streaming || s.Shape.Streaming {
+		return false
+	}
+
+	if s.JSONValue {
+		return false
+	}
+
+	return true
+}
+
+// GetTimestampFormat returns the shape's modeled timestamp format. Panics if
+// the format is set but not a known protocol timestamp format.
+func (s Shape) GetTimestampFormat() string {
+	format := s.TimestampFormat
+
+	if len(format) > 0 && !protocol.IsKnownTimestampFormat(format) {
+		panic(fmt.Sprintf("Unknown timestampFormat %s, for %s",
+			format, s.ShapeName))
+	}
+
+	return format
+}
+
+// GetTimestampFormat returns the reference's timestamp format, falling back
+// to the referenced shape's format. Panics if the resolved format is set but
+// not a known protocol timestamp format.
+func (ref ShapeRef) GetTimestampFormat() string {
+	format := ref.TimestampFormat
+
+	if len(format) == 0 {
+		format = ref.Shape.TimestampFormat
+	}
+
+	if len(format) > 0 && !protocol.IsKnownTimestampFormat(format) {
+		panic(fmt.Sprintf("Unknown timestampFormat %s, for %s",
+			format, ref.ShapeName))
+	}
+
+	return format
+}
+
+// GoStructValueType returns the Shape's Go type value instead of a pointer
+// for the type.
+func (s *Shape) GoStructValueType(name string, ref *ShapeRef) string {
+	v := s.GoStructType(name, ref)
+
+	// Strip the leading "*" when the member is generated as a pointer.
+	if ref.UseIndirection() && v[0] == '*' {
+		return v[1:]
+	}
+
+	return v
+}
+
+// GoStructType returns the type of a struct field based on the API
+// model definition.
+func (s *Shape) GoStructType(name string, ref *ShapeRef) string {
+	// Streaming payload members become io.ReadSeeker (input) or
+	// io.ReadCloser (shapes named *Output).
+	if (ref.Streaming || ref.Shape.Streaming) && s.Payload == name {
+		rtype := "io.ReadSeeker"
+		if strings.HasSuffix(s.ShapeName, "Output") {
+			rtype = "io.ReadCloser"
+		}
+
+		s.API.imports["io"] = true
+		return rtype
+	}
+
+	if ref.JSONValue {
+		s.API.AddSDKImport("aws")
+		return "aws.JSONValue"
+	}
+
+	// Nested map/list validations emit fmt usage in the generated code, so
+	// record the import requirement here.
+	for _, v := range s.Validations {
+		// TODO move this to shape validation resolution
+		if (v.Ref.Shape.Type == "map" || v.Ref.Shape.Type == "list") && v.Type == ShapeValidationNested {
+			s.API.imports["fmt"] = true
+		}
+	}
+
+	return ref.GoType()
+}
+
+// GoType returns a shape's Go type
+func (s *Shape) GoType() string {
+	return goType(s, false)
+}
+
+// GoType returns a shape ref's Go type.
+func (ref *ShapeRef) GoType() string {
+	if ref.Shape == nil {
+		panic(fmt.Errorf("missing shape definition on reference for %#v", ref))
+	}
+
+	return ref.Shape.GoType()
+}
+
+// GoTypeWithPkgName returns a shape's type as a string with the package name in
+// <packageName>.<type> format. Package naming only applies to structures.
+func (ref *ShapeRef) GoTypeWithPkgName() string {
+	if ref.Shape == nil {
+		panic(fmt.Errorf("missing shape definition on reference for %#v", ref))
+	}
+
+	return ref.Shape.GoTypeWithPkgName()
+}
+
+// Returns a string version of the Shape's type.
+// If withPkgName is true, the package name will be added as a prefix
+func goType(s *Shape, withPkgName bool) string {
+	switch s.Type {
+	case "structure":
+		if withPkgName || s.resolvePkg != "" {
+			pkg := s.resolvePkg
+			if pkg != "" {
+				// Externally resolved shape: record the import and use the
+				// package's base name as the qualifier.
+				s.API.imports[pkg] = true
+				pkg = path.Base(pkg)
+			} else {
+				pkg = s.API.PackageName()
+			}
+			return fmt.Sprintf("*%s.%s", pkg, s.ShapeName)
+		}
+		return "*" + s.ShapeName
+	case "map":
+		return "map[string]" + goType(s.ValueRef.Shape, withPkgName)
+	case "jsonvalue":
+		return "aws.JSONValue"
+	case "list":
+		return "[]" + goType(s.MemberRef.Shape, withPkgName)
+	case "boolean":
+		return "*bool"
+	case "string", "character":
+		return "*string"
+	case "blob":
+		return "[]byte"
+	case "byte", "short", "integer", "long":
+		// All modeled integer widths map to *int64.
+		return "*int64"
+	case "float", "double":
+		return "*float64"
+	case "timestamp":
+		s.API.imports["time"] = true
+		return "*time.Time"
+	default:
+		panic("Unsupported shape type: " + s.Type)
+	}
+}
+
+// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just
+// the type will be returned minus the pointer *.
+func (s *Shape) GoTypeElem() string {
+	t := s.GoType()
+	if strings.HasPrefix(t, "*") {
+		return t[1:]
+	}
+	return t
+}
+
+// GoTypeElem returns the Go type for the Shape. If the shape type is a pointer just
+// the type will be returned minus the pointer *.
+func (ref *ShapeRef) GoTypeElem() string {
+	if ref.Shape == nil {
+		panic(fmt.Errorf("missing shape definition on reference for %#v", ref))
+	}
+
+	return ref.Shape.GoTypeElem()
+}
+
+// ShapeTag is a struct tag that will be applied to a shape's generated code
+type ShapeTag struct {
+	Key, Val string
+}
+
+// String returns the string representation of the shape tag
+func (s ShapeTag) String() string {
+	return fmt.Sprintf(`%s:"%s"`, s.Key, s.Val)
+}
+
+// ShapeTags is a collection of shape tags and provides serialization of the
+// tags in an ordered list.
+type ShapeTags []ShapeTag + +// Join returns an ordered serialization of the shape tags with the provided +// separator. +func (s ShapeTags) Join(sep string) string { + o := &bytes.Buffer{} + for i, t := range s { + o.WriteString(t.String()) + if i < len(s)-1 { + o.WriteString(sep) + } + } + + return o.String() +} + +// String is an alias for Join with the empty space separator. +func (s ShapeTags) String() string { + return s.Join(" ") +} + +// GoTags returns the rendered tags string for the ShapeRef +func (ref *ShapeRef) GoTags(toplevel bool, isRequired bool) string { + tags := append(ShapeTags{}, ref.CustomTags...) + + if ref.Location != "" { + tags = append(tags, ShapeTag{"location", ref.Location}) + } else if ref.Shape.Location != "" { + tags = append(tags, ShapeTag{"location", ref.Shape.Location}) + } else if ref.IsEventHeader { + tags = append(tags, ShapeTag{"location", "header"}) + } + + if ref.LocationName != "" { + tags = append(tags, ShapeTag{"locationName", ref.LocationName}) + } else if ref.Shape.LocationName != "" { + tags = append(tags, ShapeTag{"locationName", ref.Shape.LocationName}) + } else if len(ref.Shape.EventFor) != 0 && ref.API.Metadata.Protocol == "rest-xml" { + // RPC JSON events need to have location name modeled for round trip testing. 
+ tags = append(tags, ShapeTag{"locationName", ref.Shape.OrigShapeName}) + } + + if ref.QueryName != "" { + tags = append(tags, ShapeTag{"queryName", ref.QueryName}) + } + if ref.Shape.MemberRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameList", ref.Shape.MemberRef.LocationName}) + } + if ref.Shape.KeyRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameKey", ref.Shape.KeyRef.LocationName}) + } + if ref.Shape.ValueRef.LocationName != "" { + tags = append(tags, ShapeTag{"locationNameValue", ref.Shape.ValueRef.LocationName}) + } + if ref.Shape.Min > 0 { + tags = append(tags, ShapeTag{"min", fmt.Sprintf("%v", ref.Shape.Min)}) + } + + if ref.Deprecated || ref.Shape.Deprecated { + tags = append(tags, ShapeTag{"deprecated", "true"}) + } + + // All shapes have a type + tags = append(tags, ShapeTag{"type", ref.Shape.Type}) + + // embed the timestamp type for easier lookups + if ref.Shape.Type == "timestamp" { + if format := ref.GetTimestampFormat(); len(format) > 0 { + tags = append(tags, ShapeTag{ + Key: "timestampFormat", + Val: format, + }) + } + } + + if ref.Shape.Flattened || ref.Flattened { + tags = append(tags, ShapeTag{"flattened", "true"}) + } + if ref.XMLAttribute { + tags = append(tags, ShapeTag{"xmlAttribute", "true"}) + } + if isRequired { + tags = append(tags, ShapeTag{"required", "true"}) + } + if ref.Shape.IsEnum() { + tags = append(tags, ShapeTag{"enum", ref.ShapeName}) + } + + if toplevel { + if name := ref.Shape.PayloadRefName(); len(name) > 0 { + tags = append(tags, ShapeTag{"payload", name}) + } + } + + if ref.XMLNamespace.Prefix != "" { + tags = append(tags, ShapeTag{"xmlPrefix", ref.XMLNamespace.Prefix}) + } else if ref.Shape.XMLNamespace.Prefix != "" { + tags = append(tags, ShapeTag{"xmlPrefix", ref.Shape.XMLNamespace.Prefix}) + } + + if ref.XMLNamespace.URI != "" { + tags = append(tags, ShapeTag{"xmlURI", ref.XMLNamespace.URI}) + } else if ref.Shape.XMLNamespace.URI != "" { + tags = append(tags, 
ShapeTag{"xmlURI", ref.Shape.XMLNamespace.URI}) + } + + if ref.IdempotencyToken || ref.Shape.IdempotencyToken { + tags = append(tags, ShapeTag{"idempotencyToken", "true"}) + } + + if ref.Ignore { + tags = append(tags, ShapeTag{"ignore", "true"}) + } + + if ref.Shape.Sensitive { + tags = append(tags, ShapeTag{"sensitive", "true"}) + } + + return fmt.Sprintf("`%s`", tags) +} + +// Docstring returns the godocs formated documentation +func (ref *ShapeRef) Docstring() string { + if ref.Documentation != "" { + return strings.Trim(ref.Documentation, "\n ") + } + return ref.Shape.Docstring() +} + +// Docstring returns the godocs formated documentation +func (s *Shape) Docstring() string { + return strings.Trim(s.Documentation, "\n ") +} + +// IndentedDocstring is the indented form of the doc string. +func (ref *ShapeRef) IndentedDocstring() string { + doc := ref.Docstring() + return strings.Replace(doc, "// ", "// ", -1) +} + +var goCodeStringerTmpl = template.Must(template.New("goCodeStringerTmpl").Parse(` +// String returns the string representation +func (s {{ $.ShapeName }}) String() string { + return awsutil.Prettify(s) +} +// GoString returns the string representation +func (s {{ $.ShapeName }}) GoString() string { + return s.String() +} +`)) + +// GoCodeStringers renders the Stringers for API input/output shapes +func (s *Shape) GoCodeStringers() string { + w := bytes.Buffer{} + if err := goCodeStringerTmpl.Execute(&w, s); err != nil { + panic(fmt.Sprintln("Unexpected error executing GoCodeStringers template", err)) + } + + return w.String() +} + +var enumStrip = regexp.MustCompile(`[^a-zA-Z0-9_:\./-]`) +var enumDelims = regexp.MustCompile(`[-_:\./]+`) +var enumCamelCase = regexp.MustCompile(`([a-z])([A-Z])`) + +// EnumName returns the Nth enum in the shapes Enum list +func (s *Shape) EnumName(n int) string { + enum := s.Enum[n] + enum = enumStrip.ReplaceAllLiteralString(enum, "") + enum = enumCamelCase.ReplaceAllString(enum, "$1-$2") + parts := 
enumDelims.Split(enum, -1) + for i, v := range parts { + v = strings.ToLower(v) + parts[i] = "" + if len(v) > 0 { + parts[i] = strings.ToUpper(v[0:1]) + } + if len(v) > 1 { + parts[i] += v[1:] + } + } + enum = strings.Join(parts, "") + enum = strings.ToUpper(enum[0:1]) + enum[1:] + return enum +} + +// NestedShape returns the shape pointer value for the shape which is nested +// under the current shape. If the shape is not nested nil will be returned. +// +// strucutures, the current shape is returned +// map: the value shape of the map is returned +// list: the element shape of the list is returned +func (s *Shape) NestedShape() *Shape { + var nestedShape *Shape + switch s.Type { + case "structure": + nestedShape = s + case "map": + nestedShape = s.ValueRef.Shape + case "list": + nestedShape = s.MemberRef.Shape + } + + return nestedShape +} + +var structShapeTmpl = func() *template.Template { + shapeTmpl := template.Must( + template.New("structShapeTmpl"). + Funcs(template.FuncMap{ + "GetDeprecatedMsg": getDeprecatedMessage, + "TrimExportedMembers": func(s *Shape) map[string]*ShapeRef { + members := map[string]*ShapeRef{} + for name, ref := range s.MemberRefs { + if ref.Shape.IsEventStream { + continue + } + members[name] = ref + } + return members + }, + }). 
+ Parse(structShapeTmplDef), + ) + + template.Must( + shapeTmpl.AddParseTree( + "eventStreamEventShapeTmpl", eventStreamEventShapeTmpl.Tree), + ) + template.Must( + shapeTmpl.AddParseTree( + "exceptionShapeMethodTmpl", + exceptionShapeMethodTmpl.Tree), + ) + shapeTmpl.Funcs(eventStreamEventShapeTmplFuncs) + + template.Must( + shapeTmpl.AddParseTree( + "hostLabelsShapeTmpl", + hostLabelsShapeTmpl.Tree), + ) + + template.Must( + shapeTmpl.AddParseTree( + "endpointARNShapeTmpl", + endpointARNShapeTmpl.Tree), + ) + + template.Must( + shapeTmpl.AddParseTree( + "outpostIDShapeTmpl", + outpostIDShapeTmpl.Tree), + ) + + template.Must( + shapeTmpl.AddParseTree( + "accountIDWithARNShapeTmpl", + accountIDWithARNShapeTmpl.Tree), + ) + + return shapeTmpl +}() + +const structShapeTmplDef = ` +{{ $.Docstring }} +{{ if $.Deprecated -}} +{{ if $.Docstring -}} +// +{{ end -}} +// Deprecated: {{ GetDeprecatedMsg $.DeprecatedMsg $.ShapeName }} +{{ end -}} +type {{ $.ShapeName }} struct { + _ struct{} {{ $.GoTags true false }} + + {{- if $.Exception }} + {{- $_ := $.API.AddSDKImport "private/protocol" }} + RespMetadata protocol.ResponseMetadata` + "`json:\"-\" xml:\"-\"`" + ` + {{- end }} + + {{- if $.OutputEventStreamAPI }} + + {{ $.OutputEventStreamAPI.OutputMemberName }} *{{ $.OutputEventStreamAPI.Name }} + {{- end }} + + {{- range $name, $elem := (TrimExportedMembers $) }} + + {{ $isBlob := $.WillRefBeBase64Encoded $name -}} + {{ $isRequired := $.IsRequired $name -}} + {{ $doc := $elem.Docstring -}} + + {{ if $doc -}} + {{ $doc }} + {{ if $elem.Deprecated -}} + // + // Deprecated: {{ GetDeprecatedMsg $elem.DeprecatedMsg $name }} + {{ end -}} + {{ end -}} + {{ if $isBlob -}} + {{ if $doc -}} + // + {{ end -}} + // {{ $name }} is automatically base64 encoded/decoded by the SDK. 
+ {{ end -}} + {{ if $isRequired -}} + {{ if or $doc $isBlob -}} + // + {{ end -}} + // {{ $name }} is a required field + {{ end -}} + {{ $name }} {{ $.GoStructType $name $elem }} {{ $elem.GoTags false $isRequired }} + {{- end }} +} + +{{- if not $.API.NoStringerMethods }} + {{ $.GoCodeStringers }} +{{- end }} + +{{- if not (or $.API.NoValidataShapeMethods $.Exception) }} + {{- if $.Validations }} + {{ $.Validations.GoCode $ }} + {{- end }} +{{- end }} + +{{- if not (or $.API.NoGenStructFieldAccessors $.Exception) }} + {{- $builderShapeName := print $.ShapeName }} + + {{- range $name, $elem := (TrimExportedMembers $) }} + // Set{{ $name }} sets the {{ $name }} field's value. + func (s *{{ $builderShapeName }}) Set{{ $name }}(v {{ $.GoStructValueType $name $elem }}) *{{ $builderShapeName }} { + {{- if $elem.UseIndirection }} + s.{{ $name }} = &v + {{- else }} + s.{{ $name }} = v + {{- end }} + return s + } + + {{- if $elem.GenerateGetter }} + + func (s *{{ $builderShapeName }}) get{{ $name }}() (v {{ $.GoStructValueType $name $elem }}) { + {{- if $elem.UseIndirection }} + if s.{{ $name }} == nil { + return v + } + return *s.{{ $name }} + {{- else }} + return s.{{ $name }} + {{- end }} + } + {{- end }} + {{- end }} +{{- end }} + +{{- if $.OutputEventStreamAPI }} + {{- $esMemberName := $.OutputEventStreamAPI.OutputMemberName }} + {{- if $.OutputEventStreamAPI.Legacy }} + func (s *{{ $.ShapeName }}) Set{{ $esMemberName }}(v *{{ $.OutputEventStreamAPI.Name }}) *{{ $.ShapeName }} { + s.{{ $esMemberName }} = v + return s + } + func (s *{{ $.ShapeName }}) Get{{ $esMemberName }}() *{{ $.OutputEventStreamAPI.Name }} { + return s.{{ $esMemberName }} + } + {{- end }} + + // GetStream returns the type to interact with the event stream. 
+ func (s *{{ $.ShapeName }}) GetStream() *{{ $.OutputEventStreamAPI.Name }} { + return s.{{ $esMemberName }} + } +{{- end }} + +{{- if $.EventFor }} + {{ template "eventStreamEventShapeTmpl" $ }} +{{- end }} + +{{- if and $.Exception (or $.API.WithGeneratedTypedErrors $.EventFor) }} + {{ template "exceptionShapeMethodTmpl" $ }} +{{- end }} + +{{- if $.HasHostLabelMembers }} + {{ template "hostLabelsShapeTmpl" $ }} +{{- end }} + +{{- if $.HasEndpointARNMember }} + {{ template "endpointARNShapeTmpl" $ }} +{{- end }} + +{{- if $.HasOutpostIDMember }} + {{ template "outpostIDShapeTmpl" $ }} +{{- end }} + +{{- if $.HasAccountIdMemberWithARN }} + {{ template "accountIDWithARNShapeTmpl" $ }} +{{- end }} + +` + +var exceptionShapeMethodTmpl = template.Must( + template.New("exceptionShapeMethodTmpl").Parse(` +{{- $_ := $.API.AddImport "fmt" }} +{{/* TODO allow service custom input to be used */}} +func newError{{ $.ShapeName }}(v protocol.ResponseMetadata) error { + return &{{ $.ShapeName }}{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *{{ $.ShapeName }}) Code() string { + return "{{ $.ErrorName }}" +} + +// Message returns the exception's message. +func (s *{{ $.ShapeName }}) Message() string { + {{- if index $.MemberRefs "Message_" }} + if s.Message_ != nil { + return *s.Message_ + } + {{ end -}} + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *{{ $.ShapeName }}) OrigErr() error { + return nil +} + +func (s *{{ $.ShapeName }}) Error() string { + {{- if or (and (eq (len $.MemberRefs) 1) (not (index $.MemberRefs "Message_"))) (gt (len $.MemberRefs) 1) }} + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) + {{- else }} + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) + {{- end }} +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *{{ $.ShapeName }}) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *{{ $.ShapeName }}) RequestID() string { + return s.RespMetadata.RequestID +} +`)) + +var enumShapeTmpl = template.Must(template.New("EnumShape").Parse(` +{{ $.Docstring }} +const ( + {{ range $index, $elem := $.Enum -}} + {{ $name := index $.EnumConsts $index -}} + // {{ $name }} is a {{ $.ShapeName }} enum value + {{ $name }} = "{{ $elem }}" + + {{ end }} +) + +{{/* Enum iterators use non-idomatic _Values suffix to avoid naming collisions with other generated types, and enum values */}} +// {{ $.ShapeName }}_Values returns all elements of the {{ $.ShapeName }} enum +func {{ $.ShapeName }}_Values() []string { + return []string{ + {{ range $index, $elem := $.Enum -}} + {{ index $.EnumConsts $index }}, + {{ end }} + } +} +`)) + +// GoCode returns the rendered Go code for the Shape. +func (s *Shape) GoCode() string { + w := &bytes.Buffer{} + + switch { + case s.IsEventStream: + if err := renderEventStreamShape(w, s); err != nil { + panic( + fmt.Sprintf( + "failed to generate eventstream API shape, %s, %v", + s.ShapeName, err), + ) + } + case s.Type == "structure": + if err := structShapeTmpl.Execute(w, s); err != nil { + panic( + fmt.Sprintf( + "Failed to generate struct shape %s, %v", + s.ShapeName, err), + ) + } + case s.IsEnum(): + if err := enumShapeTmpl.Execute(w, s); err != nil { + panic( + fmt.Sprintf( + "Failed to generate enum shape %s, %v", + s.ShapeName, err), + ) + } + default: + panic(fmt.Sprintln("Cannot generate toplevel shape for", s.Type)) + } + + return w.String() +} + +// IsEnum returns whether this shape is an enum list +func (s *Shape) IsEnum() bool { + return s.Type == "string" && len(s.Enum) > 0 +} + +// IsRequired returns if member is a required field. Required fields are fields +// marked as required, hostLabels, or location of uri path. 
+func (s *Shape) IsRequired(member string) bool { + ref, ok := s.MemberRefs[member] + if !ok { + panic(fmt.Sprintf( + "attempted to check required for unknown member, %s.%s", + s.ShapeName, member, + )) + } + if ref.IdempotencyToken || ref.Shape.IdempotencyToken { + return false + } + if ref.Location == "uri" || ref.HostLabel { + return true + } + for _, n := range s.Required { + if n == member { + if ref.Shape.IsEventStream { + return false + } + return true + } + } + return false +} + +// IsInternal returns whether the shape was defined in this package +func (s *Shape) IsInternal() bool { + return s.resolvePkg == "" +} + +// removeRef removes a shape reference from the list of references this +// shape is used in. +func (s *Shape) removeRef(ref *ShapeRef) { + r := s.refs + for i := 0; i < len(r); i++ { + if r[i] == ref { + j := i + 1 + copy(r[i:], r[j:]) + for k, n := len(r)-j+i, len(r); k < n; k++ { + r[k] = nil // free up the end of the list + } // for k + s.refs = r[:len(r)-j+i] + break + } + } +} + +func (s *Shape) WillRefBeBase64Encoded(refName string) bool { + payloadRefName := s.Payload + if payloadRefName == refName { + return false + } + + ref, ok := s.MemberRefs[refName] + if !ok { + panic(fmt.Sprintf("shape %s does not contain %q refName", s.ShapeName, refName)) + } + + return ref.Shape.Type == "blob" +} + +// Clone returns a cloned version of the shape with all references clones. +// +// Does not clone EventStream or Validate related values. 
+func (s *Shape) Clone(newName string) *Shape { + if s.AliasedShapeName { + panic(fmt.Sprintf("attempted to clone and rename %s, but flagged as aliased", + s.ShapeName)) + } + + n := new(Shape) + *n = *s + + debugLogger.Logln("cloning", s.ShapeName, "to", newName) + + n.MemberRefs = map[string]*ShapeRef{} + for k, r := range s.MemberRefs { + nr := new(ShapeRef) + *nr = *r + nr.Shape.refs = append(nr.Shape.refs, nr) + n.MemberRefs[k] = nr + } + + if n.MemberRef.Shape != nil { + n.MemberRef.Shape.refs = append(n.MemberRef.Shape.refs, &n.MemberRef) + } + if n.KeyRef.Shape != nil { + n.KeyRef.Shape.refs = append(n.KeyRef.Shape.refs, &n.KeyRef) + } + if n.ValueRef.Shape != nil { + n.ValueRef.Shape.refs = append(n.ValueRef.Shape.refs, &n.ValueRef) + } + + n.refs = []*ShapeRef{} + + n.Required = append([]string{}, n.Required...) + n.Enum = append([]string{}, n.Enum...) + n.EnumConsts = append([]string{}, n.EnumConsts...) + + n.API.Shapes[newName] = n + n.ShapeName = newName + n.OrigShapeName = s.OrigShapeName + + return n +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_alias.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_alias.go new file mode 100644 index 00000000000..f5e7ad2318c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_alias.go @@ -0,0 +1,19 @@ +package api + +var shapeNameAliases = map[string]map[string]string{ + "APIGateway": map[string]string{ + "RequestValidator": "UpdateRequestValidatorOutput", + "VpcLink": "UpdateVpcLinkOutput", + "GatewayResponse": "UpdateGatewayResponseOutput", + }, + "Lambda": map[string]string{ + "Concurrency": "PutFunctionConcurrencyOutput", + }, + "Neptune": map[string]string{ + "DBClusterParameterGroupNameMessage": "ResetDBClusterParameterGroupOutput", + "DBParameterGroupNameMessage": "ResetDBParameterGroupOutput", + }, + "RDS": map[string]string{ + "DBClusterBacktrack": "BacktrackDBClusterOutput", + }, +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go new file mode 100644 index 00000000000..5e6ec8bdf94 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_validation.go @@ -0,0 +1,169 @@ +// +build codegen + +package api + +import ( + "bytes" + "fmt" + "text/template" +) + +// A ShapeValidationType is the type of validation that a shape needs +type ShapeValidationType int + +const ( + // ShapeValidationRequired states the shape must be set + ShapeValidationRequired = iota + + // ShapeValidationMinVal states the shape must have at least a number of + // elements, or for numbers a minimum value + ShapeValidationMinVal + + // ShapeValidationNested states the shape has nested values that need + // to be validated + ShapeValidationNested +) + +// A ShapeValidation contains information about a shape and the type of validation +// that is needed +type ShapeValidation struct { + // Name of the shape to be validated + Name string + // Reference to the shape within the context the shape is referenced + Ref *ShapeRef + // Type of validation needed + Type ShapeValidationType +} + +var validationGoCodeTmpls = template.Must( + template.New("validationGoCodeTmpls"). + Funcs(template.FuncMap{ + "getMin": func(ref *ShapeRef) float64 { + if !ref.CanBeEmpty() && ref.Shape.Min <= 0 { + return 1 + } + + return ref.Shape.Min + }, + }). 
+ Parse(` +{{ define "requiredValue" -}} + if s.{{ .Name }} == nil { + invalidParams.Add(request.NewErrParamRequired("{{ .Name }}")) + } +{{- end }} +{{ define "minLen" -}} + {{- $min := getMin .Ref -}} + if s.{{ .Name }} != nil && len(s.{{ .Name }}) < {{ $min }} { + invalidParams.Add(request.NewErrParamMinLen("{{ .Name }}", {{ $min }})) + } +{{- end }} +{{ define "minLenString" -}} + {{- $min := getMin .Ref -}} + if s.{{ .Name }} != nil && len(*s.{{ .Name }}) < {{ $min }} { + invalidParams.Add(request.NewErrParamMinLen("{{ .Name }}", {{ $min }})) + } +{{- end }} +{{ define "minVal" -}} + {{- $min := getMin .Ref -}} + if s.{{ .Name }} != nil && *s.{{ .Name }} < {{ $min }} { + invalidParams.Add(request.NewErrParamMinValue("{{ .Name }}", {{ $min }})) + } +{{- end }} +{{ define "nestedMapList" -}} + if s.{{ .Name }} != nil { + for i, v := range s.{{ .Name }} { + if v == nil { continue } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "{{ .Name }}", i), err.(request.ErrInvalidParams)) + } + } + } +{{- end }} +{{ define "nestedStruct" -}} + if s.{{ .Name }} != nil { + if err := s.{{ .Name }}.Validate(); err != nil { + invalidParams.AddNested("{{ .Name }}", err.(request.ErrInvalidParams)) + } + } +{{- end }} +`)) + +// GoCode returns the generated Go code for the Shape with its validation type. 
+func (sv ShapeValidation) GoCode() string { + var err error + + w := &bytes.Buffer{} + switch sv.Type { + case ShapeValidationRequired: + err = validationGoCodeTmpls.ExecuteTemplate(w, "requiredValue", sv) + case ShapeValidationMinVal: + switch sv.Ref.Shape.Type { + case "list", "map", "blob": + err = validationGoCodeTmpls.ExecuteTemplate(w, "minLen", sv) + case "string": + err = validationGoCodeTmpls.ExecuteTemplate(w, "minLenString", sv) + case "integer", "long", "float", "double": + err = validationGoCodeTmpls.ExecuteTemplate(w, "minVal", sv) + default: + panic(fmt.Sprintf("ShapeValidation.GoCode, %s's type %s, no min value handling", + sv.Name, sv.Ref.Shape.Type)) + } + case ShapeValidationNested: + switch sv.Ref.Shape.Type { + case "map", "list": + err = validationGoCodeTmpls.ExecuteTemplate(w, "nestedMapList", sv) + default: + err = validationGoCodeTmpls.ExecuteTemplate(w, "nestedStruct", sv) + } + default: + panic(fmt.Sprintf("ShapeValidation.GoCode, %s's type %d, unknown validation type", + sv.Name, sv.Type)) + } + + if err != nil { + panic(fmt.Sprintf("ShapeValidation.GoCode failed, err: %v", err)) + } + + return w.String() +} + +// A ShapeValidations is a collection of shape validations needed nested within +// a parent shape +type ShapeValidations []ShapeValidation + +var validateShapeTmpl = template.Must(template.New("ValidateShape").Parse(` +// Validate inspects the fields of the type to determine if they are valid. +func (s *{{ .Shape.ShapeName }}) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "{{ .Shape.ShapeName }}"} + {{ range $_, $v := .Validations -}} + {{ $v.GoCode }} + {{ end }} + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} +`)) + +// GoCode generates the Go code needed to perform validations for the +// shape and its nested fields. 
+func (vs ShapeValidations) GoCode(shape *Shape) string { + buf := &bytes.Buffer{} + validateShapeTmpl.Execute(buf, map[string]interface{}{ + "Shape": shape, + "Validations": vs, + }) + return buf.String() +} + +// Has returns true or false if the ShapeValidations already contains the +// the reference and validation type. +func (vs ShapeValidations) Has(ref *ShapeRef, typ ShapeValidationType) bool { + for _, v := range vs { + if v.Ref == ref && v.Type == typ { + return true + } + } + return false +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_value_builder.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_value_builder.go new file mode 100644 index 00000000000..9f5d0f4bbbc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/shape_value_builder.go @@ -0,0 +1,276 @@ +// +build codegen + +package api + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// ShapeValueBuilder provides the logic to build the nested values for a shape. +// Base64BlobValues is true if the blob field in shapeRef.Shape.Type is base64 +// encoded. +type ShapeValueBuilder struct { + // Specifies if API shapes modeled as blob types, input values are base64 + // encoded or not, and strings values instead. + Base64BlobValues bool + + // The helper that will provide the logic and formated code to convert a + // timestamp input value into a Go time.Time. + ParseTimeString func(ref *ShapeRef, memberName, v string) string +} + +// NewShapeValueBuilder returns an initialized ShapeValueBuilder for generating +// API shape types initialized with values. +func NewShapeValueBuilder() ShapeValueBuilder { + return ShapeValueBuilder{ParseTimeString: parseUnixTimeString} +} + +// BuildShape will recursively build the referenced shape based on the json +// object provided. 
isMap will dictate how the field name is specified. If +// isMap is true, we will expect the member name to be quotes like "Foo". +func (b ShapeValueBuilder) BuildShape(ref *ShapeRef, shapes map[string]interface{}, isMap bool) string { + order := make([]string, len(shapes)) + for k := range shapes { + order = append(order, k) + } + sort.Strings(order) + + ret := "" + for _, name := range order { + if name == "" { + continue + } + shape := shapes[name] + + // If the shape isn't a map, we want to export the value, since every field + // defined in our shapes are exported. + if len(name) > 0 && !isMap && strings.ToLower(name[0:1]) == name[0:1] { + name = strings.Title(name) + } + + memName := name + passRef := ref.Shape.MemberRefs[name] + if isMap { + memName = fmt.Sprintf("%q", memName) + passRef = &ref.Shape.ValueRef + } + switch v := shape.(type) { + case map[string]interface{}: + ret += b.BuildComplex(name, memName, passRef, ref.Shape, v) + case []interface{}: + ret += b.BuildList(name, memName, passRef, v) + default: + + ret += b.BuildScalar(name, memName, passRef, v, ref.Shape.Payload == name) + } + } + return ret +} + +// BuildList will construct a list shape based off the service's definition of +// that list. +func (b ShapeValueBuilder) BuildList(name, memName string, ref *ShapeRef, v []interface{}) string { + ret := "" + + if len(v) == 0 || ref == nil { + return "" + } + + passRef := &ref.Shape.MemberRef + ret += fmt.Sprintf("%s: %s {\n", memName, b.GoType(ref, false)) + ret += b.buildListElements(passRef, v) + ret += "},\n" + return ret +} + +func (b ShapeValueBuilder) buildListElements(ref *ShapeRef, v []interface{}) string { + if len(v) == 0 || ref == nil { + return "" + } + + ret := "" + format := "" + isComplex := false + isList := false + + // get format for atomic type. If it is not an atomic type, + // get the element. 
+ switch v[0].(type) { + case string: + format = "%s" + case bool: + format = "%t" + case float64: + switch ref.Shape.Type { + case "integer", "int64", "long": + format = "%d" + default: + format = "%f" + } + case []interface{}: + isList = true + case map[string]interface{}: + isComplex = true + } + + for _, elem := range v { + if isComplex { + ret += fmt.Sprintf("{\n%s\n},\n", b.BuildShape(ref, elem.(map[string]interface{}), ref.Shape.Type == "map")) + } else if isList { + ret += fmt.Sprintf("{\n%s\n},\n", b.buildListElements(&ref.Shape.MemberRef, elem.([]interface{}))) + } else { + switch ref.Shape.Type { + case "integer", "int64", "long": + elem = int(elem.(float64)) + } + ret += fmt.Sprintf("%s,\n", getValue(ref.Shape.Type, fmt.Sprintf(format, elem))) + } + } + return ret +} + +// BuildScalar will build atomic Go types. +func (b ShapeValueBuilder) BuildScalar(name, memName string, ref *ShapeRef, shape interface{}, isPayload bool) string { + if ref == nil || ref.Shape == nil { + return "" + } + + switch v := shape.(type) { + case bool: + return convertToCorrectType(memName, ref.Shape.Type, fmt.Sprintf("%t", v)) + case int: + if ref.Shape.Type == "timestamp" { + return b.ParseTimeString(ref, memName, fmt.Sprintf("%d", v)) + } + return convertToCorrectType(memName, ref.Shape.Type, fmt.Sprintf("%d", v)) + case float64: + + dataType := ref.Shape.Type + + if dataType == "timestamp" { + return b.ParseTimeString(ref, memName, fmt.Sprintf("%f", v)) + } + if dataType == "integer" || dataType == "int64" || dataType == "long" { + return convertToCorrectType(memName, ref.Shape.Type, fmt.Sprintf("%d", int(shape.(float64)))) + } + return convertToCorrectType(memName, ref.Shape.Type, fmt.Sprintf("%f", v)) + case string: + t := ref.Shape.Type + switch t { + case "timestamp": + return b.ParseTimeString(ref, memName, fmt.Sprintf("%s", v)) + + case "jsonvalue": + return fmt.Sprintf("%s: %#v,\n", memName, parseJSONString(v)) + + case "blob": + if (ref.Streaming || 
ref.Shape.Streaming) && isPayload { + return fmt.Sprintf("%s: aws.ReadSeekCloser(strings.NewReader(%q)),\n", memName, v) + } + if b.Base64BlobValues { + decodedBlob, err := base64.StdEncoding.DecodeString(v) + if err != nil { + panic(fmt.Errorf("Failed to decode string: %v", err)) + } + return fmt.Sprintf("%s: []byte(%q),\n", memName, decodedBlob) + } + return fmt.Sprintf("%s: []byte(%q),\n", memName, v) + default: + return convertToCorrectType(memName, t, v) + } + default: + panic(fmt.Errorf("Unsupported scalar type: %v", reflect.TypeOf(v))) + } +} + +// BuildComplex will build the shape's value for complex types such as structs, +// and maps. +func (b ShapeValueBuilder) BuildComplex(name, memName string, ref *ShapeRef, parent *Shape, v map[string]interface{}) string { + switch parent.Type { + case "structure": + if ref.Shape.Type == "map" { + return fmt.Sprintf(`%s: %s{ + %s + }, + `, memName, b.GoType(ref, true), b.BuildShape(ref, v, true)) + } else { + return fmt.Sprintf(`%s: &%s{ + %s + }, + `, memName, b.GoType(ref, true), b.BuildShape(ref, v, false)) + } + case "map": + if ref.Shape.Type == "map" { + return fmt.Sprintf(`%q: %s{ + %s + }, + `, name, b.GoType(ref, false), b.BuildShape(ref, v, true)) + } else { + return fmt.Sprintf(`%s: &%s{ + %s + }, + `, memName, b.GoType(ref, true), b.BuildShape(ref, v, false)) + } + default: + panic(fmt.Sprintf("Expected complex type but received %q", ref.Shape.Type)) + } +} + +// GoType returns the string of the shape's Go type identifier. +func (b ShapeValueBuilder) GoType(ref *ShapeRef, elem bool) string { + + if ref.Shape.Type != "structure" && ref.Shape.Type != "list" && ref.Shape.Type != "map" { + // Scalars are always pointers. 
+ return ref.GoTypeWithPkgName() + } + + prefix := "" + if ref.Shape.Type == "list" { + ref = &ref.Shape.MemberRef + prefix = "[]" + } + + if elem { + return prefix + ref.Shape.GoTypeWithPkgNameElem() + } + return prefix + ref.GoTypeWithPkgName() +} + +// parseJSONString a json string and returns aws.JSONValue. +func parseJSONString(input string) aws.JSONValue { + var v aws.JSONValue + if err := json.Unmarshal([]byte(input), &v); err != nil { + panic(fmt.Sprintf("unable to unmarshal JSONValue, %v", err)) + } + return v +} + +// InlineParseModeledTime returns the string of an inline function which +// returns time. +func inlineParseModeledTime(format, v string) string { + const formatTimeTmpl = `func() *time.Time{ + v, err := protocol.ParseTime("%s", "%s") + if err != nil { + panic(err) + } + return &v + }()` + + return fmt.Sprintf(formatTimeTmpl, format, v) +} + +// parseUnixTimeString returns a string which assigns the value of a time +// member using an inline function Defined inline function parses time in +// UnixTimeFormat. +func parseUnixTimeString(ref *ShapeRef, memName, v string) string { + ref.API.AddSDKImport("private/protocol") + return fmt.Sprintf("%s: %s,\n", memName, inlineParseModeledTime(protocol.UnixTimeFormatName, v)) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/smoke.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/smoke.go new file mode 100644 index 00000000000..fd3ac9f2888 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/smoke.go @@ -0,0 +1,129 @@ +// +build codegen + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" +) + +// SmokeTestSuite defines the test suite for smoke tests. +type SmokeTestSuite struct { + Version int `json:"version"` + DefaultRegion string `json:"defaultRegion"` + TestCases []SmokeTestCase `json:"testCases"` +} + +// SmokeTestCase provides the definition for a integration smoke test case. 
+type SmokeTestCase struct { + OpName string `json:"operationName"` + Input map[string]interface{} `json:"input"` + ExpectErr bool `json:"errorExpectedFromService"` +} + +// BuildInputShape returns the Go code as a string for initializing the test +// case's input shape. +func (c SmokeTestCase) BuildInputShape(ref *ShapeRef) string { + b := NewShapeValueBuilder() + return fmt.Sprintf("&%s{\n%s\n}", + b.GoType(ref, true), + b.BuildShape(ref, c.Input, false), + ) +} + +// AttachSmokeTests attaches the smoke test cases to the API model. +func (a *API) AttachSmokeTests(filename string) error { + f, err := os.Open(filename) + if err != nil { + return fmt.Errorf("failed to open smoke tests %s, err: %v", filename, err) + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&a.SmokeTests); err != nil { + return fmt.Errorf("failed to decode smoke tests %s, err: %v", filename, err) + } + + if v := a.SmokeTests.Version; v != 1 { + return fmt.Errorf("invalid smoke test version, %d", v) + } + + return nil +} + +// APISmokeTestsGoCode returns the Go Code string for the smoke tests. 
+func (a *API) APISmokeTestsGoCode() string { + w := bytes.NewBuffer(nil) + + a.resetImports() + a.AddImport("context") + a.AddImport("testing") + a.AddImport("time") + a.AddSDKImport("aws") + a.AddSDKImport("aws/request") + a.AddSDKImport("aws/awserr") + a.AddSDKImport("aws/request") + a.AddSDKImport("awstesting/integration") + a.AddImport(a.ImportPath()) + + smokeTests := struct { + API *API + SmokeTestSuite + }{ + API: a, + SmokeTestSuite: a.SmokeTests, + } + + if err := smokeTestTmpl.Execute(w, smokeTests); err != nil { + panic(fmt.Sprintf("failed to create smoke tests, %v", err)) + } + + ignoreImports := ` + var _ aws.Config + var _ awserr.Error + var _ request.Request + ` + + return a.importsGoCode() + ignoreImports + w.String() +} + +var smokeTestTmpl = template.Must(template.New(`smokeTestTmpl`).Parse(` +{{- range $i, $testCase := $.TestCases }} + {{- $op := index $.API.Operations $testCase.OpName }} + func TestInteg_{{ printf "%02d" $i }}_{{ $op.ExportedName }}(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5 *time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("{{ $.DefaultRegion }}") + svc := {{ $.API.PackageName }}.New(sess) + params := {{ $testCase.BuildInputShape $op.InputRef }} + _, err := svc.{{ $op.ExportedName }}WithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + {{- if $testCase.ExpectErr }} + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if len(aerr.Message()) == 0 { + t.Errorf("expect non-empty error message") + } + if v := aerr.Code(); v == request.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } + {{- else }} + if err != nil { + t.Errorf("expect no error, got %v", err) + } + {{- end }} + 
} +{{- end }} +`)) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go new file mode 100644 index 00000000000..d5d8826e49f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/model/api/waiters.go @@ -0,0 +1,192 @@ +// +build codegen + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "sort" + "strings" + "text/template" +) + +// WaiterAcceptor is the acceptors defined in the model the SDK will use +// to wait on resource states with. +type WaiterAcceptor struct { + State string + Matcher string + Argument string + Expected interface{} +} + +// ExpectedString returns the string that was expected by the WaiterAcceptor +func (a *WaiterAcceptor) ExpectedString() string { + switch a.Expected.(type) { + case string: + return fmt.Sprintf("%q", a.Expected) + default: + return fmt.Sprintf("%v", a.Expected) + } +} + +// A Waiter is an individual waiter definition. +type Waiter struct { + Name string + Delay int + MaxAttempts int + OperationName string `json:"operation"` + Operation *Operation + Acceptors []WaiterAcceptor +} + +// WaitersGoCode generates and returns Go code for each of the waiters of +// this API. +func (a *API) WaitersGoCode() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "import (\n%q\n\n%q\n%q\n)", + "time", + SDKImportRoot+"/aws", + SDKImportRoot+"/aws/request", + ) + + for _, w := range a.Waiters { + buf.WriteString(w.GoCode()) + } + return buf.String() +} + +// used for unmarshaling from the waiter JSON file +type waiterDefinitions struct { + *API + Waiters map[string]Waiter +} + +// AttachWaiters reads a file of waiter definitions, and adds those to the API. +// Will panic if an error occurs. 
+func (a *API) AttachWaiters(filename string) error { + p := waiterDefinitions{API: a} + + f, err := os.Open(filename) + defer f.Close() + if err != nil { + return err + } + err = json.NewDecoder(f).Decode(&p) + if err != nil { + return err + } + + return p.setup() +} + +func (p *waiterDefinitions) setup() error { + p.API.Waiters = []Waiter{} + i, keys := 0, make([]string, len(p.Waiters)) + for k := range p.Waiters { + keys[i] = k + i++ + } + sort.Strings(keys) + + for _, n := range keys { + e := p.Waiters[n] + n = p.ExportableName(n) + e.Name = n + e.OperationName = p.ExportableName(e.OperationName) + e.Operation = p.API.Operations[e.OperationName] + if e.Operation == nil { + return fmt.Errorf("unknown operation %s for waiter %s", + e.OperationName, n) + } + p.API.Waiters = append(p.API.Waiters, e) + } + + return nil +} + +var waiterTmpls = template.Must(template.New("waiterTmpls").Funcs( + template.FuncMap{ + "titleCase": func(v string) string { + return strings.Title(v) + }, + }, +).Parse(` +{{ define "waiter"}} +// WaitUntil{{ .Name }} uses the {{ .Operation.API.NiceName }} API operation +// {{ .OperationName }} to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *{{ .Operation.API.StructName }}) WaitUntil{{ .Name }}(input {{ .Operation.InputRef.GoType }}) error { + return c.WaitUntil{{ .Name }}WithContext(aws.BackgroundContext(), input) +} + +// WaitUntil{{ .Name }}WithContext is an extended version of WaitUntil{{ .Name }}. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *{{ .Operation.API.StructName }}) WaitUntil{{ .Name }}WithContext(` + + `ctx aws.Context, input {{ .Operation.InputRef.GoType }}, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntil{{ .Name }}", + MaxAttempts: {{ .MaxAttempts }}, + Delay: request.ConstantWaiterDelay({{ .Delay }} * time.Second), + Acceptors: []request.WaiterAcceptor{ + {{ range $_, $a := .Acceptors }}{ + State: request.{{ titleCase .State }}WaiterState, + Matcher: request.{{ titleCase .Matcher }}WaiterMatch, + {{- if .Argument }}Argument: "{{ .Argument }}",{{ end }} + Expected: {{ .ExpectedString }}, + }, + {{ end }} + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy {{ .Operation.InputRef.GoType }} + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.{{ .OperationName }}Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} +{{- end }} + +{{ define "waiter interface" }} +WaitUntil{{ .Name }}({{ .Operation.InputRef.GoTypeWithPkgName }}) error +WaitUntil{{ .Name }}WithContext(aws.Context, {{ .Operation.InputRef.GoTypeWithPkgName }}, ...request.WaiterOption) error +{{- end }} +`)) + +// InterfaceSignature returns a string representing the Waiter's interface +// function signature. +func (w *Waiter) InterfaceSignature() string { + var buf bytes.Buffer + if err := waiterTmpls.ExecuteTemplate(&buf, "waiter interface", w); err != nil { + panic(err) + } + + return strings.TrimSpace(buf.String()) +} + +// GoCode returns the generated Go code for an individual waiter. 
+func (w *Waiter) GoCode() string { + var buf bytes.Buffer + if err := waiterTmpls.ExecuteTemplate(&buf, "waiter", w); err != nil { + panic(err) + } + + return buf.String() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go new file mode 100644 index 00000000000..5d500be27f3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -0,0 +1,36 @@ +// Package ec2query provides serialization of AWS EC2 requests and responses. +package ec2query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building ec2query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build} + +// Build builds a request for the EC2 protocol. 
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, true); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed encoding EC2 Query request", err) + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go new file mode 100644 index 00000000000..c42b04a8d55 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -0,0 +1,77 @@ +package ec2query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/ec2.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError} + +// Unmarshal unmarshals a response body for the EC2 protocol. 
// Unmarshal decodes the XML response body into r.Data, setting a
// serialization error on the request if decoding fails. The response body is
// always closed via defer.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		// Empty wrapper name: decode starting at the document root.
		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
		if err != nil {
			r.Error = awserr.NewRequestFailure(
				awserr.New(request.ErrCodeSerialization,
					"failed decoding EC2 Query response", err),
				r.HTTPResponse.StatusCode,
				r.RequestID,
			)
			return
		}
	}
}

// UnmarshalMeta unmarshals response headers for the EC2 protocol.
// The request ID is read from X-Amzn-Requestid, falling back to
// X-Amz-Request-Id when the first header is absent.
func UnmarshalMeta(r *request.Request) {
	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
	if r.RequestID == "" {
		// Alternative version of request id in the header
		r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
	}
}

// xmlErrorResponse models the EC2 error envelope:
// <Response><Errors><Error><Code/><Message/></Error></Errors><RequestID/></Response>
type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestID"`
}

// UnmarshalError unmarshals a response error for the EC2 protocol.
// On success the request failure carries the error code/message and the
// request ID parsed from the error body.
func UnmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	var respErr xmlErrorResponse
	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization,
				"failed to unmarshal error message", err),
			r.HTTPResponse.StatusCode,
			r.RequestID,
		)
		return
	}

	// NOTE(review): respErr.RequestID is used rather than r.RequestID; if the
	// error body omits <RequestID>, the failure's request ID will be empty —
	// confirm this is intended.
	r.Error = awserr.NewRequestFailure(
		awserr.New(respErr.Code, respErr.Message, nil),
		r.HTTPResponse.StatusCode,
		respErr.RequestID,
	)
}
diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
new file mode 100644
index 00000000000..151054971a5
--- /dev/null
+++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
@@ -0,0 +1,144 @@
package eventstream

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strconv"
)

type decodedMessage struct {
+ rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var 
headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 00000000000..47433939189 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,216 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. 
+type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ + r: r, + } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. 
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return 
messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000000..ffade3bc0c8 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,162 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Encoder provides EventStream message encoding. 
+type Encoder struct { + w io.Writer + logger aws.Logger + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) (err error) { + e.headersBuf.Reset() + + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get 
encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. +func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000000..5481ef30796 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. 
+type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 00000000000..34c2e89d539 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,77 @@ +package eventstreamapi + +import ( + "fmt" + "sync" +) + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. 
+func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. +func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go new file mode 100644 index 00000000000..0e4aa42f3e4 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -0,0 +1,173 @@ +package eventstreamapi + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventReader provides reading from the EventStream of an reader. +type EventReader struct { + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns a EventReader built from the reader and unmarshaler +// provided. Use ReadStream method to start reading from the EventStream. 
+func NewEventReader( + decoder *eventstream.Decoder, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + decoder: decoder, + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload will is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read. + r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + return nil, r.unmarshalEventException(msg) + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, &UnknownMessageTypeError{ + Type: typ, Message: msg.Clone(), + } + } +} + +// UnknownMessageTypeError provides an error when a message is received from +// the stream, but the reader is unable to determine what kind of message it is. 
+type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 00000000000..e46b8acc200 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+ + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 00000000000..3a7ba5cd57a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. 
+func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. 
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 00000000000..433bb1630a7 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. 
+func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. +func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 00000000000..10a3823dfa6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,109 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling 
interface for event types to event stream +// messages. +type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. 
+func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} + +//type EventEncoder struct { +// encoder Encoder +// ppayloadMarshaler protocol.PayloadMarshaler +// eventTypeFor func(Marshaler) (string, error) +//} +// +//func (e EventEncoder) Encode(event Marshaler) error { +// msg, err := e.marshal(event) +// if err != nil { +// return err +// } +// +// return w.encoder.Encode(msg) +//} +// +//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// +//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 00000000000..f6f8c5674ed --- /dev/null +++ 
b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // 
Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 00000000000..9f509d8f6dc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,506 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. +type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. 
+} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. 
+type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. +func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. 
+func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. 
+func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. +func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. 
+func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. 
+func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. +func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 00000000000..f7427da039e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. 
+type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 00000000000..d7d42db0a6a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. 
+func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 00000000000..915b0fcafd7 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
+func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 00000000000..53831dff984 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. 
+func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000000..864fb6704b4 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. 
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation 
on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000000..8b2c9bbeba0 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,304 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/big" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var millisecondsFloat = new(big.Float).SetInt64(1e3) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. 
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != 
"" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. + for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to 
do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000000..a029217e4c6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,88 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. 
+func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + + // Only set the content type if one is not already specified and an + // JSONVersion is specified. + if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. 
+func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go new file mode 100644 index 00000000000..c0c52e2db0f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -0,0 +1,107 @@ +package jsonrpc + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// UnmarshalTypedError provides unmarshaling errors API response errors +// for both typed and untyped errors. +type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the +// set of exception names to the error unmarshalers +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + body := ioutil.NopCloser(&buf) + + // Code may be separated by hash(#), with the last element being the code + // used by the SDK. 
+ codeParts := strings.SplitN(jsonErr.Code, "#", 2) + code := codeParts[len(codeParts)-1] + msg := jsonErr.Message + + if fn, ok := u.exceptions[code]; ok { + // If exception code is know, use associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + v := fn(respMeta) + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) + if err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 00000000000..776d1101843 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + 
"strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. 
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 00000000000..0ea0647a57d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. 
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. +func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "PUT"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 00000000000..9d521dcb950 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. 
+type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000000..d40346a7790 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. 
+package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000000..75866d01218 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, 
r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000000..9231e95d160 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 00000000000..831b0110c54 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,69 @@ +package query + +import ( + "encoding/xml" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +type xmlErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlResponseError struct { + xmlErrorResponse +} + +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} + +// UnmarshalError unmarshals an error response for an AWS Query service. 
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 00000000000..1301b149d35 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,310 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. 
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. 
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag 
reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } 
+} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000000..4366de2e1e8 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000000..92f8b4d9a48 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,257 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var 
UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; 
n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + 
return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 00000000000..b1ae3648719 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,79 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. 
+package restxml + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, + r.RequestID, + ) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. 
+func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 00000000000..98f4caed91c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,85 @@ +package protocol + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +// Output time is intended to not contain decimals +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time with fractional second precision up to milliseconds + ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC().Truncate(time.Millisecond) + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. 
+func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000000..f614ef898be --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. 
+type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 00000000000..cc857f136c5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. 
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000000..09ad951595e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,315 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. 
All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000000..c1a511851f6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := 
strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000000..107c053f8ac --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,299 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
	// First decode the whole stream into a generic XMLNode tree, then map
	// that tree onto v via reflection.
	n, err := XMLToStruct(d, nil)
	if err != nil {
		return err
	}
	if n.Children != nil {
		for _, root := range n.Children {
			for _, c := range root {
				if wrappedChild, ok := c.Children[wrapper]; ok {
					c = wrappedChild[0] // pull out wrapped element
				}

				err = parse(reflect.ValueOf(v), c, "")
				if err != nil {
					// NOTE(review): io.EOF is swallowed and treated as
					// success here — presumably a tolerated truncation;
					// confirm upstream intent before relying on it.
					if err == io.EOF {
						return nil
					}
					return err
				}
			}
		}
		return nil
	}
	return nil
}

// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
// will be used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	// An `xml:"-"` tag means this member is explicitly skipped.
	xml := tag.Get("xml")
	if len(xml) != 0 {
		name := strings.SplitAfterN(xml, ",", 2)[0]
		if name == "-" {
			return nil
		}
	}

	rtype := r.Type()
	if rtype.Kind() == reflect.Ptr {
		rtype = rtype.Elem() // check kind of actual element type
	}

	// Infer the shape category when the tag does not state it explicitly.
	t := tag.Get("type")
	if t == "" {
		switch rtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if _, ok := r.Interface().(*time.Time); !ok {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := r.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	// Dispatch to the shape-specific parser; scalars are the fallback.
	switch t {
	case "structure":
		// A struct's "_" field carries shape-level tags (e.g. payload).
		if field, ok := rtype.FieldByName("_"); ok {
			tag = field.Tag
		}
		return parseStruct(r, node, tag)
	case "list":
		return parseList(r, node, tag)
	case "map":
		return parseMap(r, node, tag)
	default:
		return parseScalar(r, node, tag)
	}
}

// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
// types in the structure will also be deserialized.
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()
	if r.Kind() == reflect.Ptr {
		if r.IsNil() { // create the structure if it's nil
			s := reflect.New(r.Type().Elem())
			r.Set(s)
			r = s
		}

		// Work on the pointed-to struct from here on.
		r = r.Elem()
		t = t.Elem()
	}

	// unwrap any payloads: parse directly into the named payload field
	if payload := tag.Get("payload"); payload != "" {
		field, _ := t.FieldByName(payload)
		return parseStruct(r.FieldByName(payload), node, field.Tag)
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if c := field.Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}

		// figure out what this field is called on the wire: flattened-list
		// name wins, then locationName, then the Go field name
		name := field.Name
		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
			name = field.Tag.Get("locationNameList")
		} else if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		// try to find the field by name in elements
		elems := node.Children[name]

		if elems == nil { // try to find the field in attributes
			// Synthesize a text-only node so attribute values parse the
			// same way element text does.
			if val, ok := node.findElem(name); ok {
				elems = []*XMLNode{{Text: val}}
			}
		}

		member := r.FieldByName(field.Name)
		for _, elem := range elems {
			err := parse(member, elem, field.Tag)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// parseList deserializes a list of values from an XML node. Each list entry
// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. 
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new 
file mode 100644 index 00000000000..42f71648eee --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
	out := &XMLNode{}
	for {
		tok, err := d.Token()
		if err != nil {
			if err == io.EOF {
				break
			} else {
				return out, err
			}
		}

		if tok == nil {
			break
		}

		switch typed := tok.(type) {
		case xml.CharData:
			// Copy: the decoder reuses its token buffer between calls.
			out.Text = string(typed.Copy())
		case xml.StartElement:
			el := typed.Copy()
			out.Attr = el.Attr
			if out.Children == nil {
				out.Children = map[string][]*XMLNode{}
			}

			name := typed.Name.Local
			slice := out.Children[name]
			if slice == nil {
				slice = []*XMLNode{}
			}
			// Recurse: the child subtree is consumed up to its matching
			// end element.
			node, e := XMLToStruct(d, &el)
			out.findNamespaces()
			if e != nil {
				return out, e
			}
			node.Name = typed.Name
			node.findNamespaces()
			tempOut := *out
			// Save into a temp variable, simply because out gets squashed during
			// loop iterations
			node.parent = &tempOut
			slice = append(slice, node)
			out.Children[name] = slice
		case xml.EndElement:
			if s != nil && s.Name.Local == typed.Name.Local { // matching end token
				return out, nil
			}
			// NOTE(review): an unmatched end element resets out, discarding
			// anything accumulated so far — confirm this is the intended
			// handling of stray end tags.
			out = &XMLNode{}
		}
	}
	return out, nil
}

// findNamespaces records this node's xmlns declarations, keyed by namespace
// URI with the declared prefix as the value.
func (n *XMLNode) findNamespaces() {
	ns := map[string]string{}
	for _, a := range n.Attr {
		if a.Name.Space == "xmlns" {
			ns[a.Value] = a.Name.Local
		}
	}

	n.namespaces = ns
}

// findElem looks up an attribute value by its prefixed "prefix:local" name,
// walking up through parent nodes and translating each attribute's namespace
// URI to its declared prefix.
func (n *XMLNode) findElem(name string) (string, bool) {
	for node := n; node != nil; node = node.parent {
		for _, a := range node.Attr {
			namespace := a.Name.Space
			if v, ok := node.namespaces[namespace]; ok {
				namespace = v
			}
			if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
				return a.Value, true
			}
		}
	}
	return "", false
}

// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go b/agent/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go new file mode 100644 index 00000000000..48000565c86 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/util/sort_keys.go @@ -0,0 +1,14 @@ +package util + +import "sort" + +// SortedKeys returns a sorted slice of keys of a map. +func SortedKeys(m map[string]interface{}) []string { + i, sorted := 0, make([]string, len(m)) + for k := range m { + sorted[i] = k + i++ + } + sort.Strings(sorted) + return sorted +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/util/util.go b/agent/vendor/github.com/aws/aws-sdk-go/private/util/util.go new file mode 100644 index 00000000000..5f2dab25e20 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/private/util/util.go @@ -0,0 +1,109 @@ +package util + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "reflect" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// GoFmt returns the Go formated string of the input. 
+// +// Panics if the format fails. +func GoFmt(buf string) string { + formatted, err := format.Source([]byte(buf)) + if err != nil { + panic(fmt.Errorf("%s\nOriginal code:\n%s", err.Error(), buf)) + } + return string(formatted) +} + +var reTrim = regexp.MustCompile(`\s{2,}`) + +// Trim removes all leading and trailing white space. +// +// All consecutive spaces will be reduced to a single space. +func Trim(s string) string { + return strings.TrimSpace(reTrim.ReplaceAllString(s, " ")) +} + +// Capitalize capitalizes the first character of the string. +func Capitalize(s string) string { + if len(s) == 1 { + return strings.ToUpper(s) + } + return strings.ToUpper(s[0:1]) + s[1:] +} + +// SortXML sorts the reader's XML elements +func SortXML(r io.Reader) string { + var buf bytes.Buffer + d := xml.NewDecoder(r) + root, _ := xmlutil.XMLToStruct(d, nil) + e := xml.NewEncoder(&buf) + xmlutil.StructToXML(e, root, true) + return buf.String() +} + +// PrettyPrint generates a human readable representation of the value v. +// All values of v are recursively found and pretty printed also. 
+func PrettyPrint(v interface{}) string { + value := reflect.ValueOf(v) + switch value.Kind() { + case reflect.Struct: + str := fullName(value.Type()) + "{\n" + for i := 0; i < value.NumField(); i++ { + l := string(value.Type().Field(i).Name[0]) + if strings.ToUpper(l) == l { + str += value.Type().Field(i).Name + ": " + str += PrettyPrint(value.Field(i).Interface()) + str += ",\n" + } + } + str += "}" + return str + case reflect.Map: + str := "map[" + fullName(value.Type().Key()) + "]" + fullName(value.Type().Elem()) + "{\n" + for _, k := range value.MapKeys() { + str += "\"" + k.String() + "\": " + str += PrettyPrint(value.MapIndex(k).Interface()) + str += ",\n" + } + str += "}" + return str + case reflect.Ptr: + if e := value.Elem(); e.IsValid() { + return "&" + PrettyPrint(e.Interface()) + } + return "nil" + case reflect.Slice: + str := "[]" + fullName(value.Type().Elem()) + "{\n" + for i := 0; i < value.Len(); i++ { + str += PrettyPrint(value.Index(i).Interface()) + str += ",\n" + } + str += "}" + return str + default: + return fmt.Sprintf("%#v", v) + } +} + +func pkgName(t reflect.Type) string { + pkg := t.PkgPath() + c := strings.Split(pkg, "/") + return c[len(c)-1] +} + +func fullName(t reflect.Type) string { + if pkg := pkgName(t); pkg != "" { + return pkg + "." + t.Name() + } + return t.Name() +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go new file mode 100644 index 00000000000..5deb417314b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -0,0 +1,10213 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudwatchlogs + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssociateKmsKey = "AssociateKmsKey" + +// AssociateKmsKeyRequest generates a "aws/request.Request" representing the +// client's request for the AssociateKmsKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateKmsKey for more information on using the AssociateKmsKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateKmsKeyRequest method. +// req, resp := client.AssociateKmsKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/AssociateKmsKey +func (c *CloudWatchLogs) AssociateKmsKeyRequest(input *AssociateKmsKeyInput) (req *request.Request, output *AssociateKmsKeyOutput) { + op := &request.Operation{ + Name: opAssociateKmsKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateKmsKeyInput{} + } + + output = &AssociateKmsKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateKmsKey API operation for Amazon CloudWatch Logs. 
+// +// Associates the specified AWS Key Management Service (AWS KMS) customer master +// key (CMK) with the specified log group. +// +// Associating an AWS KMS CMK with a log group overrides any existing associations +// between the log group and a CMK. After a CMK is associated with a log group, +// all newly ingested data for the log group is encrypted using the CMK. This +// association is stored as long as the data encrypted with the CMK is still +// within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt +// this data whenever it is requested. +// +// CloudWatch Logs supports only symmetric CMKs. Do not use an associate an +// asymmetric CMK with your log group. For more information, see Using Symmetric +// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). +// +// It can take up to 5 minutes for this operation to take effect. +// +// If you attempt to associate a CMK with a log group but the CMK does not exist +// or the CMK is disabled, you receive an InvalidParameterException error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation AssociateKmsKey for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/AssociateKmsKey +func (c *CloudWatchLogs) AssociateKmsKey(input *AssociateKmsKeyInput) (*AssociateKmsKeyOutput, error) { + req, out := c.AssociateKmsKeyRequest(input) + return out, req.Send() +} + +// AssociateKmsKeyWithContext is the same as AssociateKmsKey with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateKmsKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) AssociateKmsKeyWithContext(ctx aws.Context, input *AssociateKmsKeyInput, opts ...request.Option) (*AssociateKmsKeyOutput, error) { + req, out := c.AssociateKmsKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelExportTask for more information on using the CancelExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelExportTaskRequest method. 
+// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CancelExportTask +func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + output = &CancelExportTaskOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelExportTask API operation for Amazon CloudWatch Logs. +// +// Cancels the specified export task. +// +// The task must be in the PENDING or RUNNING state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CancelExportTask for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * InvalidOperationException +// The operation is not valid on the specified resource. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CancelExportTask +func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + return out, req.Send() +} + +// CancelExportTaskWithContext is the same as CancelExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTaskInput, opts ...request.Option) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateExportTask = "CreateExportTask" + +// CreateExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateExportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateExportTask for more information on using the CreateExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateExportTaskRequest method. 
+// req, resp := client.CreateExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateExportTask +func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { + op := &request.Operation{ + Name: opCreateExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateExportTaskInput{} + } + + output = &CreateExportTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateExportTask API operation for Amazon CloudWatch Logs. +// +// Creates an export task, which allows you to efficiently export data from +// a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, +// you must use credentials that have permission to write to the S3 bucket that +// you specify as the destination. +// +// This is an asynchronous call. If all the required information is provided, +// this operation initiates an export task and responds with the ID of the task. +// After the task has started, you can use DescribeExportTasks (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) +// to get the status of the export task. Each account can only have one active +// (RUNNING or PENDING) export task at a time. To cancel an export task, use +// CancelExportTask (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html). +// +// You can export logs from multiple log groups or multiple time ranges to the +// same S3 bucket. To separate out log data for each export task, you can specify +// a prefix to be used as the Amazon S3 key prefix for all exported objects. +// +// Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting +// to S3 buckets encrypted with SSE-KMS is not supported. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateExportTask for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ResourceAlreadyExistsException +// The specified resource already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateExportTask +func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + return out, req.Send() +} + +// CreateExportTaskWithContext is the same as CreateExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateExportTaskWithContext(ctx aws.Context, input *CreateExportTaskInput, opts ...request.Option) (*CreateExportTaskOutput, error) { + req, out := c.CreateExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateLogGroup = "CreateLogGroup" + +// CreateLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLogGroup for more information on using the CreateLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLogGroupRequest method. +// req, resp := client.CreateLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogGroup +func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { + op := &request.Operation{ + Name: opCreateLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogGroupInput{} + } + + output = &CreateLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateLogGroup API operation for Amazon CloudWatch Logs. +// +// Creates a log group with the specified name. You can create up to 20,000 +// log groups per account. +// +// You must use the following guidelines when naming a log group: +// +// * Log group names must be unique within a region for an AWS account. +// +// * Log group names can be between 1 and 512 characters long. 
+//
+// * Log group names consist of the following characters: a-z, A-Z, 0-9,
+// '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and
+// '#' (number sign)
+//
+// When you create a log group, by default the log events in the log group never
+// expire. To set a retention policy so that events expire and are deleted after
+// a specified time, use PutRetentionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html).
+//
+// If you associate an AWS Key Management Service (AWS KMS) customer master key
+// (CMK) with the log group, ingested data is encrypted using the CMK. This
+// association is stored as long as the data encrypted with the CMK is still
+// within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt
+// this data whenever it is requested.
+//
+// If you attempt to associate a CMK with the log group but the CMK does not
+// exist or the CMK is disabled, you receive an InvalidParameterException error.
+//
+// CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric
+// CMK with your log group. For more information, see Using Symmetric and Asymmetric
+// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudWatch Logs's
+// API operation CreateLogGroup for usage and error information.
+//
+// Returned Error Types:
+// * InvalidParameterException
+// A parameter is specified incorrectly.
+//
+// * ResourceAlreadyExistsException
+// The specified resource already exists.
+//
+// * LimitExceededException
+// You have reached the maximum number of resources that can be created.
+//
+// * OperationAbortedException
+// Multiple requests to update the same resource were in conflict.
+// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogGroup +func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + return out, req.Send() +} + +// CreateLogGroupWithContext is the same as CreateLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateLogGroupWithContext(ctx aws.Context, input *CreateLogGroupInput, opts ...request.Option) (*CreateLogGroupOutput, error) { + req, out := c.CreateLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLogStream = "CreateLogStream" + +// CreateLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLogStream for more information on using the CreateLogStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLogStreamRequest method. 
+// req, resp := client.CreateLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogStream +func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { + op := &request.Operation{ + Name: opCreateLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLogStreamInput{} + } + + output = &CreateLogStreamOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateLogStream API operation for Amazon CloudWatch Logs. +// +// Creates a log stream for the specified log group. A log stream is a sequence +// of log events that originate from a single source, such as an application +// instance or a resource that is being monitored. +// +// There is no limit on the number of log streams that you can create for a +// log group. There is a limit of 50 TPS on CreateLogStream operations, after +// which transactions are throttled. +// +// You must use the following guidelines when naming a log stream: +// +// * Log stream names must be unique within the log group. +// +// * Log stream names can be between 1 and 512 characters long. +// +// * The ':' (colon) and '*' (asterisk) characters are not allowed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation CreateLogStream for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceAlreadyExistsException +// The specified resource already exists. 
+// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/CreateLogStream +func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + return out, req.Send() +} + +// CreateLogStreamWithContext is the same as CreateLogStream with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLogStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) CreateLogStreamWithContext(ctx aws.Context, input *CreateLogStreamInput, opts ...request.Option) (*CreateLogStreamOutput, error) { + req, out := c.CreateLogStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDestination = "DeleteDestination" + +// DeleteDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDestination for more information on using the DeleteDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteDestinationRequest method. +// req, resp := client.DeleteDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDestination +func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { + op := &request.Operation{ + Name: opDeleteDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDestinationInput{} + } + + output = &DeleteDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDestination API operation for Amazon CloudWatch Logs. +// +// Deletes the specified destination, and eventually disables all the subscription +// filters that publish to it. This operation does not delete the physical resource +// encapsulated by the destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteDestination for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteDestination +func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + return out, req.Send() +} + +// DeleteDestinationWithContext is the same as DeleteDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteDestinationWithContext(ctx aws.Context, input *DeleteDestinationInput, opts ...request.Option) (*DeleteDestinationOutput, error) { + req, out := c.DeleteDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLogGroup = "DeleteLogGroup" + +// DeleteLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLogGroup for more information on using the DeleteLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLogGroupRequest method. 
+// req, resp := client.DeleteLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogGroup +func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { + op := &request.Operation{ + Name: opDeleteLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogGroupInput{} + } + + output = &DeleteLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLogGroup API operation for Amazon CloudWatch Logs. +// +// Deletes the specified log group and permanently deletes all the archived +// log events associated with the log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogGroup for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogGroup +func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + return out, req.Send() +} + +// DeleteLogGroupWithContext is the same as DeleteLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteLogGroupWithContext(ctx aws.Context, input *DeleteLogGroupInput, opts ...request.Option) (*DeleteLogGroupOutput, error) { + req, out := c.DeleteLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLogStream = "DeleteLogStream" + +// DeleteLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLogStream for more information on using the DeleteLogStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLogStreamRequest method. 
+// req, resp := client.DeleteLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogStream +func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { + op := &request.Operation{ + Name: opDeleteLogStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLogStreamInput{} + } + + output = &DeleteLogStreamOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteLogStream API operation for Amazon CloudWatch Logs. +// +// Deletes the specified log stream and permanently deletes all the archived +// log events associated with the log stream. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteLogStream for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteLogStream +func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + return out, req.Send() +} + +// DeleteLogStreamWithContext is the same as DeleteLogStream with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLogStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteLogStreamWithContext(ctx aws.Context, input *DeleteLogStreamInput, opts ...request.Option) (*DeleteLogStreamOutput, error) { + req, out := c.DeleteLogStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteMetricFilter = "DeleteMetricFilter" + +// DeleteMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMetricFilter for more information on using the DeleteMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteMetricFilterRequest method. 
+// req, resp := client.DeleteMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteMetricFilter +func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { + op := &request.Operation{ + Name: opDeleteMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricFilterInput{} + } + + output = &DeleteMetricFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteMetricFilter API operation for Amazon CloudWatch Logs. +// +// Deletes the specified metric filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteMetricFilter for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteMetricFilter +func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + return out, req.Send() +} + +// DeleteMetricFilterWithContext is the same as DeleteMetricFilter with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteMetricFilterWithContext(ctx aws.Context, input *DeleteMetricFilterInput, opts ...request.Option) (*DeleteMetricFilterOutput, error) { + req, out := c.DeleteMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteQueryDefinition = "DeleteQueryDefinition" + +// DeleteQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteQueryDefinition for more information on using the DeleteQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteQueryDefinitionRequest method. 
+// req, resp := client.DeleteQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinitionRequest(input *DeleteQueryDefinitionInput) (req *request.Request, output *DeleteQueryDefinitionOutput) { + op := &request.Operation{ + Name: opDeleteQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueryDefinitionInput{} + } + + output = &DeleteQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Deletes a saved CloudWatch Logs Insights query definition. A query definition +// contains details about a saved CloudWatch Logs Insights query. +// +// Each DeleteQueryDefinition operation can delete one query definition. +// +// You must have the logs:DeleteQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteQueryDefinition for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinition(input *DeleteQueryDefinitionInput) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + return out, req.Send() +} + +// DeleteQueryDefinitionWithContext is the same as DeleteQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteQueryDefinitionWithContext(ctx aws.Context, input *DeleteQueryDefinitionInput, opts ...request.Option) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteResourcePolicy = "DeleteResourcePolicy" + +// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteResourcePolicyRequest method. 
+// req, resp := client.DeleteResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteResourcePolicy +func (c *CloudWatchLogs) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) { + op := &request.Operation{ + Name: opDeleteResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourcePolicyInput{} + } + + output = &DeleteResourcePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteResourcePolicy API operation for Amazon CloudWatch Logs. +// +// Deletes a resource policy from this account. This revokes the access of the +// identities in that policy to put log events to this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteResourcePolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteResourcePolicy +func (c *CloudWatchLogs) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + return out, req.Send() +} + +// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRetentionPolicy for more information on using the DeleteRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. 
+// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteRetentionPolicy +func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + output = &DeleteRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Deletes the specified retention policy. +// +// Log events do not expire if they belong to log groups without a retention +// policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteRetentionPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteRetentionPolicy +func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + return out, req.Send() +} + +// DeleteRetentionPolicyWithContext is the same as DeleteRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteRetentionPolicyWithContext(ctx aws.Context, input *DeleteRetentionPolicyInput, opts ...request.Option) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" + +// DeleteSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubscriptionFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSubscriptionFilter for more information on using the DeleteSubscriptionFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteSubscriptionFilterRequest method. +// req, resp := client.DeleteSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteSubscriptionFilter +func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opDeleteSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubscriptionFilterInput{} + } + + output = &DeleteSubscriptionFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Deletes the specified subscription filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteSubscriptionFilter for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteSubscriptionFilter +func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + return out, req.Send() +} + +// DeleteSubscriptionFilterWithContext is the same as DeleteSubscriptionFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSubscriptionFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DeleteSubscriptionFilterWithContext(ctx aws.Context, input *DeleteSubscriptionFilterInput, opts ...request.Option) (*DeleteSubscriptionFilterOutput, error) { + req, out := c.DeleteSubscriptionFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDestinations = "DescribeDestinations" + +// DescribeDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDestinations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDestinations for more information on using the DescribeDestinations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeDestinationsRequest method. +// req, resp := client.DescribeDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDestinations +func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { + op := &request.Operation{ + Name: opDescribeDestinations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDestinationsInput{} + } + + output = &DescribeDestinationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDestinations API operation for Amazon CloudWatch Logs. +// +// Lists all your destinations. The results are ASCII-sorted by destination +// name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeDestinations for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeDestinations +func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + return out, req.Send() +} + +// DescribeDestinationsWithContext is the same as DescribeDestinations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDestinations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDestinationsWithContext(ctx aws.Context, input *DescribeDestinationsInput, opts ...request.Option) (*DescribeDestinationsOutput, error) { + req, out := c.DescribeDestinationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDestinationsPages iterates over the pages of a DescribeDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDestinations operation. 
+// pageNum := 0 +// err := client.DescribeDestinationsPages(params, +// func(page *cloudwatchlogs.DescribeDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(*DescribeDestinationsOutput, bool) bool) error { + return c.DescribeDestinationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDestinationsPagesWithContext same as DescribeDestinationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeDestinationsPagesWithContext(ctx aws.Context, input *DescribeDestinationsInput, fn func(*DescribeDestinationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDestinationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDestinationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDestinationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportTasks for more information on using the DescribeExportTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeExportTasks +func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + output = &DescribeExportTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportTasks API operation for Amazon CloudWatch Logs. +// +// Lists the specified export tasks. You can list all your export tasks or filter +// the results based on task ID or task status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeExportTasks for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeExportTasks +func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + return out, req.Send() +} + +// DescribeExportTasksWithContext is the same as DescribeExportTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeExportTasksWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.Option) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLogGroups = "DescribeLogGroups" + +// DescribeLogGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLogGroups for more information on using the DescribeLogGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLogGroupsRequest method. 
+// req, resp := client.DescribeLogGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogGroups +func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLogGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogGroupsInput{} + } + + output = &DescribeLogGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLogGroups API operation for Amazon CloudWatch Logs. +// +// Lists the specified log groups. You can list all your log groups or filter +// the results by prefix. The results are ASCII-sorted by log group name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogGroups for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogGroups +func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + return out, req.Send() +} + +// DescribeLogGroupsWithContext is the same as DescribeLogGroups with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeLogGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogGroupsWithContext(ctx aws.Context, input *DescribeLogGroupsInput, opts ...request.Option) (*DescribeLogGroupsOutput, error) { + req, out := c.DescribeLogGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLogGroupsPages iterates over the pages of a DescribeLogGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogGroups operation. +// pageNum := 0 +// err := client.DescribeLogGroupsPages(params, +// func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(*DescribeLogGroupsOutput, bool) bool) error { + return c.DescribeLogGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLogGroupsPagesWithContext same as DescribeLogGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) DescribeLogGroupsPagesWithContext(ctx aws.Context, input *DescribeLogGroupsInput, fn func(*DescribeLogGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLogGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLogGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLogGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLogStreams = "DescribeLogStreams" + +// DescribeLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogStreams operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLogStreams for more information on using the DescribeLogStreams +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLogStreamsRequest method. 
+// req, resp := client.DescribeLogStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogStreams +func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { + op := &request.Operation{ + Name: opDescribeLogStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLogStreamsInput{} + } + + output = &DescribeLogStreamsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLogStreams API operation for Amazon CloudWatch Logs. +// +// Lists the log streams for the specified log group. You can list all the log +// streams or filter the results by prefix. You can also control how the results +// are ordered. +// +// This operation has a limit of five transactions per second, after which transactions +// are throttled. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeLogStreams for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeLogStreams +func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + return out, req.Send() +} + +// DescribeLogStreamsWithContext is the same as DescribeLogStreams with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLogStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogStreamsWithContext(ctx aws.Context, input *DescribeLogStreamsInput, opts ...request.Option) (*DescribeLogStreamsOutput, error) { + req, out := c.DescribeLogStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLogStreamsPages iterates over the pages of a DescribeLogStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogStreams operation. 
+// pageNum := 0 +// err := client.DescribeLogStreamsPages(params, +// func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(*DescribeLogStreamsOutput, bool) bool) error { + return c.DescribeLogStreamsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLogStreamsPagesWithContext same as DescribeLogStreamsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeLogStreamsPagesWithContext(ctx aws.Context, input *DescribeLogStreamsInput, fn func(*DescribeLogStreamsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLogStreamsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLogStreamsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLogStreamsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMetricFilters = "DescribeMetricFilters" + +// DescribeMetricFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMetricFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeMetricFilters for more information on using the DescribeMetricFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMetricFiltersRequest method. +// req, resp := client.DescribeMetricFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeMetricFilters +func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { + op := &request.Operation{ + Name: opDescribeMetricFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMetricFiltersInput{} + } + + output = &DescribeMetricFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMetricFilters API operation for Amazon CloudWatch Logs. +// +// Lists the specified metric filters. You can list all of the metric filters +// or filter the results by log name, prefix, metric name, or metric namespace. +// The results are ASCII-sorted by filter name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeMetricFilters for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. 
+// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeMetricFilters +func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + return out, req.Send() +} + +// DescribeMetricFiltersWithContext is the same as DescribeMetricFilters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMetricFilters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeMetricFiltersWithContext(ctx aws.Context, input *DescribeMetricFiltersInput, opts ...request.Option) (*DescribeMetricFiltersOutput, error) { + req, out := c.DescribeMetricFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMetricFiltersPages iterates over the pages of a DescribeMetricFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMetricFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMetricFilters operation. 
+// pageNum := 0 +// err := client.DescribeMetricFiltersPages(params, +// func(page *cloudwatchlogs.DescribeMetricFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(*DescribeMetricFiltersOutput, bool) bool) error { + return c.DescribeMetricFiltersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMetricFiltersPagesWithContext same as DescribeMetricFiltersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeMetricFiltersPagesWithContext(ctx aws.Context, input *DescribeMetricFiltersInput, fn func(*DescribeMetricFiltersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMetricFiltersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMetricFiltersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMetricFiltersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeQueries = "DescribeQueries" + +// DescribeQueriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeQueries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeQueries for more information on using the DescribeQueries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeQueriesRequest method. +// req, resp := client.DescribeQueriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueries +func (c *CloudWatchLogs) DescribeQueriesRequest(input *DescribeQueriesInput) (req *request.Request, output *DescribeQueriesOutput) { + op := &request.Operation{ + Name: opDescribeQueries, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeQueriesInput{} + } + + output = &DescribeQueriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeQueries API operation for Amazon CloudWatch Logs. +// +// Returns a list of CloudWatch Logs Insights queries that are scheduled, executing, +// or have been executed recently in this account. You can request all queries +// or limit it to queries of a specific log group or queries with a certain +// status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeQueries for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueries +func (c *CloudWatchLogs) DescribeQueries(input *DescribeQueriesInput) (*DescribeQueriesOutput, error) { + req, out := c.DescribeQueriesRequest(input) + return out, req.Send() +} + +// DescribeQueriesWithContext is the same as DescribeQueries with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeQueries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeQueriesWithContext(ctx aws.Context, input *DescribeQueriesInput, opts ...request.Option) (*DescribeQueriesOutput, error) { + req, out := c.DescribeQueriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeQueryDefinitions = "DescribeQueryDefinitions" + +// DescribeQueryDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeQueryDefinitions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeQueryDefinitions for more information on using the DescribeQueryDefinitions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeQueryDefinitionsRequest method. 
+// req, resp := client.DescribeQueryDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitionsRequest(input *DescribeQueryDefinitionsInput) (req *request.Request, output *DescribeQueryDefinitionsOutput) { + op := &request.Operation{ + Name: opDescribeQueryDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeQueryDefinitionsInput{} + } + + output = &DescribeQueryDefinitionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeQueryDefinitions API operation for Amazon CloudWatch Logs. +// +// This operation returns a paginated list of your saved CloudWatch Logs Insights +// query definitions. +// +// You can use the queryDefinitionNamePrefix parameter to limit the results +// to only the query definitions that have names that start with a certain string. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeQueryDefinitions for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitions(input *DescribeQueryDefinitionsInput) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + return out, req.Send() +} + +// DescribeQueryDefinitionsWithContext is the same as DescribeQueryDefinitions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeQueryDefinitions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeQueryDefinitionsWithContext(ctx aws.Context, input *DescribeQueryDefinitionsInput, opts ...request.Option) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeResourcePolicies = "DescribeResourcePolicies" + +// DescribeResourcePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeResourcePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeResourcePolicies for more information on using the DescribeResourcePolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeResourcePoliciesRequest method. +// req, resp := client.DescribeResourcePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeResourcePolicies +func (c *CloudWatchLogs) DescribeResourcePoliciesRequest(input *DescribeResourcePoliciesInput) (req *request.Request, output *DescribeResourcePoliciesOutput) { + op := &request.Operation{ + Name: opDescribeResourcePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeResourcePoliciesInput{} + } + + output = &DescribeResourcePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeResourcePolicies API operation for Amazon CloudWatch Logs. +// +// Lists the resource policies in this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeResourcePolicies for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeResourcePolicies +func (c *CloudWatchLogs) DescribeResourcePolicies(input *DescribeResourcePoliciesInput) (*DescribeResourcePoliciesOutput, error) { + req, out := c.DescribeResourcePoliciesRequest(input) + return out, req.Send() +} + +// DescribeResourcePoliciesWithContext is the same as DescribeResourcePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeResourcePolicies for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeResourcePoliciesWithContext(ctx aws.Context, input *DescribeResourcePoliciesInput, opts ...request.Option) (*DescribeResourcePoliciesOutput, error) { + req, out := c.DescribeResourcePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" + +// DescribeSubscriptionFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscriptionFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSubscriptionFilters for more information on using the DescribeSubscriptionFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSubscriptionFiltersRequest method. 
+// req, resp := client.DescribeSubscriptionFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeSubscriptionFilters +func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { + op := &request.Operation{ + Name: opDescribeSubscriptionFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscriptionFiltersInput{} + } + + output = &DescribeSubscriptionFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSubscriptionFilters API operation for Amazon CloudWatch Logs. +// +// Lists the subscription filters for the specified log group. You can list +// all the subscription filters or filter the results by prefix. The results +// are ASCII-sorted by filter name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeSubscriptionFilters for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeSubscriptionFilters +func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + return out, req.Send() +} + +// DescribeSubscriptionFiltersWithContext is the same as DescribeSubscriptionFilters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubscriptionFilters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeSubscriptionFiltersWithContext(ctx aws.Context, input *DescribeSubscriptionFiltersInput, opts ...request.Option) (*DescribeSubscriptionFiltersOutput, error) { + req, out := c.DescribeSubscriptionFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSubscriptionFiltersPages iterates over the pages of a DescribeSubscriptionFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscriptionFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation. 
+// pageNum := 0 +// err := client.DescribeSubscriptionFiltersPages(params, +// func(page *cloudwatchlogs.DescribeSubscriptionFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(*DescribeSubscriptionFiltersOutput, bool) bool) error { + return c.DescribeSubscriptionFiltersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSubscriptionFiltersPagesWithContext same as DescribeSubscriptionFiltersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeSubscriptionFiltersPagesWithContext(ctx aws.Context, input *DescribeSubscriptionFiltersInput, fn func(*DescribeSubscriptionFiltersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSubscriptionFiltersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubscriptionFiltersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSubscriptionFiltersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisassociateKmsKey = "DisassociateKmsKey" + +// DisassociateKmsKeyRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateKmsKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateKmsKey for more information on using the DisassociateKmsKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateKmsKeyRequest method. +// req, resp := client.DisassociateKmsKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DisassociateKmsKey +func (c *CloudWatchLogs) DisassociateKmsKeyRequest(input *DisassociateKmsKeyInput) (req *request.Request, output *DisassociateKmsKeyOutput) { + op := &request.Operation{ + Name: opDisassociateKmsKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateKmsKeyInput{} + } + + output = &DisassociateKmsKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateKmsKey API operation for Amazon CloudWatch Logs. +// +// Disassociates the associated AWS Key Management Service (AWS KMS) customer +// master key (CMK) from the specified log group. +// +// After the AWS KMS CMK is disassociated from the log group, AWS CloudWatch +// Logs stops encrypting newly ingested data for the log group. All previously +// ingested data remains encrypted, and AWS CloudWatch Logs requires permissions +// for the CMK whenever the encrypted data is requested. +// +// Note that it can take up to 5 minutes for this operation to take effect. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DisassociateKmsKey for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DisassociateKmsKey +func (c *CloudWatchLogs) DisassociateKmsKey(input *DisassociateKmsKeyInput) (*DisassociateKmsKeyOutput, error) { + req, out := c.DisassociateKmsKeyRequest(input) + return out, req.Send() +} + +// DisassociateKmsKeyWithContext is the same as DisassociateKmsKey with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateKmsKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DisassociateKmsKeyWithContext(ctx aws.Context, input *DisassociateKmsKeyInput, opts ...request.Option) (*DisassociateKmsKeyOutput, error) { + req, out := c.DisassociateKmsKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opFilterLogEvents = "FilterLogEvents" + +// FilterLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the FilterLogEvents operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See FilterLogEvents for more information on using the FilterLogEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the FilterLogEventsRequest method. +// req, resp := client.FilterLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/FilterLogEvents +func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { + op := &request.Operation{ + Name: opFilterLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &FilterLogEventsInput{} + } + + output = &FilterLogEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// FilterLogEvents API operation for Amazon CloudWatch Logs. +// +// Lists log events from the specified log group. You can list all the log events +// or filter the results using a filter pattern, a time range, and the name +// of the log stream. +// +// By default, this operation returns as many log events as can fit in 1 MB +// (up to 10,000 log events) or all the events found within the time range that +// you specify. 
If the results include a token, then there are more log events +// available, and you can get additional results by specifying the token in +// a subsequent call. This operation can return empty results while there are +// more log events available through the token. +// +// The returned log events are sorted by event timestamp, the timestamp when +// the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents +// request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation FilterLogEvents for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/FilterLogEvents +func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { + req, out := c.FilterLogEventsRequest(input) + return out, req.Send() +} + +// FilterLogEventsWithContext is the same as FilterLogEvents with the addition of +// the ability to pass a context and additional request options. +// +// See FilterLogEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) FilterLogEventsWithContext(ctx aws.Context, input *FilterLogEventsInput, opts ...request.Option) (*FilterLogEventsOutput, error) { + req, out := c.FilterLogEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// FilterLogEventsPages iterates over the pages of a FilterLogEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See FilterLogEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a FilterLogEvents operation. +// pageNum := 0 +// err := client.FilterLogEventsPages(params, +// func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(*FilterLogEventsOutput, bool) bool) error { + return c.FilterLogEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// FilterLogEventsPagesWithContext same as FilterLogEventsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) FilterLogEventsPagesWithContext(ctx aws.Context, input *FilterLogEventsInput, fn func(*FilterLogEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *FilterLogEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.FilterLogEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*FilterLogEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetLogEvents = "GetLogEvents" + +// GetLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the GetLogEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLogEvents for more information on using the GetLogEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLogEventsRequest method. 
+// req, resp := client.GetLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogEvents +func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { + op := &request.Operation{ + Name: opGetLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextForwardToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetLogEventsInput{} + } + + output = &GetLogEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLogEvents API operation for Amazon CloudWatch Logs. +// +// Lists log events from the specified log stream. You can list all of the log +// events or filter using a time range. +// +// By default, this operation returns as many log events as can fit in a response +// size of 1MB (up to 10,000 log events). You can get additional log events +// by specifying one of the tokens in a subsequent call. This operation can +// return empty results while there are more log events available through the +// token. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetLogEvents for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogEvents +func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) { + req, out := c.GetLogEventsRequest(input) + return out, req.Send() +} + +// GetLogEventsWithContext is the same as GetLogEvents with the addition of +// the ability to pass a context and additional request options. +// +// See GetLogEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetLogEventsWithContext(ctx aws.Context, input *GetLogEventsInput, opts ...request.Option) (*GetLogEventsOutput, error) { + req, out := c.GetLogEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetLogEventsPages iterates over the pages of a GetLogEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLogEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLogEvents operation. +// pageNum := 0 +// err := client.GetLogEventsPages(params, +// func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(*GetLogEventsOutput, bool) bool) error { + return c.GetLogEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetLogEventsPagesWithContext same as GetLogEventsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetLogEventsPagesWithContext(ctx aws.Context, input *GetLogEventsInput, fn func(*GetLogEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + EndPageOnSameToken: true, + NewRequest: func() (*request.Request, error) { + var inCpy *GetLogEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLogEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetLogEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetLogGroupFields = "GetLogGroupFields" + +// GetLogGroupFieldsRequest generates a "aws/request.Request" representing the +// client's request for the GetLogGroupFields operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLogGroupFields for more information on using the GetLogGroupFields +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLogGroupFieldsRequest method. 
+// req, resp := client.GetLogGroupFieldsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogGroupFields +func (c *CloudWatchLogs) GetLogGroupFieldsRequest(input *GetLogGroupFieldsInput) (req *request.Request, output *GetLogGroupFieldsOutput) { + op := &request.Operation{ + Name: opGetLogGroupFields, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLogGroupFieldsInput{} + } + + output = &GetLogGroupFieldsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLogGroupFields API operation for Amazon CloudWatch Logs. +// +// Returns a list of the fields that are included in log events in the specified +// log group, along with the percentage of log events that contain each field. +// The search is limited to a time period that you specify. +// +// In the results, fields that start with @ are fields generated by CloudWatch +// Logs. For example, @timestamp is the timestamp of each log event. For more +// information about the fields that are generated by CloudWatch logs, see Supported +// Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html). +// +// The response results are sorted by the frequency percentage, starting with +// the highest percentage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetLogGroupFields for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. 
+// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogGroupFields +func (c *CloudWatchLogs) GetLogGroupFields(input *GetLogGroupFieldsInput) (*GetLogGroupFieldsOutput, error) { + req, out := c.GetLogGroupFieldsRequest(input) + return out, req.Send() +} + +// GetLogGroupFieldsWithContext is the same as GetLogGroupFields with the addition of +// the ability to pass a context and additional request options. +// +// See GetLogGroupFields for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetLogGroupFieldsWithContext(ctx aws.Context, input *GetLogGroupFieldsInput, opts ...request.Option) (*GetLogGroupFieldsOutput, error) { + req, out := c.GetLogGroupFieldsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLogRecord = "GetLogRecord" + +// GetLogRecordRequest generates a "aws/request.Request" representing the +// client's request for the GetLogRecord operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLogRecord for more information on using the GetLogRecord +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetLogRecordRequest method. +// req, resp := client.GetLogRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogRecord +func (c *CloudWatchLogs) GetLogRecordRequest(input *GetLogRecordInput) (req *request.Request, output *GetLogRecordOutput) { + op := &request.Operation{ + Name: opGetLogRecord, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLogRecordInput{} + } + + output = &GetLogRecordOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLogRecord API operation for Amazon CloudWatch Logs. +// +// Retrieves all of the fields and values of a single log event. All fields +// are retrieved, even if the original query that produced the logRecordPointer +// retrieved only a subset of fields. Fields are returned as field name/field +// value pairs. +// +// The full unparsed log event is returned within @message. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetLogRecord for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogRecord +func (c *CloudWatchLogs) GetLogRecord(input *GetLogRecordInput) (*GetLogRecordOutput, error) { + req, out := c.GetLogRecordRequest(input) + return out, req.Send() +} + +// GetLogRecordWithContext is the same as GetLogRecord with the addition of +// the ability to pass a context and additional request options. +// +// See GetLogRecord for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetLogRecordWithContext(ctx aws.Context, input *GetLogRecordInput, opts ...request.Option) (*GetLogRecordOutput, error) { + req, out := c.GetLogRecordRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetQueryResults = "GetQueryResults" + +// GetQueryResultsRequest generates a "aws/request.Request" representing the +// client's request for the GetQueryResults operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetQueryResults for more information on using the GetQueryResults +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetQueryResultsRequest method. 
+// req, resp := client.GetQueryResultsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetQueryResults +func (c *CloudWatchLogs) GetQueryResultsRequest(input *GetQueryResultsInput) (req *request.Request, output *GetQueryResultsOutput) { + op := &request.Operation{ + Name: opGetQueryResults, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetQueryResultsInput{} + } + + output = &GetQueryResultsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetQueryResults API operation for Amazon CloudWatch Logs. +// +// Returns the results from the specified query. +// +// Only the fields requested in the query are returned, along with a @ptr field, +// which is the identifier for the log record. You can use the value of @ptr +// in a GetLogRecord (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html) +// operation to get the full log record. +// +// GetQueryResults does not start a query execution. To run a query, use StartQuery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html). +// +// If the value of the Status field in the output is Running, this operation +// returns only partial results. If you see a value of Scheduled or Running +// for the status, you can retry the operation later to see the final results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation GetQueryResults for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. 
+// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetQueryResults +func (c *CloudWatchLogs) GetQueryResults(input *GetQueryResultsInput) (*GetQueryResultsOutput, error) { + req, out := c.GetQueryResultsRequest(input) + return out, req.Send() +} + +// GetQueryResultsWithContext is the same as GetQueryResults with the addition of +// the ability to pass a context and additional request options. +// +// See GetQueryResults for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResultsInput, opts ...request.Option) (*GetQueryResultsOutput, error) { + req, out := c.GetQueryResultsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsLogGroup = "ListTagsLogGroup" + +// ListTagsLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsLogGroup for more information on using the ListTagsLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsLogGroupRequest method. 
+// req, resp := client.ListTagsLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsLogGroup +func (c *CloudWatchLogs) ListTagsLogGroupRequest(input *ListTagsLogGroupInput) (req *request.Request, output *ListTagsLogGroupOutput) { + op := &request.Operation{ + Name: opListTagsLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsLogGroupInput{} + } + + output = &ListTagsLogGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsLogGroup API operation for Amazon CloudWatch Logs. +// +// Lists the tags for the specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation ListTagsLogGroup for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsLogGroup +func (c *CloudWatchLogs) ListTagsLogGroup(input *ListTagsLogGroupInput) (*ListTagsLogGroupOutput, error) { + req, out := c.ListTagsLogGroupRequest(input) + return out, req.Send() +} + +// ListTagsLogGroupWithContext is the same as ListTagsLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) ListTagsLogGroupWithContext(ctx aws.Context, input *ListTagsLogGroupInput, opts ...request.Option) (*ListTagsLogGroupOutput, error) { + req, out := c.ListTagsLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDestination = "PutDestination" + +// PutDestinationRequest generates a "aws/request.Request" representing the +// client's request for the PutDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDestination for more information on using the PutDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutDestinationRequest method. +// req, resp := client.PutDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestination +func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { + op := &request.Operation{ + Name: opPutDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationInput{} + } + + output = &PutDestinationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDestination API operation for Amazon CloudWatch Logs. +// +// Creates or updates a destination. This operation is used only to create destinations +// for cross-account subscriptions. 
+// +// A destination encapsulates a physical resource (such as an Amazon Kinesis +// stream) and enables you to subscribe to a real-time stream of log events +// for a different account, ingested using PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). +// +// Through an access policy, a destination controls what is written to it. By +// default, PutDestination does not set any access policy with the destination, +// which means a cross-account user cannot call PutSubscriptionFilter (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html) +// against this destination. To enable this, the destination owner must call +// PutDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html) +// after PutDestination. +// +// To perform a PutDestination operation, you must also have the iam:PassRole +// permission. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestination for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestination +func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + return out, req.Send() +} + +// PutDestinationWithContext is the same as PutDestination with the addition of +// the ability to pass a context and additional request options. +// +// See PutDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDestinationWithContext(ctx aws.Context, input *PutDestinationInput, opts ...request.Option) (*PutDestinationOutput, error) { + req, out := c.PutDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDestinationPolicy = "PutDestinationPolicy" + +// PutDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDestinationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDestinationPolicy for more information on using the PutDestinationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutDestinationPolicyRequest method. 
+// req, resp := client.PutDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestinationPolicy +func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { + op := &request.Operation{ + Name: opPutDestinationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDestinationPolicyInput{} + } + + output = &PutDestinationPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutDestinationPolicy API operation for Amazon CloudWatch Logs. +// +// Creates or updates an access policy associated with an existing destination. +// An access policy is an IAM policy document (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) +// that is used to authorize claims to register a subscription filter against +// a given destination. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutDestinationPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestinationPolicy +func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + return out, req.Send() +} + +// PutDestinationPolicyWithContext is the same as PutDestinationPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutDestinationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutDestinationPolicyWithContext(ctx aws.Context, input *PutDestinationPolicyInput, opts ...request.Option) (*PutDestinationPolicyOutput, error) { + req, out := c.PutDestinationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutLogEvents = "PutLogEvents" + +// PutLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutLogEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutLogEvents for more information on using the PutLogEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutLogEventsRequest method. 
+// req, resp := client.PutLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutLogEvents +func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) { + op := &request.Operation{ + Name: opPutLogEvents, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutLogEventsInput{} + } + + output = &PutLogEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutLogEvents API operation for Amazon CloudWatch Logs. +// +// Uploads a batch of log events to the specified log stream. +// +// You must include the sequence token obtained from the response of the previous +// call. An upload in a newly created log stream does not require a sequence +// token. You can also get the sequence token in the expectedSequenceToken field +// from InvalidSequenceTokenException. If you call PutLogEvents twice within +// a narrow time period using the same value for sequenceToken, both calls might +// be successful or one might be rejected. +// +// The batch of events must satisfy the following constraints: +// +// * The maximum batch size is 1,048,576 bytes. This size is calculated as +// the sum of all event messages in UTF-8, plus 26 bytes for each log event. +// +// * None of the log events in the batch can be more than 2 hours in the +// future. +// +// * None of the log events in the batch can be older than 14 days or older +// than the retention period of the log group. +// +// * The log events in the batch must be in chronological order by their +// timestamp. The timestamp is the time the event occurred, expressed as +// the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools +// for PowerShell and the AWS SDK for .NET, the timestamp is specified in +// .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) 
+// +// * A batch of log events in a single request cannot span more than 24 hours. +// Otherwise, the operation fails. +// +// * The maximum number of log events in a batch is 10,000. +// +// * There is a quota of 5 requests per second per log stream. Additional +// requests are throttled. This quota can't be changed. +// +// If a call to PutLogEvents returns "UnrecognizedClientException" the most +// likely cause is an invalid AWS access key ID or secret key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutLogEvents for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * InvalidSequenceTokenException +// The sequence token is not valid. You can get the correct sequence token in +// the expectedSequenceToken field in the InvalidSequenceTokenException message. +// +// * DataAlreadyAcceptedException +// The event was already logged. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// * UnrecognizedClientException +// The most likely cause is an invalid AWS access key ID or secret key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutLogEvents +func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { + req, out := c.PutLogEventsRequest(input) + return out, req.Send() +} + +// PutLogEventsWithContext is the same as PutLogEvents with the addition of +// the ability to pass a context and additional request options. +// +// See PutLogEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutLogEventsWithContext(ctx aws.Context, input *PutLogEventsInput, opts ...request.Option) (*PutLogEventsOutput, error) { + req, out := c.PutLogEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutMetricFilter = "PutMetricFilter" + +// PutMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricFilter for more information on using the PutMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutMetricFilterRequest method. 
+// req, resp := client.PutMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutMetricFilter +func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { + op := &request.Operation{ + Name: opPutMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricFilterInput{} + } + + output = &PutMetricFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutMetricFilter API operation for Amazon CloudWatch Logs. +// +// Creates or updates a metric filter and associates it with the specified log +// group. Metric filters allow you to configure rules to extract metric data +// from log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). +// +// The maximum number of metric filters that can be associated with a log group +// is 100. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutMetricFilter for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutMetricFilter +func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + return out, req.Send() +} + +// PutMetricFilterWithContext is the same as PutMetricFilter with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutMetricFilterWithContext(ctx aws.Context, input *PutMetricFilterInput, opts ...request.Option) (*PutMetricFilterOutput, error) { + req, out := c.PutMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutQueryDefinition = "PutQueryDefinition" + +// PutQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the PutQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutQueryDefinition for more information on using the PutQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutQueryDefinitionRequest method. 
+// req, resp := client.PutQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinitionRequest(input *PutQueryDefinitionInput) (req *request.Request, output *PutQueryDefinitionOutput) { + op := &request.Operation{ + Name: opPutQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutQueryDefinitionInput{} + } + + output = &PutQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Creates or updates a query definition for CloudWatch Logs Insights. For more +// information, see Analyzing Log Data with CloudWatch Logs Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html). +// +// To update a query definition, specify its queryDefinitionId in your request. +// The values of name, queryString, and logGroupNames are changed to the values +// that you specify in your update operation. No current values are retained +// from the current query definition. For example, if you update a current query +// definition that includes log groups, and you don't specify the logGroupNames +// parameter in your update operation, the query definition changes to contain +// no log groups. +// +// You must have the logs:PutQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutQueryDefinition for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. 
+// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinition(input *PutQueryDefinitionInput) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + return out, req.Send() +} + +// PutQueryDefinitionWithContext is the same as PutQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See PutQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutQueryDefinitionWithContext(ctx aws.Context, input *PutQueryDefinitionInput, opts ...request.Option) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutResourcePolicy = "PutResourcePolicy" + +// PutResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutResourcePolicy for more information on using the PutResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutResourcePolicyRequest method. +// req, resp := client.PutResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutResourcePolicy +func (c *CloudWatchLogs) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { + op := &request.Operation{ + Name: opPutResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutResourcePolicyInput{} + } + + output = &PutResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutResourcePolicy API operation for Amazon CloudWatch Logs. +// +// Creates or updates a resource policy allowing other AWS services to put log +// events to this account, such as Amazon Route 53. An account can have up to +// 10 resource policies per AWS Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutResourcePolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutResourcePolicy +func (c *CloudWatchLogs) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + return out, req.Send() +} + +// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutRetentionPolicy for more information on using the PutRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutRetentionPolicyRequest method. 
+// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutRetentionPolicy +func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + output = &PutRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutRetentionPolicy API operation for Amazon CloudWatch Logs. +// +// Sets the retention of the specified log group. A retention policy allows +// you to configure the number of days for which to retain log events in the +// specified log group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutRetentionPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutRetentionPolicy +func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + return out, req.Send() +} + +// PutRetentionPolicyWithContext is the same as PutRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutRetentionPolicyWithContext(ctx aws.Context, input *PutRetentionPolicyInput, opts ...request.Option) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutSubscriptionFilter = "PutSubscriptionFilter" + +// PutSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutSubscriptionFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutSubscriptionFilter for more information on using the PutSubscriptionFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutSubscriptionFilterRequest method. 
+// req, resp := client.PutSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutSubscriptionFilter +func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { + op := &request.Operation{ + Name: opPutSubscriptionFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutSubscriptionFilterInput{} + } + + output = &PutSubscriptionFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutSubscriptionFilter API operation for Amazon CloudWatch Logs. +// +// Creates or updates a subscription filter and associates it with the specified +// log group. Subscription filters allow you to subscribe to a real-time stream +// of log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) +// and have them delivered to a specific destination. When log events are sent +// to the receiving service, they are Base64 encoded and compressed with the +// gzip format. +// +// The following destinations are supported for subscription filters: +// +// * An Amazon Kinesis stream belonging to the same account as the subscription +// filter, for same-account delivery. +// +// * A logical destination that belongs to a different account, for cross-account +// delivery. +// +// * An Amazon Kinesis Firehose delivery stream that belongs to the same +// account as the subscription filter, for same-account delivery. +// +// * An AWS Lambda function that belongs to the same account as the subscription +// filter, for same-account delivery. +// +// There can only be one subscription filter associated with a log group. 
If +// you are updating an existing filter, you must specify the correct name in +// filterName. Otherwise, the call fails because you cannot associate a second +// filter with a log group. +// +// To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole +// permission. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutSubscriptionFilter for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * OperationAbortedException +// Multiple requests to update the same resource were in conflict. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutSubscriptionFilter +func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + return out, req.Send() +} + +// PutSubscriptionFilterWithContext is the same as PutSubscriptionFilter with the addition of +// the ability to pass a context and additional request options. +// +// See PutSubscriptionFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatchLogs) PutSubscriptionFilterWithContext(ctx aws.Context, input *PutSubscriptionFilterInput, opts ...request.Option) (*PutSubscriptionFilterOutput, error) { + req, out := c.PutSubscriptionFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartQuery = "StartQuery" + +// StartQueryRequest generates a "aws/request.Request" representing the +// client's request for the StartQuery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartQuery for more information on using the StartQuery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartQueryRequest method. +// req, resp := client.StartQueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartQuery +func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request.Request, output *StartQueryOutput) { + op := &request.Operation{ + Name: opStartQuery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartQueryInput{} + } + + output = &StartQueryOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartQuery API operation for Amazon CloudWatch Logs. +// +// Schedules a query of a log group using CloudWatch Logs Insights. You specify +// the log group and time range to query and the query string to use. 
+// +// For more information, see CloudWatch Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). +// +// Queries time out after 15 minutes of execution. If your queries are timing +// out, reduce the time range being searched or partition your query into a +// number of queries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation StartQuery for usage and error information. +// +// Returned Error Types: +// * MalformedQueryException +// The query string is not valid. Details about this error are displayed in +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). +// +// For more information about valid query syntax, see CloudWatch Logs Insights +// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). +// +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * LimitExceededException +// You have reached the maximum number of resources that can be created. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StartQuery +func (c *CloudWatchLogs) StartQuery(input *StartQueryInput) (*StartQueryOutput, error) { + req, out := c.StartQueryRequest(input) + return out, req.Send() +} + +// StartQueryWithContext is the same as StartQuery with the addition of +// the ability to pass a context and additional request options. +// +// See StartQuery for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) StartQueryWithContext(ctx aws.Context, input *StartQueryInput, opts ...request.Option) (*StartQueryOutput, error) { + req, out := c.StartQueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopQuery = "StopQuery" + +// StopQueryRequest generates a "aws/request.Request" representing the +// client's request for the StopQuery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopQuery for more information on using the StopQuery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopQueryRequest method. +// req, resp := client.StopQueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StopQuery +func (c *CloudWatchLogs) StopQueryRequest(input *StopQueryInput) (req *request.Request, output *StopQueryOutput) { + op := &request.Operation{ + Name: opStopQuery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopQueryInput{} + } + + output = &StopQueryOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopQuery API operation for Amazon CloudWatch Logs. 
+// +// Stops a CloudWatch Logs Insights query that is in progress. If the query +// has already ended, the operation returns an error indicating that the specified +// query is not running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation StopQuery for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/StopQuery +func (c *CloudWatchLogs) StopQuery(input *StopQueryInput) (*StopQueryOutput, error) { + req, out := c.StopQueryRequest(input) + return out, req.Send() +} + +// StopQueryWithContext is the same as StopQuery with the addition of +// the ability to pass a context and additional request options. +// +// See StopQuery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) StopQueryWithContext(ctx aws.Context, input *StopQueryInput, opts ...request.Option) (*StopQueryOutput, error) { + req, out := c.StopQueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagLogGroup = "TagLogGroup" + +// TagLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the TagLogGroup operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagLogGroup for more information on using the TagLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagLogGroupRequest method. +// req, resp := client.TagLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagLogGroup +func (c *CloudWatchLogs) TagLogGroupRequest(input *TagLogGroupInput) (req *request.Request, output *TagLogGroupOutput) { + op := &request.Operation{ + Name: opTagLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagLogGroupInput{} + } + + output = &TagLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagLogGroup API operation for Amazon CloudWatch Logs. +// +// Adds or updates the specified tags for the specified log group. +// +// To list the tags for a log group, use ListTagsLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). +// To remove tags, use UntagLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagLogGroup.html). 
+// +// For more information about tags, see Tag Log Groups in Amazon CloudWatch +// Logs (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) +// in the Amazon CloudWatch Logs User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TagLogGroup for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TagLogGroup +func (c *CloudWatchLogs) TagLogGroup(input *TagLogGroupInput) (*TagLogGroupOutput, error) { + req, out := c.TagLogGroupRequest(input) + return out, req.Send() +} + +// TagLogGroupWithContext is the same as TagLogGroup with the addition of +// the ability to pass a context and additional request options. +// +// See TagLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) TagLogGroupWithContext(ctx aws.Context, input *TagLogGroupInput, opts ...request.Option) (*TagLogGroupOutput, error) { + req, out := c.TagLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTestMetricFilter = "TestMetricFilter" + +// TestMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the TestMetricFilter operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestMetricFilter for more information on using the TestMetricFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TestMetricFilterRequest method. +// req, resp := client.TestMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TestMetricFilter +func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { + op := &request.Operation{ + Name: opTestMetricFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestMetricFilterInput{} + } + + output = &TestMetricFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestMetricFilter API operation for Amazon CloudWatch Logs. +// +// Tests the filter pattern of a metric filter against a sample of log event +// messages. You can use this operation to validate the correctness of a metric +// filter pattern. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation TestMetricFilter for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. 
+// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/TestMetricFilter +func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + return out, req.Send() +} + +// TestMetricFilterWithContext is the same as TestMetricFilter with the addition of +// the ability to pass a context and additional request options. +// +// See TestMetricFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) TestMetricFilterWithContext(ctx aws.Context, input *TestMetricFilterInput, opts ...request.Option) (*TestMetricFilterOutput, error) { + req, out := c.TestMetricFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagLogGroup = "UntagLogGroup" + +// UntagLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the UntagLogGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagLogGroup for more information on using the UntagLogGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagLogGroupRequest method. 
+// req, resp := client.UntagLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagLogGroup +func (c *CloudWatchLogs) UntagLogGroupRequest(input *UntagLogGroupInput) (req *request.Request, output *UntagLogGroupOutput) { + op := &request.Operation{ + Name: opUntagLogGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagLogGroupInput{} + } + + output = &UntagLogGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagLogGroup API operation for Amazon CloudWatch Logs. +// +// Removes the specified tags from the specified log group. +// +// To list the tags for a log group, use ListTagsLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). +// To add tags, use TagLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagLogGroup.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation UntagLogGroup for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/UntagLogGroup +func (c *CloudWatchLogs) UntagLogGroup(input *UntagLogGroupInput) (*UntagLogGroupOutput, error) { + req, out := c.UntagLogGroupRequest(input) + return out, req.Send() +} + +// UntagLogGroupWithContext is the same as UntagLogGroup with the addition of +// the ability to pass a context and additional request options. 
+// +// See UntagLogGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) UntagLogGroupWithContext(ctx aws.Context, input *UntagLogGroupInput, opts ...request.Option) (*UntagLogGroupOutput, error) { + req, out := c.UntagLogGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssociateKmsKeyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + // This must be a symmetric CMK. For more information, see Amazon Resource Names + // - AWS Key Management Service (AWS KMS) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // and Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). + // + // KmsKeyId is a required field + KmsKeyId *string `locationName:"kmsKeyId" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateKmsKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateKmsKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateKmsKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateKmsKeyInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AssociateKmsKeyInput) SetKmsKeyId(v string) *AssociateKmsKeyInput { + s.KmsKeyId = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *AssociateKmsKeyInput) SetLogGroupName(v string) *AssociateKmsKeyInput { + s.LogGroupName = &v + return s +} + +type AssociateKmsKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateKmsKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateKmsKeyOutput) GoString() string { + return s.String() +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. + // + // TaskId is a required field + TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. +func (s *CancelExportTaskInput) SetTaskId(v string) *CancelExportTaskInput { + s.TaskId = &v + return s +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CreateExportTaskInput struct { + _ struct{} `type:"structure"` + + // The name of S3 bucket for the exported log data. The bucket must be in the + // same AWS region. + // + // Destination is a required field + Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` + + // The prefix used as the start of the key for every object exported. If you + // don't specify a value, the default is exportedlogs. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // The start time of the range for the request, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this + // time are not exported. + // + // From is a required field + From *int64 `locationName:"from" type:"long" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Export only log streams that match the provided prefix. If you don't specify + // a value, no prefix filter is applied. 
+ LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // The end time of the range for the request, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time + // are not exported. + // + // To is a required field + To *int64 `locationName:"to" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Destination != nil && len(*s.Destination) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) + } + if s.From == nil { + invalidParams.Add(request.NewErrParamRequired("From")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.TaskName != nil && len(*s.TaskName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskName", 1)) + } + if s.To == nil { + invalidParams.Add(request.NewErrParamRequired("To")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. 
+func (s *CreateExportTaskInput) SetDestination(v string) *CreateExportTaskInput { + s.Destination = &v + return s +} + +// SetDestinationPrefix sets the DestinationPrefix field's value. +func (s *CreateExportTaskInput) SetDestinationPrefix(v string) *CreateExportTaskInput { + s.DestinationPrefix = &v + return s +} + +// SetFrom sets the From field's value. +func (s *CreateExportTaskInput) SetFrom(v int64) *CreateExportTaskInput { + s.From = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateExportTaskInput) SetLogGroupName(v string) *CreateExportTaskInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. +func (s *CreateExportTaskInput) SetLogStreamNamePrefix(v string) *CreateExportTaskInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetTaskName sets the TaskName field's value. +func (s *CreateExportTaskInput) SetTaskName(v string) *CreateExportTaskInput { + s.TaskName = &v + return s +} + +// SetTo sets the To field's value. +func (s *CreateExportTaskInput) SetTo(v int64) *CreateExportTaskInput { + s.To = &v + return s +} + +type CreateExportTaskOutput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateExportTaskOutput) GoString() string { + return s.String() +} + +// SetTaskId sets the TaskId field's value. +func (s *CreateExportTaskOutput) SetTaskId(v string) *CreateExportTaskOutput { + s.TaskId = &v + return s +} + +type CreateLogGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. 
+ // For more information, see Amazon Resource Names - AWS Key Management Service + // (AWS KMS) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms). + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The key-value pairs to use for the tags. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s CreateLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateLogGroupInput) SetKmsKeyId(v string) *CreateLogGroupInput { + s.KmsKeyId = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateLogGroupInput) SetLogGroupName(v string) *CreateLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateLogGroupInput) SetTags(v map[string]*string) *CreateLogGroupInput { + s.Tags = v + return s +} + +type CreateLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogGroupOutput) GoString() string { + return s.String() +} + +type CreateLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *CreateLogStreamInput) SetLogGroupName(v string) *CreateLogStreamInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *CreateLogStreamInput) SetLogStreamName(v string) *CreateLogStreamInput { + s.LogStreamName = &v + return s +} + +type CreateLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLogStreamOutput) GoString() string { + return s.String() +} + +// The event was already logged. +type DataAlreadyAcceptedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DataAlreadyAcceptedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataAlreadyAcceptedException) GoString() string { + return s.String() +} + +func newErrorDataAlreadyAcceptedException(v protocol.ResponseMetadata) error { + return &DataAlreadyAcceptedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DataAlreadyAcceptedException) Code() string { + return "DataAlreadyAcceptedException" +} + +// Message returns the exception's message. +func (s *DataAlreadyAcceptedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *DataAlreadyAcceptedException) OrigErr() error { + return nil +} + +func (s *DataAlreadyAcceptedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DataAlreadyAcceptedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DataAlreadyAcceptedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type DeleteDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationName sets the DestinationName field's value. 
+func (s *DeleteDestinationInput) SetDestinationName(v string) *DeleteDestinationInput { + s.DestinationName = &v + return s +} + +type DeleteDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDestinationOutput) GoString() string { + return s.String() +} + +type DeleteLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *DeleteLogGroupInput) SetLogGroupName(v string) *DeleteLogGroupInput { + s.LogGroupName = &v + return s +} + +type DeleteLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogGroupOutput) GoString() string { + return s.String() +} + +type DeleteLogStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLogStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLogStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *DeleteLogStreamInput) SetLogGroupName(v string) *DeleteLogStreamInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *DeleteLogStreamInput) SetLogStreamName(v string) *DeleteLogStreamInput { + s.LogStreamName = &v + return s +} + +type DeleteLogStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLogStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLogStreamOutput) GoString() string { + return s.String() +} + +type DeleteMetricFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the metric filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. +func (s *DeleteMetricFilterInput) SetFilterName(v string) *DeleteMetricFilterInput { + s.FilterName = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteMetricFilterInput) SetLogGroupName(v string) *DeleteMetricFilterInput { + s.LogGroupName = &v + return s +} + +type DeleteMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricFilterOutput) GoString() string { + return s.String() +} + +type DeleteQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition that you want to delete. You can use DescribeQueryDefinitions + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html) + // to retrieve the IDs of your saved query definitions. 
+ // + // QueryDefinitionId is a required field + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteQueryDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueryDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteQueryDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteQueryDefinitionInput"} + if s.QueryDefinitionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryDefinitionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *DeleteQueryDefinitionInput) SetQueryDefinitionId(v string) *DeleteQueryDefinitionInput { + s.QueryDefinitionId = &v + return s +} + +type DeleteQueryDefinitionOutput struct { + _ struct{} `type:"structure"` + + // A value of TRUE indicates that the operation succeeded. FALSE indicates that + // the operation failed. + Success *bool `locationName:"success" type:"boolean"` +} + +// String returns the string representation +func (s DeleteQueryDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueryDefinitionOutput) GoString() string { + return s.String() +} + +// SetSuccess sets the Success field's value. +func (s *DeleteQueryDefinitionOutput) SetSuccess(v bool) *DeleteQueryDefinitionOutput { + s.Success = &v + return s +} + +type DeleteResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy to be revoked. This parameter is required. 
+ PolicyName *string `locationName:"policyName" type:"string"` +} + +// String returns the string representation +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteResourcePolicyInput) SetPolicyName(v string) *DeleteResourcePolicyInput { + s.PolicyName = &v + return s +} + +type DeleteResourcePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcePolicyOutput) GoString() string { + return s.String() +} + +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *DeleteRetentionPolicyInput) SetLogGroupName(v string) *DeleteRetentionPolicyInput { + s.LogGroupName = &v + return s +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + +type DeleteSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the subscription filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. 
+func (s *DeleteSubscriptionFilterInput) SetFilterName(v string) *DeleteSubscriptionFilterInput { + s.FilterName = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DeleteSubscriptionFilterInput) SetLogGroupName(v string) *DeleteSubscriptionFilterInput { + s.LogGroupName = &v + return s +} + +type DeleteSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriptionFilterOutput) GoString() string { + return s.String() +} + +type DescribeDestinationsInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. If you don't specify a value, no prefix filter is applied. + DestinationNamePrefix *string `min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDestinationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"} + if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationNamePrefix sets the DestinationNamePrefix field's value. +func (s *DescribeDestinationsInput) SetDestinationNamePrefix(v string) *DescribeDestinationsInput { + s.DestinationNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeDestinationsInput) SetLimit(v int64) *DescribeDestinationsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDestinationsInput) SetNextToken(v string) *DescribeDestinationsInput { + s.NextToken = &v + return s +} + +type DescribeDestinationsOutput struct { + _ struct{} `type:"structure"` + + // The destinations. + Destinations []*Destination `locationName:"destinations" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDestinationsOutput) GoString() string { + return s.String() +} + +// SetDestinations sets the Destinations field's value. +func (s *DescribeDestinationsOutput) SetDestinations(v []*Destination) *DescribeDestinationsOutput { + s.Destinations = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeDestinationsOutput) SetNextToken(v string) *DescribeDestinationsOutput { + s.NextToken = &v + return s +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The status code of the export task. Specifying a status code filters the + // results to zero or more export tasks. + StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` + + // The ID of the export task. Specifying a task ID filters the results to zero + // or one export tasks. + TaskId *string `locationName:"taskId" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.TaskId != nil && len(*s.TaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. 
+func (s *DescribeExportTasksInput) SetLimit(v int64) *DescribeExportTasksInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportTasksInput) SetNextToken(v string) *DescribeExportTasksInput { + s.NextToken = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *DescribeExportTasksInput) SetStatusCode(v string) *DescribeExportTasksInput { + s.StatusCode = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *DescribeExportTasksInput) SetTaskId(v string) *DescribeExportTasksInput { + s.TaskId = &v + return s +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // The export tasks. + ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +// SetExportTasks sets the ExportTasks field's value. +func (s *DescribeExportTasksOutput) SetExportTasks(v []*ExportTask) *DescribeExportTasksOutput { + s.ExportTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportTasksOutput) SetNextToken(v string) *DescribeExportTasksOutput { + s.NextToken = &v + return s +} + +type DescribeLogGroupsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The prefix to match. + LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` + + // The token for the next set of items to return. 
(You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLogGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeLogGroupsInput) SetLimit(v int64) *DescribeLogGroupsInput { + s.Limit = &v + return s +} + +// SetLogGroupNamePrefix sets the LogGroupNamePrefix field's value. +func (s *DescribeLogGroupsInput) SetLogGroupNamePrefix(v string) *DescribeLogGroupsInput { + s.LogGroupNamePrefix = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogGroupsInput) SetNextToken(v string) *DescribeLogGroupsInput { + s.NextToken = &v + return s +} + +type DescribeLogGroupsOutput struct { + _ struct{} `type:"structure"` + + // The log groups. + // + // If the retentionInDays value if not included for a log group, then that log + // group is set to have its events never expire. + LogGroups []*LogGroup `locationName:"logGroups" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. 
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLogGroupsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLogGroupsOutput) GoString() string {
+	return s.String()
+}
+
+// SetLogGroups sets the LogGroups field's value.
+func (s *DescribeLogGroupsOutput) SetLogGroups(v []*LogGroup) *DescribeLogGroupsOutput {
+	s.LogGroups = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeLogGroupsOutput) SetNextToken(v string) *DescribeLogGroupsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeLogStreamsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If the value is true, results are returned in descending order. If the value
+	// is false, results are returned in ascending order. The default value is
+	// false.
+	Descending *bool `locationName:"descending" type:"boolean"`
+
+	// The maximum number of items returned. If you don't specify a value, the default
+	// is up to 50 items.
+	Limit *int64 `locationName:"limit" min:"1" type:"integer"`
+
+	// The name of the log group.
+	//
+	// LogGroupName is a required field
+	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`
+
+	// The prefix to match.
+	//
+	// If orderBy is LastEventTime, you cannot specify this parameter.
+	LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"`
+
+	// The token for the next set of items to return. (You received this token from
+	// a previous call.)
+	NextToken *string `locationName:"nextToken" min:"1" type:"string"`
+
+	// If the value is LogStreamName, the results are ordered by log stream name.
+	// If the value is LastEventTime, the results are ordered by the event time.
+	// The default value is LogStreamName.
+ // + // If you order the results by event time, you cannot specify the logStreamNamePrefix + // parameter. + // + // lastEventTimeStamp represents the time of the most recent log event in the + // log stream in CloudWatch Logs. This number is expressed as the number of + // milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimeStamp updates on + // an eventual consistency basis. It typically updates in less than an hour + // from ingestion, but in rare situations might take longer. + OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` +} + +// String returns the string representation +func (s DescribeLogStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLogStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescending sets the Descending field's value. +func (s *DescribeLogStreamsInput) SetDescending(v bool) *DescribeLogStreamsInput { + s.Descending = &v + return s +} + +// SetLimit sets the Limit field's value. 
+func (s *DescribeLogStreamsInput) SetLimit(v int64) *DescribeLogStreamsInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeLogStreamsInput) SetLogGroupName(v string) *DescribeLogStreamsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. +func (s *DescribeLogStreamsInput) SetLogStreamNamePrefix(v string) *DescribeLogStreamsInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogStreamsInput) SetNextToken(v string) *DescribeLogStreamsInput { + s.NextToken = &v + return s +} + +// SetOrderBy sets the OrderBy field's value. +func (s *DescribeLogStreamsInput) SetOrderBy(v string) *DescribeLogStreamsInput { + s.OrderBy = &v + return s +} + +type DescribeLogStreamsOutput struct { + _ struct{} `type:"structure"` + + // The log streams. + LogStreams []*LogStream `locationName:"logStreams" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeLogStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLogStreamsOutput) GoString() string { + return s.String() +} + +// SetLogStreams sets the LogStreams field's value. +func (s *DescribeLogStreamsOutput) SetLogStreams(v []*LogStream) *DescribeLogStreamsOutput { + s.LogStreams = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLogStreamsOutput) SetNextToken(v string) *DescribeLogStreamsOutput { + s.NextToken = &v + return s +} + +type DescribeMetricFiltersInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. 
CloudWatch Logs uses the value you set here only if + // you also include the logGroupName parameter in your request. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Filters results to include only those with the specified metric name. If + // you include this parameter in your request, you must also include the metricNamespace + // parameter. + MetricName *string `locationName:"metricName" type:"string"` + + // Filters results to include only those in the specified namespace. If you + // include this parameter in your request, you must also include the metricName + // parameter. + MetricNamespace *string `locationName:"metricNamespace" type:"string"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMetricFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterNamePrefix sets the FilterNamePrefix field's value. +func (s *DescribeMetricFiltersInput) SetFilterNamePrefix(v string) *DescribeMetricFiltersInput { + s.FilterNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeMetricFiltersInput) SetLimit(v int64) *DescribeMetricFiltersInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeMetricFiltersInput) SetLogGroupName(v string) *DescribeMetricFiltersInput { + s.LogGroupName = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *DescribeMetricFiltersInput) SetMetricName(v string) *DescribeMetricFiltersInput { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *DescribeMetricFiltersInput) SetMetricNamespace(v string) *DescribeMetricFiltersInput { + s.MetricNamespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMetricFiltersInput) SetNextToken(v string) *DescribeMetricFiltersInput { + s.NextToken = &v + return s +} + +type DescribeMetricFiltersOutput struct { + _ struct{} `type:"structure"` + + // The metric filters. 
+ MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeMetricFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMetricFiltersOutput) GoString() string { + return s.String() +} + +// SetMetricFilters sets the MetricFilters field's value. +func (s *DescribeMetricFiltersOutput) SetMetricFilters(v []*MetricFilter) *DescribeMetricFiltersOutput { + s.MetricFilters = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMetricFiltersOutput) SetNextToken(v string) *DescribeMetricFiltersOutput { + s.NextToken = &v + return s +} + +type DescribeQueriesInput struct { + _ struct{} `type:"structure"` + + // Limits the returned queries to only those for the specified log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // Limits the number of returned queries to the specified number. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Limits the returned queries to only those that have the specified status. + // Valid values are Cancelled, Complete, Failed, Running, and Scheduled. + Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation +func (s DescribeQueriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeQueriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeQueriesInput"} + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeQueriesInput) SetLogGroupName(v string) *DescribeQueriesInput { + s.LogGroupName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeQueriesInput) SetMaxResults(v int64) *DescribeQueriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueriesInput) SetNextToken(v string) *DescribeQueriesInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeQueriesInput) SetStatus(v string) *DescribeQueriesInput { + s.Status = &v + return s +} + +type DescribeQueriesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of queries that match the request. + Queries []*QueryInfo `locationName:"queries" type:"list"` +} + +// String returns the string representation +func (s DescribeQueriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueriesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeQueriesOutput) SetNextToken(v string) *DescribeQueriesOutput { + s.NextToken = &v + return s +} + +// SetQueries sets the Queries field's value. +func (s *DescribeQueriesOutput) SetQueries(v []*QueryInfo) *DescribeQueriesOutput { + s.Queries = v + return s +} + +type DescribeQueryDefinitionsInput struct { + _ struct{} `type:"structure"` + + // Limits the number of returned query definitions to the specified number. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Use this parameter to filter your results to only the query definitions that + // have names that start with the prefix you specify. + QueryDefinitionNamePrefix *string `locationName:"queryDefinitionNamePrefix" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeQueryDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueryDefinitionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeQueryDefinitionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeQueryDefinitionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.QueryDefinitionNamePrefix != nil && len(*s.QueryDefinitionNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryDefinitionNamePrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeQueryDefinitionsInput) SetMaxResults(v int64) *DescribeQueryDefinitionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsInput) SetNextToken(v string) *DescribeQueryDefinitionsInput { + s.NextToken = &v + return s +} + +// SetQueryDefinitionNamePrefix sets the QueryDefinitionNamePrefix field's value. +func (s *DescribeQueryDefinitionsInput) SetQueryDefinitionNamePrefix(v string) *DescribeQueryDefinitionsInput { + s.QueryDefinitionNamePrefix = &v + return s +} + +type DescribeQueryDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of query definitions that match your request. + QueryDefinitions []*QueryDefinition `locationName:"queryDefinitions" type:"list"` +} + +// String returns the string representation +func (s DescribeQueryDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueryDefinitionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsOutput) SetNextToken(v string) *DescribeQueryDefinitionsOutput { + s.NextToken = &v + return s +} + +// SetQueryDefinitions sets the QueryDefinitions field's value. +func (s *DescribeQueryDefinitionsOutput) SetQueryDefinitions(v []*QueryDefinition) *DescribeQueryDefinitionsOutput { + s.QueryDefinitions = v + return s +} + +type DescribeResourcePoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of resource policies to be displayed with one call of + // this API. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeResourcePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourcePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeResourcePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeResourcePoliciesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *DescribeResourcePoliciesInput) SetLimit(v int64) *DescribeResourcePoliciesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeResourcePoliciesInput) SetNextToken(v string) *DescribeResourcePoliciesInput { + s.NextToken = &v + return s +} + +type DescribeResourcePoliciesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The resource policies that exist in this account. + ResourcePolicies []*ResourcePolicy `locationName:"resourcePolicies" type:"list"` +} + +// String returns the string representation +func (s DescribeResourcePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeResourcePoliciesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeResourcePoliciesOutput) SetNextToken(v string) *DescribeResourcePoliciesOutput { + s.NextToken = &v + return s +} + +// SetResourcePolicies sets the ResourcePolicies field's value. +func (s *DescribeResourcePoliciesOutput) SetResourcePolicies(v []*ResourcePolicy) *DescribeResourcePoliciesOutput { + s.ResourcePolicies = v + return s +} + +type DescribeSubscriptionFiltersInput struct { + _ struct{} `type:"structure"` + + // The prefix to match. If you don't specify a value, no prefix filter is applied. + FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` + + // The maximum number of items returned. If you don't specify a value, the default + // is up to 50 items. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeSubscriptionFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSubscriptionFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"} + if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterNamePrefix sets the FilterNamePrefix field's value. +func (s *DescribeSubscriptionFiltersInput) SetFilterNamePrefix(v string) *DescribeSubscriptionFiltersInput { + s.FilterNamePrefix = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeSubscriptionFiltersInput) SetLimit(v int64) *DescribeSubscriptionFiltersInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DescribeSubscriptionFiltersInput) SetLogGroupName(v string) *DescribeSubscriptionFiltersInput { + s.LogGroupName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscriptionFiltersInput) SetNextToken(v string) *DescribeSubscriptionFiltersInput { + s.NextToken = &v + return s +} + +type DescribeSubscriptionFiltersOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The subscription filters. 
+ SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` +} + +// String returns the string representation +func (s DescribeSubscriptionFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionFiltersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscriptionFiltersOutput) SetNextToken(v string) *DescribeSubscriptionFiltersOutput { + s.NextToken = &v + return s +} + +// SetSubscriptionFilters sets the SubscriptionFilters field's value. +func (s *DescribeSubscriptionFiltersOutput) SetSubscriptionFilters(v []*SubscriptionFilter) *DescribeSubscriptionFiltersOutput { + s.SubscriptionFilters = v + return s +} + +// Represents a cross-account destination that receives subscription log events. +type Destination struct { + _ struct{} `type:"structure"` + + // An IAM policy document that governs which AWS accounts can create subscription + // filters against this destination. + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` + + // The ARN of this destination. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the destination, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The name of the destination. + DestinationName *string `locationName:"destinationName" min:"1" type:"string"` + + // A role for impersonation, used when delivering log events to the target. + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the physical target where the log events + // are delivered (for example, a Kinesis stream). 
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// SetAccessPolicy sets the AccessPolicy field's value. +func (s *Destination) SetAccessPolicy(v string) *Destination { + s.AccessPolicy = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *Destination) SetArn(v string) *Destination { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Destination) SetCreationTime(v int64) *Destination { + s.CreationTime = &v + return s +} + +// SetDestinationName sets the DestinationName field's value. +func (s *Destination) SetDestinationName(v string) *Destination { + s.DestinationName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *Destination) SetRoleArn(v string) *Destination { + s.RoleArn = &v + return s +} + +// SetTargetArn sets the TargetArn field's value. +func (s *Destination) SetTargetArn(v string) *Destination { + s.TargetArn = &v + return s +} + +type DisassociateKmsKeyInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateKmsKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateKmsKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateKmsKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateKmsKeyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *DisassociateKmsKeyInput) SetLogGroupName(v string) *DisassociateKmsKeyInput { + s.LogGroupName = &v + return s +} + +type DisassociateKmsKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateKmsKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateKmsKeyOutput) GoString() string { + return s.String() +} + +// Represents an export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket to which the log data was exported. + Destination *string `locationName:"destination" min:"1" type:"string"` + + // The prefix that was used as the start of Amazon S3 key for every object exported. + DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` + + // Execution information about the export task. + ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` + + // The start time, expressed as the number of milliseconds after Jan 1, 1970 + // 00:00:00 UTC. Events with a timestamp before this time are not exported. + From *int64 `locationName:"from" type:"long"` + + // The name of the log group from which logs data was exported. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The status of the export task. + Status *ExportTaskStatus `locationName:"status" type:"structure"` + + // The ID of the export task. 
+ TaskId *string `locationName:"taskId" min:"1" type:"string"` + + // The name of the export task. + TaskName *string `locationName:"taskName" min:"1" type:"string"` + + // The end time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 + // UTC. Events with a timestamp later than this time are not exported. + To *int64 `locationName:"to" type:"long"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *ExportTask) SetDestination(v string) *ExportTask { + s.Destination = &v + return s +} + +// SetDestinationPrefix sets the DestinationPrefix field's value. +func (s *ExportTask) SetDestinationPrefix(v string) *ExportTask { + s.DestinationPrefix = &v + return s +} + +// SetExecutionInfo sets the ExecutionInfo field's value. +func (s *ExportTask) SetExecutionInfo(v *ExportTaskExecutionInfo) *ExportTask { + s.ExecutionInfo = v + return s +} + +// SetFrom sets the From field's value. +func (s *ExportTask) SetFrom(v int64) *ExportTask { + s.From = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *ExportTask) SetLogGroupName(v string) *ExportTask { + s.LogGroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportTask) SetStatus(v *ExportTaskStatus) *ExportTask { + s.Status = v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *ExportTask) SetTaskId(v string) *ExportTask { + s.TaskId = &v + return s +} + +// SetTaskName sets the TaskName field's value. +func (s *ExportTask) SetTaskName(v string) *ExportTask { + s.TaskName = &v + return s +} + +// SetTo sets the To field's value. +func (s *ExportTask) SetTo(v int64) *ExportTask { + s.To = &v + return s +} + +// Represents the status of an export task. 
+type ExportTaskExecutionInfo struct { + _ struct{} `type:"structure"` + + // The completion time of the export task, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CompletionTime *int64 `locationName:"completionTime" type:"long"` + + // The creation time of the export task, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` +} + +// String returns the string representation +func (s ExportTaskExecutionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskExecutionInfo) GoString() string { + return s.String() +} + +// SetCompletionTime sets the CompletionTime field's value. +func (s *ExportTaskExecutionInfo) SetCompletionTime(v int64) *ExportTaskExecutionInfo { + s.CompletionTime = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ExportTaskExecutionInfo) SetCreationTime(v int64) *ExportTaskExecutionInfo { + s.CreationTime = &v + return s +} + +// Represents the status of an export task. +type ExportTaskStatus struct { + _ struct{} `type:"structure"` + + // The status code of the export task. + Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` + + // The status message related to the status code. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportTaskStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ExportTaskStatus) SetCode(v string) *ExportTaskStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *ExportTaskStatus) SetMessage(v string) *ExportTaskStatus { + s.Message = &v + return s +} + +type FilterLogEventsInput struct { + _ struct{} `type:"structure"` + + // The end of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are + // not returned. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The filter pattern to use. For more information, see Filter and Pattern Syntax + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). + // + // If not provided, all the events are matched. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // If the value is true, the operation makes a best effort to provide responses + // that contain events from multiple log streams within the log group, interleaved + // in a single response. If the value is false, all the matched log events in + // the first log stream are searched first, then those in the next log stream, + // and so on. The default is false. + // + // Important: Starting on June 17, 2019, this parameter is ignored and the value + // is assumed to be true. The response from this operation always interleaves + // events from multiple log streams within a log group. + // + // Deprecated: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group. + Interleaved *bool `locationName:"interleaved" deprecated:"true" type:"boolean"` + + // The maximum number of events to return. The default is 10,000 events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group to search. 
+ // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // Filters the results to include only events from log streams that have names + // starting with this prefix. + // + // If you specify a value for both logStreamNamePrefix and logStreamNames, but + // the value for logStreamNamePrefix does not match any log stream names specified + // in logStreamNames, the action returns an InvalidParameterException error. + LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` + + // Filters the results to only logs from the log streams in this list. + // + // If you specify a value for both logStreamNamePrefix and logStreamNames, the + // action returns an InvalidParameterException error. + LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` + + // The token for the next set of events to return. (You received this token + // from a previous call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The start of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not + // returned. + // + // If you omit startTime and endTime the most recent log events are retrieved, + // up to 1 MB or 10,000 log events. + StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s FilterLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FilterLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) + } + if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *FilterLogEventsInput) SetEndTime(v int64) *FilterLogEventsInput { + s.EndTime = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *FilterLogEventsInput) SetFilterPattern(v string) *FilterLogEventsInput { + s.FilterPattern = &v + return s +} + +// SetInterleaved sets the Interleaved field's value. +func (s *FilterLogEventsInput) SetInterleaved(v bool) *FilterLogEventsInput { + s.Interleaved = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *FilterLogEventsInput) SetLimit(v int64) *FilterLogEventsInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *FilterLogEventsInput) SetLogGroupName(v string) *FilterLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamNamePrefix sets the LogStreamNamePrefix field's value. 
+func (s *FilterLogEventsInput) SetLogStreamNamePrefix(v string) *FilterLogEventsInput { + s.LogStreamNamePrefix = &v + return s +} + +// SetLogStreamNames sets the LogStreamNames field's value. +func (s *FilterLogEventsInput) SetLogStreamNames(v []*string) *FilterLogEventsInput { + s.LogStreamNames = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *FilterLogEventsInput) SetNextToken(v string) *FilterLogEventsInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *FilterLogEventsInput) SetStartTime(v int64) *FilterLogEventsInput { + s.StartTime = &v + return s +} + +type FilterLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The matched events. + Events []*FilteredLogEvent `locationName:"events" type:"list"` + + // The token to use when requesting the next set of items. The token expires + // after 24 hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // IMPORTANT Starting on May 15, 2020, this parameter will be deprecated. This + // parameter will be an empty list after the deprecation occurs. + // + // Indicates which log streams have been searched and whether each has been + // searched completely. + SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` +} + +// String returns the string representation +func (s FilterLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterLogEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *FilterLogEventsOutput) SetEvents(v []*FilteredLogEvent) *FilterLogEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *FilterLogEventsOutput) SetNextToken(v string) *FilterLogEventsOutput { + s.NextToken = &v + return s +} + +// SetSearchedLogStreams sets the SearchedLogStreams field's value. 
+func (s *FilterLogEventsOutput) SetSearchedLogStreams(v []*SearchedLogStream) *FilterLogEventsOutput { + s.SearchedLogStreams = v + return s +} + +// Represents a matched event. +type FilteredLogEvent struct { + _ struct{} `type:"structure"` + + // The ID of the event. + EventId *string `locationName:"eventId" type:"string"` + + // The time the event was ingested, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The name of the log stream to which this event belongs. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s FilteredLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilteredLogEvent) GoString() string { + return s.String() +} + +// SetEventId sets the EventId field's value. +func (s *FilteredLogEvent) SetEventId(v string) *FilteredLogEvent { + s.EventId = &v + return s +} + +// SetIngestionTime sets the IngestionTime field's value. +func (s *FilteredLogEvent) SetIngestionTime(v int64) *FilteredLogEvent { + s.IngestionTime = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *FilteredLogEvent) SetLogStreamName(v string) *FilteredLogEvent { + s.LogStreamName = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *FilteredLogEvent) SetMessage(v string) *FilteredLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. 
+func (s *FilteredLogEvent) SetTimestamp(v int64) *FilteredLogEvent { + s.Timestamp = &v + return s +} + +type GetLogEventsInput struct { + _ struct{} `type:"structure"` + + // The end of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to or later than + // this time are not included. + EndTime *int64 `locationName:"endTime" type:"long"` + + // The maximum number of log events returned. If you don't specify a value, + // the maximum is as many log events as can fit in a response size of 1 MB, + // up to 10,000 log events. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + // + // Using this token works only when you specify true for startFromHead. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // If the value is true, the earliest log events are returned first. If the + // value is false, the latest log events are returned first. The default value + // is false. + // + // If you are using nextToken in this operation, you must specify true for startFromHead. + StartFromHead *bool `locationName:"startFromHead" type:"boolean"` + + // The start of the time range, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to this time or later + // than this time are included. Events with a timestamp earlier than this time + // are not included. 
+ StartTime *int64 `locationName:"startTime" type:"long"` +} + +// String returns the string representation +func (s GetLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *GetLogEventsInput) SetEndTime(v int64) *GetLogEventsInput { + s.EndTime = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *GetLogEventsInput) SetLimit(v int64) *GetLogEventsInput { + s.Limit = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *GetLogEventsInput) SetLogGroupName(v string) *GetLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *GetLogEventsInput) SetLogStreamName(v string) *GetLogEventsInput { + s.LogStreamName = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetLogEventsInput) SetNextToken(v string) *GetLogEventsInput { + s.NextToken = &v + return s +} + +// SetStartFromHead sets the StartFromHead field's value. +func (s *GetLogEventsInput) SetStartFromHead(v bool) *GetLogEventsInput { + s.StartFromHead = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetLogEventsInput) SetStartTime(v int64) *GetLogEventsInput { + s.StartTime = &v + return s +} + +type GetLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The events. + Events []*OutputLogEvent `locationName:"events" type:"list"` + + // The token for the next set of items in the backward direction. The token + // expires after 24 hours. This token is never null. If you have reached the + // end of the stream, it returns the same token you passed in. + NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` + + // The token for the next set of items in the forward direction. The token expires + // after 24 hours. If you have reached the end of the stream, it returns the + // same token you passed in. + NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s GetLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *GetLogEventsOutput) SetEvents(v []*OutputLogEvent) *GetLogEventsOutput { + s.Events = v + return s +} + +// SetNextBackwardToken sets the NextBackwardToken field's value. +func (s *GetLogEventsOutput) SetNextBackwardToken(v string) *GetLogEventsOutput { + s.NextBackwardToken = &v + return s +} + +// SetNextForwardToken sets the NextForwardToken field's value. 
+func (s *GetLogEventsOutput) SetNextForwardToken(v string) *GetLogEventsOutput { + s.NextForwardToken = &v + return s +} + +type GetLogGroupFieldsInput struct { + _ struct{} `type:"structure"` + + // The name of the log group to search. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The time to set as the center of the query. If you specify time, the 8 minutes + // before and 8 minutes after this time are searched. If you omit time, the + // past 15 minutes are queried. + // + // The time value is specified as epoch time, the number of seconds since January + // 1, 1970, 00:00:00 UTC. + Time *int64 `locationName:"time" type:"long"` +} + +// String returns the string representation +func (s GetLogGroupFieldsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogGroupFieldsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogGroupFieldsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogGroupFieldsInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *GetLogGroupFieldsInput) SetLogGroupName(v string) *GetLogGroupFieldsInput { + s.LogGroupName = &v + return s +} + +// SetTime sets the Time field's value. +func (s *GetLogGroupFieldsInput) SetTime(v int64) *GetLogGroupFieldsInput { + s.Time = &v + return s +} + +type GetLogGroupFieldsOutput struct { + _ struct{} `type:"structure"` + + // The array of fields found in the query. 
Each object in the array contains + // the name of the field, along with the percentage of time it appeared in the + // log events that were queried. + LogGroupFields []*LogGroupField `locationName:"logGroupFields" type:"list"` +} + +// String returns the string representation +func (s GetLogGroupFieldsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogGroupFieldsOutput) GoString() string { + return s.String() +} + +// SetLogGroupFields sets the LogGroupFields field's value. +func (s *GetLogGroupFieldsOutput) SetLogGroupFields(v []*LogGroupField) *GetLogGroupFieldsOutput { + s.LogGroupFields = v + return s +} + +type GetLogRecordInput struct { + _ struct{} `type:"structure"` + + // The pointer corresponding to the log event record you want to retrieve. You + // get this from the response of a GetQueryResults operation. In that response, + // the value of the @ptr field for a log event is the value to use as logRecordPointer + // to retrieve that complete log event record. + // + // LogRecordPointer is a required field + LogRecordPointer *string `locationName:"logRecordPointer" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLogRecordInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogRecordInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLogRecordInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLogRecordInput"} + if s.LogRecordPointer == nil { + invalidParams.Add(request.NewErrParamRequired("LogRecordPointer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogRecordPointer sets the LogRecordPointer field's value. 
+func (s *GetLogRecordInput) SetLogRecordPointer(v string) *GetLogRecordInput { + s.LogRecordPointer = &v + return s +} + +type GetLogRecordOutput struct { + _ struct{} `type:"structure"` + + // The requested log event, as a JSON string. + LogRecord map[string]*string `locationName:"logRecord" type:"map"` +} + +// String returns the string representation +func (s GetLogRecordOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLogRecordOutput) GoString() string { + return s.String() +} + +// SetLogRecord sets the LogRecord field's value. +func (s *GetLogRecordOutput) SetLogRecord(v map[string]*string) *GetLogRecordOutput { + s.LogRecord = v + return s +} + +type GetQueryResultsInput struct { + _ struct{} `type:"structure"` + + // The ID number of the query. + // + // QueryId is a required field + QueryId *string `locationName:"queryId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetQueryResultsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueryResultsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueryResultsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} + if s.QueryId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryId sets the QueryId field's value. +func (s *GetQueryResultsInput) SetQueryId(v string) *GetQueryResultsInput { + s.QueryId = &v + return s +} + +type GetQueryResultsOutput struct { + _ struct{} `type:"structure"` + + // The log events that matched the query criteria during the most recent time + // it ran. + // + // The results value is an array of arrays. 
Each log event is one object in + // the top-level array. Each of these log event objects is an array of field/value + // pairs. + Results [][]*ResultField `locationName:"results" type:"list"` + + // Includes the number of log events scanned by the query, the number of log + // events that matched the query criteria, and the total number of bytes in + // the log events that were scanned. These values reflect the full raw results + // of the query. + Statistics *QueryStatistics `locationName:"statistics" type:"structure"` + + // The status of the most recent running of the query. Possible values are Cancelled, + // Complete, Failed, Running, Scheduled, Timeout, and Unknown. + // + // Queries time out after 15 minutes of execution. To avoid having your queries + // time out, reduce the time range being searched or partition your query into + // a number of queries. + Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation +func (s GetQueryResultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueryResultsOutput) GoString() string { + return s.String() +} + +// SetResults sets the Results field's value. +func (s *GetQueryResultsOutput) SetResults(v [][]*ResultField) *GetQueryResultsOutput { + s.Results = v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *GetQueryResultsOutput) SetStatistics(v *QueryStatistics) *GetQueryResultsOutput { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetQueryResultsOutput) SetStatus(v string) *GetQueryResultsOutput { + s.Status = &v + return s +} + +// Represents a log event, which is a record of activity that was recorded by +// the application or resource being monitored. +type InputLogEvent struct { + _ struct{} `type:"structure"` + + // The raw event message. 
+ // + // Message is a required field + Message *string `locationName:"message" min:"1" type:"string" required:"true"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + // + // Timestamp is a required field + Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"` +} + +// String returns the string representation +func (s InputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputLogEvent) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputLogEvent) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Message != nil && len(*s.Message) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Message", 1)) + } + if s.Timestamp == nil { + invalidParams.Add(request.NewErrParamRequired("Timestamp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMessage sets the Message field's value. +func (s *InputLogEvent) SetMessage(v string) *InputLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *InputLogEvent) SetTimestamp(v int64) *InputLogEvent { + s.Timestamp = &v + return s +} + +// The operation is not valid on the specified resource. 
+type InvalidOperationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidOperationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidOperationException) GoString() string { + return s.String() +} + +func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { + return &InvalidOperationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidOperationException) Code() string { + return "InvalidOperationException" +} + +// Message returns the exception's message. +func (s *InvalidOperationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidOperationException) OrigErr() error { + return nil +} + +func (s *InvalidOperationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A parameter is specified incorrectly. 
+type InvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidParameterException) GoString() string { + return s.String() +} + +func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { + return &InvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidParameterException) Code() string { + return "InvalidParameterException" +} + +// Message returns the exception's message. +func (s *InvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidParameterException) OrigErr() error { + return nil +} + +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The sequence token is not valid. You can get the correct sequence token in +// the expectedSequenceToken field in the InvalidSequenceTokenException message. 
+type InvalidSequenceTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidSequenceTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidSequenceTokenException) GoString() string { + return s.String() +} + +func newErrorInvalidSequenceTokenException(v protocol.ResponseMetadata) error { + return &InvalidSequenceTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidSequenceTokenException) Code() string { + return "InvalidSequenceTokenException" +} + +// Message returns the exception's message. +func (s *InvalidSequenceTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidSequenceTokenException) OrigErr() error { + return nil +} + +func (s *InvalidSequenceTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidSequenceTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidSequenceTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You have reached the maximum number of resources that can be created. 
+type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LimitExceededException) GoString() string { + return s.String() +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListTagsLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. 
+ // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *ListTagsLogGroupInput) SetLogGroupName(v string) *ListTagsLogGroupInput { + s.LogGroupName = &v + return s +} + +type ListTagsLogGroupOutput struct { + _ struct{} `type:"structure"` + + // The tags for the log group. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsLogGroupOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsLogGroupOutput) SetTags(v map[string]*string) *ListTagsLogGroupOutput { + s.Tags = v + return s +} + +// Represents a log group. +type LogGroup struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the log group. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the log group, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. 
+ CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The number of metric filters. + MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"` + + // The number of days to retain the log events in the specified log group. Possible + // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, + // 1827, and 3653. + // + // If you omit retentionInDays in a PutRetentionPolicy operation, the events + // in the log group are always retained and never expire. + RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"` + + // The number of bytes stored. + StoredBytes *int64 `locationName:"storedBytes" type:"long"` +} + +// String returns the string representation +func (s LogGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogGroup) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LogGroup) SetArn(v string) *LogGroup { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *LogGroup) SetCreationTime(v int64) *LogGroup { + s.CreationTime = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *LogGroup) SetKmsKeyId(v string) *LogGroup { + s.KmsKeyId = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *LogGroup) SetLogGroupName(v string) *LogGroup { + s.LogGroupName = &v + return s +} + +// SetMetricFilterCount sets the MetricFilterCount field's value. +func (s *LogGroup) SetMetricFilterCount(v int64) *LogGroup { + s.MetricFilterCount = &v + return s +} + +// SetRetentionInDays sets the RetentionInDays field's value. 
+func (s *LogGroup) SetRetentionInDays(v int64) *LogGroup { + s.RetentionInDays = &v + return s +} + +// SetStoredBytes sets the StoredBytes field's value. +func (s *LogGroup) SetStoredBytes(v int64) *LogGroup { + s.StoredBytes = &v + return s +} + +// The fields contained in log events found by a GetLogGroupFields operation, +// along with the percentage of queried log events in which each field appears. +type LogGroupField struct { + _ struct{} `type:"structure"` + + // The name of a log field. + Name *string `locationName:"name" type:"string"` + + // The percentage of log events queried that contained the field. + Percent *int64 `locationName:"percent" type:"integer"` +} + +// String returns the string representation +func (s LogGroupField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogGroupField) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LogGroupField) SetName(v string) *LogGroupField { + s.Name = &v + return s +} + +// SetPercent sets the Percent field's value. +func (s *LogGroupField) SetPercent(v int64) *LogGroupField { + s.Percent = &v + return s +} + +// Represents a log stream, which is a sequence of log events from a single +// emitter of logs. +type LogStream struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the log stream. + Arn *string `locationName:"arn" type:"string"` + + // The creation time of the stream, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The time of the first event, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. + FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"` + + // The time of the most recent log event in the log stream in CloudWatch Logs. 
+ // This number is expressed as the number of milliseconds after Jan 1, 1970 + // 00:00:00 UTC. The lastEventTime value updates on an eventual consistency + // basis. It typically updates in less than an hour from ingestion, but in rare + // situations might take longer. + LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` + + // The ingestion time, expressed as the number of milliseconds after Jan 1, + // 1970 00:00:00 UTC. + LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` + + // The name of the log stream. + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` + + // The number of bytes stored. + // + // Important: On June 17, 2019, this parameter was deprecated for log streams, + // and is always reported as zero. This change applies only to log streams. + // The storedBytes parameter for log groups is not affected. + // + // Deprecated: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. This change applies only to log streams. The storedBytes parameter for log groups is not affected. + StoredBytes *int64 `locationName:"storedBytes" deprecated:"true" type:"long"` + + // The sequence token. + UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s LogStream) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogStream) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LogStream) SetArn(v string) *LogStream { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *LogStream) SetCreationTime(v int64) *LogStream { + s.CreationTime = &v + return s +} + +// SetFirstEventTimestamp sets the FirstEventTimestamp field's value. 
+func (s *LogStream) SetFirstEventTimestamp(v int64) *LogStream { + s.FirstEventTimestamp = &v + return s +} + +// SetLastEventTimestamp sets the LastEventTimestamp field's value. +func (s *LogStream) SetLastEventTimestamp(v int64) *LogStream { + s.LastEventTimestamp = &v + return s +} + +// SetLastIngestionTime sets the LastIngestionTime field's value. +func (s *LogStream) SetLastIngestionTime(v int64) *LogStream { + s.LastIngestionTime = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *LogStream) SetLogStreamName(v string) *LogStream { + s.LogStreamName = &v + return s +} + +// SetStoredBytes sets the StoredBytes field's value. +func (s *LogStream) SetStoredBytes(v int64) *LogStream { + s.StoredBytes = &v + return s +} + +// SetUploadSequenceToken sets the UploadSequenceToken field's value. +func (s *LogStream) SetUploadSequenceToken(v string) *LogStream { + s.UploadSequenceToken = &v + return s +} + +// The query string is not valid. Details about this error are displayed in +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). +// +// For more information about valid query syntax, see CloudWatch Logs Insights +// Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). +type MalformedQueryException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + // Reserved. 
+ QueryCompileError *QueryCompileError `locationName:"queryCompileError" type:"structure"` +} + +// String returns the string representation +func (s MalformedQueryException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MalformedQueryException) GoString() string { + return s.String() +} + +func newErrorMalformedQueryException(v protocol.ResponseMetadata) error { + return &MalformedQueryException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *MalformedQueryException) Code() string { + return "MalformedQueryException" +} + +// Message returns the exception's message. +func (s *MalformedQueryException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MalformedQueryException) OrigErr() error { + return nil +} + +func (s *MalformedQueryException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MalformedQueryException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MalformedQueryException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Metric filters express how CloudWatch Logs would extract metric observations +// from ingested log events and transform them into metric data in a CloudWatch +// metric. +type MetricFilter struct { + _ struct{} `type:"structure"` + + // The creation time of the metric filter, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The name of the metric filter. 
+ FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The metric transformations. + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilter) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *MetricFilter) SetCreationTime(v int64) *MetricFilter { + s.CreationTime = &v + return s +} + +// SetFilterName sets the FilterName field's value. +func (s *MetricFilter) SetFilterName(v string) *MetricFilter { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *MetricFilter) SetFilterPattern(v string) *MetricFilter { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *MetricFilter) SetLogGroupName(v string) *MetricFilter { + s.LogGroupName = &v + return s +} + +// SetMetricTransformations sets the MetricTransformations field's value. +func (s *MetricFilter) SetMetricTransformations(v []*MetricTransformation) *MetricFilter { + s.MetricTransformations = v + return s +} + +// Represents a matched event. +type MetricFilterMatchRecord struct { + _ struct{} `type:"structure"` + + // The raw event data. + EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` + + // The event number. 
+ EventNumber *int64 `locationName:"eventNumber" type:"long"` + + // The values extracted from the event data by the filter. + ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` +} + +// String returns the string representation +func (s MetricFilterMatchRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricFilterMatchRecord) GoString() string { + return s.String() +} + +// SetEventMessage sets the EventMessage field's value. +func (s *MetricFilterMatchRecord) SetEventMessage(v string) *MetricFilterMatchRecord { + s.EventMessage = &v + return s +} + +// SetEventNumber sets the EventNumber field's value. +func (s *MetricFilterMatchRecord) SetEventNumber(v int64) *MetricFilterMatchRecord { + s.EventNumber = &v + return s +} + +// SetExtractedValues sets the ExtractedValues field's value. +func (s *MetricFilterMatchRecord) SetExtractedValues(v map[string]*string) *MetricFilterMatchRecord { + s.ExtractedValues = v + return s +} + +// Indicates how to transform ingested log events to metric data in a CloudWatch +// metric. +type MetricTransformation struct { + _ struct{} `type:"structure"` + + // (Optional) The value to emit when a filter pattern does not match a log event. + // This value can be null. + DefaultValue *float64 `locationName:"defaultValue" type:"double"` + + // The name of the CloudWatch metric. + // + // MetricName is a required field + MetricName *string `locationName:"metricName" type:"string" required:"true"` + + // A custom namespace to contain your metric in CloudWatch. Use namespaces to + // group together metrics that are similar. For more information, see Namespaces + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Namespace). 
+ // + // MetricNamespace is a required field + MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` + + // The value to publish to the CloudWatch metric when a filter pattern matches + // a log event. + // + // MetricValue is a required field + MetricValue *string `locationName:"metricValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricTransformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricTransformation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricTransformation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("MetricNamespace")) + } + if s.MetricValue == nil { + invalidParams.Add(request.NewErrParamRequired("MetricValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultValue sets the DefaultValue field's value. +func (s *MetricTransformation) SetDefaultValue(v float64) *MetricTransformation { + s.DefaultValue = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *MetricTransformation) SetMetricName(v string) *MetricTransformation { + s.MetricName = &v + return s +} + +// SetMetricNamespace sets the MetricNamespace field's value. +func (s *MetricTransformation) SetMetricNamespace(v string) *MetricTransformation { + s.MetricNamespace = &v + return s +} + +// SetMetricValue sets the MetricValue field's value. +func (s *MetricTransformation) SetMetricValue(v string) *MetricTransformation { + s.MetricValue = &v + return s +} + +// Multiple requests to update the same resource were in conflict. 
+type OperationAbortedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s OperationAbortedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OperationAbortedException) GoString() string { + return s.String() +} + +func newErrorOperationAbortedException(v protocol.ResponseMetadata) error { + return &OperationAbortedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OperationAbortedException) Code() string { + return "OperationAbortedException" +} + +// Message returns the exception's message. +func (s *OperationAbortedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OperationAbortedException) OrigErr() error { + return nil +} + +func (s *OperationAbortedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OperationAbortedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OperationAbortedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents a log event. +type OutputLogEvent struct { + _ struct{} `type:"structure"` + + // The time the event was ingested, expressed as the number of milliseconds + // after Jan 1, 1970 00:00:00 UTC. + IngestionTime *int64 `locationName:"ingestionTime" type:"long"` + + // The data contained in the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // The time the event occurred, expressed as the number of milliseconds after + // Jan 1, 1970 00:00:00 UTC. 
+ Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation +func (s OutputLogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLogEvent) GoString() string { + return s.String() +} + +// SetIngestionTime sets the IngestionTime field's value. +func (s *OutputLogEvent) SetIngestionTime(v int64) *OutputLogEvent { + s.IngestionTime = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *OutputLogEvent) SetMessage(v string) *OutputLogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *OutputLogEvent) SetTimestamp(v int64) *OutputLogEvent { + s.Timestamp = &v + return s +} + +type PutDestinationInput struct { + _ struct{} `type:"structure"` + + // A name for the destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to call the + // Amazon Kinesis PutRecord operation on the destination stream. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` + + // The ARN of an Amazon Kinesis stream to which to deliver matching log events. + // + // TargetArn is a required field + TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"} + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.TargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetArn")) + } + if s.TargetArn != nil && len(*s.TargetArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationName sets the DestinationName field's value. +func (s *PutDestinationInput) SetDestinationName(v string) *PutDestinationInput { + s.DestinationName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *PutDestinationInput) SetRoleArn(v string) *PutDestinationInput { + s.RoleArn = &v + return s +} + +// SetTargetArn sets the TargetArn field's value. +func (s *PutDestinationInput) SetTargetArn(v string) *PutDestinationInput { + s.TargetArn = &v + return s +} + +type PutDestinationOutput struct { + _ struct{} `type:"structure"` + + // The destination. + Destination *Destination `locationName:"destination" type:"structure"` +} + +// String returns the string representation +func (s PutDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationOutput) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. 
+func (s *PutDestinationOutput) SetDestination(v *Destination) *PutDestinationOutput { + s.Destination = v + return s +} + +type PutDestinationPolicyInput struct { + _ struct{} `type:"structure"` + + // An IAM policy document that authorizes cross-account users to deliver their + // log events to the associated destination. This can be up to 5120 bytes. + // + // AccessPolicy is a required field + AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` + + // A name for an existing destination. + // + // DestinationName is a required field + DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDestinationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDestinationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"} + if s.AccessPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("AccessPolicy")) + } + if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1)) + } + if s.DestinationName == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationName")) + } + if s.DestinationName != nil && len(*s.DestinationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPolicy sets the AccessPolicy field's value. +func (s *PutDestinationPolicyInput) SetAccessPolicy(v string) *PutDestinationPolicyInput { + s.AccessPolicy = &v + return s +} + +// SetDestinationName sets the DestinationName field's value. 
+func (s *PutDestinationPolicyInput) SetDestinationName(v string) *PutDestinationPolicyInput { + s.DestinationName = &v + return s +} + +type PutDestinationPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDestinationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutDestinationPolicyOutput) GoString() string { + return s.String() +} + +type PutLogEventsInput struct { + _ struct{} `type:"structure"` + + // The log events. + // + // LogEvents is a required field + LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The name of the log stream. + // + // LogStreamName is a required field + LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` + + // The sequence token obtained from the response of the previous PutLogEvents + // call. An upload in a newly created log stream does not require a sequence + // token. You can also get the sequence token using DescribeLogStreams (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html). + // If you call PutLogEvents twice within a narrow time period using the same + // value for sequenceToken, both calls might be successful or one might be rejected. + SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutLogEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutLogEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} + if s.LogEvents == nil { + invalidParams.Add(request.NewErrParamRequired("LogEvents")) + } + if s.LogEvents != nil && len(s.LogEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.LogStreamName == nil { + invalidParams.Add(request.NewErrParamRequired("LogStreamName")) + } + if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) + } + if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) + } + if s.LogEvents != nil { + for i, v := range s.LogEvents { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogEvents sets the LogEvents field's value. +func (s *PutLogEventsInput) SetLogEvents(v []*InputLogEvent) *PutLogEventsInput { + s.LogEvents = v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutLogEventsInput) SetLogGroupName(v string) *PutLogEventsInput { + s.LogGroupName = &v + return s +} + +// SetLogStreamName sets the LogStreamName field's value. +func (s *PutLogEventsInput) SetLogStreamName(v string) *PutLogEventsInput { + s.LogStreamName = &v + return s +} + +// SetSequenceToken sets the SequenceToken field's value. 
+func (s *PutLogEventsInput) SetSequenceToken(v string) *PutLogEventsInput { + s.SequenceToken = &v + return s +} + +type PutLogEventsOutput struct { + _ struct{} `type:"structure"` + + // The next sequence token. + NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` + + // The rejected events. + RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` +} + +// String returns the string representation +func (s PutLogEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLogEventsOutput) GoString() string { + return s.String() +} + +// SetNextSequenceToken sets the NextSequenceToken field's value. +func (s *PutLogEventsOutput) SetNextSequenceToken(v string) *PutLogEventsOutput { + s.NextSequenceToken = &v + return s +} + +// SetRejectedLogEventsInfo sets the RejectedLogEventsInfo field's value. +func (s *PutLogEventsOutput) SetRejectedLogEventsInfo(v *RejectedLogEventsInfo) *PutLogEventsOutput { + s.RejectedLogEventsInfo = v + return s +} + +type PutMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A name for the metric filter. + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A filter pattern for extracting metric data out of ingested log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // A collection of information that defines how metric data gets emitted. 
+ // + // MetricTransformations is a required field + MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.MetricTransformations == nil { + invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) + } + if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) + } + if s.MetricTransformations != nil { + for i, v := range s.MetricTransformations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterName sets the FilterName field's value. 
+func (s *PutMetricFilterInput) SetFilterName(v string) *PutMetricFilterInput { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *PutMetricFilterInput) SetFilterPattern(v string) *PutMetricFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutMetricFilterInput) SetLogGroupName(v string) *PutMetricFilterInput { + s.LogGroupName = &v + return s +} + +// SetMetricTransformations sets the MetricTransformations field's value. +func (s *PutMetricFilterInput) SetMetricTransformations(v []*MetricTransformation) *PutMetricFilterInput { + s.MetricTransformations = v + return s +} + +type PutMetricFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricFilterOutput) GoString() string { + return s.String() +} + +type PutQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // Use this parameter to include specific log groups as part of your query definition. + // + // If you are updating a query definition and you omit this parameter, then + // the updated definition will contain no log groups. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // A name for the query definition. If you are saving a lot of query definitions, + // we recommend that you name them so that you can easily find the ones you + // want by using the first part of the name as a filter in the queryDefinitionNamePrefix + // parameter of DescribeQueryDefinitions (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html). 
+ // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // If you are updating a query definition, use this parameter to specify the + // ID of the query definition that you want to update. You can use DescribeQueryDefinitions + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html) + // to retrieve the IDs of your saved query definitions. + // + // If you are creating a query definition, do not specify this parameter. CloudWatch + // generates a unique ID for the new query definition and include it in the + // response to this operation. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` + + // The query string to use for this definition. For more information, see CloudWatch + // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutQueryDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutQueryDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutQueryDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutQueryDefinitionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *PutQueryDefinitionInput) SetLogGroupNames(v []*string) *PutQueryDefinitionInput { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *PutQueryDefinitionInput) SetName(v string) *PutQueryDefinitionInput { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *PutQueryDefinitionInput) SetQueryDefinitionId(v string) *PutQueryDefinitionInput { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *PutQueryDefinitionInput) SetQueryString(v string) *PutQueryDefinitionInput { + s.QueryString = &v + return s +} + +type PutQueryDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` +} + +// String returns the string representation +func (s PutQueryDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutQueryDefinitionOutput) GoString() string { + return s.String() +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. 
func (s *PutQueryDefinitionOutput) SetQueryDefinitionId(v string) *PutQueryDefinitionOutput {
	s.QueryDefinitionId = &v
	return s
}

// PutResourcePolicyInput is the request shape for the PutResourcePolicy API
// operation.
type PutResourcePolicyInput struct {
	_ struct{} `type:"structure"`

	// Details of the new policy, including the identity of the principal that is
	// enabled to put logs to this account. This is formatted as a JSON string.
	// This parameter is required.
	//
	// The following example creates a resource policy enabling the Route 53 service
	// to put DNS query logs in to the specified log group. Replace "logArn" with
	// the ARN of your CloudWatch Logs resource, such as a log group or log stream.
	//
	// { "Version": "2012-10-17", "Statement": [ { "Sid": "Route53LogsToCloudWatchLogs",
	// "Effect": "Allow", "Principal": { "Service": [ "route53.amazonaws.com" ] }
	// }, "Action":"logs:PutLogEvents", "Resource": "logArn" } ] }
	//
	// NOTE(review): although the prose above says this parameter is required, the
	// struct tag carries no required:"true" and Validate only checks the minimum
	// length, so presence is enforced by the service, not client-side.
	PolicyDocument *string `locationName:"policyDocument" min:"1" type:"string"`

	// Name of the new policy. This parameter is required.
	//
	// NOTE(review): as with PolicyDocument, presence is not enforced client-side.
	PolicyName *string `locationName:"policyName" type:"string"`
}

// String returns the string representation
func (s PutResourcePolicyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutResourcePolicyInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *PutResourcePolicyInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"}
	// Only the min-length constraint on PolicyDocument is checked client-side.
	if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPolicyDocument sets the PolicyDocument field's value.
func (s *PutResourcePolicyInput) SetPolicyDocument(v string) *PutResourcePolicyInput {
	s.PolicyDocument = &v
	return s
}

// SetPolicyName sets the PolicyName field's value.
// It returns the receiver so calls can be chained.
func (s *PutResourcePolicyInput) SetPolicyName(v string) *PutResourcePolicyInput {
	s.PolicyName = &v
	return s
}

// PutResourcePolicyOutput is the response shape for the PutResourcePolicy API
// operation.
type PutResourcePolicyOutput struct {
	_ struct{} `type:"structure"`

	// The new policy.
	ResourcePolicy *ResourcePolicy `locationName:"resourcePolicy" type:"structure"`
}

// String returns the string representation
func (s PutResourcePolicyOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutResourcePolicyOutput) GoString() string {
	return s.String()
}

// SetResourcePolicy sets the ResourcePolicy field's value.
// It returns the receiver so calls can be chained.
func (s *PutResourcePolicyOutput) SetResourcePolicy(v *ResourcePolicy) *PutResourcePolicyOutput {
	s.ResourcePolicy = v
	return s
}

// PutRetentionPolicyInput is the request shape for the PutRetentionPolicy API
// operation.
type PutRetentionPolicyInput struct {
	_ struct{} `type:"structure"`

	// The name of the log group.
	//
	// LogGroupName is a required field
	LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"`

	// The number of days to retain the log events in the specified log group. Possible
	// values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731,
	// 1827, and 3653.
	//
	// If you omit retentionInDays in a PutRetentionPolicy operation, the events
	// in the log group are always retained and never expire.
	//
	// RetentionInDays is a required field
	RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"`
}

// String returns the string representation
func (s PutRetentionPolicyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PutRetentionPolicyInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RetentionInDays == nil { + invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutRetentionPolicyInput) SetLogGroupName(v string) *PutRetentionPolicyInput { + s.LogGroupName = &v + return s +} + +// SetRetentionInDays sets the RetentionInDays field's value. +func (s *PutRetentionPolicyInput) SetRetentionInDays(v int64) *PutRetentionPolicyInput { + s.RetentionInDays = &v + return s +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + +type PutSubscriptionFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the destination to deliver matching log events to. Currently, + // the supported destinations are: + // + // * An Amazon Kinesis stream belonging to the same account as the subscription + // filter, for same-account delivery. + // + // * A logical destination (specified using an ARN) belonging to a different + // account, for cross-account delivery. + // + // * An Amazon Kinesis Firehose delivery stream belonging to the same account + // as the subscription filter, for same-account delivery. + // + // * An AWS Lambda function belonging to the same account as the subscription + // filter, for same-account delivery. 
+ // + // DestinationArn is a required field + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` + + // The method used to distribute log data to the destination. By default, log + // data is grouped by log stream, but the grouping can be set to random for + // a more even distribution. This property is only applicable when the destination + // is an Amazon Kinesis stream. + Distribution *string `locationName:"distribution" type:"string" enum:"Distribution"` + + // A name for the subscription filter. If you are updating an existing filter, + // you must specify the correct name in filterName. Otherwise, the call fails + // because you cannot associate a second filter with a log group. To find the + // name of the filter currently associated with a log group, use DescribeSubscriptionFilters + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeSubscriptionFilters.html). + // + // FilterName is a required field + FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` + + // A filter pattern for subscribing to a filtered stream of log events. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver + // ingested log events to the destination stream. You don't need to provide + // the ARN when you are working with a logical destination for cross-account + // delivery. 
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s PutSubscriptionFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutSubscriptionFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} + if s.DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationArn")) + } + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + if s.FilterName != nil && len(*s.FilterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) + } + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *PutSubscriptionFilterInput) SetDestinationArn(v string) *PutSubscriptionFilterInput { + s.DestinationArn = &v + return s +} + +// SetDistribution sets the Distribution field's value. +func (s *PutSubscriptionFilterInput) SetDistribution(v string) *PutSubscriptionFilterInput { + s.Distribution = &v + return s +} + +// SetFilterName sets the FilterName field's value. 
+func (s *PutSubscriptionFilterInput) SetFilterName(v string) *PutSubscriptionFilterInput { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *PutSubscriptionFilterInput) SetFilterPattern(v string) *PutSubscriptionFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *PutSubscriptionFilterInput) SetLogGroupName(v string) *PutSubscriptionFilterInput { + s.LogGroupName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *PutSubscriptionFilterInput) SetRoleArn(v string) *PutSubscriptionFilterInput { + s.RoleArn = &v + return s +} + +type PutSubscriptionFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutSubscriptionFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutSubscriptionFilterOutput) GoString() string { + return s.String() +} + +// Reserved. +type QueryCompileError struct { + _ struct{} `type:"structure"` + + // Reserved. + Location *QueryCompileErrorLocation `locationName:"location" type:"structure"` + + // Reserved. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s QueryCompileError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryCompileError) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *QueryCompileError) SetLocation(v *QueryCompileErrorLocation) *QueryCompileError { + s.Location = v + return s +} + +// SetMessage sets the Message field's value. +func (s *QueryCompileError) SetMessage(v string) *QueryCompileError { + s.Message = &v + return s +} + +// Reserved. +type QueryCompileErrorLocation struct { + _ struct{} `type:"structure"` + + // Reserved. 
+ EndCharOffset *int64 `locationName:"endCharOffset" type:"integer"` + + // Reserved. + StartCharOffset *int64 `locationName:"startCharOffset" type:"integer"` +} + +// String returns the string representation +func (s QueryCompileErrorLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryCompileErrorLocation) GoString() string { + return s.String() +} + +// SetEndCharOffset sets the EndCharOffset field's value. +func (s *QueryCompileErrorLocation) SetEndCharOffset(v int64) *QueryCompileErrorLocation { + s.EndCharOffset = &v + return s +} + +// SetStartCharOffset sets the StartCharOffset field's value. +func (s *QueryCompileErrorLocation) SetStartCharOffset(v int64) *QueryCompileErrorLocation { + s.StartCharOffset = &v + return s +} + +// This structure contains details about a saved CloudWatch Logs Insights query +// definition. +type QueryDefinition struct { + _ struct{} `type:"structure"` + + // The date that the query definition was most recently modified. + LastModified *int64 `locationName:"lastModified" type:"long"` + + // If this query definition contains a list of log groups that it is limited + // to, that list appears here. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // The name of the query definition. + Name *string `locationName:"name" min:"1" type:"string"` + + // The unique ID of the query definition. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` + + // The query string to use for this definition. For more information, see CloudWatch + // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
+ QueryString *string `locationName:"queryString" min:"1" type:"string"` +} + +// String returns the string representation +func (s QueryDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryDefinition) GoString() string { + return s.String() +} + +// SetLastModified sets the LastModified field's value. +func (s *QueryDefinition) SetLastModified(v int64) *QueryDefinition { + s.LastModified = &v + return s +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *QueryDefinition) SetLogGroupNames(v []*string) *QueryDefinition { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *QueryDefinition) SetName(v string) *QueryDefinition { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *QueryDefinition) SetQueryDefinitionId(v string) *QueryDefinition { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *QueryDefinition) SetQueryString(v string) *QueryDefinition { + s.QueryString = &v + return s +} + +// Information about one CloudWatch Logs Insights query that matches the request +// in a DescribeQueries operation. +type QueryInfo struct { + _ struct{} `type:"structure"` + + // The date and time that this query was created. + CreateTime *int64 `locationName:"createTime" type:"long"` + + // The name of the log group scanned by this query. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The unique ID number of this query. + QueryId *string `locationName:"queryId" type:"string"` + + // The query string used in this query. + QueryString *string `locationName:"queryString" type:"string"` + + // The status of this query. Possible values are Cancelled, Complete, Failed, + // Running, Scheduled, and Unknown. 
+ Status *string `locationName:"status" type:"string" enum:"QueryStatus"` +} + +// String returns the string representation +func (s QueryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryInfo) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *QueryInfo) SetCreateTime(v int64) *QueryInfo { + s.CreateTime = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *QueryInfo) SetLogGroupName(v string) *QueryInfo { + s.LogGroupName = &v + return s +} + +// SetQueryId sets the QueryId field's value. +func (s *QueryInfo) SetQueryId(v string) *QueryInfo { + s.QueryId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *QueryInfo) SetQueryString(v string) *QueryInfo { + s.QueryString = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *QueryInfo) SetStatus(v string) *QueryInfo { + s.Status = &v + return s +} + +// Contains the number of log events scanned by the query, the number of log +// events that matched the query criteria, and the total number of bytes in +// the log events that were scanned. +type QueryStatistics struct { + _ struct{} `type:"structure"` + + // The total number of bytes in the log events scanned during the query. + BytesScanned *float64 `locationName:"bytesScanned" type:"double"` + + // The number of log events that matched the query string. + RecordsMatched *float64 `locationName:"recordsMatched" type:"double"` + + // The total number of log events scanned during the query. 
+ RecordsScanned *float64 `locationName:"recordsScanned" type:"double"` +} + +// String returns the string representation +func (s QueryStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryStatistics) GoString() string { + return s.String() +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *QueryStatistics) SetBytesScanned(v float64) *QueryStatistics { + s.BytesScanned = &v + return s +} + +// SetRecordsMatched sets the RecordsMatched field's value. +func (s *QueryStatistics) SetRecordsMatched(v float64) *QueryStatistics { + s.RecordsMatched = &v + return s +} + +// SetRecordsScanned sets the RecordsScanned field's value. +func (s *QueryStatistics) SetRecordsScanned(v float64) *QueryStatistics { + s.RecordsScanned = &v + return s +} + +// Represents the rejected events. +type RejectedLogEventsInfo struct { + _ struct{} `type:"structure"` + + // The expired log events. + ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` + + // The log events that are too new. + TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` + + // The log events that are too old. + TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` +} + +// String returns the string representation +func (s RejectedLogEventsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectedLogEventsInfo) GoString() string { + return s.String() +} + +// SetExpiredLogEventEndIndex sets the ExpiredLogEventEndIndex field's value. +func (s *RejectedLogEventsInfo) SetExpiredLogEventEndIndex(v int64) *RejectedLogEventsInfo { + s.ExpiredLogEventEndIndex = &v + return s +} + +// SetTooNewLogEventStartIndex sets the TooNewLogEventStartIndex field's value. 
+func (s *RejectedLogEventsInfo) SetTooNewLogEventStartIndex(v int64) *RejectedLogEventsInfo { + s.TooNewLogEventStartIndex = &v + return s +} + +// SetTooOldLogEventEndIndex sets the TooOldLogEventEndIndex field's value. +func (s *RejectedLogEventsInfo) SetTooOldLogEventEndIndex(v int64) *RejectedLogEventsInfo { + s.TooOldLogEventEndIndex = &v + return s +} + +// The specified resource already exists. +type ResourceAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ResourceAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceAlreadyExistsException) Code() string { + return "ResourceAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *ResourceAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ResourceAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified resource does not exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A policy enabling one or more entities to put logs to a log group in this +// account. +type ResourcePolicy struct { + _ struct{} `type:"structure"` + + // Timestamp showing when this policy was last updated, expressed as the number + // of milliseconds after Jan 1, 1970 00:00:00 UTC. 
+ LastUpdatedTime *int64 `locationName:"lastUpdatedTime" type:"long"` + + // The details of the policy. + PolicyDocument *string `locationName:"policyDocument" min:"1" type:"string"` + + // The name of the resource policy. + PolicyName *string `locationName:"policyName" type:"string"` +} + +// String returns the string representation +func (s ResourcePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourcePolicy) GoString() string { + return s.String() +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *ResourcePolicy) SetLastUpdatedTime(v int64) *ResourcePolicy { + s.LastUpdatedTime = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *ResourcePolicy) SetPolicyDocument(v string) *ResourcePolicy { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *ResourcePolicy) SetPolicyName(v string) *ResourcePolicy { + s.PolicyName = &v + return s +} + +// Contains one field from one log event returned by a CloudWatch Logs Insights +// query, along with the value of that field. +// +// For more information about the fields that are generated by CloudWatch logs, +// see Supported Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html). +type ResultField struct { + _ struct{} `type:"structure"` + + // The log event field. + Field *string `locationName:"field" type:"string"` + + // The value of this field. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ResultField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResultField) GoString() string { + return s.String() +} + +// SetField sets the Field field's value. 
func (s *ResultField) SetField(v string) *ResultField {
	s.Field = &v
	return s
}

// SetValue sets the Value field's value.
// It returns the receiver so calls can be chained.
func (s *ResultField) SetValue(v string) *ResultField {
	s.Value = &v
	return s
}

// Represents the search status of a log stream.
type SearchedLogStream struct {
	_ struct{} `type:"structure"`

	// The name of the log stream.
	LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"`

	// Indicates whether all the events in this log stream were searched.
	SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"`
}

// String returns the string representation
func (s SearchedLogStream) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SearchedLogStream) GoString() string {
	return s.String()
}

// SetLogStreamName sets the LogStreamName field's value.
// It returns the receiver so calls can be chained.
func (s *SearchedLogStream) SetLogStreamName(v string) *SearchedLogStream {
	s.LogStreamName = &v
	return s
}

// SetSearchedCompletely sets the SearchedCompletely field's value.
// It returns the receiver so calls can be chained.
func (s *SearchedLogStream) SetSearchedCompletely(v bool) *SearchedLogStream {
	s.SearchedCompletely = &v
	return s
}

// The service cannot complete the request.
type ServiceUnavailableException struct {
	_ struct{} `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// Message_ carries the error message returned by the service, if any.
	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ServiceUnavailableException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ServiceUnavailableException) GoString() string {
	return s.String()
}

// newErrorServiceUnavailableException constructs the exception from the
// response metadata of a failed request.
func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error {
	return &ServiceUnavailableException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
+func (s *ServiceUnavailableException) Code() string { + return "ServiceUnavailableException" +} + +// Message returns the exception's message. +func (s *ServiceUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceUnavailableException) OrigErr() error { + return nil +} + +func (s *ServiceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartQueryInput struct { + _ struct{} `type:"structure"` + + // The end of the time range to query. The range is inclusive, so the specified + // end time is included in the query. Specified as epoch time, the number of + // seconds since January 1, 1970, 00:00:00 UTC. + // + // EndTime is a required field + EndTime *int64 `locationName:"endTime" type:"long" required:"true"` + + // The maximum number of log events to return in the query. If the query string + // uses the fields command, only the specified fields and their values are returned. + // The default is 1000. + Limit *int64 `locationName:"limit" min:"1" type:"integer"` + + // The log group on which to perform the query. + // + // A StartQuery operation must include a logGroupNames or a logGroupName parameter, + // but not both. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + // The list of log groups to be queried. You can include up to 20 log groups. + // + // A StartQuery operation must include a logGroupNames or a logGroupName parameter, + // but not both. 
+ LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // The query string to use. For more information, see CloudWatch Logs Insights + // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + // + // QueryString is a required field + QueryString *string `locationName:"queryString" type:"string" required:"true"` + + // The beginning of the time range to query. The range is inclusive, so the + // specified start time is included in the query. Specified as epoch time, the + // number of seconds since January 1, 1970, 00:00:00 UTC. + // + // StartTime is a required field + StartTime *int64 `locationName:"startTime" type:"long" required:"true"` +} + +// String returns the string representation +func (s StartQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartQueryInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *StartQueryInput) SetEndTime(v int64) *StartQueryInput { + s.EndTime = &v + return s +} + +// SetLimit sets the Limit field's value. 
func (s *StartQueryInput) SetLimit(v int64) *StartQueryInput {
	s.Limit = &v
	return s
}

// SetLogGroupName sets the LogGroupName field's value.
// It returns the receiver so calls can be chained.
func (s *StartQueryInput) SetLogGroupName(v string) *StartQueryInput {
	s.LogGroupName = &v
	return s
}

// SetLogGroupNames sets the LogGroupNames field's value.
// It returns the receiver so calls can be chained.
func (s *StartQueryInput) SetLogGroupNames(v []*string) *StartQueryInput {
	s.LogGroupNames = v
	return s
}

// SetQueryString sets the QueryString field's value.
// It returns the receiver so calls can be chained.
func (s *StartQueryInput) SetQueryString(v string) *StartQueryInput {
	s.QueryString = &v
	return s
}

// SetStartTime sets the StartTime field's value.
// It returns the receiver so calls can be chained.
func (s *StartQueryInput) SetStartTime(v int64) *StartQueryInput {
	s.StartTime = &v
	return s
}

// StartQueryOutput is the response shape for the StartQuery API operation.
type StartQueryOutput struct {
	_ struct{} `type:"structure"`

	// The unique ID of the query.
	QueryId *string `locationName:"queryId" type:"string"`
}

// String returns the string representation
func (s StartQueryOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartQueryOutput) GoString() string {
	return s.String()
}

// SetQueryId sets the QueryId field's value.
// It returns the receiver so calls can be chained.
func (s *StartQueryOutput) SetQueryId(v string) *StartQueryOutput {
	s.QueryId = &v
	return s
}

// StopQueryInput is the request shape for the StopQuery API operation.
type StopQueryInput struct {
	_ struct{} `type:"structure"`

	// The ID number of the query to stop. To find this ID number, use DescribeQueries.
	//
	// QueryId is a required field
	QueryId *string `locationName:"queryId" type:"string" required:"true"`
}

// String returns the string representation
func (s StopQueryInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopQueryInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopQueryInput"} + if s.QueryId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryId sets the QueryId field's value. +func (s *StopQueryInput) SetQueryId(v string) *StopQueryInput { + s.QueryId = &v + return s +} + +type StopQueryOutput struct { + _ struct{} `type:"structure"` + + // This is true if the query was stopped by the StopQuery operation. + Success *bool `locationName:"success" type:"boolean"` +} + +// String returns the string representation +func (s StopQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopQueryOutput) GoString() string { + return s.String() +} + +// SetSuccess sets the Success field's value. +func (s *StopQueryOutput) SetSuccess(v bool) *StopQueryOutput { + s.Success = &v + return s +} + +// Represents a subscription filter. +type SubscriptionFilter struct { + _ struct{} `type:"structure"` + + // The creation time of the subscription filter, expressed as the number of + // milliseconds after Jan 1, 1970 00:00:00 UTC. + CreationTime *int64 `locationName:"creationTime" type:"long"` + + // The Amazon Resource Name (ARN) of the destination. + DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` + + // The method used to distribute log data to the destination, which can be either + // random or grouped by log stream. + Distribution *string `locationName:"distribution" type:"string" enum:"Distribution"` + + // The name of the subscription filter. + FilterName *string `locationName:"filterName" min:"1" type:"string"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. 
You use the filter pattern to specify what to look for + // in the log event message. + FilterPattern *string `locationName:"filterPattern" type:"string"` + + // The name of the log group. + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` + + RoleArn *string `locationName:"roleArn" min:"1" type:"string"` +} + +// String returns the string representation +func (s SubscriptionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubscriptionFilter) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *SubscriptionFilter) SetCreationTime(v int64) *SubscriptionFilter { + s.CreationTime = &v + return s +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *SubscriptionFilter) SetDestinationArn(v string) *SubscriptionFilter { + s.DestinationArn = &v + return s +} + +// SetDistribution sets the Distribution field's value. +func (s *SubscriptionFilter) SetDistribution(v string) *SubscriptionFilter { + s.Distribution = &v + return s +} + +// SetFilterName sets the FilterName field's value. +func (s *SubscriptionFilter) SetFilterName(v string) *SubscriptionFilter { + s.FilterName = &v + return s +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *SubscriptionFilter) SetFilterPattern(v string) *SubscriptionFilter { + s.FilterPattern = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *SubscriptionFilter) SetLogGroupName(v string) *SubscriptionFilter { + s.LogGroupName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *SubscriptionFilter) SetRoleArn(v string) *SubscriptionFilter { + s.RoleArn = &v + return s +} + +type TagLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. 
+ // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The key-value pairs to use for the tags. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *TagLogGroupInput) SetLogGroupName(v string) *TagLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *TagLogGroupInput) SetTags(v map[string]*string) *TagLogGroupInput { + s.Tags = v + return s +} + +type TagLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagLogGroupOutput) GoString() string { + return s.String() +} + +type TestMetricFilterInput struct { + _ struct{} `type:"structure"` + + // A symbolic description of how CloudWatch Logs should interpret the data in + // each log event. For example, a log event can contain timestamps, IP addresses, + // strings, and so on. You use the filter pattern to specify what to look for + // in the log event message. + // + // FilterPattern is a required field + FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` + + // The log event messages to test. + // + // LogEventMessages is a required field + LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TestMetricFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TestMetricFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"} + if s.FilterPattern == nil { + invalidParams.Add(request.NewErrParamRequired("FilterPattern")) + } + if s.LogEventMessages == nil { + invalidParams.Add(request.NewErrParamRequired("LogEventMessages")) + } + if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterPattern sets the FilterPattern field's value. +func (s *TestMetricFilterInput) SetFilterPattern(v string) *TestMetricFilterInput { + s.FilterPattern = &v + return s +} + +// SetLogEventMessages sets the LogEventMessages field's value. +func (s *TestMetricFilterInput) SetLogEventMessages(v []*string) *TestMetricFilterInput { + s.LogEventMessages = v + return s +} + +type TestMetricFilterOutput struct { + _ struct{} `type:"structure"` + + // The matched events. + Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` +} + +// String returns the string representation +func (s TestMetricFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestMetricFilterOutput) GoString() string { + return s.String() +} + +// SetMatches sets the Matches field's value. +func (s *TestMetricFilterOutput) SetMatches(v []*MetricFilterMatchRecord) *TestMetricFilterOutput { + s.Matches = v + return s +} + +// The most likely cause is an invalid AWS access key ID or secret key. 
+type UnrecognizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnrecognizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnrecognizedClientException) GoString() string { + return s.String() +} + +func newErrorUnrecognizedClientException(v protocol.ResponseMetadata) error { + return &UnrecognizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnrecognizedClientException) Code() string { + return "UnrecognizedClientException" +} + +// Message returns the exception's message. +func (s *UnrecognizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnrecognizedClientException) OrigErr() error { + return nil +} + +func (s *UnrecognizedClientException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnrecognizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnrecognizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagLogGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the log group. + // + // LogGroupName is a required field + LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` + + // The tag keys. The corresponding tags are removed from the log group. 
+ // + // Tags is a required field + Tags []*string `locationName:"tags" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagLogGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagLogGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagLogGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagLogGroupInput"} + if s.LogGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *UntagLogGroupInput) SetLogGroupName(v string) *UntagLogGroupInput { + s.LogGroupName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *UntagLogGroupInput) SetTags(v []*string) *UntagLogGroupInput { + s.Tags = v + return s +} + +type UntagLogGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagLogGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagLogGroupOutput) GoString() string { + return s.String() +} + +// The method used to distribute log data to the destination, which can be either +// random or grouped by log stream. 
+const ( + // DistributionRandom is a Distribution enum value + DistributionRandom = "Random" + + // DistributionByLogStream is a Distribution enum value + DistributionByLogStream = "ByLogStream" +) + +// Distribution_Values returns all elements of the Distribution enum +func Distribution_Values() []string { + return []string{ + DistributionRandom, + DistributionByLogStream, + } +} + +const ( + // ExportTaskStatusCodeCancelled is a ExportTaskStatusCode enum value + ExportTaskStatusCodeCancelled = "CANCELLED" + + // ExportTaskStatusCodeCompleted is a ExportTaskStatusCode enum value + ExportTaskStatusCodeCompleted = "COMPLETED" + + // ExportTaskStatusCodeFailed is a ExportTaskStatusCode enum value + ExportTaskStatusCodeFailed = "FAILED" + + // ExportTaskStatusCodePending is a ExportTaskStatusCode enum value + ExportTaskStatusCodePending = "PENDING" + + // ExportTaskStatusCodePendingCancel is a ExportTaskStatusCode enum value + ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" + + // ExportTaskStatusCodeRunning is a ExportTaskStatusCode enum value + ExportTaskStatusCodeRunning = "RUNNING" +) + +// ExportTaskStatusCode_Values returns all elements of the ExportTaskStatusCode enum +func ExportTaskStatusCode_Values() []string { + return []string{ + ExportTaskStatusCodeCancelled, + ExportTaskStatusCodeCompleted, + ExportTaskStatusCodeFailed, + ExportTaskStatusCodePending, + ExportTaskStatusCodePendingCancel, + ExportTaskStatusCodeRunning, + } +} + +const ( + // OrderByLogStreamName is a OrderBy enum value + OrderByLogStreamName = "LogStreamName" + + // OrderByLastEventTime is a OrderBy enum value + OrderByLastEventTime = "LastEventTime" +) + +// OrderBy_Values returns all elements of the OrderBy enum +func OrderBy_Values() []string { + return []string{ + OrderByLogStreamName, + OrderByLastEventTime, + } +} + +const ( + // QueryStatusScheduled is a QueryStatus enum value + QueryStatusScheduled = "Scheduled" + + // QueryStatusRunning is a QueryStatus enum value + 
QueryStatusRunning = "Running" + + // QueryStatusComplete is a QueryStatus enum value + QueryStatusComplete = "Complete" + + // QueryStatusFailed is a QueryStatus enum value + QueryStatusFailed = "Failed" + + // QueryStatusCancelled is a QueryStatus enum value + QueryStatusCancelled = "Cancelled" +) + +// QueryStatus_Values returns all elements of the QueryStatus enum +func QueryStatus_Values() []string { + return []string{ + QueryStatusScheduled, + QueryStatusRunning, + QueryStatusComplete, + QueryStatusFailed, + QueryStatusCancelled, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go new file mode 100644 index 00000000000..647d6830165 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go @@ -0,0 +1,57 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package cloudwatchlogs provides the client and types for making API +// requests to Amazon CloudWatch Logs. +// +// You can use Amazon CloudWatch Logs to monitor, store, and access your log +// files from EC2 instances, AWS CloudTrail, or other sources. You can then +// retrieve the associated log data from CloudWatch Logs using the CloudWatch +// console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or +// CloudWatch Logs SDK. +// +// You can use CloudWatch Logs to: +// +// * Monitor logs from EC2 instances in real-time: You can use CloudWatch +// Logs to monitor applications and systems using log data. For example, +// CloudWatch Logs can track the number of errors that occur in your application +// logs and send you a notification whenever the rate of errors exceeds a +// threshold that you specify. CloudWatch Logs uses your log data for monitoring +// so no code changes are required. 
For example, you can monitor application +// logs for specific literal terms (such as "NullReferenceException") or +// count the number of occurrences of a literal term at a particular position +// in log data (such as "404" status codes in an Apache access log). When +// the term you are searching for is found, CloudWatch Logs reports the data +// to a CloudWatch metric that you specify. +// +// * Monitor AWS CloudTrail logged events: You can create alarms in CloudWatch +// and receive notifications of particular API activity as captured by CloudTrail. +// You can use the notification to perform troubleshooting. +// +// * Archive log data: You can use CloudWatch Logs to store your log data +// in highly durable storage. You can change the log retention setting so +// that any log events older than this setting are automatically deleted. +// The CloudWatch Logs agent makes it easy to quickly send both rotated and +// non-rotated log data off of a host and into the log service. You can then +// access the raw log data when you need it. +// +// See https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28 for more information on this service. +// +// See cloudwatchlogs package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/ +// +// Using the Client +// +// To contact Amazon CloudWatch Logs with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon CloudWatch Logs client CloudWatchLogs for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/#New +package cloudwatchlogs diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go new file mode 100644 index 00000000000..44e3bb576f1 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeDataAlreadyAcceptedException for service response error code + // "DataAlreadyAcceptedException". + // + // The event was already logged. + ErrCodeDataAlreadyAcceptedException = "DataAlreadyAcceptedException" + + // ErrCodeInvalidOperationException for service response error code + // "InvalidOperationException". + // + // The operation is not valid on the specified resource. + ErrCodeInvalidOperationException = "InvalidOperationException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // A parameter is specified incorrectly. + ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeInvalidSequenceTokenException for service response error code + // "InvalidSequenceTokenException". + // + // The sequence token is not valid. You can get the correct sequence token in + // the expectedSequenceToken field in the InvalidSequenceTokenException message. + ErrCodeInvalidSequenceTokenException = "InvalidSequenceTokenException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // You have reached the maximum number of resources that can be created. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeMalformedQueryException for service response error code + // "MalformedQueryException". 
+ // + // The query string is not valid. Details about this error are displayed in + // a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). + // + // For more information about valid query syntax, see CloudWatch Logs Insights + // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + ErrCodeMalformedQueryException = "MalformedQueryException" + + // ErrCodeOperationAbortedException for service response error code + // "OperationAbortedException". + // + // Multiple requests to update the same resource were in conflict. + ErrCodeOperationAbortedException = "OperationAbortedException" + + // ErrCodeResourceAlreadyExistsException for service response error code + // "ResourceAlreadyExistsException". + // + // The specified resource already exists. + ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The specified resource does not exist. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceUnavailableException for service response error code + // "ServiceUnavailableException". + // + // The service cannot complete the request. + ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeUnrecognizedClientException for service response error code + // "UnrecognizedClientException". + // + // The most likely cause is an invalid AWS access key ID or secret key. 
+ ErrCodeUnrecognizedClientException = "UnrecognizedClientException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "DataAlreadyAcceptedException": newErrorDataAlreadyAcceptedException, + "InvalidOperationException": newErrorInvalidOperationException, + "InvalidParameterException": newErrorInvalidParameterException, + "InvalidSequenceTokenException": newErrorInvalidSequenceTokenException, + "LimitExceededException": newErrorLimitExceededException, + "MalformedQueryException": newErrorMalformedQueryException, + "OperationAbortedException": newErrorOperationAbortedException, + "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceUnavailableException": newErrorServiceUnavailableException, + "UnrecognizedClientException": newErrorUnrecognizedClientException, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go new file mode 100644 index 00000000000..41520eda946 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -0,0 +1,103 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatchlogs + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// CloudWatchLogs provides the API operation methods for making requests to +// Amazon CloudWatch Logs. See this package's package overview docs +// for details on the service. +// +// CloudWatchLogs methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type CloudWatchLogs struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "logs" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "CloudWatch Logs" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the CloudWatchLogs client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a CloudWatchLogs client from just a session. +// svc := cloudwatchlogs.New(mySession) +// +// // Create a CloudWatchLogs client with additional configuration +// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchLogs { + svc := &CloudWatchLogs{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2014-03-28", + JSONVersion: "1.1", + TargetPrefix: "Logs_20140328", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatchLogs operation and runs any +// custom request initialization. +func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go new file mode 100644 index 00000000000..6f0f6fbb1eb --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -0,0 +1,122563 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package ec2 + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" +) + +const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQuote" + +// AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the +// client's request for the AcceptReservedInstancesExchangeQuote operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptReservedInstancesExchangeQuote for more information on using the AcceptReservedInstancesExchangeQuote +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptReservedInstancesExchangeQuoteRequest method. 
+// req, resp := client.AcceptReservedInstancesExchangeQuoteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptReservedInstancesExchangeQuote +func (c *EC2) AcceptReservedInstancesExchangeQuoteRequest(input *AcceptReservedInstancesExchangeQuoteInput) (req *request.Request, output *AcceptReservedInstancesExchangeQuoteOutput) { + op := &request.Operation{ + Name: opAcceptReservedInstancesExchangeQuote, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptReservedInstancesExchangeQuoteInput{} + } + + output = &AcceptReservedInstancesExchangeQuoteOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud. +// +// Accepts the Convertible Reserved Instance exchange quote described in the +// GetReservedInstancesExchangeQuote call. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptReservedInstancesExchangeQuote for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptReservedInstancesExchangeQuote +func (c *EC2) AcceptReservedInstancesExchangeQuote(input *AcceptReservedInstancesExchangeQuoteInput) (*AcceptReservedInstancesExchangeQuoteOutput, error) { + req, out := c.AcceptReservedInstancesExchangeQuoteRequest(input) + return out, req.Send() +} + +// AcceptReservedInstancesExchangeQuoteWithContext is the same as AcceptReservedInstancesExchangeQuote with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptReservedInstancesExchangeQuote for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptReservedInstancesExchangeQuoteWithContext(ctx aws.Context, input *AcceptReservedInstancesExchangeQuoteInput, opts ...request.Option) (*AcceptReservedInstancesExchangeQuoteOutput, error) { + req, out := c.AcceptReservedInstancesExchangeQuoteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAcceptTransitGatewayPeeringAttachment = "AcceptTransitGatewayPeeringAttachment" + +// AcceptTransitGatewayPeeringAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the AcceptTransitGatewayPeeringAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptTransitGatewayPeeringAttachment for more information on using the AcceptTransitGatewayPeeringAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptTransitGatewayPeeringAttachmentRequest method. 
+// req, resp := client.AcceptTransitGatewayPeeringAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptTransitGatewayPeeringAttachment +func (c *EC2) AcceptTransitGatewayPeeringAttachmentRequest(input *AcceptTransitGatewayPeeringAttachmentInput) (req *request.Request, output *AcceptTransitGatewayPeeringAttachmentOutput) { + op := &request.Operation{ + Name: opAcceptTransitGatewayPeeringAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptTransitGatewayPeeringAttachmentInput{} + } + + output = &AcceptTransitGatewayPeeringAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptTransitGatewayPeeringAttachment API operation for Amazon Elastic Compute Cloud. +// +// Accepts a transit gateway peering attachment request. The peering attachment +// must be in the pendingAcceptance state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptTransitGatewayPeeringAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptTransitGatewayPeeringAttachment +func (c *EC2) AcceptTransitGatewayPeeringAttachment(input *AcceptTransitGatewayPeeringAttachmentInput) (*AcceptTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.AcceptTransitGatewayPeeringAttachmentRequest(input) + return out, req.Send() +} + +// AcceptTransitGatewayPeeringAttachmentWithContext is the same as AcceptTransitGatewayPeeringAttachment with the addition of +// the ability to pass a context and additional request options. 
+// +// See AcceptTransitGatewayPeeringAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, input *AcceptTransitGatewayPeeringAttachmentInput, opts ...request.Option) (*AcceptTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.AcceptTransitGatewayPeeringAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAcceptTransitGatewayVpcAttachment = "AcceptTransitGatewayVpcAttachment" + +// AcceptTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the AcceptTransitGatewayVpcAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptTransitGatewayVpcAttachment for more information on using the AcceptTransitGatewayVpcAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptTransitGatewayVpcAttachmentRequest method. 
+// req, resp := client.AcceptTransitGatewayVpcAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptTransitGatewayVpcAttachment +func (c *EC2) AcceptTransitGatewayVpcAttachmentRequest(input *AcceptTransitGatewayVpcAttachmentInput) (req *request.Request, output *AcceptTransitGatewayVpcAttachmentOutput) { + op := &request.Operation{ + Name: opAcceptTransitGatewayVpcAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptTransitGatewayVpcAttachmentInput{} + } + + output = &AcceptTransitGatewayVpcAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptTransitGatewayVpcAttachment API operation for Amazon Elastic Compute Cloud. +// +// Accepts a request to attach a VPC to a transit gateway. +// +// The VPC attachment must be in the pendingAcceptance state. Use DescribeTransitGatewayVpcAttachments +// to view your pending VPC attachment requests. Use RejectTransitGatewayVpcAttachment +// to reject a VPC attachment request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptTransitGatewayVpcAttachment for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptTransitGatewayVpcAttachment +func (c *EC2) AcceptTransitGatewayVpcAttachment(input *AcceptTransitGatewayVpcAttachmentInput) (*AcceptTransitGatewayVpcAttachmentOutput, error) { + req, out := c.AcceptTransitGatewayVpcAttachmentRequest(input) + return out, req.Send() +} + +// AcceptTransitGatewayVpcAttachmentWithContext is the same as AcceptTransitGatewayVpcAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptTransitGatewayVpcAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptTransitGatewayVpcAttachmentWithContext(ctx aws.Context, input *AcceptTransitGatewayVpcAttachmentInput, opts ...request.Option) (*AcceptTransitGatewayVpcAttachmentOutput, error) { + req, out := c.AcceptTransitGatewayVpcAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAcceptVpcEndpointConnections = "AcceptVpcEndpointConnections" + +// AcceptVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the AcceptVpcEndpointConnections operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptVpcEndpointConnections for more information on using the AcceptVpcEndpointConnections +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptVpcEndpointConnectionsRequest method. +// req, resp := client.AcceptVpcEndpointConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcEndpointConnections +func (c *EC2) AcceptVpcEndpointConnectionsRequest(input *AcceptVpcEndpointConnectionsInput) (req *request.Request, output *AcceptVpcEndpointConnectionsOutput) { + op := &request.Operation{ + Name: opAcceptVpcEndpointConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcEndpointConnectionsInput{} + } + + output = &AcceptVpcEndpointConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptVpcEndpointConnections API operation for Amazon Elastic Compute Cloud. +// +// Accepts one or more interface VPC endpoint connection requests to your VPC +// endpoint service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptVpcEndpointConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcEndpointConnections +func (c *EC2) AcceptVpcEndpointConnections(input *AcceptVpcEndpointConnectionsInput) (*AcceptVpcEndpointConnectionsOutput, error) { + req, out := c.AcceptVpcEndpointConnectionsRequest(input) + return out, req.Send() +} + +// AcceptVpcEndpointConnectionsWithContext is the same as AcceptVpcEndpointConnections with the addition of +// the ability to pass a context and additional request options. 
+// +// See AcceptVpcEndpointConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptVpcEndpointConnectionsWithContext(ctx aws.Context, input *AcceptVpcEndpointConnectionsInput, opts ...request.Option) (*AcceptVpcEndpointConnectionsOutput, error) { + req, out := c.AcceptVpcEndpointConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" + +// AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the AcceptVpcPeeringConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptVpcPeeringConnection for more information on using the AcceptVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptVpcPeeringConnectionRequest method. 
+// req, resp := client.AcceptVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcPeeringConnection +func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opAcceptVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AcceptVpcPeeringConnectionInput{} + } + + output = &AcceptVpcPeeringConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptVpcPeeringConnection API operation for Amazon Elastic Compute Cloud. +// +// Accept a VPC peering connection request. To accept a request, the VPC peering +// connection must be in the pending-acceptance state, and you must be the owner +// of the peer VPC. Use DescribeVpcPeeringConnections to view your outstanding +// VPC peering connection requests. +// +// For an inter-Region VPC peering connection request, you must accept the VPC +// peering connection in the Region of the accepter VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AcceptVpcPeeringConnection for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcPeeringConnection +func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// AcceptVpcPeeringConnectionWithContext is the same as AcceptVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptVpcPeeringConnectionWithContext(ctx aws.Context, input *AcceptVpcPeeringConnectionInput, opts ...request.Option) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAdvertiseByoipCidr = "AdvertiseByoipCidr" + +// AdvertiseByoipCidrRequest generates a "aws/request.Request" representing the +// client's request for the AdvertiseByoipCidr operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AdvertiseByoipCidr for more information on using the AdvertiseByoipCidr +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the AdvertiseByoipCidrRequest method. +// req, resp := client.AdvertiseByoipCidrRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AdvertiseByoipCidr +func (c *EC2) AdvertiseByoipCidrRequest(input *AdvertiseByoipCidrInput) (req *request.Request, output *AdvertiseByoipCidrOutput) { + op := &request.Operation{ + Name: opAdvertiseByoipCidr, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AdvertiseByoipCidrInput{} + } + + output = &AdvertiseByoipCidrOutput{} + req = c.newRequest(op, input, output) + return +} + +// AdvertiseByoipCidr API operation for Amazon Elastic Compute Cloud. +// +// Advertises an IPv4 or IPv6 address range that is provisioned for use with +// your AWS resources through bring your own IP addresses (BYOIP). +// +// You can perform this operation at most once every 10 seconds, even if you +// specify different address ranges each time. +// +// We recommend that you stop advertising the BYOIP CIDR from other locations +// when you advertise it from AWS. To minimize down time, you can configure +// your AWS resources to use an address from a BYOIP CIDR before it is advertised, +// and then simultaneously stop advertising it from the current location and +// start advertising it through AWS. +// +// It can take a few minutes before traffic to the specified addresses starts +// routing to AWS because of BGP propagation delays. +// +// To stop advertising the BYOIP CIDR, use WithdrawByoipCidr. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AdvertiseByoipCidr for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AdvertiseByoipCidr +func (c *EC2) AdvertiseByoipCidr(input *AdvertiseByoipCidrInput) (*AdvertiseByoipCidrOutput, error) { + req, out := c.AdvertiseByoipCidrRequest(input) + return out, req.Send() +} + +// AdvertiseByoipCidrWithContext is the same as AdvertiseByoipCidr with the addition of +// the ability to pass a context and additional request options. +// +// See AdvertiseByoipCidr for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AdvertiseByoipCidrWithContext(ctx aws.Context, input *AdvertiseByoipCidrInput, opts ...request.Option) (*AdvertiseByoipCidrOutput, error) { + req, out := c.AdvertiseByoipCidrRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAllocateAddress = "AllocateAddress" + +// AllocateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AllocateAddress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocateAddress for more information on using the AllocateAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocateAddressRequest method. 
+// req, resp := client.AllocateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddress +func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { + op := &request.Operation{ + Name: opAllocateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateAddressInput{} + } + + output = &AllocateAddressOutput{} + req = c.newRequest(op, input, output) + return +} + +// AllocateAddress API operation for Amazon Elastic Compute Cloud. +// +// Allocates an Elastic IP address to your AWS account. After you allocate the +// Elastic IP address you can associate it with an instance or network interface. +// After you release an Elastic IP address, it is released to the IP address +// pool and can be allocated to a different AWS account. +// +// You can allocate an Elastic IP address from an address pool owned by AWS +// or from an address pool created from a public IPv4 address range that you +// have brought to AWS for use with your AWS resources using bring your own +// IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses +// (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// [EC2-VPC] If you release an Elastic IP address, you might be able to recover +// it. You cannot recover an Elastic IP address that you released after it is +// allocated to another AWS account. You cannot recover an Elastic IP address +// for EC2-Classic. To attempt to recover an Elastic IP address that you released, +// specify it in this operation. +// +// An Elastic IP address is for use either in the EC2-Classic platform or in +// a VPC. 
By default, you can allocate 5 Elastic IP addresses for EC2-Classic +// per Region and 5 Elastic IP addresses for EC2-VPC per Region. +// +// For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can allocate a carrier IP address which is a public IP address from a +// telecommunication carrier, to a network interface which resides in a subnet +// in a Wavelength Zone (for example an EC2 instance). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AllocateAddress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddress +func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + return out, req.Send() +} + +// AllocateAddressWithContext is the same as AllocateAddress with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AllocateAddressWithContext(ctx aws.Context, input *AllocateAddressInput, opts ...request.Option) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opAllocateHosts = "AllocateHosts" + +// AllocateHostsRequest generates a "aws/request.Request" representing the +// client's request for the AllocateHosts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocateHosts for more information on using the AllocateHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocateHostsRequest method. +// req, resp := client.AllocateHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateHosts +func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) { + op := &request.Operation{ + Name: opAllocateHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateHostsInput{} + } + + output = &AllocateHostsOutput{} + req = c.newRequest(op, input, output) + return +} + +// AllocateHosts API operation for Amazon Elastic Compute Cloud. +// +// Allocates a Dedicated Host to your account. At a minimum, specify the supported +// instance type or instance family, the Availability Zone in which to allocate +// the host, and the number of hosts to allocate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AllocateHosts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateHosts +func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + return out, req.Send() +} + +// AllocateHostsWithContext is the same as AllocateHosts with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AllocateHostsWithContext(ctx aws.Context, input *AllocateHostsInput, opts ...request.Option) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opApplySecurityGroupsToClientVpnTargetNetwork = "ApplySecurityGroupsToClientVpnTargetNetwork" + +// ApplySecurityGroupsToClientVpnTargetNetworkRequest generates a "aws/request.Request" representing the +// client's request for the ApplySecurityGroupsToClientVpnTargetNetwork operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ApplySecurityGroupsToClientVpnTargetNetwork for more information on using the ApplySecurityGroupsToClientVpnTargetNetwork +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ApplySecurityGroupsToClientVpnTargetNetworkRequest method. +// req, resp := client.ApplySecurityGroupsToClientVpnTargetNetworkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ApplySecurityGroupsToClientVpnTargetNetwork +func (c *EC2) ApplySecurityGroupsToClientVpnTargetNetworkRequest(input *ApplySecurityGroupsToClientVpnTargetNetworkInput) (req *request.Request, output *ApplySecurityGroupsToClientVpnTargetNetworkOutput) { + op := &request.Operation{ + Name: opApplySecurityGroupsToClientVpnTargetNetwork, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ApplySecurityGroupsToClientVpnTargetNetworkInput{} + } + + output = &ApplySecurityGroupsToClientVpnTargetNetworkOutput{} + req = c.newRequest(op, input, output) + return +} + +// ApplySecurityGroupsToClientVpnTargetNetwork API operation for Amazon Elastic Compute Cloud. +// +// Applies a security group to the association between the target network and +// the Client VPN endpoint. This action replaces the existing security groups +// with the specified security groups. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ApplySecurityGroupsToClientVpnTargetNetwork for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ApplySecurityGroupsToClientVpnTargetNetwork +func (c *EC2) ApplySecurityGroupsToClientVpnTargetNetwork(input *ApplySecurityGroupsToClientVpnTargetNetworkInput) (*ApplySecurityGroupsToClientVpnTargetNetworkOutput, error) { + req, out := c.ApplySecurityGroupsToClientVpnTargetNetworkRequest(input) + return out, req.Send() +} + +// ApplySecurityGroupsToClientVpnTargetNetworkWithContext is the same as ApplySecurityGroupsToClientVpnTargetNetwork with the addition of +// the ability to pass a context and additional request options. +// +// See ApplySecurityGroupsToClientVpnTargetNetwork for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ApplySecurityGroupsToClientVpnTargetNetworkWithContext(ctx aws.Context, input *ApplySecurityGroupsToClientVpnTargetNetworkInput, opts ...request.Option) (*ApplySecurityGroupsToClientVpnTargetNetworkOutput, error) { + req, out := c.ApplySecurityGroupsToClientVpnTargetNetworkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssignIpv6Addresses = "AssignIpv6Addresses" + +// AssignIpv6AddressesRequest generates a "aws/request.Request" representing the +// client's request for the AssignIpv6Addresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssignIpv6Addresses for more information on using the AssignIpv6Addresses +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssignIpv6AddressesRequest method. +// req, resp := client.AssignIpv6AddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignIpv6Addresses +func (c *EC2) AssignIpv6AddressesRequest(input *AssignIpv6AddressesInput) (req *request.Request, output *AssignIpv6AddressesOutput) { + op := &request.Operation{ + Name: opAssignIpv6Addresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignIpv6AddressesInput{} + } + + output = &AssignIpv6AddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssignIpv6Addresses API operation for Amazon Elastic Compute Cloud. +// +// Assigns one or more IPv6 addresses to the specified network interface. You +// can specify one or more specific IPv6 addresses, or you can specify the number +// of IPv6 addresses to be automatically assigned from within the subnet's IPv6 +// CIDR block range. You can assign as many IPv6 addresses to a network interface +// as you can assign private IPv4 addresses, and the limit varies per instance +// type. For information, see IP Addresses Per Network Interface Per Instance +// Type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You must specify either the IPv6 addresses or the IPv6 address count in the +// request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssignIpv6Addresses for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignIpv6Addresses +func (c *EC2) AssignIpv6Addresses(input *AssignIpv6AddressesInput) (*AssignIpv6AddressesOutput, error) { + req, out := c.AssignIpv6AddressesRequest(input) + return out, req.Send() +} + +// AssignIpv6AddressesWithContext is the same as AssignIpv6Addresses with the addition of +// the ability to pass a context and additional request options. +// +// See AssignIpv6Addresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssignIpv6AddressesWithContext(ctx aws.Context, input *AssignIpv6AddressesInput, opts ...request.Option) (*AssignIpv6AddressesOutput, error) { + req, out := c.AssignIpv6AddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" + +// AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the AssignPrivateIpAddresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssignPrivateIpAddresses for more information on using the AssignPrivateIpAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssignPrivateIpAddressesRequest method. +// req, resp := client.AssignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateIpAddresses +func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opAssignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssignPrivateIpAddressesInput{} + } + + output = &AssignPrivateIpAddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssignPrivateIpAddresses API operation for Amazon Elastic Compute Cloud. +// +// Assigns one or more secondary private IP addresses to the specified network +// interface. +// +// You can specify one or more specific secondary IP addresses, or you can specify +// the number of secondary IP addresses to be automatically assigned within +// the subnet's CIDR block range. The number of secondary IP addresses that +// you can assign to an instance varies by instance type. For information about +// instance types, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// Elastic IP addresses, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you move a secondary private IP address to another network interface, +// any Elastic IP address that is associated with the IP address is also moved. +// +// Remapping an IP address is an asynchronous operation. 
When you move an IP +// address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s +// in the instance metadata to confirm that the remapping is complete. +// +// You must specify either the IP addresses or the IP address count in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssignPrivateIpAddresses for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateIpAddresses +func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + return out, req.Send() +} + +// AssignPrivateIpAddressesWithContext is the same as AssignPrivateIpAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See AssignPrivateIpAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssignPrivateIpAddressesWithContext(ctx aws.Context, input *AssignPrivateIpAddressesInput, opts ...request.Option) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateAddress = "AssociateAddress" + +// AssociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AssociateAddress operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateAddress for more information on using the AssociateAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateAddressRequest method. +// req, resp := client.AssociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateAddress +func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { + op := &request.Operation{ + Name: opAssociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateAddressInput{} + } + + output = &AssociateAddressOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateAddress API operation for Amazon Elastic Compute Cloud. +// +// Associates an Elastic IP address, or carrier IP address (for instances that +// are in subnets in Wavelength Zones) with an instance or a network interface. +// Before you can use an Elastic IP address, you must allocate it to your account. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is +// already associated with a different instance, it is disassociated from that +// instance and associated with the specified instance. If you associate an +// Elastic IP address with an instance that has an existing Elastic IP address, +// the existing address is disassociated from the instance, but remains allocated +// to your account. +// +// [VPC in an EC2-Classic account] If you don't specify a private IP address, +// the Elastic IP address is associated with the primary IP address. If the +// Elastic IP address is already associated with a different instance or a network +// interface, you get an error unless you allow reassociation. You cannot associate +// an Elastic IP address with an instance or network interface that has an existing +// Elastic IP address. +// +// [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication +// carrier to the instance or network interface. +// +// You cannot associate an Elastic IP address with an interface in a different +// network border group. +// +// This is an idempotent operation. If you perform the operation more than once, +// Amazon EC2 doesn't return an error, and you may be charged for each time +// the Elastic IP address is remapped to the same instance. For more information, +// see the Elastic IP Addresses section of Amazon EC2 Pricing (http://aws.amazon.com/ec2/pricing/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateAddress for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateAddress +func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + return out, req.Send() +} + +// AssociateAddressWithContext is the same as AssociateAddress with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateAddressWithContext(ctx aws.Context, input *AssociateAddressInput, opts ...request.Option) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateClientVpnTargetNetwork = "AssociateClientVpnTargetNetwork" + +// AssociateClientVpnTargetNetworkRequest generates a "aws/request.Request" representing the +// client's request for the AssociateClientVpnTargetNetwork operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateClientVpnTargetNetwork for more information on using the AssociateClientVpnTargetNetwork +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateClientVpnTargetNetworkRequest method. 
+// req, resp := client.AssociateClientVpnTargetNetworkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateClientVpnTargetNetwork +func (c *EC2) AssociateClientVpnTargetNetworkRequest(input *AssociateClientVpnTargetNetworkInput) (req *request.Request, output *AssociateClientVpnTargetNetworkOutput) { + op := &request.Operation{ + Name: opAssociateClientVpnTargetNetwork, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateClientVpnTargetNetworkInput{} + } + + output = &AssociateClientVpnTargetNetworkOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateClientVpnTargetNetwork API operation for Amazon Elastic Compute Cloud. +// +// Associates a target network with a Client VPN endpoint. A target network +// is a subnet in a VPC. You can associate multiple subnets from the same VPC +// with a Client VPN endpoint. You can associate only one subnet in each Availability +// Zone. We recommend that you associate at least two subnets to provide Availability +// Zone redundancy. +// +// If you specified a VPC when you created the Client VPN endpoint or if you +// have previous subnet associations, the specified subnet must be in the same +// VPC. To specify a subnet that's in a different VPC, you must first modify +// the Client VPN endpoint (ModifyClientVpnEndpoint) and change the VPC that's +// associated with it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateClientVpnTargetNetwork for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateClientVpnTargetNetwork +func (c *EC2) AssociateClientVpnTargetNetwork(input *AssociateClientVpnTargetNetworkInput) (*AssociateClientVpnTargetNetworkOutput, error) { + req, out := c.AssociateClientVpnTargetNetworkRequest(input) + return out, req.Send() +} + +// AssociateClientVpnTargetNetworkWithContext is the same as AssociateClientVpnTargetNetwork with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateClientVpnTargetNetwork for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateClientVpnTargetNetworkWithContext(ctx aws.Context, input *AssociateClientVpnTargetNetworkInput, opts ...request.Option) (*AssociateClientVpnTargetNetworkOutput, error) { + req, out := c.AssociateClientVpnTargetNetworkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateDhcpOptions = "AssociateDhcpOptions" + +// AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the AssociateDhcpOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateDhcpOptions for more information on using the AssociateDhcpOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the AssociateDhcpOptionsRequest method. +// req, resp := client.AssociateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateDhcpOptions +func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opAssociateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateDhcpOptionsInput{} + } + + output = &AssociateDhcpOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateDhcpOptions API operation for Amazon Elastic Compute Cloud. +// +// Associates a set of DHCP options (that you've previously created) with the +// specified VPC, or associates no DHCP options with the VPC. +// +// After you associate the options with the VPC, any existing instances and +// all new instances that you launch in that VPC use the options. You don't +// need to restart or relaunch the instances. They automatically pick up the +// changes within a few hours, depending on how frequently the instance renews +// its DHCP lease. You can explicitly renew the lease using the operating system +// on the instance. +// +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateDhcpOptions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateDhcpOptions +func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + return out, req.Send() +} + +// AssociateDhcpOptionsWithContext is the same as AssociateDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateDhcpOptionsWithContext(ctx aws.Context, input *AssociateDhcpOptionsInput, opts ...request.Option) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateEnclaveCertificateIamRole = "AssociateEnclaveCertificateIamRole" + +// AssociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateEnclaveCertificateIamRole for more information on using the AssociateEnclaveCertificateIamRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the AssociateEnclaveCertificateIamRoleRequest method. +// req, resp := client.AssociateEnclaveCertificateIamRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole +func (c *EC2) AssociateEnclaveCertificateIamRoleRequest(input *AssociateEnclaveCertificateIamRoleInput) (req *request.Request, output *AssociateEnclaveCertificateIamRoleOutput) { + op := &request.Operation{ + Name: opAssociateEnclaveCertificateIamRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateEnclaveCertificateIamRoleInput{} + } + + output = &AssociateEnclaveCertificateIamRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud. +// +// Associates an AWS Identity and Access Management (IAM) role with an AWS Certificate +// Manager (ACM) certificate. This enables the certificate to be used by the +// ACM for Nitro Enclaves application inside an enclave. For more information, +// see AWS Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html) +// in the AWS Nitro Enclaves User Guide. +// +// When the IAM role is associated with the ACM certificate, places the certificate, +// certificate chain, and encrypted private key in an Amazon S3 bucket that +// only the associated IAM role can access. The private key of the certificate +// is encrypted with an AWS-managed KMS customer master (CMK) that has an attached +// attestation-based CMK policy. +// +// To enable the IAM role to access the Amazon S3 object, you must grant it +// permission to call s3:GetObject on the Amazon S3 bucket returned by the command. 
+// To enable the IAM role to access the AWS KMS CMK, you must grant it permission +// to call kms:Decrypt on AWS KMS CMK returned by the command. For more information, +// see Grant the role permission to access the certificate and encryption key +// (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy) +// in the AWS Nitro Enclaves User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateEnclaveCertificateIamRole for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole +func (c *EC2) AssociateEnclaveCertificateIamRole(input *AssociateEnclaveCertificateIamRoleInput) (*AssociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.AssociateEnclaveCertificateIamRoleRequest(input) + return out, req.Send() +} + +// AssociateEnclaveCertificateIamRoleWithContext is the same as AssociateEnclaveCertificateIamRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateEnclaveCertificateIamRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *AssociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*AssociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.AssociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" + +// AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the AssociateIamInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateIamInstanceProfile for more information on using the AssociateIamInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateIamInstanceProfileRequest method. +// req, resp := client.AssociateIamInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfile +func (c *EC2) AssociateIamInstanceProfileRequest(input *AssociateIamInstanceProfileInput) (req *request.Request, output *AssociateIamInstanceProfileOutput) { + op := &request.Operation{ + Name: opAssociateIamInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateIamInstanceProfileInput{} + } + + output = &AssociateIamInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateIamInstanceProfile API operation for Amazon Elastic Compute Cloud. +// +// Associates an IAM instance profile with a running or stopped instance. You +// cannot associate more than one IAM instance profile with an instance. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateIamInstanceProfile for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfile +func (c *EC2) AssociateIamInstanceProfile(input *AssociateIamInstanceProfileInput) (*AssociateIamInstanceProfileOutput, error) { + req, out := c.AssociateIamInstanceProfileRequest(input) + return out, req.Send() +} + +// AssociateIamInstanceProfileWithContext is the same as AssociateIamInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateIamInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateIamInstanceProfileWithContext(ctx aws.Context, input *AssociateIamInstanceProfileInput, opts ...request.Option) (*AssociateIamInstanceProfileOutput, error) { + req, out := c.AssociateIamInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateRouteTable = "AssociateRouteTable" + +// AssociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the AssociateRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AssociateRouteTable for more information on using the AssociateRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateRouteTableRequest method. +// req, resp := client.AssociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTable +func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateRouteTableInput{} + } + + output = &AssociateRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Associates a subnet in your VPC or an internet gateway or virtual private +// gateway attached to your VPC with a route table in your VPC. This association +// causes traffic from the subnet or gateway to be routed according to the routes +// in the route table. The action returns an association ID, which you need +// in order to disassociate the route table later. A route table can be associated +// with multiple subnets. +// +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTable +func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + return out, req.Send() +} + +// AssociateRouteTableWithContext is the same as AssociateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateRouteTableWithContext(ctx aws.Context, input *AssociateRouteTableInput, opts ...request.Option) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateSubnetCidrBlock = "AssociateSubnetCidrBlock" + +// AssociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the +// client's request for the AssociateSubnetCidrBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateSubnetCidrBlock for more information on using the AssociateSubnetCidrBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateSubnetCidrBlockRequest method. +// req, resp := client.AssociateSubnetCidrBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateSubnetCidrBlock +func (c *EC2) AssociateSubnetCidrBlockRequest(input *AssociateSubnetCidrBlockInput) (req *request.Request, output *AssociateSubnetCidrBlockOutput) { + op := &request.Operation{ + Name: opAssociateSubnetCidrBlock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateSubnetCidrBlockInput{} + } + + output = &AssociateSubnetCidrBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateSubnetCidrBlock API operation for Amazon Elastic Compute Cloud. +// +// Associates a CIDR block with your subnet. You can only associate a single +// IPv6 CIDR block with your subnet. An IPv6 CIDR block must have a prefix length +// of /64. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateSubnetCidrBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateSubnetCidrBlock +func (c *EC2) AssociateSubnetCidrBlock(input *AssociateSubnetCidrBlockInput) (*AssociateSubnetCidrBlockOutput, error) { + req, out := c.AssociateSubnetCidrBlockRequest(input) + return out, req.Send() +} + +// AssociateSubnetCidrBlockWithContext is the same as AssociateSubnetCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateSubnetCidrBlock for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateSubnetCidrBlockWithContext(ctx aws.Context, input *AssociateSubnetCidrBlockInput, opts ...request.Option) (*AssociateSubnetCidrBlockOutput, error) { + req, out := c.AssociateSubnetCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateTransitGatewayMulticastDomain = "AssociateTransitGatewayMulticastDomain" + +// AssociateTransitGatewayMulticastDomainRequest generates a "aws/request.Request" representing the +// client's request for the AssociateTransitGatewayMulticastDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateTransitGatewayMulticastDomain for more information on using the AssociateTransitGatewayMulticastDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateTransitGatewayMulticastDomainRequest method. 
+// req, resp := client.AssociateTransitGatewayMulticastDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTransitGatewayMulticastDomain +func (c *EC2) AssociateTransitGatewayMulticastDomainRequest(input *AssociateTransitGatewayMulticastDomainInput) (req *request.Request, output *AssociateTransitGatewayMulticastDomainOutput) { + op := &request.Operation{ + Name: opAssociateTransitGatewayMulticastDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateTransitGatewayMulticastDomainInput{} + } + + output = &AssociateTransitGatewayMulticastDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateTransitGatewayMulticastDomain API operation for Amazon Elastic Compute Cloud. +// +// Associates the specified subnets and transit gateway attachments with the +// specified transit gateway multicast domain. +// +// The transit gateway attachment must be in the available state before you +// can add a resource. Use DescribeTransitGatewayAttachments (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayAttachments.html) +// to see the state of the attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateTransitGatewayMulticastDomain for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTransitGatewayMulticastDomain +func (c *EC2) AssociateTransitGatewayMulticastDomain(input *AssociateTransitGatewayMulticastDomainInput) (*AssociateTransitGatewayMulticastDomainOutput, error) { + req, out := c.AssociateTransitGatewayMulticastDomainRequest(input) + return out, req.Send() +} + +// AssociateTransitGatewayMulticastDomainWithContext is the same as AssociateTransitGatewayMulticastDomain with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateTransitGatewayMulticastDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateTransitGatewayMulticastDomainWithContext(ctx aws.Context, input *AssociateTransitGatewayMulticastDomainInput, opts ...request.Option) (*AssociateTransitGatewayMulticastDomainOutput, error) { + req, out := c.AssociateTransitGatewayMulticastDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateTransitGatewayRouteTable = "AssociateTransitGatewayRouteTable" + +// AssociateTransitGatewayRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the AssociateTransitGatewayRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateTransitGatewayRouteTable for more information on using the AssociateTransitGatewayRouteTable +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateTransitGatewayRouteTableRequest method. +// req, resp := client.AssociateTransitGatewayRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTransitGatewayRouteTable +func (c *EC2) AssociateTransitGatewayRouteTableRequest(input *AssociateTransitGatewayRouteTableInput) (req *request.Request, output *AssociateTransitGatewayRouteTableOutput) { + op := &request.Operation{ + Name: opAssociateTransitGatewayRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateTransitGatewayRouteTableInput{} + } + + output = &AssociateTransitGatewayRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Associates the specified attachment with the specified transit gateway route +// table. You can associate only one route table with an attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateTransitGatewayRouteTable for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateTransitGatewayRouteTable +func (c *EC2) AssociateTransitGatewayRouteTable(input *AssociateTransitGatewayRouteTableInput) (*AssociateTransitGatewayRouteTableOutput, error) { + req, out := c.AssociateTransitGatewayRouteTableRequest(input) + return out, req.Send() +} + +// AssociateTransitGatewayRouteTableWithContext is the same as AssociateTransitGatewayRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateTransitGatewayRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateTransitGatewayRouteTableWithContext(ctx aws.Context, input *AssociateTransitGatewayRouteTableInput, opts ...request.Option) (*AssociateTransitGatewayRouteTableOutput, error) { + req, out := c.AssociateTransitGatewayRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateVpcCidrBlock = "AssociateVpcCidrBlock" + +// AssociateVpcCidrBlockRequest generates a "aws/request.Request" representing the +// client's request for the AssociateVpcCidrBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateVpcCidrBlock for more information on using the AssociateVpcCidrBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateVpcCidrBlockRequest method. +// req, resp := client.AssociateVpcCidrBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateVpcCidrBlock +func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (req *request.Request, output *AssociateVpcCidrBlockOutput) { + op := &request.Operation{ + Name: opAssociateVpcCidrBlock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateVpcCidrBlockInput{} + } + + output = &AssociateVpcCidrBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateVpcCidrBlock API operation for Amazon Elastic Compute Cloud. +// +// Associates a CIDR block with your VPC. You can associate a secondary IPv4 +// CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from +// an IPv6 address pool that you provisioned through bring your own IP addresses +// (BYOIP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)). +// The IPv6 CIDR block size is fixed at /56. +// +// You must specify one of the following in the request: an IPv4 CIDR block, +// an IPv6 pool, or an Amazon-provided IPv6 CIDR block. +// +// For more information about associating CIDR blocks with your VPC and applicable +// restrictions, see VPC and Subnet Sizing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateVpcCidrBlock for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateVpcCidrBlock +func (c *EC2) AssociateVpcCidrBlock(input *AssociateVpcCidrBlockInput) (*AssociateVpcCidrBlockOutput, error) { + req, out := c.AssociateVpcCidrBlockRequest(input) + return out, req.Send() +} + +// AssociateVpcCidrBlockWithContext is the same as AssociateVpcCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateVpcCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateVpcCidrBlockWithContext(ctx aws.Context, input *AssociateVpcCidrBlockInput, opts ...request.Option) (*AssociateVpcCidrBlockOutput, error) { + req, out := c.AssociateVpcCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachClassicLinkVpc = "AttachClassicLinkVpc" + +// AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the AttachClassicLinkVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachClassicLinkVpc for more information on using the AttachClassicLinkVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachClassicLinkVpcRequest method. 
+// req, resp := client.AttachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachClassicLinkVpc +func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opAttachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachClassicLinkVpcInput{} + } + + output = &AttachClassicLinkVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// AttachClassicLinkVpc API operation for Amazon Elastic Compute Cloud. +// +// Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or +// more of the VPC's security groups. You cannot link an EC2-Classic instance +// to more than one VPC at a time. You can only link an instance that's in the +// running state. An instance is automatically unlinked from a VPC when it's +// stopped - you can link it to the VPC again when you restart it. +// +// After you've linked an instance, you cannot change the VPC security groups +// that are associated with it. To change the security groups, you must first +// unlink the instance, and then link it again. +// +// Linking your instance to a VPC is sometimes referred to as attaching your +// instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AttachClassicLinkVpc for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachClassicLinkVpc +func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + return out, req.Send() +} + +// AttachClassicLinkVpcWithContext is the same as AttachClassicLinkVpc with the addition of +// the ability to pass a context and additional request options. +// +// See AttachClassicLinkVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachClassicLinkVpcWithContext(ctx aws.Context, input *AttachClassicLinkVpcInput, opts ...request.Option) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachInternetGateway = "AttachInternetGateway" + +// AttachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachInternetGateway for more information on using the AttachInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachInternetGatewayRequest method. 
+// req, resp := client.AttachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachInternetGateway +func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { + op := &request.Operation{ + Name: opAttachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachInternetGatewayInput{} + } + + output = &AttachInternetGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AttachInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// Attaches an internet gateway or a virtual private gateway to a VPC, enabling +// connectivity between the internet and the VPC. For more information about +// your VPC and internet gateway, see the Amazon Virtual Private Cloud User +// Guide (https://docs.aws.amazon.com/vpc/latest/userguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AttachInternetGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachInternetGateway +func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + return out, req.Send() +} + +// AttachInternetGatewayWithContext is the same as AttachInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See AttachInternetGateway for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachInternetGatewayWithContext(ctx aws.Context, input *AttachInternetGatewayInput, opts ...request.Option) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachNetworkInterface = "AttachNetworkInterface" + +// AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AttachNetworkInterface operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachNetworkInterface for more information on using the AttachNetworkInterface +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachNetworkInterfaceRequest method. 
+// req, resp := client.AttachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachNetworkInterface +func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opAttachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachNetworkInterfaceInput{} + } + + output = &AttachNetworkInterfaceOutput{} + req = c.newRequest(op, input, output) + return +} + +// AttachNetworkInterface API operation for Amazon Elastic Compute Cloud. +// +// Attaches a network interface to an instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AttachNetworkInterface for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachNetworkInterface +func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + return out, req.Send() +} + +// AttachNetworkInterfaceWithContext is the same as AttachNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See AttachNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) AttachNetworkInterfaceWithContext(ctx aws.Context, input *AttachNetworkInterfaceInput, opts ...request.Option) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachVolume = "AttachVolume" + +// AttachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the AttachVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachVolume for more information on using the AttachVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachVolumeRequest method. +// req, resp := client.AttachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVolume +func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opAttachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVolumeInput{} + } + + output = &VolumeAttachment{} + req = c.newRequest(op, input, output) + return +} + +// AttachVolume API operation for Amazon Elastic Compute Cloud. +// +// Attaches an EBS volume to a running or stopped instance and exposes it to +// the instance with the specified device name. +// +// Encrypted EBS volumes must be attached to instances that support Amazon EBS +// encryption. 
For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// After you attach an EBS volume, you must make it available. For more information, +// see Making an EBS volume available for use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). +// +// If a volume has an AWS Marketplace product code: +// +// * The volume can be attached only to a stopped instance. +// +// * AWS Marketplace product codes are copied from the volume to the instance. +// +// * You must be subscribed to the product. +// +// * The instance type and operating system of the instance must support +// the product. For example, you can't detach a volume from a Windows instance +// and attach it to a Linux instance. +// +// For more information, see Attaching Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AttachVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVolume +func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + return out, req.Send() +} + +// AttachVolumeWithContext is the same as AttachVolume with the addition of +// the ability to pass a context and additional request options. +// +// See AttachVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachVolumeWithContext(ctx aws.Context, input *AttachVolumeInput, opts ...request.Option) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachVpnGateway = "AttachVpnGateway" + +// AttachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachVpnGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachVpnGateway for more information on using the AttachVpnGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachVpnGatewayRequest method. +// req, resp := client.AttachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVpnGateway +func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { + op := &request.Operation{ + Name: opAttachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachVpnGatewayInput{} + } + + output = &AttachVpnGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// AttachVpnGateway API operation for Amazon Elastic Compute Cloud. +// +// Attaches a virtual private gateway to a VPC. 
You can attach one virtual private +// gateway to one VPC at a time. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AttachVpnGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVpnGateway +func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + return out, req.Send() +} + +// AttachVpnGatewayWithContext is the same as AttachVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See AttachVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachVpnGatewayWithContext(ctx aws.Context, input *AttachVpnGatewayInput, opts ...request.Option) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAuthorizeClientVpnIngress = "AuthorizeClientVpnIngress" + +// AuthorizeClientVpnIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeClientVpnIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AuthorizeClientVpnIngress for more information on using the AuthorizeClientVpnIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AuthorizeClientVpnIngressRequest method. +// req, resp := client.AuthorizeClientVpnIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeClientVpnIngress +func (c *EC2) AuthorizeClientVpnIngressRequest(input *AuthorizeClientVpnIngressInput) (req *request.Request, output *AuthorizeClientVpnIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeClientVpnIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeClientVpnIngressInput{} + } + + output = &AuthorizeClientVpnIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// AuthorizeClientVpnIngress API operation for Amazon Elastic Compute Cloud. +// +// Adds an ingress authorization rule to a Client VPN endpoint. Ingress authorization +// rules act as firewall rules that grant access to networks. You must configure +// ingress authorization rules to enable clients to access resources in AWS +// or on-premises networks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AuthorizeClientVpnIngress for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeClientVpnIngress +func (c *EC2) AuthorizeClientVpnIngress(input *AuthorizeClientVpnIngressInput) (*AuthorizeClientVpnIngressOutput, error) { + req, out := c.AuthorizeClientVpnIngressRequest(input) + return out, req.Send() +} + +// AuthorizeClientVpnIngressWithContext is the same as AuthorizeClientVpnIngress with the addition of +// the ability to pass a context and additional request options. +// +// See AuthorizeClientVpnIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AuthorizeClientVpnIngressWithContext(ctx aws.Context, input *AuthorizeClientVpnIngressInput, opts ...request.Option) (*AuthorizeClientVpnIngressOutput, error) { + req, out := c.AuthorizeClientVpnIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" + +// AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupEgress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AuthorizeSecurityGroupEgress for more information on using the AuthorizeSecurityGroupEgress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the AuthorizeSecurityGroupEgressRequest method. +// req, resp := client.AuthorizeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupEgress +func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupEgressInput{} + } + + output = &AuthorizeSecurityGroupEgressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AuthorizeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. +// +// [VPC only] Adds the specified egress rules to a security group for use with +// a VPC. +// +// An outbound rule permits instances to send traffic to the specified IPv4 +// or IPv6 CIDR address ranges, or to the instances associated with the specified +// destination security groups. +// +// You specify a protocol for each rule (for example, TCP). For the TCP and +// UDP protocols, you must also specify the destination port or port range. +// For the ICMP protocol, you must also specify the ICMP type and code. You +// can use -1 for the type or code to mean all types or all codes. +// +// Rule changes are propagated to affected instances as quickly as possible. +// However, a small delay might occur. +// +// For more information about VPC security group limits, see Amazon VPC Limits +// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AuthorizeSecurityGroupEgress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupEgress +func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + return out, req.Send() +} + +// AuthorizeSecurityGroupEgressWithContext is the same as AuthorizeSecurityGroupEgress with the addition of +// the ability to pass a context and additional request options. +// +// See AuthorizeSecurityGroupEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AuthorizeSecurityGroupEgressWithContext(ctx aws.Context, input *AuthorizeSecurityGroupEgressInput, opts ...request.Option) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" + +// AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AuthorizeSecurityGroupIngress for more information on using the AuthorizeSecurityGroupIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AuthorizeSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupIngress +func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opAuthorizeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AuthorizeSecurityGroupIngressInput{} + } + + output = &AuthorizeSecurityGroupIngressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AuthorizeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud. +// +// Adds the specified ingress rules to a security group. +// +// An inbound rule permits instances to receive traffic from the specified IPv4 +// or IPv6 CIDR address ranges, or from the instances associated with the specified +// destination security groups. +// +// You specify a protocol for each rule (for example, TCP). For TCP and UDP, +// you must also specify the destination port or port range. For ICMP/ICMPv6, +// you must also specify the ICMP/ICMPv6 type and code. You can use -1 to mean +// all types or all codes. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. 
+// +// For more information about VPC security group limits, see Amazon VPC Limits +// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AuthorizeSecurityGroupIngress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupIngress +func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + return out, req.Send() +} + +// AuthorizeSecurityGroupIngressWithContext is the same as AuthorizeSecurityGroupIngress with the addition of +// the ability to pass a context and additional request options. +// +// See AuthorizeSecurityGroupIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AuthorizeSecurityGroupIngressWithContext(ctx aws.Context, input *AuthorizeSecurityGroupIngressInput, opts ...request.Option) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBundleInstance = "BundleInstance" + +// BundleInstanceRequest generates a "aws/request.Request" representing the +// client's request for the BundleInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BundleInstance for more information on using the BundleInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BundleInstanceRequest method. +// req, resp := client.BundleInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleInstance +func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { + op := &request.Operation{ + Name: opBundleInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BundleInstanceInput{} + } + + output = &BundleInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// BundleInstance API operation for Amazon Elastic Compute Cloud. +// +// Bundles an Amazon instance store-backed Windows instance. +// +// During bundling, only the root device volume (C:\) is bundled. Data on other +// instance store volumes is not preserved. +// +// This action is not applicable for Linux/Unix instances or Windows instances +// that are backed by Amazon EBS. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation BundleInstance for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleInstance +func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + return out, req.Send() +} + +// BundleInstanceWithContext is the same as BundleInstance with the addition of +// the ability to pass a context and additional request options. +// +// See BundleInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) BundleInstanceWithContext(ctx aws.Context, input *BundleInstanceInput, opts ...request.Option) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelBundleTask = "CancelBundleTask" + +// CancelBundleTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelBundleTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelBundleTask for more information on using the CancelBundleTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelBundleTaskRequest method. 
+// req, resp := client.CancelBundleTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelBundleTask +func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { + op := &request.Operation{ + Name: opCancelBundleTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelBundleTaskInput{} + } + + output = &CancelBundleTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelBundleTask API operation for Amazon Elastic Compute Cloud. +// +// Cancels a bundling operation for an instance store-backed Windows instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelBundleTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelBundleTask +func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + return out, req.Send() +} + +// CancelBundleTaskWithContext is the same as CancelBundleTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelBundleTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CancelBundleTaskWithContext(ctx aws.Context, input *CancelBundleTaskInput, opts ...request.Option) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelCapacityReservation = "CancelCapacityReservation" + +// CancelCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the CancelCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelCapacityReservation for more information on using the CancelCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelCapacityReservationRequest method. +// req, resp := client.CancelCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelCapacityReservation +func (c *EC2) CancelCapacityReservationRequest(input *CancelCapacityReservationInput) (req *request.Request, output *CancelCapacityReservationOutput) { + op := &request.Operation{ + Name: opCancelCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelCapacityReservationInput{} + } + + output = &CancelCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelCapacityReservation API operation for Amazon Elastic Compute Cloud. 
+// +// Cancels the specified Capacity Reservation, releases the reserved capacity, +// and changes the Capacity Reservation's state to cancelled. +// +// Instances running in the reserved capacity continue running until you stop +// them. Stopped instances that target the Capacity Reservation can no longer +// launch. Modify these instances to either target a different Capacity Reservation, +// launch On-Demand Instance capacity, or run in any open Capacity Reservation +// that has matching attributes and sufficient capacity. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelCapacityReservation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelCapacityReservation +func (c *EC2) CancelCapacityReservation(input *CancelCapacityReservationInput) (*CancelCapacityReservationOutput, error) { + req, out := c.CancelCapacityReservationRequest(input) + return out, req.Send() +} + +// CancelCapacityReservationWithContext is the same as CancelCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See CancelCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelCapacityReservationWithContext(ctx aws.Context, input *CancelCapacityReservationInput, opts ...request.Option) (*CancelCapacityReservationOutput, error) { + req, out := c.CancelCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCancelConversionTask = "CancelConversionTask" + +// CancelConversionTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelConversionTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelConversionTask for more information on using the CancelConversionTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelConversionTaskRequest method. +// req, resp := client.CancelConversionTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelConversionTask +func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { + op := &request.Operation{ + Name: opCancelConversionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelConversionTaskInput{} + } + + output = &CancelConversionTaskOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelConversionTask API operation for Amazon Elastic Compute Cloud. +// +// Cancels an active conversion task. The task can be the import of an instance +// or volume. The action removes all artifacts of the conversion, including +// a partially uploaded volume or instance. 
If the conversion is complete or +// is in the process of transferring the final disk image, the command fails +// and returns an exception. +// +// For more information, see Importing a Virtual Machine Using the Amazon EC2 +// CLI (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelConversionTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelConversionTask +func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + return out, req.Send() +} + +// CancelConversionTaskWithContext is the same as CancelConversionTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelConversionTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelConversionTaskWithContext(ctx aws.Context, input *CancelConversionTaskInput, opts ...request.Option) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelExportTask = "CancelExportTask" + +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelExportTask for more information on using the CancelExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelExportTask +func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { + op := &request.Operation{ + Name: opCancelExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelExportTaskInput{} + } + + output = &CancelExportTaskOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelExportTask API operation for Amazon Elastic Compute Cloud. +// +// Cancels an active export task. The request removes all artifacts of the export, +// including any partially-created Amazon S3 objects. If the export task is +// complete or is in the process of transferring the final disk image, the command +// fails and returns an error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelExportTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelExportTask +func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + return out, req.Send() +} + +// CancelExportTaskWithContext is the same as CancelExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTaskInput, opts ...request.Option) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelImportTask = "CancelImportTask" + +// CancelImportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelImportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelImportTask for more information on using the CancelImportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CancelImportTaskRequest method. +// req, resp := client.CancelImportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTask +func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { + op := &request.Operation{ + Name: opCancelImportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelImportTaskInput{} + } + + output = &CancelImportTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelImportTask API operation for Amazon Elastic Compute Cloud. +// +// Cancels an in-process import virtual machine or import snapshot task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelImportTask for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTask +func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + return out, req.Send() +} + +// CancelImportTaskWithContext is the same as CancelImportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelImportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CancelImportTaskWithContext(ctx aws.Context, input *CancelImportTaskInput, opts ...request.Option) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelReservedInstancesListing = "CancelReservedInstancesListing" + +// CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CancelReservedInstancesListing operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelReservedInstancesListing for more information on using the CancelReservedInstancesListing +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelReservedInstancesListingRequest method. 
+// req, resp := client.CancelReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelReservedInstancesListing +func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCancelReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReservedInstancesListingInput{} + } + + output = &CancelReservedInstancesListingOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelReservedInstancesListing API operation for Amazon Elastic Compute Cloud. +// +// Cancels the specified Reserved Instance listing in the Reserved Instance +// Marketplace. +// +// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelReservedInstancesListing for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelReservedInstancesListing +func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + return out, req.Send() +} + +// CancelReservedInstancesListingWithContext is the same as CancelReservedInstancesListing with the addition of +// the ability to pass a context and additional request options. 
+// +// See CancelReservedInstancesListing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelReservedInstancesListingWithContext(ctx aws.Context, input *CancelReservedInstancesListingInput, opts ...request.Option) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelSpotFleetRequests = "CancelSpotFleetRequests" + +// CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotFleetRequests operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelSpotFleetRequests for more information on using the CancelSpotFleetRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelSpotFleetRequestsRequest method. 
+// req, resp := client.CancelSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequests +func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotFleetRequestsInput{} + } + + output = &CancelSpotFleetRequestsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelSpotFleetRequests API operation for Amazon Elastic Compute Cloud. +// +// Cancels the specified Spot Fleet requests. +// +// After you cancel a Spot Fleet request, the Spot Fleet launches no new Spot +// Instances. You must specify whether the Spot Fleet should also terminate +// its Spot Instances. If you terminate the instances, the Spot Fleet request +// enters the cancelled_terminating state. Otherwise, the Spot Fleet request +// enters the cancelled_running state and the instances continue to run until +// they are interrupted or you terminate them manually. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelSpotFleetRequests for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequests +func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + return out, req.Send() +} + +// CancelSpotFleetRequestsWithContext is the same as CancelSpotFleetRequests with the addition of +// the ability to pass a context and additional request options. +// +// See CancelSpotFleetRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelSpotFleetRequestsWithContext(ctx aws.Context, input *CancelSpotFleetRequestsInput, opts ...request.Option) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" + +// CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotInstanceRequests operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelSpotInstanceRequests for more information on using the CancelSpotInstanceRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CancelSpotInstanceRequestsRequest method. +// req, resp := client.CancelSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotInstanceRequests +func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opCancelSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelSpotInstanceRequestsInput{} + } + + output = &CancelSpotInstanceRequestsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelSpotInstanceRequests API operation for Amazon Elastic Compute Cloud. +// +// Cancels one or more Spot Instance requests. +// +// Canceling a Spot Instance request does not terminate running Spot Instances +// associated with the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CancelSpotInstanceRequests for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotInstanceRequests +func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + return out, req.Send() +} + +// CancelSpotInstanceRequestsWithContext is the same as CancelSpotInstanceRequests with the addition of +// the ability to pass a context and additional request options. +// +// See CancelSpotInstanceRequests for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelSpotInstanceRequestsWithContext(ctx aws.Context, input *CancelSpotInstanceRequestsInput, opts ...request.Option) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opConfirmProductInstance = "ConfirmProductInstance" + +// ConfirmProductInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmProductInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ConfirmProductInstance for more information on using the ConfirmProductInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ConfirmProductInstanceRequest method. 
+// req, resp := client.ConfirmProductInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConfirmProductInstance +func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { + op := &request.Operation{ + Name: opConfirmProductInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ConfirmProductInstanceInput{} + } + + output = &ConfirmProductInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ConfirmProductInstance API operation for Amazon Elastic Compute Cloud. +// +// Determines whether a product code is associated with an instance. This action +// can only be used by the owner of the product code. It is useful when a product +// code owner must verify whether another user's instance is eligible for support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ConfirmProductInstance for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConfirmProductInstance +func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + return out, req.Send() +} + +// ConfirmProductInstanceWithContext is the same as ConfirmProductInstance with the addition of +// the ability to pass a context and additional request options. +// +// See ConfirmProductInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ConfirmProductInstanceWithContext(ctx aws.Context, input *ConfirmProductInstanceInput, opts ...request.Option) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyFpgaImage = "CopyFpgaImage" + +// CopyFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyFpgaImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyFpgaImage for more information on using the CopyFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyFpgaImageRequest method. +// req, resp := client.CopyFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImageRequest(input *CopyFpgaImageInput) (req *request.Request, output *CopyFpgaImageOutput) { + op := &request.Operation{ + Name: opCopyFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyFpgaImageInput{} + } + + output = &CopyFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyFpgaImage API operation for Amazon Elastic Compute Cloud. 
+// +// Copies the specified Amazon FPGA Image (AFI) to the current Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopyFpgaImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImage(input *CopyFpgaImageInput) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + return out, req.Send() +} + +// CopyFpgaImageWithContext is the same as CopyFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyFpgaImageWithContext(ctx aws.Context, input *CopyFpgaImageInput, opts ...request.Option) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyImage = "CopyImage" + +// CopyImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyImage for more information on using the CopyImage +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyImageRequest method. +// req, resp := client.CopyImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImage +func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { + op := &request.Operation{ + Name: opCopyImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyImageInput{} + } + + output = &CopyImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyImage API operation for Amazon Elastic Compute Cloud. +// +// Initiates the copy of an AMI from the specified source Region to the current +// Region. You specify the destination Region by using its endpoint when making +// the request. +// +// Copies of encrypted backing snapshots for the AMI are encrypted. Copies of +// unencrypted backing snapshots remain unencrypted, unless you set Encrypted +// during the copy operation. You cannot create an unencrypted copy of an encrypted +// backing snapshot. +// +// For more information about the prerequisites and limits when copying an AMI, +// see Copying an AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopyImage for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImage +func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + return out, req.Send() +} + +// CopyImageWithContext is the same as CopyImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyImageWithContext(ctx aws.Context, input *CopyImageInput, opts ...request.Option) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopySnapshot = "CopySnapshot" + +// CopySnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopySnapshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopySnapshot for more information on using the CopySnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopySnapshotRequest method. 
+// req, resp := client.CopySnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopySnapshot +func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { + op := &request.Operation{ + Name: opCopySnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopySnapshotInput{} + } + + output = &CopySnapshotOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopySnapshot API operation for Amazon Elastic Compute Cloud. +// +// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon +// S3. You can copy the snapshot within the same Region or from one Region to +// another. You can use the snapshot to create EBS volumes or Amazon Machine +// Images (AMIs). +// +// Copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted +// snapshots remain unencrypted, unless you enable encryption for the snapshot +// copy operation. By default, encrypted snapshot copies use the default AWS +// Key Management Service (AWS KMS) customer master key (CMK); however, you +// can specify a different CMK. +// +// To copy an encrypted snapshot that has been shared from another account, +// you must have permissions for the CMK used to encrypt the snapshot. +// +// Snapshots created by copying another snapshot have an arbitrary volume ID +// that should not be used for any purpose. +// +// For more information, see Copying an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopySnapshot for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopySnapshot +func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + return out, req.Send() +} + +// CopySnapshotWithContext is the same as CopySnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See CopySnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshotInput, opts ...request.Option) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCapacityReservation = "CreateCapacityReservation" + +// CreateCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the CreateCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCapacityReservation for more information on using the CreateCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateCapacityReservationRequest method. +// req, resp := client.CreateCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCapacityReservation +func (c *EC2) CreateCapacityReservationRequest(input *CreateCapacityReservationInput) (req *request.Request, output *CreateCapacityReservationOutput) { + op := &request.Operation{ + Name: opCreateCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCapacityReservationInput{} + } + + output = &CreateCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCapacityReservation API operation for Amazon Elastic Compute Cloud. +// +// Creates a new Capacity Reservation with the specified attributes. +// +// Capacity Reservations enable you to reserve capacity for your Amazon EC2 +// instances in a specific Availability Zone for any duration. This gives you +// the flexibility to selectively add capacity reservations and still get the +// Regional RI discounts for that usage. By creating Capacity Reservations, +// you ensure that you always have access to Amazon EC2 capacity when you need +// it, for as long as you need it. For more information, see Capacity Reservations +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Your request to create a Capacity Reservation could fail if Amazon EC2 does +// not have sufficient capacity to fulfill the request. If your request fails +// due to Amazon EC2 capacity constraints, either try again at a later time, +// try in a different Availability Zone, or request a smaller capacity reservation. 
+// If your application is flexible across instance types and sizes, try to create +// a Capacity Reservation with different instance attributes. +// +// Your request could also fail if the requested quantity exceeds your On-Demand +// Instance limit for the selected instance type. If your request fails due +// to limit constraints, increase your On-Demand Instance limit for the required +// instance type and try again. For more information about increasing your instance +// limits, see Amazon EC2 Service Limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateCapacityReservation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCapacityReservation +func (c *EC2) CreateCapacityReservation(input *CreateCapacityReservationInput) (*CreateCapacityReservationOutput, error) { + req, out := c.CreateCapacityReservationRequest(input) + return out, req.Send() +} + +// CreateCapacityReservationWithContext is the same as CreateCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateCapacityReservationWithContext(ctx aws.Context, input *CreateCapacityReservationInput, opts ...request.Option) (*CreateCapacityReservationOutput, error) { + req, out := c.CreateCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCarrierGateway = "CreateCarrierGateway" + +// CreateCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCarrierGateway for more information on using the CreateCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCarrierGatewayRequest method. +// req, resp := client.CreateCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGatewayRequest(input *CreateCarrierGatewayInput) (req *request.Request, output *CreateCarrierGatewayOutput) { + op := &request.Operation{ + Name: opCreateCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCarrierGatewayInput{} + } + + output = &CreateCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a carrier gateway. 
For more information about carrier gateways, see +// Carrier gateways (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#wavelength-carrier-gateway) +// in the AWS Wavelength Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGateway(input *CreateCarrierGatewayInput) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + return out, req.Send() +} + +// CreateCarrierGatewayWithContext is the same as CreateCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateCarrierGatewayWithContext(ctx aws.Context, input *CreateCarrierGatewayInput, opts ...request.Option) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateClientVpnEndpoint = "CreateClientVpnEndpoint" + +// CreateClientVpnEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateClientVpnEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateClientVpnEndpoint for more information on using the CreateClientVpnEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateClientVpnEndpointRequest method. +// req, resp := client.CreateClientVpnEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateClientVpnEndpoint +func (c *EC2) CreateClientVpnEndpointRequest(input *CreateClientVpnEndpointInput) (req *request.Request, output *CreateClientVpnEndpointOutput) { + op := &request.Operation{ + Name: opCreateClientVpnEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClientVpnEndpointInput{} + } + + output = &CreateClientVpnEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateClientVpnEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Creates a Client VPN endpoint. A Client VPN endpoint is the resource you +// create and configure to enable and manage client VPN sessions. It is the +// destination endpoint at which all client VPN sessions are terminated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateClientVpnEndpoint for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateClientVpnEndpoint +func (c *EC2) CreateClientVpnEndpoint(input *CreateClientVpnEndpointInput) (*CreateClientVpnEndpointOutput, error) { + req, out := c.CreateClientVpnEndpointRequest(input) + return out, req.Send() +} + +// CreateClientVpnEndpointWithContext is the same as CreateClientVpnEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateClientVpnEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateClientVpnEndpointWithContext(ctx aws.Context, input *CreateClientVpnEndpointInput, opts ...request.Option) (*CreateClientVpnEndpointOutput, error) { + req, out := c.CreateClientVpnEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateClientVpnRoute = "CreateClientVpnRoute" + +// CreateClientVpnRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateClientVpnRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateClientVpnRoute for more information on using the CreateClientVpnRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateClientVpnRouteRequest method. 
+// req, resp := client.CreateClientVpnRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateClientVpnRoute +func (c *EC2) CreateClientVpnRouteRequest(input *CreateClientVpnRouteInput) (req *request.Request, output *CreateClientVpnRouteOutput) { + op := &request.Operation{ + Name: opCreateClientVpnRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateClientVpnRouteInput{} + } + + output = &CreateClientVpnRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateClientVpnRoute API operation for Amazon Elastic Compute Cloud. +// +// Adds a route to a network to a Client VPN endpoint. Each Client VPN endpoint +// has a route table that describes the available destination network routes. +// Each route in the route table specifies the path for traffic to specific +// resources or networks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateClientVpnRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateClientVpnRoute +func (c *EC2) CreateClientVpnRoute(input *CreateClientVpnRouteInput) (*CreateClientVpnRouteOutput, error) { + req, out := c.CreateClientVpnRouteRequest(input) + return out, req.Send() +} + +// CreateClientVpnRouteWithContext is the same as CreateClientVpnRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateClientVpnRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateClientVpnRouteWithContext(ctx aws.Context, input *CreateClientVpnRouteInput, opts ...request.Option) (*CreateClientVpnRouteOutput, error) { + req, out := c.CreateClientVpnRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCustomerGateway = "CreateCustomerGateway" + +// CreateCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomerGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCustomerGateway for more information on using the CreateCustomerGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCustomerGatewayRequest method. 
+// req, resp := client.CreateCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCustomerGateway +func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { + op := &request.Operation{ + Name: opCreateCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomerGatewayInput{} + } + + output = &CreateCustomerGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCustomerGateway API operation for Amazon Elastic Compute Cloud. +// +// Provides information to AWS about your VPN customer gateway device. The customer +// gateway is the appliance at your end of the VPN connection. (The device on +// the AWS side of the VPN connection is the virtual private gateway.) You must +// provide the internet-routable IP address of the customer gateway's external +// interface. The IP address must be static and can be behind a device performing +// network address translation (NAT). +// +// For devices that use Border Gateway Protocol (BGP), you can also provide +// the device's BGP Autonomous System Number (ASN). You can use an existing +// ASN assigned to your network. If you don't have an ASN already, you can use +// a private ASN (in the 64512 - 65534 range). +// +// Amazon EC2 supports all 4-byte ASN numbers in the range of 1 - 2147483647, +// with the exception of the following: +// +// * 7224 - reserved in the us-east-1 Region +// +// * 9059 - reserved in the eu-west-1 Region +// +// * 17943 - reserved in the ap-southeast-1 Region +// +// * 10124 - reserved in the ap-northeast-1 Region +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. 
+// +// To create more than one customer gateway with the same VPN type, IP address, +// and BGP ASN, specify a unique device name for each customer gateway. Identical +// requests return information about the existing customer gateway and do not +// create new customer gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateCustomerGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCustomerGateway +func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + return out, req.Send() +} + +// CreateCustomerGatewayWithContext is the same as CreateCustomerGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomerGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateCustomerGatewayWithContext(ctx aws.Context, input *CreateCustomerGatewayInput, opts ...request.Option) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDefaultSubnet = "CreateDefaultSubnet" + +// CreateDefaultSubnetRequest generates a "aws/request.Request" representing the +// client's request for the CreateDefaultSubnet operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDefaultSubnet for more information on using the CreateDefaultSubnet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDefaultSubnetRequest method. +// req, resp := client.CreateDefaultSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultSubnet +func (c *EC2) CreateDefaultSubnetRequest(input *CreateDefaultSubnetInput) (req *request.Request, output *CreateDefaultSubnetOutput) { + op := &request.Operation{ + Name: opCreateDefaultSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDefaultSubnetInput{} + } + + output = &CreateDefaultSubnetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDefaultSubnet API operation for Amazon Elastic Compute Cloud. +// +// Creates a default subnet with a size /20 IPv4 CIDR block in the specified +// Availability Zone in your default VPC. You can have only one default subnet +// per Availability Zone. For more information, see Creating a Default Subnet +// (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html#create-default-subnet) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateDefaultSubnet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultSubnet +func (c *EC2) CreateDefaultSubnet(input *CreateDefaultSubnetInput) (*CreateDefaultSubnetOutput, error) { + req, out := c.CreateDefaultSubnetRequest(input) + return out, req.Send() +} + +// CreateDefaultSubnetWithContext is the same as CreateDefaultSubnet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDefaultSubnet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateDefaultSubnetWithContext(ctx aws.Context, input *CreateDefaultSubnetInput, opts ...request.Option) (*CreateDefaultSubnetOutput, error) { + req, out := c.CreateDefaultSubnetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDefaultVpc = "CreateDefaultVpc" + +// CreateDefaultVpcRequest generates a "aws/request.Request" representing the +// client's request for the CreateDefaultVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDefaultVpc for more information on using the CreateDefaultVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateDefaultVpcRequest method. +// req, resp := client.CreateDefaultVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpc +func (c *EC2) CreateDefaultVpcRequest(input *CreateDefaultVpcInput) (req *request.Request, output *CreateDefaultVpcOutput) { + op := &request.Operation{ + Name: opCreateDefaultVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDefaultVpcInput{} + } + + output = &CreateDefaultVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDefaultVpc API operation for Amazon Elastic Compute Cloud. +// +// Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet +// in each Availability Zone. For more information about the components of a +// default VPC, see Default VPC and Default Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html) +// in the Amazon Virtual Private Cloud User Guide. You cannot specify the components +// of the default VPC yourself. +// +// If you deleted your previous default VPC, you can create a default VPC. You +// cannot have more than one default VPC per Region. +// +// If your account supports EC2-Classic, you cannot use this action to create +// a default VPC in a Region that supports EC2-Classic. If you want a default +// VPC in a Region that supports EC2-Classic, see "I really want a default VPC +// for my existing EC2 account. Is that possible?" in the Default VPCs FAQ (http://aws.amazon.com/vpc/faqs/#Default_VPCs). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateDefaultVpc for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpc +func (c *EC2) CreateDefaultVpc(input *CreateDefaultVpcInput) (*CreateDefaultVpcOutput, error) { + req, out := c.CreateDefaultVpcRequest(input) + return out, req.Send() +} + +// CreateDefaultVpcWithContext is the same as CreateDefaultVpc with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDefaultVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateDefaultVpcWithContext(ctx aws.Context, input *CreateDefaultVpcInput, opts ...request.Option) (*CreateDefaultVpcOutput, error) { + req, out := c.CreateDefaultVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDhcpOptions = "CreateDhcpOptions" + +// CreateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the CreateDhcpOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDhcpOptions for more information on using the CreateDhcpOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateDhcpOptionsRequest method. +// req, resp := client.CreateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptions +func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { + op := &request.Operation{ + Name: opCreateDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDhcpOptionsInput{} + } + + output = &CreateDhcpOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDhcpOptions API operation for Amazon Elastic Compute Cloud. +// +// Creates a set of DHCP options for your VPC. After creating the set, you must +// associate it with the VPC, causing all existing and new instances that you +// launch in the VPC to use this set of DHCP options. The following are the +// individual DHCP options you can specify. For more information about the options, +// see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// +// * domain-name-servers - The IP addresses of up to four domain name servers, +// or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. +// If specifying more than one domain name server, specify the IP addresses +// in a single parameter, separated by commas. To have your instance receive +// a custom DNS hostname as specified in domain-name, you must set domain-name-servers +// to a custom DNS server. +// +// * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify +// ec2.internal. If you're using AmazonProvidedDNS in another Region, specify +// region.compute.internal (for example, ap-northeast-1.compute.internal). +// Otherwise, specify a domain name (for example, ExampleCompany.com). This +// value is used to complete unqualified DNS hostnames. 
Important: Some Linux +// operating systems accept multiple domain names separated by spaces. However, +// Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set +// is associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. +// +// * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) +// servers. +// +// * netbios-name-servers - The IP addresses of up to four NetBIOS name servers. +// +// * netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend +// that you specify 2 (broadcast and multicast are not currently supported). +// For more information about these node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// +// Your VPC automatically starts out with a set of DHCP options that includes +// only a DNS server that we provide (AmazonProvidedDNS). If you create a set +// of options, and if your VPC has an internet gateway, make sure to set the +// domain-name-servers option either to AmazonProvidedDNS or to a domain name +// server of your choice. For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateDhcpOptions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptions +func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + return out, req.Send() +} + +// CreateDhcpOptionsWithContext is the same as CreateDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateDhcpOptionsWithContext(ctx aws.Context, input *CreateDhcpOptionsInput, opts ...request.Option) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateEgressOnlyInternetGateway = "CreateEgressOnlyInternetGateway" + +// CreateEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateEgressOnlyInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateEgressOnlyInternetGateway for more information on using the CreateEgressOnlyInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateEgressOnlyInternetGatewayRequest method. 
+// req, resp := client.CreateEgressOnlyInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateEgressOnlyInternetGateway +func (c *EC2) CreateEgressOnlyInternetGatewayRequest(input *CreateEgressOnlyInternetGatewayInput) (req *request.Request, output *CreateEgressOnlyInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateEgressOnlyInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateEgressOnlyInternetGatewayInput{} + } + + output = &CreateEgressOnlyInternetGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateEgressOnlyInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// [IPv6 only] Creates an egress-only internet gateway for your VPC. An egress-only +// internet gateway is used to enable outbound communication over IPv6 from +// instances in your VPC to the internet, and prevents hosts outside of your +// VPC from initiating an IPv6 connection with your instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateEgressOnlyInternetGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateEgressOnlyInternetGateway +func (c *EC2) CreateEgressOnlyInternetGateway(input *CreateEgressOnlyInternetGatewayInput) (*CreateEgressOnlyInternetGatewayOutput, error) { + req, out := c.CreateEgressOnlyInternetGatewayRequest(input) + return out, req.Send() +} + +// CreateEgressOnlyInternetGatewayWithContext is the same as CreateEgressOnlyInternetGateway with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateEgressOnlyInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateEgressOnlyInternetGatewayWithContext(ctx aws.Context, input *CreateEgressOnlyInternetGatewayInput, opts ...request.Option) (*CreateEgressOnlyInternetGatewayOutput, error) { + req, out := c.CreateEgressOnlyInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFleet = "CreateFleet" + +// CreateFleetRequest generates a "aws/request.Request" representing the +// client's request for the CreateFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFleet for more information on using the CreateFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFleetRequest method. 
+// req, resp := client.CreateFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFleet +func (c *EC2) CreateFleetRequest(input *CreateFleetInput) (req *request.Request, output *CreateFleetOutput) { + op := &request.Operation{ + Name: opCreateFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFleetInput{} + } + + output = &CreateFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFleet API operation for Amazon Elastic Compute Cloud. +// +// Launches an EC2 Fleet. +// +// You can create a single EC2 Fleet that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// For more information, see Launching an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateFleet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFleet +func (c *EC2) CreateFleet(input *CreateFleetInput) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + return out, req.Send() +} + +// CreateFleetWithContext is the same as CreateFleet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) { + req, out := c.CreateFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFlowLogs = "CreateFlowLogs" + +// CreateFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the CreateFlowLogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFlowLogs for more information on using the CreateFlowLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFlowLogsRequest method. +// req, resp := client.CreateFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFlowLogs +func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) { + op := &request.Operation{ + Name: opCreateFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFlowLogsInput{} + } + + output = &CreateFlowLogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFlowLogs API operation for Amazon Elastic Compute Cloud. +// +// Creates one or more flow logs to capture information about IP traffic for +// a specific network interface, subnet, or VPC. 
+// +// Flow log data for a monitored network interface is recorded as flow log records, +// which are log events consisting of fields that describe the traffic flow. +// For more information, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) +// in the Amazon Virtual Private Cloud User Guide. +// +// When publishing to CloudWatch Logs, flow log records are published to a log +// group, and each network interface has a unique log stream in the log group. +// When publishing to Amazon S3, flow log records for all of the monitored network +// interfaces are published to a single log file object that is stored in the +// specified bucket. +// +// For more information, see VPC Flow Logs (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateFlowLogs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFlowLogs +func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + return out, req.Send() +} + +// CreateFlowLogsWithContext is the same as CreateFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFlowLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateFlowLogsWithContext(ctx aws.Context, input *CreateFlowLogsInput, opts ...request.Option) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFpgaImage = "CreateFpgaImage" + +// CreateFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateFpgaImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFpgaImage for more information on using the CreateFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFpgaImageRequest method. +// req, resp := client.CreateFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImage +func (c *EC2) CreateFpgaImageRequest(input *CreateFpgaImageInput) (req *request.Request, output *CreateFpgaImageOutput) { + op := &request.Operation{ + Name: opCreateFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFpgaImageInput{} + } + + output = &CreateFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Creates an Amazon FPGA Image (AFI) from the specified design checkpoint (DCP). +// +// The create operation is asynchronous. To verify that the AFI is ready for +// use, check the output logs. 
+// +// An AFI contains the FPGA bitstream that is ready to download to an FPGA. +// You can securely deploy an AFI on multiple FPGA-accelerated instances. For +// more information, see the AWS FPGA Hardware Development Kit (https://github.com/aws/aws-fpga/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateFpgaImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImage +func (c *EC2) CreateFpgaImage(input *CreateFpgaImageInput) (*CreateFpgaImageOutput, error) { + req, out := c.CreateFpgaImageRequest(input) + return out, req.Send() +} + +// CreateFpgaImageWithContext is the same as CreateFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateFpgaImageWithContext(ctx aws.Context, input *CreateFpgaImageInput, opts ...request.Option) (*CreateFpgaImageOutput, error) { + req, out := c.CreateFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateImage = "CreateImage" + +// CreateImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateImage for more information on using the CreateImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateImageRequest method. +// req, resp := client.CreateImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImage +func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { + op := &request.Operation{ + Name: opCreateImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateImageInput{} + } + + output = &CreateImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateImage API operation for Amazon Elastic Compute Cloud. +// +// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that +// is either running or stopped. +// +// If you customized your instance with instance store volumes or EBS volumes +// in addition to the root device volume, the new AMI contains block device +// mapping information for those volumes. When you launch an instance from this +// new AMI, the instance automatically launches with those additional volumes. +// +// For more information, see Creating Amazon EBS-Backed Linux AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImage +func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + return out, req.Send() +} + +// CreateImageWithContext is the same as CreateImage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateImageWithContext(ctx aws.Context, input *CreateImageInput, opts ...request.Option) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateInstanceExportTask = "CreateInstanceExportTask" + +// CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceExportTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInstanceExportTask for more information on using the CreateInstanceExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateInstanceExportTaskRequest method. +// req, resp := client.CreateInstanceExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceExportTask +func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { + op := &request.Operation{ + Name: opCreateInstanceExportTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceExportTaskInput{} + } + + output = &CreateInstanceExportTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInstanceExportTask API operation for Amazon Elastic Compute Cloud. +// +// Exports a running or stopped instance to an Amazon S3 bucket. +// +// For information about the supported operating systems, image formats, and +// known limitations for the types of instances you can export, see Exporting +// an Instance as a VM Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateInstanceExportTask for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceExportTask +func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + return out, req.Send() +} + +// CreateInstanceExportTaskWithContext is the same as CreateInstanceExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInstanceExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateInstanceExportTaskWithContext(ctx aws.Context, input *CreateInstanceExportTaskInput, opts ...request.Option) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateInternetGateway = "CreateInternetGateway" + +// CreateInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInternetGateway for more information on using the CreateInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateInternetGatewayRequest method. +// req, resp := client.CreateInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInternetGateway +func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { + op := &request.Operation{ + Name: opCreateInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInternetGatewayInput{} + } + + output = &CreateInternetGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates an internet gateway for use with a VPC. After creating the internet +// gateway, you attach it to a VPC using AttachInternetGateway. +// +// For more information about your VPC and internet gateway, see the Amazon +// Virtual Private Cloud User Guide (https://docs.aws.amazon.com/vpc/latest/userguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateInternetGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInternetGateway +func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + return out, req.Send() +} + +// CreateInternetGatewayWithContext is the same as CreateInternetGateway with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateInternetGatewayWithContext(ctx aws.Context, input *CreateInternetGatewayInput, opts ...request.Option) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateKeyPair = "CreateKeyPair" + +// CreateKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the CreateKeyPair operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateKeyPair for more information on using the CreateKeyPair +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateKeyPairRequest method. 
+// req, resp := client.CreateKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateKeyPair +func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { + op := &request.Operation{ + Name: opCreateKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateKeyPairInput{} + } + + output = &CreateKeyPairOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateKeyPair API operation for Amazon Elastic Compute Cloud. +// +// Creates a 2048-bit RSA key pair with the specified name. Amazon EC2 stores +// the public key and displays the private key for you to save to a file. The +// private key is returned as an unencrypted PEM encoded PKCS#1 private key. +// If a key with the specified name already exists, Amazon EC2 returns an error. +// +// You can have up to five thousand key pairs per Region. +// +// The key pair returned to you is available only in the Region in which you +// create it. If you prefer, you can create your own key pair using a third-party +// tool and upload it to any Region using ImportKeyPair. +// +// For more information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateKeyPair for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateKeyPair +func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + return out, req.Send() +} + +// CreateKeyPairWithContext is the same as CreateKeyPair with the addition of +// the ability to pass a context and additional request options. +// +// See CreateKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateKeyPairWithContext(ctx aws.Context, input *CreateKeyPairInput, opts ...request.Option) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLaunchTemplate = "CreateLaunchTemplate" + +// CreateLaunchTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateLaunchTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLaunchTemplate for more information on using the CreateLaunchTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLaunchTemplateRequest method. 
+// req, resp := client.CreateLaunchTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplate +func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req *request.Request, output *CreateLaunchTemplateOutput) { + op := &request.Operation{ + Name: opCreateLaunchTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLaunchTemplateInput{} + } + + output = &CreateLaunchTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLaunchTemplate API operation for Amazon Elastic Compute Cloud. +// +// Creates a launch template. A launch template contains the parameters to launch +// an instance. When you launch an instance using RunInstances, you can specify +// a launch template instead of providing the launch parameters in the request. +// For more information, see Launching an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) in +// the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateLaunchTemplate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplate +func (c *EC2) CreateLaunchTemplate(input *CreateLaunchTemplateInput) (*CreateLaunchTemplateOutput, error) { + req, out := c.CreateLaunchTemplateRequest(input) + return out, req.Send() +} + +// CreateLaunchTemplateWithContext is the same as CreateLaunchTemplate with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateLaunchTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateLaunchTemplateWithContext(ctx aws.Context, input *CreateLaunchTemplateInput, opts ...request.Option) (*CreateLaunchTemplateOutput, error) { + req, out := c.CreateLaunchTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLaunchTemplateVersion = "CreateLaunchTemplateVersion" + +// CreateLaunchTemplateVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateLaunchTemplateVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLaunchTemplateVersion for more information on using the CreateLaunchTemplateVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLaunchTemplateVersionRequest method. 
+// req, resp := client.CreateLaunchTemplateVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateVersion +func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVersionInput) (req *request.Request, output *CreateLaunchTemplateVersionOutput) { + op := &request.Operation{ + Name: opCreateLaunchTemplateVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLaunchTemplateVersionInput{} + } + + output = &CreateLaunchTemplateVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLaunchTemplateVersion API operation for Amazon Elastic Compute Cloud. +// +// Creates a new version for a launch template. You can specify an existing +// version of launch template from which to base the new version. +// +// Launch template versions are numbered in the order in which they are created. +// You cannot specify, change, or replace the numbering of launch template versions. +// +// For more information, see Managing launch template versions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions) in +// the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateLaunchTemplateVersion for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLaunchTemplateVersion +func (c *EC2) CreateLaunchTemplateVersion(input *CreateLaunchTemplateVersionInput) (*CreateLaunchTemplateVersionOutput, error) { + req, out := c.CreateLaunchTemplateVersionRequest(input) + return out, req.Send() +} + +// CreateLaunchTemplateVersionWithContext is the same as CreateLaunchTemplateVersion with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLaunchTemplateVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateLaunchTemplateVersionWithContext(ctx aws.Context, input *CreateLaunchTemplateVersionInput, opts ...request.Option) (*CreateLaunchTemplateVersionOutput, error) { + req, out := c.CreateLaunchTemplateVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLocalGatewayRoute = "CreateLocalGatewayRoute" + +// CreateLocalGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocalGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocalGatewayRoute for more information on using the CreateLocalGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateLocalGatewayRouteRequest method. +// req, resp := client.CreateLocalGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLocalGatewayRoute +func (c *EC2) CreateLocalGatewayRouteRequest(input *CreateLocalGatewayRouteInput) (req *request.Request, output *CreateLocalGatewayRouteOutput) { + op := &request.Operation{ + Name: opCreateLocalGatewayRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocalGatewayRouteInput{} + } + + output = &CreateLocalGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocalGatewayRoute API operation for Amazon Elastic Compute Cloud. +// +// Creates a static route for the specified local gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateLocalGatewayRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLocalGatewayRoute +func (c *EC2) CreateLocalGatewayRoute(input *CreateLocalGatewayRouteInput) (*CreateLocalGatewayRouteOutput, error) { + req, out := c.CreateLocalGatewayRouteRequest(input) + return out, req.Send() +} + +// CreateLocalGatewayRouteWithContext is the same as CreateLocalGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocalGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateLocalGatewayRouteWithContext(ctx aws.Context, input *CreateLocalGatewayRouteInput, opts ...request.Option) (*CreateLocalGatewayRouteOutput, error) { + req, out := c.CreateLocalGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLocalGatewayRouteTableVpcAssociation = "CreateLocalGatewayRouteTableVpcAssociation" + +// CreateLocalGatewayRouteTableVpcAssociationRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocalGatewayRouteTableVpcAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocalGatewayRouteTableVpcAssociation for more information on using the CreateLocalGatewayRouteTableVpcAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocalGatewayRouteTableVpcAssociationRequest method. 
+// req, resp := client.CreateLocalGatewayRouteTableVpcAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLocalGatewayRouteTableVpcAssociation +func (c *EC2) CreateLocalGatewayRouteTableVpcAssociationRequest(input *CreateLocalGatewayRouteTableVpcAssociationInput) (req *request.Request, output *CreateLocalGatewayRouteTableVpcAssociationOutput) { + op := &request.Operation{ + Name: opCreateLocalGatewayRouteTableVpcAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocalGatewayRouteTableVpcAssociationInput{} + } + + output = &CreateLocalGatewayRouteTableVpcAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocalGatewayRouteTableVpcAssociation API operation for Amazon Elastic Compute Cloud. +// +// Associates the specified VPC with the specified local gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateLocalGatewayRouteTableVpcAssociation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateLocalGatewayRouteTableVpcAssociation +func (c *EC2) CreateLocalGatewayRouteTableVpcAssociation(input *CreateLocalGatewayRouteTableVpcAssociationInput) (*CreateLocalGatewayRouteTableVpcAssociationOutput, error) { + req, out := c.CreateLocalGatewayRouteTableVpcAssociationRequest(input) + return out, req.Send() +} + +// CreateLocalGatewayRouteTableVpcAssociationWithContext is the same as CreateLocalGatewayRouteTableVpcAssociation with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateLocalGatewayRouteTableVpcAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Context, input *CreateLocalGatewayRouteTableVpcAssociationInput, opts ...request.Option) (*CreateLocalGatewayRouteTableVpcAssociationOutput, error) { + req, out := c.CreateLocalGatewayRouteTableVpcAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateManagedPrefixList = "CreateManagedPrefixList" + +// CreateManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the CreateManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateManagedPrefixList for more information on using the CreateManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateManagedPrefixListRequest method. 
+// req, resp := client.CreateManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixListRequest(input *CreateManagedPrefixListInput) (req *request.Request, output *CreateManagedPrefixListOutput) { + op := &request.Operation{ + Name: opCreateManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateManagedPrefixListInput{} + } + + output = &CreateManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Creates a managed prefix list. You can specify one or more entries for the +// prefix list. Each entry consists of a CIDR block and an optional description. +// +// You must specify the maximum number of entries for the prefix list. The maximum +// number of entries cannot be changed later. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixList(input *CreateManagedPrefixListInput) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + return out, req.Send() +} + +// CreateManagedPrefixListWithContext is the same as CreateManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See CreateManagedPrefixList for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateManagedPrefixListWithContext(ctx aws.Context, input *CreateManagedPrefixListInput, opts ...request.Option) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNatGateway = "CreateNatGateway" + +// CreateNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateNatGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNatGateway for more information on using the CreateNatGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNatGatewayRequest method. 
+// req, resp := client.CreateNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNatGateway +func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) { + op := &request.Operation{ + Name: opCreateNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNatGatewayInput{} + } + + output = &CreateNatGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNatGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a NAT gateway in the specified public subnet. This action creates +// a network interface in the specified subnet with a private IP address from +// the IP address range of the subnet. Internet-bound traffic from a private +// subnet can be routed to the NAT gateway, therefore enabling instances in +// the private subnet to connect to the internet. For more information, see +// NAT Gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNatGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNatGateway +func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + return out, req.Send() +} + +// CreateNatGatewayWithContext is the same as CreateNatGateway with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateNatGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNatGatewayWithContext(ctx aws.Context, input *CreateNatGatewayInput, opts ...request.Option) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNetworkAcl = "CreateNetworkAcl" + +// CreateNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNetworkAcl for more information on using the CreateNetworkAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNetworkAclRequest method. 
+// req, resp := client.CreateNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAcl +func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { + op := &request.Operation{ + Name: opCreateNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclInput{} + } + + output = &CreateNetworkAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNetworkAcl API operation for Amazon Elastic Compute Cloud. +// +// Creates a network ACL in a VPC. Network ACLs provide an optional layer of +// security (in addition to security groups) for the instances in your VPC. +// +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAcl +func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + return out, req.Send() +} + +// CreateNetworkAclWithContext is the same as CreateNetworkAcl with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkAclWithContext(ctx aws.Context, input *CreateNetworkAclInput, opts ...request.Option) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNetworkAclEntry = "CreateNetworkAclEntry" + +// CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAclEntry operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNetworkAclEntry for more information on using the CreateNetworkAclEntry +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNetworkAclEntryRequest method. 
+// req, resp := client.CreateNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclEntry +func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opCreateNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkAclEntryInput{} + } + + output = &CreateNetworkAclEntryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateNetworkAclEntry API operation for Amazon Elastic Compute Cloud. +// +// Creates an entry (a rule) in a network ACL with the specified rule number. +// Each network ACL has a set of numbered ingress rules and a separate set of +// numbered egress rules. When determining whether a packet should be allowed +// in or out of a subnet associated with the ACL, we process the entries in +// the ACL according to the rule numbers, in ascending order. Each network ACL +// has a set of ingress rules and a separate set of egress rules. +// +// We recommend that you leave room between the rule numbers (for example, 100, +// 110, 120, ...), and not number them one right after the other (for example, +// 101, 102, 103, ...). This makes it easier to add a rule between existing +// ones without having to renumber the rules. +// +// After you add an entry, you can't modify it; you must either replace it, +// or create an entry and delete the old one. +// +// For more information about network ACLs, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkAclEntry for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclEntry +func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) { + req, out := c.CreateNetworkAclEntryRequest(input) + return out, req.Send() +} + +// CreateNetworkAclEntryWithContext is the same as CreateNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkAclEntryWithContext(ctx aws.Context, input *CreateNetworkAclEntryInput, opts ...request.Option) (*CreateNetworkAclEntryOutput, error) { + req, out := c.CreateNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNetworkInterface = "CreateNetworkInterface" + +// CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkInterface operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNetworkInterface for more information on using the CreateNetworkInterface +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNetworkInterfaceRequest method. +// req, resp := client.CreateNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterface +func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfaceInput{} + } + + output = &CreateNetworkInterfaceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNetworkInterface API operation for Amazon Elastic Compute Cloud. +// +// Creates a network interface in the specified subnet. +// +// For more information about network interfaces, see Elastic Network Interfaces +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the +// Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkInterface for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterface +func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + return out, req.Send() +} + +// CreateNetworkInterfaceWithContext is the same as CreateNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkInterfaceWithContext(ctx aws.Context, input *CreateNetworkInterfaceInput, opts ...request.Option) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNetworkInterfacePermission = "CreateNetworkInterfacePermission" + +// CreateNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkInterfacePermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNetworkInterfacePermission for more information on using the CreateNetworkInterfacePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateNetworkInterfacePermissionRequest method. +// req, resp := client.CreateNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermissionRequest(input *CreateNetworkInterfacePermissionInput) (req *request.Request, output *CreateNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfacePermissionInput{} + } + + output = &CreateNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Grants an AWS-authorized account permission to attach the specified network +// interface to an instance in their account. +// +// You can grant permission to a single AWS account only, and only one account +// at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkInterfacePermission for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermission(input *CreateNetworkInterfacePermissionInput) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// CreateNetworkInterfacePermissionWithContext is the same as CreateNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkInterfacePermissionWithContext(ctx aws.Context, input *CreateNetworkInterfacePermissionInput, opts ...request.Option) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePlacementGroup = "CreatePlacementGroup" + +// CreatePlacementGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlacementGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePlacementGroup for more information on using the CreatePlacementGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePlacementGroupRequest method. +// req, resp := client.CreatePlacementGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroup +func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { + op := &request.Operation{ + Name: opCreatePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePlacementGroupInput{} + } + + output = &CreatePlacementGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePlacementGroup API operation for Amazon Elastic Compute Cloud. +// +// Creates a placement group in which to launch instances. The strategy of the +// placement group determines how the instances are organized within the group. +// +// A cluster placement group is a logical grouping of instances within a single +// Availability Zone that benefit from low network latency, high network throughput. +// A spread placement group places instances on distinct hardware. A partition +// placement group places groups of instances in different partitions, where +// instances in one partition do not share the same hardware with instances +// in another partition. +// +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreatePlacementGroup for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroup +func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + return out, req.Send() +} + +// CreatePlacementGroupWithContext is the same as CreatePlacementGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePlacementGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreatePlacementGroupWithContext(ctx aws.Context, input *CreatePlacementGroupInput, opts ...request.Option) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateReservedInstancesListing = "CreateReservedInstancesListing" + +// CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CreateReservedInstancesListing operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateReservedInstancesListing for more information on using the CreateReservedInstancesListing +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateReservedInstancesListingRequest method. +// req, resp := client.CreateReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReservedInstancesListing +func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { + op := &request.Operation{ + Name: opCreateReservedInstancesListing, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReservedInstancesListingInput{} + } + + output = &CreateReservedInstancesListingOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateReservedInstancesListing API operation for Amazon Elastic Compute Cloud. +// +// Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in +// the Reserved Instance Marketplace. You can submit one Standard Reserved Instance +// listing at a time. To get a list of your Standard Reserved Instances, you +// can use the DescribeReservedInstances operation. +// +// Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. +// Convertible Reserved Instances cannot be sold. +// +// The Reserved Instance Marketplace matches sellers who want to resell Standard +// Reserved Instance capacity that they no longer need with buyers who want +// to purchase additional capacity. Reserved Instances bought and sold through +// the Reserved Instance Marketplace work like any other Reserved Instances. +// +// To sell your Standard Reserved Instances, you must first register as a seller +// in the Reserved Instance Marketplace. 
After completing the registration process, +// you can create a Reserved Instance Marketplace listing of some or all of +// your Standard Reserved Instances, and specify the upfront price to receive +// for them. Your Standard Reserved Instance listings then become available +// for purchase. To view the details of your Standard Reserved Instance listing, +// you can use the DescribeReservedInstancesListings operation. +// +// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateReservedInstancesListing for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReservedInstancesListing +func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + return out, req.Send() +} + +// CreateReservedInstancesListingWithContext is the same as CreateReservedInstancesListing with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReservedInstancesListing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateReservedInstancesListingWithContext(ctx aws.Context, input *CreateReservedInstancesListingInput, opts ...request.Option) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRoute = "CreateRoute" + +// CreateRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRoute for more information on using the CreateRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRouteRequest method. +// req, resp := client.CreateRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRoute +func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { + op := &request.Operation{ + Name: opCreateRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteInput{} + } + + output = &CreateRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRoute API operation for Amazon Elastic Compute Cloud. +// +// Creates a route in a route table within a VPC. 
+// +// You must specify one of the following targets: internet gateway or virtual +// private gateway, NAT instance, NAT gateway, VPC peering connection, network +// interface, egress-only internet gateway, or transit gateway. +// +// When determining how to route traffic, we use the route with the most specific +// match. For example, traffic is destined for the IPv4 address 192.0.2.3, and +// the route table includes the following two IPv4 routes: +// +// * 192.0.2.0/24 (goes to some target A) +// +// * 192.0.2.0/28 (goes to some target B) +// +// Both routes apply to the traffic destined for 192.0.2.3. However, the second +// route in the list covers a smaller number of IP addresses and is therefore +// more specific, so we use that route to determine where to target the traffic. +// +// For more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRoute +func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + return out, req.Send() +} + +// CreateRouteWithContext is the same as CreateRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateRouteWithContext(ctx aws.Context, input *CreateRouteInput, opts ...request.Option) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRouteTable = "CreateRouteTable" + +// CreateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRouteTable for more information on using the CreateRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRouteTableRequest method. +// req, resp := client.CreateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteTable +func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { + op := &request.Operation{ + Name: opCreateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRouteTableInput{} + } + + output = &CreateRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Creates a route table for the specified VPC. 
After you create a route table, +// you can add routes and associate the table with a subnet. +// +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteTable +func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + return out, req.Send() +} + +// CreateRouteTableWithContext is the same as CreateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateRouteTableWithContext(ctx aws.Context, input *CreateRouteTableInput, opts ...request.Option) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSecurityGroup = "CreateSecurityGroup" + +// CreateSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateSecurityGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSecurityGroup for more information on using the CreateSecurityGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSecurityGroupRequest method. +// req, resp := client.CreateSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSecurityGroup +func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { + op := &request.Operation{ + Name: opCreateSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecurityGroupInput{} + } + + output = &CreateSecurityGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSecurityGroup API operation for Amazon Elastic Compute Cloud. +// +// Creates a security group. +// +// A security group acts as a virtual firewall for your instance to control +// inbound and outbound traffic. For more information, see Amazon EC2 Security +// Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// When you create a security group, you specify a friendly name of your choice. +// You can have a security group for use in EC2-Classic with the same name as +// a security group for use in a VPC. 
However, you can't have two security groups +// for use in EC2-Classic with the same name or two security groups for use +// in a VPC with the same name. +// +// You have a default security group for use in EC2-Classic and a default security +// group for use in your VPC. If you don't specify a security group when you +// launch an instance, the instance is launched into the appropriate default +// security group. A default security group includes a default rule that grants +// instances unrestricted network access to each other. +// +// You can add or remove rules from your security groups using AuthorizeSecurityGroupIngress, +// AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. +// +// For more information about VPC security group limits, see Amazon VPC Limits +// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSecurityGroup for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSecurityGroup +func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + return out, req.Send() +} + +// CreateSecurityGroupWithContext is the same as CreateSecurityGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSecurityGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSecurityGroupWithContext(ctx aws.Context, input *CreateSecurityGroupInput, opts ...request.Option) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSnapshot = "CreateSnapshot" + +// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSnapshot for more information on using the CreateSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSnapshotRequest method. +// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshot +func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { + op := &request.Operation{ + Name: opCreateSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotInput{} + } + + output = &Snapshot{} + req = c.newRequest(op, input, output) + return +} + +// CreateSnapshot API operation for Amazon Elastic Compute Cloud. +// +// Creates a snapshot of an EBS volume and stores it in Amazon S3. 
You can use +// snapshots for backups, to make copies of EBS volumes, and to save data before +// shutting down an instance. +// +// When a snapshot is created, any AWS Marketplace product codes that are associated +// with the source volume are propagated to the snapshot. +// +// You can take a snapshot of an attached volume that is in use. However, snapshots +// only capture data that has been written to your EBS volume at the time the +// snapshot command is issued; this might exclude any data that has been cached +// by any applications or the operating system. If you can pause any file systems +// on the volume long enough to take a snapshot, your snapshot should be complete. +// However, if you cannot pause all file writes to the volume, you should unmount +// the volume from within the instance, issue the snapshot command, and then +// remount the volume to ensure a consistent and complete snapshot. You may +// remount and use your volume while the snapshot status is pending. +// +// To create a snapshot for EBS volumes that serve as root devices, you should +// stop the instance before taking the snapshot. +// +// Snapshots that are taken from encrypted volumes are automatically encrypted. +// Volumes that are created from encrypted snapshots are also automatically +// encrypted. Your encrypted volumes and any associated snapshots always remain +// protected. +// +// You can tag your snapshots during creation. For more information, see Tagging +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Amazon Elastic Block Store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// and Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSnapshot for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshot +func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + return out, req.Send() +} + +// CreateSnapshotWithContext is the same as CreateSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotInput, opts ...request.Option) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSnapshots = "CreateSnapshots" + +// CreateSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSnapshots for more information on using the CreateSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSnapshotsRequest method. +// req, resp := client.CreateSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshotsRequest(input *CreateSnapshotsInput) (req *request.Request, output *CreateSnapshotsOutput) { + op := &request.Operation{ + Name: opCreateSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotsInput{} + } + + output = &CreateSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSnapshots API operation for Amazon Elastic Compute Cloud. +// +// Creates crash-consistent snapshots of multiple EBS volumes and stores the +// data in S3. Volumes are chosen by specifying an instance. Any attached volumes +// will produce one snapshot each that is crash-consistent across the instance. +// Boot volumes can be excluded by changing the parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSnapshots for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshots(input *CreateSnapshotsInput) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + return out, req.Send() +} + +// CreateSnapshotsWithContext is the same as CreateSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSnapshotsWithContext(ctx aws.Context, input *CreateSnapshotsInput, opts ...request.Option) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" + +// CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSpotDatafeedSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSpotDatafeedSubscription for more information on using the CreateSpotDatafeedSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSpotDatafeedSubscriptionRequest method. 
+// req, resp := client.CreateSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSpotDatafeedSubscription +func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opCreateSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSpotDatafeedSubscriptionInput{} + } + + output = &CreateSpotDatafeedSubscriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. +// +// Creates a data feed for Spot Instances, enabling you to view Spot Instance +// usage logs. You can create one data feed per AWS account. For more information, +// see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon EC2 User Guide for Linux Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSpotDatafeedSubscription for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSpotDatafeedSubscription +func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + return out, req.Send() +} + +// CreateSpotDatafeedSubscriptionWithContext is the same as CreateSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *CreateSpotDatafeedSubscriptionInput, opts ...request.Option) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSubnet = "CreateSubnet" + +// CreateSubnetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSubnet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSubnet for more information on using the CreateSubnet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSubnetRequest method. 
+// req, resp := client.CreateSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnet +func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { + op := &request.Operation{ + Name: opCreateSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSubnetInput{} + } + + output = &CreateSubnetOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSubnet API operation for Amazon Elastic Compute Cloud. +// +// Creates a subnet in a specified VPC. +// +// You must specify an IPv4 CIDR block for the subnet. After you create a subnet, +// you can't change its CIDR block. The allowed block size is between a /16 +// netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR +// block must not overlap with the CIDR block of an existing subnet in the VPC. +// +// If you've associated an IPv6 CIDR block with your VPC, you can create a subnet +// with an IPv6 CIDR block that uses a /64 prefix length. +// +// AWS reserves both the first four and the last IPv4 address in each subnet's +// CIDR block. They're not available for use. +// +// If you add more than one subnet to a VPC, they're set up in a star topology +// with a logical router in the middle. +// +// When you stop an instance in a subnet, it retains its private IPv4 address. +// It's therefore possible to have a subnet with no running instances (they're +// all stopped), but no remaining IP addresses available. +// +// For more information about subnets, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSubnet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnet +func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + return out, req.Send() +} + +// CreateSubnetWithContext is the same as CreateSubnet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSubnet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSubnetWithContext(ctx aws.Context, input *CreateSubnetInput, opts ...request.Option) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTags for more information on using the CreateTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTags +func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + output = &CreateTagsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateTags API operation for Amazon Elastic Compute Cloud. +// +// Adds or overwrites only the specified tags for the specified Amazon EC2 resource +// or resources. When you specify an existing tag key, the value is overwritten +// with the new value. Each resource can have a maximum of 50 tags. Each tag +// consists of a key and optional value. Tag keys must be unique per resource. +// +// For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. For more information about +// creating IAM policies that control users' access to resources based on tags, +// see Supported Resource-Level Permissions for Amazon EC2 API Actions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-iam-actions-resources.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTags for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTags +func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + return out, req.Send() +} + +// CreateTagsWithContext is the same as CreateTags with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput, opts ...request.Option) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorFilter = "CreateTrafficMirrorFilter" + +// CreateTrafficMirrorFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorFilter for more information on using the CreateTrafficMirrorFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorFilterRequest method. 
+// req, resp := client.CreateTrafficMirrorFilterRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilter
+func (c *EC2) CreateTrafficMirrorFilterRequest(input *CreateTrafficMirrorFilterInput) (req *request.Request, output *CreateTrafficMirrorFilterOutput) {
+ op := &request.Operation{
+ Name: opCreateTrafficMirrorFilter,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateTrafficMirrorFilterInput{}
+ }
+
+ output = &CreateTrafficMirrorFilterOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateTrafficMirrorFilter API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a Traffic Mirror filter.
+//
+// A Traffic Mirror filter is a set of rules that defines the traffic to mirror.
+//
+// By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorFilterRule.html)
+// to add Traffic Mirror rules to the filter. The rules you add define what
+// traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyTrafficMirrorFilterNetworkServices.html)
+// to mirror supported network services.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateTrafficMirrorFilter for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilter +func (c *EC2) CreateTrafficMirrorFilter(input *CreateTrafficMirrorFilterInput) (*CreateTrafficMirrorFilterOutput, error) { + req, out := c.CreateTrafficMirrorFilterRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorFilterWithContext is the same as CreateTrafficMirrorFilter with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorFilterWithContext(ctx aws.Context, input *CreateTrafficMirrorFilterInput, opts ...request.Option) (*CreateTrafficMirrorFilterOutput, error) { + req, out := c.CreateTrafficMirrorFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorFilterRule = "CreateTrafficMirrorFilterRule" + +// CreateTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorFilterRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorFilterRule for more information on using the CreateTrafficMirrorFilterRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateTrafficMirrorFilterRuleRequest method. +// req, resp := client.CreateTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilterRule +func (c *EC2) CreateTrafficMirrorFilterRuleRequest(input *CreateTrafficMirrorFilterRuleInput) (req *request.Request, output *CreateTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opCreateTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTrafficMirrorFilterRuleInput{} + } + + output = &CreateTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Creates a Traffic Mirror filter rule. +// +// A Traffic Mirror rule defines the Traffic Mirror source traffic to mirror. +// +// You need the Traffic Mirror filter ID when you create the rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTrafficMirrorFilterRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorFilterRule +func (c *EC2) CreateTrafficMirrorFilterRule(input *CreateTrafficMirrorFilterRuleInput) (*CreateTrafficMirrorFilterRuleOutput, error) { + req, out := c.CreateTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorFilterRuleWithContext is the same as CreateTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *CreateTrafficMirrorFilterRuleInput, opts ...request.Option) (*CreateTrafficMirrorFilterRuleOutput, error) { + req, out := c.CreateTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorSession = "CreateTrafficMirrorSession" + +// CreateTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorSession for more information on using the CreateTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTrafficMirrorSessionRequest method. 
+// req, resp := client.CreateTrafficMirrorSessionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorSession
+func (c *EC2) CreateTrafficMirrorSessionRequest(input *CreateTrafficMirrorSessionInput) (req *request.Request, output *CreateTrafficMirrorSessionOutput) {
+ op := &request.Operation{
+ Name: opCreateTrafficMirrorSession,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateTrafficMirrorSessionInput{}
+ }
+
+ output = &CreateTrafficMirrorSessionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateTrafficMirrorSession API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a Traffic Mirror session.
+//
+// A Traffic Mirror session actively copies packets from a Traffic Mirror source
+// to a Traffic Mirror target. Create a filter, and then assign it to the session
+// to define a subset of the traffic to mirror, for example all TCP traffic.
+//
+// The Traffic Mirror source and the Traffic Mirror target (monitoring appliances)
+// can be in the same VPC, or in a different VPC connected via VPC peering or
+// a transit gateway.
+//
+// By default, no traffic is mirrored. Use CreateTrafficMirrorFilter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorFilter.html)
+// to create filter rules that specify the traffic to mirror.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateTrafficMirrorSession for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorSession +func (c *EC2) CreateTrafficMirrorSession(input *CreateTrafficMirrorSessionInput) (*CreateTrafficMirrorSessionOutput, error) { + req, out := c.CreateTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorSessionWithContext is the same as CreateTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorSessionWithContext(ctx aws.Context, input *CreateTrafficMirrorSessionInput, opts ...request.Option) (*CreateTrafficMirrorSessionOutput, error) { + req, out := c.CreateTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTrafficMirrorTarget = "CreateTrafficMirrorTarget" + +// CreateTrafficMirrorTargetRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficMirrorTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTrafficMirrorTarget for more information on using the CreateTrafficMirrorTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//
+// // Example sending a request using the CreateTrafficMirrorTargetRequest method.
+// req, resp := client.CreateTrafficMirrorTargetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorTarget
+func (c *EC2) CreateTrafficMirrorTargetRequest(input *CreateTrafficMirrorTargetInput) (req *request.Request, output *CreateTrafficMirrorTargetOutput) {
+ op := &request.Operation{
+ Name: opCreateTrafficMirrorTarget,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateTrafficMirrorTargetInput{}
+ }
+
+ output = &CreateTrafficMirrorTargetOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateTrafficMirrorTarget API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a target for your Traffic Mirror session.
+//
+// A Traffic Mirror target is the destination for mirrored traffic. The Traffic
+// Mirror source and the Traffic Mirror target (monitoring appliances) can be
+// in the same VPC, or in different VPCs connected via VPC peering or a transit
+// gateway.
+//
+// A Traffic Mirror target can be a network interface, or a Network Load Balancer.
+//
+// To use the target in a Traffic Mirror session, use CreateTrafficMirrorSession
+// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTrafficMirrorSession.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation CreateTrafficMirrorTarget for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTrafficMirrorTarget +func (c *EC2) CreateTrafficMirrorTarget(input *CreateTrafficMirrorTargetInput) (*CreateTrafficMirrorTargetOutput, error) { + req, out := c.CreateTrafficMirrorTargetRequest(input) + return out, req.Send() +} + +// CreateTrafficMirrorTargetWithContext is the same as CreateTrafficMirrorTarget with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTrafficMirrorTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTrafficMirrorTargetWithContext(ctx aws.Context, input *CreateTrafficMirrorTargetInput, opts ...request.Option) (*CreateTrafficMirrorTargetOutput, error) { + req, out := c.CreateTrafficMirrorTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGateway = "CreateTransitGateway" + +// CreateTransitGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGateway for more information on using the CreateTransitGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateTransitGatewayRequest method. +// req, resp := client.CreateTransitGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGateway +func (c *EC2) CreateTransitGatewayRequest(input *CreateTransitGatewayInput) (req *request.Request, output *CreateTransitGatewayOutput) { + op := &request.Operation{ + Name: opCreateTransitGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayInput{} + } + + output = &CreateTransitGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a transit gateway. +// +// You can use a transit gateway to interconnect your virtual private clouds +// (VPC) and on-premises networks. After the transit gateway enters the available +// state, you can attach your VPCs and VPN connections to the transit gateway. +// +// To attach your VPCs, use CreateTransitGatewayVpcAttachment. +// +// To attach a VPN connection, use CreateCustomerGateway to create a customer +// gateway and specify the ID of the customer gateway and the ID of the transit +// gateway in a call to CreateVpnConnection. +// +// When you create a transit gateway, we create a default transit gateway route +// table and use it as the default association route table and the default propagation +// route table. You can use CreateTransitGatewayRouteTable to create additional +// transit gateway route tables. If you disable automatic route propagation, +// we do not create a default transit gateway route table. You can use EnableTransitGatewayRouteTablePropagation +// to propagate routes from a resource attachment to a transit gateway route +// table. 
If you disable automatic associations, you can use AssociateTransitGatewayRouteTable +// to associate a resource attachment with a transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGateway +func (c *EC2) CreateTransitGateway(input *CreateTransitGatewayInput) (*CreateTransitGatewayOutput, error) { + req, out := c.CreateTransitGatewayRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayWithContext is the same as CreateTransitGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayWithContext(ctx aws.Context, input *CreateTransitGatewayInput, opts ...request.Option) (*CreateTransitGatewayOutput, error) { + req, out := c.CreateTransitGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayMulticastDomain = "CreateTransitGatewayMulticastDomain" + +// CreateTransitGatewayMulticastDomainRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayMulticastDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayMulticastDomain for more information on using the CreateTransitGatewayMulticastDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayMulticastDomainRequest method. +// req, resp := client.CreateTransitGatewayMulticastDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayMulticastDomain +func (c *EC2) CreateTransitGatewayMulticastDomainRequest(input *CreateTransitGatewayMulticastDomainInput) (req *request.Request, output *CreateTransitGatewayMulticastDomainOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayMulticastDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayMulticastDomainInput{} + } + + output = &CreateTransitGatewayMulticastDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayMulticastDomain API operation for Amazon Elastic Compute Cloud. +// +// Creates a multicast domain using the specified transit gateway. +// +// The transit gateway must be in the available state before you create a domain. +// Use DescribeTransitGateways (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) +// to see the state of transit gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayMulticastDomain for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayMulticastDomain +func (c *EC2) CreateTransitGatewayMulticastDomain(input *CreateTransitGatewayMulticastDomainInput) (*CreateTransitGatewayMulticastDomainOutput, error) { + req, out := c.CreateTransitGatewayMulticastDomainRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayMulticastDomainWithContext is the same as CreateTransitGatewayMulticastDomain with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayMulticastDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayMulticastDomainWithContext(ctx aws.Context, input *CreateTransitGatewayMulticastDomainInput, opts ...request.Option) (*CreateTransitGatewayMulticastDomainOutput, error) { + req, out := c.CreateTransitGatewayMulticastDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayPeeringAttachment = "CreateTransitGatewayPeeringAttachment" + +// CreateTransitGatewayPeeringAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayPeeringAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateTransitGatewayPeeringAttachment for more information on using the CreateTransitGatewayPeeringAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayPeeringAttachmentRequest method. +// req, resp := client.CreateTransitGatewayPeeringAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPeeringAttachment +func (c *EC2) CreateTransitGatewayPeeringAttachmentRequest(input *CreateTransitGatewayPeeringAttachmentInput) (req *request.Request, output *CreateTransitGatewayPeeringAttachmentOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayPeeringAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayPeeringAttachmentInput{} + } + + output = &CreateTransitGatewayPeeringAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayPeeringAttachment API operation for Amazon Elastic Compute Cloud. +// +// Requests a transit gateway peering attachment between the specified transit +// gateway (requester) and a peer transit gateway (accepter). The transit gateways +// must be in different Regions. The peer transit gateway can be in your account +// or a different AWS account. +// +// After you create the peering attachment, the owner of the accepter transit +// gateway must accept the attachment request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayPeeringAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPeeringAttachment +func (c *EC2) CreateTransitGatewayPeeringAttachment(input *CreateTransitGatewayPeeringAttachmentInput) (*CreateTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.CreateTransitGatewayPeeringAttachmentRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayPeeringAttachmentWithContext is the same as CreateTransitGatewayPeeringAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayPeeringAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, input *CreateTransitGatewayPeeringAttachmentInput, opts ...request.Option) (*CreateTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.CreateTransitGatewayPeeringAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayPrefixListReference = "CreateTransitGatewayPrefixListReference" + +// CreateTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayPrefixListReference for more information on using the CreateTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.CreateTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReferenceRequest(input *CreateTransitGatewayPrefixListReferenceInput) (req *request.Request, output *CreateTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayPrefixListReferenceInput{} + } + + output = &CreateTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Creates a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayPrefixListReference for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReference(input *CreateTransitGatewayPrefixListReferenceInput) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayPrefixListReferenceWithContext is the same as CreateTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *CreateTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayRoute = "CreateTransitGatewayRoute" + +// CreateTransitGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayRoute for more information on using the CreateTransitGatewayRoute +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayRouteRequest method. +// req, resp := client.CreateTransitGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayRoute +func (c *EC2) CreateTransitGatewayRouteRequest(input *CreateTransitGatewayRouteInput) (req *request.Request, output *CreateTransitGatewayRouteOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayRouteInput{} + } + + output = &CreateTransitGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayRoute API operation for Amazon Elastic Compute Cloud. +// +// Creates a static route for the specified transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayRoute +func (c *EC2) CreateTransitGatewayRoute(input *CreateTransitGatewayRouteInput) (*CreateTransitGatewayRouteOutput, error) { + req, out := c.CreateTransitGatewayRouteRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayRouteWithContext is the same as CreateTransitGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayRoute for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayRouteWithContext(ctx aws.Context, input *CreateTransitGatewayRouteInput, opts ...request.Option) (*CreateTransitGatewayRouteOutput, error) { + req, out := c.CreateTransitGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayRouteTable = "CreateTransitGatewayRouteTable" + +// CreateTransitGatewayRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayRouteTable for more information on using the CreateTransitGatewayRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayRouteTableRequest method. 
+// req, resp := client.CreateTransitGatewayRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayRouteTable +func (c *EC2) CreateTransitGatewayRouteTableRequest(input *CreateTransitGatewayRouteTableInput) (req *request.Request, output *CreateTransitGatewayRouteTableOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayRouteTableInput{} + } + + output = &CreateTransitGatewayRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Creates a route table for the specified transit gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayRouteTable +func (c *EC2) CreateTransitGatewayRouteTable(input *CreateTransitGatewayRouteTableInput) (*CreateTransitGatewayRouteTableOutput, error) { + req, out := c.CreateTransitGatewayRouteTableRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayRouteTableWithContext is the same as CreateTransitGatewayRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayRouteTableWithContext(ctx aws.Context, input *CreateTransitGatewayRouteTableInput, opts ...request.Option) (*CreateTransitGatewayRouteTableOutput, error) { + req, out := c.CreateTransitGatewayRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTransitGatewayVpcAttachment = "CreateTransitGatewayVpcAttachment" + +// CreateTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayVpcAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayVpcAttachment for more information on using the CreateTransitGatewayVpcAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayVpcAttachmentRequest method. 
+// req, resp := client.CreateTransitGatewayVpcAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayVpcAttachment +func (c *EC2) CreateTransitGatewayVpcAttachmentRequest(input *CreateTransitGatewayVpcAttachmentInput) (req *request.Request, output *CreateTransitGatewayVpcAttachmentOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayVpcAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayVpcAttachmentInput{} + } + + output = &CreateTransitGatewayVpcAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayVpcAttachment API operation for Amazon Elastic Compute Cloud. +// +// Attaches the specified VPC to the specified transit gateway. +// +// If you attach a VPC with a CIDR range that overlaps the CIDR range of a VPC +// that is already attached, the new VPC CIDR range is not propagated to the +// default propagation route table. +// +// To send VPC traffic to an attached transit gateway, add a route to the VPC +// route table using CreateRoute. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayVpcAttachment for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayVpcAttachment +func (c *EC2) CreateTransitGatewayVpcAttachment(input *CreateTransitGatewayVpcAttachmentInput) (*CreateTransitGatewayVpcAttachmentOutput, error) { + req, out := c.CreateTransitGatewayVpcAttachmentRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayVpcAttachmentWithContext is the same as CreateTransitGatewayVpcAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayVpcAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTransitGatewayVpcAttachmentWithContext(ctx aws.Context, input *CreateTransitGatewayVpcAttachmentInput, opts ...request.Option) (*CreateTransitGatewayVpcAttachmentOutput, error) { + req, out := c.CreateTransitGatewayVpcAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVolume = "CreateVolume" + +// CreateVolumeRequest generates a "aws/request.Request" representing the +// client's request for the CreateVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVolume for more information on using the CreateVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateVolumeRequest method. +// req, resp := client.CreateVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolume +func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) { + op := &request.Operation{ + Name: opCreateVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVolumeInput{} + } + + output = &Volume{} + req = c.newRequest(op, input, output) + return +} + +// CreateVolume API operation for Amazon Elastic Compute Cloud. +// +// Creates an EBS volume that can be attached to an instance in the same Availability +// Zone. +// +// You can create a new empty volume or restore a volume from an EBS snapshot. +// Any AWS Marketplace product codes from the snapshot are propagated to the +// volume. +// +// You can create encrypted volumes. Encrypted volumes must be attached to instances +// that support Amazon EBS encryption. Volumes that are created from encrypted +// snapshots are also automatically encrypted. For more information, see Amazon +// EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can tag your volumes during creation. For more information, see Tagging +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information, see Creating an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolume +func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + return out, req.Send() +} + +// CreateVolumeWithContext is the same as CreateVolume with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVolumeWithContext(ctx aws.Context, input *CreateVolumeInput, opts ...request.Option) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpc = "CreateVpc" + +// CreateVpcRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpc for more information on using the CreateVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcRequest method. 
+// req, resp := client.CreateVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpc +func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) { + op := &request.Operation{ + Name: opCreateVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcInput{} + } + + output = &CreateVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpc API operation for Amazon Elastic Compute Cloud. +// +// Creates a VPC with the specified IPv4 CIDR block. The smallest VPC you can +// create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 +// netmask (65,536 IPv4 addresses). For more information about how large to +// make your VPC, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can optionally request an IPv6 CIDR block for the VPC. You can request +// an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 addresses, +// or an IPv6 CIDR block from an IPv6 address pool that you provisioned through +// bring your own IP addresses (BYOIP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)). +// +// By default, each instance you launch in the VPC has the default DHCP options, +// which include only a default DNS server that we provide (AmazonProvidedDNS). +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can specify the instance tenancy value for the VPC when you create it. +// You can't change this value for the VPC after you create it. 
For more information, +// see Dedicated Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpc for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpc +func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + return out, req.Send() +} + +// CreateVpcWithContext is the same as CreateVpc with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcWithContext(ctx aws.Context, input *CreateVpcInput, opts ...request.Option) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpcEndpoint = "CreateVpcEndpoint" + +// CreateVpcEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateVpcEndpoint for more information on using the CreateVpcEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcEndpointRequest method. +// req, resp := client.CreateVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpoint +func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointInput{} + } + + output = &CreateVpcEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpcEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Creates a VPC endpoint for a specified service. An endpoint enables you to +// create a private connection between your VPC and the service. The service +// may be provided by AWS, an AWS Marketplace Partner, or another AWS account. +// For more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// A gateway endpoint serves as a target for a route in your route table for +// traffic destined for the AWS service. You can specify an endpoint policy +// to attach to the endpoint, which will control access to the service from +// your VPC. You can also specify the VPC route tables that use the endpoint. +// +// An interface endpoint is a network interface in your subnet that serves as +// an endpoint for communicating with the specified service. 
You can specify +// the subnets in which to create an endpoint, and the security groups to associate +// with the endpoint network interface. +// +// A GatewayLoadBalancer endpoint is a network interface in your subnet that +// serves an endpoint for communicating with a Gateway Load Balancer that you've +// configured as a VPC endpoint service. +// +// Use DescribeVpcEndpointServices to get a list of supported services. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpcEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpoint +func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + return out, req.Send() +} + +// CreateVpcEndpointWithContext is the same as CreateVpcEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcEndpointWithContext(ctx aws.Context, input *CreateVpcEndpointInput, opts ...request.Option) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateVpcEndpointConnectionNotification = "CreateVpcEndpointConnectionNotification" + +// CreateVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcEndpointConnectionNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpcEndpointConnectionNotification for more information on using the CreateVpcEndpointConnectionNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcEndpointConnectionNotificationRequest method. +// req, resp := client.CreateVpcEndpointConnectionNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointConnectionNotification +func (c *EC2) CreateVpcEndpointConnectionNotificationRequest(input *CreateVpcEndpointConnectionNotificationInput) (req *request.Request, output *CreateVpcEndpointConnectionNotificationOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpointConnectionNotification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointConnectionNotificationInput{} + } + + output = &CreateVpcEndpointConnectionNotificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpcEndpointConnectionNotification API operation for Amazon Elastic Compute Cloud. 
+// +// Creates a connection notification for a specified VPC endpoint or VPC endpoint +// service. A connection notification notifies you of specific endpoint events. +// You must create an SNS topic to receive notifications. For more information, +// see Create a Topic (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) +// in the Amazon Simple Notification Service Developer Guide. +// +// You can create a connection notification for interface endpoints only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpcEndpointConnectionNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointConnectionNotification +func (c *EC2) CreateVpcEndpointConnectionNotification(input *CreateVpcEndpointConnectionNotificationInput) (*CreateVpcEndpointConnectionNotificationOutput, error) { + req, out := c.CreateVpcEndpointConnectionNotificationRequest(input) + return out, req.Send() +} + +// CreateVpcEndpointConnectionNotificationWithContext is the same as CreateVpcEndpointConnectionNotification with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcEndpointConnectionNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateVpcEndpointConnectionNotificationWithContext(ctx aws.Context, input *CreateVpcEndpointConnectionNotificationInput, opts ...request.Option) (*CreateVpcEndpointConnectionNotificationOutput, error) { + req, out := c.CreateVpcEndpointConnectionNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpcEndpointServiceConfiguration = "CreateVpcEndpointServiceConfiguration" + +// CreateVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcEndpointServiceConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpcEndpointServiceConfiguration for more information on using the CreateVpcEndpointServiceConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcEndpointServiceConfigurationRequest method. 
+// req, resp := client.CreateVpcEndpointServiceConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointServiceConfiguration +func (c *EC2) CreateVpcEndpointServiceConfigurationRequest(input *CreateVpcEndpointServiceConfigurationInput) (req *request.Request, output *CreateVpcEndpointServiceConfigurationOutput) { + op := &request.Operation{ + Name: opCreateVpcEndpointServiceConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcEndpointServiceConfigurationInput{} + } + + output = &CreateVpcEndpointServiceConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpcEndpointServiceConfiguration API operation for Amazon Elastic Compute Cloud. +// +// Creates a VPC endpoint service configuration to which service consumers (AWS +// accounts, IAM users, and IAM roles) can connect. +// +// To create an endpoint service configuration, you must first create one of +// the following for your service: +// +// * A Network Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html). +// Service consumers connect to your service using an interface endpoint. +// +// * A Gateway Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/introduction.html). +// Service consumers connect to your service using a Gateway Load Balancer +// endpoint. +// +// For more information, see VPC Endpoint Services (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// If you set the private DNS name, you must prove that you own the private +// DNS domain name. 
For more information, see VPC Endpoint Service Private DNS +// Name Verification (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpcEndpointServiceConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpointServiceConfiguration +func (c *EC2) CreateVpcEndpointServiceConfiguration(input *CreateVpcEndpointServiceConfigurationInput) (*CreateVpcEndpointServiceConfigurationOutput, error) { + req, out := c.CreateVpcEndpointServiceConfigurationRequest(input) + return out, req.Send() +} + +// CreateVpcEndpointServiceConfigurationWithContext is the same as CreateVpcEndpointServiceConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcEndpointServiceConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcEndpointServiceConfigurationWithContext(ctx aws.Context, input *CreateVpcEndpointServiceConfigurationInput, opts ...request.Option) (*CreateVpcEndpointServiceConfigurationOutput, error) { + req, out := c.CreateVpcEndpointServiceConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" + +// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcPeeringConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpcPeeringConnection for more information on using the CreateVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpcPeeringConnectionRequest method. +// req, resp := client.CreateVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcPeeringConnection +func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpcPeeringConnectionInput{} + } + + output = &CreateVpcPeeringConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpcPeeringConnection API operation for Amazon Elastic Compute Cloud. +// +// Requests a VPC peering connection between two VPCs: a requester VPC that +// you own and an accepter VPC with which to create the connection. The accepter +// VPC can belong to another AWS account and can be in a different Region to +// the requester VPC. 
The requester VPC and accepter VPC cannot have overlapping +// CIDR blocks. +// +// Limitations and rules apply to a VPC peering connection. For more information, +// see the limitations (https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-limitations) +// section in the VPC Peering Guide. +// +// The owner of the accepter VPC must accept the peering request to activate +// the peering connection. The VPC peering connection request expires after +// 7 days, after which it cannot be accepted or rejected. +// +// If you create a VPC peering connection request between VPCs with overlapping +// CIDR blocks, the VPC peering connection has a status of failed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpcPeeringConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcPeeringConnection +func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// CreateVpcPeeringConnectionWithContext is the same as CreateVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateVpcPeeringConnectionWithContext(ctx aws.Context, input *CreateVpcPeeringConnectionInput, opts ...request.Option) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpnConnection = "CreateVpnConnection" + +// CreateVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpnConnection for more information on using the CreateVpnConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpnConnectionRequest method. +// req, resp := client.CreateVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnection +func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { + op := &request.Operation{ + Name: opCreateVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionInput{} + } + + output = &CreateVpnConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpnConnection API operation for Amazon Elastic Compute Cloud. 
+// +// Creates a VPN connection between an existing virtual private gateway or transit +// gateway and a customer gateway. The supported connection type is ipsec.1. +// +// The response includes information that you need to give to your network administrator +// to configure your customer gateway. +// +// We strongly recommend that you use HTTPS when calling this operation because +// the response contains sensitive cryptographic information for configuring +// your customer gateway device. +// +// If you decide to shut down your VPN connection for any reason and later create +// a new VPN connection, you must reconfigure your customer gateway with the +// new information returned from this call. +// +// This is an idempotent operation. If you perform the operation more than once, +// Amazon EC2 doesn't return an error. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpnConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnection +func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + return out, req.Send() +} + +// CreateVpnConnectionWithContext is the same as CreateVpnConnection with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpnConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnConnectionWithContext(ctx aws.Context, input *CreateVpnConnectionInput, opts ...request.Option) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" + +// CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnectionRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpnConnectionRoute for more information on using the CreateVpnConnectionRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpnConnectionRouteRequest method. 
+// req, resp := client.CreateVpnConnectionRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRoute +func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opCreateVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnConnectionRouteInput{} + } + + output = &CreateVpnConnectionRouteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateVpnConnectionRoute API operation for Amazon Elastic Compute Cloud. +// +// Creates a static route associated with a VPN connection between an existing +// virtual private gateway and a VPN customer gateway. The static route allows +// traffic to be routed from the virtual private gateway to the VPN customer +// gateway. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpnConnectionRoute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRoute +func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + return out, req.Send() +} + +// CreateVpnConnectionRouteWithContext is the same as CreateVpnConnectionRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpnConnectionRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnConnectionRouteWithContext(ctx aws.Context, input *CreateVpnConnectionRouteInput, opts ...request.Option) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVpnGateway = "CreateVpnGateway" + +// CreateVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVpnGateway for more information on using the CreateVpnGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVpnGatewayRequest method. 
+// req, resp := client.CreateVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnGateway +func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { + op := &request.Operation{ + Name: opCreateVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVpnGatewayInput{} + } + + output = &CreateVpnGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVpnGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a virtual private gateway. A virtual private gateway is the endpoint +// on the VPC side of your VPN connection. You can create a virtual private +// gateway before creating the VPC itself. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateVpnGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnGateway +func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + return out, req.Send() +} + +// CreateVpnGatewayWithContext is the same as CreateVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnGatewayWithContext(ctx aws.Context, input *CreateVpnGatewayInput, opts ...request.Option) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteCarrierGateway = "DeleteCarrierGateway" + +// DeleteCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCarrierGateway for more information on using the DeleteCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCarrierGatewayRequest method. 
+// req, resp := client.DeleteCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGatewayRequest(input *DeleteCarrierGatewayInput) (req *request.Request, output *DeleteCarrierGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCarrierGatewayInput{} + } + + output = &DeleteCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes a carrier gateway. +// +// If you do not delete the route that contains the carrier gateway as the Target, +// the route is a blackhole route. For information about how to delete a route, +// see DeleteRoute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteRoute.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGateway(input *DeleteCarrierGatewayInput) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + return out, req.Send() +} + +// DeleteCarrierGatewayWithContext is the same as DeleteCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteCarrierGatewayWithContext(ctx aws.Context, input *DeleteCarrierGatewayInput, opts ...request.Option) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteClientVpnEndpoint = "DeleteClientVpnEndpoint" + +// DeleteClientVpnEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClientVpnEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteClientVpnEndpoint for more information on using the DeleteClientVpnEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteClientVpnEndpointRequest method. 
+// req, resp := client.DeleteClientVpnEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteClientVpnEndpoint +func (c *EC2) DeleteClientVpnEndpointRequest(input *DeleteClientVpnEndpointInput) (req *request.Request, output *DeleteClientVpnEndpointOutput) { + op := &request.Operation{ + Name: opDeleteClientVpnEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClientVpnEndpointInput{} + } + + output = &DeleteClientVpnEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteClientVpnEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Client VPN endpoint. You must disassociate all target +// networks before you can delete a Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteClientVpnEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteClientVpnEndpoint +func (c *EC2) DeleteClientVpnEndpoint(input *DeleteClientVpnEndpointInput) (*DeleteClientVpnEndpointOutput, error) { + req, out := c.DeleteClientVpnEndpointRequest(input) + return out, req.Send() +} + +// DeleteClientVpnEndpointWithContext is the same as DeleteClientVpnEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteClientVpnEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteClientVpnEndpointWithContext(ctx aws.Context, input *DeleteClientVpnEndpointInput, opts ...request.Option) (*DeleteClientVpnEndpointOutput, error) { + req, out := c.DeleteClientVpnEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteClientVpnRoute = "DeleteClientVpnRoute" + +// DeleteClientVpnRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClientVpnRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteClientVpnRoute for more information on using the DeleteClientVpnRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteClientVpnRouteRequest method. +// req, resp := client.DeleteClientVpnRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteClientVpnRoute +func (c *EC2) DeleteClientVpnRouteRequest(input *DeleteClientVpnRouteInput) (req *request.Request, output *DeleteClientVpnRouteOutput) { + op := &request.Operation{ + Name: opDeleteClientVpnRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteClientVpnRouteInput{} + } + + output = &DeleteClientVpnRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteClientVpnRoute API operation for Amazon Elastic Compute Cloud. 
+// +// Deletes a route from a Client VPN endpoint. You can only delete routes that +// you manually added using the CreateClientVpnRoute action. You cannot delete +// routes that were automatically added when associating a subnet. To remove +// routes that have been automatically added, disassociate the target subnet +// from the Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteClientVpnRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteClientVpnRoute +func (c *EC2) DeleteClientVpnRoute(input *DeleteClientVpnRouteInput) (*DeleteClientVpnRouteOutput, error) { + req, out := c.DeleteClientVpnRouteRequest(input) + return out, req.Send() +} + +// DeleteClientVpnRouteWithContext is the same as DeleteClientVpnRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteClientVpnRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteClientVpnRouteWithContext(ctx aws.Context, input *DeleteClientVpnRouteInput, opts ...request.Option) (*DeleteClientVpnRouteOutput, error) { + req, out := c.DeleteClientVpnRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteCustomerGateway = "DeleteCustomerGateway" + +// DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomerGateway operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCustomerGateway for more information on using the DeleteCustomerGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCustomerGatewayRequest method. +// req, resp := client.DeleteCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCustomerGateway +func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCustomerGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomerGatewayInput{} + } + + output = &DeleteCustomerGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteCustomerGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified customer gateway. You must delete the VPN connection +// before you can delete the customer gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteCustomerGateway for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCustomerGateway +func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + return out, req.Send() +} + +// DeleteCustomerGatewayWithContext is the same as DeleteCustomerGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomerGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteCustomerGatewayWithContext(ctx aws.Context, input *DeleteCustomerGatewayInput, opts ...request.Option) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDhcpOptions = "DeleteDhcpOptions" + +// DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDhcpOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDhcpOptions for more information on using the DeleteDhcpOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDhcpOptionsRequest method. 
+// req, resp := client.DeleteDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteDhcpOptions +func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDeleteDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDhcpOptionsInput{} + } + + output = &DeleteDhcpOptionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDhcpOptions API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified set of DHCP options. You must disassociate the set +// of DHCP options before you can delete it. You can disassociate the set of +// DHCP options by associating either a new set of options or the default set +// of options with the VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteDhcpOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteDhcpOptions +func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + return out, req.Send() +} + +// DeleteDhcpOptionsWithContext is the same as DeleteDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteDhcpOptionsWithContext(ctx aws.Context, input *DeleteDhcpOptionsInput, opts ...request.Option) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteEgressOnlyInternetGateway = "DeleteEgressOnlyInternetGateway" + +// DeleteEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEgressOnlyInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteEgressOnlyInternetGateway for more information on using the DeleteEgressOnlyInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteEgressOnlyInternetGatewayRequest method. 
+// req, resp := client.DeleteEgressOnlyInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteEgressOnlyInternetGateway +func (c *EC2) DeleteEgressOnlyInternetGatewayRequest(input *DeleteEgressOnlyInternetGatewayInput) (req *request.Request, output *DeleteEgressOnlyInternetGatewayOutput) { + op := &request.Operation{ + Name: opDeleteEgressOnlyInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteEgressOnlyInternetGatewayInput{} + } + + output = &DeleteEgressOnlyInternetGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteEgressOnlyInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes an egress-only internet gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteEgressOnlyInternetGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteEgressOnlyInternetGateway +func (c *EC2) DeleteEgressOnlyInternetGateway(input *DeleteEgressOnlyInternetGatewayInput) (*DeleteEgressOnlyInternetGatewayOutput, error) { + req, out := c.DeleteEgressOnlyInternetGatewayRequest(input) + return out, req.Send() +} + +// DeleteEgressOnlyInternetGatewayWithContext is the same as DeleteEgressOnlyInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteEgressOnlyInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteEgressOnlyInternetGatewayWithContext(ctx aws.Context, input *DeleteEgressOnlyInternetGatewayInput, opts ...request.Option) (*DeleteEgressOnlyInternetGatewayOutput, error) { + req, out := c.DeleteEgressOnlyInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFleets = "DeleteFleets" + +// DeleteFleetsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFleets for more information on using the DeleteFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFleetsRequest method. +// req, resp := client.DeleteFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFleets +func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Request, output *DeleteFleetsOutput) { + op := &request.Operation{ + Name: opDeleteFleets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFleetsInput{} + } + + output = &DeleteFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFleets API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified EC2 Fleet. 
+// +// After you delete an EC2 Fleet, it launches no new instances. +// +// You must specify whether a deleted EC2 Fleet should also terminate its instances. +// If you choose to terminate the instances, the EC2 Fleet enters the deleted_terminating +// state. Otherwise, the EC2 Fleet enters the deleted_running state, and the +// instances continue to run until they are interrupted or you terminate them +// manually. +// +// For instant fleets, EC2 Fleet must terminate the instances when the fleet +// is deleted. A deleted instant fleet with running instances is not supported. +// +// Restrictions +// +// * You can delete up to 25 instant fleets in a single request. If you exceed +// this number, no instant fleets are deleted and an error is returned. There +// is no restriction on the number of fleets of type maintain or request +// that can be deleted in a single request. +// +// * Up to 1000 instances can be terminated in a single request to delete +// instant fleets. +// +// For more information, see Deleting an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFleets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFleets +func (c *EC2) DeleteFleets(input *DeleteFleetsInput) (*DeleteFleetsOutput, error) { + req, out := c.DeleteFleetsRequest(input) + return out, req.Send() +} + +// DeleteFleetsWithContext is the same as DeleteFleets with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFleets for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFleetsWithContext(ctx aws.Context, input *DeleteFleetsInput, opts ...request.Option) (*DeleteFleetsOutput, error) { + req, out := c.DeleteFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFlowLogs = "DeleteFlowLogs" + +// DeleteFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFlowLogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFlowLogs for more information on using the DeleteFlowLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFlowLogsRequest method. 
+// req, resp := client.DeleteFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFlowLogs +func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { + op := &request.Operation{ + Name: opDeleteFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFlowLogsInput{} + } + + output = &DeleteFlowLogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFlowLogs API operation for Amazon Elastic Compute Cloud. +// +// Deletes one or more flow logs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFlowLogs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFlowLogs +func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + return out, req.Send() +} + +// DeleteFlowLogsWithContext is the same as DeleteFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFlowLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DeleteFlowLogsWithContext(ctx aws.Context, input *DeleteFlowLogsInput, opts ...request.Option) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFpgaImage = "DeleteFpgaImage" + +// DeleteFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFpgaImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFpgaImage for more information on using the DeleteFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFpgaImageRequest method. +// req, resp := client.DeleteFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImageRequest(input *DeleteFpgaImageInput) (req *request.Request, output *DeleteFpgaImageOutput) { + op := &request.Operation{ + Name: opDeleteFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFpgaImageInput{} + } + + output = &DeleteFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFpgaImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImage(input *DeleteFpgaImageInput) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + return out, req.Send() +} + +// DeleteFpgaImageWithContext is the same as DeleteFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImageInput, opts ...request.Option) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteInternetGateway = "DeleteInternetGateway" + +// DeleteInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInternetGateway for more information on using the DeleteInternetGateway +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInternetGatewayRequest method. +// req, resp := client.DeleteInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGateway +func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) { + op := &request.Operation{ + Name: opDeleteInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInternetGatewayInput{} + } + + output = &DeleteInternetGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified internet gateway. You must detach the internet gateway +// from the VPC before you can delete it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteInternetGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGateway +func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + return out, req.Send() +} + +// DeleteInternetGatewayWithContext is the same as DeleteInternetGateway with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteInternetGatewayWithContext(ctx aws.Context, input *DeleteInternetGatewayInput, opts ...request.Option) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteKeyPair = "DeleteKeyPair" + +// DeleteKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the DeleteKeyPair operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteKeyPair for more information on using the DeleteKeyPair +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteKeyPairRequest method. 
+// req, resp := client.DeleteKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteKeyPair +func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) { + op := &request.Operation{ + Name: opDeleteKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteKeyPairInput{} + } + + output = &DeleteKeyPairOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteKeyPair API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified key pair, by removing the public key from Amazon EC2. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteKeyPair for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteKeyPair +func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + return out, req.Send() +} + +// DeleteKeyPairWithContext is the same as DeleteKeyPair with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DeleteKeyPairWithContext(ctx aws.Context, input *DeleteKeyPairInput, opts ...request.Option) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLaunchTemplate = "DeleteLaunchTemplate" + +// DeleteLaunchTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLaunchTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLaunchTemplate for more information on using the DeleteLaunchTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLaunchTemplateRequest method. +// req, resp := client.DeleteLaunchTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplate +func (c *EC2) DeleteLaunchTemplateRequest(input *DeleteLaunchTemplateInput) (req *request.Request, output *DeleteLaunchTemplateOutput) { + op := &request.Operation{ + Name: opDeleteLaunchTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLaunchTemplateInput{} + } + + output = &DeleteLaunchTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLaunchTemplate API operation for Amazon Elastic Compute Cloud. +// +// Deletes a launch template. Deleting a launch template deletes all of its +// versions. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteLaunchTemplate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplate +func (c *EC2) DeleteLaunchTemplate(input *DeleteLaunchTemplateInput) (*DeleteLaunchTemplateOutput, error) { + req, out := c.DeleteLaunchTemplateRequest(input) + return out, req.Send() +} + +// DeleteLaunchTemplateWithContext is the same as DeleteLaunchTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLaunchTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteLaunchTemplateWithContext(ctx aws.Context, input *DeleteLaunchTemplateInput, opts ...request.Option) (*DeleteLaunchTemplateOutput, error) { + req, out := c.DeleteLaunchTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLaunchTemplateVersions = "DeleteLaunchTemplateVersions" + +// DeleteLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLaunchTemplateVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteLaunchTemplateVersions for more information on using the DeleteLaunchTemplateVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLaunchTemplateVersionsRequest method. +// req, resp := client.DeleteLaunchTemplateVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersions +func (c *EC2) DeleteLaunchTemplateVersionsRequest(input *DeleteLaunchTemplateVersionsInput) (req *request.Request, output *DeleteLaunchTemplateVersionsOutput) { + op := &request.Operation{ + Name: opDeleteLaunchTemplateVersions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLaunchTemplateVersionsInput{} + } + + output = &DeleteLaunchTemplateVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud. +// +// Deletes one or more versions of a launch template. You cannot delete the +// default version of a launch template; you must first assign a different version +// as the default. If the default version is the only version for the launch +// template, you must delete the entire launch template using DeleteLaunchTemplate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteLaunchTemplateVersions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLaunchTemplateVersions +func (c *EC2) DeleteLaunchTemplateVersions(input *DeleteLaunchTemplateVersionsInput) (*DeleteLaunchTemplateVersionsOutput, error) { + req, out := c.DeleteLaunchTemplateVersionsRequest(input) + return out, req.Send() +} + +// DeleteLaunchTemplateVersionsWithContext is the same as DeleteLaunchTemplateVersions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLaunchTemplateVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteLaunchTemplateVersionsWithContext(ctx aws.Context, input *DeleteLaunchTemplateVersionsInput, opts ...request.Option) (*DeleteLaunchTemplateVersionsOutput, error) { + req, out := c.DeleteLaunchTemplateVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLocalGatewayRoute = "DeleteLocalGatewayRoute" + +// DeleteLocalGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLocalGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLocalGatewayRoute for more information on using the DeleteLocalGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteLocalGatewayRouteRequest method. +// req, resp := client.DeleteLocalGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLocalGatewayRoute +func (c *EC2) DeleteLocalGatewayRouteRequest(input *DeleteLocalGatewayRouteInput) (req *request.Request, output *DeleteLocalGatewayRouteOutput) { + op := &request.Operation{ + Name: opDeleteLocalGatewayRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLocalGatewayRouteInput{} + } + + output = &DeleteLocalGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLocalGatewayRoute API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified route from the specified local gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteLocalGatewayRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLocalGatewayRoute +func (c *EC2) DeleteLocalGatewayRoute(input *DeleteLocalGatewayRouteInput) (*DeleteLocalGatewayRouteOutput, error) { + req, out := c.DeleteLocalGatewayRouteRequest(input) + return out, req.Send() +} + +// DeleteLocalGatewayRouteWithContext is the same as DeleteLocalGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLocalGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteLocalGatewayRouteWithContext(ctx aws.Context, input *DeleteLocalGatewayRouteInput, opts ...request.Option) (*DeleteLocalGatewayRouteOutput, error) { + req, out := c.DeleteLocalGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLocalGatewayRouteTableVpcAssociation = "DeleteLocalGatewayRouteTableVpcAssociation" + +// DeleteLocalGatewayRouteTableVpcAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLocalGatewayRouteTableVpcAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLocalGatewayRouteTableVpcAssociation for more information on using the DeleteLocalGatewayRouteTableVpcAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLocalGatewayRouteTableVpcAssociationRequest method. 
+// req, resp := client.DeleteLocalGatewayRouteTableVpcAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLocalGatewayRouteTableVpcAssociation +func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociationRequest(input *DeleteLocalGatewayRouteTableVpcAssociationInput) (req *request.Request, output *DeleteLocalGatewayRouteTableVpcAssociationOutput) { + op := &request.Operation{ + Name: opDeleteLocalGatewayRouteTableVpcAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLocalGatewayRouteTableVpcAssociationInput{} + } + + output = &DeleteLocalGatewayRouteTableVpcAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLocalGatewayRouteTableVpcAssociation API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified association between a VPC and local gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteLocalGatewayRouteTableVpcAssociation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteLocalGatewayRouteTableVpcAssociation +func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociation(input *DeleteLocalGatewayRouteTableVpcAssociationInput) (*DeleteLocalGatewayRouteTableVpcAssociationOutput, error) { + req, out := c.DeleteLocalGatewayRouteTableVpcAssociationRequest(input) + return out, req.Send() +} + +// DeleteLocalGatewayRouteTableVpcAssociationWithContext is the same as DeleteLocalGatewayRouteTableVpcAssociation with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteLocalGatewayRouteTableVpcAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Context, input *DeleteLocalGatewayRouteTableVpcAssociationInput, opts ...request.Option) (*DeleteLocalGatewayRouteTableVpcAssociationOutput, error) { + req, out := c.DeleteLocalGatewayRouteTableVpcAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteManagedPrefixList = "DeleteManagedPrefixList" + +// DeleteManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteManagedPrefixList for more information on using the DeleteManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteManagedPrefixListRequest method. 
+// req, resp := client.DeleteManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixListRequest(input *DeleteManagedPrefixListInput) (req *request.Request, output *DeleteManagedPrefixListOutput) { + op := &request.Operation{ + Name: opDeleteManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteManagedPrefixListInput{} + } + + output = &DeleteManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified managed prefix list. You must first remove all references +// to the prefix list in your resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixList(input *DeleteManagedPrefixListInput) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + return out, req.Send() +} + +// DeleteManagedPrefixListWithContext is the same as DeleteManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteManagedPrefixListWithContext(ctx aws.Context, input *DeleteManagedPrefixListInput, opts ...request.Option) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNatGateway = "DeleteNatGateway" + +// DeleteNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNatGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNatGateway for more information on using the DeleteNatGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNatGatewayRequest method. +// req, resp := client.DeleteNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNatGateway +func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) { + op := &request.Operation{ + Name: opDeleteNatGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNatGatewayInput{} + } + + output = &DeleteNatGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteNatGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified NAT gateway. 
Deleting a NAT gateway disassociates its +// Elastic IP address, but does not release the address from your account. Deleting +// a NAT gateway does not delete any NAT gateway routes in your route tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNatGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNatGateway +func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + return out, req.Send() +} + +// DeleteNatGatewayWithContext is the same as DeleteNatGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNatGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNatGatewayWithContext(ctx aws.Context, input *DeleteNatGatewayInput, opts ...request.Option) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNetworkAcl = "DeleteNetworkAcl" + +// DeleteNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNetworkAcl for more information on using the DeleteNetworkAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNetworkAclRequest method. +// req, resp := client.DeleteNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAcl +func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAcl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclInput{} + } + + output = &DeleteNetworkAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteNetworkAcl API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified network ACL. You can't delete the ACL if it's associated +// with any subnets. You can't delete the default network ACL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkAcl for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAcl +func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + return out, req.Send() +} + +// DeleteNetworkAclWithContext is the same as DeleteNetworkAcl with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkAclWithContext(ctx aws.Context, input *DeleteNetworkAclInput, opts ...request.Option) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" + +// DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAclEntry operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNetworkAclEntry for more information on using the DeleteNetworkAclEntry +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNetworkAclEntryRequest method. 
+// req, resp := client.DeleteNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclEntry +func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opDeleteNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkAclEntryInput{} + } + + output = &DeleteNetworkAclEntryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteNetworkAclEntry API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified ingress or egress entry (rule) from the specified network +// ACL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkAclEntry for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclEntry +func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + return out, req.Send() +} + +// DeleteNetworkAclEntryWithContext is the same as DeleteNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkAclEntryWithContext(ctx aws.Context, input *DeleteNetworkAclEntryInput, opts ...request.Option) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNetworkInterface = "DeleteNetworkInterface" + +// DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterface operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNetworkInterface for more information on using the DeleteNetworkInterface +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNetworkInterfaceRequest method. 
+// req, resp := client.DeleteNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterface +func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfaceInput{} + } + + output = &DeleteNetworkInterfaceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteNetworkInterface API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified network interface. You must detach the network interface +// before you can delete it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkInterface for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterface +func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + return out, req.Send() +} + +// DeleteNetworkInterfaceWithContext is the same as DeleteNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkInterfaceWithContext(ctx aws.Context, input *DeleteNetworkInterfaceInput, opts ...request.Option) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNetworkInterfacePermission = "DeleteNetworkInterfacePermission" + +// DeleteNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterfacePermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNetworkInterfacePermission for more information on using the DeleteNetworkInterfacePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNetworkInterfacePermissionRequest method. 
+// req, resp := client.DeleteNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermissionRequest(input *DeleteNetworkInterfacePermissionInput) (req *request.Request, output *DeleteNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfacePermissionInput{} + } + + output = &DeleteNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Deletes a permission for a network interface. By default, you cannot delete +// the permission if the account for which you're removing the permission has +// attached the network interface to an instance. However, you can force delete +// the permission, regardless of any attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkInterfacePermission for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermission(input *DeleteNetworkInterfacePermissionInput) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// DeleteNetworkInterfacePermissionWithContext is the same as DeleteNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkInterfacePermissionWithContext(ctx aws.Context, input *DeleteNetworkInterfacePermissionInput, opts ...request.Option) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePlacementGroup = "DeletePlacementGroup" + +// DeletePlacementGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeletePlacementGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePlacementGroup for more information on using the DeletePlacementGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePlacementGroupRequest method. 
+// req, resp := client.DeletePlacementGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroup +func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) { + op := &request.Operation{ + Name: opDeletePlacementGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePlacementGroupInput{} + } + + output = &DeletePlacementGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePlacementGroup API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified placement group. You must terminate all instances in +// the placement group before you can delete the placement group. For more information, +// see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeletePlacementGroup for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroup +func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + return out, req.Send() +} + +// DeletePlacementGroupWithContext is the same as DeletePlacementGroup with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeletePlacementGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeletePlacementGroupWithContext(ctx aws.Context, input *DeletePlacementGroupInput, opts ...request.Option) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteQueuedReservedInstances = "DeleteQueuedReservedInstances" + +// DeleteQueuedReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueuedReservedInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteQueuedReservedInstances for more information on using the DeleteQueuedReservedInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteQueuedReservedInstancesRequest method. 
+// req, resp := client.DeleteQueuedReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteQueuedReservedInstances +func (c *EC2) DeleteQueuedReservedInstancesRequest(input *DeleteQueuedReservedInstancesInput) (req *request.Request, output *DeleteQueuedReservedInstancesOutput) { + op := &request.Operation{ + Name: opDeleteQueuedReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueuedReservedInstancesInput{} + } + + output = &DeleteQueuedReservedInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteQueuedReservedInstances API operation for Amazon Elastic Compute Cloud. +// +// Deletes the queued purchases for the specified Reserved Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteQueuedReservedInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteQueuedReservedInstances +func (c *EC2) DeleteQueuedReservedInstances(input *DeleteQueuedReservedInstancesInput) (*DeleteQueuedReservedInstancesOutput, error) { + req, out := c.DeleteQueuedReservedInstancesRequest(input) + return out, req.Send() +} + +// DeleteQueuedReservedInstancesWithContext is the same as DeleteQueuedReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteQueuedReservedInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteQueuedReservedInstancesWithContext(ctx aws.Context, input *DeleteQueuedReservedInstancesInput, opts ...request.Option) (*DeleteQueuedReservedInstancesOutput, error) { + req, out := c.DeleteQueuedReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRoute = "DeleteRoute" + +// DeleteRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRoute for more information on using the DeleteRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRouteRequest method. 
+// req, resp := client.DeleteRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRoute +func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { + op := &request.Operation{ + Name: opDeleteRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteInput{} + } + + output = &DeleteRouteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRoute API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified route from the specified route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRoute +func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + return out, req.Send() +} + +// DeleteRouteWithContext is the same as DeleteRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DeleteRouteWithContext(ctx aws.Context, input *DeleteRouteInput, opts ...request.Option) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRouteTable = "DeleteRouteTable" + +// DeleteRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRouteTable for more information on using the DeleteRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRouteTableRequest method. +// req, resp := client.DeleteRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteTable +func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRouteTableInput{} + } + + output = &DeleteRouteTableOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified route table. 
You must disassociate the route table +// from any subnets before you can delete it. You can't delete the main route +// table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteTable +func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + return out, req.Send() +} + +// DeleteRouteTableWithContext is the same as DeleteRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteRouteTableWithContext(ctx aws.Context, input *DeleteRouteTableInput, opts ...request.Option) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSecurityGroup = "DeleteSecurityGroup" + +// DeleteSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSecurityGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteSecurityGroup for more information on using the DeleteSecurityGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSecurityGroupRequest method. +// req, resp := client.DeleteSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSecurityGroup +func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { + op := &request.Operation{ + Name: opDeleteSecurityGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecurityGroupInput{} + } + + output = &DeleteSecurityGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSecurityGroup API operation for Amazon Elastic Compute Cloud. +// +// Deletes a security group. +// +// If you attempt to delete a security group that is associated with an instance, +// or is referenced by another security group, the operation fails with InvalidGroup.InUse +// in EC2-Classic or DependencyViolation in EC2-VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteSecurityGroup for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSecurityGroup +func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + return out, req.Send() +} + +// DeleteSecurityGroupWithContext is the same as DeleteSecurityGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSecurityGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSecurityGroupWithContext(ctx aws.Context, input *DeleteSecurityGroupInput, opts ...request.Option) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSnapshot = "DeleteSnapshot" + +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSnapshot for more information on using the DeleteSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSnapshotRequest method. 
+// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSnapshot +func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { + op := &request.Operation{ + Name: opDeleteSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSnapshotInput{} + } + + output = &DeleteSnapshotOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSnapshot API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified snapshot. +// +// When you make periodic snapshots of a volume, the snapshots are incremental, +// and only the blocks on the device that have changed since your last snapshot +// are saved in the new snapshot. When you delete a snapshot, only the data +// not needed for any other snapshot is removed. So regardless of which prior +// snapshots have been deleted, all active snapshots will have access to all +// the information needed to restore the volume. +// +// You cannot delete a snapshot of the root device of an EBS volume used by +// a registered AMI. You must first de-register the AMI before you can delete +// the snapshot. +// +// For more information, see Deleting an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteSnapshot for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSnapshot +func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + return out, req.Send() +} + +// DeleteSnapshotWithContext is the same as DeleteSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSnapshotWithContext(ctx aws.Context, input *DeleteSnapshotInput, opts ...request.Option) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" + +// DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSpotDatafeedSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSpotDatafeedSubscription for more information on using the DeleteSpotDatafeedSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSpotDatafeedSubscriptionRequest method. 
+// req, resp := client.DeleteSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSpotDatafeedSubscription +func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDeleteSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSpotDatafeedSubscriptionInput{} + } + + output = &DeleteSpotDatafeedSubscriptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. +// +// Deletes the data feed for Spot Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteSpotDatafeedSubscription for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSpotDatafeedSubscription +func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + return out, req.Send() +} + +// DeleteSpotDatafeedSubscriptionWithContext is the same as DeleteSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *DeleteSpotDatafeedSubscriptionInput, opts ...request.Option) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSubnet = "DeleteSubnet" + +// DeleteSubnetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubnet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSubnet for more information on using the DeleteSubnet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSubnetRequest method. 
+// req, resp := client.DeleteSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnet +func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { + op := &request.Operation{ + Name: opDeleteSubnet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSubnetInput{} + } + + output = &DeleteSubnetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSubnet API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified subnet. You must terminate all running instances in +// the subnet before you can delete the subnet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteSubnet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnet +func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + return out, req.Send() +} + +// DeleteSubnetWithContext is the same as DeleteSubnet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSubnet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DeleteSubnetWithContext(ctx aws.Context, input *DeleteSubnetInput, opts ...request.Option) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTags for more information on using the DeleteTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTags +func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + output = &DeleteTagsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteTags API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified set of tags from the specified set of resources. +// +// To list the current tags, use DescribeTags. 
For more information about tags, +// see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTags for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTags +func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + return out, req.Send() +} + +// DeleteTagsWithContext is the same as DeleteTags with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTrafficMirrorFilter = "DeleteTrafficMirrorFilter" + +// DeleteTrafficMirrorFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteTrafficMirrorFilter for more information on using the DeleteTrafficMirrorFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorFilterRequest method. +// req, resp := client.DeleteTrafficMirrorFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilter +func (c *EC2) DeleteTrafficMirrorFilterRequest(input *DeleteTrafficMirrorFilterInput) (req *request.Request, output *DeleteTrafficMirrorFilterOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorFilterInput{} + } + + output = &DeleteTrafficMirrorFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorFilter API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror filter. +// +// You cannot delete a Traffic Mirror filter that is in use by a Traffic Mirror +// session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorFilter for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilter +func (c *EC2) DeleteTrafficMirrorFilter(input *DeleteTrafficMirrorFilterInput) (*DeleteTrafficMirrorFilterOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorFilterWithContext is the same as DeleteTrafficMirrorFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorFilterWithContext(ctx aws.Context, input *DeleteTrafficMirrorFilterInput, opts ...request.Option) (*DeleteTrafficMirrorFilterOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTrafficMirrorFilterRule = "DeleteTrafficMirrorFilterRule" + +// DeleteTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorFilterRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorFilterRule for more information on using the DeleteTrafficMirrorFilterRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteTrafficMirrorFilterRuleRequest method. +// req, resp := client.DeleteTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilterRule +func (c *EC2) DeleteTrafficMirrorFilterRuleRequest(input *DeleteTrafficMirrorFilterRuleInput) (req *request.Request, output *DeleteTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorFilterRuleInput{} + } + + output = &DeleteTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorFilterRule for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorFilterRule +func (c *EC2) DeleteTrafficMirrorFilterRule(input *DeleteTrafficMirrorFilterRuleInput) (*DeleteTrafficMirrorFilterRuleOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorFilterRuleWithContext is the same as DeleteTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *DeleteTrafficMirrorFilterRuleInput, opts ...request.Option) (*DeleteTrafficMirrorFilterRuleOutput, error) { + req, out := c.DeleteTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTrafficMirrorSession = "DeleteTrafficMirrorSession" + +// DeleteTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorSession for more information on using the DeleteTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorSessionRequest method. 
+// req, resp := client.DeleteTrafficMirrorSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorSession +func (c *EC2) DeleteTrafficMirrorSessionRequest(input *DeleteTrafficMirrorSessionInput) (req *request.Request, output *DeleteTrafficMirrorSessionOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorSessionInput{} + } + + output = &DeleteTrafficMirrorSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorSession API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorSession +func (c *EC2) DeleteTrafficMirrorSession(input *DeleteTrafficMirrorSessionInput) (*DeleteTrafficMirrorSessionOutput, error) { + req, out := c.DeleteTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorSessionWithContext is the same as DeleteTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorSessionWithContext(ctx aws.Context, input *DeleteTrafficMirrorSessionInput, opts ...request.Option) (*DeleteTrafficMirrorSessionOutput, error) { + req, out := c.DeleteTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTrafficMirrorTarget = "DeleteTrafficMirrorTarget" + +// DeleteTrafficMirrorTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficMirrorTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTrafficMirrorTarget for more information on using the DeleteTrafficMirrorTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTrafficMirrorTargetRequest method. 
+// req, resp := client.DeleteTrafficMirrorTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorTarget +func (c *EC2) DeleteTrafficMirrorTargetRequest(input *DeleteTrafficMirrorTargetInput) (req *request.Request, output *DeleteTrafficMirrorTargetOutput) { + op := &request.Operation{ + Name: opDeleteTrafficMirrorTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTrafficMirrorTargetInput{} + } + + output = &DeleteTrafficMirrorTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTrafficMirrorTarget API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Traffic Mirror target. +// +// You cannot delete a Traffic Mirror target that is in use by a Traffic Mirror +// session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTrafficMirrorTarget for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTrafficMirrorTarget +func (c *EC2) DeleteTrafficMirrorTarget(input *DeleteTrafficMirrorTargetInput) (*DeleteTrafficMirrorTargetOutput, error) { + req, out := c.DeleteTrafficMirrorTargetRequest(input) + return out, req.Send() +} + +// DeleteTrafficMirrorTargetWithContext is the same as DeleteTrafficMirrorTarget with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTrafficMirrorTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTrafficMirrorTargetWithContext(ctx aws.Context, input *DeleteTrafficMirrorTargetInput, opts ...request.Option) (*DeleteTrafficMirrorTargetOutput, error) { + req, out := c.DeleteTrafficMirrorTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGateway = "DeleteTransitGateway" + +// DeleteTransitGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGateway for more information on using the DeleteTransitGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayRequest method. 
+// req, resp := client.DeleteTransitGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGateway +func (c *EC2) DeleteTransitGatewayRequest(input *DeleteTransitGatewayInput) (req *request.Request, output *DeleteTransitGatewayOutput) { + op := &request.Operation{ + Name: opDeleteTransitGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayInput{} + } + + output = &DeleteTransitGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified transit gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGateway +func (c *EC2) DeleteTransitGateway(input *DeleteTransitGatewayInput) (*DeleteTransitGatewayOutput, error) { + req, out := c.DeleteTransitGatewayRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayWithContext is the same as DeleteTransitGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DeleteTransitGatewayWithContext(ctx aws.Context, input *DeleteTransitGatewayInput, opts ...request.Option) (*DeleteTransitGatewayOutput, error) { + req, out := c.DeleteTransitGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayMulticastDomain = "DeleteTransitGatewayMulticastDomain" + +// DeleteTransitGatewayMulticastDomainRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayMulticastDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayMulticastDomain for more information on using the DeleteTransitGatewayMulticastDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayMulticastDomainRequest method. 
+// req, resp := client.DeleteTransitGatewayMulticastDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayMulticastDomain +func (c *EC2) DeleteTransitGatewayMulticastDomainRequest(input *DeleteTransitGatewayMulticastDomainInput) (req *request.Request, output *DeleteTransitGatewayMulticastDomainOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayMulticastDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayMulticastDomainInput{} + } + + output = &DeleteTransitGatewayMulticastDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayMulticastDomain API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified transit gateway multicast domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayMulticastDomain for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayMulticastDomain +func (c *EC2) DeleteTransitGatewayMulticastDomain(input *DeleteTransitGatewayMulticastDomainInput) (*DeleteTransitGatewayMulticastDomainOutput, error) { + req, out := c.DeleteTransitGatewayMulticastDomainRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayMulticastDomainWithContext is the same as DeleteTransitGatewayMulticastDomain with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayMulticastDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayMulticastDomainWithContext(ctx aws.Context, input *DeleteTransitGatewayMulticastDomainInput, opts ...request.Option) (*DeleteTransitGatewayMulticastDomainOutput, error) { + req, out := c.DeleteTransitGatewayMulticastDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayPeeringAttachment = "DeleteTransitGatewayPeeringAttachment" + +// DeleteTransitGatewayPeeringAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayPeeringAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayPeeringAttachment for more information on using the DeleteTransitGatewayPeeringAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayPeeringAttachmentRequest method. 
+// req, resp := client.DeleteTransitGatewayPeeringAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPeeringAttachment +func (c *EC2) DeleteTransitGatewayPeeringAttachmentRequest(input *DeleteTransitGatewayPeeringAttachmentInput) (req *request.Request, output *DeleteTransitGatewayPeeringAttachmentOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayPeeringAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayPeeringAttachmentInput{} + } + + output = &DeleteTransitGatewayPeeringAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayPeeringAttachment API operation for Amazon Elastic Compute Cloud. +// +// Deletes a transit gateway peering attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayPeeringAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPeeringAttachment +func (c *EC2) DeleteTransitGatewayPeeringAttachment(input *DeleteTransitGatewayPeeringAttachmentInput) (*DeleteTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.DeleteTransitGatewayPeeringAttachmentRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayPeeringAttachmentWithContext is the same as DeleteTransitGatewayPeeringAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayPeeringAttachment for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, input *DeleteTransitGatewayPeeringAttachmentInput, opts ...request.Option) (*DeleteTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.DeleteTransitGatewayPeeringAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayPrefixListReference = "DeleteTransitGatewayPrefixListReference" + +// DeleteTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayPrefixListReference for more information on using the DeleteTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayPrefixListReferenceRequest method. 
+// req, resp := client.DeleteTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReferenceRequest(input *DeleteTransitGatewayPrefixListReferenceInput) (req *request.Request, output *DeleteTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayPrefixListReferenceInput{} + } + + output = &DeleteTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Deletes a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReference(input *DeleteTransitGatewayPrefixListReferenceInput) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayPrefixListReferenceWithContext is the same as DeleteTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *DeleteTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayRoute = "DeleteTransitGatewayRoute" + +// DeleteTransitGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayRoute for more information on using the DeleteTransitGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayRouteRequest method. 
+// req, resp := client.DeleteTransitGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayRoute +func (c *EC2) DeleteTransitGatewayRouteRequest(input *DeleteTransitGatewayRouteInput) (req *request.Request, output *DeleteTransitGatewayRouteOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayRouteInput{} + } + + output = &DeleteTransitGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayRoute API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified route from the specified transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayRoute +func (c *EC2) DeleteTransitGatewayRoute(input *DeleteTransitGatewayRouteInput) (*DeleteTransitGatewayRouteOutput, error) { + req, out := c.DeleteTransitGatewayRouteRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayRouteWithContext is the same as DeleteTransitGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayRouteWithContext(ctx aws.Context, input *DeleteTransitGatewayRouteInput, opts ...request.Option) (*DeleteTransitGatewayRouteOutput, error) { + req, out := c.DeleteTransitGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayRouteTable = "DeleteTransitGatewayRouteTable" + +// DeleteTransitGatewayRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayRouteTable for more information on using the DeleteTransitGatewayRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayRouteTableRequest method. 
+// req, resp := client.DeleteTransitGatewayRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayRouteTable +func (c *EC2) DeleteTransitGatewayRouteTableRequest(input *DeleteTransitGatewayRouteTableInput) (req *request.Request, output *DeleteTransitGatewayRouteTableOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayRouteTableInput{} + } + + output = &DeleteTransitGatewayRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified transit gateway route table. You must disassociate +// the route table from any transit gateway route tables before you can delete +// it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayRouteTable +func (c *EC2) DeleteTransitGatewayRouteTable(input *DeleteTransitGatewayRouteTableInput) (*DeleteTransitGatewayRouteTableOutput, error) { + req, out := c.DeleteTransitGatewayRouteTableRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayRouteTableWithContext is the same as DeleteTransitGatewayRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayRouteTable for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayRouteTableWithContext(ctx aws.Context, input *DeleteTransitGatewayRouteTableInput, opts ...request.Option) (*DeleteTransitGatewayRouteTableOutput, error) { + req, out := c.DeleteTransitGatewayRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTransitGatewayVpcAttachment = "DeleteTransitGatewayVpcAttachment" + +// DeleteTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayVpcAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayVpcAttachment for more information on using the DeleteTransitGatewayVpcAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayVpcAttachmentRequest method. 
+// req, resp := client.DeleteTransitGatewayVpcAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayVpcAttachment +func (c *EC2) DeleteTransitGatewayVpcAttachmentRequest(input *DeleteTransitGatewayVpcAttachmentInput) (req *request.Request, output *DeleteTransitGatewayVpcAttachmentOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayVpcAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayVpcAttachmentInput{} + } + + output = &DeleteTransitGatewayVpcAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayVpcAttachment API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified VPC attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayVpcAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayVpcAttachment +func (c *EC2) DeleteTransitGatewayVpcAttachment(input *DeleteTransitGatewayVpcAttachmentInput) (*DeleteTransitGatewayVpcAttachmentOutput, error) { + req, out := c.DeleteTransitGatewayVpcAttachmentRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayVpcAttachmentWithContext is the same as DeleteTransitGatewayVpcAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayVpcAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayVpcAttachmentWithContext(ctx aws.Context, input *DeleteTransitGatewayVpcAttachmentInput, opts ...request.Option) (*DeleteTransitGatewayVpcAttachmentOutput, error) { + req, out := c.DeleteTransitGatewayVpcAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVolume = "DeleteVolume" + +// DeleteVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVolume for more information on using the DeleteVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVolumeRequest method. 
+// req, resp := client.DeleteVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVolume +func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { + op := &request.Operation{ + Name: opDeleteVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVolumeInput{} + } + + output = &DeleteVolumeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVolume API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified EBS volume. The volume must be in the available state +// (not attached to an instance). +// +// The volume can remain in the deleting state for several minutes. +// +// For more information, see Deleting an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVolume +func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + return out, req.Send() +} + +// DeleteVolumeWithContext is the same as DeleteVolume with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVolumeWithContext(ctx aws.Context, input *DeleteVolumeInput, opts ...request.Option) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpc = "DeleteVpc" + +// DeleteVpcRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpc for more information on using the DeleteVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcRequest method. +// req, resp := client.DeleteVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpc +func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { + op := &request.Operation{ + Name: opDeleteVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcInput{} + } + + output = &DeleteVpcOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVpc API operation for Amazon Elastic Compute Cloud. 
+// +// Deletes the specified VPC. You must detach or delete all gateways and resources +// that are associated with the VPC before you can delete it. For example, you +// must terminate all instances running in the VPC, delete all security groups +// associated with the VPC (except the default one), delete all route tables +// associated with the VPC (except the default one), and so on. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpc for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpc +func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + return out, req.Send() +} + +// DeleteVpcWithContext is the same as DeleteVpc with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcWithContext(ctx aws.Context, input *DeleteVpcInput, opts ...request.Option) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpcEndpointConnectionNotifications = "DeleteVpcEndpointConnectionNotifications" + +// DeleteVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcEndpointConnectionNotifications operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpcEndpointConnectionNotifications for more information on using the DeleteVpcEndpointConnectionNotifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcEndpointConnectionNotificationsRequest method. +// req, resp := client.DeleteVpcEndpointConnectionNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointConnectionNotifications +func (c *EC2) DeleteVpcEndpointConnectionNotificationsRequest(input *DeleteVpcEndpointConnectionNotificationsInput) (req *request.Request, output *DeleteVpcEndpointConnectionNotificationsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpointConnectionNotifications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointConnectionNotificationsInput{} + } + + output = &DeleteVpcEndpointConnectionNotificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcEndpointConnectionNotifications API operation for Amazon Elastic Compute Cloud. +// +// Deletes one or more VPC endpoint connection notifications. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpcEndpointConnectionNotifications for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointConnectionNotifications +func (c *EC2) DeleteVpcEndpointConnectionNotifications(input *DeleteVpcEndpointConnectionNotificationsInput) (*DeleteVpcEndpointConnectionNotificationsOutput, error) { + req, out := c.DeleteVpcEndpointConnectionNotificationsRequest(input) + return out, req.Send() +} + +// DeleteVpcEndpointConnectionNotificationsWithContext is the same as DeleteVpcEndpointConnectionNotifications with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcEndpointConnectionNotifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcEndpointConnectionNotificationsWithContext(ctx aws.Context, input *DeleteVpcEndpointConnectionNotificationsInput, opts ...request.Option) (*DeleteVpcEndpointConnectionNotificationsOutput, error) { + req, out := c.DeleteVpcEndpointConnectionNotificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpcEndpointServiceConfigurations = "DeleteVpcEndpointServiceConfigurations" + +// DeleteVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcEndpointServiceConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpcEndpointServiceConfigurations for more information on using the DeleteVpcEndpointServiceConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcEndpointServiceConfigurationsRequest method. +// req, resp := client.DeleteVpcEndpointServiceConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointServiceConfigurations +func (c *EC2) DeleteVpcEndpointServiceConfigurationsRequest(input *DeleteVpcEndpointServiceConfigurationsInput) (req *request.Request, output *DeleteVpcEndpointServiceConfigurationsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpointServiceConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointServiceConfigurationsInput{} + } + + output = &DeleteVpcEndpointServiceConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcEndpointServiceConfigurations API operation for Amazon Elastic Compute Cloud. +// +// Deletes one or more VPC endpoint service configurations in your account. +// Before you delete the endpoint service configuration, you must reject any +// Available or PendingAcceptance interface endpoint connections that are attached +// to the service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpcEndpointServiceConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpointServiceConfigurations +func (c *EC2) DeleteVpcEndpointServiceConfigurations(input *DeleteVpcEndpointServiceConfigurationsInput) (*DeleteVpcEndpointServiceConfigurationsOutput, error) { + req, out := c.DeleteVpcEndpointServiceConfigurationsRequest(input) + return out, req.Send() +} + +// DeleteVpcEndpointServiceConfigurationsWithContext is the same as DeleteVpcEndpointServiceConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcEndpointServiceConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcEndpointServiceConfigurationsWithContext(ctx aws.Context, input *DeleteVpcEndpointServiceConfigurationsInput, opts ...request.Option) (*DeleteVpcEndpointServiceConfigurationsOutput, error) { + req, out := c.DeleteVpcEndpointServiceConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpcEndpoints = "DeleteVpcEndpoints" + +// DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteVpcEndpoints for more information on using the DeleteVpcEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcEndpointsRequest method. +// req, resp := client.DeleteVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpoints +func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDeleteVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcEndpointsInput{} + } + + output = &DeleteVpcEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcEndpoints API operation for Amazon Elastic Compute Cloud. +// +// Deletes one or more specified VPC endpoints. Deleting a gateway endpoint +// also deletes the endpoint routes in the route tables that were associated +// with the endpoint. Deleting an interface endpoint or a Gateway Load Balancer +// endpoint deletes the endpoint network interfaces. Gateway Load Balancer endpoints +// can only be deleted if the routes that are associated with the endpoint are +// deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpcEndpoints for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpoints +func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + return out, req.Send() +} + +// DeleteVpcEndpointsWithContext is the same as DeleteVpcEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcEndpointsWithContext(ctx aws.Context, input *DeleteVpcEndpointsInput, opts ...request.Option) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" + +// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcPeeringConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpcPeeringConnection for more information on using the DeleteVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpcPeeringConnectionRequest method. 
+// req, resp := client.DeleteVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcPeeringConnection +func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpcPeeringConnectionInput{} + } + + output = &DeleteVpcPeeringConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVpcPeeringConnection API operation for Amazon Elastic Compute Cloud. +// +// Deletes a VPC peering connection. Either the owner of the requester VPC or +// the owner of the accepter VPC can delete the VPC peering connection if it's +// in the active state. The owner of the requester VPC can delete a VPC peering +// connection in the pending-acceptance state. You cannot delete a VPC peering +// connection that's in the failed state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpcPeeringConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcPeeringConnection +func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// DeleteVpcPeeringConnectionWithContext is the same as DeleteVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcPeeringConnectionWithContext(ctx aws.Context, input *DeleteVpcPeeringConnectionInput, opts ...request.Option) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpnConnection = "DeleteVpnConnection" + +// DeleteVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpnConnection for more information on using the DeleteVpnConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpnConnectionRequest method. 
+// req, resp := client.DeleteVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnection +func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionInput{} + } + + output = &DeleteVpnConnectionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVpnConnection API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified VPN connection. +// +// If you're deleting the VPC and its associated components, we recommend that +// you detach the virtual private gateway from the VPC and delete the VPC before +// deleting the VPN connection. If you believe that the tunnel credentials for +// your VPN connection have been compromised, you can delete the VPN connection +// and create a new one that has new keys, without needing to delete the VPC +// or virtual private gateway. If you create a new VPN connection, you must +// reconfigure the customer gateway device using the new configuration information +// returned with the new VPN connection ID. +// +// For certificate-based authentication, delete all AWS Certificate Manager +// (ACM) private certificates used for the AWS-side tunnel endpoints for the +// VPN connection before deleting the VPN connection. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpnConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnection +func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + return out, req.Send() +} + +// DeleteVpnConnectionWithContext is the same as DeleteVpnConnection with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpnConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnConnectionWithContext(ctx aws.Context, input *DeleteVpnConnectionInput, opts ...request.Option) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" + +// DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnectionRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpnConnectionRoute for more information on using the DeleteVpnConnectionRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpnConnectionRouteRequest method. +// req, resp := client.DeleteVpnConnectionRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRoute +func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { + op := &request.Operation{ + Name: opDeleteVpnConnectionRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnConnectionRouteInput{} + } + + output = &DeleteVpnConnectionRouteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVpnConnectionRoute API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified static route associated with a VPN connection between +// an existing virtual private gateway and a VPN customer gateway. The static +// route allows traffic to be routed from the virtual private gateway to the +// VPN customer gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpnConnectionRoute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRoute +func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + return out, req.Send() +} + +// DeleteVpnConnectionRouteWithContext is the same as DeleteVpnConnectionRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpnConnectionRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnConnectionRouteWithContext(ctx aws.Context, input *DeleteVpnConnectionRouteInput, opts ...request.Option) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVpnGateway = "DeleteVpnGateway" + +// DeleteVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVpnGateway for more information on using the DeleteVpnGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVpnGatewayRequest method. 
+// req, resp := client.DeleteVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnGateway +func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVpnGatewayInput{} + } + + output = &DeleteVpnGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVpnGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified virtual private gateway. You must first detach the +// virtual private gateway from the VPC. Note that you don't need to delete +// the virtual private gateway if you plan to delete and recreate the VPN connection +// between your VPC and your network. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteVpnGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnGateway +func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + return out, req.Send() +} + +// DeleteVpnGatewayWithContext is the same as DeleteVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnGatewayWithContext(ctx aws.Context, input *DeleteVpnGatewayInput, opts ...request.Option) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeprovisionByoipCidr = "DeprovisionByoipCidr" + +// DeprovisionByoipCidrRequest generates a "aws/request.Request" representing the +// client's request for the DeprovisionByoipCidr operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeprovisionByoipCidr for more information on using the DeprovisionByoipCidr +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeprovisionByoipCidrRequest method. 
+// req, resp := client.DeprovisionByoipCidrRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeprovisionByoipCidr +func (c *EC2) DeprovisionByoipCidrRequest(input *DeprovisionByoipCidrInput) (req *request.Request, output *DeprovisionByoipCidrOutput) { + op := &request.Operation{ + Name: opDeprovisionByoipCidr, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeprovisionByoipCidrInput{} + } + + output = &DeprovisionByoipCidrOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeprovisionByoipCidr API operation for Amazon Elastic Compute Cloud. +// +// Releases the specified address range that you provisioned for use with your +// AWS resources through bring your own IP addresses (BYOIP) and deletes the +// corresponding address pool. +// +// Before you can release an address range, you must stop advertising it using +// WithdrawByoipCidr and you must not have any IP addresses allocated from its +// address range. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeprovisionByoipCidr for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeprovisionByoipCidr +func (c *EC2) DeprovisionByoipCidr(input *DeprovisionByoipCidrInput) (*DeprovisionByoipCidrOutput, error) { + req, out := c.DeprovisionByoipCidrRequest(input) + return out, req.Send() +} + +// DeprovisionByoipCidrWithContext is the same as DeprovisionByoipCidr with the addition of +// the ability to pass a context and additional request options. +// +// See DeprovisionByoipCidr for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeprovisionByoipCidrWithContext(ctx aws.Context, input *DeprovisionByoipCidrInput, opts ...request.Option) (*DeprovisionByoipCidrOutput, error) { + req, out := c.DeprovisionByoipCidrRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterImage = "DeregisterImage" + +// DeregisterImageRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterImage for more information on using the DeregisterImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterImageRequest method. 
+// req, resp := client.DeregisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterImage +func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { + op := &request.Operation{ + Name: opDeregisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterImageInput{} + } + + output = &DeregisterImageOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeregisterImage API operation for Amazon Elastic Compute Cloud. +// +// Deregisters the specified AMI. After you deregister an AMI, it can't be used +// to launch new instances; however, it doesn't affect any instances that you've +// already launched from the AMI. You'll continue to incur usage costs for those +// instances until you terminate them. +// +// When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot +// that was created for the root volume of the instance during the AMI creation +// process. When you deregister an instance store-backed AMI, it doesn't affect +// the files that you uploaded to Amazon S3 when you created the AMI. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterImage for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterImage +func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + return out, req.Send() +} + +// DeregisterImageWithContext is the same as DeregisterImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterImageWithContext(ctx aws.Context, input *DeregisterImageInput, opts ...request.Option) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterInstanceEventNotificationAttributes = "DeregisterInstanceEventNotificationAttributes" + +// DeregisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterInstanceEventNotificationAttributes for more information on using the DeregisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeregisterInstanceEventNotificationAttributesRequest method. +// req, resp := client.DeregisterInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributesRequest(input *DeregisterInstanceEventNotificationAttributesInput) (req *request.Request, output *DeregisterInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDeregisterInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstanceEventNotificationAttributesInput{} + } + + output = &DeregisterInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Deregisters tag keys to prevent tags that have the specified tag keys from +// being included in scheduled event notifications for resources in the Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterInstanceEventNotificationAttributes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributes(input *DeregisterInstanceEventNotificationAttributesInput) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DeregisterInstanceEventNotificationAttributesWithContext is the same as DeregisterInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DeregisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTransitGatewayMulticastGroupMembers = "DeregisterTransitGatewayMulticastGroupMembers" + +// DeregisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTransitGatewayMulticastGroupMembers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeregisterTransitGatewayMulticastGroupMembers for more information on using the DeregisterTransitGatewayMulticastGroupMembers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTransitGatewayMulticastGroupMembersRequest method. +// req, resp := client.DeregisterTransitGatewayMulticastGroupMembersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterTransitGatewayMulticastGroupMembers +func (c *EC2) DeregisterTransitGatewayMulticastGroupMembersRequest(input *DeregisterTransitGatewayMulticastGroupMembersInput) (req *request.Request, output *DeregisterTransitGatewayMulticastGroupMembersOutput) { + op := &request.Operation{ + Name: opDeregisterTransitGatewayMulticastGroupMembers, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTransitGatewayMulticastGroupMembersInput{} + } + + output = &DeregisterTransitGatewayMulticastGroupMembersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTransitGatewayMulticastGroupMembers API operation for Amazon Elastic Compute Cloud. +// +// Deregisters the specified members (network interfaces) from the transit gateway +// multicast group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterTransitGatewayMulticastGroupMembers for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterTransitGatewayMulticastGroupMembers +func (c *EC2) DeregisterTransitGatewayMulticastGroupMembers(input *DeregisterTransitGatewayMulticastGroupMembersInput) (*DeregisterTransitGatewayMulticastGroupMembersOutput, error) { + req, out := c.DeregisterTransitGatewayMulticastGroupMembersRequest(input) + return out, req.Send() +} + +// DeregisterTransitGatewayMulticastGroupMembersWithContext is the same as DeregisterTransitGatewayMulticastGroupMembers with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTransitGatewayMulticastGroupMembers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterTransitGatewayMulticastGroupMembersWithContext(ctx aws.Context, input *DeregisterTransitGatewayMulticastGroupMembersInput, opts ...request.Option) (*DeregisterTransitGatewayMulticastGroupMembersOutput, error) { + req, out := c.DeregisterTransitGatewayMulticastGroupMembersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTransitGatewayMulticastGroupSources = "DeregisterTransitGatewayMulticastGroupSources" + +// DeregisterTransitGatewayMulticastGroupSourcesRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTransitGatewayMulticastGroupSources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeregisterTransitGatewayMulticastGroupSources for more information on using the DeregisterTransitGatewayMulticastGroupSources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTransitGatewayMulticastGroupSourcesRequest method. +// req, resp := client.DeregisterTransitGatewayMulticastGroupSourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterTransitGatewayMulticastGroupSources +func (c *EC2) DeregisterTransitGatewayMulticastGroupSourcesRequest(input *DeregisterTransitGatewayMulticastGroupSourcesInput) (req *request.Request, output *DeregisterTransitGatewayMulticastGroupSourcesOutput) { + op := &request.Operation{ + Name: opDeregisterTransitGatewayMulticastGroupSources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTransitGatewayMulticastGroupSourcesInput{} + } + + output = &DeregisterTransitGatewayMulticastGroupSourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTransitGatewayMulticastGroupSources API operation for Amazon Elastic Compute Cloud. +// +// Deregisters the specified sources (network interfaces) from the transit gateway +// multicast group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterTransitGatewayMulticastGroupSources for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterTransitGatewayMulticastGroupSources +func (c *EC2) DeregisterTransitGatewayMulticastGroupSources(input *DeregisterTransitGatewayMulticastGroupSourcesInput) (*DeregisterTransitGatewayMulticastGroupSourcesOutput, error) { + req, out := c.DeregisterTransitGatewayMulticastGroupSourcesRequest(input) + return out, req.Send() +} + +// DeregisterTransitGatewayMulticastGroupSourcesWithContext is the same as DeregisterTransitGatewayMulticastGroupSources with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTransitGatewayMulticastGroupSources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterTransitGatewayMulticastGroupSourcesWithContext(ctx aws.Context, input *DeregisterTransitGatewayMulticastGroupSourcesInput, opts ...request.Option) (*DeregisterTransitGatewayMulticastGroupSourcesOutput, error) { + req, out := c.DeregisterTransitGatewayMulticastGroupSourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAccountAttributes = "DescribeAccountAttributes" + +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAccountAttributes for more information on using the DescribeAccountAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAccountAttributes +func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { + op := &request.Operation{ + Name: opDescribeAccountAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountAttributesInput{} + } + + output = &DescribeAccountAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAccountAttributes API operation for Amazon Elastic Compute Cloud. +// +// Describes attributes of your AWS account. The following are the supported +// account attributes: +// +// * supported-platforms: Indicates whether your account can launch instances +// into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// +// * default-vpc: The ID of the default VPC for your account, or none. +// +// * max-instances: This attribute is no longer supported. The returned value +// does not reflect your actual vCPU limit for running On-Demand Instances. +// For more information, see On-Demand Instance Limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits) +// in the Amazon Elastic Compute Cloud User Guide. +// +// * vpc-max-security-groups-per-interface: The maximum number of security +// groups that you can assign to a network interface. 
+// +// * max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-Classic. +// +// * vpc-max-elastic-ips: The maximum number of Elastic IP addresses that +// you can allocate for use with EC2-VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAccountAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAccountAttributes +func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + return out, req.Send() +} + +// DescribeAccountAttributesWithContext is the same as DescribeAccountAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAccountAttributesWithContext(ctx aws.Context, input *DescribeAccountAttributesInput, opts ...request.Option) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAddresses = "DescribeAddresses" + +// DescribeAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAddresses operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAddresses for more information on using the DescribeAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAddressesRequest method. +// req, resp := client.DescribeAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddresses +func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { + op := &request.Operation{ + Name: opDescribeAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAddressesInput{} + } + + output = &DescribeAddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAddresses API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified Elastic IP addresses or all of your Elastic IP addresses. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAddresses for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddresses +func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + return out, req.Send() +} + +// DescribeAddressesWithContext is the same as DescribeAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddressesInput, opts ...request.Option) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAggregateIdFormat = "DescribeAggregateIdFormat" + +// DescribeAggregateIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAggregateIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAggregateIdFormat for more information on using the DescribeAggregateIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeAggregateIdFormatRequest method. +// req, resp := client.DescribeAggregateIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat +func (c *EC2) DescribeAggregateIdFormatRequest(input *DescribeAggregateIdFormatInput) (req *request.Request, output *DescribeAggregateIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeAggregateIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAggregateIdFormatInput{} + } + + output = &DescribeAggregateIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAggregateIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the longer ID format settings for all resource types in a specific +// Region. This request is useful for performing a quick audit to determine +// whether a specific Region is fully opted in for longer IDs (17-character +// IDs). +// +// This request only returns information about resource types that support longer +// IDs. +// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAggregateIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat +func (c *EC2) DescribeAggregateIdFormat(input *DescribeAggregateIdFormatInput) (*DescribeAggregateIdFormatOutput, error) { + req, out := c.DescribeAggregateIdFormatRequest(input) + return out, req.Send() +} + +// DescribeAggregateIdFormatWithContext is the same as DescribeAggregateIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAggregateIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAggregateIdFormatWithContext(ctx aws.Context, input *DescribeAggregateIdFormatInput, opts ...request.Option) (*DescribeAggregateIdFormatOutput, error) { + req, out := c.DescribeAggregateIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAvailabilityZones = "DescribeAvailabilityZones" + +// DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAvailabilityZones operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAvailabilityZones for more information on using the DescribeAvailabilityZones +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAvailabilityZonesRequest method. +// req, resp := client.DescribeAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAvailabilityZones +func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityZonesInput{} + } + + output = &DescribeAvailabilityZonesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAvailabilityZones API operation for Amazon Elastic Compute Cloud. +// +// Describes the Availability Zones, Local Zones, and Wavelength Zones that +// are available to you. If there is an event impacting a zone, you can use +// this request to view the state and any provided messages for that zone. +// +// For more information about Availability Zones, Local Zones, and Wavelength +// Zones, see Regions, Zones and Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeAvailabilityZones for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAvailabilityZones +func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + return out, req.Send() +} + +// DescribeAvailabilityZonesWithContext is the same as DescribeAvailabilityZones with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAvailabilityZones for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *DescribeAvailabilityZonesInput, opts ...request.Option) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBundleTasks = "DescribeBundleTasks" + +// DescribeBundleTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBundleTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBundleTasks for more information on using the DescribeBundleTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBundleTasksRequest method. 
+// req, resp := client.DescribeBundleTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasks +func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { + op := &request.Operation{ + Name: opDescribeBundleTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBundleTasksInput{} + } + + output = &DescribeBundleTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBundleTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified bundle tasks or all of your bundle tasks. +// +// Completed bundle tasks are listed for only a limited time. If your bundle +// task is no longer in the list, you can still register an AMI from it. Just +// use RegisterImage with the Amazon S3 bucket name and image manifest name +// you provided to the bundle task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeBundleTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasks +func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + return out, req.Send() +} + +// DescribeBundleTasksWithContext is the same as DescribeBundleTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBundleTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeBundleTasksWithContext(ctx aws.Context, input *DescribeBundleTasksInput, opts ...request.Option) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeByoipCidrs = "DescribeByoipCidrs" + +// DescribeByoipCidrsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeByoipCidrs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeByoipCidrs for more information on using the DescribeByoipCidrs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeByoipCidrsRequest method. 
+// req, resp := client.DescribeByoipCidrsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeByoipCidrs +func (c *EC2) DescribeByoipCidrsRequest(input *DescribeByoipCidrsInput) (req *request.Request, output *DescribeByoipCidrsOutput) { + op := &request.Operation{ + Name: opDescribeByoipCidrs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeByoipCidrsInput{} + } + + output = &DescribeByoipCidrsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeByoipCidrs API operation for Amazon Elastic Compute Cloud. +// +// Describes the IP address ranges that were specified in calls to ProvisionByoipCidr. +// +// To describe the address pools that were created when you provisioned the +// address ranges, use DescribePublicIpv4Pools or DescribeIpv6Pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeByoipCidrs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeByoipCidrs +func (c *EC2) DescribeByoipCidrs(input *DescribeByoipCidrsInput) (*DescribeByoipCidrsOutput, error) { + req, out := c.DescribeByoipCidrsRequest(input) + return out, req.Send() +} + +// DescribeByoipCidrsWithContext is the same as DescribeByoipCidrs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeByoipCidrs for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeByoipCidrsWithContext(ctx aws.Context, input *DescribeByoipCidrsInput, opts ...request.Option) (*DescribeByoipCidrsOutput, error) { + req, out := c.DescribeByoipCidrsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeByoipCidrsPages iterates over the pages of a DescribeByoipCidrs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeByoipCidrs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeByoipCidrs operation. +// pageNum := 0 +// err := client.DescribeByoipCidrsPages(params, +// func(page *ec2.DescribeByoipCidrsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeByoipCidrsPages(input *DescribeByoipCidrsInput, fn func(*DescribeByoipCidrsOutput, bool) bool) error { + return c.DescribeByoipCidrsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeByoipCidrsPagesWithContext same as DescribeByoipCidrsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeByoipCidrsPagesWithContext(ctx aws.Context, input *DescribeByoipCidrsInput, fn func(*DescribeByoipCidrsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeByoipCidrsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeByoipCidrsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeByoipCidrsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeCapacityReservations = "DescribeCapacityReservations" + +// DescribeCapacityReservationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCapacityReservations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCapacityReservations for more information on using the DescribeCapacityReservations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCapacityReservationsRequest method. 
+// req, resp := client.DescribeCapacityReservationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCapacityReservations +func (c *EC2) DescribeCapacityReservationsRequest(input *DescribeCapacityReservationsInput) (req *request.Request, output *DescribeCapacityReservationsOutput) { + op := &request.Operation{ + Name: opDescribeCapacityReservations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCapacityReservationsInput{} + } + + output = &DescribeCapacityReservationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCapacityReservations API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your Capacity Reservations. The results describe +// only the Capacity Reservations in the AWS Region that you're currently using. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCapacityReservations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCapacityReservations +func (c *EC2) DescribeCapacityReservations(input *DescribeCapacityReservationsInput) (*DescribeCapacityReservationsOutput, error) { + req, out := c.DescribeCapacityReservationsRequest(input) + return out, req.Send() +} + +// DescribeCapacityReservationsWithContext is the same as DescribeCapacityReservations with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeCapacityReservations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCapacityReservationsWithContext(ctx aws.Context, input *DescribeCapacityReservationsInput, opts ...request.Option) (*DescribeCapacityReservationsOutput, error) { + req, out := c.DescribeCapacityReservationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCapacityReservationsPages iterates over the pages of a DescribeCapacityReservations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCapacityReservations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCapacityReservations operation. +// pageNum := 0 +// err := client.DescribeCapacityReservationsPages(params, +// func(page *ec2.DescribeCapacityReservationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCapacityReservationsPages(input *DescribeCapacityReservationsInput, fn func(*DescribeCapacityReservationsOutput, bool) bool) error { + return c.DescribeCapacityReservationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCapacityReservationsPagesWithContext same as DescribeCapacityReservationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCapacityReservationsPagesWithContext(ctx aws.Context, input *DescribeCapacityReservationsInput, fn func(*DescribeCapacityReservationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCapacityReservationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCapacityReservationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCapacityReservationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeCarrierGateways = "DescribeCarrierGateways" + +// DescribeCarrierGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCarrierGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCarrierGateways for more information on using the DescribeCarrierGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCarrierGatewaysRequest method. 
+// req, resp := client.DescribeCarrierGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGatewaysRequest(input *DescribeCarrierGatewaysInput) (req *request.Request, output *DescribeCarrierGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCarrierGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCarrierGatewaysInput{} + } + + output = &DescribeCarrierGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCarrierGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your carrier gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCarrierGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGateways(input *DescribeCarrierGatewaysInput) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + return out, req.Send() +} + +// DescribeCarrierGatewaysWithContext is the same as DescribeCarrierGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCarrierGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCarrierGatewaysWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, opts ...request.Option) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCarrierGatewaysPages iterates over the pages of a DescribeCarrierGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCarrierGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCarrierGateways operation. +// pageNum := 0 +// err := client.DescribeCarrierGatewaysPages(params, +// func(page *ec2.DescribeCarrierGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCarrierGatewaysPages(input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool) error { + return c.DescribeCarrierGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCarrierGatewaysPagesWithContext same as DescribeCarrierGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeCarrierGatewaysPagesWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCarrierGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCarrierGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCarrierGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" + +// DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClassicLinkInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClassicLinkInstances for more information on using the DescribeClassicLinkInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClassicLinkInstancesRequest method. 
+// req, resp := client.DescribeClassicLinkInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstances +func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { + op := &request.Operation{ + Name: opDescribeClassicLinkInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClassicLinkInstancesInput{} + } + + output = &DescribeClassicLinkInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClassicLinkInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your linked EC2-Classic instances. This request +// only returns information about EC2-Classic instances linked to a VPC through +// ClassicLink. You cannot use this request to return information about other +// instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClassicLinkInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstances +func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + return out, req.Send() +} + +// DescribeClassicLinkInstancesWithContext is the same as DescribeClassicLinkInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClassicLinkInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClassicLinkInstancesWithContext(ctx aws.Context, input *DescribeClassicLinkInstancesInput, opts ...request.Option) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClassicLinkInstancesPages iterates over the pages of a DescribeClassicLinkInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClassicLinkInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClassicLinkInstances operation. 
+// pageNum := 0 +// err := client.DescribeClassicLinkInstancesPages(params, +// func(page *ec2.DescribeClassicLinkInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClassicLinkInstancesPages(input *DescribeClassicLinkInstancesInput, fn func(*DescribeClassicLinkInstancesOutput, bool) bool) error { + return c.DescribeClassicLinkInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClassicLinkInstancesPagesWithContext same as DescribeClassicLinkInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClassicLinkInstancesPagesWithContext(ctx aws.Context, input *DescribeClassicLinkInstancesInput, fn func(*DescribeClassicLinkInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClassicLinkInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClassicLinkInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClassicLinkInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClientVpnAuthorizationRules = "DescribeClientVpnAuthorizationRules" + +// DescribeClientVpnAuthorizationRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClientVpnAuthorizationRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClientVpnAuthorizationRules for more information on using the DescribeClientVpnAuthorizationRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClientVpnAuthorizationRulesRequest method. +// req, resp := client.DescribeClientVpnAuthorizationRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnAuthorizationRules +func (c *EC2) DescribeClientVpnAuthorizationRulesRequest(input *DescribeClientVpnAuthorizationRulesInput) (req *request.Request, output *DescribeClientVpnAuthorizationRulesOutput) { + op := &request.Operation{ + Name: opDescribeClientVpnAuthorizationRules, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClientVpnAuthorizationRulesInput{} + } + + output = &DescribeClientVpnAuthorizationRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClientVpnAuthorizationRules API operation for Amazon Elastic Compute Cloud. +// +// Describes the authorization rules for a specified Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClientVpnAuthorizationRules for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnAuthorizationRules +func (c *EC2) DescribeClientVpnAuthorizationRules(input *DescribeClientVpnAuthorizationRulesInput) (*DescribeClientVpnAuthorizationRulesOutput, error) { + req, out := c.DescribeClientVpnAuthorizationRulesRequest(input) + return out, req.Send() +} + +// DescribeClientVpnAuthorizationRulesWithContext is the same as DescribeClientVpnAuthorizationRules with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClientVpnAuthorizationRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnAuthorizationRulesWithContext(ctx aws.Context, input *DescribeClientVpnAuthorizationRulesInput, opts ...request.Option) (*DescribeClientVpnAuthorizationRulesOutput, error) { + req, out := c.DescribeClientVpnAuthorizationRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClientVpnAuthorizationRulesPages iterates over the pages of a DescribeClientVpnAuthorizationRules operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClientVpnAuthorizationRules method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClientVpnAuthorizationRules operation. 
+// pageNum := 0 +// err := client.DescribeClientVpnAuthorizationRulesPages(params, +// func(page *ec2.DescribeClientVpnAuthorizationRulesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClientVpnAuthorizationRulesPages(input *DescribeClientVpnAuthorizationRulesInput, fn func(*DescribeClientVpnAuthorizationRulesOutput, bool) bool) error { + return c.DescribeClientVpnAuthorizationRulesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClientVpnAuthorizationRulesPagesWithContext same as DescribeClientVpnAuthorizationRulesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnAuthorizationRulesPagesWithContext(ctx aws.Context, input *DescribeClientVpnAuthorizationRulesInput, fn func(*DescribeClientVpnAuthorizationRulesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClientVpnAuthorizationRulesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClientVpnAuthorizationRulesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnAuthorizationRulesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClientVpnConnections = "DescribeClientVpnConnections" + +// DescribeClientVpnConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClientVpnConnections operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClientVpnConnections for more information on using the DescribeClientVpnConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClientVpnConnectionsRequest method. +// req, resp := client.DescribeClientVpnConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnConnections +func (c *EC2) DescribeClientVpnConnectionsRequest(input *DescribeClientVpnConnectionsInput) (req *request.Request, output *DescribeClientVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeClientVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClientVpnConnectionsInput{} + } + + output = &DescribeClientVpnConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClientVpnConnections API operation for Amazon Elastic Compute Cloud. +// +// Describes active client connections and connections that have been terminated +// within the last 60 minutes for the specified Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClientVpnConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnConnections +func (c *EC2) DescribeClientVpnConnections(input *DescribeClientVpnConnectionsInput) (*DescribeClientVpnConnectionsOutput, error) { + req, out := c.DescribeClientVpnConnectionsRequest(input) + return out, req.Send() +} + +// DescribeClientVpnConnectionsWithContext is the same as DescribeClientVpnConnections with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClientVpnConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnConnectionsWithContext(ctx aws.Context, input *DescribeClientVpnConnectionsInput, opts ...request.Option) (*DescribeClientVpnConnectionsOutput, error) { + req, out := c.DescribeClientVpnConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClientVpnConnectionsPages iterates over the pages of a DescribeClientVpnConnections operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClientVpnConnections method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClientVpnConnections operation. 
+// pageNum := 0 +// err := client.DescribeClientVpnConnectionsPages(params, +// func(page *ec2.DescribeClientVpnConnectionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClientVpnConnectionsPages(input *DescribeClientVpnConnectionsInput, fn func(*DescribeClientVpnConnectionsOutput, bool) bool) error { + return c.DescribeClientVpnConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClientVpnConnectionsPagesWithContext same as DescribeClientVpnConnectionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnConnectionsPagesWithContext(ctx aws.Context, input *DescribeClientVpnConnectionsInput, fn func(*DescribeClientVpnConnectionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClientVpnConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClientVpnConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnConnectionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClientVpnEndpoints = "DescribeClientVpnEndpoints" + +// DescribeClientVpnEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClientVpnEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClientVpnEndpoints for more information on using the DescribeClientVpnEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClientVpnEndpointsRequest method. +// req, resp := client.DescribeClientVpnEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnEndpoints +func (c *EC2) DescribeClientVpnEndpointsRequest(input *DescribeClientVpnEndpointsInput) (req *request.Request, output *DescribeClientVpnEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeClientVpnEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClientVpnEndpointsInput{} + } + + output = &DescribeClientVpnEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClientVpnEndpoints API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more Client VPN endpoints in the account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClientVpnEndpoints for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnEndpoints +func (c *EC2) DescribeClientVpnEndpoints(input *DescribeClientVpnEndpointsInput) (*DescribeClientVpnEndpointsOutput, error) { + req, out := c.DescribeClientVpnEndpointsRequest(input) + return out, req.Send() +} + +// DescribeClientVpnEndpointsWithContext is the same as DescribeClientVpnEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClientVpnEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnEndpointsWithContext(ctx aws.Context, input *DescribeClientVpnEndpointsInput, opts ...request.Option) (*DescribeClientVpnEndpointsOutput, error) { + req, out := c.DescribeClientVpnEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClientVpnEndpointsPages iterates over the pages of a DescribeClientVpnEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClientVpnEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClientVpnEndpoints operation. 
+// pageNum := 0 +// err := client.DescribeClientVpnEndpointsPages(params, +// func(page *ec2.DescribeClientVpnEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClientVpnEndpointsPages(input *DescribeClientVpnEndpointsInput, fn func(*DescribeClientVpnEndpointsOutput, bool) bool) error { + return c.DescribeClientVpnEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClientVpnEndpointsPagesWithContext same as DescribeClientVpnEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnEndpointsPagesWithContext(ctx aws.Context, input *DescribeClientVpnEndpointsInput, fn func(*DescribeClientVpnEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClientVpnEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClientVpnEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClientVpnRoutes = "DescribeClientVpnRoutes" + +// DescribeClientVpnRoutesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClientVpnRoutes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClientVpnRoutes for more information on using the DescribeClientVpnRoutes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClientVpnRoutesRequest method. +// req, resp := client.DescribeClientVpnRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnRoutes +func (c *EC2) DescribeClientVpnRoutesRequest(input *DescribeClientVpnRoutesInput) (req *request.Request, output *DescribeClientVpnRoutesOutput) { + op := &request.Operation{ + Name: opDescribeClientVpnRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClientVpnRoutesInput{} + } + + output = &DescribeClientVpnRoutesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClientVpnRoutes API operation for Amazon Elastic Compute Cloud. +// +// Describes the routes for the specified Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClientVpnRoutes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnRoutes +func (c *EC2) DescribeClientVpnRoutes(input *DescribeClientVpnRoutesInput) (*DescribeClientVpnRoutesOutput, error) { + req, out := c.DescribeClientVpnRoutesRequest(input) + return out, req.Send() +} + +// DescribeClientVpnRoutesWithContext is the same as DescribeClientVpnRoutes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClientVpnRoutes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnRoutesWithContext(ctx aws.Context, input *DescribeClientVpnRoutesInput, opts ...request.Option) (*DescribeClientVpnRoutesOutput, error) { + req, out := c.DescribeClientVpnRoutesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClientVpnRoutesPages iterates over the pages of a DescribeClientVpnRoutes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClientVpnRoutes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClientVpnRoutes operation. 
+// pageNum := 0 +// err := client.DescribeClientVpnRoutesPages(params, +// func(page *ec2.DescribeClientVpnRoutesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClientVpnRoutesPages(input *DescribeClientVpnRoutesInput, fn func(*DescribeClientVpnRoutesOutput, bool) bool) error { + return c.DescribeClientVpnRoutesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClientVpnRoutesPagesWithContext same as DescribeClientVpnRoutesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnRoutesPagesWithContext(ctx aws.Context, input *DescribeClientVpnRoutesInput, fn func(*DescribeClientVpnRoutesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClientVpnRoutesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClientVpnRoutesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnRoutesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeClientVpnTargetNetworks = "DescribeClientVpnTargetNetworks" + +// DescribeClientVpnTargetNetworksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClientVpnTargetNetworks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClientVpnTargetNetworks for more information on using the DescribeClientVpnTargetNetworks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClientVpnTargetNetworksRequest method. +// req, resp := client.DescribeClientVpnTargetNetworksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnTargetNetworks +func (c *EC2) DescribeClientVpnTargetNetworksRequest(input *DescribeClientVpnTargetNetworksInput) (req *request.Request, output *DescribeClientVpnTargetNetworksOutput) { + op := &request.Operation{ + Name: opDescribeClientVpnTargetNetworks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeClientVpnTargetNetworksInput{} + } + + output = &DescribeClientVpnTargetNetworksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClientVpnTargetNetworks API operation for Amazon Elastic Compute Cloud. +// +// Describes the target networks associated with the specified Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeClientVpnTargetNetworks for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClientVpnTargetNetworks +func (c *EC2) DescribeClientVpnTargetNetworks(input *DescribeClientVpnTargetNetworksInput) (*DescribeClientVpnTargetNetworksOutput, error) { + req, out := c.DescribeClientVpnTargetNetworksRequest(input) + return out, req.Send() +} + +// DescribeClientVpnTargetNetworksWithContext is the same as DescribeClientVpnTargetNetworks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClientVpnTargetNetworks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnTargetNetworksWithContext(ctx aws.Context, input *DescribeClientVpnTargetNetworksInput, opts ...request.Option) (*DescribeClientVpnTargetNetworksOutput, error) { + req, out := c.DescribeClientVpnTargetNetworksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeClientVpnTargetNetworksPages iterates over the pages of a DescribeClientVpnTargetNetworks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClientVpnTargetNetworks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClientVpnTargetNetworks operation. 
+// pageNum := 0 +// err := client.DescribeClientVpnTargetNetworksPages(params, +// func(page *ec2.DescribeClientVpnTargetNetworksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeClientVpnTargetNetworksPages(input *DescribeClientVpnTargetNetworksInput, fn func(*DescribeClientVpnTargetNetworksOutput, bool) bool) error { + return c.DescribeClientVpnTargetNetworksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeClientVpnTargetNetworksPagesWithContext same as DescribeClientVpnTargetNetworksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClientVpnTargetNetworksPagesWithContext(ctx aws.Context, input *DescribeClientVpnTargetNetworksInput, fn func(*DescribeClientVpnTargetNetworksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeClientVpnTargetNetworksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeClientVpnTargetNetworksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeClientVpnTargetNetworksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeCoipPools = "DescribeCoipPools" + +// DescribeCoipPoolsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCoipPools operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCoipPools for more information on using the DescribeCoipPools +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCoipPoolsRequest method. +// req, resp := client.DescribeCoipPoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCoipPools +func (c *EC2) DescribeCoipPoolsRequest(input *DescribeCoipPoolsInput) (req *request.Request, output *DescribeCoipPoolsOutput) { + op := &request.Operation{ + Name: opDescribeCoipPools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCoipPoolsInput{} + } + + output = &DescribeCoipPoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCoipPools API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified customer-owned address pools or all of your customer-owned +// address pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCoipPools for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCoipPools +func (c *EC2) DescribeCoipPools(input *DescribeCoipPoolsInput) (*DescribeCoipPoolsOutput, error) { + req, out := c.DescribeCoipPoolsRequest(input) + return out, req.Send() +} + +// DescribeCoipPoolsWithContext is the same as DescribeCoipPools with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCoipPools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCoipPoolsWithContext(ctx aws.Context, input *DescribeCoipPoolsInput, opts ...request.Option) (*DescribeCoipPoolsOutput, error) { + req, out := c.DescribeCoipPoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCoipPoolsPages iterates over the pages of a DescribeCoipPools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCoipPools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCoipPools operation. 
+// pageNum := 0 +// err := client.DescribeCoipPoolsPages(params, +// func(page *ec2.DescribeCoipPoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCoipPoolsPages(input *DescribeCoipPoolsInput, fn func(*DescribeCoipPoolsOutput, bool) bool) error { + return c.DescribeCoipPoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCoipPoolsPagesWithContext same as DescribeCoipPoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCoipPoolsPagesWithContext(ctx aws.Context, input *DescribeCoipPoolsInput, fn func(*DescribeCoipPoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCoipPoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCoipPoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCoipPoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeConversionTasks = "DescribeConversionTasks" + +// DescribeConversionTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConversionTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeConversionTasks for more information on using the DescribeConversionTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeConversionTasksRequest method. +// req, resp := client.DescribeConversionTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks +func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { + op := &request.Operation{ + Name: opDescribeConversionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConversionTasksInput{} + } + + output = &DescribeConversionTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeConversionTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified conversion tasks or all your conversion tasks. For +// more information, see the VM Import/Export User Guide (https://docs.aws.amazon.com/vm-import/latest/userguide/). +// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeConversionTasks for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks +func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + return out, req.Send() +} + +// DescribeConversionTasksWithContext is the same as DescribeConversionTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeConversionTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeConversionTasksWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.Option) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeCustomerGateways = "DescribeCustomerGateways" + +// DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomerGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCustomerGateways for more information on using the DescribeCustomerGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeCustomerGatewaysRequest method. +// req, resp := client.DescribeCustomerGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGateways +func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCustomerGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeCustomerGatewaysInput{} + } + + output = &DescribeCustomerGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCustomerGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your VPN customer gateways. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCustomerGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGateways +func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + return out, req.Send() +} + +// DescribeCustomerGatewaysWithContext is the same as DescribeCustomerGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCustomerGateways for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCustomerGatewaysWithContext(ctx aws.Context, input *DescribeCustomerGatewaysInput, opts ...request.Option) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDhcpOptions = "DescribeDhcpOptions" + +// DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDhcpOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDhcpOptions for more information on using the DescribeDhcpOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDhcpOptionsRequest method. 
+// req, resp := client.DescribeDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeDhcpOptions +func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { + op := &request.Operation{ + Name: opDescribeDhcpOptions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDhcpOptionsInput{} + } + + output = &DescribeDhcpOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDhcpOptions API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your DHCP options sets. +// +// For more information, see DHCP Options Sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeDhcpOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeDhcpOptions +func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + return out, req.Send() +} + +// DescribeDhcpOptionsWithContext is the same as DescribeDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDhcpOptions for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeDhcpOptionsWithContext(ctx aws.Context, input *DescribeDhcpOptionsInput, opts ...request.Option) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDhcpOptionsPages iterates over the pages of a DescribeDhcpOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDhcpOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDhcpOptions operation. +// pageNum := 0 +// err := client.DescribeDhcpOptionsPages(params, +// func(page *ec2.DescribeDhcpOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeDhcpOptionsPages(input *DescribeDhcpOptionsInput, fn func(*DescribeDhcpOptionsOutput, bool) bool) error { + return c.DescribeDhcpOptionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDhcpOptionsPagesWithContext same as DescribeDhcpOptionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeDhcpOptionsPagesWithContext(ctx aws.Context, input *DescribeDhcpOptionsInput, fn func(*DescribeDhcpOptionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDhcpOptionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDhcpOptionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDhcpOptionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeEgressOnlyInternetGateways = "DescribeEgressOnlyInternetGateways" + +// DescribeEgressOnlyInternetGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEgressOnlyInternetGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEgressOnlyInternetGateways for more information on using the DescribeEgressOnlyInternetGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEgressOnlyInternetGatewaysRequest method. 
+// req, resp := client.DescribeEgressOnlyInternetGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGateways +func (c *EC2) DescribeEgressOnlyInternetGatewaysRequest(input *DescribeEgressOnlyInternetGatewaysInput) (req *request.Request, output *DescribeEgressOnlyInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeEgressOnlyInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEgressOnlyInternetGatewaysInput{} + } + + output = &DescribeEgressOnlyInternetGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEgressOnlyInternetGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your egress-only internet gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeEgressOnlyInternetGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGateways +func (c *EC2) DescribeEgressOnlyInternetGateways(input *DescribeEgressOnlyInternetGatewaysInput) (*DescribeEgressOnlyInternetGatewaysOutput, error) { + req, out := c.DescribeEgressOnlyInternetGatewaysRequest(input) + return out, req.Send() +} + +// DescribeEgressOnlyInternetGatewaysWithContext is the same as DescribeEgressOnlyInternetGateways with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeEgressOnlyInternetGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeEgressOnlyInternetGatewaysWithContext(ctx aws.Context, input *DescribeEgressOnlyInternetGatewaysInput, opts ...request.Option) (*DescribeEgressOnlyInternetGatewaysOutput, error) { + req, out := c.DescribeEgressOnlyInternetGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeEgressOnlyInternetGatewaysPages iterates over the pages of a DescribeEgressOnlyInternetGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEgressOnlyInternetGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEgressOnlyInternetGateways operation. +// pageNum := 0 +// err := client.DescribeEgressOnlyInternetGatewaysPages(params, +// func(page *ec2.DescribeEgressOnlyInternetGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeEgressOnlyInternetGatewaysPages(input *DescribeEgressOnlyInternetGatewaysInput, fn func(*DescribeEgressOnlyInternetGatewaysOutput, bool) bool) error { + return c.DescribeEgressOnlyInternetGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEgressOnlyInternetGatewaysPagesWithContext same as DescribeEgressOnlyInternetGatewaysPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeEgressOnlyInternetGatewaysPagesWithContext(ctx aws.Context, input *DescribeEgressOnlyInternetGatewaysInput, fn func(*DescribeEgressOnlyInternetGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEgressOnlyInternetGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEgressOnlyInternetGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEgressOnlyInternetGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeElasticGpus = "DescribeElasticGpus" + +// DescribeElasticGpusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticGpus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeElasticGpus for more information on using the DescribeElasticGpus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeElasticGpusRequest method. 
+// req, resp := client.DescribeElasticGpusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpus +func (c *EC2) DescribeElasticGpusRequest(input *DescribeElasticGpusInput) (req *request.Request, output *DescribeElasticGpusOutput) { + op := &request.Operation{ + Name: opDescribeElasticGpus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticGpusInput{} + } + + output = &DescribeElasticGpusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeElasticGpus API operation for Amazon Elastic Compute Cloud. +// +// Describes the Elastic Graphics accelerator associated with your instances. +// For more information about Elastic Graphics, see Amazon Elastic Graphics +// (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeElasticGpus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpus +func (c *EC2) DescribeElasticGpus(input *DescribeElasticGpusInput) (*DescribeElasticGpusOutput, error) { + req, out := c.DescribeElasticGpusRequest(input) + return out, req.Send() +} + +// DescribeElasticGpusWithContext is the same as DescribeElasticGpus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeElasticGpus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeElasticGpusInput, opts ...request.Option) (*DescribeElasticGpusOutput, error) { + req, out := c.DescribeElasticGpusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeExportImageTasks = "DescribeExportImageTasks" + +// DescribeExportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportImageTasks for more information on using the DescribeExportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportImageTasksRequest method. 
+// req, resp := client.DescribeExportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInput) (req *request.Request, output *DescribeExportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeExportImageTasksInput{} + } + + output = &DescribeExportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export image tasks or all of your export image tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportImageTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasks(input *DescribeExportImageTasksInput) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeExportImageTasksWithContext is the same as DescribeExportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeExportImageTasksWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, opts ...request.Option) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeExportImageTasksPages iterates over the pages of a DescribeExportImageTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeExportImageTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeExportImageTasks operation. +// pageNum := 0 +// err := client.DescribeExportImageTasksPages(params, +// func(page *ec2.DescribeExportImageTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeExportImageTasksPages(input *DescribeExportImageTasksInput, fn func(*DescribeExportImageTasksOutput, bool) bool) error { + return c.DescribeExportImageTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeExportImageTasksPagesWithContext same as DescribeExportImageTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeExportImageTasksPagesWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, fn func(*DescribeExportImageTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeExportImageTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeExportImageTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeExportImageTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeExportTasks = "DescribeExportTasks" + +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportTasks for more information on using the DescribeExportTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportTasksRequest method. 
+// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks +func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportTasksInput{} + } + + output = &DescribeExportTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export instance tasks or all of your export instance +// tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks +func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + return out, req.Send() +} + +// DescribeExportTasksWithContext is the same as DescribeExportTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeExportTasksWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.Option) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFastSnapshotRestores = "DescribeFastSnapshotRestores" + +// DescribeFastSnapshotRestoresRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFastSnapshotRestores operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFastSnapshotRestores for more information on using the DescribeFastSnapshotRestores +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFastSnapshotRestoresRequest method. 
+// req, resp := client.DescribeFastSnapshotRestoresRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFastSnapshotRestores +func (c *EC2) DescribeFastSnapshotRestoresRequest(input *DescribeFastSnapshotRestoresInput) (req *request.Request, output *DescribeFastSnapshotRestoresOutput) { + op := &request.Operation{ + Name: opDescribeFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFastSnapshotRestoresInput{} + } + + output = &DescribeFastSnapshotRestoresOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFastSnapshotRestores API operation for Amazon Elastic Compute Cloud. +// +// Describes the state of fast snapshot restores for your snapshots. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFastSnapshotRestores for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFastSnapshotRestores +func (c *EC2) DescribeFastSnapshotRestores(input *DescribeFastSnapshotRestoresInput) (*DescribeFastSnapshotRestoresOutput, error) { + req, out := c.DescribeFastSnapshotRestoresRequest(input) + return out, req.Send() +} + +// DescribeFastSnapshotRestoresWithContext is the same as DescribeFastSnapshotRestores with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFastSnapshotRestores for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFastSnapshotRestoresWithContext(ctx aws.Context, input *DescribeFastSnapshotRestoresInput, opts ...request.Option) (*DescribeFastSnapshotRestoresOutput, error) { + req, out := c.DescribeFastSnapshotRestoresRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFastSnapshotRestoresPages iterates over the pages of a DescribeFastSnapshotRestores operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFastSnapshotRestores method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFastSnapshotRestores operation. +// pageNum := 0 +// err := client.DescribeFastSnapshotRestoresPages(params, +// func(page *ec2.DescribeFastSnapshotRestoresOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeFastSnapshotRestoresPages(input *DescribeFastSnapshotRestoresInput, fn func(*DescribeFastSnapshotRestoresOutput, bool) bool) error { + return c.DescribeFastSnapshotRestoresPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFastSnapshotRestoresPagesWithContext same as DescribeFastSnapshotRestoresPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFastSnapshotRestoresPagesWithContext(ctx aws.Context, input *DescribeFastSnapshotRestoresInput, fn func(*DescribeFastSnapshotRestoresOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFastSnapshotRestoresInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFastSnapshotRestoresRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFastSnapshotRestoresOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFleetHistory = "DescribeFleetHistory" + +// DescribeFleetHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetHistory for more information on using the DescribeFleetHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetHistoryRequest method. 
+// req, resp := client.DescribeFleetHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetHistory +func (c *EC2) DescribeFleetHistoryRequest(input *DescribeFleetHistoryInput) (req *request.Request, output *DescribeFleetHistoryOutput) { + op := &request.Operation{ + Name: opDescribeFleetHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetHistoryInput{} + } + + output = &DescribeFleetHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetHistory API operation for Amazon Elastic Compute Cloud. +// +// Describes the events for the specified EC2 Fleet during the specified time. +// +// EC2 Fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. EC2 Fleet events are available for 48 hours. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleetHistory for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetHistory +func (c *EC2) DescribeFleetHistory(input *DescribeFleetHistoryInput) (*DescribeFleetHistoryOutput, error) { + req, out := c.DescribeFleetHistoryRequest(input) + return out, req.Send() +} + +// DescribeFleetHistoryWithContext is the same as DescribeFleetHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetHistoryWithContext(ctx aws.Context, input *DescribeFleetHistoryInput, opts ...request.Option) (*DescribeFleetHistoryOutput, error) { + req, out := c.DescribeFleetHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleetInstances = "DescribeFleetInstances" + +// DescribeFleetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetInstances for more information on using the DescribeFleetInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetInstancesRequest method. 
+// req, resp := client.DescribeFleetInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetInstances +func (c *EC2) DescribeFleetInstancesRequest(input *DescribeFleetInstancesInput) (req *request.Request, output *DescribeFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFleetInstancesInput{} + } + + output = &DescribeFleetInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleetInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes the running instances for the specified EC2 Fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleetInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleetInstances +func (c *EC2) DescribeFleetInstances(input *DescribeFleetInstancesInput) (*DescribeFleetInstancesOutput, error) { + req, out := c.DescribeFleetInstancesRequest(input) + return out, req.Send() +} + +// DescribeFleetInstancesWithContext is the same as DescribeFleetInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleetInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeFleetInstancesWithContext(ctx aws.Context, input *DescribeFleetInstancesInput, opts ...request.Option) (*DescribeFleetInstancesOutput, error) { + req, out := c.DescribeFleetInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFleets = "DescribeFleets" + +// DescribeFleetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleets for more information on using the DescribeFleets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFleetsRequest method. +// req, resp := client.DescribeFleetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleets +func (c *EC2) DescribeFleetsRequest(input *DescribeFleetsInput) (req *request.Request, output *DescribeFleetsOutput) { + op := &request.Operation{ + Name: opDescribeFleets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFleetsInput{} + } + + output = &DescribeFleetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFleets API operation for Amazon Elastic Compute Cloud. 
+// +// Describes the specified EC2 Fleets or all of your EC2 Fleets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFleets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFleets +func (c *EC2) DescribeFleets(input *DescribeFleetsInput) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + return out, req.Send() +} + +// DescribeFleetsWithContext is the same as DescribeFleets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetsWithContext(ctx aws.Context, input *DescribeFleetsInput, opts ...request.Option) (*DescribeFleetsOutput, error) { + req, out := c.DescribeFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFleetsPages iterates over the pages of a DescribeFleets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFleets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFleets operation. 
+// pageNum := 0 +// err := client.DescribeFleetsPages(params, +// func(page *ec2.DescribeFleetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeFleetsPages(input *DescribeFleetsInput, fn func(*DescribeFleetsOutput, bool) bool) error { + return c.DescribeFleetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFleetsPagesWithContext same as DescribeFleetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFleetsPagesWithContext(ctx aws.Context, input *DescribeFleetsInput, fn func(*DescribeFleetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFleetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFleetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFlowLogs = "DescribeFlowLogs" + +// DescribeFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFlowLogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFlowLogs for more information on using the DescribeFlowLogs +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFlowLogsRequest method. +// req, resp := client.DescribeFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogs +func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { + op := &request.Operation{ + Name: opDescribeFlowLogs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFlowLogsInput{} + } + + output = &DescribeFlowLogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFlowLogs API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more flow logs. To view the information in your flow logs +// (the log streams for the network interfaces), you must use the CloudWatch +// Logs console or the CloudWatch Logs API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFlowLogs for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogs +func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + return out, req.Send() +} + +// DescribeFlowLogsWithContext is the same as DescribeFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFlowLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLogsInput, opts ...request.Option) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFlowLogsPages iterates over the pages of a DescribeFlowLogs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFlowLogs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFlowLogs operation. 
+// pageNum := 0 +// err := client.DescribeFlowLogsPages(params, +// func(page *ec2.DescribeFlowLogsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeFlowLogsPages(input *DescribeFlowLogsInput, fn func(*DescribeFlowLogsOutput, bool) bool) error { + return c.DescribeFlowLogsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFlowLogsPagesWithContext same as DescribeFlowLogsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFlowLogsPagesWithContext(ctx aws.Context, input *DescribeFlowLogsInput, fn func(*DescribeFlowLogsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFlowLogsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFlowLogsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFlowLogsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" + +// DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeFpgaImageAttribute for more information on using the DescribeFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFpgaImageAttributeRequest method. +// req, resp := client.DescribeFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttributeRequest(input *DescribeFpgaImageAttributeInput) (req *request.Request, output *DescribeFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFpgaImageAttributeInput{} + } + + output = &DescribeFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImageAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttribute(input *DescribeFpgaImageAttributeInput) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// DescribeFpgaImageAttributeWithContext is the same as DescribeFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImageAttributeWithContext(ctx aws.Context, input *DescribeFpgaImageAttributeInput, opts ...request.Option) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFpgaImages = "DescribeFpgaImages" + +// DescribeFpgaImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFpgaImages for more information on using the DescribeFpgaImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DescribeFpgaImagesRequest method. +// req, resp := client.DescribeFpgaImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages +func (c *EC2) DescribeFpgaImagesRequest(input *DescribeFpgaImagesInput) (req *request.Request, output *DescribeFpgaImagesOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImages, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFpgaImagesInput{} + } + + output = &DescribeFpgaImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImages API operation for Amazon Elastic Compute Cloud. +// +// Describes the Amazon FPGA Images (AFIs) available to you. These include public +// AFIs, private AFIs that you own, and AFIs owned by other AWS accounts for +// which you have load permissions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImages for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages +func (c *EC2) DescribeFpgaImages(input *DescribeFpgaImagesInput) (*DescribeFpgaImagesOutput, error) { + req, out := c.DescribeFpgaImagesRequest(input) + return out, req.Send() +} + +// DescribeFpgaImagesWithContext is the same as DescribeFpgaImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImages for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImagesWithContext(ctx aws.Context, input *DescribeFpgaImagesInput, opts ...request.Option) (*DescribeFpgaImagesOutput, error) { + req, out := c.DescribeFpgaImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFpgaImagesPages iterates over the pages of a DescribeFpgaImages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFpgaImages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFpgaImages operation. +// pageNum := 0 +// err := client.DescribeFpgaImagesPages(params, +// func(page *ec2.DescribeFpgaImagesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeFpgaImagesPages(input *DescribeFpgaImagesInput, fn func(*DescribeFpgaImagesOutput, bool) bool) error { + return c.DescribeFpgaImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFpgaImagesPagesWithContext same as DescribeFpgaImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeFpgaImagesPagesWithContext(ctx aws.Context, input *DescribeFpgaImagesInput, fn func(*DescribeFpgaImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFpgaImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFpgaImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFpgaImagesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings" + +// DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHostReservationOfferings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeHostReservationOfferings for more information on using the DescribeHostReservationOfferings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeHostReservationOfferingsRequest method. 
+// req, resp := client.DescribeHostReservationOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferings +func (c *EC2) DescribeHostReservationOfferingsRequest(input *DescribeHostReservationOfferingsInput) (req *request.Request, output *DescribeHostReservationOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeHostReservationOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHostReservationOfferingsInput{} + } + + output = &DescribeHostReservationOfferingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeHostReservationOfferings API operation for Amazon Elastic Compute Cloud. +// +// Describes the Dedicated Host reservations that are available to purchase. +// +// The results describe all of the Dedicated Host reservation offerings, including +// offerings that might not match the instance family and Region of your Dedicated +// Hosts. When purchasing an offering, ensure that the instance family and Region +// of the offering matches that of the Dedicated Hosts with which it is to be +// associated. For more information about supported instance types, see Dedicated +// Hosts Overview (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeHostReservationOfferings for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferings +func (c *EC2) DescribeHostReservationOfferings(input *DescribeHostReservationOfferingsInput) (*DescribeHostReservationOfferingsOutput, error) { + req, out := c.DescribeHostReservationOfferingsRequest(input) + return out, req.Send() +} + +// DescribeHostReservationOfferingsWithContext is the same as DescribeHostReservationOfferings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHostReservationOfferings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationOfferingsWithContext(ctx aws.Context, input *DescribeHostReservationOfferingsInput, opts ...request.Option) (*DescribeHostReservationOfferingsOutput, error) { + req, out := c.DescribeHostReservationOfferingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeHostReservationOfferingsPages iterates over the pages of a DescribeHostReservationOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHostReservationOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHostReservationOfferings operation. 
+// pageNum := 0 +// err := client.DescribeHostReservationOfferingsPages(params, +// func(page *ec2.DescribeHostReservationOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeHostReservationOfferingsPages(input *DescribeHostReservationOfferingsInput, fn func(*DescribeHostReservationOfferingsOutput, bool) bool) error { + return c.DescribeHostReservationOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeHostReservationOfferingsPagesWithContext same as DescribeHostReservationOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationOfferingsPagesWithContext(ctx aws.Context, input *DescribeHostReservationOfferingsInput, fn func(*DescribeHostReservationOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeHostReservationOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeHostReservationOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeHostReservationOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeHostReservations = "DescribeHostReservations" + +// DescribeHostReservationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHostReservations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeHostReservations for more information on using the DescribeHostReservations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeHostReservationsRequest method. +// req, resp := client.DescribeHostReservationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservations +func (c *EC2) DescribeHostReservationsRequest(input *DescribeHostReservationsInput) (req *request.Request, output *DescribeHostReservationsOutput) { + op := &request.Operation{ + Name: opDescribeHostReservations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHostReservationsInput{} + } + + output = &DescribeHostReservationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeHostReservations API operation for Amazon Elastic Compute Cloud. +// +// Describes reservations that are associated with Dedicated Hosts in your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeHostReservations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservations +func (c *EC2) DescribeHostReservations(input *DescribeHostReservationsInput) (*DescribeHostReservationsOutput, error) { + req, out := c.DescribeHostReservationsRequest(input) + return out, req.Send() +} + +// DescribeHostReservationsWithContext is the same as DescribeHostReservations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHostReservations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationsWithContext(ctx aws.Context, input *DescribeHostReservationsInput, opts ...request.Option) (*DescribeHostReservationsOutput, error) { + req, out := c.DescribeHostReservationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeHostReservationsPages iterates over the pages of a DescribeHostReservations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHostReservations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHostReservations operation. 
+// pageNum := 0 +// err := client.DescribeHostReservationsPages(params, +// func(page *ec2.DescribeHostReservationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeHostReservationsPages(input *DescribeHostReservationsInput, fn func(*DescribeHostReservationsOutput, bool) bool) error { + return c.DescribeHostReservationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeHostReservationsPagesWithContext same as DescribeHostReservationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationsPagesWithContext(ctx aws.Context, input *DescribeHostReservationsInput, fn func(*DescribeHostReservationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeHostReservationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeHostReservationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeHostReservationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeHosts = "DescribeHosts" + +// DescribeHostsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHosts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeHosts for more information on using the DescribeHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeHostsRequest method. +// req, resp := client.DescribeHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHosts +func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) { + op := &request.Operation{ + Name: opDescribeHosts, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHostsInput{} + } + + output = &DescribeHostsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeHosts API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified Dedicated Hosts or all your Dedicated Hosts. +// +// The results describe only the Dedicated Hosts in the Region you're currently +// using. All listed instances consume capacity on your Dedicated Host. Dedicated +// Hosts that have recently been released are listed with the state released. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeHosts for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHosts +func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) { + req, out := c.DescribeHostsRequest(input) + return out, req.Send() +} + +// DescribeHostsWithContext is the same as DescribeHosts with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostsWithContext(ctx aws.Context, input *DescribeHostsInput, opts ...request.Option) (*DescribeHostsOutput, error) { + req, out := c.DescribeHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeHostsPages iterates over the pages of a DescribeHosts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHosts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHosts operation. +// pageNum := 0 +// err := client.DescribeHostsPages(params, +// func(page *ec2.DescribeHostsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeHostsPages(input *DescribeHostsInput, fn func(*DescribeHostsOutput, bool) bool) error { + return c.DescribeHostsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeHostsPagesWithContext same as DescribeHostsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostsPagesWithContext(ctx aws.Context, input *DescribeHostsInput, fn func(*DescribeHostsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeHostsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeHostsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeHostsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeIamInstanceProfileAssociations = "DescribeIamInstanceProfileAssociations" + +// DescribeIamInstanceProfileAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIamInstanceProfileAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIamInstanceProfileAssociations for more information on using the DescribeIamInstanceProfileAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIamInstanceProfileAssociationsRequest method. 
+// req, resp := client.DescribeIamInstanceProfileAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociations +func (c *EC2) DescribeIamInstanceProfileAssociationsRequest(input *DescribeIamInstanceProfileAssociationsInput) (req *request.Request, output *DescribeIamInstanceProfileAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeIamInstanceProfileAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeIamInstanceProfileAssociationsInput{} + } + + output = &DescribeIamInstanceProfileAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIamInstanceProfileAssociations API operation for Amazon Elastic Compute Cloud. +// +// Describes your IAM instance profile associations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIamInstanceProfileAssociations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociations +func (c *EC2) DescribeIamInstanceProfileAssociations(input *DescribeIamInstanceProfileAssociationsInput) (*DescribeIamInstanceProfileAssociationsOutput, error) { + req, out := c.DescribeIamInstanceProfileAssociationsRequest(input) + return out, req.Send() +} + +// DescribeIamInstanceProfileAssociationsWithContext is the same as DescribeIamInstanceProfileAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIamInstanceProfileAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIamInstanceProfileAssociationsWithContext(ctx aws.Context, input *DescribeIamInstanceProfileAssociationsInput, opts ...request.Option) (*DescribeIamInstanceProfileAssociationsOutput, error) { + req, out := c.DescribeIamInstanceProfileAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeIamInstanceProfileAssociationsPages iterates over the pages of a DescribeIamInstanceProfileAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeIamInstanceProfileAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeIamInstanceProfileAssociations operation. 
+// pageNum := 0 +// err := client.DescribeIamInstanceProfileAssociationsPages(params, +// func(page *ec2.DescribeIamInstanceProfileAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeIamInstanceProfileAssociationsPages(input *DescribeIamInstanceProfileAssociationsInput, fn func(*DescribeIamInstanceProfileAssociationsOutput, bool) bool) error { + return c.DescribeIamInstanceProfileAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeIamInstanceProfileAssociationsPagesWithContext same as DescribeIamInstanceProfileAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIamInstanceProfileAssociationsPagesWithContext(ctx aws.Context, input *DescribeIamInstanceProfileAssociationsInput, fn func(*DescribeIamInstanceProfileAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeIamInstanceProfileAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeIamInstanceProfileAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeIamInstanceProfileAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeIdFormat = "DescribeIdFormat" + +// DescribeIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdFormat operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIdFormat for more information on using the DescribeIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIdFormatRequest method. +// req, resp := client.DescribeIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormat +func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdFormatInput{} + } + + output = &DescribeIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the ID format settings for your resources on a per-Region basis, +// for example, to view which resource types are enabled for longer IDs. This +// request only returns information about resource types whose ID formats can +// be modified; it does not return information about other resource types. 
+// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// These settings apply to the IAM user who makes the request; they do not apply +// to the entire AWS account. By default, an IAM user defaults to the same settings +// as the root user, unless they explicitly override the settings by running +// the ModifyIdFormat command. Resources created with longer IDs are visible +// to all IAM users, regardless of these settings and provided that they have +// permission to use the relevant Describe command for the resource type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormat +func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) { + req, out := c.DescribeIdFormatRequest(input) + return out, req.Send() +} + +// DescribeIdFormatWithContext is the same as DescribeIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIdFormatWithContext(ctx aws.Context, input *DescribeIdFormatInput, opts ...request.Option) (*DescribeIdFormatOutput, error) { + req, out := c.DescribeIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" + +// DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIdentityIdFormat for more information on using the DescribeIdentityIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIdentityIdFormatRequest method. 
+// req, resp := client.DescribeIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdentityIdFormat +func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInput) (req *request.Request, output *DescribeIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityIdFormatInput{} + } + + output = &DescribeIdentityIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIdentityIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the ID format settings for resources for the specified IAM user, +// IAM role, or root user. For example, you can view the resource types that +// are enabled for longer IDs. This request only returns information about resource +// types whose ID formats can be modified; it does not return information about +// other resource types. For more information, see Resource IDs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// These settings apply to the principal specified in the request. 
They do not +// apply to the principal that makes the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIdentityIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdentityIdFormat +func (c *EC2) DescribeIdentityIdFormat(input *DescribeIdentityIdFormatInput) (*DescribeIdentityIdFormatOutput, error) { + req, out := c.DescribeIdentityIdFormatRequest(input) + return out, req.Send() +} + +// DescribeIdentityIdFormatWithContext is the same as DescribeIdentityIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdentityIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIdentityIdFormatWithContext(ctx aws.Context, input *DescribeIdentityIdFormatInput, opts ...request.Option) (*DescribeIdentityIdFormatOutput, error) { + req, out := c.DescribeIdentityIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImageAttribute = "DescribeImageAttribute" + +// DescribeImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageAttribute for more information on using the DescribeImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageAttributeRequest method. +// req, resp := client.DescribeImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImageAttribute +func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImageAttributeInput{} + } + + output = &DescribeImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified AMI. You can specify only +// one attribute at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeImageAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImageAttribute +func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + return out, req.Send() +} + +// DescribeImageAttributeWithContext is the same as DescribeImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImageAttributeWithContext(ctx aws.Context, input *DescribeImageAttributeInput, opts ...request.Option) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImages = "DescribeImages" + +// DescribeImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImages for more information on using the DescribeImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImagesRequest method. 
+// req, resp := client.DescribeImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages +func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { + op := &request.Operation{ + Name: opDescribeImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImagesInput{} + } + + output = &DescribeImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImages API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified images (AMIs, AKIs, and ARIs) available to you or +// all of the images available to you. +// +// The images available to you include public images, private images that you +// own, and private images owned by other AWS accounts for which you have explicit +// launch permissions. +// +// Recently deregistered images appear in the returned results for a short interval +// and then return empty results. After all instances that reference a deregistered +// AMI are terminated, specifying the ID of the image results in an error indicating +// that the AMI ID cannot be found. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeImages for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages +func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + return out, req.Send() +} + +// DescribeImagesWithContext is the same as DescribeImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeImportImageTasks = "DescribeImportImageTasks" + +// DescribeImportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImportImageTasks for more information on using the DescribeImportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImportImageTasksRequest method. 
+// req, resp := client.DescribeImportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasks +func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeImportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImportImageTasksInput{} + } + + output = &DescribeImportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Displays details about an import virtual machine or import snapshot tasks +// that are already created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeImportImageTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasks +func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) { + req, out := c.DescribeImportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeImportImageTasksWithContext is the same as DescribeImportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImportImageTasks for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImportImageTasksWithContext(ctx aws.Context, input *DescribeImportImageTasksInput, opts ...request.Option) (*DescribeImportImageTasksOutput, error) { + req, out := c.DescribeImportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImportImageTasksPages iterates over the pages of a DescribeImportImageTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImportImageTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImportImageTasks operation. +// pageNum := 0 +// err := client.DescribeImportImageTasksPages(params, +// func(page *ec2.DescribeImportImageTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeImportImageTasksPages(input *DescribeImportImageTasksInput, fn func(*DescribeImportImageTasksOutput, bool) bool) error { + return c.DescribeImportImageTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImportImageTasksPagesWithContext same as DescribeImportImageTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeImportImageTasksPagesWithContext(ctx aws.Context, input *DescribeImportImageTasksInput, fn func(*DescribeImportImageTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImportImageTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImportImageTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImportImageTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" + +// DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImportSnapshotTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImportSnapshotTasks for more information on using the DescribeImportSnapshotTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImportSnapshotTasksRequest method. 
+// req, resp := client.DescribeImportSnapshotTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasks +func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) { + op := &request.Operation{ + Name: opDescribeImportSnapshotTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImportSnapshotTasksInput{} + } + + output = &DescribeImportSnapshotTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImportSnapshotTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes your import snapshot tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeImportSnapshotTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasks +func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) { + req, out := c.DescribeImportSnapshotTasksRequest(input) + return out, req.Send() +} + +// DescribeImportSnapshotTasksWithContext is the same as DescribeImportSnapshotTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImportSnapshotTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImportSnapshotTasksWithContext(ctx aws.Context, input *DescribeImportSnapshotTasksInput, opts ...request.Option) (*DescribeImportSnapshotTasksOutput, error) { + req, out := c.DescribeImportSnapshotTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImportSnapshotTasksPages iterates over the pages of a DescribeImportSnapshotTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImportSnapshotTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImportSnapshotTasks operation. +// pageNum := 0 +// err := client.DescribeImportSnapshotTasksPages(params, +// func(page *ec2.DescribeImportSnapshotTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeImportSnapshotTasksPages(input *DescribeImportSnapshotTasksInput, fn func(*DescribeImportSnapshotTasksOutput, bool) bool) error { + return c.DescribeImportSnapshotTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImportSnapshotTasksPagesWithContext same as DescribeImportSnapshotTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeImportSnapshotTasksPagesWithContext(ctx aws.Context, input *DescribeImportSnapshotTasksInput, fn func(*DescribeImportSnapshotTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImportSnapshotTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImportSnapshotTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeImportSnapshotTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceAttribute = "DescribeInstanceAttribute" + +// DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceAttribute for more information on using the DescribeInstanceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceAttributeRequest method. 
+// req, resp := client.DescribeInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceAttribute +func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceAttributeInput{} + } + + output = &DescribeInstanceAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified instance. You can specify +// only one attribute at a time. Valid attribute values are: instanceType | +// kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior +// | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | +// groupSet | ebsOptimized | sriovNetSupport +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceAttribute +func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + return out, req.Send() +} + +// DescribeInstanceAttributeWithContext is the same as DescribeInstanceAttribute with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceAttributeWithContext(ctx aws.Context, input *DescribeInstanceAttributeInput, opts ...request.Option) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeInstanceCreditSpecifications = "DescribeInstanceCreditSpecifications" + +// DescribeInstanceCreditSpecificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceCreditSpecifications operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceCreditSpecifications for more information on using the DescribeInstanceCreditSpecifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceCreditSpecificationsRequest method. 
+// req, resp := client.DescribeInstanceCreditSpecificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceCreditSpecifications +func (c *EC2) DescribeInstanceCreditSpecificationsRequest(input *DescribeInstanceCreditSpecificationsInput) (req *request.Request, output *DescribeInstanceCreditSpecificationsOutput) { + op := &request.Operation{ + Name: opDescribeInstanceCreditSpecifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceCreditSpecificationsInput{} + } + + output = &DescribeInstanceCreditSpecificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceCreditSpecifications API operation for Amazon Elastic Compute Cloud. +// +// Describes the credit option for CPU usage of the specified burstable performance +// instances. The credit options are standard and unlimited. +// +// If you do not specify an instance ID, Amazon EC2 returns burstable performance +// instances with the unlimited credit option, as well as instances that were +// previously configured as T2, T3, and T3a with the unlimited credit option. +// For example, if you resize a T2 instance, while it is configured as unlimited, +// to an M4 instance, Amazon EC2 returns the M4 instance. +// +// If you specify one or more instance IDs, Amazon EC2 returns the credit option +// (standard or unlimited) of those instances. If you specify an instance ID +// that is not valid, such as an instance that is not a burstable performance +// instance, an error is returned. +// +// Recently terminated instances might appear in the returned results. This +// interval is usually less than one hour. 
+// +// If an Availability Zone is experiencing a service disruption and you specify +// instance IDs in the affected zone, or do not specify any instance IDs at +// all, the call fails. If you specify only instance IDs in an unaffected zone, +// the call works normally. +// +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceCreditSpecifications for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceCreditSpecifications +func (c *EC2) DescribeInstanceCreditSpecifications(input *DescribeInstanceCreditSpecificationsInput) (*DescribeInstanceCreditSpecificationsOutput, error) { + req, out := c.DescribeInstanceCreditSpecificationsRequest(input) + return out, req.Send() +} + +// DescribeInstanceCreditSpecificationsWithContext is the same as DescribeInstanceCreditSpecifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceCreditSpecifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeInstanceCreditSpecificationsWithContext(ctx aws.Context, input *DescribeInstanceCreditSpecificationsInput, opts ...request.Option) (*DescribeInstanceCreditSpecificationsOutput, error) { + req, out := c.DescribeInstanceCreditSpecificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceCreditSpecificationsPages iterates over the pages of a DescribeInstanceCreditSpecifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceCreditSpecifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceCreditSpecifications operation. +// pageNum := 0 +// err := client.DescribeInstanceCreditSpecificationsPages(params, +// func(page *ec2.DescribeInstanceCreditSpecificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceCreditSpecificationsPages(input *DescribeInstanceCreditSpecificationsInput, fn func(*DescribeInstanceCreditSpecificationsOutput, bool) bool) error { + return c.DescribeInstanceCreditSpecificationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceCreditSpecificationsPagesWithContext same as DescribeInstanceCreditSpecificationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeInstanceCreditSpecificationsPagesWithContext(ctx aws.Context, input *DescribeInstanceCreditSpecificationsInput, fn func(*DescribeInstanceCreditSpecificationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceCreditSpecificationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceCreditSpecificationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceCreditSpecificationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceEventNotificationAttributes = "DescribeInstanceEventNotificationAttributes" + +// DescribeInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceEventNotificationAttributes for more information on using the DescribeInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceEventNotificationAttributesRequest method. 
+// req, resp := client.DescribeInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributesRequest(input *DescribeInstanceEventNotificationAttributesInput) (req *request.Request, output *DescribeInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceEventNotificationAttributesInput{} + } + + output = &DescribeInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Describes the tag keys that are registered to appear in scheduled event notifications +// for resources in the current Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceEventNotificationAttributes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributes(input *DescribeInstanceEventNotificationAttributesInput) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DescribeInstanceEventNotificationAttributesWithContext is the same as DescribeInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DescribeInstanceEventNotificationAttributesInput, opts ...request.Option) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeInstanceStatus = "DescribeInstanceStatus" + +// DescribeInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceStatus for more information on using the DescribeInstanceStatus +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceStatusRequest method. +// req, resp := client.DescribeInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus +func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceStatusInput{} + } + + output = &DescribeInstanceStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceStatus API operation for Amazon Elastic Compute Cloud. +// +// Describes the status of the specified instances or all of your instances. +// By default, only running instances are described, unless you specifically +// indicate to return the status of all instances. +// +// Instance status includes the following components: +// +// * Status checks - Amazon EC2 performs status checks on running EC2 instances +// to identify hardware and software issues. For more information, see Status +// checks for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting instances with failed status checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// or terminate) for your instances related to hardware issues, software +// updates, or system maintenance. For more information, see Scheduled events +// for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// * Instance state - You can manage your instances from the moment you launch +// them through their termination. For more information, see Instance lifecycle +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus +func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + return out, req.Send() +} + +// DescribeInstanceStatusWithContext is the same as DescribeInstanceStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeInstanceStatusWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.Option) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceStatusPages iterates over the pages of a DescribeInstanceStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceStatus operation. +// pageNum := 0 +// err := client.DescribeInstanceStatusPages(params, +// func(page *ec2.DescribeInstanceStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(*DescribeInstanceStatusOutput, bool) bool) error { + return c.DescribeInstanceStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceStatusPagesWithContext same as DescribeInstanceStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeInstanceStatusPagesWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, fn func(*DescribeInstanceStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceStatusOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceTypeOfferings = "DescribeInstanceTypeOfferings" + +// DescribeInstanceTypeOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceTypeOfferings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceTypeOfferings for more information on using the DescribeInstanceTypeOfferings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceTypeOfferingsRequest method. 
+// req, resp := client.DescribeInstanceTypeOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceTypeOfferings +func (c *EC2) DescribeInstanceTypeOfferingsRequest(input *DescribeInstanceTypeOfferingsInput) (req *request.Request, output *DescribeInstanceTypeOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeInstanceTypeOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceTypeOfferingsInput{} + } + + output = &DescribeInstanceTypeOfferingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceTypeOfferings API operation for Amazon Elastic Compute Cloud. +// +// Returns a list of all instance types offered. The results can be filtered +// by location (Region or Availability Zone). If no location is specified, the +// instance types offered in the current Region are returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceTypeOfferings for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceTypeOfferings +func (c *EC2) DescribeInstanceTypeOfferings(input *DescribeInstanceTypeOfferingsInput) (*DescribeInstanceTypeOfferingsOutput, error) { + req, out := c.DescribeInstanceTypeOfferingsRequest(input) + return out, req.Send() +} + +// DescribeInstanceTypeOfferingsWithContext is the same as DescribeInstanceTypeOfferings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceTypeOfferings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypeOfferingsWithContext(ctx aws.Context, input *DescribeInstanceTypeOfferingsInput, opts ...request.Option) (*DescribeInstanceTypeOfferingsOutput, error) { + req, out := c.DescribeInstanceTypeOfferingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceTypeOfferingsPages iterates over the pages of a DescribeInstanceTypeOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceTypeOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceTypeOfferings operation. 
+// pageNum := 0 +// err := client.DescribeInstanceTypeOfferingsPages(params, +// func(page *ec2.DescribeInstanceTypeOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceTypeOfferingsPages(input *DescribeInstanceTypeOfferingsInput, fn func(*DescribeInstanceTypeOfferingsOutput, bool) bool) error { + return c.DescribeInstanceTypeOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceTypeOfferingsPagesWithContext same as DescribeInstanceTypeOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypeOfferingsPagesWithContext(ctx aws.Context, input *DescribeInstanceTypeOfferingsInput, fn func(*DescribeInstanceTypeOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceTypeOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceTypeOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceTypeOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceTypes = "DescribeInstanceTypes" + +// DescribeInstanceTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceTypes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceTypes for more information on using the DescribeInstanceTypes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceTypesRequest method. +// req, resp := client.DescribeInstanceTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceTypes +func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (req *request.Request, output *DescribeInstanceTypesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceTypesInput{} + } + + output = &DescribeInstanceTypesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud. +// +// Describes the details of the instance types that are offered in a location. +// The results can be filtered by the attributes of the instance types. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceTypes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceTypes +func (c *EC2) DescribeInstanceTypes(input *DescribeInstanceTypesInput) (*DescribeInstanceTypesOutput, error) { + req, out := c.DescribeInstanceTypesRequest(input) + return out, req.Send() +} + +// DescribeInstanceTypesWithContext is the same as DescribeInstanceTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypesWithContext(ctx aws.Context, input *DescribeInstanceTypesInput, opts ...request.Option) (*DescribeInstanceTypesOutput, error) { + req, out := c.DescribeInstanceTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceTypesPages iterates over the pages of a DescribeInstanceTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceTypes operation. 
+// pageNum := 0 +// err := client.DescribeInstanceTypesPages(params, +// func(page *ec2.DescribeInstanceTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceTypesPages(input *DescribeInstanceTypesInput, fn func(*DescribeInstanceTypesOutput, bool) bool) error { + return c.DescribeInstanceTypesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceTypesPagesWithContext same as DescribeInstanceTypesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypesPagesWithContext(ctx aws.Context, input *DescribeInstanceTypesInput, fn func(*DescribeInstanceTypesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceTypesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceTypesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceTypesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeInstances for more information on using the DescribeInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstancesRequest method. +// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances +func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + output = &DescribeInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified instances or all instances. +// +// If you specify instance IDs, the output includes information for only the +// specified instances. If you specify filters, the output includes information +// for only those instances that meet the filter criteria. If you do not specify +// instance IDs or filters, the output includes information for all instances, +// which can affect performance. We recommend that you use pagination to ensure +// that the operation returns quickly and successfully. +// +// If you specify an instance ID that is not valid, an error is returned. If +// you specify an instance that you do not own, it is not included in the output. +// +// Recently terminated instances might appear in the returned results. 
This +// interval is usually less than one hour. +// +// If you describe instances in the rare case where an Availability Zone is +// experiencing a service disruption and you specify instance IDs that are in +// the affected zone, or do not specify any instance IDs at all, the call fails. +// If you describe instances and specify only instance IDs that are in an unaffected +// zone, the call works normally. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances +func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + return out, req.Send() +} + +// DescribeInstancesWithContext is the same as DescribeInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstancesPages iterates over the pages of a DescribeInstances operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstances operation. +// pageNum := 0 +// err := client.DescribeInstancesPages(params, +// func(page *ec2.DescribeInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool) error { + return c.DescribeInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancesPagesWithContext same as DescribeInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstancesPagesWithContext(ctx aws.Context, input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInternetGateways = "DescribeInternetGateways" + +// DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInternetGateways operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInternetGateways for more information on using the DescribeInternetGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInternetGatewaysRequest method. +// req, resp := client.DescribeInternetGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGateways +func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeInternetGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInternetGatewaysInput{} + } + + output = &DescribeInternetGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInternetGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your internet gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInternetGateways for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGateways +func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + return out, req.Send() +} + +// DescribeInternetGatewaysWithContext is the same as DescribeInternetGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInternetGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInternetGatewaysWithContext(ctx aws.Context, input *DescribeInternetGatewaysInput, opts ...request.Option) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInternetGatewaysPages iterates over the pages of a DescribeInternetGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInternetGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInternetGateways operation. 
+// pageNum := 0 +// err := client.DescribeInternetGatewaysPages(params, +// func(page *ec2.DescribeInternetGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInternetGatewaysPages(input *DescribeInternetGatewaysInput, fn func(*DescribeInternetGatewaysOutput, bool) bool) error { + return c.DescribeInternetGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInternetGatewaysPagesWithContext same as DescribeInternetGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInternetGatewaysPagesWithContext(ctx aws.Context, input *DescribeInternetGatewaysInput, fn func(*DescribeInternetGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInternetGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInternetGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInternetGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeIpv6Pools = "DescribeIpv6Pools" + +// DescribeIpv6PoolsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIpv6Pools operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIpv6Pools for more information on using the DescribeIpv6Pools +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIpv6PoolsRequest method. +// req, resp := client.DescribeIpv6PoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpv6Pools +func (c *EC2) DescribeIpv6PoolsRequest(input *DescribeIpv6PoolsInput) (req *request.Request, output *DescribeIpv6PoolsOutput) { + op := &request.Operation{ + Name: opDescribeIpv6Pools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeIpv6PoolsInput{} + } + + output = &DescribeIpv6PoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIpv6Pools API operation for Amazon Elastic Compute Cloud. +// +// Describes your IPv6 address pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIpv6Pools for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpv6Pools +func (c *EC2) DescribeIpv6Pools(input *DescribeIpv6PoolsInput) (*DescribeIpv6PoolsOutput, error) { + req, out := c.DescribeIpv6PoolsRequest(input) + return out, req.Send() +} + +// DescribeIpv6PoolsWithContext is the same as DescribeIpv6Pools with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIpv6Pools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIpv6PoolsWithContext(ctx aws.Context, input *DescribeIpv6PoolsInput, opts ...request.Option) (*DescribeIpv6PoolsOutput, error) { + req, out := c.DescribeIpv6PoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeIpv6PoolsPages iterates over the pages of a DescribeIpv6Pools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeIpv6Pools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeIpv6Pools operation. 
+// pageNum := 0 +// err := client.DescribeIpv6PoolsPages(params, +// func(page *ec2.DescribeIpv6PoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeIpv6PoolsPages(input *DescribeIpv6PoolsInput, fn func(*DescribeIpv6PoolsOutput, bool) bool) error { + return c.DescribeIpv6PoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeIpv6PoolsPagesWithContext same as DescribeIpv6PoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIpv6PoolsPagesWithContext(ctx aws.Context, input *DescribeIpv6PoolsInput, fn func(*DescribeIpv6PoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeIpv6PoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeIpv6PoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeIpv6PoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeKeyPairs = "DescribeKeyPairs" + +// DescribeKeyPairsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKeyPairs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeKeyPairs for more information on using the DescribeKeyPairs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeKeyPairsRequest method. +// req, resp := client.DescribeKeyPairsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairs +func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { + op := &request.Operation{ + Name: opDescribeKeyPairs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKeyPairsInput{} + } + + output = &DescribeKeyPairsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeKeyPairs API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified key pairs or all of your key pairs. +// +// For more information about key pairs, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeKeyPairs for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairs +func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + return out, req.Send() +} + +// DescribeKeyPairsWithContext is the same as DescribeKeyPairs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKeyPairs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeKeyPairsWithContext(ctx aws.Context, input *DescribeKeyPairsInput, opts ...request.Option) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLaunchTemplateVersions = "DescribeLaunchTemplateVersions" + +// DescribeLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLaunchTemplateVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLaunchTemplateVersions for more information on using the DescribeLaunchTemplateVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLaunchTemplateVersionsRequest method. 
+// req, resp := client.DescribeLaunchTemplateVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplateVersions +func (c *EC2) DescribeLaunchTemplateVersionsRequest(input *DescribeLaunchTemplateVersionsInput) (req *request.Request, output *DescribeLaunchTemplateVersionsOutput) { + op := &request.Operation{ + Name: opDescribeLaunchTemplateVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLaunchTemplateVersionsInput{} + } + + output = &DescribeLaunchTemplateVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more versions of a specified launch template. You can describe +// all versions, individual versions, or a range of versions. You can also describe +// all the latest versions or all the default versions of all the launch templates +// in your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLaunchTemplateVersions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplateVersions +func (c *EC2) DescribeLaunchTemplateVersions(input *DescribeLaunchTemplateVersionsInput) (*DescribeLaunchTemplateVersionsOutput, error) { + req, out := c.DescribeLaunchTemplateVersionsRequest(input) + return out, req.Send() +} + +// DescribeLaunchTemplateVersionsWithContext is the same as DescribeLaunchTemplateVersions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLaunchTemplateVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLaunchTemplateVersionsWithContext(ctx aws.Context, input *DescribeLaunchTemplateVersionsInput, opts ...request.Option) (*DescribeLaunchTemplateVersionsOutput, error) { + req, out := c.DescribeLaunchTemplateVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLaunchTemplateVersionsPages iterates over the pages of a DescribeLaunchTemplateVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLaunchTemplateVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLaunchTemplateVersions operation. 
+// pageNum := 0 +// err := client.DescribeLaunchTemplateVersionsPages(params, +// func(page *ec2.DescribeLaunchTemplateVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLaunchTemplateVersionsPages(input *DescribeLaunchTemplateVersionsInput, fn func(*DescribeLaunchTemplateVersionsOutput, bool) bool) error { + return c.DescribeLaunchTemplateVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLaunchTemplateVersionsPagesWithContext same as DescribeLaunchTemplateVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLaunchTemplateVersionsPagesWithContext(ctx aws.Context, input *DescribeLaunchTemplateVersionsInput, fn func(*DescribeLaunchTemplateVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLaunchTemplateVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLaunchTemplateVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLaunchTemplateVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLaunchTemplates = "DescribeLaunchTemplates" + +// DescribeLaunchTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLaunchTemplates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLaunchTemplates for more information on using the DescribeLaunchTemplates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLaunchTemplatesRequest method. +// req, resp := client.DescribeLaunchTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplates +func (c *EC2) DescribeLaunchTemplatesRequest(input *DescribeLaunchTemplatesInput) (req *request.Request, output *DescribeLaunchTemplatesOutput) { + op := &request.Operation{ + Name: opDescribeLaunchTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLaunchTemplatesInput{} + } + + output = &DescribeLaunchTemplatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLaunchTemplates API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more launch templates. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLaunchTemplates for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLaunchTemplates +func (c *EC2) DescribeLaunchTemplates(input *DescribeLaunchTemplatesInput) (*DescribeLaunchTemplatesOutput, error) { + req, out := c.DescribeLaunchTemplatesRequest(input) + return out, req.Send() +} + +// DescribeLaunchTemplatesWithContext is the same as DescribeLaunchTemplates with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLaunchTemplates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLaunchTemplatesWithContext(ctx aws.Context, input *DescribeLaunchTemplatesInput, opts ...request.Option) (*DescribeLaunchTemplatesOutput, error) { + req, out := c.DescribeLaunchTemplatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLaunchTemplatesPages iterates over the pages of a DescribeLaunchTemplates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLaunchTemplates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLaunchTemplates operation. 
+// pageNum := 0 +// err := client.DescribeLaunchTemplatesPages(params, +// func(page *ec2.DescribeLaunchTemplatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLaunchTemplatesPages(input *DescribeLaunchTemplatesInput, fn func(*DescribeLaunchTemplatesOutput, bool) bool) error { + return c.DescribeLaunchTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLaunchTemplatesPagesWithContext same as DescribeLaunchTemplatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLaunchTemplatesPagesWithContext(ctx aws.Context, input *DescribeLaunchTemplatesInput, fn func(*DescribeLaunchTemplatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLaunchTemplatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLaunchTemplatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLaunchTemplatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations = "DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations" + +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations for more information on using the DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest method. +// req, resp := client.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (req *request.Request, output *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput{} + } + + output = &DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// 
DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations API operation for Amazon Elastic Compute Cloud. +// +// Describes the associations between virtual interface groups and local gateway +// route tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) (*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error) { + req, out := c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsWithContext is the same as DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, opts ...request.Option) (*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, error) { + req, out := c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages iterates over the pages of a DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext same as DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGatewayRouteTableVpcAssociations = "DescribeLocalGatewayRouteTableVpcAssociations" + +// DescribeLocalGatewayRouteTableVpcAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGatewayRouteTableVpcAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGatewayRouteTableVpcAssociations for more information on using the DescribeLocalGatewayRouteTableVpcAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewayRouteTableVpcAssociationsRequest method. 
+// req, resp := client.DescribeLocalGatewayRouteTableVpcAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTableVpcAssociations +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsRequest(input *DescribeLocalGatewayRouteTableVpcAssociationsInput) (req *request.Request, output *DescribeLocalGatewayRouteTableVpcAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeLocalGatewayRouteTableVpcAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewayRouteTableVpcAssociationsInput{} + } + + output = &DescribeLocalGatewayRouteTableVpcAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocalGatewayRouteTableVpcAssociations API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified associations between VPCs and local gateway route +// tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGatewayRouteTableVpcAssociations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTableVpcAssociations +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociations(input *DescribeLocalGatewayRouteTableVpcAssociationsInput) (*DescribeLocalGatewayRouteTableVpcAssociationsOutput, error) { + req, out := c.DescribeLocalGatewayRouteTableVpcAssociationsRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTableVpcAssociationsWithContext is the same as DescribeLocalGatewayRouteTableVpcAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGatewayRouteTableVpcAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVpcAssociationsInput, opts ...request.Option) (*DescribeLocalGatewayRouteTableVpcAssociationsOutput, error) { + req, out := c.DescribeLocalGatewayRouteTableVpcAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTableVpcAssociationsPages iterates over the pages of a DescribeLocalGatewayRouteTableVpcAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayRouteTableVpcAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTableVpcAssociations operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTableVpcAssociationsPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsPages(input *DescribeLocalGatewayRouteTableVpcAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext same as DescribeLocalGatewayRouteTableVpcAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVpcAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTableVpcAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTableVpcAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTableVpcAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGatewayRouteTables = "DescribeLocalGatewayRouteTables" + +// DescribeLocalGatewayRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGatewayRouteTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGatewayRouteTables for more information on using the DescribeLocalGatewayRouteTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewayRouteTablesRequest method. 
+// req, resp := client.DescribeLocalGatewayRouteTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTables +func (c *EC2) DescribeLocalGatewayRouteTablesRequest(input *DescribeLocalGatewayRouteTablesInput) (req *request.Request, output *DescribeLocalGatewayRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeLocalGatewayRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewayRouteTablesInput{} + } + + output = &DescribeLocalGatewayRouteTablesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocalGatewayRouteTables API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more local gateway route tables. By default, all local gateway +// route tables are described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGatewayRouteTables for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayRouteTables +func (c *EC2) DescribeLocalGatewayRouteTables(input *DescribeLocalGatewayRouteTablesInput) (*DescribeLocalGatewayRouteTablesOutput, error) { + req, out := c.DescribeLocalGatewayRouteTablesRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTablesWithContext is the same as DescribeLocalGatewayRouteTables with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGatewayRouteTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTablesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTablesInput, opts ...request.Option) (*DescribeLocalGatewayRouteTablesOutput, error) { + req, out := c.DescribeLocalGatewayRouteTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewayRouteTablesPages iterates over the pages of a DescribeLocalGatewayRouteTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayRouteTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTables operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTablesPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTablesPages(input *DescribeLocalGatewayRouteTablesInput, fn func(*DescribeLocalGatewayRouteTablesOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTablesPagesWithContext same as DescribeLocalGatewayRouteTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTablesPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTablesInput, fn func(*DescribeLocalGatewayRouteTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGatewayVirtualInterfaceGroups = "DescribeLocalGatewayVirtualInterfaceGroups" + +// DescribeLocalGatewayVirtualInterfaceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGatewayVirtualInterfaceGroups operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGatewayVirtualInterfaceGroups for more information on using the DescribeLocalGatewayVirtualInterfaceGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewayVirtualInterfaceGroupsRequest method. +// req, resp := client.DescribeLocalGatewayVirtualInterfaceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayVirtualInterfaceGroups +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsRequest(input *DescribeLocalGatewayVirtualInterfaceGroupsInput) (req *request.Request, output *DescribeLocalGatewayVirtualInterfaceGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLocalGatewayVirtualInterfaceGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewayVirtualInterfaceGroupsInput{} + } + + output = &DescribeLocalGatewayVirtualInterfaceGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocalGatewayVirtualInterfaceGroups API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified local gateway virtual interface groups. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGatewayVirtualInterfaceGroups for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayVirtualInterfaceGroups +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroups(input *DescribeLocalGatewayVirtualInterfaceGroupsInput) (*DescribeLocalGatewayVirtualInterfaceGroupsOutput, error) { + req, out := c.DescribeLocalGatewayVirtualInterfaceGroupsRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewayVirtualInterfaceGroupsWithContext is the same as DescribeLocalGatewayVirtualInterfaceGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGatewayVirtualInterfaceGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfaceGroupsInput, opts ...request.Option) (*DescribeLocalGatewayVirtualInterfaceGroupsOutput, error) { + req, out := c.DescribeLocalGatewayVirtualInterfaceGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewayVirtualInterfaceGroupsPages iterates over the pages of a DescribeLocalGatewayVirtualInterfaceGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeLocalGatewayVirtualInterfaceGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayVirtualInterfaceGroups operation. +// pageNum := 0 +// err := client.DescribeLocalGatewayVirtualInterfaceGroupsPages(params, +// func(page *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsPages(input *DescribeLocalGatewayVirtualInterfaceGroupsInput, fn func(*DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool) error { + return c.DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext same as DescribeLocalGatewayVirtualInterfaceGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfaceGroupsInput, fn func(*DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayVirtualInterfaceGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayVirtualInterfaceGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayVirtualInterfaceGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGatewayVirtualInterfaces = "DescribeLocalGatewayVirtualInterfaces" + +// DescribeLocalGatewayVirtualInterfacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGatewayVirtualInterfaces operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGatewayVirtualInterfaces for more information on using the DescribeLocalGatewayVirtualInterfaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewayVirtualInterfacesRequest method. 
+// req, resp := client.DescribeLocalGatewayVirtualInterfacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayVirtualInterfaces +func (c *EC2) DescribeLocalGatewayVirtualInterfacesRequest(input *DescribeLocalGatewayVirtualInterfacesInput) (req *request.Request, output *DescribeLocalGatewayVirtualInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeLocalGatewayVirtualInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewayVirtualInterfacesInput{} + } + + output = &DescribeLocalGatewayVirtualInterfacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocalGatewayVirtualInterfaces API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified local gateway virtual interfaces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGatewayVirtualInterfaces for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGatewayVirtualInterfaces +func (c *EC2) DescribeLocalGatewayVirtualInterfaces(input *DescribeLocalGatewayVirtualInterfacesInput) (*DescribeLocalGatewayVirtualInterfacesOutput, error) { + req, out := c.DescribeLocalGatewayVirtualInterfacesRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewayVirtualInterfacesWithContext is the same as DescribeLocalGatewayVirtualInterfaces with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGatewayVirtualInterfaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfacesWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfacesInput, opts ...request.Option) (*DescribeLocalGatewayVirtualInterfacesOutput, error) { + req, out := c.DescribeLocalGatewayVirtualInterfacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewayVirtualInterfacesPages iterates over the pages of a DescribeLocalGatewayVirtualInterfaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayVirtualInterfaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayVirtualInterfaces operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayVirtualInterfacesPages(params, +// func(page *ec2.DescribeLocalGatewayVirtualInterfacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayVirtualInterfacesPages(input *DescribeLocalGatewayVirtualInterfacesInput, fn func(*DescribeLocalGatewayVirtualInterfacesOutput, bool) bool) error { + return c.DescribeLocalGatewayVirtualInterfacesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayVirtualInterfacesPagesWithContext same as DescribeLocalGatewayVirtualInterfacesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfacesPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfacesInput, fn func(*DescribeLocalGatewayVirtualInterfacesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayVirtualInterfacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayVirtualInterfacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayVirtualInterfacesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeLocalGateways = "DescribeLocalGateways" + +// DescribeLocalGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocalGateways operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocalGateways for more information on using the DescribeLocalGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocalGatewaysRequest method. +// req, resp := client.DescribeLocalGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGateways +func (c *EC2) DescribeLocalGatewaysRequest(input *DescribeLocalGatewaysInput) (req *request.Request, output *DescribeLocalGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeLocalGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLocalGatewaysInput{} + } + + output = &DescribeLocalGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocalGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more local gateways. By default, all local gateways are +// described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeLocalGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeLocalGateways +func (c *EC2) DescribeLocalGateways(input *DescribeLocalGatewaysInput) (*DescribeLocalGatewaysOutput, error) { + req, out := c.DescribeLocalGatewaysRequest(input) + return out, req.Send() +} + +// DescribeLocalGatewaysWithContext is the same as DescribeLocalGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocalGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewaysWithContext(ctx aws.Context, input *DescribeLocalGatewaysInput, opts ...request.Option) (*DescribeLocalGatewaysOutput, error) { + req, out := c.DescribeLocalGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLocalGatewaysPages iterates over the pages of a DescribeLocalGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGateways operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewaysPages(params, +// func(page *ec2.DescribeLocalGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewaysPages(input *DescribeLocalGatewaysInput, fn func(*DescribeLocalGatewaysOutput, bool) bool) error { + return c.DescribeLocalGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewaysPagesWithContext same as DescribeLocalGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewaysPagesWithContext(ctx aws.Context, input *DescribeLocalGatewaysInput, fn func(*DescribeLocalGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeManagedPrefixLists = "DescribeManagedPrefixLists" + +// DescribeManagedPrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeManagedPrefixLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeManagedPrefixLists for more information on using the DescribeManagedPrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeManagedPrefixListsRequest method. +// req, resp := client.DescribeManagedPrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixListsRequest(input *DescribeManagedPrefixListsInput) (req *request.Request, output *DescribeManagedPrefixListsOutput) { + op := &request.Operation{ + Name: opDescribeManagedPrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeManagedPrefixListsInput{} + } + + output = &DescribeManagedPrefixListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeManagedPrefixLists API operation for Amazon Elastic Compute Cloud. +// +// Describes your managed prefix lists and any AWS-managed prefix lists. +// +// To view the entries for your prefix list, use GetManagedPrefixListEntries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeManagedPrefixLists for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixLists(input *DescribeManagedPrefixListsInput) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + return out, req.Send() +} + +// DescribeManagedPrefixListsWithContext is the same as DescribeManagedPrefixLists with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeManagedPrefixLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, opts ...request.Option) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeManagedPrefixListsPages iterates over the pages of a DescribeManagedPrefixLists operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeManagedPrefixLists method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeManagedPrefixLists operation. 
+// pageNum := 0 +// err := client.DescribeManagedPrefixListsPages(params, +// func(page *ec2.DescribeManagedPrefixListsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeManagedPrefixListsPages(input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool) error { + return c.DescribeManagedPrefixListsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeManagedPrefixListsPagesWithContext same as DescribeManagedPrefixListsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsPagesWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeManagedPrefixListsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeManagedPrefixListsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeManagedPrefixListsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMovingAddresses = "DescribeMovingAddresses" + +// DescribeMovingAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMovingAddresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMovingAddresses for more information on using the DescribeMovingAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMovingAddressesRequest method. +// req, resp := client.DescribeMovingAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddresses +func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) { + op := &request.Operation{ + Name: opDescribeMovingAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMovingAddressesInput{} + } + + output = &DescribeMovingAddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMovingAddresses API operation for Amazon Elastic Compute Cloud. +// +// Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, +// or that are being restored to the EC2-Classic platform. This request does +// not return information about any other Elastic IP addresses in your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeMovingAddresses for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddresses +func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) { + req, out := c.DescribeMovingAddressesRequest(input) + return out, req.Send() +} + +// DescribeMovingAddressesWithContext is the same as DescribeMovingAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMovingAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeMovingAddressesWithContext(ctx aws.Context, input *DescribeMovingAddressesInput, opts ...request.Option) (*DescribeMovingAddressesOutput, error) { + req, out := c.DescribeMovingAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMovingAddressesPages iterates over the pages of a DescribeMovingAddresses operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMovingAddresses method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMovingAddresses operation. 
+// pageNum := 0 +// err := client.DescribeMovingAddressesPages(params, +// func(page *ec2.DescribeMovingAddressesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeMovingAddressesPages(input *DescribeMovingAddressesInput, fn func(*DescribeMovingAddressesOutput, bool) bool) error { + return c.DescribeMovingAddressesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMovingAddressesPagesWithContext same as DescribeMovingAddressesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeMovingAddressesPagesWithContext(ctx aws.Context, input *DescribeMovingAddressesInput, fn func(*DescribeMovingAddressesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMovingAddressesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMovingAddressesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMovingAddressesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeNatGateways = "DescribeNatGateways" + +// DescribeNatGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNatGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeNatGateways for more information on using the DescribeNatGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNatGatewaysRequest method. +// req, resp := client.DescribeNatGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways +func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeNatGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNatGatewaysInput{} + } + + output = &DescribeNatGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNatGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your NAT gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNatGateways for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways +func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) { + req, out := c.DescribeNatGatewaysRequest(input) + return out, req.Send() +} + +// DescribeNatGatewaysWithContext is the same as DescribeNatGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNatGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNatGatewaysWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, opts ...request.Option) (*DescribeNatGatewaysOutput, error) { + req, out := c.DescribeNatGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNatGatewaysPages iterates over the pages of a DescribeNatGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNatGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNatGateways operation. 
+// pageNum := 0 +// err := client.DescribeNatGatewaysPages(params, +// func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeNatGatewaysPages(input *DescribeNatGatewaysInput, fn func(*DescribeNatGatewaysOutput, bool) bool) error { + return c.DescribeNatGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNatGatewaysPagesWithContext same as DescribeNatGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNatGatewaysPagesWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, fn func(*DescribeNatGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNatGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNatGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNatGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeNetworkAcls = "DescribeNetworkAcls" + +// DescribeNetworkAclsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkAcls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeNetworkAcls for more information on using the DescribeNetworkAcls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNetworkAclsRequest method. +// req, resp := client.DescribeNetworkAclsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAcls +func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkAcls, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNetworkAclsInput{} + } + + output = &DescribeNetworkAclsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkAcls API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your network ACLs. +// +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkAcls for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAcls +func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + return out, req.Send() +} + +// DescribeNetworkAclsWithContext is the same as DescribeNetworkAcls with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkAcls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkAclsWithContext(ctx aws.Context, input *DescribeNetworkAclsInput, opts ...request.Option) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNetworkAclsPages iterates over the pages of a DescribeNetworkAcls operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNetworkAcls method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNetworkAcls operation. 
+// pageNum := 0 +// err := client.DescribeNetworkAclsPages(params, +// func(page *ec2.DescribeNetworkAclsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeNetworkAclsPages(input *DescribeNetworkAclsInput, fn func(*DescribeNetworkAclsOutput, bool) bool) error { + return c.DescribeNetworkAclsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNetworkAclsPagesWithContext same as DescribeNetworkAclsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkAclsPagesWithContext(ctx aws.Context, input *DescribeNetworkAclsInput, fn func(*DescribeNetworkAclsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNetworkAclsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNetworkAclsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNetworkAclsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" + +// DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeNetworkInterfaceAttribute for more information on using the DescribeNetworkInterfaceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNetworkInterfaceAttributeRequest method. +// req, resp := client.DescribeNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaceAttribute +func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfaceAttributeInput{} + } + + output = &DescribeNetworkInterfaceAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes a network interface attribute. You can specify only one attribute +// at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkInterfaceAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaceAttribute +func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + return out, req.Send() +} + +// DescribeNetworkInterfaceAttributeWithContext is the same as DescribeNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfaceAttributeWithContext(ctx aws.Context, input *DescribeNetworkInterfaceAttributeInput, opts ...request.Option) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeNetworkInterfacePermissions = "DescribeNetworkInterfacePermissions" + +// DescribeNetworkInterfacePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfacePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNetworkInterfacePermissions for more information on using the DescribeNetworkInterfacePermissions +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNetworkInterfacePermissionsRequest method. +// req, resp := client.DescribeNetworkInterfacePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissionsRequest(input *DescribeNetworkInterfacePermissionsInput) (req *request.Request, output *DescribeNetworkInterfacePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfacePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNetworkInterfacePermissionsInput{} + } + + output = &DescribeNetworkInterfacePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkInterfacePermissions API operation for Amazon Elastic Compute Cloud. +// +// Describes the permissions for your network interfaces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkInterfacePermissions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissions(input *DescribeNetworkInterfacePermissionsInput) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + return out, req.Send() +} + +// DescribeNetworkInterfacePermissionsWithContext is the same as DescribeNetworkInterfacePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfacePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacePermissionsWithContext(ctx aws.Context, input *DescribeNetworkInterfacePermissionsInput, opts ...request.Option) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNetworkInterfacePermissionsPages iterates over the pages of a DescribeNetworkInterfacePermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNetworkInterfacePermissions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNetworkInterfacePermissions operation. 
+// pageNum := 0 +// err := client.DescribeNetworkInterfacePermissionsPages(params, +// func(page *ec2.DescribeNetworkInterfacePermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeNetworkInterfacePermissionsPages(input *DescribeNetworkInterfacePermissionsInput, fn func(*DescribeNetworkInterfacePermissionsOutput, bool) bool) error { + return c.DescribeNetworkInterfacePermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNetworkInterfacePermissionsPagesWithContext same as DescribeNetworkInterfacePermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacePermissionsPagesWithContext(ctx aws.Context, input *DescribeNetworkInterfacePermissionsInput, fn func(*DescribeNetworkInterfacePermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNetworkInterfacePermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNetworkInterfacePermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNetworkInterfacePermissionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" + +// DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaces operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNetworkInterfaces for more information on using the DescribeNetworkInterfaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNetworkInterfacesRequest method. +// req, resp := client.DescribeNetworkInterfacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces +func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfaces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeNetworkInterfacesInput{} + } + + output = &DescribeNetworkInterfacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkInterfaces API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your network interfaces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkInterfaces for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces +func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + return out, req.Send() +} + +// DescribeNetworkInterfacesWithContext is the same as DescribeNetworkInterfaces with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacesWithContext(ctx aws.Context, input *DescribeNetworkInterfacesInput, opts ...request.Option) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNetworkInterfacesPages iterates over the pages of a DescribeNetworkInterfaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNetworkInterfaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNetworkInterfaces operation. 
+// pageNum := 0 +// err := client.DescribeNetworkInterfacesPages(params, +// func(page *ec2.DescribeNetworkInterfacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeNetworkInterfacesPages(input *DescribeNetworkInterfacesInput, fn func(*DescribeNetworkInterfacesOutput, bool) bool) error { + return c.DescribeNetworkInterfacesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNetworkInterfacesPagesWithContext same as DescribeNetworkInterfacesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacesPagesWithContext(ctx aws.Context, input *DescribeNetworkInterfacesInput, fn func(*DescribeNetworkInterfacesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNetworkInterfacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNetworkInterfacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNetworkInterfacesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePlacementGroups = "DescribePlacementGroups" + +// DescribePlacementGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePlacementGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribePlacementGroups for more information on using the DescribePlacementGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePlacementGroupsRequest method. +// req, resp := client.DescribePlacementGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePlacementGroups +func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { + op := &request.Operation{ + Name: opDescribePlacementGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePlacementGroupsInput{} + } + + output = &DescribePlacementGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePlacementGroups API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified placement groups or all of your placement groups. +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribePlacementGroups for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePlacementGroups +func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + return out, req.Send() +} + +// DescribePlacementGroupsWithContext is the same as DescribePlacementGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePlacementGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePlacementGroupsWithContext(ctx aws.Context, input *DescribePlacementGroupsInput, opts ...request.Option) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribePrefixLists = "DescribePrefixLists" + +// DescribePrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePrefixLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePrefixLists for more information on using the DescribePrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePrefixListsRequest method. 
+// req, resp := client.DescribePrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixLists +func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { + op := &request.Operation{ + Name: opDescribePrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePrefixListsInput{} + } + + output = &DescribePrefixListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePrefixLists API operation for Amazon Elastic Compute Cloud. +// +// Describes available AWS services in a prefix list format, which includes +// the prefix list name and prefix list ID of the service and the IP address +// range for the service. +// +// We recommend that you use DescribeManagedPrefixLists instead. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribePrefixLists for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixLists +func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + return out, req.Send() +} + +// DescribePrefixListsWithContext is the same as DescribePrefixLists with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePrefixLists for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePrefixListsWithContext(ctx aws.Context, input *DescribePrefixListsInput, opts ...request.Option) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePrefixListsPages iterates over the pages of a DescribePrefixLists operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePrefixLists method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePrefixLists operation. +// pageNum := 0 +// err := client.DescribePrefixListsPages(params, +// func(page *ec2.DescribePrefixListsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribePrefixListsPages(input *DescribePrefixListsInput, fn func(*DescribePrefixListsOutput, bool) bool) error { + return c.DescribePrefixListsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePrefixListsPagesWithContext same as DescribePrefixListsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribePrefixListsPagesWithContext(ctx aws.Context, input *DescribePrefixListsInput, fn func(*DescribePrefixListsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePrefixListsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePrefixListsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePrefixListsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePrincipalIdFormat = "DescribePrincipalIdFormat" + +// DescribePrincipalIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribePrincipalIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePrincipalIdFormat for more information on using the DescribePrincipalIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePrincipalIdFormatRequest method. 
+// req, resp := client.DescribePrincipalIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat +func (c *EC2) DescribePrincipalIdFormatRequest(input *DescribePrincipalIdFormatInput) (req *request.Request, output *DescribePrincipalIdFormatOutput) { + op := &request.Operation{ + Name: opDescribePrincipalIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePrincipalIdFormatInput{} + } + + output = &DescribePrincipalIdFormatOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePrincipalIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Describes the ID format settings for the root user and all IAM roles and +// IAM users that have explicitly specified a longer ID (17-character ID) preference. +// +// By default, all IAM roles and IAM users default to the same ID settings as +// the root user, unless they explicitly override the settings. This request +// is useful for identifying those IAM users and IAM roles that have overridden +// the default ID settings. +// +// The following resource types support longer IDs: bundle | conversion-task +// | customer-gateway | dhcp-options | elastic-ip-allocation | elastic-ip-association +// | export-task | flow-log | image | import-task | instance | internet-gateway +// | network-acl | network-acl-association | network-interface | network-interface-attachment +// | prefix-list | reservation | route-table | route-table-association | security-group +// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribePrincipalIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat +func (c *EC2) DescribePrincipalIdFormat(input *DescribePrincipalIdFormatInput) (*DescribePrincipalIdFormatOutput, error) { + req, out := c.DescribePrincipalIdFormatRequest(input) + return out, req.Send() +} + +// DescribePrincipalIdFormatWithContext is the same as DescribePrincipalIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePrincipalIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePrincipalIdFormatWithContext(ctx aws.Context, input *DescribePrincipalIdFormatInput, opts ...request.Option) (*DescribePrincipalIdFormatOutput, error) { + req, out := c.DescribePrincipalIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePrincipalIdFormatPages iterates over the pages of a DescribePrincipalIdFormat operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePrincipalIdFormat method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePrincipalIdFormat operation. 
+// pageNum := 0 +// err := client.DescribePrincipalIdFormatPages(params, +// func(page *ec2.DescribePrincipalIdFormatOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribePrincipalIdFormatPages(input *DescribePrincipalIdFormatInput, fn func(*DescribePrincipalIdFormatOutput, bool) bool) error { + return c.DescribePrincipalIdFormatPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePrincipalIdFormatPagesWithContext same as DescribePrincipalIdFormatPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePrincipalIdFormatPagesWithContext(ctx aws.Context, input *DescribePrincipalIdFormatInput, fn func(*DescribePrincipalIdFormatOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePrincipalIdFormatInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePrincipalIdFormatRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePrincipalIdFormatOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePublicIpv4Pools = "DescribePublicIpv4Pools" + +// DescribePublicIpv4PoolsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePublicIpv4Pools operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribePublicIpv4Pools for more information on using the DescribePublicIpv4Pools +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePublicIpv4PoolsRequest method. +// req, resp := client.DescribePublicIpv4PoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePublicIpv4Pools +func (c *EC2) DescribePublicIpv4PoolsRequest(input *DescribePublicIpv4PoolsInput) (req *request.Request, output *DescribePublicIpv4PoolsOutput) { + op := &request.Operation{ + Name: opDescribePublicIpv4Pools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePublicIpv4PoolsInput{} + } + + output = &DescribePublicIpv4PoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePublicIpv4Pools API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified IPv4 address pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribePublicIpv4Pools for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePublicIpv4Pools +func (c *EC2) DescribePublicIpv4Pools(input *DescribePublicIpv4PoolsInput) (*DescribePublicIpv4PoolsOutput, error) { + req, out := c.DescribePublicIpv4PoolsRequest(input) + return out, req.Send() +} + +// DescribePublicIpv4PoolsWithContext is the same as DescribePublicIpv4Pools with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePublicIpv4Pools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePublicIpv4PoolsWithContext(ctx aws.Context, input *DescribePublicIpv4PoolsInput, opts ...request.Option) (*DescribePublicIpv4PoolsOutput, error) { + req, out := c.DescribePublicIpv4PoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePublicIpv4PoolsPages iterates over the pages of a DescribePublicIpv4Pools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePublicIpv4Pools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePublicIpv4Pools operation. 
+// pageNum := 0 +// err := client.DescribePublicIpv4PoolsPages(params, +// func(page *ec2.DescribePublicIpv4PoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribePublicIpv4PoolsPages(input *DescribePublicIpv4PoolsInput, fn func(*DescribePublicIpv4PoolsOutput, bool) bool) error { + return c.DescribePublicIpv4PoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePublicIpv4PoolsPagesWithContext same as DescribePublicIpv4PoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePublicIpv4PoolsPagesWithContext(ctx aws.Context, input *DescribePublicIpv4PoolsInput, fn func(*DescribePublicIpv4PoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePublicIpv4PoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePublicIpv4PoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePublicIpv4PoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeRegions = "DescribeRegions" + +// DescribeRegionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRegions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeRegions for more information on using the DescribeRegions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRegionsRequest method. +// req, resp := client.DescribeRegionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRegions +func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { + op := &request.Operation{ + Name: opDescribeRegions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRegionsInput{} + } + + output = &DescribeRegionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRegions API operation for Amazon Elastic Compute Cloud. +// +// Describes the Regions that are enabled for your account, or all Regions. +// +// For a list of the Regions supported by Amazon EC2, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). +// +// For information about enabling and disabling Regions for your account, see +// Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeRegions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRegions +func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + return out, req.Send() +} + +// DescribeRegionsWithContext is the same as DescribeRegions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRegions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeRegionsWithContext(ctx aws.Context, input *DescribeRegionsInput, opts ...request.Option) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeReservedInstances = "DescribeReservedInstances" + +// DescribeReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReservedInstances for more information on using the DescribeReservedInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReservedInstancesRequest method. 
+// req, resp := client.DescribeReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstances +func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesInput{} + } + + output = &DescribeReservedInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReservedInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of the Reserved Instances that you purchased. +// +// For more information about Reserved Instances, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeReservedInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstances +func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + return out, req.Send() +} + +// DescribeReservedInstancesWithContext is the same as DescribeReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstances for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesWithContext(ctx aws.Context, input *DescribeReservedInstancesInput, opts ...request.Option) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" + +// DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesListings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReservedInstancesListings for more information on using the DescribeReservedInstancesListings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReservedInstancesListingsRequest method. 
+// req, resp := client.DescribeReservedInstancesListingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesListings +func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesListings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReservedInstancesListingsInput{} + } + + output = &DescribeReservedInstancesListingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReservedInstancesListings API operation for Amazon Elastic Compute Cloud. +// +// Describes your account's Reserved Instance listings in the Reserved Instance +// Marketplace. +// +// The Reserved Instance Marketplace matches sellers who want to resell Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. +// +// As a seller, you choose to list some or all of your Reserved Instances, and +// you specify the upfront price to receive for them. Your Reserved Instances +// are then listed in the Reserved Instance Marketplace and are available for +// purchase. +// +// As a buyer, you specify the configuration of the Reserved Instance to purchase, +// and the Marketplace matches what you're searching for with what's available. +// The Marketplace first sells the lowest priced Reserved Instances to you, +// and continues to sell available Reserved Instance listings to you until your +// demand is met. You are charged based on the total price of all of the listings +// that you purchase. 
+// +// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeReservedInstancesListings for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesListings +func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + return out, req.Send() +} + +// DescribeReservedInstancesListingsWithContext is the same as DescribeReservedInstancesListings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesListings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesListingsWithContext(ctx aws.Context, input *DescribeReservedInstancesListingsInput, opts ...request.Option) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" + +// DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesModifications operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReservedInstancesModifications for more information on using the DescribeReservedInstancesModifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReservedInstancesModificationsRequest method. 
+// req, resp := client.DescribeReservedInstancesModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModifications +func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesModificationsInput{} + } + + output = &DescribeReservedInstancesModificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReservedInstancesModifications API operation for Amazon Elastic Compute Cloud. +// +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification +// requests is returned. If a modification ID is specified, only information +// about the specific modification is returned. +// +// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeReservedInstancesModifications for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModifications +func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + return out, req.Send() +} + +// DescribeReservedInstancesModificationsWithContext is the same as DescribeReservedInstancesModifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesModifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesModificationsWithContext(ctx aws.Context, input *DescribeReservedInstancesModificationsInput, opts ...request.Option) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReservedInstancesModificationsPages iterates over the pages of a DescribeReservedInstancesModifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesModifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesModifications operation. 
+// pageNum := 0 +// err := client.DescribeReservedInstancesModificationsPages(params, +// func(page *ec2.DescribeReservedInstancesModificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(*DescribeReservedInstancesModificationsOutput, bool) bool) error { + return c.DescribeReservedInstancesModificationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReservedInstancesModificationsPagesWithContext same as DescribeReservedInstancesModificationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesModificationsPagesWithContext(ctx aws.Context, input *DescribeReservedInstancesModificationsInput, fn func(*DescribeReservedInstancesModificationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedInstancesModificationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedInstancesModificationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedInstancesModificationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" + +// DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesOfferings operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReservedInstancesOfferings for more information on using the DescribeReservedInstancesOfferings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReservedInstancesOfferingsRequest method. +// req, resp := client.DescribeReservedInstancesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferings +func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { + op := &request.Operation{ + Name: opDescribeReservedInstancesOfferings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReservedInstancesOfferingsInput{} + } + + output = &DescribeReservedInstancesOfferingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReservedInstancesOfferings API operation for Amazon Elastic Compute Cloud. +// +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period +// of time. 
During that time period, you do not receive insufficient capacity +// errors, and you pay a lower usage rate than the rate charged for On-Demand +// instances for the actual time used. +// +// If you have listed your own Reserved Instances for sale in the Reserved Instance +// Marketplace, they will be excluded from these results. This is to ensure +// that you do not purchase your own Reserved Instances. +// +// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeReservedInstancesOfferings for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferings +func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + return out, req.Send() +} + +// DescribeReservedInstancesOfferingsWithContext is the same as DescribeReservedInstancesOfferings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesOfferings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeReservedInstancesOfferingsWithContext(ctx aws.Context, input *DescribeReservedInstancesOfferingsInput, opts ...request.Option) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReservedInstancesOfferingsPages iterates over the pages of a DescribeReservedInstancesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedInstancesOfferingsPages(params, +// func(page *ec2.DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(*DescribeReservedInstancesOfferingsOutput, bool) bool) error { + return c.DescribeReservedInstancesOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReservedInstancesOfferingsPagesWithContext same as DescribeReservedInstancesOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeReservedInstancesOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedInstancesOfferingsInput, fn func(*DescribeReservedInstancesOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedInstancesOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedInstancesOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedInstancesOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeRouteTables = "DescribeRouteTables" + +// DescribeRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRouteTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRouteTables for more information on using the DescribeRouteTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRouteTablesRequest method. 
+// req, resp := client.DescribeRouteTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTables +func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeRouteTablesInput{} + } + + output = &DescribeRouteTablesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRouteTables API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your route tables. +// +// Each subnet in your VPC must be associated with a route table. If a subnet +// is not explicitly associated with any route table, it is implicitly associated +// with the main route table. This command does not return the subnet ID for +// implicit associations. +// +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeRouteTables for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTables +func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + return out, req.Send() +} + +// DescribeRouteTablesWithContext is the same as DescribeRouteTables with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRouteTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeRouteTablesWithContext(ctx aws.Context, input *DescribeRouteTablesInput, opts ...request.Option) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeRouteTablesPages iterates over the pages of a DescribeRouteTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeRouteTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeRouteTables operation. 
+// pageNum := 0 +// err := client.DescribeRouteTablesPages(params, +// func(page *ec2.DescribeRouteTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeRouteTablesPages(input *DescribeRouteTablesInput, fn func(*DescribeRouteTablesOutput, bool) bool) error { + return c.DescribeRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeRouteTablesPagesWithContext same as DescribeRouteTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeRouteTablesPagesWithContext(ctx aws.Context, input *DescribeRouteTablesInput, fn func(*DescribeRouteTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeRouteTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeRouteTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeRouteTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" + +// DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstanceAvailability operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeScheduledInstanceAvailability for more information on using the DescribeScheduledInstanceAvailability +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeScheduledInstanceAvailabilityRequest method. +// req, resp := client.DescribeScheduledInstanceAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailability +func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstanceAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScheduledInstanceAvailabilityInput{} + } + + output = &DescribeScheduledInstanceAvailabilityOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeScheduledInstanceAvailability API operation for Amazon Elastic Compute Cloud. +// +// Finds available schedules that meet the specified criteria. +// +// You can search for an available schedule no more than 3 months in advance. +// You must meet the minimum required duration of 1,200 hours per year. For +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule +// is 24 hours, and the minimum monthly schedule is 100 hours. 
+// +// After you find a schedule that meets your needs, call PurchaseScheduledInstances +// to purchase Scheduled Instances with that schedule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeScheduledInstanceAvailability for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailability +func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + return out, req.Send() +} + +// DescribeScheduledInstanceAvailabilityWithContext is the same as DescribeScheduledInstanceAvailability with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScheduledInstanceAvailability for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeScheduledInstanceAvailabilityWithContext(ctx aws.Context, input *DescribeScheduledInstanceAvailabilityInput, opts ...request.Option) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeScheduledInstanceAvailabilityPages iterates over the pages of a DescribeScheduledInstanceAvailability operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledInstanceAvailability method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledInstanceAvailability operation. +// pageNum := 0 +// err := client.DescribeScheduledInstanceAvailabilityPages(params, +// func(page *ec2.DescribeScheduledInstanceAvailabilityOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeScheduledInstanceAvailabilityPages(input *DescribeScheduledInstanceAvailabilityInput, fn func(*DescribeScheduledInstanceAvailabilityOutput, bool) bool) error { + return c.DescribeScheduledInstanceAvailabilityPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledInstanceAvailabilityPagesWithContext same as DescribeScheduledInstanceAvailabilityPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeScheduledInstanceAvailabilityPagesWithContext(ctx aws.Context, input *DescribeScheduledInstanceAvailabilityInput, fn func(*DescribeScheduledInstanceAvailabilityOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledInstanceAvailabilityInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledInstanceAvailabilityRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeScheduledInstanceAvailabilityOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeScheduledInstances = "DescribeScheduledInstances" + +// DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeScheduledInstances for more information on using the DescribeScheduledInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeScheduledInstancesRequest method. 
+// req, resp := client.DescribeScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstances +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeScheduledInstancesInput{} + } + + output = &DescribeScheduledInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeScheduledInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified Scheduled Instances or all your Scheduled Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeScheduledInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstances +func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + return out, req.Send() +} + +// DescribeScheduledInstancesWithContext is the same as DescribeScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScheduledInstances for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeScheduledInstancesWithContext(ctx aws.Context, input *DescribeScheduledInstancesInput, opts ...request.Option) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeScheduledInstancesPages iterates over the pages of a DescribeScheduledInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledInstances operation. +// pageNum := 0 +// err := client.DescribeScheduledInstancesPages(params, +// func(page *ec2.DescribeScheduledInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeScheduledInstancesPages(input *DescribeScheduledInstancesInput, fn func(*DescribeScheduledInstancesOutput, bool) bool) error { + return c.DescribeScheduledInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledInstancesPagesWithContext same as DescribeScheduledInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeScheduledInstancesPagesWithContext(ctx aws.Context, input *DescribeScheduledInstancesInput, fn func(*DescribeScheduledInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeScheduledInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" + +// DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroupReferences operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSecurityGroupReferences for more information on using the DescribeSecurityGroupReferences +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSecurityGroupReferencesRequest method. 
+// req, resp := client.DescribeSecurityGroupReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupReferences +func (c *EC2) DescribeSecurityGroupReferencesRequest(input *DescribeSecurityGroupReferencesInput) (req *request.Request, output *DescribeSecurityGroupReferencesOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroupReferences, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupReferencesInput{} + } + + output = &DescribeSecurityGroupReferencesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSecurityGroupReferences API operation for Amazon Elastic Compute Cloud. +// +// [VPC only] Describes the VPCs on the other side of a VPC peering connection +// that are referencing the security groups you've specified in this request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSecurityGroupReferences for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupReferences +func (c *EC2) DescribeSecurityGroupReferences(input *DescribeSecurityGroupReferencesInput) (*DescribeSecurityGroupReferencesOutput, error) { + req, out := c.DescribeSecurityGroupReferencesRequest(input) + return out, req.Send() +} + +// DescribeSecurityGroupReferencesWithContext is the same as DescribeSecurityGroupReferences with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecurityGroupReferences for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSecurityGroupReferencesWithContext(ctx aws.Context, input *DescribeSecurityGroupReferencesInput, opts ...request.Option) (*DescribeSecurityGroupReferencesOutput, error) { + req, out := c.DescribeSecurityGroupReferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSecurityGroups = "DescribeSecurityGroups" + +// DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSecurityGroups for more information on using the DescribeSecurityGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSecurityGroupsRequest method. 
+// req, resp := client.DescribeSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroups +func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSecurityGroupsInput{} + } + + output = &DescribeSecurityGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSecurityGroups API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified security groups or all of your security groups. +// +// A security group is for use with instances either in the EC2-Classic platform +// or in a specific VPC. For more information, see Amazon EC2 Security Groups +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) +// in the Amazon Elastic Compute Cloud User Guide and Security Groups for Your +// VPC (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSecurityGroups for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroups +func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + return out, req.Send() +} + +// DescribeSecurityGroupsWithContext is the same as DescribeSecurityGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecurityGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSecurityGroupsWithContext(ctx aws.Context, input *DescribeSecurityGroupsInput, opts ...request.Option) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSecurityGroupsPages iterates over the pages of a DescribeSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSecurityGroups operation. 
+// pageNum := 0 +// err := client.DescribeSecurityGroupsPages(params, +// func(page *ec2.DescribeSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSecurityGroupsPages(input *DescribeSecurityGroupsInput, fn func(*DescribeSecurityGroupsOutput, bool) bool) error { + return c.DescribeSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSecurityGroupsPagesWithContext same as DescribeSecurityGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSecurityGroupsPagesWithContext(ctx aws.Context, input *DescribeSecurityGroupsInput, fn func(*DescribeSecurityGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSecurityGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSecurityGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSecurityGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" + +// DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshotAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSnapshotAttribute for more information on using the DescribeSnapshotAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSnapshotAttributeRequest method. +// req, resp := client.DescribeSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotAttribute +func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opDescribeSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSnapshotAttributeInput{} + } + + output = &DescribeSnapshotAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSnapshotAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified snapshot. You can specify +// only one attribute at a time. +// +// For more information about EBS snapshots, see Amazon EBS snapshots (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSnapshotAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotAttribute +func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + return out, req.Send() +} + +// DescribeSnapshotAttributeWithContext is the same as DescribeSnapshotAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotAttributeWithContext(ctx aws.Context, input *DescribeSnapshotAttributeInput, opts ...request.Option) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSnapshots for more information on using the DescribeSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSnapshotsRequest method. 
+// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots +func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { + op := &request.Operation{ + Name: opDescribeSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSnapshotsInput{} + } + + output = &DescribeSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSnapshots API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified EBS snapshots available to you or all of the EBS +// snapshots available to you. +// +// The snapshots available to you include public snapshots, private snapshots +// that you own, and private snapshots owned by other AWS accounts for which +// you have explicit create volume permissions. +// +// The create volume permissions fall into the following categories: +// +// * public: The owner of the snapshot granted create volume permissions +// for the snapshot to the all group. All AWS accounts have create volume +// permissions for these snapshots. +// +// * explicit: The owner of the snapshot granted create volume permissions +// to a specific AWS account. +// +// * implicit: An AWS account has implicit create volume permissions for +// all snapshots it owns. +// +// The list of snapshots returned can be filtered by specifying snapshot IDs, +// snapshot owners, or AWS accounts with create volume permissions. If no options +// are specified, Amazon EC2 returns all snapshots for which you have create +// volume permissions. 
+// +// If you specify one or more snapshot IDs, only snapshots that have the specified +// IDs are returned. If you specify an invalid snapshot ID, an error is returned. +// If you specify a snapshot ID for which you do not have access, it is not +// included in the returned results. +// +// If you specify one or more snapshot owners using the OwnerIds option, only +// snapshots from the specified owners and for which you have access are returned. +// The results can include the AWS account IDs of the specified owners, amazon +// for snapshots owned by Amazon, or self for snapshots that you own. +// +// If you specify a list of restorable users, only snapshots with create snapshot +// permissions for those users are returned. You can specify AWS account IDs +// (if you own the snapshots), self for snapshots for which you own or have +// explicit permissions, or all for public snapshots. +// +// If you are describing a long list of snapshots, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeSnapshots +// request to retrieve the remaining results. +// +// To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. +// +// For more information about EBS snapshots, see Amazon EBS snapshots (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSnapshots for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots +func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + return out, req.Send() +} + +// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. 
+// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *ec2.DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { + return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" + +// DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotDatafeedSubscription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeSpotDatafeedSubscription for more information on using the DescribeSpotDatafeedSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotDatafeedSubscriptionRequest method. +// req, resp := client.DescribeSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscription +func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { + op := &request.Operation{ + Name: opDescribeSpotDatafeedSubscription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotDatafeedSubscriptionInput{} + } + + output = &DescribeSpotDatafeedSubscriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. +// +// Describes the data feed for Spot Instances. For more information, see Spot +// Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// in the Amazon EC2 User Guide for Linux Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotDatafeedSubscription for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscription +func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + return out, req.Send() +} + +// DescribeSpotDatafeedSubscriptionWithContext is the same as DescribeSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *DescribeSpotDatafeedSubscriptionInput, opts ...request.Option) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" + +// DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSpotFleetInstances for more information on using the DescribeSpotFleetInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotFleetInstancesRequest method. +// req, resp := client.DescribeSpotFleetInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstances +func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetInstancesInput{} + } + + output = &DescribeSpotFleetInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotFleetInstances API operation for Amazon Elastic Compute Cloud. +// +// Describes the running instances for the specified Spot Fleet. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotFleetInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstances +func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + return out, req.Send() +} + +// DescribeSpotFleetInstancesWithContext is the same as DescribeSpotFleetInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetInstancesWithContext(ctx aws.Context, input *DescribeSpotFleetInstancesInput, opts ...request.Option) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" + +// DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequestHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSpotFleetRequestHistory for more information on using the DescribeSpotFleetRequestHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotFleetRequestHistoryRequest method. 
+// req, resp := client.DescribeSpotFleetRequestHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestHistory +func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSpotFleetRequestHistoryInput{} + } + + output = &DescribeSpotFleetRequestHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotFleetRequestHistory API operation for Amazon Elastic Compute Cloud. +// +// Describes the events for the specified Spot Fleet request during the specified +// time. +// +// Spot Fleet events are delayed by up to 30 seconds before they can be described. +// This ensures that you can query by the last evaluated time and not miss a +// recorded event. Spot Fleet events are available for 48 hours. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotFleetRequestHistory for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestHistory +func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + return out, req.Send() +} + +// DescribeSpotFleetRequestHistoryWithContext is the same as DescribeSpotFleetRequestHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetRequestHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestHistoryWithContext(ctx aws.Context, input *DescribeSpotFleetRequestHistoryInput, opts ...request.Option) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" + +// DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequests operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSpotFleetRequests for more information on using the DescribeSpotFleetRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotFleetRequestsRequest method. +// req, resp := client.DescribeSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequests +func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotFleetRequests, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotFleetRequestsInput{} + } + + output = &DescribeSpotFleetRequestsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotFleetRequests API operation for Amazon Elastic Compute Cloud. +// +// Describes your Spot Fleet requests. +// +// Spot Fleet requests are deleted 48 hours after they are canceled and their +// instances are terminated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotFleetRequests for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequests +func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + return out, req.Send() +} + +// DescribeSpotFleetRequestsWithContext is the same as DescribeSpotFleetRequests with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestsWithContext(ctx aws.Context, input *DescribeSpotFleetRequestsInput, opts ...request.Option) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSpotFleetRequestsPages iterates over the pages of a DescribeSpotFleetRequests operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotFleetRequests method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotFleetRequests operation. 
+// pageNum := 0 +// err := client.DescribeSpotFleetRequestsPages(params, +// func(page *ec2.DescribeSpotFleetRequestsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(*DescribeSpotFleetRequestsOutput, bool) bool) error { + return c.DescribeSpotFleetRequestsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSpotFleetRequestsPagesWithContext same as DescribeSpotFleetRequestsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestsPagesWithContext(ctx aws.Context, input *DescribeSpotFleetRequestsInput, fn func(*DescribeSpotFleetRequestsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSpotFleetRequestsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotFleetRequestsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSpotFleetRequestsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" + +// DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotInstanceRequests operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSpotInstanceRequests for more information on using the DescribeSpotInstanceRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotInstanceRequestsRequest method. +// req, resp := client.DescribeSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests +func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { + op := &request.Operation{ + Name: opDescribeSpotInstanceRequests, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotInstanceRequestsInput{} + } + + output = &DescribeSpotInstanceRequestsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotInstanceRequests API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified Spot Instance requests. +// +// You can use DescribeSpotInstanceRequests to find a running Spot Instance +// by examining the response. If the status of the Spot Instance is fulfilled, +// the instance ID appears in the response and contains the identifier of the +// instance. Alternatively, you can use DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances) +// with a filter to look for instances where the instance lifecycle is spot. 
+// +// We recommend that you set MaxResults to a value between 5 and 1000 to limit +// the number of results returned. This paginates the output, which makes the +// list more manageable and returns the results faster. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests +// request to retrieve the remaining results. +// +// Spot Instance requests are deleted four hours after they are canceled and +// their instances are terminated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotInstanceRequests for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests +func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + return out, req.Send() +} + +// DescribeSpotInstanceRequestsWithContext is the same as DescribeSpotInstanceRequests with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotInstanceRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeSpotInstanceRequestsWithContext(ctx aws.Context, input *DescribeSpotInstanceRequestsInput, opts ...request.Option) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSpotInstanceRequestsPages iterates over the pages of a DescribeSpotInstanceRequests operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotInstanceRequests method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotInstanceRequests operation. +// pageNum := 0 +// err := client.DescribeSpotInstanceRequestsPages(params, +// func(page *ec2.DescribeSpotInstanceRequestsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotInstanceRequestsPages(input *DescribeSpotInstanceRequestsInput, fn func(*DescribeSpotInstanceRequestsOutput, bool) bool) error { + return c.DescribeSpotInstanceRequestsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSpotInstanceRequestsPagesWithContext same as DescribeSpotInstanceRequestsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeSpotInstanceRequestsPagesWithContext(ctx aws.Context, input *DescribeSpotInstanceRequestsInput, fn func(*DescribeSpotInstanceRequestsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSpotInstanceRequestsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotInstanceRequestsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSpotInstanceRequestsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" + +// DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotPriceHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSpotPriceHistory for more information on using the DescribeSpotPriceHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSpotPriceHistoryRequest method. 
+// req, resp := client.DescribeSpotPriceHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistory +func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { + op := &request.Operation{ + Name: opDescribeSpotPriceHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSpotPriceHistoryInput{} + } + + output = &DescribeSpotPriceHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSpotPriceHistory API operation for Amazon Elastic Compute Cloud. +// +// Describes the Spot price history. For more information, see Spot Instance +// pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// in the Amazon EC2 User Guide for Linux Instances. +// +// When you specify a start and end time, this operation returns the prices +// of the instance types within the time range that you specified and the time +// when the price changed. The price is valid within the time period that you +// specified; the response merely indicates the last time that the price changed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSpotPriceHistory for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistory +func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + return out, req.Send() +} + +// DescribeSpotPriceHistoryWithContext is the same as DescribeSpotPriceHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotPriceHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotPriceHistoryWithContext(ctx aws.Context, input *DescribeSpotPriceHistoryInput, opts ...request.Option) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSpotPriceHistoryPages iterates over the pages of a DescribeSpotPriceHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotPriceHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotPriceHistory operation. 
+// pageNum := 0 +// err := client.DescribeSpotPriceHistoryPages(params, +// func(page *ec2.DescribeSpotPriceHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(*DescribeSpotPriceHistoryOutput, bool) bool) error { + return c.DescribeSpotPriceHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSpotPriceHistoryPagesWithContext same as DescribeSpotPriceHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotPriceHistoryPagesWithContext(ctx aws.Context, input *DescribeSpotPriceHistoryInput, fn func(*DescribeSpotPriceHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSpotPriceHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotPriceHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSpotPriceHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" + +// DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStaleSecurityGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeStaleSecurityGroups for more information on using the DescribeStaleSecurityGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeStaleSecurityGroupsRequest method. +// req, resp := client.DescribeStaleSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroups +func (c *EC2) DescribeStaleSecurityGroupsRequest(input *DescribeStaleSecurityGroupsInput) (req *request.Request, output *DescribeStaleSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeStaleSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeStaleSecurityGroupsInput{} + } + + output = &DescribeStaleSecurityGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeStaleSecurityGroups API operation for Amazon Elastic Compute Cloud. +// +// [VPC only] Describes the stale security group rules for security groups in +// a specified VPC. Rules are stale when they reference a deleted security group +// in a peer VPC, or a security group in a peer VPC for which the VPC peering +// connection has been deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeStaleSecurityGroups for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroups +func (c *EC2) DescribeStaleSecurityGroups(input *DescribeStaleSecurityGroupsInput) (*DescribeStaleSecurityGroupsOutput, error) { + req, out := c.DescribeStaleSecurityGroupsRequest(input) + return out, req.Send() +} + +// DescribeStaleSecurityGroupsWithContext is the same as DescribeStaleSecurityGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStaleSecurityGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeStaleSecurityGroupsWithContext(ctx aws.Context, input *DescribeStaleSecurityGroupsInput, opts ...request.Option) (*DescribeStaleSecurityGroupsOutput, error) { + req, out := c.DescribeStaleSecurityGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeStaleSecurityGroupsPages iterates over the pages of a DescribeStaleSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStaleSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStaleSecurityGroups operation. 
+// pageNum := 0 +// err := client.DescribeStaleSecurityGroupsPages(params, +// func(page *ec2.DescribeStaleSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeStaleSecurityGroupsPages(input *DescribeStaleSecurityGroupsInput, fn func(*DescribeStaleSecurityGroupsOutput, bool) bool) error { + return c.DescribeStaleSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeStaleSecurityGroupsPagesWithContext same as DescribeStaleSecurityGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeStaleSecurityGroupsPagesWithContext(ctx aws.Context, input *DescribeStaleSecurityGroupsInput, fn func(*DescribeStaleSecurityGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeStaleSecurityGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeStaleSecurityGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeStaleSecurityGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSubnets = "DescribeSubnets" + +// DescribeSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubnets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSubnets for more information on using the DescribeSubnets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSubnetsRequest method. +// req, resp := client.DescribeSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnets +func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { + op := &request.Operation{ + Name: opDescribeSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubnetsInput{} + } + + output = &DescribeSubnetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSubnets API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your subnets. +// +// For more information, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeSubnets for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnets +func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + return out, req.Send() +} + +// DescribeSubnetsWithContext is the same as DescribeSubnets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubnets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSubnetsWithContext(ctx aws.Context, input *DescribeSubnetsInput, opts ...request.Option) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSubnetsPages iterates over the pages of a DescribeSubnets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubnets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubnets operation. 
+// pageNum := 0 +// err := client.DescribeSubnetsPages(params, +// func(page *ec2.DescribeSubnetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSubnetsPages(input *DescribeSubnetsInput, fn func(*DescribeSubnetsOutput, bool) bool) error { + return c.DescribeSubnetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSubnetsPagesWithContext same as DescribeSubnetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSubnetsPagesWithContext(ctx aws.Context, input *DescribeSubnetsInput, fn func(*DescribeSubnetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSubnetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubnetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSubnetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTags +func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTagsInput{} + } + + output = &DescribeTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTags API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified tags for your EC2 resources. +// +// For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTags for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTags +func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTagsPages iterates over the pages of a DescribeTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTags operation. +// pageNum := 0 +// err := client.DescribeTagsPages(params, +// func(page *ec2.DescribeTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool) error { + return c.DescribeTagsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTagsPagesWithContext same as DescribeTagsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTagsPagesWithContext(ctx aws.Context, input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTagsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTagsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTrafficMirrorFilters = "DescribeTrafficMirrorFilters" + +// DescribeTrafficMirrorFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorFilters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTrafficMirrorFilters for more information on using the DescribeTrafficMirrorFilters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTrafficMirrorFiltersRequest method. 
+// req, resp := client.DescribeTrafficMirrorFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilters +func (c *EC2) DescribeTrafficMirrorFiltersRequest(input *DescribeTrafficMirrorFiltersInput) (req *request.Request, output *DescribeTrafficMirrorFiltersOutput) { + op := &request.Operation{ + Name: opDescribeTrafficMirrorFilters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTrafficMirrorFiltersInput{} + } + + output = &DescribeTrafficMirrorFiltersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTrafficMirrorFilters API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more Traffic Mirror filters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTrafficMirrorFilters for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilters +func (c *EC2) DescribeTrafficMirrorFilters(input *DescribeTrafficMirrorFiltersInput) (*DescribeTrafficMirrorFiltersOutput, error) { + req, out := c.DescribeTrafficMirrorFiltersRequest(input) + return out, req.Send() +} + +// DescribeTrafficMirrorFiltersWithContext is the same as DescribeTrafficMirrorFilters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTrafficMirrorFilters for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorFiltersWithContext(ctx aws.Context, input *DescribeTrafficMirrorFiltersInput, opts ...request.Option) (*DescribeTrafficMirrorFiltersOutput, error) { + req, out := c.DescribeTrafficMirrorFiltersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTrafficMirrorFiltersPages iterates over the pages of a DescribeTrafficMirrorFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTrafficMirrorFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorFilters operation. +// pageNum := 0 +// err := client.DescribeTrafficMirrorFiltersPages(params, +// func(page *ec2.DescribeTrafficMirrorFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTrafficMirrorFiltersPages(input *DescribeTrafficMirrorFiltersInput, fn func(*DescribeTrafficMirrorFiltersOutput, bool) bool) error { + return c.DescribeTrafficMirrorFiltersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTrafficMirrorFiltersPagesWithContext same as DescribeTrafficMirrorFiltersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorFiltersPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorFiltersInput, fn func(*DescribeTrafficMirrorFiltersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTrafficMirrorFiltersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTrafficMirrorFiltersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorFiltersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTrafficMirrorSessions = "DescribeTrafficMirrorSessions" + +// DescribeTrafficMirrorSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorSessions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTrafficMirrorSessions for more information on using the DescribeTrafficMirrorSessions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTrafficMirrorSessionsRequest method. 
+// req, resp := client.DescribeTrafficMirrorSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorSessions +func (c *EC2) DescribeTrafficMirrorSessionsRequest(input *DescribeTrafficMirrorSessionsInput) (req *request.Request, output *DescribeTrafficMirrorSessionsOutput) { + op := &request.Operation{ + Name: opDescribeTrafficMirrorSessions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTrafficMirrorSessionsInput{} + } + + output = &DescribeTrafficMirrorSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTrafficMirrorSessions API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror +// sessions are described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTrafficMirrorSessions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorSessions +func (c *EC2) DescribeTrafficMirrorSessions(input *DescribeTrafficMirrorSessionsInput) (*DescribeTrafficMirrorSessionsOutput, error) { + req, out := c.DescribeTrafficMirrorSessionsRequest(input) + return out, req.Send() +} + +// DescribeTrafficMirrorSessionsWithContext is the same as DescribeTrafficMirrorSessions with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeTrafficMirrorSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorSessionsWithContext(ctx aws.Context, input *DescribeTrafficMirrorSessionsInput, opts ...request.Option) (*DescribeTrafficMirrorSessionsOutput, error) { + req, out := c.DescribeTrafficMirrorSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTrafficMirrorSessionsPages iterates over the pages of a DescribeTrafficMirrorSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTrafficMirrorSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorSessions operation. +// pageNum := 0 +// err := client.DescribeTrafficMirrorSessionsPages(params, +// func(page *ec2.DescribeTrafficMirrorSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTrafficMirrorSessionsPages(input *DescribeTrafficMirrorSessionsInput, fn func(*DescribeTrafficMirrorSessionsOutput, bool) bool) error { + return c.DescribeTrafficMirrorSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTrafficMirrorSessionsPagesWithContext same as DescribeTrafficMirrorSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorSessionsPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorSessionsInput, fn func(*DescribeTrafficMirrorSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTrafficMirrorSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTrafficMirrorSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTrafficMirrorTargets = "DescribeTrafficMirrorTargets" + +// DescribeTrafficMirrorTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorTargets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTrafficMirrorTargets for more information on using the DescribeTrafficMirrorTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTrafficMirrorTargetsRequest method. 
+// req, resp := client.DescribeTrafficMirrorTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorTargets +func (c *EC2) DescribeTrafficMirrorTargetsRequest(input *DescribeTrafficMirrorTargetsInput) (req *request.Request, output *DescribeTrafficMirrorTargetsOutput) { + op := &request.Operation{ + Name: opDescribeTrafficMirrorTargets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTrafficMirrorTargetsInput{} + } + + output = &DescribeTrafficMirrorTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTrafficMirrorTargets API operation for Amazon Elastic Compute Cloud. +// +// Information about one or more Traffic Mirror targets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTrafficMirrorTargets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorTargets +func (c *EC2) DescribeTrafficMirrorTargets(input *DescribeTrafficMirrorTargetsInput) (*DescribeTrafficMirrorTargetsOutput, error) { + req, out := c.DescribeTrafficMirrorTargetsRequest(input) + return out, req.Send() +} + +// DescribeTrafficMirrorTargetsWithContext is the same as DescribeTrafficMirrorTargets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTrafficMirrorTargets for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorTargetsWithContext(ctx aws.Context, input *DescribeTrafficMirrorTargetsInput, opts ...request.Option) (*DescribeTrafficMirrorTargetsOutput, error) { + req, out := c.DescribeTrafficMirrorTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTrafficMirrorTargetsPages iterates over the pages of a DescribeTrafficMirrorTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTrafficMirrorTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTrafficMirrorTargets operation. +// pageNum := 0 +// err := client.DescribeTrafficMirrorTargetsPages(params, +// func(page *ec2.DescribeTrafficMirrorTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTrafficMirrorTargetsPages(input *DescribeTrafficMirrorTargetsInput, fn func(*DescribeTrafficMirrorTargetsOutput, bool) bool) error { + return c.DescribeTrafficMirrorTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTrafficMirrorTargetsPagesWithContext same as DescribeTrafficMirrorTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorTargetsPagesWithContext(ctx aws.Context, input *DescribeTrafficMirrorTargetsInput, fn func(*DescribeTrafficMirrorTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTrafficMirrorTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTrafficMirrorTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTrafficMirrorTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayAttachments = "DescribeTransitGatewayAttachments" + +// DescribeTransitGatewayAttachmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayAttachments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGatewayAttachments for more information on using the DescribeTransitGatewayAttachments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewayAttachmentsRequest method. 
+// req, resp := client.DescribeTransitGatewayAttachmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments +func (c *EC2) DescribeTransitGatewayAttachmentsRequest(input *DescribeTransitGatewayAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayAttachmentsOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayAttachments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayAttachmentsInput{} + } + + output = &DescribeTransitGatewayAttachmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayAttachments API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more attachments between resources and transit gateways. +// By default, all attachments are described. Alternatively, you can filter +// the results by attachment ID, attachment state, resource ID, or resource +// owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayAttachments for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments +func (c *EC2) DescribeTransitGatewayAttachments(input *DescribeTransitGatewayAttachmentsInput) (*DescribeTransitGatewayAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayAttachmentsRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayAttachmentsWithContext is the same as DescribeTransitGatewayAttachments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayAttachments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayAttachmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayAttachmentsPages iterates over the pages of a DescribeTransitGatewayAttachments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayAttachments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayAttachments operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayAttachmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayAttachmentsPages(input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayAttachmentsPagesWithContext same as DescribeTransitGatewayAttachmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayAttachmentsInput, fn func(*DescribeTransitGatewayAttachmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayAttachmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayAttachmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayAttachmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayMulticastDomains = "DescribeTransitGatewayMulticastDomains" + +// DescribeTransitGatewayMulticastDomainsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayMulticastDomains operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGatewayMulticastDomains for more information on using the DescribeTransitGatewayMulticastDomains +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewayMulticastDomainsRequest method. +// req, resp := client.DescribeTransitGatewayMulticastDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayMulticastDomains +func (c *EC2) DescribeTransitGatewayMulticastDomainsRequest(input *DescribeTransitGatewayMulticastDomainsInput) (req *request.Request, output *DescribeTransitGatewayMulticastDomainsOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayMulticastDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayMulticastDomainsInput{} + } + + output = &DescribeTransitGatewayMulticastDomainsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayMulticastDomains API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more transit gateway multicast domains. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayMulticastDomains for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayMulticastDomains +func (c *EC2) DescribeTransitGatewayMulticastDomains(input *DescribeTransitGatewayMulticastDomainsInput) (*DescribeTransitGatewayMulticastDomainsOutput, error) { + req, out := c.DescribeTransitGatewayMulticastDomainsRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayMulticastDomainsWithContext is the same as DescribeTransitGatewayMulticastDomains with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayMulticastDomains for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayMulticastDomainsWithContext(ctx aws.Context, input *DescribeTransitGatewayMulticastDomainsInput, opts ...request.Option) (*DescribeTransitGatewayMulticastDomainsOutput, error) { + req, out := c.DescribeTransitGatewayMulticastDomainsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayMulticastDomainsPages iterates over the pages of a DescribeTransitGatewayMulticastDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayMulticastDomains method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayMulticastDomains operation. +// pageNum := 0 +// err := client.DescribeTransitGatewayMulticastDomainsPages(params, +// func(page *ec2.DescribeTransitGatewayMulticastDomainsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayMulticastDomainsPages(input *DescribeTransitGatewayMulticastDomainsInput, fn func(*DescribeTransitGatewayMulticastDomainsOutput, bool) bool) error { + return c.DescribeTransitGatewayMulticastDomainsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayMulticastDomainsPagesWithContext same as DescribeTransitGatewayMulticastDomainsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayMulticastDomainsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayMulticastDomainsInput, fn func(*DescribeTransitGatewayMulticastDomainsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayMulticastDomainsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayMulticastDomainsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayMulticastDomainsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayPeeringAttachments = "DescribeTransitGatewayPeeringAttachments" + +// DescribeTransitGatewayPeeringAttachmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayPeeringAttachments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGatewayPeeringAttachments for more information on using the DescribeTransitGatewayPeeringAttachments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewayPeeringAttachmentsRequest method. 
+// req, resp := client.DescribeTransitGatewayPeeringAttachmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayPeeringAttachments +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsRequest(input *DescribeTransitGatewayPeeringAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayPeeringAttachmentsOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayPeeringAttachments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayPeeringAttachmentsInput{} + } + + output = &DescribeTransitGatewayPeeringAttachmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayPeeringAttachments API operation for Amazon Elastic Compute Cloud. +// +// Describes your transit gateway peering attachments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayPeeringAttachments for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayPeeringAttachments +func (c *EC2) DescribeTransitGatewayPeeringAttachments(input *DescribeTransitGatewayPeeringAttachmentsInput) (*DescribeTransitGatewayPeeringAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayPeeringAttachmentsRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayPeeringAttachmentsWithContext is the same as DescribeTransitGatewayPeeringAttachments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayPeeringAttachments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayPeeringAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayPeeringAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayPeeringAttachmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayPeeringAttachmentsPages iterates over the pages of a DescribeTransitGatewayPeeringAttachments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayPeeringAttachments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayPeeringAttachments operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayPeeringAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayPeeringAttachmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsPages(input *DescribeTransitGatewayPeeringAttachmentsInput, fn func(*DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayPeeringAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayPeeringAttachmentsPagesWithContext same as DescribeTransitGatewayPeeringAttachmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayPeeringAttachmentsInput, fn func(*DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayPeeringAttachmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayPeeringAttachmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayPeeringAttachmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayRouteTables = "DescribeTransitGatewayRouteTables" + +// DescribeTransitGatewayRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayRouteTables operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGatewayRouteTables for more information on using the DescribeTransitGatewayRouteTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewayRouteTablesRequest method. +// req, resp := client.DescribeTransitGatewayRouteTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables +func (c *EC2) DescribeTransitGatewayRouteTablesRequest(input *DescribeTransitGatewayRouteTablesInput) (req *request.Request, output *DescribeTransitGatewayRouteTablesOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayRouteTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayRouteTablesInput{} + } + + output = &DescribeTransitGatewayRouteTablesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayRouteTables API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more transit gateway route tables. By default, all transit +// gateway route tables are described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayRouteTables for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayRouteTables +func (c *EC2) DescribeTransitGatewayRouteTables(input *DescribeTransitGatewayRouteTablesInput) (*DescribeTransitGatewayRouteTablesOutput, error) { + req, out := c.DescribeTransitGatewayRouteTablesRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayRouteTablesWithContext is the same as DescribeTransitGatewayRouteTables with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayRouteTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayRouteTablesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, opts ...request.Option) (*DescribeTransitGatewayRouteTablesOutput, error) { + req, out := c.DescribeTransitGatewayRouteTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayRouteTablesPages iterates over the pages of a DescribeTransitGatewayRouteTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayRouteTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayRouteTables operation. +// pageNum := 0 +// err := client.DescribeTransitGatewayRouteTablesPages(params, +// func(page *ec2.DescribeTransitGatewayRouteTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayRouteTablesPages(input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool) error { + return c.DescribeTransitGatewayRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayRouteTablesPagesWithContext same as DescribeTransitGatewayRouteTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayRouteTablesPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayRouteTablesInput, fn func(*DescribeTransitGatewayRouteTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayRouteTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayRouteTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayRouteTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGatewayVpcAttachments = "DescribeTransitGatewayVpcAttachments" + +// DescribeTransitGatewayVpcAttachmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGatewayVpcAttachments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGatewayVpcAttachments for more information on using the DescribeTransitGatewayVpcAttachments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewayVpcAttachmentsRequest method. 
+// req, resp := client.DescribeTransitGatewayVpcAttachmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments +func (c *EC2) DescribeTransitGatewayVpcAttachmentsRequest(input *DescribeTransitGatewayVpcAttachmentsInput) (req *request.Request, output *DescribeTransitGatewayVpcAttachmentsOutput) { + op := &request.Operation{ + Name: opDescribeTransitGatewayVpcAttachments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewayVpcAttachmentsInput{} + } + + output = &DescribeTransitGatewayVpcAttachmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGatewayVpcAttachments API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more VPC attachments. By default, all VPC attachments are +// described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGatewayVpcAttachments for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayVpcAttachments +func (c *EC2) DescribeTransitGatewayVpcAttachments(input *DescribeTransitGatewayVpcAttachmentsInput) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewayVpcAttachmentsWithContext is the same as DescribeTransitGatewayVpcAttachments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGatewayVpcAttachments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayVpcAttachmentsWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, opts ...request.Option) (*DescribeTransitGatewayVpcAttachmentsOutput, error) { + req, out := c.DescribeTransitGatewayVpcAttachmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewayVpcAttachmentsPages iterates over the pages of a DescribeTransitGatewayVpcAttachments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayVpcAttachments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayVpcAttachments operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayVpcAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayVpcAttachmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayVpcAttachmentsPages(input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayVpcAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayVpcAttachmentsPagesWithContext same as DescribeTransitGatewayVpcAttachmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayVpcAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayVpcAttachmentsInput, fn func(*DescribeTransitGatewayVpcAttachmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayVpcAttachmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayVpcAttachmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayVpcAttachmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeTransitGateways = "DescribeTransitGateways" + +// DescribeTransitGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTransitGateways operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTransitGateways for more information on using the DescribeTransitGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTransitGatewaysRequest method. +// req, resp := client.DescribeTransitGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways +func (c *EC2) DescribeTransitGatewaysRequest(input *DescribeTransitGatewaysInput) (req *request.Request, output *DescribeTransitGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeTransitGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTransitGatewaysInput{} + } + + output = &DescribeTransitGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTransitGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more transit gateways. By default, all transit gateways +// are described. Alternatively, you can filter the results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTransitGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGateways +func (c *EC2) DescribeTransitGateways(input *DescribeTransitGatewaysInput) (*DescribeTransitGatewaysOutput, error) { + req, out := c.DescribeTransitGatewaysRequest(input) + return out, req.Send() +} + +// DescribeTransitGatewaysWithContext is the same as DescribeTransitGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTransitGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewaysWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, opts ...request.Option) (*DescribeTransitGatewaysOutput, error) { + req, out := c.DescribeTransitGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTransitGatewaysPages iterates over the pages of a DescribeTransitGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGateways operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewaysPages(params, +// func(page *ec2.DescribeTransitGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewaysPages(input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool) error { + return c.DescribeTransitGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewaysPagesWithContext same as DescribeTransitGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewaysPagesWithContext(ctx aws.Context, input *DescribeTransitGatewaysInput, fn func(*DescribeTransitGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVolumeAttribute = "DescribeVolumeAttribute" + +// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVolumeAttribute for more information on using the DescribeVolumeAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumeAttributeRequest method. +// req, resp := client.DescribeVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute +func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumeAttributeInput{} + } + + output = &DescribeVolumeAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumeAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified volume. You can specify +// only one attribute at a time. +// +// For more information about EBS volumes, see Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumeAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute +func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + return out, req.Send() +} + +// DescribeVolumeAttributeWithContext is the same as DescribeVolumeAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumeAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeAttributeWithContext(ctx aws.Context, input *DescribeVolumeAttributeInput, opts ...request.Option) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeVolumeStatus = "DescribeVolumeStatus" + +// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVolumeStatus for more information on using the DescribeVolumeStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumeStatusRequest method. 
+// req, resp := client.DescribeVolumeStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatus +func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) { + op := &request.Operation{ + Name: opDescribeVolumeStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumeStatusInput{} + } + + output = &DescribeVolumeStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumeStatus API operation for Amazon Elastic Compute Cloud. +// +// Describes the status of the specified volumes. Volume status provides the +// result of the checks performed on your volumes to determine events that can +// impair the performance of your volumes. The performance of a volume can be +// affected if an issue occurs on the volume's underlying host. If the volume's +// underlying host experiences a power outage or system issue, after the system +// is restored, there could be data inconsistencies on the volume. Volume events +// notify you if this occurs. Volume actions notify you if any action needs +// to be taken in response to the event. +// +// The DescribeVolumeStatus operation provides the following information about +// the specified volumes: +// +// Status: Reflects the current status of the volume. The possible values are +// ok, impaired , warning, or insufficient-data. If all checks pass, the overall +// status of the volume is ok. If the check fails, the overall status is impaired. +// If the status is insufficient-data, then the checks might still be taking +// place on your volume at the time. 
We recommend that you retry the request. +// For more information about volume status, see Monitoring the status of your +// volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Events: Reflect the cause of a volume status and might require you to take +// action. For example, if your volume returns an impaired status, then the +// volume event might be potential-data-inconsistency. This means that your +// volume has been affected by an issue with the underlying host, has all I/O +// operations disabled, and might have inconsistent data. +// +// Actions: Reflect the actions you might have to take in response to an event. +// For example, if the status of the volume is impaired and the volume event +// shows potential-data-inconsistency, then the action shows enable-volume-io. +// This means that you may want to enable the I/O operations for the volume +// by calling the EnableVolumeIO action and then check the volume for data consistency. +// +// Volume status is based on the volume status checks, and does not reflect +// the volume state. Therefore, volume status does not indicate volumes in the +// error state (for example, when a volume is incapable of accepting I/O.) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumeStatus for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatus +func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + return out, req.Send() +} + +// DescribeVolumeStatusWithContext is the same as DescribeVolumeStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumeStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeStatusWithContext(ctx aws.Context, input *DescribeVolumeStatusInput, opts ...request.Option) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVolumeStatusPages iterates over the pages of a DescribeVolumeStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVolumeStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumeStatus operation. 
+// pageNum := 0 +// err := client.DescribeVolumeStatusPages(params, +// func(page *ec2.DescribeVolumeStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(*DescribeVolumeStatusOutput, bool) bool) error { + return c.DescribeVolumeStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVolumeStatusPagesWithContext same as DescribeVolumeStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeStatusPagesWithContext(ctx aws.Context, input *DescribeVolumeStatusInput, fn func(*DescribeVolumeStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVolumeStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumeStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVolumeStatusOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVolumes = "DescribeVolumes" + +// DescribeVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeVolumes for more information on using the DescribeVolumes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumesRequest method. +// req, resp := client.DescribeVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes +func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { + op := &request.Operation{ + Name: opDescribeVolumes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesInput{} + } + + output = &DescribeVolumesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumes API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified EBS volumes or all of your EBS volumes. +// +// If you are describing a long list of volumes, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeVolumes +// request to retrieve the remaining results. +// +// For more information about EBS volumes, see Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes +func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + return out, req.Send() +} + +// DescribeVolumesWithContext is the same as DescribeVolumes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.Option) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVolumesPages iterates over the pages of a DescribeVolumes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVolumes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumes operation. 
+// pageNum := 0 +// err := client.DescribeVolumesPages(params, +// func(page *ec2.DescribeVolumesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(*DescribeVolumesOutput, bool) bool) error { + return c.DescribeVolumesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVolumesPagesWithContext same as DescribeVolumesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesPagesWithContext(ctx aws.Context, input *DescribeVolumesInput, fn func(*DescribeVolumesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVolumesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVolumesModifications = "DescribeVolumesModifications" + +// DescribeVolumesModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumesModifications operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeVolumesModifications for more information on using the DescribeVolumesModifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumesModificationsRequest method. +// req, resp := client.DescribeVolumesModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModifications +func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModificationsInput) (req *request.Request, output *DescribeVolumesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeVolumesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVolumesModificationsInput{} + } + + output = &DescribeVolumesModificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumesModifications API operation for Amazon Elastic Compute Cloud. +// +// Describes the most recent volume modification request for the specified EBS +// volumes. +// +// If a volume has never been modified, some information in the output will +// be null. If a volume has been modified more than once, the output includes +// only the most recent modification request. +// +// You can also use CloudWatch Events to check the status of a modification +// to an EBS volume. For information about CloudWatch Events, see the Amazon +// CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). 
+// For more information, see Monitoring volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumesModifications for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModifications +func (c *EC2) DescribeVolumesModifications(input *DescribeVolumesModificationsInput) (*DescribeVolumesModificationsOutput, error) { + req, out := c.DescribeVolumesModificationsRequest(input) + return out, req.Send() +} + +// DescribeVolumesModificationsWithContext is the same as DescribeVolumesModifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumesModifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesModificationsWithContext(ctx aws.Context, input *DescribeVolumesModificationsInput, opts ...request.Option) (*DescribeVolumesModificationsOutput, error) { + req, out := c.DescribeVolumesModificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVolumesModificationsPages iterates over the pages of a DescribeVolumesModifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeVolumesModifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumesModifications operation. +// pageNum := 0 +// err := client.DescribeVolumesModificationsPages(params, +// func(page *ec2.DescribeVolumesModificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVolumesModificationsPages(input *DescribeVolumesModificationsInput, fn func(*DescribeVolumesModificationsOutput, bool) bool) error { + return c.DescribeVolumesModificationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVolumesModificationsPagesWithContext same as DescribeVolumesModificationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesModificationsPagesWithContext(ctx aws.Context, input *DescribeVolumesModificationsInput, fn func(*DescribeVolumesModificationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVolumesModificationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesModificationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVolumesModificationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcAttribute = "DescribeVpcAttribute" + +// DescribeVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcAttribute for more information on using the DescribeVpcAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcAttributeRequest method. +// req, resp := client.DescribeVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcAttribute +func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { + op := &request.Operation{ + Name: opDescribeVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcAttributeInput{} + } + + output = &DescribeVpcAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified VPC. You can specify only +// one attribute at a time. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcAttribute +func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + return out, req.Send() +} + +// DescribeVpcAttributeWithContext is the same as DescribeVpcAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcAttributeWithContext(ctx aws.Context, input *DescribeVpcAttributeInput, opts ...request.Option) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeVpcClassicLink = "DescribeVpcClassicLink" + +// DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLink operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcClassicLink for more information on using the DescribeVpcClassicLink +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcClassicLinkRequest method. +// req, resp := client.DescribeVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLink +func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkInput{} + } + + output = &DescribeVpcClassicLinkOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcClassicLink API operation for Amazon Elastic Compute Cloud. +// +// Describes the ClassicLink status of one or more VPCs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcClassicLink for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLink +func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + return out, req.Send() +} + +// DescribeVpcClassicLinkWithContext is the same as DescribeVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcClassicLink for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcClassicLinkWithContext(ctx aws.Context, input *DescribeVpcClassicLinkInput, opts ...request.Option) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" + +// DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcClassicLinkDnsSupport for more information on using the DescribeVpcClassicLinkDnsSupport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcClassicLinkDnsSupportRequest method. 
+// req, resp := client.DescribeVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupport +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcClassicLinkDnsSupportInput{} + } + + output = &DescribeVpcClassicLinkDnsSupportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud. +// +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information, +// see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcClassicLinkDnsSupport for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupport +func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + return out, req.Send() +} + +// DescribeVpcClassicLinkDnsSupportWithContext is the same as DescribeVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *DescribeVpcClassicLinkDnsSupportInput, opts ...request.Option) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcClassicLinkDnsSupportPages iterates over the pages of a DescribeVpcClassicLinkDnsSupport operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcClassicLinkDnsSupport method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcClassicLinkDnsSupport operation. 
+// pageNum := 0 +// err := client.DescribeVpcClassicLinkDnsSupportPages(params, +// func(page *ec2.DescribeVpcClassicLinkDnsSupportOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcClassicLinkDnsSupportPages(input *DescribeVpcClassicLinkDnsSupportInput, fn func(*DescribeVpcClassicLinkDnsSupportOutput, bool) bool) error { + return c.DescribeVpcClassicLinkDnsSupportPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcClassicLinkDnsSupportPagesWithContext same as DescribeVpcClassicLinkDnsSupportPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcClassicLinkDnsSupportPagesWithContext(ctx aws.Context, input *DescribeVpcClassicLinkDnsSupportInput, fn func(*DescribeVpcClassicLinkDnsSupportOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcClassicLinkDnsSupportInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcClassicLinkDnsSupportRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcClassicLinkDnsSupportOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcEndpointConnectionNotifications = "DescribeVpcEndpointConnectionNotifications" + +// DescribeVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointConnectionNotifications operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpointConnectionNotifications for more information on using the DescribeVpcEndpointConnectionNotifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointConnectionNotificationsRequest method. +// req, resp := client.DescribeVpcEndpointConnectionNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionNotifications +func (c *EC2) DescribeVpcEndpointConnectionNotificationsRequest(input *DescribeVpcEndpointConnectionNotificationsInput) (req *request.Request, output *DescribeVpcEndpointConnectionNotificationsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointConnectionNotifications, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcEndpointConnectionNotificationsInput{} + } + + output = &DescribeVpcEndpointConnectionNotificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpointConnectionNotifications API operation for Amazon Elastic Compute Cloud. +// +// Describes the connection notifications for VPC endpoints and VPC endpoint +// services. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpointConnectionNotifications for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnectionNotifications +func (c *EC2) DescribeVpcEndpointConnectionNotifications(input *DescribeVpcEndpointConnectionNotificationsInput) (*DescribeVpcEndpointConnectionNotificationsOutput, error) { + req, out := c.DescribeVpcEndpointConnectionNotificationsRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointConnectionNotificationsWithContext is the same as DescribeVpcEndpointConnectionNotifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpointConnectionNotifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointConnectionNotificationsWithContext(ctx aws.Context, input *DescribeVpcEndpointConnectionNotificationsInput, opts ...request.Option) (*DescribeVpcEndpointConnectionNotificationsOutput, error) { + req, out := c.DescribeVpcEndpointConnectionNotificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcEndpointConnectionNotificationsPages iterates over the pages of a DescribeVpcEndpointConnectionNotifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeVpcEndpointConnectionNotifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcEndpointConnectionNotifications operation. +// pageNum := 0 +// err := client.DescribeVpcEndpointConnectionNotificationsPages(params, +// func(page *ec2.DescribeVpcEndpointConnectionNotificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcEndpointConnectionNotificationsPages(input *DescribeVpcEndpointConnectionNotificationsInput, fn func(*DescribeVpcEndpointConnectionNotificationsOutput, bool) bool) error { + return c.DescribeVpcEndpointConnectionNotificationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcEndpointConnectionNotificationsPagesWithContext same as DescribeVpcEndpointConnectionNotificationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointConnectionNotificationsPagesWithContext(ctx aws.Context, input *DescribeVpcEndpointConnectionNotificationsInput, fn func(*DescribeVpcEndpointConnectionNotificationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcEndpointConnectionNotificationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcEndpointConnectionNotificationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointConnectionNotificationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcEndpointConnections = "DescribeVpcEndpointConnections" + +// DescribeVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointConnections operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpointConnections for more information on using the DescribeVpcEndpointConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointConnectionsRequest method. 
+// req, resp := client.DescribeVpcEndpointConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnections +func (c *EC2) DescribeVpcEndpointConnectionsRequest(input *DescribeVpcEndpointConnectionsInput) (req *request.Request, output *DescribeVpcEndpointConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointConnections, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcEndpointConnectionsInput{} + } + + output = &DescribeVpcEndpointConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpointConnections API operation for Amazon Elastic Compute Cloud. +// +// Describes the VPC endpoint connections to your VPC endpoint services, including +// any endpoints that are pending your acceptance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpointConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointConnections +func (c *EC2) DescribeVpcEndpointConnections(input *DescribeVpcEndpointConnectionsInput) (*DescribeVpcEndpointConnectionsOutput, error) { + req, out := c.DescribeVpcEndpointConnectionsRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointConnectionsWithContext is the same as DescribeVpcEndpointConnections with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeVpcEndpointConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointConnectionsWithContext(ctx aws.Context, input *DescribeVpcEndpointConnectionsInput, opts ...request.Option) (*DescribeVpcEndpointConnectionsOutput, error) { + req, out := c.DescribeVpcEndpointConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcEndpointConnectionsPages iterates over the pages of a DescribeVpcEndpointConnections operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcEndpointConnections method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcEndpointConnections operation. +// pageNum := 0 +// err := client.DescribeVpcEndpointConnectionsPages(params, +// func(page *ec2.DescribeVpcEndpointConnectionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcEndpointConnectionsPages(input *DescribeVpcEndpointConnectionsInput, fn func(*DescribeVpcEndpointConnectionsOutput, bool) bool) error { + return c.DescribeVpcEndpointConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcEndpointConnectionsPagesWithContext same as DescribeVpcEndpointConnectionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointConnectionsPagesWithContext(ctx aws.Context, input *DescribeVpcEndpointConnectionsInput, fn func(*DescribeVpcEndpointConnectionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcEndpointConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcEndpointConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointConnectionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcEndpointServiceConfigurations = "DescribeVpcEndpointServiceConfigurations" + +// DescribeVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointServiceConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpointServiceConfigurations for more information on using the DescribeVpcEndpointServiceConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointServiceConfigurationsRequest method. 
+// req, resp := client.DescribeVpcEndpointServiceConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServiceConfigurations +func (c *EC2) DescribeVpcEndpointServiceConfigurationsRequest(input *DescribeVpcEndpointServiceConfigurationsInput) (req *request.Request, output *DescribeVpcEndpointServiceConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServiceConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcEndpointServiceConfigurationsInput{} + } + + output = &DescribeVpcEndpointServiceConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpointServiceConfigurations API operation for Amazon Elastic Compute Cloud. +// +// Describes the VPC endpoint service configurations in your account (your services). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpointServiceConfigurations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServiceConfigurations +func (c *EC2) DescribeVpcEndpointServiceConfigurations(input *DescribeVpcEndpointServiceConfigurationsInput) (*DescribeVpcEndpointServiceConfigurationsOutput, error) { + req, out := c.DescribeVpcEndpointServiceConfigurationsRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointServiceConfigurationsWithContext is the same as DescribeVpcEndpointServiceConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpointServiceConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServiceConfigurationsWithContext(ctx aws.Context, input *DescribeVpcEndpointServiceConfigurationsInput, opts ...request.Option) (*DescribeVpcEndpointServiceConfigurationsOutput, error) { + req, out := c.DescribeVpcEndpointServiceConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcEndpointServiceConfigurationsPages iterates over the pages of a DescribeVpcEndpointServiceConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcEndpointServiceConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcEndpointServiceConfigurations operation. 
+// pageNum := 0 +// err := client.DescribeVpcEndpointServiceConfigurationsPages(params, +// func(page *ec2.DescribeVpcEndpointServiceConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcEndpointServiceConfigurationsPages(input *DescribeVpcEndpointServiceConfigurationsInput, fn func(*DescribeVpcEndpointServiceConfigurationsOutput, bool) bool) error { + return c.DescribeVpcEndpointServiceConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcEndpointServiceConfigurationsPagesWithContext same as DescribeVpcEndpointServiceConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServiceConfigurationsPagesWithContext(ctx aws.Context, input *DescribeVpcEndpointServiceConfigurationsInput, fn func(*DescribeVpcEndpointServiceConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcEndpointServiceConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcEndpointServiceConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointServiceConfigurationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcEndpointServicePermissions = "DescribeVpcEndpointServicePermissions" + +// DescribeVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointServicePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpointServicePermissions for more information on using the DescribeVpcEndpointServicePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointServicePermissionsRequest method. 
+// req, resp := client.DescribeVpcEndpointServicePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicePermissions +func (c *EC2) DescribeVpcEndpointServicePermissionsRequest(input *DescribeVpcEndpointServicePermissionsInput) (req *request.Request, output *DescribeVpcEndpointServicePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServicePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcEndpointServicePermissionsInput{} + } + + output = &DescribeVpcEndpointServicePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud. +// +// Describes the principals (service consumers) that are permitted to discover +// your VPC endpoint service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpointServicePermissions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServicePermissions +func (c *EC2) DescribeVpcEndpointServicePermissions(input *DescribeVpcEndpointServicePermissionsInput) (*DescribeVpcEndpointServicePermissionsOutput, error) { + req, out := c.DescribeVpcEndpointServicePermissionsRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointServicePermissionsWithContext is the same as DescribeVpcEndpointServicePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpointServicePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServicePermissionsWithContext(ctx aws.Context, input *DescribeVpcEndpointServicePermissionsInput, opts ...request.Option) (*DescribeVpcEndpointServicePermissionsOutput, error) { + req, out := c.DescribeVpcEndpointServicePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcEndpointServicePermissionsPages iterates over the pages of a DescribeVpcEndpointServicePermissions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcEndpointServicePermissions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcEndpointServicePermissions operation. 
+// pageNum := 0 +// err := client.DescribeVpcEndpointServicePermissionsPages(params, +// func(page *ec2.DescribeVpcEndpointServicePermissionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcEndpointServicePermissionsPages(input *DescribeVpcEndpointServicePermissionsInput, fn func(*DescribeVpcEndpointServicePermissionsOutput, bool) bool) error { + return c.DescribeVpcEndpointServicePermissionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcEndpointServicePermissionsPagesWithContext same as DescribeVpcEndpointServicePermissionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServicePermissionsPagesWithContext(ctx aws.Context, input *DescribeVpcEndpointServicePermissionsInput, fn func(*DescribeVpcEndpointServicePermissionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcEndpointServicePermissionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcEndpointServicePermissionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointServicePermissionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" + +// DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointServices operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpointServices for more information on using the DescribeVpcEndpointServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointServicesRequest method. +// req, resp := client.DescribeVpcEndpointServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServices +func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpointServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcEndpointServicesInput{} + } + + output = &DescribeVpcEndpointServicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpointServices API operation for Amazon Elastic Compute Cloud. +// +// Describes available services to which you can create a VPC endpoint. +// +// When the service provider and the consumer have different accounts multiple +// Availability Zones, and the consumer views the VPC endpoint service information, +// the response only includes the common Availability Zones. 
For example, when +// the service provider account uses us-east-1a and us-east-1c and the consumer +// uses us-east-1a and us-east-1a and us-east-1b, the response includes the +// VPC endpoint services in the common Availability Zone, us-east-1a. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpointServices for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServices +func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointServicesWithContext is the same as DescribeVpcEndpointServices with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpointServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServicesWithContext(ctx aws.Context, input *DescribeVpcEndpointServicesInput, opts ...request.Option) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeVpcEndpoints = "DescribeVpcEndpoints" + +// DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpoints operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcEndpoints for more information on using the DescribeVpcEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcEndpointsRequest method. +// req, resp := client.DescribeVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpoints +func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeVpcEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcEndpointsInput{} + } + + output = &DescribeVpcEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcEndpoints API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your VPC endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcEndpoints for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpoints +func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + return out, req.Send() +} + +// DescribeVpcEndpointsWithContext is the same as DescribeVpcEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointsWithContext(ctx aws.Context, input *DescribeVpcEndpointsInput, opts ...request.Option) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcEndpointsPages iterates over the pages of a DescribeVpcEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcEndpoints operation. 
+// pageNum := 0 +// err := client.DescribeVpcEndpointsPages(params, +// func(page *ec2.DescribeVpcEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcEndpointsPages(input *DescribeVpcEndpointsInput, fn func(*DescribeVpcEndpointsOutput, bool) bool) error { + return c.DescribeVpcEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcEndpointsPagesWithContext same as DescribeVpcEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointsPagesWithContext(ctx aws.Context, input *DescribeVpcEndpointsInput, fn func(*DescribeVpcEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" + +// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcPeeringConnections operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeVpcPeeringConnections for more information on using the DescribeVpcPeeringConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcPeeringConnectionsRequest method. +// req, resp := client.DescribeVpcPeeringConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections +func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpcPeeringConnections, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcPeeringConnectionsInput{} + } + + output = &DescribeVpcPeeringConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcPeeringConnections API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your VPC peering connections. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcPeeringConnections for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections +func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + return out, req.Send() +} + +// DescribeVpcPeeringConnectionsWithContext is the same as DescribeVpcPeeringConnections with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcPeeringConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcPeeringConnectionsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.Option) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcPeeringConnectionsPages iterates over the pages of a DescribeVpcPeeringConnections operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcPeeringConnections method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcPeeringConnections operation. 
+// pageNum := 0 +// err := client.DescribeVpcPeeringConnectionsPages(params, +// func(page *ec2.DescribeVpcPeeringConnectionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcPeeringConnectionsPages(input *DescribeVpcPeeringConnectionsInput, fn func(*DescribeVpcPeeringConnectionsOutput, bool) bool) error { + return c.DescribeVpcPeeringConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcPeeringConnectionsPagesWithContext same as DescribeVpcPeeringConnectionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcPeeringConnectionsPagesWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, fn func(*DescribeVpcPeeringConnectionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcPeeringConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcPeeringConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcPeeringConnectionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpcs = "DescribeVpcs" + +// DescribeVpcsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpcs for more information on using the DescribeVpcs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpcsRequest method. +// req, resp := client.DescribeVpcsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs +func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { + op := &request.Operation{ + Name: opDescribeVpcs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeVpcsInput{} + } + + output = &DescribeVpcsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpcs API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your VPCs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpcs for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs +func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + return out, req.Send() +} + +// DescribeVpcsWithContext is the same as DescribeVpcs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcsWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.Option) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeVpcsPages iterates over the pages of a DescribeVpcs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVpcs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVpcs operation. +// pageNum := 0 +// err := client.DescribeVpcsPages(params, +// func(page *ec2.DescribeVpcsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeVpcsPages(input *DescribeVpcsInput, fn func(*DescribeVpcsOutput, bool) bool) error { + return c.DescribeVpcsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVpcsPagesWithContext same as DescribeVpcsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcsPagesWithContext(ctx aws.Context, input *DescribeVpcsInput, fn func(*DescribeVpcsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVpcsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeVpcsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeVpnConnections = "DescribeVpnConnections" + +// DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnConnections operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpnConnections for more information on using the DescribeVpnConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpnConnectionsRequest method. 
+// req, resp := client.DescribeVpnConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections +func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { + op := &request.Operation{ + Name: opDescribeVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnConnectionsInput{} + } + + output = &DescribeVpnConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpnConnections API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your VPN connections. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpnConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections +func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + return out, req.Send() +} + +// DescribeVpnConnectionsWithContext is the same as DescribeVpnConnections with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpnConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpnConnectionsWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.Option) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeVpnGateways = "DescribeVpnGateways" + +// DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVpnGateways for more information on using the DescribeVpnGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVpnGatewaysRequest method. 
+// req, resp := client.DescribeVpnGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnGateways +func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeVpnGateways, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpnGatewaysInput{} + } + + output = &DescribeVpnGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVpnGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your virtual private gateways. +// +// For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVpnGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnGateways +func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + return out, req.Send() +} + +// DescribeVpnGatewaysWithContext is the same as DescribeVpnGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpnGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpnGatewaysWithContext(ctx aws.Context, input *DescribeVpnGatewaysInput, opts ...request.Option) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachClassicLinkVpc = "DetachClassicLinkVpc" + +// DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the DetachClassicLinkVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachClassicLinkVpc for more information on using the DetachClassicLinkVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachClassicLinkVpcRequest method. +// req, resp := client.DetachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachClassicLinkVpc +func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { + op := &request.Operation{ + Name: opDetachClassicLinkVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachClassicLinkVpcInput{} + } + + output = &DetachClassicLinkVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// DetachClassicLinkVpc API operation for Amazon Elastic Compute Cloud. 
+// +// Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the instance +// has been unlinked, the VPC security groups are no longer associated with +// it. An instance is automatically unlinked from a VPC when it's stopped. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DetachClassicLinkVpc for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachClassicLinkVpc +func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + return out, req.Send() +} + +// DetachClassicLinkVpcWithContext is the same as DetachClassicLinkVpc with the addition of +// the ability to pass a context and additional request options. +// +// See DetachClassicLinkVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachClassicLinkVpcWithContext(ctx aws.Context, input *DetachClassicLinkVpcInput, opts ...request.Option) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachInternetGateway = "DetachInternetGateway" + +// DetachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachInternetGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachInternetGateway for more information on using the DetachInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachInternetGatewayRequest method. +// req, resp := client.DetachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachInternetGateway +func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { + op := &request.Operation{ + Name: opDetachInternetGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachInternetGatewayInput{} + } + + output = &DetachInternetGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachInternetGateway API operation for Amazon Elastic Compute Cloud. +// +// Detaches an internet gateway from a VPC, disabling connectivity between the +// internet and the VPC. The VPC must not contain any running instances with +// Elastic IP addresses or public IPv4 addresses. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DetachInternetGateway for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachInternetGateway +func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + return out, req.Send() +} + +// DetachInternetGatewayWithContext is the same as DetachInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DetachInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachInternetGatewayWithContext(ctx aws.Context, input *DetachInternetGatewayInput, opts ...request.Option) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachNetworkInterface = "DetachNetworkInterface" + +// DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DetachNetworkInterface operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachNetworkInterface for more information on using the DetachNetworkInterface +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachNetworkInterfaceRequest method. 
+// req, resp := client.DetachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachNetworkInterface +func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { + op := &request.Operation{ + Name: opDetachNetworkInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachNetworkInterfaceInput{} + } + + output = &DetachNetworkInterfaceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachNetworkInterface API operation for Amazon Elastic Compute Cloud. +// +// Detaches a network interface from an instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DetachNetworkInterface for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachNetworkInterface +func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + return out, req.Send() +} + +// DetachNetworkInterfaceWithContext is the same as DetachNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See DetachNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachNetworkInterfaceWithContext(ctx aws.Context, input *DetachNetworkInterfaceInput, opts ...request.Option) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachVolume = "DetachVolume" + +// DetachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DetachVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachVolume for more information on using the DetachVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachVolumeRequest method. +// req, resp := client.DetachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVolume +func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { + op := &request.Operation{ + Name: opDetachVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVolumeInput{} + } + + output = &VolumeAttachment{} + req = c.newRequest(op, input, output) + return +} + +// DetachVolume API operation for Amazon Elastic Compute Cloud. +// +// Detaches an EBS volume from an instance. Make sure to unmount any file systems +// on the device within your operating system before detaching the volume. 
Failure +// to do so can result in the volume becoming stuck in the busy state while +// detaching. If this happens, detachment can be delayed indefinitely until +// you unmount the volume, force detachment, reboot the instance, or all three. +// If an EBS volume is the root device of an instance, it can't be detached +// while the instance is running. To detach the root volume, stop the instance +// first. +// +// When a volume with an AWS Marketplace product code is detached from an instance, +// the product code is no longer associated with the instance. +// +// For more information, see Detaching an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DetachVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVolume +func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + return out, req.Send() +} + +// DetachVolumeWithContext is the same as DetachVolume with the addition of +// the ability to pass a context and additional request options. +// +// See DetachVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DetachVolumeWithContext(ctx aws.Context, input *DetachVolumeInput, opts ...request.Option) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachVpnGateway = "DetachVpnGateway" + +// DetachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachVpnGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachVpnGateway for more information on using the DetachVpnGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachVpnGatewayRequest method. +// req, resp := client.DetachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVpnGateway +func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { + op := &request.Operation{ + Name: opDetachVpnGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachVpnGatewayInput{} + } + + output = &DetachVpnGatewayOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachVpnGateway API operation for Amazon Elastic Compute Cloud. +// +// Detaches a virtual private gateway from a VPC. 
You do this if you're planning +// to turn off the VPC and not use it anymore. You can confirm a virtual private +// gateway has been completely detached from a VPC by describing the virtual +// private gateway (any attachments to the virtual private gateway are also +// described). +// +// You must wait for the attachment's state to switch to detached before you +// can delete the VPC or attach a different VPC to the virtual private gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DetachVpnGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVpnGateway +func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + return out, req.Send() +} + +// DetachVpnGatewayWithContext is the same as DetachVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DetachVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachVpnGatewayWithContext(ctx aws.Context, input *DetachVpnGatewayInput, opts ...request.Option) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDisableEbsEncryptionByDefault = "DisableEbsEncryptionByDefault" + +// DisableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the DisableEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableEbsEncryptionByDefault for more information on using the DisableEbsEncryptionByDefault +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableEbsEncryptionByDefaultRequest method. +// req, resp := client.DisableEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableEbsEncryptionByDefault +func (c *EC2) DisableEbsEncryptionByDefaultRequest(input *DisableEbsEncryptionByDefaultInput) (req *request.Request, output *DisableEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opDisableEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableEbsEncryptionByDefaultInput{} + } + + output = &DisableEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Disables EBS encryption by default for your account in the current Region. +// +// After you disable encryption by default, you can still create encrypted volumes +// by enabling encryption when you create each volume. 
+// +// Disabling encryption by default does not change the encryption status of +// your existing volumes. +// +// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableEbsEncryptionByDefault +func (c *EC2) DisableEbsEncryptionByDefault(input *DisableEbsEncryptionByDefaultInput) (*DisableEbsEncryptionByDefaultOutput, error) { + req, out := c.DisableEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// DisableEbsEncryptionByDefaultWithContext is the same as DisableEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See DisableEbsEncryptionByDefault for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableEbsEncryptionByDefaultWithContext(ctx aws.Context, input *DisableEbsEncryptionByDefaultInput, opts ...request.Option) (*DisableEbsEncryptionByDefaultOutput, error) { + req, out := c.DisableEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDisableFastSnapshotRestores = "DisableFastSnapshotRestores" + +// DisableFastSnapshotRestoresRequest generates a "aws/request.Request" representing the +// client's request for the DisableFastSnapshotRestores operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableFastSnapshotRestores for more information on using the DisableFastSnapshotRestores +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableFastSnapshotRestoresRequest method. +// req, resp := client.DisableFastSnapshotRestoresRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableFastSnapshotRestores +func (c *EC2) DisableFastSnapshotRestoresRequest(input *DisableFastSnapshotRestoresInput) (req *request.Request, output *DisableFastSnapshotRestoresOutput) { + op := &request.Operation{ + Name: opDisableFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableFastSnapshotRestoresInput{} + } + + output = &DisableFastSnapshotRestoresOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableFastSnapshotRestores API operation for Amazon Elastic Compute Cloud. +// +// Disables fast snapshot restores for the specified snapshots in the specified +// Availability Zones. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableFastSnapshotRestores for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableFastSnapshotRestores +func (c *EC2) DisableFastSnapshotRestores(input *DisableFastSnapshotRestoresInput) (*DisableFastSnapshotRestoresOutput, error) { + req, out := c.DisableFastSnapshotRestoresRequest(input) + return out, req.Send() +} + +// DisableFastSnapshotRestoresWithContext is the same as DisableFastSnapshotRestores with the addition of +// the ability to pass a context and additional request options. +// +// See DisableFastSnapshotRestores for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableFastSnapshotRestoresWithContext(ctx aws.Context, input *DisableFastSnapshotRestoresInput, opts ...request.Option) (*DisableFastSnapshotRestoresOutput, error) { + req, out := c.DisableFastSnapshotRestoresRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableTransitGatewayRouteTablePropagation = "DisableTransitGatewayRouteTablePropagation" + +// DisableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the +// client's request for the DisableTransitGatewayRouteTablePropagation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DisableTransitGatewayRouteTablePropagation for more information on using the DisableTransitGatewayRouteTablePropagation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableTransitGatewayRouteTablePropagationRequest method. +// req, resp := client.DisableTransitGatewayRouteTablePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableTransitGatewayRouteTablePropagation +func (c *EC2) DisableTransitGatewayRouteTablePropagationRequest(input *DisableTransitGatewayRouteTablePropagationInput) (req *request.Request, output *DisableTransitGatewayRouteTablePropagationOutput) { + op := &request.Operation{ + Name: opDisableTransitGatewayRouteTablePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableTransitGatewayRouteTablePropagationInput{} + } + + output = &DisableTransitGatewayRouteTablePropagationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableTransitGatewayRouteTablePropagation API operation for Amazon Elastic Compute Cloud. +// +// Disables the specified resource attachment from propagating routes to the +// specified propagation route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableTransitGatewayRouteTablePropagation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableTransitGatewayRouteTablePropagation +func (c *EC2) DisableTransitGatewayRouteTablePropagation(input *DisableTransitGatewayRouteTablePropagationInput) (*DisableTransitGatewayRouteTablePropagationOutput, error) { + req, out := c.DisableTransitGatewayRouteTablePropagationRequest(input) + return out, req.Send() +} + +// DisableTransitGatewayRouteTablePropagationWithContext is the same as DisableTransitGatewayRouteTablePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See DisableTransitGatewayRouteTablePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableTransitGatewayRouteTablePropagationWithContext(ctx aws.Context, input *DisableTransitGatewayRouteTablePropagationInput, opts ...request.Option) (*DisableTransitGatewayRouteTablePropagationOutput, error) { + req, out := c.DisableTransitGatewayRouteTablePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" + +// DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the DisableVgwRoutePropagation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableVgwRoutePropagation for more information on using the DisableVgwRoutePropagation +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableVgwRoutePropagationRequest method. +// req, resp := client.DisableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVgwRoutePropagation +func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opDisableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVgwRoutePropagationInput{} + } + + output = &DisableVgwRoutePropagationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisableVgwRoutePropagation API operation for Amazon Elastic Compute Cloud. +// +// Disables a virtual private gateway (VGW) from propagating routes to a specified +// route table of a VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableVgwRoutePropagation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVgwRoutePropagation +func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + return out, req.Send() +} + +// DisableVgwRoutePropagationWithContext is the same as DisableVgwRoutePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVgwRoutePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVgwRoutePropagationWithContext(ctx aws.Context, input *DisableVgwRoutePropagationInput, opts ...request.Option) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableVpcClassicLink = "DisableVpcClassicLink" + +// DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLink operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableVpcClassicLink for more information on using the DisableVpcClassicLink +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DisableVpcClassicLinkRequest method. +// req, resp := client.DisableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLink +func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkInput{} + } + + output = &DisableVpcClassicLinkOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableVpcClassicLink API operation for Amazon Elastic Compute Cloud. +// +// Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC +// that has EC2-Classic instances linked to it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableVpcClassicLink for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLink +func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + return out, req.Send() +} + +// DisableVpcClassicLinkWithContext is the same as DisableVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVpcClassicLink for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVpcClassicLinkWithContext(ctx aws.Context, input *DisableVpcClassicLinkInput, opts ...request.Option) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" + +// DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableVpcClassicLinkDnsSupport for more information on using the DisableVpcClassicLinkDnsSupport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableVpcClassicLinkDnsSupportRequest method. 
+// req, resp := client.DisableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkDnsSupport +func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkDnsSupportInput{} + } + + output = &DisableVpcClassicLinkDnsSupportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud. +// +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve +// to public IP addresses when addressed between a linked EC2-Classic instance +// and instances in the VPC to which it's linked. For more information, see +// ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You must specify a VPC ID in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisableVpcClassicLinkDnsSupport for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkDnsSupport +func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + return out, req.Send() +} + +// DisableVpcClassicLinkDnsSupportWithContext is the same as DisableVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *DisableVpcClassicLinkDnsSupportInput, opts ...request.Option) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateAddress = "DisassociateAddress" + +// DisassociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateAddress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateAddress for more information on using the DisassociateAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DisassociateAddressRequest method. +// req, resp := client.DisassociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateAddress +func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { + op := &request.Operation{ + Name: opDisassociateAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateAddressInput{} + } + + output = &DisassociateAddressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateAddress API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an Elastic IP address from the instance or network interface +// it's associated with. +// +// An Elastic IP address is for use in either the EC2-Classic platform or in +// a VPC. For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This is an idempotent operation. If you perform the operation more than once, +// Amazon EC2 doesn't return an error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateAddress for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateAddress +func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + return out, req.Send() +} + +// DisassociateAddressWithContext is the same as DisassociateAddress with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateAddressWithContext(ctx aws.Context, input *DisassociateAddressInput, opts ...request.Option) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateClientVpnTargetNetwork = "DisassociateClientVpnTargetNetwork" + +// DisassociateClientVpnTargetNetworkRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateClientVpnTargetNetwork operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateClientVpnTargetNetwork for more information on using the DisassociateClientVpnTargetNetwork +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DisassociateClientVpnTargetNetworkRequest method. +// req, resp := client.DisassociateClientVpnTargetNetworkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateClientVpnTargetNetwork +func (c *EC2) DisassociateClientVpnTargetNetworkRequest(input *DisassociateClientVpnTargetNetworkInput) (req *request.Request, output *DisassociateClientVpnTargetNetworkOutput) { + op := &request.Operation{ + Name: opDisassociateClientVpnTargetNetwork, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateClientVpnTargetNetworkInput{} + } + + output = &DisassociateClientVpnTargetNetworkOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateClientVpnTargetNetwork API operation for Amazon Elastic Compute Cloud. +// +// Disassociates a target network from the specified Client VPN endpoint. When +// you disassociate the last target network from a Client VPN, the following +// happens: +// +// * The route that was automatically added for the VPC is deleted +// +// * All active client connections are terminated +// +// * New client connections are disallowed +// +// * The Client VPN endpoint's status changes to pending-associate +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateClientVpnTargetNetwork for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateClientVpnTargetNetwork +func (c *EC2) DisassociateClientVpnTargetNetwork(input *DisassociateClientVpnTargetNetworkInput) (*DisassociateClientVpnTargetNetworkOutput, error) { + req, out := c.DisassociateClientVpnTargetNetworkRequest(input) + return out, req.Send() +} + +// DisassociateClientVpnTargetNetworkWithContext is the same as DisassociateClientVpnTargetNetwork with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateClientVpnTargetNetwork for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateClientVpnTargetNetworkWithContext(ctx aws.Context, input *DisassociateClientVpnTargetNetworkInput, opts ...request.Option) (*DisassociateClientVpnTargetNetworkOutput, error) { + req, out := c.DisassociateClientVpnTargetNetworkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateEnclaveCertificateIamRole = "DisassociateEnclaveCertificateIamRole" + +// DisassociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateEnclaveCertificateIamRole for more information on using the DisassociateEnclaveCertificateIamRole +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateEnclaveCertificateIamRoleRequest method. +// req, resp := client.DisassociateEnclaveCertificateIamRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRoleRequest(input *DisassociateEnclaveCertificateIamRoleInput) (req *request.Request, output *DisassociateEnclaveCertificateIamRoleOutput) { + op := &request.Operation{ + Name: opDisassociateEnclaveCertificateIamRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateEnclaveCertificateIamRoleInput{} + } + + output = &DisassociateEnclaveCertificateIamRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an IAM role from an AWS Certificate Manager (ACM) certificate. +// Disassociating an IAM role from an ACM certificate removes the Amazon S3 +// object that contains the certificate, certificate chain, and encrypted private +// key from the Amazon S3 bucket. It also revokes the IAM role's permission +// to use the AWS Key Management Service (KMS) customer master key (CMK) used +// to encrypt the private key. This effectively revokes the role's permission +// to use the certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateEnclaveCertificateIamRole for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRole(input *DisassociateEnclaveCertificateIamRoleInput) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + return out, req.Send() +} + +// DisassociateEnclaveCertificateIamRoleWithContext is the same as DisassociateEnclaveCertificateIamRole with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateEnclaveCertificateIamRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *DisassociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" + +// DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateIamInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DisassociateIamInstanceProfile for more information on using the DisassociateIamInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateIamInstanceProfileRequest method. +// req, resp := client.DisassociateIamInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfile +func (c *EC2) DisassociateIamInstanceProfileRequest(input *DisassociateIamInstanceProfileInput) (req *request.Request, output *DisassociateIamInstanceProfileOutput) { + op := &request.Operation{ + Name: opDisassociateIamInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateIamInstanceProfileInput{} + } + + output = &DisassociateIamInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateIamInstanceProfile API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an IAM instance profile from a running or stopped instance. +// +// Use DescribeIamInstanceProfileAssociations to get the association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateIamInstanceProfile for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfile +func (c *EC2) DisassociateIamInstanceProfile(input *DisassociateIamInstanceProfileInput) (*DisassociateIamInstanceProfileOutput, error) { + req, out := c.DisassociateIamInstanceProfileRequest(input) + return out, req.Send() +} + +// DisassociateIamInstanceProfileWithContext is the same as DisassociateIamInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateIamInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateIamInstanceProfileWithContext(ctx aws.Context, input *DisassociateIamInstanceProfileInput, opts ...request.Option) (*DisassociateIamInstanceProfileOutput, error) { + req, out := c.DisassociateIamInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateRouteTable = "DisassociateRouteTable" + +// DisassociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateRouteTable for more information on using the DisassociateRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DisassociateRouteTableRequest method. +// req, resp := client.DisassociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTable +func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateRouteTableInput{} + } + + output = &DisassociateRouteTableOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Disassociates a subnet or gateway from a route table. +// +// After you perform this action, the subnet no longer uses the routes in the +// route table. Instead, it uses the routes in the VPC's main route table. For +// more information about route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateRouteTable for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTable +func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + return out, req.Send() +} + +// DisassociateRouteTableWithContext is the same as DisassociateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateRouteTableWithContext(ctx aws.Context, input *DisassociateRouteTableInput, opts ...request.Option) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateSubnetCidrBlock = "DisassociateSubnetCidrBlock" + +// DisassociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateSubnetCidrBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateSubnetCidrBlock for more information on using the DisassociateSubnetCidrBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DisassociateSubnetCidrBlockRequest method. +// req, resp := client.DisassociateSubnetCidrBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateSubnetCidrBlock +func (c *EC2) DisassociateSubnetCidrBlockRequest(input *DisassociateSubnetCidrBlockInput) (req *request.Request, output *DisassociateSubnetCidrBlockOutput) { + op := &request.Operation{ + Name: opDisassociateSubnetCidrBlock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateSubnetCidrBlockInput{} + } + + output = &DisassociateSubnetCidrBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateSubnetCidrBlock API operation for Amazon Elastic Compute Cloud. +// +// Disassociates a CIDR block from a subnet. Currently, you can disassociate +// an IPv6 CIDR block only. You must detach or delete all gateways and resources +// that are associated with the CIDR block before you can disassociate it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateSubnetCidrBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateSubnetCidrBlock +func (c *EC2) DisassociateSubnetCidrBlock(input *DisassociateSubnetCidrBlockInput) (*DisassociateSubnetCidrBlockOutput, error) { + req, out := c.DisassociateSubnetCidrBlockRequest(input) + return out, req.Send() +} + +// DisassociateSubnetCidrBlockWithContext is the same as DisassociateSubnetCidrBlock with the addition of +// the ability to pass a context and additional request options. 
+// +// See DisassociateSubnetCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateSubnetCidrBlockWithContext(ctx aws.Context, input *DisassociateSubnetCidrBlockInput, opts ...request.Option) (*DisassociateSubnetCidrBlockOutput, error) { + req, out := c.DisassociateSubnetCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateTransitGatewayMulticastDomain = "DisassociateTransitGatewayMulticastDomain" + +// DisassociateTransitGatewayMulticastDomainRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateTransitGatewayMulticastDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateTransitGatewayMulticastDomain for more information on using the DisassociateTransitGatewayMulticastDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateTransitGatewayMulticastDomainRequest method. 
+// req, resp := client.DisassociateTransitGatewayMulticastDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTransitGatewayMulticastDomain +func (c *EC2) DisassociateTransitGatewayMulticastDomainRequest(input *DisassociateTransitGatewayMulticastDomainInput) (req *request.Request, output *DisassociateTransitGatewayMulticastDomainOutput) { + op := &request.Operation{ + Name: opDisassociateTransitGatewayMulticastDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateTransitGatewayMulticastDomainInput{} + } + + output = &DisassociateTransitGatewayMulticastDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateTransitGatewayMulticastDomain API operation for Amazon Elastic Compute Cloud. +// +// Disassociates the specified subnets from the transit gateway multicast domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateTransitGatewayMulticastDomain for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTransitGatewayMulticastDomain +func (c *EC2) DisassociateTransitGatewayMulticastDomain(input *DisassociateTransitGatewayMulticastDomainInput) (*DisassociateTransitGatewayMulticastDomainOutput, error) { + req, out := c.DisassociateTransitGatewayMulticastDomainRequest(input) + return out, req.Send() +} + +// DisassociateTransitGatewayMulticastDomainWithContext is the same as DisassociateTransitGatewayMulticastDomain with the addition of +// the ability to pass a context and additional request options. 
+// +// See DisassociateTransitGatewayMulticastDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateTransitGatewayMulticastDomainWithContext(ctx aws.Context, input *DisassociateTransitGatewayMulticastDomainInput, opts ...request.Option) (*DisassociateTransitGatewayMulticastDomainOutput, error) { + req, out := c.DisassociateTransitGatewayMulticastDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateTransitGatewayRouteTable = "DisassociateTransitGatewayRouteTable" + +// DisassociateTransitGatewayRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateTransitGatewayRouteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateTransitGatewayRouteTable for more information on using the DisassociateTransitGatewayRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateTransitGatewayRouteTableRequest method. 
+// req, resp := client.DisassociateTransitGatewayRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTransitGatewayRouteTable +func (c *EC2) DisassociateTransitGatewayRouteTableRequest(input *DisassociateTransitGatewayRouteTableInput) (req *request.Request, output *DisassociateTransitGatewayRouteTableOutput) { + op := &request.Operation{ + Name: opDisassociateTransitGatewayRouteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateTransitGatewayRouteTableInput{} + } + + output = &DisassociateTransitGatewayRouteTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateTransitGatewayRouteTable API operation for Amazon Elastic Compute Cloud. +// +// Disassociates a resource attachment from a transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateTransitGatewayRouteTable for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateTransitGatewayRouteTable +func (c *EC2) DisassociateTransitGatewayRouteTable(input *DisassociateTransitGatewayRouteTableInput) (*DisassociateTransitGatewayRouteTableOutput, error) { + req, out := c.DisassociateTransitGatewayRouteTableRequest(input) + return out, req.Send() +} + +// DisassociateTransitGatewayRouteTableWithContext is the same as DisassociateTransitGatewayRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateTransitGatewayRouteTable for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateTransitGatewayRouteTableWithContext(ctx aws.Context, input *DisassociateTransitGatewayRouteTableInput, opts ...request.Option) (*DisassociateTransitGatewayRouteTableOutput, error) { + req, out := c.DisassociateTransitGatewayRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateVpcCidrBlock = "DisassociateVpcCidrBlock" + +// DisassociateVpcCidrBlockRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateVpcCidrBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateVpcCidrBlock for more information on using the DisassociateVpcCidrBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateVpcCidrBlockRequest method. 
+// req, resp := client.DisassociateVpcCidrBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateVpcCidrBlock +func (c *EC2) DisassociateVpcCidrBlockRequest(input *DisassociateVpcCidrBlockInput) (req *request.Request, output *DisassociateVpcCidrBlockOutput) { + op := &request.Operation{ + Name: opDisassociateVpcCidrBlock, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateVpcCidrBlockInput{} + } + + output = &DisassociateVpcCidrBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateVpcCidrBlock API operation for Amazon Elastic Compute Cloud. +// +// Disassociates a CIDR block from a VPC. To disassociate the CIDR block, you +// must specify its association ID. You can get the association ID by using +// DescribeVpcs. You must detach or delete all gateways and resources that are +// associated with the CIDR block before you can disassociate it. +// +// You cannot disassociate the CIDR block with which you originally created +// the VPC (the primary CIDR block). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateVpcCidrBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateVpcCidrBlock +func (c *EC2) DisassociateVpcCidrBlock(input *DisassociateVpcCidrBlockInput) (*DisassociateVpcCidrBlockOutput, error) { + req, out := c.DisassociateVpcCidrBlockRequest(input) + return out, req.Send() +} + +// DisassociateVpcCidrBlockWithContext is the same as DisassociateVpcCidrBlock with the addition of +// the ability to pass a context and additional request options. 
+// +// See DisassociateVpcCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateVpcCidrBlockWithContext(ctx aws.Context, input *DisassociateVpcCidrBlockInput, opts ...request.Option) (*DisassociateVpcCidrBlockOutput, error) { + req, out := c.DisassociateVpcCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableEbsEncryptionByDefault = "EnableEbsEncryptionByDefault" + +// EnableEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the EnableEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableEbsEncryptionByDefault for more information on using the EnableEbsEncryptionByDefault +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableEbsEncryptionByDefaultRequest method. 
+// req, resp := client.EnableEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableEbsEncryptionByDefault +func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDefaultInput) (req *request.Request, output *EnableEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opEnableEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableEbsEncryptionByDefaultInput{} + } + + output = &EnableEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Enables EBS encryption by default for your account in the current Region. +// +// After you enable encryption by default, the EBS volumes that you create are +// are always encrypted, either using the default CMK or the CMK that you specified +// when you created each volume. For more information, see Amazon EBS encryption +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId +// or ResetEbsDefaultKmsKeyId. +// +// Enabling encryption by default has no effect on the encryption status of +// your existing volumes. +// +// After you enable encryption by default, you can no longer launch instances +// using instance types that do not support encryption. For more information, +// see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableEbsEncryptionByDefault +func (c *EC2) EnableEbsEncryptionByDefault(input *EnableEbsEncryptionByDefaultInput) (*EnableEbsEncryptionByDefaultOutput, error) { + req, out := c.EnableEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// EnableEbsEncryptionByDefaultWithContext is the same as EnableEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See EnableEbsEncryptionByDefault for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableEbsEncryptionByDefaultWithContext(ctx aws.Context, input *EnableEbsEncryptionByDefaultInput, opts ...request.Option) (*EnableEbsEncryptionByDefaultOutput, error) { + req, out := c.EnableEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableFastSnapshotRestores = "EnableFastSnapshotRestores" + +// EnableFastSnapshotRestoresRequest generates a "aws/request.Request" representing the +// client's request for the EnableFastSnapshotRestores operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableFastSnapshotRestores for more information on using the EnableFastSnapshotRestores +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableFastSnapshotRestoresRequest method. +// req, resp := client.EnableFastSnapshotRestoresRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableFastSnapshotRestores +func (c *EC2) EnableFastSnapshotRestoresRequest(input *EnableFastSnapshotRestoresInput) (req *request.Request, output *EnableFastSnapshotRestoresOutput) { + op := &request.Operation{ + Name: opEnableFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableFastSnapshotRestoresInput{} + } + + output = &EnableFastSnapshotRestoresOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableFastSnapshotRestores API operation for Amazon Elastic Compute Cloud. +// +// Enables fast snapshot restores for the specified snapshots in the specified +// Availability Zones. +// +// You get the full benefit of fast snapshot restores after they enter the enabled +// state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. +// To disable fast snapshot restores, use DisableFastSnapshotRestores. +// +// For more information, see Amazon EBS fast snapshot restore (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-fast-snapshot-restore.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableFastSnapshotRestores for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableFastSnapshotRestores +func (c *EC2) EnableFastSnapshotRestores(input *EnableFastSnapshotRestoresInput) (*EnableFastSnapshotRestoresOutput, error) { + req, out := c.EnableFastSnapshotRestoresRequest(input) + return out, req.Send() +} + +// EnableFastSnapshotRestoresWithContext is the same as EnableFastSnapshotRestores with the addition of +// the ability to pass a context and additional request options. +// +// See EnableFastSnapshotRestores for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableFastSnapshotRestoresWithContext(ctx aws.Context, input *EnableFastSnapshotRestoresInput, opts ...request.Option) (*EnableFastSnapshotRestoresOutput, error) { + req, out := c.EnableFastSnapshotRestoresRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableTransitGatewayRouteTablePropagation = "EnableTransitGatewayRouteTablePropagation" + +// EnableTransitGatewayRouteTablePropagationRequest generates a "aws/request.Request" representing the +// client's request for the EnableTransitGatewayRouteTablePropagation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableTransitGatewayRouteTablePropagation for more information on using the EnableTransitGatewayRouteTablePropagation +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableTransitGatewayRouteTablePropagationRequest method. +// req, resp := client.EnableTransitGatewayRouteTablePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableTransitGatewayRouteTablePropagation +func (c *EC2) EnableTransitGatewayRouteTablePropagationRequest(input *EnableTransitGatewayRouteTablePropagationInput) (req *request.Request, output *EnableTransitGatewayRouteTablePropagationOutput) { + op := &request.Operation{ + Name: opEnableTransitGatewayRouteTablePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableTransitGatewayRouteTablePropagationInput{} + } + + output = &EnableTransitGatewayRouteTablePropagationOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableTransitGatewayRouteTablePropagation API operation for Amazon Elastic Compute Cloud. +// +// Enables the specified attachment to propagate routes to the specified propagation +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableTransitGatewayRouteTablePropagation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableTransitGatewayRouteTablePropagation +func (c *EC2) EnableTransitGatewayRouteTablePropagation(input *EnableTransitGatewayRouteTablePropagationInput) (*EnableTransitGatewayRouteTablePropagationOutput, error) { + req, out := c.EnableTransitGatewayRouteTablePropagationRequest(input) + return out, req.Send() +} + +// EnableTransitGatewayRouteTablePropagationWithContext is the same as EnableTransitGatewayRouteTablePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See EnableTransitGatewayRouteTablePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableTransitGatewayRouteTablePropagationWithContext(ctx aws.Context, input *EnableTransitGatewayRouteTablePropagationInput, opts ...request.Option) (*EnableTransitGatewayRouteTablePropagationOutput, error) { + req, out := c.EnableTransitGatewayRouteTablePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" + +// EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the EnableVgwRoutePropagation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableVgwRoutePropagation for more information on using the EnableVgwRoutePropagation +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableVgwRoutePropagationRequest method. +// req, resp := client.EnableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagation +func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { + op := &request.Operation{ + Name: opEnableVgwRoutePropagation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVgwRoutePropagationInput{} + } + + output = &EnableVgwRoutePropagationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableVgwRoutePropagation API operation for Amazon Elastic Compute Cloud. +// +// Enables a virtual private gateway (VGW) to propagate routes to the specified +// route table of a VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableVgwRoutePropagation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagation +func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + return out, req.Send() +} + +// EnableVgwRoutePropagationWithContext is the same as EnableVgwRoutePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVgwRoutePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVgwRoutePropagationWithContext(ctx aws.Context, input *EnableVgwRoutePropagationInput, opts ...request.Option) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableVolumeIO = "EnableVolumeIO" + +// EnableVolumeIORequest generates a "aws/request.Request" representing the +// client's request for the EnableVolumeIO operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableVolumeIO for more information on using the EnableVolumeIO +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableVolumeIORequest method. 
+// req, resp := client.EnableVolumeIORequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVolumeIO +func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { + op := &request.Operation{ + Name: opEnableVolumeIO, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVolumeIOInput{} + } + + output = &EnableVolumeIOOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableVolumeIO API operation for Amazon Elastic Compute Cloud. +// +// Enables I/O operations for a volume that had I/O operations disabled because +// the data on the volume was potentially inconsistent. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableVolumeIO for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVolumeIO +func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + return out, req.Send() +} + +// EnableVolumeIOWithContext is the same as EnableVolumeIO with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVolumeIO for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) EnableVolumeIOWithContext(ctx aws.Context, input *EnableVolumeIOInput, opts ...request.Option) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableVpcClassicLink = "EnableVpcClassicLink" + +// EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLink operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableVpcClassicLink for more information on using the EnableVpcClassicLink +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableVpcClassicLinkRequest method. +// req, resp := client.EnableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLink +func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLink, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkInput{} + } + + output = &EnableVpcClassicLinkOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableVpcClassicLink API operation for Amazon Elastic Compute Cloud. +// +// Enables a VPC for ClassicLink. 
You can then link EC2-Classic instances to +// your ClassicLink-enabled VPC to allow communication over private IP addresses. +// You cannot enable your VPC for ClassicLink if any of your VPC route tables +// have existing routes for address ranges within the 10.0.0.0/8 IP address +// range, excluding local routes for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 +// IP address ranges. For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableVpcClassicLink for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLink +func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + return out, req.Send() +} + +// EnableVpcClassicLinkWithContext is the same as EnableVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVpcClassicLink for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVpcClassicLinkWithContext(ctx aws.Context, input *EnableVpcClassicLinkInput, opts ...request.Option) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" + +// EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableVpcClassicLinkDnsSupport for more information on using the EnableVpcClassicLinkDnsSupport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableVpcClassicLinkDnsSupportRequest method. +// req, resp := client.EnableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkDnsSupport +func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkDnsSupportInput{} + } + + output = &EnableVpcClassicLinkDnsSupportOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableVpcClassicLinkDnsSupport API operation for Amazon Elastic Compute Cloud. +// +// Enables a VPC to support DNS hostname resolution for ClassicLink. 
If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information, +// see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// You must specify a VPC ID in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation EnableVpcClassicLinkDnsSupport for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkDnsSupport +func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + return out, req.Send() +} + +// EnableVpcClassicLinkDnsSupportWithContext is the same as EnableVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) EnableVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *EnableVpcClassicLinkDnsSupportInput, opts ...request.Option) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportClientVpnClientCertificateRevocationList = "ExportClientVpnClientCertificateRevocationList" + +// ExportClientVpnClientCertificateRevocationListRequest generates a "aws/request.Request" representing the +// client's request for the ExportClientVpnClientCertificateRevocationList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportClientVpnClientCertificateRevocationList for more information on using the ExportClientVpnClientCertificateRevocationList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportClientVpnClientCertificateRevocationListRequest method. 
+// req, resp := client.ExportClientVpnClientCertificateRevocationListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportClientVpnClientCertificateRevocationList +func (c *EC2) ExportClientVpnClientCertificateRevocationListRequest(input *ExportClientVpnClientCertificateRevocationListInput) (req *request.Request, output *ExportClientVpnClientCertificateRevocationListOutput) { + op := &request.Operation{ + Name: opExportClientVpnClientCertificateRevocationList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportClientVpnClientCertificateRevocationListInput{} + } + + output = &ExportClientVpnClientCertificateRevocationListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportClientVpnClientCertificateRevocationList API operation for Amazon Elastic Compute Cloud. +// +// Downloads the client certificate revocation list for the specified Client +// VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportClientVpnClientCertificateRevocationList for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportClientVpnClientCertificateRevocationList +func (c *EC2) ExportClientVpnClientCertificateRevocationList(input *ExportClientVpnClientCertificateRevocationListInput) (*ExportClientVpnClientCertificateRevocationListOutput, error) { + req, out := c.ExportClientVpnClientCertificateRevocationListRequest(input) + return out, req.Send() +} + +// ExportClientVpnClientCertificateRevocationListWithContext is the same as ExportClientVpnClientCertificateRevocationList with the addition of +// the ability to pass a context and additional request options. +// +// See ExportClientVpnClientCertificateRevocationList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportClientVpnClientCertificateRevocationListWithContext(ctx aws.Context, input *ExportClientVpnClientCertificateRevocationListInput, opts ...request.Option) (*ExportClientVpnClientCertificateRevocationListOutput, error) { + req, out := c.ExportClientVpnClientCertificateRevocationListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportClientVpnClientConfiguration = "ExportClientVpnClientConfiguration" + +// ExportClientVpnClientConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the ExportClientVpnClientConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ExportClientVpnClientConfiguration for more information on using the ExportClientVpnClientConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportClientVpnClientConfigurationRequest method. +// req, resp := client.ExportClientVpnClientConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportClientVpnClientConfiguration +func (c *EC2) ExportClientVpnClientConfigurationRequest(input *ExportClientVpnClientConfigurationInput) (req *request.Request, output *ExportClientVpnClientConfigurationOutput) { + op := &request.Operation{ + Name: opExportClientVpnClientConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportClientVpnClientConfigurationInput{} + } + + output = &ExportClientVpnClientConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportClientVpnClientConfiguration API operation for Amazon Elastic Compute Cloud. +// +// Downloads the contents of the Client VPN endpoint configuration file for +// the specified Client VPN endpoint. The Client VPN endpoint configuration +// file includes the Client VPN endpoint and certificate information clients +// need to establish a connection with the Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportClientVpnClientConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportClientVpnClientConfiguration +func (c *EC2) ExportClientVpnClientConfiguration(input *ExportClientVpnClientConfigurationInput) (*ExportClientVpnClientConfigurationOutput, error) { + req, out := c.ExportClientVpnClientConfigurationRequest(input) + return out, req.Send() +} + +// ExportClientVpnClientConfigurationWithContext is the same as ExportClientVpnClientConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See ExportClientVpnClientConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportClientVpnClientConfigurationWithContext(ctx aws.Context, input *ExportClientVpnClientConfigurationInput, opts ...request.Option) (*ExportClientVpnClientConfigurationOutput, error) { + req, out := c.ExportClientVpnClientConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportImage = "ExportImage" + +// ExportImageRequest generates a "aws/request.Request" representing the +// client's request for the ExportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportImage for more information on using the ExportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ExportImageRequest method. +// req, resp := client.ExportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, output *ExportImageOutput) { + op := &request.Operation{ + Name: opExportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportImageInput{} + } + + output = &ExportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportImage API operation for Amazon Elastic Compute Cloud. +// +// Exports an Amazon Machine Image (AMI) to a VM file. For more information, +// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImage(input *ExportImageInput) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + return out, req.Send() +} + +// ExportImageWithContext is the same as ExportImage with the addition of +// the ability to pass a context and additional request options. +// +// See ExportImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportImageWithContext(ctx aws.Context, input *ExportImageInput, opts ...request.Option) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opExportTransitGatewayRoutes = "ExportTransitGatewayRoutes" + +// ExportTransitGatewayRoutesRequest generates a "aws/request.Request" representing the +// client's request for the ExportTransitGatewayRoutes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportTransitGatewayRoutes for more information on using the ExportTransitGatewayRoutes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportTransitGatewayRoutesRequest method. 
+// req, resp := client.ExportTransitGatewayRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportTransitGatewayRoutes +func (c *EC2) ExportTransitGatewayRoutesRequest(input *ExportTransitGatewayRoutesInput) (req *request.Request, output *ExportTransitGatewayRoutesOutput) { + op := &request.Operation{ + Name: opExportTransitGatewayRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportTransitGatewayRoutesInput{} + } + + output = &ExportTransitGatewayRoutesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportTransitGatewayRoutes API operation for Amazon Elastic Compute Cloud. +// +// Exports routes from the specified transit gateway route table to the specified +// S3 bucket. By default, all routes are exported. Alternatively, you can filter +// by CIDR range. +// +// The routes are saved to the specified bucket in a JSON file. For more information, +// see Export Route Tables to Amazon S3 (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-route-tables.html#tgw-export-route-tables) +// in Transit Gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportTransitGatewayRoutes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportTransitGatewayRoutes +func (c *EC2) ExportTransitGatewayRoutes(input *ExportTransitGatewayRoutesInput) (*ExportTransitGatewayRoutesOutput, error) { + req, out := c.ExportTransitGatewayRoutesRequest(input) + return out, req.Send() +} + +// ExportTransitGatewayRoutesWithContext is the same as ExportTransitGatewayRoutes with the addition of +// the ability to pass a context and additional request options. +// +// See ExportTransitGatewayRoutes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportTransitGatewayRoutesWithContext(ctx aws.Context, input *ExportTransitGatewayRoutesInput, opts ...request.Option) (*ExportTransitGatewayRoutesOutput, error) { + req, out := c.ExportTransitGatewayRoutesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAssociatedEnclaveCertificateIamRoles = "GetAssociatedEnclaveCertificateIamRoles" + +// GetAssociatedEnclaveCertificateIamRolesRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociatedEnclaveCertificateIamRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociatedEnclaveCertificateIamRoles for more information on using the GetAssociatedEnclaveCertificateIamRoles +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociatedEnclaveCertificateIamRolesRequest method. +// req, resp := client.GetAssociatedEnclaveCertificateIamRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesRequest(input *GetAssociatedEnclaveCertificateIamRolesInput) (req *request.Request, output *GetAssociatedEnclaveCertificateIamRolesOutput) { + op := &request.Operation{ + Name: opGetAssociatedEnclaveCertificateIamRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAssociatedEnclaveCertificateIamRolesInput{} + } + + output = &GetAssociatedEnclaveCertificateIamRolesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociatedEnclaveCertificateIamRoles API operation for Amazon Elastic Compute Cloud. +// +// Returns the IAM roles that are associated with the specified AWS Certificate +// Manager (ACM) certificate. It also returns the name of the Amazon S3 bucket +// and the Amazon S3 object key where the certificate, certificate chain, and +// encrypted private key bundle are stored, and the ARN of the AWS Key Management +// Service (KMS) customer master key (CMK) that's used to encrypt the private +// key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAssociatedEnclaveCertificateIamRoles for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRoles(input *GetAssociatedEnclaveCertificateIamRolesInput) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + return out, req.Send() +} + +// GetAssociatedEnclaveCertificateIamRolesWithContext is the same as GetAssociatedEnclaveCertificateIamRoles with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociatedEnclaveCertificateIamRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesWithContext(ctx aws.Context, input *GetAssociatedEnclaveCertificateIamRolesInput, opts ...request.Option) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAssociatedIpv6PoolCidrs = "GetAssociatedIpv6PoolCidrs" + +// GetAssociatedIpv6PoolCidrsRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociatedIpv6PoolCidrs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociatedIpv6PoolCidrs for more information on using the GetAssociatedIpv6PoolCidrs +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociatedIpv6PoolCidrsRequest method. +// req, resp := client.GetAssociatedIpv6PoolCidrsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedIpv6PoolCidrs +func (c *EC2) GetAssociatedIpv6PoolCidrsRequest(input *GetAssociatedIpv6PoolCidrsInput) (req *request.Request, output *GetAssociatedIpv6PoolCidrsOutput) { + op := &request.Operation{ + Name: opGetAssociatedIpv6PoolCidrs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetAssociatedIpv6PoolCidrsInput{} + } + + output = &GetAssociatedIpv6PoolCidrsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociatedIpv6PoolCidrs API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the IPv6 CIDR block associations for a specified IPv6 +// address pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAssociatedIpv6PoolCidrs for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedIpv6PoolCidrs +func (c *EC2) GetAssociatedIpv6PoolCidrs(input *GetAssociatedIpv6PoolCidrsInput) (*GetAssociatedIpv6PoolCidrsOutput, error) { + req, out := c.GetAssociatedIpv6PoolCidrsRequest(input) + return out, req.Send() +} + +// GetAssociatedIpv6PoolCidrsWithContext is the same as GetAssociatedIpv6PoolCidrs with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociatedIpv6PoolCidrs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedIpv6PoolCidrsWithContext(ctx aws.Context, input *GetAssociatedIpv6PoolCidrsInput, opts ...request.Option) (*GetAssociatedIpv6PoolCidrsOutput, error) { + req, out := c.GetAssociatedIpv6PoolCidrsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetAssociatedIpv6PoolCidrsPages iterates over the pages of a GetAssociatedIpv6PoolCidrs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAssociatedIpv6PoolCidrs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAssociatedIpv6PoolCidrs operation. 
+// pageNum := 0 +// err := client.GetAssociatedIpv6PoolCidrsPages(params, +// func(page *ec2.GetAssociatedIpv6PoolCidrsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetAssociatedIpv6PoolCidrsPages(input *GetAssociatedIpv6PoolCidrsInput, fn func(*GetAssociatedIpv6PoolCidrsOutput, bool) bool) error { + return c.GetAssociatedIpv6PoolCidrsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetAssociatedIpv6PoolCidrsPagesWithContext same as GetAssociatedIpv6PoolCidrsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedIpv6PoolCidrsPagesWithContext(ctx aws.Context, input *GetAssociatedIpv6PoolCidrsInput, fn func(*GetAssociatedIpv6PoolCidrsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetAssociatedIpv6PoolCidrsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetAssociatedIpv6PoolCidrsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetAssociatedIpv6PoolCidrsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetCapacityReservationUsage = "GetCapacityReservationUsage" + +// GetCapacityReservationUsageRequest generates a "aws/request.Request" representing the +// client's request for the GetCapacityReservationUsage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetCapacityReservationUsage for more information on using the GetCapacityReservationUsage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCapacityReservationUsageRequest method. +// req, resp := client.GetCapacityReservationUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCapacityReservationUsage +func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUsageInput) (req *request.Request, output *GetCapacityReservationUsageOutput) { + op := &request.Operation{ + Name: opGetCapacityReservationUsage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCapacityReservationUsageInput{} + } + + output = &GetCapacityReservationUsageOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCapacityReservationUsage API operation for Amazon Elastic Compute Cloud. +// +// Gets usage information about a Capacity Reservation. If the Capacity Reservation +// is shared, it shows usage information for the Capacity Reservation owner +// and each AWS account that is currently using the shared capacity. If the +// Capacity Reservation is not shared, it shows only the Capacity Reservation +// owner's usage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetCapacityReservationUsage for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCapacityReservationUsage +func (c *EC2) GetCapacityReservationUsage(input *GetCapacityReservationUsageInput) (*GetCapacityReservationUsageOutput, error) { + req, out := c.GetCapacityReservationUsageRequest(input) + return out, req.Send() +} + +// GetCapacityReservationUsageWithContext is the same as GetCapacityReservationUsage with the addition of +// the ability to pass a context and additional request options. +// +// See GetCapacityReservationUsage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetCapacityReservationUsageWithContext(ctx aws.Context, input *GetCapacityReservationUsageInput, opts ...request.Option) (*GetCapacityReservationUsageOutput, error) { + req, out := c.GetCapacityReservationUsageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCoipPoolUsage = "GetCoipPoolUsage" + +// GetCoipPoolUsageRequest generates a "aws/request.Request" representing the +// client's request for the GetCoipPoolUsage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCoipPoolUsage for more information on using the GetCoipPoolUsage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetCoipPoolUsageRequest method. +// req, resp := client.GetCoipPoolUsageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCoipPoolUsage +func (c *EC2) GetCoipPoolUsageRequest(input *GetCoipPoolUsageInput) (req *request.Request, output *GetCoipPoolUsageOutput) { + op := &request.Operation{ + Name: opGetCoipPoolUsage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCoipPoolUsageInput{} + } + + output = &GetCoipPoolUsageOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCoipPoolUsage API operation for Amazon Elastic Compute Cloud. +// +// Describes the allocations from the specified customer-owned address pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetCoipPoolUsage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetCoipPoolUsage +func (c *EC2) GetCoipPoolUsage(input *GetCoipPoolUsageInput) (*GetCoipPoolUsageOutput, error) { + req, out := c.GetCoipPoolUsageRequest(input) + return out, req.Send() +} + +// GetCoipPoolUsageWithContext is the same as GetCoipPoolUsage with the addition of +// the ability to pass a context and additional request options. +// +// See GetCoipPoolUsage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) GetCoipPoolUsageWithContext(ctx aws.Context, input *GetCoipPoolUsageInput, opts ...request.Option) (*GetCoipPoolUsageOutput, error) { + req, out := c.GetCoipPoolUsageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetConsoleOutput = "GetConsoleOutput" + +// GetConsoleOutputRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleOutput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetConsoleOutput for more information on using the GetConsoleOutput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetConsoleOutputRequest method. +// req, resp := client.GetConsoleOutputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutput +func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { + op := &request.Operation{ + Name: opGetConsoleOutput, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleOutputInput{} + } + + output = &GetConsoleOutputOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetConsoleOutput API operation for Amazon Elastic Compute Cloud. +// +// Gets the console output for the specified instance. 
For Linux instances, +// the instance console output displays the exact console output that would +// normally be displayed on a physical monitor attached to a computer. For Windows +// instances, the instance console output includes the last three system event +// log errors. +// +// By default, the console output returns buffered information that was posted +// shortly after an instance transition state (start, stop, reboot, or terminate). +// This information is available for at least one hour after the most recent +// post. Only the most recent 64 KB of console output is available. +// +// You can optionally retrieve the latest serial console output at any time +// during the instance lifecycle. This option is supported on instance types +// that use the Nitro hypervisor. +// +// For more information, see Instance Console Output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html#instance-console-console-output) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetConsoleOutput for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutput +func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + return out, req.Send() +} + +// GetConsoleOutputWithContext is the same as GetConsoleOutput with the addition of +// the ability to pass a context and additional request options. +// +// See GetConsoleOutput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetConsoleOutputWithContext(ctx aws.Context, input *GetConsoleOutputInput, opts ...request.Option) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetConsoleScreenshot = "GetConsoleScreenshot" + +// GetConsoleScreenshotRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleScreenshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetConsoleScreenshot for more information on using the GetConsoleScreenshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetConsoleScreenshotRequest method. 
+// req, resp := client.GetConsoleScreenshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleScreenshot +func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req *request.Request, output *GetConsoleScreenshotOutput) { + op := &request.Operation{ + Name: opGetConsoleScreenshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleScreenshotInput{} + } + + output = &GetConsoleScreenshotOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetConsoleScreenshot API operation for Amazon Elastic Compute Cloud. +// +// Retrieve a JPG-format screenshot of a running instance to help with troubleshooting. +// +// The returned content is Base64-encoded. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetConsoleScreenshot for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleScreenshot +func (c *EC2) GetConsoleScreenshot(input *GetConsoleScreenshotInput) (*GetConsoleScreenshotOutput, error) { + req, out := c.GetConsoleScreenshotRequest(input) + return out, req.Send() +} + +// GetConsoleScreenshotWithContext is the same as GetConsoleScreenshot with the addition of +// the ability to pass a context and additional request options. +// +// See GetConsoleScreenshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) GetConsoleScreenshotWithContext(ctx aws.Context, input *GetConsoleScreenshotInput, opts ...request.Option) (*GetConsoleScreenshotOutput, error) { + req, out := c.GetConsoleScreenshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDefaultCreditSpecification = "GetDefaultCreditSpecification" + +// GetDefaultCreditSpecificationRequest generates a "aws/request.Request" representing the +// client's request for the GetDefaultCreditSpecification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDefaultCreditSpecification for more information on using the GetDefaultCreditSpecification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDefaultCreditSpecificationRequest method. 
+// req, resp := client.GetDefaultCreditSpecificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetDefaultCreditSpecification +func (c *EC2) GetDefaultCreditSpecificationRequest(input *GetDefaultCreditSpecificationInput) (req *request.Request, output *GetDefaultCreditSpecificationOutput) { + op := &request.Operation{ + Name: opGetDefaultCreditSpecification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDefaultCreditSpecificationInput{} + } + + output = &GetDefaultCreditSpecificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDefaultCreditSpecification API operation for Amazon Elastic Compute Cloud. +// +// Describes the default credit option for CPU usage of a burstable performance +// instance family. +// +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetDefaultCreditSpecification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetDefaultCreditSpecification +func (c *EC2) GetDefaultCreditSpecification(input *GetDefaultCreditSpecificationInput) (*GetDefaultCreditSpecificationOutput, error) { + req, out := c.GetDefaultCreditSpecificationRequest(input) + return out, req.Send() +} + +// GetDefaultCreditSpecificationWithContext is the same as GetDefaultCreditSpecification with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetDefaultCreditSpecification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetDefaultCreditSpecificationWithContext(ctx aws.Context, input *GetDefaultCreditSpecificationInput, opts ...request.Option) (*GetDefaultCreditSpecificationOutput, error) { + req, out := c.GetDefaultCreditSpecificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetEbsDefaultKmsKeyId = "GetEbsDefaultKmsKeyId" + +// GetEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the GetEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEbsDefaultKmsKeyId for more information on using the GetEbsDefaultKmsKeyId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEbsDefaultKmsKeyIdRequest method. 
+// req, resp := client.GetEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsDefaultKmsKeyId +func (c *EC2) GetEbsDefaultKmsKeyIdRequest(input *GetEbsDefaultKmsKeyIdInput) (req *request.Request, output *GetEbsDefaultKmsKeyIdOutput) { + op := &request.Operation{ + Name: opGetEbsDefaultKmsKeyId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEbsDefaultKmsKeyIdInput{} + } + + output = &GetEbsDefaultKmsKeyIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Describes the default customer master key (CMK) for EBS encryption by default +// for your account in this Region. You can change the default CMK for encryption +// by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId. +// +// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetEbsDefaultKmsKeyId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsDefaultKmsKeyId +func (c *EC2) GetEbsDefaultKmsKeyId(input *GetEbsDefaultKmsKeyIdInput) (*GetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.GetEbsDefaultKmsKeyIdRequest(input) + return out, req.Send() +} + +// GetEbsDefaultKmsKeyIdWithContext is the same as GetEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *GetEbsDefaultKmsKeyIdInput, opts ...request.Option) (*GetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.GetEbsDefaultKmsKeyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetEbsEncryptionByDefault = "GetEbsEncryptionByDefault" + +// GetEbsEncryptionByDefaultRequest generates a "aws/request.Request" representing the +// client's request for the GetEbsEncryptionByDefault operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetEbsEncryptionByDefault for more information on using the GetEbsEncryptionByDefault +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetEbsEncryptionByDefaultRequest method. 
+// req, resp := client.GetEbsEncryptionByDefaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsEncryptionByDefault +func (c *EC2) GetEbsEncryptionByDefaultRequest(input *GetEbsEncryptionByDefaultInput) (req *request.Request, output *GetEbsEncryptionByDefaultOutput) { + op := &request.Operation{ + Name: opGetEbsEncryptionByDefault, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetEbsEncryptionByDefaultInput{} + } + + output = &GetEbsEncryptionByDefaultOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetEbsEncryptionByDefault API operation for Amazon Elastic Compute Cloud. +// +// Describes whether EBS encryption by default is enabled for your account in +// the current Region. +// +// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetEbsEncryptionByDefault for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetEbsEncryptionByDefault +func (c *EC2) GetEbsEncryptionByDefault(input *GetEbsEncryptionByDefaultInput) (*GetEbsEncryptionByDefaultOutput, error) { + req, out := c.GetEbsEncryptionByDefaultRequest(input) + return out, req.Send() +} + +// GetEbsEncryptionByDefaultWithContext is the same as GetEbsEncryptionByDefault with the addition of +// the ability to pass a context and additional request options. +// +// See GetEbsEncryptionByDefault for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEbsEncryptionByDefaultInput, opts ...request.Option) (*GetEbsEncryptionByDefaultOutput, error) { + req, out := c.GetEbsEncryptionByDefaultRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetGroupsForCapacityReservation = "GetGroupsForCapacityReservation" + +// GetGroupsForCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupsForCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroupsForCapacityReservation for more information on using the GetGroupsForCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupsForCapacityReservationRequest method. 
+// req, resp := client.GetGroupsForCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservationRequest(input *GetGroupsForCapacityReservationInput) (req *request.Request, output *GetGroupsForCapacityReservationOutput) { + op := &request.Operation{ + Name: opGetGroupsForCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetGroupsForCapacityReservationInput{} + } + + output = &GetGroupsForCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroupsForCapacityReservation API operation for Amazon Elastic Compute Cloud. +// +// Lists the resource groups to which a Capacity Reservation has been added. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetGroupsForCapacityReservation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservation(input *GetGroupsForCapacityReservationInput) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + return out, req.Send() +} + +// GetGroupsForCapacityReservationWithContext is the same as GetGroupsForCapacityReservation with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetGroupsForCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, opts ...request.Option) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetGroupsForCapacityReservationPages iterates over the pages of a GetGroupsForCapacityReservation operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroupsForCapacityReservation method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetGroupsForCapacityReservation operation. +// pageNum := 0 +// err := client.GetGroupsForCapacityReservationPages(params, +// func(page *ec2.GetGroupsForCapacityReservationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetGroupsForCapacityReservationPages(input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool) error { + return c.GetGroupsForCapacityReservationPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetGroupsForCapacityReservationPagesWithContext same as GetGroupsForCapacityReservationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationPagesWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetGroupsForCapacityReservationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetGroupsForCapacityReservationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetGroupsForCapacityReservationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" + +// GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the +// client's request for the GetHostReservationPurchasePreview operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetHostReservationPurchasePreview for more information on using the GetHostReservationPurchasePreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetHostReservationPurchasePreviewRequest method. 
+// req, resp := client.GetHostReservationPurchasePreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetHostReservationPurchasePreview +func (c *EC2) GetHostReservationPurchasePreviewRequest(input *GetHostReservationPurchasePreviewInput) (req *request.Request, output *GetHostReservationPurchasePreviewOutput) { + op := &request.Operation{ + Name: opGetHostReservationPurchasePreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetHostReservationPurchasePreviewInput{} + } + + output = &GetHostReservationPurchasePreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetHostReservationPurchasePreview API operation for Amazon Elastic Compute Cloud. +// +// Preview a reservation purchase with configurations that match those of your +// Dedicated Host. You must have active Dedicated Hosts in your account before +// you purchase a reservation. +// +// This is a preview of the PurchaseHostReservation action and does not result +// in the offering being purchased. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetHostReservationPurchasePreview for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetHostReservationPurchasePreview +func (c *EC2) GetHostReservationPurchasePreview(input *GetHostReservationPurchasePreviewInput) (*GetHostReservationPurchasePreviewOutput, error) { + req, out := c.GetHostReservationPurchasePreviewRequest(input) + return out, req.Send() +} + +// GetHostReservationPurchasePreviewWithContext is the same as GetHostReservationPurchasePreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetHostReservationPurchasePreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetHostReservationPurchasePreviewWithContext(ctx aws.Context, input *GetHostReservationPurchasePreviewInput, opts ...request.Option) (*GetHostReservationPurchasePreviewOutput, error) { + req, out := c.GetHostReservationPurchasePreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLaunchTemplateData = "GetLaunchTemplateData" + +// GetLaunchTemplateDataRequest generates a "aws/request.Request" representing the +// client's request for the GetLaunchTemplateData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLaunchTemplateData for more information on using the GetLaunchTemplateData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLaunchTemplateDataRequest method. +// req, resp := client.GetLaunchTemplateDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetLaunchTemplateData +func (c *EC2) GetLaunchTemplateDataRequest(input *GetLaunchTemplateDataInput) (req *request.Request, output *GetLaunchTemplateDataOutput) { + op := &request.Operation{ + Name: opGetLaunchTemplateData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLaunchTemplateDataInput{} + } + + output = &GetLaunchTemplateDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLaunchTemplateData API operation for Amazon Elastic Compute Cloud. +// +// Retrieves the configuration data of the specified instance. You can use this +// data to create a launch template. +// +// This action calls on other describe actions to get instance information. +// Depending on your instance configuration, you may need to allow the following +// actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, +// DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, +// you can allow describe* depending on your instance requirements. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetLaunchTemplateData for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetLaunchTemplateData +func (c *EC2) GetLaunchTemplateData(input *GetLaunchTemplateDataInput) (*GetLaunchTemplateDataOutput, error) { + req, out := c.GetLaunchTemplateDataRequest(input) + return out, req.Send() +} + +// GetLaunchTemplateDataWithContext is the same as GetLaunchTemplateData with the addition of +// the ability to pass a context and additional request options. +// +// See GetLaunchTemplateData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetLaunchTemplateDataWithContext(ctx aws.Context, input *GetLaunchTemplateDataInput, opts ...request.Option) (*GetLaunchTemplateDataOutput, error) { + req, out := c.GetLaunchTemplateDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetManagedPrefixListAssociations = "GetManagedPrefixListAssociations" + +// GetManagedPrefixListAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListAssociations for more information on using the GetManagedPrefixListAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetManagedPrefixListAssociationsRequest method. +// req, resp := client.GetManagedPrefixListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociationsRequest(input *GetManagedPrefixListAssociationsInput) (req *request.Request, output *GetManagedPrefixListAssociationsOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListAssociationsInput{} + } + + output = &GetManagedPrefixListAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the resources that are associated with the specified +// managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListAssociations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociations(input *GetManagedPrefixListAssociationsInput) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsWithContext is the same as GetManagedPrefixListAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, opts ...request.Option) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsPages iterates over the pages of a GetManagedPrefixListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListAssociations operation. 
+// pageNum := 0 +// err := client.GetManagedPrefixListAssociationsPages(params, +// func(page *ec2.GetManagedPrefixListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListAssociationsPages(input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool) error { + return c.GetManagedPrefixListAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListAssociationsPagesWithContext same as GetManagedPrefixListAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsPagesWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetManagedPrefixListEntries = "GetManagedPrefixListEntries" + +// GetManagedPrefixListEntriesRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListEntries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListEntries for more information on using the GetManagedPrefixListEntries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListEntriesRequest method. +// req, resp := client.GetManagedPrefixListEntriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntriesRequest(input *GetManagedPrefixListEntriesInput) (req *request.Request, output *GetManagedPrefixListEntriesOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListEntries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListEntriesInput{} + } + + output = &GetManagedPrefixListEntriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListEntries API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the entries for a specified managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListEntries for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntries(input *GetManagedPrefixListEntriesInput) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListEntriesWithContext is the same as GetManagedPrefixListEntries with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListEntries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, opts ...request.Option) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListEntriesPages iterates over the pages of a GetManagedPrefixListEntries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListEntries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListEntries operation. 
+// pageNum := 0 +// err := client.GetManagedPrefixListEntriesPages(params, +// func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListEntriesPages(input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool) error { + return c.GetManagedPrefixListEntriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListEntriesPagesWithContext same as GetManagedPrefixListEntriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesPagesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListEntriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListEntriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListEntriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetPasswordData = "GetPasswordData" + +// GetPasswordDataRequest generates a "aws/request.Request" representing the +// client's request for the GetPasswordData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetPasswordData for more information on using the GetPasswordData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPasswordDataRequest method. +// req, resp := client.GetPasswordDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordData +func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { + op := &request.Operation{ + Name: opGetPasswordData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPasswordDataInput{} + } + + output = &GetPasswordDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPasswordData API operation for Amazon Elastic Compute Cloud. +// +// Retrieves the encrypted administrator password for a running Windows instance. +// +// The Windows password is generated at boot by the EC2Config service or EC2Launch +// scripts (Windows Server 2016 and later). This usually only happens the first +// time an instance is launched. For more information, see EC2Config (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html) +// and EC2Launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For the EC2Config service, the password is not generated for rebundled AMIs +// unless Ec2SetPassword is enabled before bundling. +// +// The password is encrypted using the key pair that you specified when you +// launched the instance. You must provide the corresponding key pair file. 
+// +// When you launch an instance, password generation and encryption may take +// a few minutes. If you try to retrieve the password before it's available, +// the output returns an empty string. We recommend that you wait up to 15 minutes +// after launching an instance before trying to retrieve the generated password. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetPasswordData for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordData +func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + return out, req.Send() +} + +// GetPasswordDataWithContext is the same as GetPasswordData with the addition of +// the ability to pass a context and additional request options. +// +// See GetPasswordData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetPasswordDataWithContext(ctx aws.Context, input *GetPasswordDataInput, opts ...request.Option) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetReservedInstancesExchangeQuote = "GetReservedInstancesExchangeQuote" + +// GetReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the +// client's request for the GetReservedInstancesExchangeQuote operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetReservedInstancesExchangeQuote for more information on using the GetReservedInstancesExchangeQuote +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetReservedInstancesExchangeQuoteRequest method. +// req, resp := client.GetReservedInstancesExchangeQuoteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetReservedInstancesExchangeQuote +func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstancesExchangeQuoteInput) (req *request.Request, output *GetReservedInstancesExchangeQuoteOutput) { + op := &request.Operation{ + Name: opGetReservedInstancesExchangeQuote, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetReservedInstancesExchangeQuoteInput{} + } + + output = &GetReservedInstancesExchangeQuoteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud. +// +// Returns a quote and exchange information for exchanging one or more specified +// Convertible Reserved Instances for a new Convertible Reserved Instance. If +// the exchange cannot be performed, the reason is returned in the response. +// Use AcceptReservedInstancesExchangeQuote to perform the exchange. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetReservedInstancesExchangeQuote for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetReservedInstancesExchangeQuote +func (c *EC2) GetReservedInstancesExchangeQuote(input *GetReservedInstancesExchangeQuoteInput) (*GetReservedInstancesExchangeQuoteOutput, error) { + req, out := c.GetReservedInstancesExchangeQuoteRequest(input) + return out, req.Send() +} + +// GetReservedInstancesExchangeQuoteWithContext is the same as GetReservedInstancesExchangeQuote with the addition of +// the ability to pass a context and additional request options. +// +// See GetReservedInstancesExchangeQuote for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetReservedInstancesExchangeQuoteWithContext(ctx aws.Context, input *GetReservedInstancesExchangeQuoteInput, opts ...request.Option) (*GetReservedInstancesExchangeQuoteOutput, error) { + req, out := c.GetReservedInstancesExchangeQuoteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetTransitGatewayAttachmentPropagations = "GetTransitGatewayAttachmentPropagations" + +// GetTransitGatewayAttachmentPropagationsRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayAttachmentPropagations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayAttachmentPropagations for more information on using the GetTransitGatewayAttachmentPropagations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayAttachmentPropagationsRequest method. +// req, resp := client.GetTransitGatewayAttachmentPropagationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayAttachmentPropagations +func (c *EC2) GetTransitGatewayAttachmentPropagationsRequest(input *GetTransitGatewayAttachmentPropagationsInput) (req *request.Request, output *GetTransitGatewayAttachmentPropagationsOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayAttachmentPropagations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayAttachmentPropagationsInput{} + } + + output = &GetTransitGatewayAttachmentPropagationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayAttachmentPropagations API operation for Amazon Elastic Compute Cloud. +// +// Lists the route tables to which the specified resource attachment propagates +// routes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayAttachmentPropagations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayAttachmentPropagations +func (c *EC2) GetTransitGatewayAttachmentPropagations(input *GetTransitGatewayAttachmentPropagationsInput) (*GetTransitGatewayAttachmentPropagationsOutput, error) { + req, out := c.GetTransitGatewayAttachmentPropagationsRequest(input) + return out, req.Send() +} + +// GetTransitGatewayAttachmentPropagationsWithContext is the same as GetTransitGatewayAttachmentPropagations with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayAttachmentPropagations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayAttachmentPropagationsWithContext(ctx aws.Context, input *GetTransitGatewayAttachmentPropagationsInput, opts ...request.Option) (*GetTransitGatewayAttachmentPropagationsOutput, error) { + req, out := c.GetTransitGatewayAttachmentPropagationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayAttachmentPropagationsPages iterates over the pages of a GetTransitGatewayAttachmentPropagations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayAttachmentPropagations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a GetTransitGatewayAttachmentPropagations operation. +// pageNum := 0 +// err := client.GetTransitGatewayAttachmentPropagationsPages(params, +// func(page *ec2.GetTransitGatewayAttachmentPropagationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayAttachmentPropagationsPages(input *GetTransitGatewayAttachmentPropagationsInput, fn func(*GetTransitGatewayAttachmentPropagationsOutput, bool) bool) error { + return c.GetTransitGatewayAttachmentPropagationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayAttachmentPropagationsPagesWithContext same as GetTransitGatewayAttachmentPropagationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayAttachmentPropagationsPagesWithContext(ctx aws.Context, input *GetTransitGatewayAttachmentPropagationsInput, fn func(*GetTransitGatewayAttachmentPropagationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayAttachmentPropagationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayAttachmentPropagationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayAttachmentPropagationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetTransitGatewayMulticastDomainAssociations = "GetTransitGatewayMulticastDomainAssociations" + +// GetTransitGatewayMulticastDomainAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayMulticastDomainAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayMulticastDomainAssociations for more information on using the GetTransitGatewayMulticastDomainAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayMulticastDomainAssociationsRequest method. 
+// req, resp := client.GetTransitGatewayMulticastDomainAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayMulticastDomainAssociations +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsRequest(input *GetTransitGatewayMulticastDomainAssociationsInput) (req *request.Request, output *GetTransitGatewayMulticastDomainAssociationsOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayMulticastDomainAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayMulticastDomainAssociationsInput{} + } + + output = &GetTransitGatewayMulticastDomainAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayMulticastDomainAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the associations for the transit gateway multicast +// domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayMulticastDomainAssociations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayMulticastDomainAssociations +func (c *EC2) GetTransitGatewayMulticastDomainAssociations(input *GetTransitGatewayMulticastDomainAssociationsInput) (*GetTransitGatewayMulticastDomainAssociationsOutput, error) { + req, out := c.GetTransitGatewayMulticastDomainAssociationsRequest(input) + return out, req.Send() +} + +// GetTransitGatewayMulticastDomainAssociationsWithContext is the same as GetTransitGatewayMulticastDomainAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayMulticastDomainAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsWithContext(ctx aws.Context, input *GetTransitGatewayMulticastDomainAssociationsInput, opts ...request.Option) (*GetTransitGatewayMulticastDomainAssociationsOutput, error) { + req, out := c.GetTransitGatewayMulticastDomainAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayMulticastDomainAssociationsPages iterates over the pages of a GetTransitGatewayMulticastDomainAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayMulticastDomainAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayMulticastDomainAssociations operation. 
+// pageNum := 0 +// err := client.GetTransitGatewayMulticastDomainAssociationsPages(params, +// func(page *ec2.GetTransitGatewayMulticastDomainAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsPages(input *GetTransitGatewayMulticastDomainAssociationsInput, fn func(*GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool) error { + return c.GetTransitGatewayMulticastDomainAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayMulticastDomainAssociationsPagesWithContext same as GetTransitGatewayMulticastDomainAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsPagesWithContext(ctx aws.Context, input *GetTransitGatewayMulticastDomainAssociationsInput, fn func(*GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayMulticastDomainAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayMulticastDomainAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayMulticastDomainAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetTransitGatewayPrefixListReferences = "GetTransitGatewayPrefixListReferences" + +// GetTransitGatewayPrefixListReferencesRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayPrefixListReferences operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayPrefixListReferences for more information on using the GetTransitGatewayPrefixListReferences +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayPrefixListReferencesRequest method. 
+// req, resp := client.GetTransitGatewayPrefixListReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferencesRequest(input *GetTransitGatewayPrefixListReferencesInput) (req *request.Request, output *GetTransitGatewayPrefixListReferencesOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayPrefixListReferences, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayPrefixListReferencesInput{} + } + + output = &GetTransitGatewayPrefixListReferencesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayPrefixListReferences API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the prefix list references in a specified transit +// gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayPrefixListReferences for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferences(input *GetTransitGatewayPrefixListReferencesInput) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesWithContext is the same as GetTransitGatewayPrefixListReferences with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayPrefixListReferences for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, opts ...request.Option) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesPages iterates over the pages of a GetTransitGatewayPrefixListReferences operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayPrefixListReferences method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayPrefixListReferences operation. 
+// pageNum := 0 +// err := client.GetTransitGatewayPrefixListReferencesPages(params, +// func(page *ec2.GetTransitGatewayPrefixListReferencesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayPrefixListReferencesPages(input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool) error { + return c.GetTransitGatewayPrefixListReferencesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayPrefixListReferencesPagesWithContext same as GetTransitGatewayPrefixListReferencesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesPagesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayPrefixListReferencesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayPrefixListReferencesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayPrefixListReferencesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetTransitGatewayRouteTableAssociations = "GetTransitGatewayRouteTableAssociations" + +// GetTransitGatewayRouteTableAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayRouteTableAssociations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayRouteTableAssociations for more information on using the GetTransitGatewayRouteTableAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayRouteTableAssociationsRequest method. +// req, resp := client.GetTransitGatewayRouteTableAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTableAssociations +func (c *EC2) GetTransitGatewayRouteTableAssociationsRequest(input *GetTransitGatewayRouteTableAssociationsInput) (req *request.Request, output *GetTransitGatewayRouteTableAssociationsOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayRouteTableAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayRouteTableAssociationsInput{} + } + + output = &GetTransitGatewayRouteTableAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayRouteTableAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the associations for the specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayRouteTableAssociations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTableAssociations +func (c *EC2) GetTransitGatewayRouteTableAssociations(input *GetTransitGatewayRouteTableAssociationsInput) (*GetTransitGatewayRouteTableAssociationsOutput, error) { + req, out := c.GetTransitGatewayRouteTableAssociationsRequest(input) + return out, req.Send() +} + +// GetTransitGatewayRouteTableAssociationsWithContext is the same as GetTransitGatewayRouteTableAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayRouteTableAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayRouteTableAssociationsWithContext(ctx aws.Context, input *GetTransitGatewayRouteTableAssociationsInput, opts ...request.Option) (*GetTransitGatewayRouteTableAssociationsOutput, error) { + req, out := c.GetTransitGatewayRouteTableAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayRouteTableAssociationsPages iterates over the pages of a GetTransitGatewayRouteTableAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayRouteTableAssociations method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayRouteTableAssociations operation. +// pageNum := 0 +// err := client.GetTransitGatewayRouteTableAssociationsPages(params, +// func(page *ec2.GetTransitGatewayRouteTableAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayRouteTableAssociationsPages(input *GetTransitGatewayRouteTableAssociationsInput, fn func(*GetTransitGatewayRouteTableAssociationsOutput, bool) bool) error { + return c.GetTransitGatewayRouteTableAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayRouteTableAssociationsPagesWithContext same as GetTransitGatewayRouteTableAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayRouteTableAssociationsPagesWithContext(ctx aws.Context, input *GetTransitGatewayRouteTableAssociationsInput, fn func(*GetTransitGatewayRouteTableAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayRouteTableAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayRouteTableAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayRouteTableAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetTransitGatewayRouteTablePropagations = "GetTransitGatewayRouteTablePropagations" + +// GetTransitGatewayRouteTablePropagationsRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayRouteTablePropagations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayRouteTablePropagations for more information on using the GetTransitGatewayRouteTablePropagations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayRouteTablePropagationsRequest method. 
+// req, resp := client.GetTransitGatewayRouteTablePropagationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTablePropagations +func (c *EC2) GetTransitGatewayRouteTablePropagationsRequest(input *GetTransitGatewayRouteTablePropagationsInput) (req *request.Request, output *GetTransitGatewayRouteTablePropagationsOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayRouteTablePropagations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayRouteTablePropagationsInput{} + } + + output = &GetTransitGatewayRouteTablePropagationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayRouteTablePropagations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the route table propagations for the specified transit +// gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayRouteTablePropagations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayRouteTablePropagations +func (c *EC2) GetTransitGatewayRouteTablePropagations(input *GetTransitGatewayRouteTablePropagationsInput) (*GetTransitGatewayRouteTablePropagationsOutput, error) { + req, out := c.GetTransitGatewayRouteTablePropagationsRequest(input) + return out, req.Send() +} + +// GetTransitGatewayRouteTablePropagationsWithContext is the same as GetTransitGatewayRouteTablePropagations with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayRouteTablePropagations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayRouteTablePropagationsWithContext(ctx aws.Context, input *GetTransitGatewayRouteTablePropagationsInput, opts ...request.Option) (*GetTransitGatewayRouteTablePropagationsOutput, error) { + req, out := c.GetTransitGatewayRouteTablePropagationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayRouteTablePropagationsPages iterates over the pages of a GetTransitGatewayRouteTablePropagations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayRouteTablePropagations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayRouteTablePropagations operation. 
+// pageNum := 0 +// err := client.GetTransitGatewayRouteTablePropagationsPages(params, +// func(page *ec2.GetTransitGatewayRouteTablePropagationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayRouteTablePropagationsPages(input *GetTransitGatewayRouteTablePropagationsInput, fn func(*GetTransitGatewayRouteTablePropagationsOutput, bool) bool) error { + return c.GetTransitGatewayRouteTablePropagationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayRouteTablePropagationsPagesWithContext same as GetTransitGatewayRouteTablePropagationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayRouteTablePropagationsPagesWithContext(ctx aws.Context, input *GetTransitGatewayRouteTablePropagationsInput, fn func(*GetTransitGatewayRouteTablePropagationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayRouteTablePropagationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayRouteTablePropagationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayRouteTablePropagationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opImportClientVpnClientCertificateRevocationList = "ImportClientVpnClientCertificateRevocationList" + +// ImportClientVpnClientCertificateRevocationListRequest generates a "aws/request.Request" representing the +// client's request for the ImportClientVpnClientCertificateRevocationList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportClientVpnClientCertificateRevocationList for more information on using the ImportClientVpnClientCertificateRevocationList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportClientVpnClientCertificateRevocationListRequest method. 
+// req, resp := client.ImportClientVpnClientCertificateRevocationListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportClientVpnClientCertificateRevocationList +func (c *EC2) ImportClientVpnClientCertificateRevocationListRequest(input *ImportClientVpnClientCertificateRevocationListInput) (req *request.Request, output *ImportClientVpnClientCertificateRevocationListOutput) { + op := &request.Operation{ + Name: opImportClientVpnClientCertificateRevocationList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportClientVpnClientCertificateRevocationListInput{} + } + + output = &ImportClientVpnClientCertificateRevocationListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportClientVpnClientCertificateRevocationList API operation for Amazon Elastic Compute Cloud. +// +// Uploads a client certificate revocation list to the specified Client VPN +// endpoint. Uploading a client certificate revocation list overwrites the existing +// client certificate revocation list. +// +// Uploading a client certificate revocation list resets existing client connections. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportClientVpnClientCertificateRevocationList for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportClientVpnClientCertificateRevocationList +func (c *EC2) ImportClientVpnClientCertificateRevocationList(input *ImportClientVpnClientCertificateRevocationListInput) (*ImportClientVpnClientCertificateRevocationListOutput, error) { + req, out := c.ImportClientVpnClientCertificateRevocationListRequest(input) + return out, req.Send() +} + +// ImportClientVpnClientCertificateRevocationListWithContext is the same as ImportClientVpnClientCertificateRevocationList with the addition of +// the ability to pass a context and additional request options. +// +// See ImportClientVpnClientCertificateRevocationList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportClientVpnClientCertificateRevocationListWithContext(ctx aws.Context, input *ImportClientVpnClientCertificateRevocationListInput, opts ...request.Option) (*ImportClientVpnClientCertificateRevocationListOutput, error) { + req, out := c.ImportClientVpnClientCertificateRevocationListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportImage = "ImportImage" + +// ImportImageRequest generates a "aws/request.Request" representing the +// client's request for the ImportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportImage for more information on using the ImportImage +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportImageRequest method. +// req, resp := client.ImportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImage +func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { + op := &request.Operation{ + Name: opImportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportImageInput{} + } + + output = &ImportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportImage API operation for Amazon Elastic Compute Cloud. +// +// Import single or multi-volume disk images or EBS snapshots into an Amazon +// Machine Image (AMI). For more information, see Importing a VM as an Image +// Using VM Import/Export (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImage +func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + return out, req.Send() +} + +// ImportImageWithContext is the same as ImportImage with the addition of +// the ability to pass a context and additional request options. +// +// See ImportImage for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportImageWithContext(ctx aws.Context, input *ImportImageInput, opts ...request.Option) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportInstance = "ImportInstance" + +// ImportInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportInstance for more information on using the ImportInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportInstanceRequest method. 
+// req, resp := client.ImportInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstance +func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { + op := &request.Operation{ + Name: opImportInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstanceInput{} + } + + output = &ImportInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportInstance API operation for Amazon Elastic Compute Cloud. +// +// Creates an import instance task using metadata from the specified disk image. +// ImportInstance only supports single-volume VMs. To import multi-volume VMs, +// use ImportImage. For more information, see Importing a Virtual Machine Using +// the Amazon EC2 CLI (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ec2-cli-vmimport-export.html). +// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportInstance for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstance +func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + return out, req.Send() +} + +// ImportInstanceWithContext is the same as ImportInstance with the addition of +// the ability to pass a context and additional request options. 
+// +// See ImportInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportInstanceWithContext(ctx aws.Context, input *ImportInstanceInput, opts ...request.Option) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportKeyPair = "ImportKeyPair" + +// ImportKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the ImportKeyPair operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportKeyPair for more information on using the ImportKeyPair +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportKeyPairRequest method. 
+// req, resp := client.ImportKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportKeyPair +func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { + op := &request.Operation{ + Name: opImportKeyPair, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyPairInput{} + } + + output = &ImportKeyPairOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportKeyPair API operation for Amazon Elastic Compute Cloud. +// +// Imports the public key from an RSA key pair that you created with a third-party +// tool. Compare this with CreateKeyPair, in which AWS creates the key pair +// and gives the keys to you (AWS keeps a copy of the public key). With ImportKeyPair, +// you create the key pair and give AWS just the public key. The private key +// is never transferred between you and AWS. +// +// For more information about key pairs, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportKeyPair for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportKeyPair +func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + return out, req.Send() +} + +// ImportKeyPairWithContext is the same as ImportKeyPair with the addition of +// the ability to pass a context and additional request options. 
+// +// See ImportKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportKeyPairWithContext(ctx aws.Context, input *ImportKeyPairInput, opts ...request.Option) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportSnapshot = "ImportSnapshot" + +// ImportSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the ImportSnapshot operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportSnapshot for more information on using the ImportSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportSnapshotRequest method. 
+// req, resp := client.ImportSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshot +func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { + op := &request.Operation{ + Name: opImportSnapshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportSnapshotInput{} + } + + output = &ImportSnapshotOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportSnapshot API operation for Amazon Elastic Compute Cloud. +// +// Imports a disk into an EBS snapshot. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportSnapshot for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshot +func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + return out, req.Send() +} + +// ImportSnapshotWithContext is the same as ImportSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See ImportSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) ImportSnapshotWithContext(ctx aws.Context, input *ImportSnapshotInput, opts ...request.Option) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportVolume = "ImportVolume" + +// ImportVolumeRequest generates a "aws/request.Request" representing the +// client's request for the ImportVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportVolume for more information on using the ImportVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportVolumeRequest method. +// req, resp := client.ImportVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolume +func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { + op := &request.Operation{ + Name: opImportVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportVolumeInput{} + } + + output = &ImportVolumeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportVolume API operation for Amazon Elastic Compute Cloud. +// +// Creates an import volume task using metadata from the specified disk image.For +// more information, see Importing Disks to Amazon EBS (https://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/importing-your-volumes-into-amazon-ebs.html). 
+// +// For information about the import manifest referenced by this API action, +// see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ImportVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolume +func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + return out, req.Send() +} + +// ImportVolumeWithContext is the same as ImportVolume with the addition of +// the ability to pass a context and additional request options. +// +// See ImportVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, opts ...request.Option) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyAvailabilityZoneGroup = "ModifyAvailabilityZoneGroup" + +// ModifyAvailabilityZoneGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyAvailabilityZoneGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyAvailabilityZoneGroup for more information on using the ModifyAvailabilityZoneGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyAvailabilityZoneGroupRequest method. +// req, resp := client.ModifyAvailabilityZoneGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAvailabilityZoneGroup +func (c *EC2) ModifyAvailabilityZoneGroupRequest(input *ModifyAvailabilityZoneGroupInput) (req *request.Request, output *ModifyAvailabilityZoneGroupOutput) { + op := &request.Operation{ + Name: opModifyAvailabilityZoneGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyAvailabilityZoneGroupInput{} + } + + output = &ModifyAvailabilityZoneGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyAvailabilityZoneGroup API operation for Amazon Elastic Compute Cloud. +// +// Changes the opt-in status of the Local Zone and Wavelength Zone group for +// your account. +// +// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) +// to view the value for GroupName. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyAvailabilityZoneGroup for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAvailabilityZoneGroup +func (c *EC2) ModifyAvailabilityZoneGroup(input *ModifyAvailabilityZoneGroupInput) (*ModifyAvailabilityZoneGroupOutput, error) { + req, out := c.ModifyAvailabilityZoneGroupRequest(input) + return out, req.Send() +} + +// ModifyAvailabilityZoneGroupWithContext is the same as ModifyAvailabilityZoneGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyAvailabilityZoneGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyAvailabilityZoneGroupWithContext(ctx aws.Context, input *ModifyAvailabilityZoneGroupInput, opts ...request.Option) (*ModifyAvailabilityZoneGroupOutput, error) { + req, out := c.ModifyAvailabilityZoneGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyCapacityReservation = "ModifyCapacityReservation" + +// ModifyCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyCapacityReservation for more information on using the ModifyCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifyCapacityReservationRequest method. +// req, resp := client.ModifyCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyCapacityReservation +func (c *EC2) ModifyCapacityReservationRequest(input *ModifyCapacityReservationInput) (req *request.Request, output *ModifyCapacityReservationOutput) { + op := &request.Operation{ + Name: opModifyCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCapacityReservationInput{} + } + + output = &ModifyCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyCapacityReservation API operation for Amazon Elastic Compute Cloud. +// +// Modifies a Capacity Reservation's capacity and the conditions under which +// it is to be released. You cannot change a Capacity Reservation's instance +// type, EBS optimization, instance store settings, platform, Availability Zone, +// or instance eligibility. If you need to modify any of these attributes, we +// recommend that you cancel the Capacity Reservation, and then create a new +// one with the required attributes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyCapacityReservation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyCapacityReservation +func (c *EC2) ModifyCapacityReservation(input *ModifyCapacityReservationInput) (*ModifyCapacityReservationOutput, error) { + req, out := c.ModifyCapacityReservationRequest(input) + return out, req.Send() +} + +// ModifyCapacityReservationWithContext is the same as ModifyCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyCapacityReservationWithContext(ctx aws.Context, input *ModifyCapacityReservationInput, opts ...request.Option) (*ModifyCapacityReservationOutput, error) { + req, out := c.ModifyCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyClientVpnEndpoint = "ModifyClientVpnEndpoint" + +// ModifyClientVpnEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClientVpnEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyClientVpnEndpoint for more information on using the ModifyClientVpnEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifyClientVpnEndpointRequest method. +// req, resp := client.ModifyClientVpnEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyClientVpnEndpoint +func (c *EC2) ModifyClientVpnEndpointRequest(input *ModifyClientVpnEndpointInput) (req *request.Request, output *ModifyClientVpnEndpointOutput) { + op := &request.Operation{ + Name: opModifyClientVpnEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyClientVpnEndpointInput{} + } + + output = &ModifyClientVpnEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyClientVpnEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified Client VPN endpoint. Modifying the DNS server resets +// existing client connections. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyClientVpnEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyClientVpnEndpoint +func (c *EC2) ModifyClientVpnEndpoint(input *ModifyClientVpnEndpointInput) (*ModifyClientVpnEndpointOutput, error) { + req, out := c.ModifyClientVpnEndpointRequest(input) + return out, req.Send() +} + +// ModifyClientVpnEndpointWithContext is the same as ModifyClientVpnEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyClientVpnEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyClientVpnEndpointWithContext(ctx aws.Context, input *ModifyClientVpnEndpointInput, opts ...request.Option) (*ModifyClientVpnEndpointOutput, error) { + req, out := c.ModifyClientVpnEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyDefaultCreditSpecification = "ModifyDefaultCreditSpecification" + +// ModifyDefaultCreditSpecificationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDefaultCreditSpecification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyDefaultCreditSpecification for more information on using the ModifyDefaultCreditSpecification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyDefaultCreditSpecificationRequest method. 
+// req, resp := client.ModifyDefaultCreditSpecificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyDefaultCreditSpecification +func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCreditSpecificationInput) (req *request.Request, output *ModifyDefaultCreditSpecificationOutput) { + op := &request.Operation{ + Name: opModifyDefaultCreditSpecification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDefaultCreditSpecificationInput{} + } + + output = &ModifyDefaultCreditSpecificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyDefaultCreditSpecification API operation for Amazon Elastic Compute Cloud. +// +// Modifies the default credit option for CPU usage of burstable performance +// instances. The default credit option is set at the account level per AWS +// Region, and is specified per instance family. All new burstable performance +// instances in the account launch using the default credit option. +// +// ModifyDefaultCreditSpecification is an asynchronous operation, which works +// at an AWS Region level and modifies the credit option for each Availability +// Zone. All zones in a Region are updated within five minutes. But if instances +// are launched during this operation, they might not get the new credit option +// until the zone is updated. To verify whether the update has occurred, you +// can call GetDefaultCreditSpecification and check DefaultCreditSpecification +// for updates. +// +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyDefaultCreditSpecification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyDefaultCreditSpecification +func (c *EC2) ModifyDefaultCreditSpecification(input *ModifyDefaultCreditSpecificationInput) (*ModifyDefaultCreditSpecificationOutput, error) { + req, out := c.ModifyDefaultCreditSpecificationRequest(input) + return out, req.Send() +} + +// ModifyDefaultCreditSpecificationWithContext is the same as ModifyDefaultCreditSpecification with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyDefaultCreditSpecification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyDefaultCreditSpecificationWithContext(ctx aws.Context, input *ModifyDefaultCreditSpecificationInput, opts ...request.Option) (*ModifyDefaultCreditSpecificationOutput, error) { + req, out := c.ModifyDefaultCreditSpecificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyEbsDefaultKmsKeyId = "ModifyEbsDefaultKmsKeyId" + +// ModifyEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyEbsDefaultKmsKeyId for more information on using the ModifyEbsDefaultKmsKeyId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyEbsDefaultKmsKeyIdRequest method. +// req, resp := client.ModifyEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyEbsDefaultKmsKeyId +func (c *EC2) ModifyEbsDefaultKmsKeyIdRequest(input *ModifyEbsDefaultKmsKeyIdInput) (req *request.Request, output *ModifyEbsDefaultKmsKeyIdOutput) { + op := &request.Operation{ + Name: opModifyEbsDefaultKmsKeyId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyEbsDefaultKmsKeyIdInput{} + } + + output = &ModifyEbsDefaultKmsKeyIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Changes the default customer master key (CMK) for EBS encryption by default +// for your account in this Region. +// +// AWS creates a unique AWS managed CMK in each Region for use with encryption +// by default. If you change the default CMK to a symmetric customer managed +// CMK, it is used instead of the AWS managed CMK. To reset the default CMK +// to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does +// not support asymmetric CMKs. +// +// If you delete or disable the customer managed CMK that you specified for +// use with encryption by default, your instances will fail to launch. 
+// +// For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyEbsDefaultKmsKeyId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyEbsDefaultKmsKeyId +func (c *EC2) ModifyEbsDefaultKmsKeyId(input *ModifyEbsDefaultKmsKeyIdInput) (*ModifyEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ModifyEbsDefaultKmsKeyIdRequest(input) + return out, req.Send() +} + +// ModifyEbsDefaultKmsKeyIdWithContext is the same as ModifyEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *ModifyEbsDefaultKmsKeyIdInput, opts ...request.Option) (*ModifyEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ModifyEbsDefaultKmsKeyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyFleet = "ModifyFleet" + +// ModifyFleetRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFleet for more information on using the ModifyFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFleetRequest method. +// req, resp := client.ModifyFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFleet +func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, output *ModifyFleetOutput) { + op := &request.Operation{ + Name: opModifyFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFleetInput{} + } + + output = &ModifyFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFleet API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified EC2 Fleet. +// +// You can only modify an EC2 Fleet request of type maintain. +// +// While the EC2 Fleet is being modified, it is in the modifying state. +// +// To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches +// the additional Spot Instances according to the allocation strategy for the +// EC2 Fleet request. If the allocation strategy is lowest-price, the EC2 Fleet +// launches instances using the Spot Instance pool with the lowest price. If +// the allocation strategy is diversified, the EC2 Fleet distributes the instances +// across the Spot Instance pools. If the allocation strategy is capacity-optimized, +// EC2 Fleet launches instances from Spot Instance pools with optimal capacity +// for the number of instances that are launching. 
+// +// To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 +// Fleet cancels any open requests that exceed the new target capacity. You +// can request that the EC2 Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowest-price, the EC2 Fleet terminates the instances with the highest +// price per unit. If the allocation strategy is capacity-optimized, the EC2 +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the EC2 Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the EC2 Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. +// +// If you are finished with your EC2 Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFleet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFleet +func (c *EC2) ModifyFleet(input *ModifyFleetInput) (*ModifyFleetOutput, error) { + req, out := c.ModifyFleetRequest(input) + return out, req.Send() +} + +// ModifyFleetWithContext is the same as ModifyFleet with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyFleetWithContext(ctx aws.Context, input *ModifyFleetInput, opts ...request.Option) (*ModifyFleetOutput, error) { + req, out := c.ModifyFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" + +// ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFpgaImageAttribute for more information on using the ModifyFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFpgaImageAttributeRequest method. +// req, resp := client.ModifyFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttributeRequest(input *ModifyFpgaImageAttributeInput) (req *request.Request, output *ModifyFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFpgaImageAttributeInput{} + } + + output = &ModifyFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. 
+// +// Modifies the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFpgaImageAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttribute(input *ModifyFpgaImageAttributeInput) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ModifyFpgaImageAttributeWithContext is the same as ModifyFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyFpgaImageAttributeWithContext(ctx aws.Context, input *ModifyFpgaImageAttributeInput, opts ...request.Option) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyHosts = "ModifyHosts" + +// ModifyHostsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyHosts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyHosts for more information on using the ModifyHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyHostsRequest method. +// req, resp := client.ModifyHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHosts +func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) { + op := &request.Operation{ + Name: opModifyHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyHostsInput{} + } + + output = &ModifyHostsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyHosts API operation for Amazon Elastic Compute Cloud. +// +// Modify the auto-placement setting of a Dedicated Host. When auto-placement +// is enabled, any instances that you launch with a tenancy of host but without +// a specific host ID are placed onto any available Dedicated Host in your account +// that has auto-placement enabled. When auto-placement is disabled, you need +// to provide a host ID to have the instance launch onto a specific host. If +// no host ID is provided, the instance is launched onto a suitable host with +// auto-placement enabled. +// +// You can also use this API action to modify a Dedicated Host to support either +// multiple instance types in an instance family, or to support a specific instance +// type only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyHosts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHosts +func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + return out, req.Send() +} + +// ModifyHostsWithContext is the same as ModifyHosts with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyHostsWithContext(ctx aws.Context, input *ModifyHostsInput, opts ...request.Option) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyIdFormat = "ModifyIdFormat" + +// ModifyIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyIdFormat for more information on using the ModifyIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyIdFormatRequest method. 
+// req, resp := client.ModifyIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdFormat +func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdFormatInput{} + } + + output = &ModifyIdFormatOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Modifies the ID format for the specified resource on a per-Region basis. +// You can specify that resources should receive longer IDs (17-character IDs) +// when they are created. +// +// This request can only be used to modify longer ID settings for resource types +// that are within the opt-in period. Resources currently in their opt-in period +// include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation +// | elastic-ip-association | export-task | flow-log | image | import-task | +// internet-gateway | network-acl | network-acl-association | network-interface +// | network-interface-attachment | prefix-list | route-table | route-table-association +// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// This setting applies to the IAM user who makes the request; it does not apply +// to the entire AWS account. By default, an IAM user defaults to the same settings +// as the root user. 
If you're using this action as the root user, then these +// settings apply to the entire account, unless an IAM user explicitly overrides +// these settings for themselves. For more information, see Resource IDs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyIdFormat for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdFormat +func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + return out, req.Send() +} + +// ModifyIdFormatWithContext is the same as ModifyIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyIdFormatWithContext(ctx aws.Context, input *ModifyIdFormatInput, opts ...request.Option) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" + +// ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdentityIdFormat operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyIdentityIdFormat for more information on using the ModifyIdentityIdFormat +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyIdentityIdFormatRequest method. +// req, resp := client.ModifyIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdentityIdFormat +func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) (req *request.Request, output *ModifyIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdentityIdFormatInput{} + } + + output = &ModifyIdentityIdFormatOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyIdentityIdFormat API operation for Amazon Elastic Compute Cloud. +// +// Modifies the ID format of a resource for a specified IAM user, IAM role, +// or the root user for an account; or all IAM users, IAM roles, and the root +// user for an account. 
You can specify that resources should receive longer +// IDs (17-character IDs) when they are created. +// +// This request can only be used to modify longer ID settings for resource types +// that are within the opt-in period. Resources currently in their opt-in period +// include: bundle | conversion-task | customer-gateway | dhcp-options | elastic-ip-allocation +// | elastic-ip-association | export-task | flow-log | image | import-task | +// internet-gateway | network-acl | network-acl-association | network-interface +// | network-interface-attachment | prefix-list | route-table | route-table-association +// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association +// | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway. +// +// For more information, see Resource IDs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This setting applies to the principal specified in the request; it does not +// apply to the principal that makes the request. +// +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyIdentityIdFormat for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdentityIdFormat +func (c *EC2) ModifyIdentityIdFormat(input *ModifyIdentityIdFormatInput) (*ModifyIdentityIdFormatOutput, error) { + req, out := c.ModifyIdentityIdFormatRequest(input) + return out, req.Send() +} + +// ModifyIdentityIdFormatWithContext is the same as ModifyIdentityIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyIdentityIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyIdentityIdFormatWithContext(ctx aws.Context, input *ModifyIdentityIdFormatInput, opts ...request.Option) (*ModifyIdentityIdFormatOutput, error) { + req, out := c.ModifyIdentityIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyImageAttribute = "ModifyImageAttribute" + +// ModifyImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyImageAttribute for more information on using the ModifyImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyImageAttributeRequest method. 
+// req, resp := client.ModifyImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyImageAttribute +func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyImageAttributeInput{} + } + + output = &ModifyImageAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified AMI. You can specify only +// one attribute at a time. You can use the Attribute parameter to specify the +// attribute or one of the following parameters: Description, LaunchPermission, +// or ProductCode. +// +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// product code cannot be made public. +// +// To enable the SriovNetSupport enhanced networking attribute of an image, +// enable SriovNetSupport on an instance and create an AMI from the instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyImageAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyImageAttribute +func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + return out, req.Send() +} + +// ModifyImageAttributeWithContext is the same as ModifyImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyImageAttributeWithContext(ctx aws.Context, input *ModifyImageAttributeInput, opts ...request.Option) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstanceAttribute = "ModifyInstanceAttribute" + +// ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyInstanceAttribute for more information on using the ModifyInstanceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyInstanceAttributeRequest method. 
+// req, resp := client.ModifyInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceAttribute +func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { + op := &request.Operation{ + Name: opModifyInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceAttributeInput{} + } + + output = &ModifyInstanceAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyInstanceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified instance. You can specify +// only one attribute at a time. +// +// Note: Using this action to change the security groups associated with an +// elastic network interface (ENI) attached to an instance in a VPC can result +// in an error if the instance has more than one ENI. To change the security +// groups associated with an ENI attached to an instance that has multiple ENIs, +// we recommend that you use the ModifyNetworkInterfaceAttribute action. +// +// To modify some attributes, the instance must be stopped. For more information, +// see Modifying attributes of a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstanceAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceAttribute +func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + return out, req.Send() +} + +// ModifyInstanceAttributeWithContext is the same as ModifyInstanceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceAttributeWithContext(ctx aws.Context, input *ModifyInstanceAttributeInput, opts ...request.Option) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstanceCapacityReservationAttributes = "ModifyInstanceCapacityReservationAttributes" + +// ModifyInstanceCapacityReservationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceCapacityReservationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyInstanceCapacityReservationAttributes for more information on using the ModifyInstanceCapacityReservationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyInstanceCapacityReservationAttributesRequest method. +// req, resp := client.ModifyInstanceCapacityReservationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCapacityReservationAttributes +func (c *EC2) ModifyInstanceCapacityReservationAttributesRequest(input *ModifyInstanceCapacityReservationAttributesInput) (req *request.Request, output *ModifyInstanceCapacityReservationAttributesOutput) { + op := &request.Operation{ + Name: opModifyInstanceCapacityReservationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceCapacityReservationAttributesInput{} + } + + output = &ModifyInstanceCapacityReservationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyInstanceCapacityReservationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Modifies the Capacity Reservation settings for a stopped instance. Use this +// action to configure an instance to target a specific Capacity Reservation, +// run in any open Capacity Reservation with matching attributes, or run On-Demand +// Instance capacity. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstanceCapacityReservationAttributes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCapacityReservationAttributes +func (c *EC2) ModifyInstanceCapacityReservationAttributes(input *ModifyInstanceCapacityReservationAttributesInput) (*ModifyInstanceCapacityReservationAttributesOutput, error) { + req, out := c.ModifyInstanceCapacityReservationAttributesRequest(input) + return out, req.Send() +} + +// ModifyInstanceCapacityReservationAttributesWithContext is the same as ModifyInstanceCapacityReservationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstanceCapacityReservationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceCapacityReservationAttributesWithContext(ctx aws.Context, input *ModifyInstanceCapacityReservationAttributesInput, opts ...request.Option) (*ModifyInstanceCapacityReservationAttributesOutput, error) { + req, out := c.ModifyInstanceCapacityReservationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstanceCreditSpecification = "ModifyInstanceCreditSpecification" + +// ModifyInstanceCreditSpecificationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceCreditSpecification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ModifyInstanceCreditSpecification for more information on using the ModifyInstanceCreditSpecification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyInstanceCreditSpecificationRequest method. +// req, resp := client.ModifyInstanceCreditSpecificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCreditSpecification +func (c *EC2) ModifyInstanceCreditSpecificationRequest(input *ModifyInstanceCreditSpecificationInput) (req *request.Request, output *ModifyInstanceCreditSpecificationOutput) { + op := &request.Operation{ + Name: opModifyInstanceCreditSpecification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceCreditSpecificationInput{} + } + + output = &ModifyInstanceCreditSpecificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyInstanceCreditSpecification API operation for Amazon Elastic Compute Cloud. +// +// Modifies the credit option for CPU usage on a running or stopped burstable +// performance instance. The credit options are standard and unlimited. +// +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstanceCreditSpecification for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceCreditSpecification +func (c *EC2) ModifyInstanceCreditSpecification(input *ModifyInstanceCreditSpecificationInput) (*ModifyInstanceCreditSpecificationOutput, error) { + req, out := c.ModifyInstanceCreditSpecificationRequest(input) + return out, req.Send() +} + +// ModifyInstanceCreditSpecificationWithContext is the same as ModifyInstanceCreditSpecification with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstanceCreditSpecification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceCreditSpecificationWithContext(ctx aws.Context, input *ModifyInstanceCreditSpecificationInput, opts ...request.Option) (*ModifyInstanceCreditSpecificationOutput, error) { + req, out := c.ModifyInstanceCreditSpecificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstanceEventStartTime = "ModifyInstanceEventStartTime" + +// ModifyInstanceEventStartTimeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceEventStartTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyInstanceEventStartTime for more information on using the ModifyInstanceEventStartTime +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyInstanceEventStartTimeRequest method. +// req, resp := client.ModifyInstanceEventStartTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceEventStartTime +func (c *EC2) ModifyInstanceEventStartTimeRequest(input *ModifyInstanceEventStartTimeInput) (req *request.Request, output *ModifyInstanceEventStartTimeOutput) { + op := &request.Operation{ + Name: opModifyInstanceEventStartTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceEventStartTimeInput{} + } + + output = &ModifyInstanceEventStartTimeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyInstanceEventStartTime API operation for Amazon Elastic Compute Cloud. +// +// Modifies the start time for a scheduled Amazon EC2 instance event. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstanceEventStartTime for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceEventStartTime +func (c *EC2) ModifyInstanceEventStartTime(input *ModifyInstanceEventStartTimeInput) (*ModifyInstanceEventStartTimeOutput, error) { + req, out := c.ModifyInstanceEventStartTimeRequest(input) + return out, req.Send() +} + +// ModifyInstanceEventStartTimeWithContext is the same as ModifyInstanceEventStartTime with the addition of +// the ability to pass a context and additional request options. 
+// +// See ModifyInstanceEventStartTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceEventStartTimeWithContext(ctx aws.Context, input *ModifyInstanceEventStartTimeInput, opts ...request.Option) (*ModifyInstanceEventStartTimeOutput, error) { + req, out := c.ModifyInstanceEventStartTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstanceMetadataOptions = "ModifyInstanceMetadataOptions" + +// ModifyInstanceMetadataOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceMetadataOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyInstanceMetadataOptions for more information on using the ModifyInstanceMetadataOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyInstanceMetadataOptionsRequest method. 
+// req, resp := client.ModifyInstanceMetadataOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceMetadataOptions +func (c *EC2) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadataOptionsInput) (req *request.Request, output *ModifyInstanceMetadataOptionsOutput) { + op := &request.Operation{ + Name: opModifyInstanceMetadataOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceMetadataOptionsInput{} + } + + output = &ModifyInstanceMetadataOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyInstanceMetadataOptions API operation for Amazon Elastic Compute Cloud. +// +// Modify the instance metadata parameters on a running or stopped instance. +// When you modify the parameters on a stopped instance, they are applied when +// the instance is started. When you modify the parameters on a running instance, +// the API responds with a state of “pending”. After the parameter modifications +// are successfully applied to the instance, the state of the modifications +// changes from “pending” to “applied” in subsequent describe-instances +// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstanceMetadataOptions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceMetadataOptions +func (c *EC2) ModifyInstanceMetadataOptions(input *ModifyInstanceMetadataOptionsInput) (*ModifyInstanceMetadataOptionsOutput, error) { + req, out := c.ModifyInstanceMetadataOptionsRequest(input) + return out, req.Send() +} + +// ModifyInstanceMetadataOptionsWithContext is the same as ModifyInstanceMetadataOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstanceMetadataOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceMetadataOptionsWithContext(ctx aws.Context, input *ModifyInstanceMetadataOptionsInput, opts ...request.Option) (*ModifyInstanceMetadataOptionsOutput, error) { + req, out := c.ModifyInstanceMetadataOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyInstancePlacement = "ModifyInstancePlacement" + +// ModifyInstancePlacementRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstancePlacement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyInstancePlacement for more information on using the ModifyInstancePlacement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifyInstancePlacementRequest method. +// req, resp := client.ModifyInstancePlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstancePlacement +func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) { + op := &request.Operation{ + Name: opModifyInstancePlacement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstancePlacementInput{} + } + + output = &ModifyInstancePlacementOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyInstancePlacement API operation for Amazon Elastic Compute Cloud. +// +// Modifies the placement attributes for a specified instance. You can do the +// following: +// +// * Modify the affinity between an instance and a Dedicated Host (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html). +// When affinity is set to host and the instance is not associated with a +// specific Dedicated Host, the next time the instance is launched, it is +// automatically associated with the host on which it lands. If the instance +// is restarted or rebooted, this relationship persists. +// +// * Change the Dedicated Host with which an instance is associated. +// +// * Change the instance tenancy of an instance from host to dedicated, or +// from dedicated to host. +// +// * Move an instance to or from a placement group (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html). +// +// At least one attribute for affinity, host ID, tenancy, or placement group +// name must be specified in the request. Affinity and tenancy can be modified +// in the same request. 
+// +// To modify the host ID, tenancy, placement group, or partition for an instance, +// the instance must be in the stopped state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyInstancePlacement for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstancePlacement +func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + return out, req.Send() +} + +// ModifyInstancePlacementWithContext is the same as ModifyInstancePlacement with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstancePlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstancePlacementWithContext(ctx aws.Context, input *ModifyInstancePlacementInput, opts ...request.Option) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyLaunchTemplate = "ModifyLaunchTemplate" + +// ModifyLaunchTemplateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyLaunchTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyLaunchTemplate for more information on using the ModifyLaunchTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyLaunchTemplateRequest method. +// req, resp := client.ModifyLaunchTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyLaunchTemplate +func (c *EC2) ModifyLaunchTemplateRequest(input *ModifyLaunchTemplateInput) (req *request.Request, output *ModifyLaunchTemplateOutput) { + op := &request.Operation{ + Name: opModifyLaunchTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLaunchTemplateInput{} + } + + output = &ModifyLaunchTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyLaunchTemplate API operation for Amazon Elastic Compute Cloud. +// +// Modifies a launch template. You can specify which version of the launch template +// to set as the default version. When launching an instance, the default version +// applies when a launch template version is not specified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyLaunchTemplate for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyLaunchTemplate +func (c *EC2) ModifyLaunchTemplate(input *ModifyLaunchTemplateInput) (*ModifyLaunchTemplateOutput, error) { + req, out := c.ModifyLaunchTemplateRequest(input) + return out, req.Send() +} + +// ModifyLaunchTemplateWithContext is the same as ModifyLaunchTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyLaunchTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyLaunchTemplateWithContext(ctx aws.Context, input *ModifyLaunchTemplateInput, opts ...request.Option) (*ModifyLaunchTemplateOutput, error) { + req, out := c.ModifyLaunchTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyManagedPrefixList = "ModifyManagedPrefixList" + +// ModifyManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the ModifyManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyManagedPrefixList for more information on using the ModifyManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyManagedPrefixListRequest method. 
+// req, resp := client.ModifyManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixListRequest(input *ModifyManagedPrefixListInput) (req *request.Request, output *ModifyManagedPrefixListOutput) { + op := &request.Operation{ + Name: opModifyManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyManagedPrefixListInput{} + } + + output = &ModifyManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified managed prefix list. +// +// Adding or removing entries in a prefix list creates a new version of the +// prefix list. Changing the name of the prefix list does not affect the version. +// +// If you specify a current version number that does not match the true current +// version number, the request fails. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixList(input *ModifyManagedPrefixListInput) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + return out, req.Send() +} + +// ModifyManagedPrefixListWithContext is the same as ModifyManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyManagedPrefixList for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyManagedPrefixListWithContext(ctx aws.Context, input *ModifyManagedPrefixListInput, opts ...request.Option) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" + +// ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyNetworkInterfaceAttribute for more information on using the ModifyNetworkInterfaceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyNetworkInterfaceAttributeRequest method. 
+// req, resp := client.ModifyNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyNetworkInterfaceAttribute +func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opModifyNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyNetworkInterfaceAttributeInput{} + } + + output = &ModifyNetworkInterfaceAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified network interface attribute. You can specify only +// one attribute at a time. You can use this action to attach and detach security +// groups from an existing EC2 instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyNetworkInterfaceAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyNetworkInterfaceAttribute +func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + return out, req.Send() +} + +// ModifyNetworkInterfaceAttributeWithContext is the same as ModifyNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. 
+// +// See ModifyNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyNetworkInterfaceAttributeWithContext(ctx aws.Context, input *ModifyNetworkInterfaceAttributeInput, opts ...request.Option) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyReservedInstances = "ModifyReservedInstances" + +// ModifyReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReservedInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyReservedInstances for more information on using the ModifyReservedInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyReservedInstancesRequest method. 
+// req, resp := client.ModifyReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyReservedInstances +func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { + op := &request.Operation{ + Name: opModifyReservedInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReservedInstancesInput{} + } + + output = &ModifyReservedInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyReservedInstances API operation for Amazon Elastic Compute Cloud. +// +// Modifies the Availability Zone, instance count, instance type, or network +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. +// +// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyReservedInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyReservedInstances +func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + return out, req.Send() +} + +// ModifyReservedInstancesWithContext is the same as ModifyReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReservedInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyReservedInstancesWithContext(ctx aws.Context, input *ModifyReservedInstancesInput, opts ...request.Option) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifySnapshotAttribute = "ModifySnapshotAttribute" + +// ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySnapshotAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifySnapshotAttribute for more information on using the ModifySnapshotAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifySnapshotAttributeRequest method. +// req, resp := client.ModifySnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySnapshotAttribute +func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifySnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySnapshotAttributeInput{} + } + + output = &ModifySnapshotAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifySnapshotAttribute API operation for Amazon Elastic Compute Cloud. +// +// Adds or removes permission settings for the specified snapshot. You may add +// or remove specified AWS account IDs from a snapshot's list of create volume +// permissions, but you cannot do both in a single operation. If you need to +// both add and remove account IDs for a snapshot, you must use multiple operations. +// You can make up to 500 modifications to a snapshot in a single operation. +// +// Encrypted snapshots and snapshots with AWS Marketplace product codes cannot +// be made public. Snapshots encrypted with your default CMK cannot be shared +// with other accounts. +// +// For more information about modifying snapshot permissions, see Sharing snapshots +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifySnapshotAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySnapshotAttribute +func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + return out, req.Send() +} + +// ModifySnapshotAttributeWithContext is the same as ModifySnapshotAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifySnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySnapshotAttributeWithContext(ctx aws.Context, input *ModifySnapshotAttributeInput, opts ...request.Option) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifySpotFleetRequest = "ModifySpotFleetRequest" + +// ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the +// client's request for the ModifySpotFleetRequest operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifySpotFleetRequest for more information on using the ModifySpotFleetRequest +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifySpotFleetRequestRequest method. +// req, resp := client.ModifySpotFleetRequestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySpotFleetRequest +func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { + op := &request.Operation{ + Name: opModifySpotFleetRequest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySpotFleetRequestInput{} + } + + output = &ModifySpotFleetRequestOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifySpotFleetRequest API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified Spot Fleet request. +// +// You can only modify a Spot Fleet request of type maintain. +// +// While the Spot Fleet request is being modified, it is in the modifying state. +// +// To scale up your Spot Fleet, increase its target capacity. The Spot Fleet +// launches the additional Spot Instances according to the allocation strategy +// for the Spot Fleet request. If the allocation strategy is lowestPrice, the +// Spot Fleet launches instances using the Spot Instance pool with the lowest +// price. If the allocation strategy is diversified, the Spot Fleet distributes +// the instances across the Spot Instance pools. If the allocation strategy +// is capacityOptimized, Spot Fleet launches instances from Spot Instance pools +// with optimal capacity for the number of instances that are launching. +// +// To scale down your Spot Fleet, decrease its target capacity. First, the Spot +// Fleet cancels any open requests that exceed the new target capacity. 
You +// can request that the Spot Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the Spot Fleet terminates the instances with the highest +// price per unit. If the allocation strategy is capacityOptimized, the Spot +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the Spot Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. +// +// If you are finished with your Spot Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifySpotFleetRequest for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySpotFleetRequest +func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + return out, req.Send() +} + +// ModifySpotFleetRequestWithContext is the same as ModifySpotFleetRequest with the addition of +// the ability to pass a context and additional request options. +// +// See ModifySpotFleetRequest for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySpotFleetRequestWithContext(ctx aws.Context, input *ModifySpotFleetRequestInput, opts ...request.Option) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifySubnetAttribute = "ModifySubnetAttribute" + +// ModifySubnetAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySubnetAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifySubnetAttribute for more information on using the ModifySubnetAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifySubnetAttributeRequest method. 
+// req, resp := client.ModifySubnetAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySubnetAttribute +func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { + op := &request.Operation{ + Name: opModifySubnetAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySubnetAttributeInput{} + } + + output = &ModifySubnetAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifySubnetAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies a subnet attribute. You can only modify one attribute at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifySubnetAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySubnetAttribute +func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + return out, req.Send() +} + +// ModifySubnetAttributeWithContext is the same as ModifySubnetAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifySubnetAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySubnetAttributeWithContext(ctx aws.Context, input *ModifySubnetAttributeInput, opts ...request.Option) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTrafficMirrorFilterNetworkServices = "ModifyTrafficMirrorFilterNetworkServices" + +// ModifyTrafficMirrorFilterNetworkServicesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorFilterNetworkServices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorFilterNetworkServices for more information on using the ModifyTrafficMirrorFilterNetworkServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorFilterNetworkServicesRequest method. 
+// req, resp := client.ModifyTrafficMirrorFilterNetworkServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterNetworkServices +func (c *EC2) ModifyTrafficMirrorFilterNetworkServicesRequest(input *ModifyTrafficMirrorFilterNetworkServicesInput) (req *request.Request, output *ModifyTrafficMirrorFilterNetworkServicesOutput) { + op := &request.Operation{ + Name: opModifyTrafficMirrorFilterNetworkServices, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTrafficMirrorFilterNetworkServicesInput{} + } + + output = &ModifyTrafficMirrorFilterNetworkServicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTrafficMirrorFilterNetworkServices API operation for Amazon Elastic Compute Cloud. +// +// Allows or restricts mirroring network services. +// +// By default, Amazon DNS network services are not eligible for Traffic Mirror. +// Use AddNetworkServices to add network services to a Traffic Mirror filter. +// When a network service is added to the Traffic Mirror filter, all traffic +// related to that network service will be mirrored. When you no longer want +// to mirror network services, use RemoveNetworkServices to remove the network +// services from the Traffic Mirror filter. +// +// For information about filter rule properties, see Network Services (https://docs.aws.amazon.com/vpc/latest/mirroring/traffic-mirroring-considerations.html) +// in the Traffic Mirroring User Guide . +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorFilterNetworkServices for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterNetworkServices +func (c *EC2) ModifyTrafficMirrorFilterNetworkServices(input *ModifyTrafficMirrorFilterNetworkServicesInput) (*ModifyTrafficMirrorFilterNetworkServicesOutput, error) { + req, out := c.ModifyTrafficMirrorFilterNetworkServicesRequest(input) + return out, req.Send() +} + +// ModifyTrafficMirrorFilterNetworkServicesWithContext is the same as ModifyTrafficMirrorFilterNetworkServices with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorFilterNetworkServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorFilterNetworkServicesWithContext(ctx aws.Context, input *ModifyTrafficMirrorFilterNetworkServicesInput, opts ...request.Option) (*ModifyTrafficMirrorFilterNetworkServicesOutput, error) { + req, out := c.ModifyTrafficMirrorFilterNetworkServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTrafficMirrorFilterRule = "ModifyTrafficMirrorFilterRule" + +// ModifyTrafficMirrorFilterRuleRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorFilterRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorFilterRule for more information on using the ModifyTrafficMirrorFilterRule +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorFilterRuleRequest method. +// req, resp := client.ModifyTrafficMirrorFilterRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterRule +func (c *EC2) ModifyTrafficMirrorFilterRuleRequest(input *ModifyTrafficMirrorFilterRuleInput) (req *request.Request, output *ModifyTrafficMirrorFilterRuleOutput) { + op := &request.Operation{ + Name: opModifyTrafficMirrorFilterRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTrafficMirrorFilterRuleInput{} + } + + output = &ModifyTrafficMirrorFilterRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTrafficMirrorFilterRule API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified Traffic Mirror rule. +// +// DestinationCidrBlock and SourceCidrBlock must both be an IPv4 range or an +// IPv6 range. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorFilterRule for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorFilterRule +func (c *EC2) ModifyTrafficMirrorFilterRule(input *ModifyTrafficMirrorFilterRuleInput) (*ModifyTrafficMirrorFilterRuleOutput, error) { + req, out := c.ModifyTrafficMirrorFilterRuleRequest(input) + return out, req.Send() +} + +// ModifyTrafficMirrorFilterRuleWithContext is the same as ModifyTrafficMirrorFilterRule with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorFilterRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorFilterRuleWithContext(ctx aws.Context, input *ModifyTrafficMirrorFilterRuleInput, opts ...request.Option) (*ModifyTrafficMirrorFilterRuleOutput, error) { + req, out := c.ModifyTrafficMirrorFilterRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTrafficMirrorSession = "ModifyTrafficMirrorSession" + +// ModifyTrafficMirrorSessionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTrafficMirrorSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTrafficMirrorSession for more information on using the ModifyTrafficMirrorSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTrafficMirrorSessionRequest method. +// req, resp := client.ModifyTrafficMirrorSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorSession +func (c *EC2) ModifyTrafficMirrorSessionRequest(input *ModifyTrafficMirrorSessionInput) (req *request.Request, output *ModifyTrafficMirrorSessionOutput) { + op := &request.Operation{ + Name: opModifyTrafficMirrorSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTrafficMirrorSessionInput{} + } + + output = &ModifyTrafficMirrorSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTrafficMirrorSession API operation for Amazon Elastic Compute Cloud. +// +// Modifies a Traffic Mirror session. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTrafficMirrorSession for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTrafficMirrorSession +func (c *EC2) ModifyTrafficMirrorSession(input *ModifyTrafficMirrorSessionInput) (*ModifyTrafficMirrorSessionOutput, error) { + req, out := c.ModifyTrafficMirrorSessionRequest(input) + return out, req.Send() +} + +// ModifyTrafficMirrorSessionWithContext is the same as ModifyTrafficMirrorSession with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTrafficMirrorSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTrafficMirrorSessionWithContext(ctx aws.Context, input *ModifyTrafficMirrorSessionInput, opts ...request.Option) (*ModifyTrafficMirrorSessionOutput, error) { + req, out := c.ModifyTrafficMirrorSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTransitGateway = "ModifyTransitGateway" + +// ModifyTransitGatewayRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGateway for more information on using the ModifyTransitGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayRequest method. 
+// req, resp := client.ModifyTransitGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGatewayRequest(input *ModifyTransitGatewayInput) (req *request.Request, output *ModifyTransitGatewayOutput) { + op := &request.Operation{ + Name: opModifyTransitGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayInput{} + } + + output = &ModifyTransitGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGateway API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified transit gateway. When you modify a transit gateway, +// the modified options are applied to new transit gateway attachments only. +// Your existing transit gateway attachments are not modified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGateway(input *ModifyTransitGatewayInput) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayWithContext is the same as ModifyTransitGateway with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTransitGatewayWithContext(ctx aws.Context, input *ModifyTransitGatewayInput, opts ...request.Option) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTransitGatewayPrefixListReference = "ModifyTransitGatewayPrefixListReference" + +// ModifyTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGatewayPrefixListReference for more information on using the ModifyTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayPrefixListReferenceRequest method. 
+// req, resp := client.ModifyTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReferenceRequest(input *ModifyTransitGatewayPrefixListReferenceInput) (req *request.Request, output *ModifyTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opModifyTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayPrefixListReferenceInput{} + } + + output = &ModifyTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Modifies a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReference(input *ModifyTransitGatewayPrefixListReferenceInput) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayPrefixListReferenceWithContext is the same as ModifyTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. 
+// +// See ModifyTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *ModifyTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTransitGatewayVpcAttachment = "ModifyTransitGatewayVpcAttachment" + +// ModifyTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGatewayVpcAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGatewayVpcAttachment for more information on using the ModifyTransitGatewayVpcAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayVpcAttachmentRequest method. 
+// req, resp := client.ModifyTransitGatewayVpcAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayVpcAttachment +func (c *EC2) ModifyTransitGatewayVpcAttachmentRequest(input *ModifyTransitGatewayVpcAttachmentInput) (req *request.Request, output *ModifyTransitGatewayVpcAttachmentOutput) { + op := &request.Operation{ + Name: opModifyTransitGatewayVpcAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayVpcAttachmentInput{} + } + + output = &ModifyTransitGatewayVpcAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGatewayVpcAttachment API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified VPC attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGatewayVpcAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayVpcAttachment +func (c *EC2) ModifyTransitGatewayVpcAttachment(input *ModifyTransitGatewayVpcAttachmentInput) (*ModifyTransitGatewayVpcAttachmentOutput, error) { + req, out := c.ModifyTransitGatewayVpcAttachmentRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayVpcAttachmentWithContext is the same as ModifyTransitGatewayVpcAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGatewayVpcAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTransitGatewayVpcAttachmentWithContext(ctx aws.Context, input *ModifyTransitGatewayVpcAttachmentInput, opts ...request.Option) (*ModifyTransitGatewayVpcAttachmentOutput, error) { + req, out := c.ModifyTransitGatewayVpcAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVolume = "ModifyVolume" + +// ModifyVolumeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVolume operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVolume for more information on using the ModifyVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVolumeRequest method. +// req, resp := client.ModifyVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolume +func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Request, output *ModifyVolumeOutput) { + op := &request.Operation{ + Name: opModifyVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeInput{} + } + + output = &ModifyVolumeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVolume API operation for Amazon Elastic Compute Cloud. 
+// +// You can modify several parameters of an existing EBS volume, including volume +// size, volume type, and IOPS capacity. If your EBS volume is attached to a +// current-generation EC2 instance type, you might be able to apply these changes +// without stopping the instance or detaching the volume from it. For more information +// about modifying an EBS volume running Linux, see Modifying the size, IOPS, +// or type of an EBS volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). +// For more information about modifying an EBS volume running Windows, see Modifying +// the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// +// When you complete a resize operation on your volume, you need to extend the +// volume's file-system size to take advantage of the new storage capacity. +// For information about extending a Linux file system, see Extending a Linux +// file system (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). +// For information about extending a Windows file system, see Extending a Windows +// file system (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). +// +// You can use CloudWatch Events to check the status of a modification to an +// EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch +// Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). +// You can also track the status of a modification using DescribeVolumesModifications. +// For information about tracking status changes using either method, see Monitoring +// volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). 
+// +// With previous-generation instance types, resizing an EBS volume might require +// detaching and reattaching the volume or stopping and restarting the instance. +// For more information, see Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modify-volume.html) +// (Linux) or Amazon EBS Elastic Volumes (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-modify-volume.html) +// (Windows). +// +// If you reach the maximum volume modification rate per volume limit, you will +// need to wait at least six hours before applying further modifications to +// the affected EBS volume. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVolume for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolume +func (c *EC2) ModifyVolume(input *ModifyVolumeInput) (*ModifyVolumeOutput, error) { + req, out := c.ModifyVolumeRequest(input) + return out, req.Send() +} + +// ModifyVolumeWithContext is the same as ModifyVolume with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVolumeWithContext(ctx aws.Context, input *ModifyVolumeInput, opts ...request.Option) (*ModifyVolumeOutput, error) { + req, out := c.ModifyVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyVolumeAttribute = "ModifyVolumeAttribute" + +// ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVolumeAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVolumeAttribute for more information on using the ModifyVolumeAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVolumeAttributeRequest method. +// req, resp := client.ModifyVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeAttribute +func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { + op := &request.Operation{ + Name: opModifyVolumeAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeAttributeInput{} + } + + output = &ModifyVolumeAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyVolumeAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies a volume attribute. +// +// By default, all I/O operations for the volume are suspended when the data +// on the volume is determined to be potentially inconsistent, to prevent undetectable, +// latent data corruption. 
The I/O access to the volume can be resumed by first +// enabling I/O access and then checking the data consistency on your volume. +// +// You can change the default behavior to resume I/O operations. We recommend +// that you change this only for boot volumes or for volumes that are stateless +// or disposable. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVolumeAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeAttribute +func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + return out, req.Send() +} + +// ModifyVolumeAttributeWithContext is the same as ModifyVolumeAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVolumeAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVolumeAttributeWithContext(ctx aws.Context, input *ModifyVolumeAttributeInput, opts ...request.Option) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcAttribute = "ModifyVpcAttribute" + +// ModifyVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcAttribute operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcAttribute for more information on using the ModifyVpcAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcAttributeRequest method. +// req, resp := client.ModifyVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttribute +func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { + op := &request.Operation{ + Name: opModifyVpcAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcAttributeInput{} + } + + output = &ModifyVpcAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyVpcAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified VPC. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttribute +func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + return out, req.Send() +} + +// ModifyVpcAttributeWithContext is the same as ModifyVpcAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcAttributeWithContext(ctx aws.Context, input *ModifyVpcAttributeInput, opts ...request.Option) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcEndpoint = "ModifyVpcEndpoint" + +// ModifyVpcEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpoint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcEndpoint for more information on using the ModifyVpcEndpoint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcEndpointRequest method. 
+// req, resp := client.ModifyVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpoint +func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpoint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointInput{} + } + + output = &ModifyVpcEndpointOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcEndpoint API operation for Amazon Elastic Compute Cloud. +// +// Modifies attributes of a specified VPC endpoint. The attributes that you +// can modify depend on the type of VPC endpoint (interface, gateway, or Gateway +// Load Balancer). For more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcEndpoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpoint +func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + return out, req.Send() +} + +// ModifyVpcEndpointWithContext is the same as ModifyVpcEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcEndpointWithContext(ctx aws.Context, input *ModifyVpcEndpointInput, opts ...request.Option) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcEndpointConnectionNotification = "ModifyVpcEndpointConnectionNotification" + +// ModifyVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpointConnectionNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcEndpointConnectionNotification for more information on using the ModifyVpcEndpointConnectionNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcEndpointConnectionNotificationRequest method. 
+// req, resp := client.ModifyVpcEndpointConnectionNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointConnectionNotification +func (c *EC2) ModifyVpcEndpointConnectionNotificationRequest(input *ModifyVpcEndpointConnectionNotificationInput) (req *request.Request, output *ModifyVpcEndpointConnectionNotificationOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpointConnectionNotification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointConnectionNotificationInput{} + } + + output = &ModifyVpcEndpointConnectionNotificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcEndpointConnectionNotification API operation for Amazon Elastic Compute Cloud. +// +// Modifies a connection notification for VPC endpoint or VPC endpoint service. +// You can change the SNS topic for the notification, or the events for which +// to be notified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcEndpointConnectionNotification for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointConnectionNotification +func (c *EC2) ModifyVpcEndpointConnectionNotification(input *ModifyVpcEndpointConnectionNotificationInput) (*ModifyVpcEndpointConnectionNotificationOutput, error) { + req, out := c.ModifyVpcEndpointConnectionNotificationRequest(input) + return out, req.Send() +} + +// ModifyVpcEndpointConnectionNotificationWithContext is the same as ModifyVpcEndpointConnectionNotification with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcEndpointConnectionNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcEndpointConnectionNotificationWithContext(ctx aws.Context, input *ModifyVpcEndpointConnectionNotificationInput, opts ...request.Option) (*ModifyVpcEndpointConnectionNotificationOutput, error) { + req, out := c.ModifyVpcEndpointConnectionNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcEndpointServiceConfiguration = "ModifyVpcEndpointServiceConfiguration" + +// ModifyVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpointServiceConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ModifyVpcEndpointServiceConfiguration for more information on using the ModifyVpcEndpointServiceConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcEndpointServiceConfigurationRequest method. +// req, resp := client.ModifyVpcEndpointServiceConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServiceConfiguration +func (c *EC2) ModifyVpcEndpointServiceConfigurationRequest(input *ModifyVpcEndpointServiceConfigurationInput) (req *request.Request, output *ModifyVpcEndpointServiceConfigurationOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpointServiceConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointServiceConfigurationInput{} + } + + output = &ModifyVpcEndpointServiceConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcEndpointServiceConfiguration API operation for Amazon Elastic Compute Cloud. +// +// Modifies the attributes of your VPC endpoint service configuration. You can +// change the Network Load Balancers or Gateway Load Balancers for your service, +// and you can specify whether acceptance is required for requests to connect +// to your endpoint service through an interface VPC endpoint. +// +// If you set or modify the private DNS name, you must prove that you own the +// private DNS domain name. For more information, see VPC Endpoint Service Private +// DNS Name Verification (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html) +// in the Amazon Virtual Private Cloud User Guide. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcEndpointServiceConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServiceConfiguration +func (c *EC2) ModifyVpcEndpointServiceConfiguration(input *ModifyVpcEndpointServiceConfigurationInput) (*ModifyVpcEndpointServiceConfigurationOutput, error) { + req, out := c.ModifyVpcEndpointServiceConfigurationRequest(input) + return out, req.Send() +} + +// ModifyVpcEndpointServiceConfigurationWithContext is the same as ModifyVpcEndpointServiceConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcEndpointServiceConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcEndpointServiceConfigurationWithContext(ctx aws.Context, input *ModifyVpcEndpointServiceConfigurationInput, opts ...request.Option) (*ModifyVpcEndpointServiceConfigurationOutput, error) { + req, out := c.ModifyVpcEndpointServiceConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcEndpointServicePermissions = "ModifyVpcEndpointServicePermissions" + +// ModifyVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpointServicePermissions operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcEndpointServicePermissions for more information on using the ModifyVpcEndpointServicePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcEndpointServicePermissionsRequest method. +// req, resp := client.ModifyVpcEndpointServicePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServicePermissions +func (c *EC2) ModifyVpcEndpointServicePermissionsRequest(input *ModifyVpcEndpointServicePermissionsInput) (req *request.Request, output *ModifyVpcEndpointServicePermissionsOutput) { + op := &request.Operation{ + Name: opModifyVpcEndpointServicePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcEndpointServicePermissionsInput{} + } + + output = &ModifyVpcEndpointServicePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcEndpointServicePermissions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the permissions for your VPC endpoint service (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). +// You can add or remove permissions for service consumers (IAM users, IAM roles, +// and AWS accounts) to connect to your endpoint service. +// +// If you grant permissions to all principals, the service is public. 
Any users +// who know the name of a public service can send a request to attach an endpoint. +// If the service does not require manual approval, attachments are automatically +// approved. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcEndpointServicePermissions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpointServicePermissions +func (c *EC2) ModifyVpcEndpointServicePermissions(input *ModifyVpcEndpointServicePermissionsInput) (*ModifyVpcEndpointServicePermissionsOutput, error) { + req, out := c.ModifyVpcEndpointServicePermissionsRequest(input) + return out, req.Send() +} + +// ModifyVpcEndpointServicePermissionsWithContext is the same as ModifyVpcEndpointServicePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcEndpointServicePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcEndpointServicePermissionsWithContext(ctx aws.Context, input *ModifyVpcEndpointServicePermissionsInput, opts ...request.Option) (*ModifyVpcEndpointServicePermissionsOutput, error) { + req, out := c.ModifyVpcEndpointServicePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" + +// ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcPeeringConnectionOptions for more information on using the ModifyVpcPeeringConnectionOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcPeeringConnectionOptionsRequest method. +// req, resp := client.ModifyVpcPeeringConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcPeeringConnectionOptions +func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringConnectionOptionsInput) (req *request.Request, output *ModifyVpcPeeringConnectionOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpcPeeringConnectionOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcPeeringConnectionOptionsInput{} + } + + output = &ModifyVpcPeeringConnectionOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcPeeringConnectionOptions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPC peering connection options on one side of a VPC peering +// connection. 
You can do the following: +// +// * Enable/disable communication over the peering connection between an +// EC2-Classic instance that's linked to your VPC (using ClassicLink) and +// instances in the peer VPC. +// +// * Enable/disable communication over the peering connection between instances +// in your VPC and an EC2-Classic instance that's linked to the peer VPC. +// +// * Enable/disable the ability to resolve public DNS hostnames to private +// IP addresses when queried from instances in the peer VPC. +// +// If the peered VPCs are in the same AWS account, you can enable DNS resolution +// for queries from the local VPC. This ensures that queries from the local +// VPC resolve to private IP addresses in the peer VPC. This option is not available +// if the peered VPCs are in different AWS accounts or different Regions. For +// peered VPCs in different AWS accounts, each AWS account owner must initiate +// a separate request to modify the peering connection options. For inter-region +// peering connections, you must use the Region for the requester VPC to modify +// the requester VPC peering options and the Region for the accepter VPC to +// modify the accepter VPC peering options. To verify which VPCs are the accepter +// and the requester for a VPC peering connection, use the DescribeVpcPeeringConnections +// command. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcPeeringConnectionOptions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcPeeringConnectionOptions +func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) { + req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpcPeeringConnectionOptionsWithContext is the same as ModifyVpcPeeringConnectionOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcPeeringConnectionOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcPeeringConnectionOptionsWithContext(ctx aws.Context, input *ModifyVpcPeeringConnectionOptionsInput, opts ...request.Option) (*ModifyVpcPeeringConnectionOptionsOutput, error) { + req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpcTenancy = "ModifyVpcTenancy" + +// ModifyVpcTenancyRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcTenancy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcTenancy for more information on using the ModifyVpcTenancy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifyVpcTenancyRequest method. +// req, resp := client.ModifyVpcTenancyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancyRequest(input *ModifyVpcTenancyInput) (req *request.Request, output *ModifyVpcTenancyOutput) { + op := &request.Operation{ + Name: opModifyVpcTenancy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcTenancyInput{} + } + + output = &ModifyVpcTenancyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcTenancy API operation for Amazon Elastic Compute Cloud. +// +// Modifies the instance tenancy attribute of the specified VPC. You can change +// the instance tenancy attribute of a VPC to default only. You cannot change +// the instance tenancy attribute to dedicated. +// +// After you modify the tenancy of the VPC, any new instances that you launch +// into the VPC have a tenancy of default, unless you specify otherwise during +// launch. The tenancy of any existing instances in the VPC is not affected. +// +// For more information, see Dedicated Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcTenancy for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancy(input *ModifyVpcTenancyInput) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + return out, req.Send() +} + +// ModifyVpcTenancyWithContext is the same as ModifyVpcTenancy with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcTenancy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcTenancyWithContext(ctx aws.Context, input *ModifyVpcTenancyInput, opts ...request.Option) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpnConnection = "ModifyVpnConnection" + +// ModifyVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnConnection for more information on using the ModifyVpnConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnConnectionRequest method. 
+// req, resp := client.ModifyVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnection +func (c *EC2) ModifyVpnConnectionRequest(input *ModifyVpnConnectionInput) (req *request.Request, output *ModifyVpnConnectionOutput) { + op := &request.Operation{ + Name: opModifyVpnConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnConnectionInput{} + } + + output = &ModifyVpnConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnConnection API operation for Amazon Elastic Compute Cloud. +// +// Modifies the customer gateway or the target gateway of an AWS Site-to-Site +// VPN connection. To modify the target gateway, the following migration options +// are available: +// +// * An existing virtual private gateway to a new virtual private gateway +// +// * An existing virtual private gateway to a transit gateway +// +// * An existing transit gateway to a new transit gateway +// +// * An existing transit gateway to a virtual private gateway +// +// Before you perform the migration to the new gateway, you must configure the +// new gateway. Use CreateVpnGateway to create a virtual private gateway, or +// CreateTransitGateway to create a transit gateway. +// +// This step is required when you migrate from a virtual private gateway with +// static routes to a transit gateway. +// +// You must delete the static routes before you migrate to the new gateway. +// +// Keep a copy of the static route before you delete it. You will need to add +// back these routes to the transit gateway after the VPN connection migration +// is complete. +// +// After you migrate to the new gateway, you might need to modify your VPC route +// table. 
Use CreateRoute and DeleteRoute to make the changes described in VPN +// Gateway Target Modification Required VPC Route Table Updates (https://docs.aws.amazon.com/vpn/latest/s2svpn/modify-vpn-target.html#step-update-routing) +// in the AWS Site-to-Site VPN User Guide. +// +// When the new gateway is a transit gateway, modify the transit gateway route +// table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. +// Use CreateTransitGatewayRoute to add the routes. +// +// If you deleted VPN static routes, you must add the static routes to the transit +// gateway route table. +// +// After you perform this operation, the AWS VPN endpoint's IP addresses on +// the AWS side and the tunnel options remain intact. Your AWS Site-to-Site +// VPN connection will be temporarily unavailable for a brief period while we +// provision the new endpoints. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnection +func (c *EC2) ModifyVpnConnection(input *ModifyVpnConnectionInput) (*ModifyVpnConnectionOutput, error) { + req, out := c.ModifyVpnConnectionRequest(input) + return out, req.Send() +} + +// ModifyVpnConnectionWithContext is the same as ModifyVpnConnection with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnConnectionInput, opts ...request.Option) (*ModifyVpnConnectionOutput, error) { + req, out := c.ModifyVpnConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpnConnectionOptions = "ModifyVpnConnectionOptions" + +// ModifyVpnConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnConnectionOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnConnectionOptions for more information on using the ModifyVpnConnectionOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnConnectionOptionsRequest method. +// req, resp := client.ModifyVpnConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptionsRequest(input *ModifyVpnConnectionOptionsInput) (req *request.Request, output *ModifyVpnConnectionOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpnConnectionOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnConnectionOptionsInput{} + } + + output = &ModifyVpnConnectionOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnConnectionOptions API operation for Amazon Elastic Compute Cloud. 
+// +// Modifies the connection options for your Site-to-Site VPN connection. +// +// When you modify the VPN connection options, the VPN endpoint IP addresses +// on the AWS side do not change, and the tunnel options do not change. Your +// VPN connection will be temporarily unavailable for a brief period while the +// VPN connection is updated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnConnectionOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptions(input *ModifyVpnConnectionOptionsInput) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpnConnectionOptionsWithContext is the same as ModifyVpnConnectionOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnConnectionOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnConnectionOptionsWithContext(ctx aws.Context, input *ModifyVpnConnectionOptionsInput, opts ...request.Option) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" + +// ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelCertificate for more information on using the ModifyVpnTunnelCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelCertificateRequest method. +// req, resp := client.ModifyVpnTunnelCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificateRequest(input *ModifyVpnTunnelCertificateInput) (req *request.Request, output *ModifyVpnTunnelCertificateOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelCertificateInput{} + } + + output = &ModifyVpnTunnelCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelCertificate API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPN tunnel endpoint certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelCertificate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificate(input *ModifyVpnTunnelCertificateInput) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelCertificateWithContext is the same as ModifyVpnTunnelCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelCertificateWithContext(ctx aws.Context, input *ModifyVpnTunnelCertificateInput, opts ...request.Option) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyVpnTunnelOptions = "ModifyVpnTunnelOptions" + +// ModifyVpnTunnelOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelOptions for more information on using the ModifyVpnTunnelOptions +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelOptionsRequest method. +// req, resp := client.ModifyVpnTunnelOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelOptions +func (c *EC2) ModifyVpnTunnelOptionsRequest(input *ModifyVpnTunnelOptionsInput) (req *request.Request, output *ModifyVpnTunnelOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelOptionsInput{} + } + + output = &ModifyVpnTunnelOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelOptions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the options for a VPN tunnel in an AWS Site-to-Site VPN connection. +// You can modify multiple options for a tunnel in a single request, but you +// can only modify one tunnel at a time. For more information, see Site-to-Site +// VPN Tunnel Options for Your Site-to-Site VPN Connection (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPNTunnels.html) +// in the AWS Site-to-Site VPN User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelOptions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelOptions +func (c *EC2) ModifyVpnTunnelOptions(input *ModifyVpnTunnelOptionsInput) (*ModifyVpnTunnelOptionsOutput, error) { + req, out := c.ModifyVpnTunnelOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelOptionsWithContext is the same as ModifyVpnTunnelOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelOptionsWithContext(ctx aws.Context, input *ModifyVpnTunnelOptionsInput, opts ...request.Option) (*ModifyVpnTunnelOptionsOutput, error) { + req, out := c.ModifyVpnTunnelOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opMonitorInstances = "MonitorInstances" + +// MonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the MonitorInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See MonitorInstances for more information on using the MonitorInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the MonitorInstancesRequest method. 
+// req, resp := client.MonitorInstancesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstances
+func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) {
+ op := &request.Operation{
+ Name: opMonitorInstances,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &MonitorInstancesInput{}
+ }
+
+ output = &MonitorInstancesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// MonitorInstances API operation for Amazon Elastic Compute Cloud.
+//
+// Enables detailed monitoring for a running instance. Otherwise, basic monitoring
+// is enabled. For more information, see Monitoring your instances and volumes
+// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// To disable detailed monitoring, see UnmonitorInstances.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation MonitorInstances for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstances
+func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) {
+ req, out := c.MonitorInstancesRequest(input)
+ return out, req.Send()
+}
+
+// MonitorInstancesWithContext is the same as MonitorInstances with the addition of
+// the ability to pass a context and additional request options.
+//
+// See MonitorInstances for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) MonitorInstancesWithContext(ctx aws.Context, input *MonitorInstancesInput, opts ...request.Option) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opMoveAddressToVpc = "MoveAddressToVpc" + +// MoveAddressToVpcRequest generates a "aws/request.Request" representing the +// client's request for the MoveAddressToVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See MoveAddressToVpc for more information on using the MoveAddressToVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the MoveAddressToVpcRequest method. +// req, resp := client.MoveAddressToVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MoveAddressToVpc +func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { + op := &request.Operation{ + Name: opMoveAddressToVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAddressToVpcInput{} + } + + output = &MoveAddressToVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// MoveAddressToVpc API operation for Amazon Elastic Compute Cloud. 
+// +// Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC +// platform. The Elastic IP address must be allocated to your account for more +// than 24 hours, and it must not be associated with an instance. After the +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic +// platform, unless you move it back using the RestoreAddressToClassic request. +// You cannot move an Elastic IP address that was originally allocated for use +// in the EC2-VPC platform to the EC2-Classic platform. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation MoveAddressToVpc for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MoveAddressToVpc +func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + return out, req.Send() +} + +// MoveAddressToVpcWithContext is the same as MoveAddressToVpc with the addition of +// the ability to pass a context and additional request options. +// +// See MoveAddressToVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) MoveAddressToVpcWithContext(ctx aws.Context, input *MoveAddressToVpcInput, opts ...request.Option) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opProvisionByoipCidr = "ProvisionByoipCidr" + +// ProvisionByoipCidrRequest generates a "aws/request.Request" representing the +// client's request for the ProvisionByoipCidr operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ProvisionByoipCidr for more information on using the ProvisionByoipCidr +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ProvisionByoipCidrRequest method. +// req, resp := client.ProvisionByoipCidrRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ProvisionByoipCidr +func (c *EC2) ProvisionByoipCidrRequest(input *ProvisionByoipCidrInput) (req *request.Request, output *ProvisionByoipCidrOutput) { + op := &request.Operation{ + Name: opProvisionByoipCidr, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ProvisionByoipCidrInput{} + } + + output = &ProvisionByoipCidrOutput{} + req = c.newRequest(op, input, output) + return +} + +// ProvisionByoipCidr API operation for Amazon Elastic Compute Cloud. +// +// Provisions an IPv4 or IPv6 address range for use with your AWS resources +// through bring your own IP addresses (BYOIP) and creates a corresponding address +// pool. After the address range is provisioned, it is ready to be advertised +// using AdvertiseByoipCidr. +// +// AWS verifies that you own the address range and are authorized to advertise +// it. 
You must ensure that the address range is registered to you and that +// you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise +// the address range. For more information, see Bring Your Own IP Addresses +// (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Provisioning an address range is an asynchronous operation, so the call returns +// immediately, but the address range is not ready to use until its status changes +// from pending-provision to provisioned. To monitor the status of an address +// range, use DescribeByoipCidrs. To allocate an Elastic IP address from your +// IPv4 address pool, use AllocateAddress with either the specific address from +// the address pool or the ID of the address pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ProvisionByoipCidr for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ProvisionByoipCidr +func (c *EC2) ProvisionByoipCidr(input *ProvisionByoipCidrInput) (*ProvisionByoipCidrOutput, error) { + req, out := c.ProvisionByoipCidrRequest(input) + return out, req.Send() +} + +// ProvisionByoipCidrWithContext is the same as ProvisionByoipCidr with the addition of +// the ability to pass a context and additional request options. +// +// See ProvisionByoipCidr for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) ProvisionByoipCidrWithContext(ctx aws.Context, input *ProvisionByoipCidrInput, opts ...request.Option) (*ProvisionByoipCidrOutput, error) { + req, out := c.ProvisionByoipCidrRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPurchaseHostReservation = "PurchaseHostReservation" + +// PurchaseHostReservationRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseHostReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PurchaseHostReservation for more information on using the PurchaseHostReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PurchaseHostReservationRequest method. +// req, resp := client.PurchaseHostReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseHostReservation +func (c *EC2) PurchaseHostReservationRequest(input *PurchaseHostReservationInput) (req *request.Request, output *PurchaseHostReservationOutput) { + op := &request.Operation{ + Name: opPurchaseHostReservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseHostReservationInput{} + } + + output = &PurchaseHostReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PurchaseHostReservation API operation for Amazon Elastic Compute Cloud. 
+// +// Purchase a reservation with configurations that match those of your Dedicated +// Host. You must have active Dedicated Hosts in your account before you purchase +// a reservation. This action results in the specified reservation being purchased +// and charged to your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation PurchaseHostReservation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseHostReservation +func (c *EC2) PurchaseHostReservation(input *PurchaseHostReservationInput) (*PurchaseHostReservationOutput, error) { + req, out := c.PurchaseHostReservationRequest(input) + return out, req.Send() +} + +// PurchaseHostReservationWithContext is the same as PurchaseHostReservation with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseHostReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) PurchaseHostReservationWithContext(ctx aws.Context, input *PurchaseHostReservationInput, opts ...request.Option) (*PurchaseHostReservationOutput, error) { + req, out := c.PurchaseHostReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" + +// PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedInstancesOffering operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PurchaseReservedInstancesOffering for more information on using the PurchaseReservedInstancesOffering +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PurchaseReservedInstancesOfferingRequest method. +// req, resp := client.PurchaseReservedInstancesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseReservedInstancesOffering +func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedInstancesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedInstancesOfferingInput{} + } + + output = &PurchaseReservedInstancesOfferingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PurchaseReservedInstancesOffering API operation for Amazon Elastic Compute Cloud. +// +// Purchases a Reserved Instance for use with your account. With Reserved Instances, +// you pay a lower hourly rate compared to On-Demand instance pricing. +// +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance +// offerings that match your specifications. After you've purchased a Reserved +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. 
+// +// To queue a purchase for a future date and time, specify a purchase time. +// If you do not specify a purchase time, the default is the current time. +// +// For more information, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// and Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation PurchaseReservedInstancesOffering for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseReservedInstancesOffering +func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + return out, req.Send() +} + +// PurchaseReservedInstancesOfferingWithContext is the same as PurchaseReservedInstancesOffering with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseReservedInstancesOffering for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) PurchaseReservedInstancesOfferingWithContext(ctx aws.Context, input *PurchaseReservedInstancesOfferingInput, opts ...request.Option) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPurchaseScheduledInstances = "PurchaseScheduledInstances" + +// PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseScheduledInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PurchaseScheduledInstances for more information on using the PurchaseScheduledInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PurchaseScheduledInstancesRequest method. 
+// req, resp := client.PurchaseScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseScheduledInstances +func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { + op := &request.Operation{ + Name: opPurchaseScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseScheduledInstancesInput{} + } + + output = &PurchaseScheduledInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// PurchaseScheduledInstances API operation for Amazon Elastic Compute Cloud. +// +// Purchases the Scheduled Instances with the specified schedule. +// +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by +// the hour for a one-year term. Before you can purchase a Scheduled Instance, +// you must call DescribeScheduledInstanceAvailability to check for available +// schedules and obtain a purchase token. After you purchase a Scheduled Instance, +// you must call RunScheduledInstances during each scheduled time period. +// +// After you purchase a Scheduled Instance, you can't cancel, modify, or resell +// your purchase. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation PurchaseScheduledInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseScheduledInstances +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + return out, req.Send() +} + +// PurchaseScheduledInstancesWithContext is the same as PurchaseScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseScheduledInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) PurchaseScheduledInstancesWithContext(ctx aws.Context, input *PurchaseScheduledInstancesInput, opts ...request.Option) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRebootInstances = "RebootInstances" + +// RebootInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RebootInstances for more information on using the RebootInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RebootInstancesRequest method. 
+// req, resp := client.RebootInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RebootInstances +func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { + op := &request.Operation{ + Name: opRebootInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootInstancesInput{} + } + + output = &RebootInstancesOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RebootInstances API operation for Amazon Elastic Compute Cloud. +// +// Requests a reboot of the specified instances. This operation is asynchronous; +// it only queues a request to reboot the specified instances. The operation +// succeeds if the instances are valid and belong to you. Requests to reboot +// terminated instances are ignored. +// +// If an instance does not cleanly shut down within a few minutes, Amazon EC2 +// performs a hard reboot. +// +// For more information about troubleshooting, see Getting console output and +// rebooting instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RebootInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RebootInstances +func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + return out, req.Send() +} + +// RebootInstancesWithContext is the same as RebootInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RebootInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RebootInstancesWithContext(ctx aws.Context, input *RebootInstancesInput, opts ...request.Option) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterImage = "RegisterImage" + +// RegisterImageRequest generates a "aws/request.Request" representing the +// client's request for the RegisterImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterImage for more information on using the RegisterImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterImageRequest method. 
+// req, resp := client.RegisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterImage +func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { + op := &request.Operation{ + Name: opRegisterImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterImageInput{} + } + + output = &RegisterImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterImage API operation for Amazon Elastic Compute Cloud. +// +// Registers an AMI. When you're creating an AMI, this is the final step you +// must complete before you can launch an instance from the AMI. For more information +// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For Amazon EBS-backed instances, CreateImage creates and registers the AMI +// in a single request, so you don't have to register the AMI yourself. +// +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from +// a snapshot of a root device volume. You specify the snapshot using the block +// device mapping. For more information, see Launching a Linux instance from +// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If any snapshots have AWS Marketplace product codes, they are copied to the +// new AMI. +// +// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) +// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code +// associated with an AMI to verify the subscription status for package updates. 
+// To create a new AMI for operating systems that require a billing product +// code, instead of registering the AMI, do the following to preserve the billing +// product code association: +// +// Launch an instance from an existing AMI with that billing product code. +// +// Customize the instance. +// +// Create an AMI from the instance using CreateImage. +// +// If you purchase a Reserved Instance to apply to an On-Demand Instance that +// was launched from an AMI with a billing product code, make sure that the +// Reserved Instance has the matching billing product code. If you purchase +// a Reserved Instance without the matching billing product code, the Reserved +// Instance will not be applied to the On-Demand Instance. For information about +// how to obtain the platform details and billing information of an AMI, see +// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// If needed, you can deregister an AMI at any time. Any modifications you make +// to an AMI backed by an instance store volume invalidates its registration. +// If you make changes to an image, deregister the previous image and register +// the new image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RegisterImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterImage +func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + return out, req.Send() +} + +// RegisterImageWithContext is the same as RegisterImage with the addition of +// the ability to pass a context and additional request options. 
+// +// See RegisterImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RegisterImageWithContext(ctx aws.Context, input *RegisterImageInput, opts ...request.Option) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterInstanceEventNotificationAttributes = "RegisterInstanceEventNotificationAttributes" + +// RegisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterInstanceEventNotificationAttributes for more information on using the RegisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterInstanceEventNotificationAttributesRequest method. 
+//    req, resp := client.RegisterInstanceEventNotificationAttributesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes
+func (c *EC2) RegisterInstanceEventNotificationAttributesRequest(input *RegisterInstanceEventNotificationAttributesInput) (req *request.Request, output *RegisterInstanceEventNotificationAttributesOutput) {
+	op := &request.Operation{
+		Name:       opRegisterInstanceEventNotificationAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterInstanceEventNotificationAttributesInput{}
+	}
+
+	output = &RegisterInstanceEventNotificationAttributesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RegisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud.
+//
+// Registers a set of tag keys to include in scheduled event notifications for
+// your resources.
+//
+// To remove tags, use DeregisterInstanceEventNotificationAttributes.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation RegisterInstanceEventNotificationAttributes for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes +func (c *EC2) RegisterInstanceEventNotificationAttributes(input *RegisterInstanceEventNotificationAttributesInput) (*RegisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.RegisterInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// RegisterInstanceEventNotificationAttributesWithContext is the same as RegisterInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RegisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *RegisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*RegisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.RegisterInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterTransitGatewayMulticastGroupMembers = "RegisterTransitGatewayMulticastGroupMembers" + +// RegisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTransitGatewayMulticastGroupMembers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See RegisterTransitGatewayMulticastGroupMembers for more information on using the RegisterTransitGatewayMulticastGroupMembers
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the RegisterTransitGatewayMulticastGroupMembersRequest method.
+//    req, resp := client.RegisterTransitGatewayMulticastGroupMembersRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterTransitGatewayMulticastGroupMembers
+func (c *EC2) RegisterTransitGatewayMulticastGroupMembersRequest(input *RegisterTransitGatewayMulticastGroupMembersInput) (req *request.Request, output *RegisterTransitGatewayMulticastGroupMembersOutput) {
+	op := &request.Operation{
+		Name:       opRegisterTransitGatewayMulticastGroupMembers,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterTransitGatewayMulticastGroupMembersInput{}
+	}
+
+	output = &RegisterTransitGatewayMulticastGroupMembersOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RegisterTransitGatewayMulticastGroupMembers API operation for Amazon Elastic Compute Cloud.
+//
+// Registers members (network interfaces) with the transit gateway multicast
+// group. A member is a network interface associated with a supported EC2 instance
+// that receives multicast traffic. For information about supported instances,
+// see Multicast Considerations (https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits)
+// in Amazon VPC Transit Gateways.
+// +// After you add the members, use SearchTransitGatewayMulticastGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html) +// to verify that the members were added to the transit gateway multicast group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RegisterTransitGatewayMulticastGroupMembers for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterTransitGatewayMulticastGroupMembers +func (c *EC2) RegisterTransitGatewayMulticastGroupMembers(input *RegisterTransitGatewayMulticastGroupMembersInput) (*RegisterTransitGatewayMulticastGroupMembersOutput, error) { + req, out := c.RegisterTransitGatewayMulticastGroupMembersRequest(input) + return out, req.Send() +} + +// RegisterTransitGatewayMulticastGroupMembersWithContext is the same as RegisterTransitGatewayMulticastGroupMembers with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTransitGatewayMulticastGroupMembers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RegisterTransitGatewayMulticastGroupMembersWithContext(ctx aws.Context, input *RegisterTransitGatewayMulticastGroupMembersInput, opts ...request.Option) (*RegisterTransitGatewayMulticastGroupMembersOutput, error) { + req, out := c.RegisterTransitGatewayMulticastGroupMembersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opRegisterTransitGatewayMulticastGroupSources = "RegisterTransitGatewayMulticastGroupSources" + +// RegisterTransitGatewayMulticastGroupSourcesRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTransitGatewayMulticastGroupSources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterTransitGatewayMulticastGroupSources for more information on using the RegisterTransitGatewayMulticastGroupSources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterTransitGatewayMulticastGroupSourcesRequest method. +// req, resp := client.RegisterTransitGatewayMulticastGroupSourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterTransitGatewayMulticastGroupSources +func (c *EC2) RegisterTransitGatewayMulticastGroupSourcesRequest(input *RegisterTransitGatewayMulticastGroupSourcesInput) (req *request.Request, output *RegisterTransitGatewayMulticastGroupSourcesOutput) { + op := &request.Operation{ + Name: opRegisterTransitGatewayMulticastGroupSources, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTransitGatewayMulticastGroupSourcesInput{} + } + + output = &RegisterTransitGatewayMulticastGroupSourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterTransitGatewayMulticastGroupSources API operation for Amazon Elastic Compute Cloud. 
+// +// Registers sources (network interfaces) with the specified transit gateway +// multicast group. +// +// A multicast source is a network interface attached to a supported instance +// that sends multicast traffic. For information about supported instances, +// see Multicast Considerations (https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits) +// in Amazon VPC Transit Gateways. +// +// After you add the source, use SearchTransitGatewayMulticastGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html) +// to verify that the source was added to the multicast group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RegisterTransitGatewayMulticastGroupSources for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterTransitGatewayMulticastGroupSources +func (c *EC2) RegisterTransitGatewayMulticastGroupSources(input *RegisterTransitGatewayMulticastGroupSourcesInput) (*RegisterTransitGatewayMulticastGroupSourcesOutput, error) { + req, out := c.RegisterTransitGatewayMulticastGroupSourcesRequest(input) + return out, req.Send() +} + +// RegisterTransitGatewayMulticastGroupSourcesWithContext is the same as RegisterTransitGatewayMulticastGroupSources with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTransitGatewayMulticastGroupSources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) RegisterTransitGatewayMulticastGroupSourcesWithContext(ctx aws.Context, input *RegisterTransitGatewayMulticastGroupSourcesInput, opts ...request.Option) (*RegisterTransitGatewayMulticastGroupSourcesOutput, error) { + req, out := c.RegisterTransitGatewayMulticastGroupSourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRejectTransitGatewayPeeringAttachment = "RejectTransitGatewayPeeringAttachment" + +// RejectTransitGatewayPeeringAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the RejectTransitGatewayPeeringAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RejectTransitGatewayPeeringAttachment for more information on using the RejectTransitGatewayPeeringAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RejectTransitGatewayPeeringAttachmentRequest method. 
+// req, resp := client.RejectTransitGatewayPeeringAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectTransitGatewayPeeringAttachment +func (c *EC2) RejectTransitGatewayPeeringAttachmentRequest(input *RejectTransitGatewayPeeringAttachmentInput) (req *request.Request, output *RejectTransitGatewayPeeringAttachmentOutput) { + op := &request.Operation{ + Name: opRejectTransitGatewayPeeringAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectTransitGatewayPeeringAttachmentInput{} + } + + output = &RejectTransitGatewayPeeringAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// RejectTransitGatewayPeeringAttachment API operation for Amazon Elastic Compute Cloud. +// +// Rejects a transit gateway peering attachment request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RejectTransitGatewayPeeringAttachment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectTransitGatewayPeeringAttachment +func (c *EC2) RejectTransitGatewayPeeringAttachment(input *RejectTransitGatewayPeeringAttachmentInput) (*RejectTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.RejectTransitGatewayPeeringAttachmentRequest(input) + return out, req.Send() +} + +// RejectTransitGatewayPeeringAttachmentWithContext is the same as RejectTransitGatewayPeeringAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See RejectTransitGatewayPeeringAttachment for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RejectTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, input *RejectTransitGatewayPeeringAttachmentInput, opts ...request.Option) (*RejectTransitGatewayPeeringAttachmentOutput, error) { + req, out := c.RejectTransitGatewayPeeringAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRejectTransitGatewayVpcAttachment = "RejectTransitGatewayVpcAttachment" + +// RejectTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the +// client's request for the RejectTransitGatewayVpcAttachment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RejectTransitGatewayVpcAttachment for more information on using the RejectTransitGatewayVpcAttachment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RejectTransitGatewayVpcAttachmentRequest method. 
+// req, resp := client.RejectTransitGatewayVpcAttachmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectTransitGatewayVpcAttachment +func (c *EC2) RejectTransitGatewayVpcAttachmentRequest(input *RejectTransitGatewayVpcAttachmentInput) (req *request.Request, output *RejectTransitGatewayVpcAttachmentOutput) { + op := &request.Operation{ + Name: opRejectTransitGatewayVpcAttachment, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectTransitGatewayVpcAttachmentInput{} + } + + output = &RejectTransitGatewayVpcAttachmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// RejectTransitGatewayVpcAttachment API operation for Amazon Elastic Compute Cloud. +// +// Rejects a request to attach a VPC to a transit gateway. +// +// The VPC attachment must be in the pendingAcceptance state. Use DescribeTransitGatewayVpcAttachments +// to view your pending VPC attachment requests. Use AcceptTransitGatewayVpcAttachment +// to accept a VPC attachment request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RejectTransitGatewayVpcAttachment for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectTransitGatewayVpcAttachment +func (c *EC2) RejectTransitGatewayVpcAttachment(input *RejectTransitGatewayVpcAttachmentInput) (*RejectTransitGatewayVpcAttachmentOutput, error) { + req, out := c.RejectTransitGatewayVpcAttachmentRequest(input) + return out, req.Send() +} + +// RejectTransitGatewayVpcAttachmentWithContext is the same as RejectTransitGatewayVpcAttachment with the addition of +// the ability to pass a context and additional request options. +// +// See RejectTransitGatewayVpcAttachment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RejectTransitGatewayVpcAttachmentWithContext(ctx aws.Context, input *RejectTransitGatewayVpcAttachmentInput, opts ...request.Option) (*RejectTransitGatewayVpcAttachmentOutput, error) { + req, out := c.RejectTransitGatewayVpcAttachmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRejectVpcEndpointConnections = "RejectVpcEndpointConnections" + +// RejectVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the RejectVpcEndpointConnections operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RejectVpcEndpointConnections for more information on using the RejectVpcEndpointConnections +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RejectVpcEndpointConnectionsRequest method. +// req, resp := client.RejectVpcEndpointConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcEndpointConnections +func (c *EC2) RejectVpcEndpointConnectionsRequest(input *RejectVpcEndpointConnectionsInput) (req *request.Request, output *RejectVpcEndpointConnectionsOutput) { + op := &request.Operation{ + Name: opRejectVpcEndpointConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcEndpointConnectionsInput{} + } + + output = &RejectVpcEndpointConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// RejectVpcEndpointConnections API operation for Amazon Elastic Compute Cloud. +// +// Rejects one or more VPC endpoint connection requests to your VPC endpoint +// service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RejectVpcEndpointConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcEndpointConnections +func (c *EC2) RejectVpcEndpointConnections(input *RejectVpcEndpointConnectionsInput) (*RejectVpcEndpointConnectionsOutput, error) { + req, out := c.RejectVpcEndpointConnectionsRequest(input) + return out, req.Send() +} + +// RejectVpcEndpointConnectionsWithContext is the same as RejectVpcEndpointConnections with the addition of +// the ability to pass a context and additional request options. 
+// +// See RejectVpcEndpointConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RejectVpcEndpointConnectionsWithContext(ctx aws.Context, input *RejectVpcEndpointConnectionsInput, opts ...request.Option) (*RejectVpcEndpointConnectionsOutput, error) { + req, out := c.RejectVpcEndpointConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" + +// RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the RejectVpcPeeringConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RejectVpcPeeringConnection for more information on using the RejectVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RejectVpcPeeringConnectionRequest method. 
+// req, resp := client.RejectVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcPeeringConnection +func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { + op := &request.Operation{ + Name: opRejectVpcPeeringConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RejectVpcPeeringConnectionInput{} + } + + output = &RejectVpcPeeringConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RejectVpcPeeringConnection API operation for Amazon Elastic Compute Cloud. +// +// Rejects a VPC peering connection request. The VPC peering connection must +// be in the pending-acceptance state. Use the DescribeVpcPeeringConnections +// request to view your outstanding VPC peering connection requests. To delete +// an active VPC peering connection, or to delete a VPC peering connection request +// that you initiated, use DeleteVpcPeeringConnection. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RejectVpcPeeringConnection for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcPeeringConnection +func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + return out, req.Send() +} + +// RejectVpcPeeringConnectionWithContext is the same as RejectVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. 
+// +// See RejectVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RejectVpcPeeringConnectionWithContext(ctx aws.Context, input *RejectVpcPeeringConnectionInput, opts ...request.Option) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReleaseAddress = "ReleaseAddress" + +// ReleaseAddressRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseAddress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReleaseAddress for more information on using the ReleaseAddress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReleaseAddressRequest method. 
+// req, resp := client.ReleaseAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseAddress +func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) { + op := &request.Operation{ + Name: opReleaseAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseAddressInput{} + } + + output = &ReleaseAddressOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ReleaseAddress API operation for Amazon Elastic Compute Cloud. +// +// Releases the specified Elastic IP address. +// +// [EC2-Classic, default VPC] Releasing an Elastic IP address automatically +// disassociates it from any instance that it's associated with. To disassociate +// an Elastic IP address without releasing it, use DisassociateAddress. +// +// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic +// IP address before you can release it. Otherwise, Amazon EC2 returns an error +// (InvalidIPAddress.InUse). +// +// After releasing an Elastic IP address, it is released to the IP address pool. +// Be sure to update your DNS records and any servers or devices that communicate +// with the address. If you attempt to release an Elastic IP address that you +// already released, you'll get an AuthFailure error if the address is already +// allocated to another AWS account. +// +// [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might +// be able to recover it. For more information, see AllocateAddress. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReleaseAddress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseAddress +func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + return out, req.Send() +} + +// ReleaseAddressWithContext is the same as ReleaseAddress with the addition of +// the ability to pass a context and additional request options. +// +// See ReleaseAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReleaseAddressWithContext(ctx aws.Context, input *ReleaseAddressInput, opts ...request.Option) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReleaseHosts = "ReleaseHosts" + +// ReleaseHostsRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseHosts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReleaseHosts for more information on using the ReleaseHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReleaseHostsRequest method. 
+// req, resp := client.ReleaseHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseHosts +func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) { + op := &request.Operation{ + Name: opReleaseHosts, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReleaseHostsInput{} + } + + output = &ReleaseHostsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReleaseHosts API operation for Amazon Elastic Compute Cloud. +// +// When you no longer want to use an On-Demand Dedicated Host it can be released. +// On-Demand billing is stopped and the host goes into released state. The host +// ID of Dedicated Hosts that have been released can no longer be specified +// in another request, for example, to modify the host. You must stop or terminate +// all instances on a host before it can be released. +// +// When Dedicated Hosts are released, it may take some time for them to stop +// counting toward your limit and you may receive capacity errors when trying +// to allocate new Dedicated Hosts. Wait a few minutes and then try again. +// +// Released hosts still appear in a DescribeHosts response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReleaseHosts for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseHosts +func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) { + req, out := c.ReleaseHostsRequest(input) + return out, req.Send() +} + +// ReleaseHostsWithContext is the same as ReleaseHosts with the addition of +// the ability to pass a context and additional request options. +// +// See ReleaseHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReleaseHostsWithContext(ctx aws.Context, input *ReleaseHostsInput, opts ...request.Option) (*ReleaseHostsOutput, error) { + req, out := c.ReleaseHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceIamInstanceProfileAssociation = "ReplaceIamInstanceProfileAssociation" + +// ReplaceIamInstanceProfileAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceIamInstanceProfileAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceIamInstanceProfileAssociation for more information on using the ReplaceIamInstanceProfileAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReplaceIamInstanceProfileAssociationRequest method. 
+// req, resp := client.ReplaceIamInstanceProfileAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociation +func (c *EC2) ReplaceIamInstanceProfileAssociationRequest(input *ReplaceIamInstanceProfileAssociationInput) (req *request.Request, output *ReplaceIamInstanceProfileAssociationOutput) { + op := &request.Operation{ + Name: opReplaceIamInstanceProfileAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceIamInstanceProfileAssociationInput{} + } + + output = &ReplaceIamInstanceProfileAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReplaceIamInstanceProfileAssociation API operation for Amazon Elastic Compute Cloud. +// +// Replaces an IAM instance profile for the specified running instance. You +// can use this action to change the IAM instance profile that's associated +// with an instance without having to disassociate the existing IAM instance +// profile first. +// +// Use DescribeIamInstanceProfileAssociations to get the association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceIamInstanceProfileAssociation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociation +func (c *EC2) ReplaceIamInstanceProfileAssociation(input *ReplaceIamInstanceProfileAssociationInput) (*ReplaceIamInstanceProfileAssociationOutput, error) { + req, out := c.ReplaceIamInstanceProfileAssociationRequest(input) + return out, req.Send() +} + +// ReplaceIamInstanceProfileAssociationWithContext is the same as ReplaceIamInstanceProfileAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceIamInstanceProfileAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceIamInstanceProfileAssociationWithContext(ctx aws.Context, input *ReplaceIamInstanceProfileAssociationInput, opts ...request.Option) (*ReplaceIamInstanceProfileAssociationOutput, error) { + req, out := c.ReplaceIamInstanceProfileAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" + +// ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceNetworkAclAssociation for more information on using the ReplaceNetworkAclAssociation +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReplaceNetworkAclAssociationRequest method. +// req, resp := client.ReplaceNetworkAclAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociation +func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclAssociationInput{} + } + + output = &ReplaceNetworkAclAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReplaceNetworkAclAssociation API operation for Amazon Elastic Compute Cloud. +// +// Changes which network ACL a subnet is associated with. By default when you +// create a subnet, it's automatically associated with the default network ACL. +// For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// This is an idempotent operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceNetworkAclAssociation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociation +func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + return out, req.Send() +} + +// ReplaceNetworkAclAssociationWithContext is the same as ReplaceNetworkAclAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceNetworkAclAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceNetworkAclAssociationWithContext(ctx aws.Context, input *ReplaceNetworkAclAssociationInput, opts ...request.Option) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" + +// ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclEntry operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceNetworkAclEntry for more information on using the ReplaceNetworkAclEntry +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ReplaceNetworkAclEntryRequest method. +// req, resp := client.ReplaceNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclEntry +func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { + op := &request.Operation{ + Name: opReplaceNetworkAclEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceNetworkAclEntryInput{} + } + + output = &ReplaceNetworkAclEntryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ReplaceNetworkAclEntry API operation for Amazon Elastic Compute Cloud. +// +// Replaces an entry (rule) in a network ACL. For more information, see Network +// ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html) in +// the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceNetworkAclEntry for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclEntry +func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + return out, req.Send() +} + +// ReplaceNetworkAclEntryWithContext is the same as ReplaceNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. 
+// +// See ReplaceNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceNetworkAclEntryWithContext(ctx aws.Context, input *ReplaceNetworkAclEntryInput, opts ...request.Option) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceRoute = "ReplaceRoute" + +// ReplaceRouteRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceRoute for more information on using the ReplaceRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReplaceRouteRequest method. 
+// req, resp := client.ReplaceRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRoute +func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { + op := &request.Operation{ + Name: opReplaceRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteInput{} + } + + output = &ReplaceRouteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ReplaceRoute API operation for Amazon Elastic Compute Cloud. +// +// Replaces an existing route within a route table in a VPC. You must provide +// only one of the following: internet gateway, virtual private gateway, NAT +// instance, NAT gateway, VPC peering connection, network interface, egress-only +// internet gateway, or transit gateway. +// +// For more information, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRoute +func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + return out, req.Send() +} + +// ReplaceRouteWithContext is the same as ReplaceRoute with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceRoute for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceRouteWithContext(ctx aws.Context, input *ReplaceRouteInput, opts ...request.Option) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" + +// ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRouteTableAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceRouteTableAssociation for more information on using the ReplaceRouteTableAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReplaceRouteTableAssociationRequest method. 
+// req, resp := client.ReplaceRouteTableAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteTableAssociation +func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { + op := &request.Operation{ + Name: opReplaceRouteTableAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceRouteTableAssociationInput{} + } + + output = &ReplaceRouteTableAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReplaceRouteTableAssociation API operation for Amazon Elastic Compute Cloud. +// +// Changes the route table associated with a given subnet, internet gateway, +// or virtual private gateway in a VPC. After the operation completes, the subnet +// or gateway uses the routes in the new route table. For more information about +// route tables, see Route Tables (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// You can also use this operation to change which table is the main route table +// in the VPC. Specify the main route table's association ID and the route table +// ID of the new main route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceRouteTableAssociation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteTableAssociation +func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + return out, req.Send() +} + +// ReplaceRouteTableAssociationWithContext is the same as ReplaceRouteTableAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceRouteTableAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceRouteTableAssociationWithContext(ctx aws.Context, input *ReplaceRouteTableAssociationInput, opts ...request.Option) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceTransitGatewayRoute = "ReplaceTransitGatewayRoute" + +// ReplaceTransitGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceTransitGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceTransitGatewayRoute for more information on using the ReplaceTransitGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ReplaceTransitGatewayRouteRequest method. +// req, resp := client.ReplaceTransitGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceTransitGatewayRoute +func (c *EC2) ReplaceTransitGatewayRouteRequest(input *ReplaceTransitGatewayRouteInput) (req *request.Request, output *ReplaceTransitGatewayRouteOutput) { + op := &request.Operation{ + Name: opReplaceTransitGatewayRoute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceTransitGatewayRouteInput{} + } + + output = &ReplaceTransitGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReplaceTransitGatewayRoute API operation for Amazon Elastic Compute Cloud. +// +// Replaces the specified route in the specified transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceTransitGatewayRoute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceTransitGatewayRoute +func (c *EC2) ReplaceTransitGatewayRoute(input *ReplaceTransitGatewayRouteInput) (*ReplaceTransitGatewayRouteOutput, error) { + req, out := c.ReplaceTransitGatewayRouteRequest(input) + return out, req.Send() +} + +// ReplaceTransitGatewayRouteWithContext is the same as ReplaceTransitGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceTransitGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceTransitGatewayRouteWithContext(ctx aws.Context, input *ReplaceTransitGatewayRouteInput, opts ...request.Option) (*ReplaceTransitGatewayRouteOutput, error) { + req, out := c.ReplaceTransitGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReportInstanceStatus = "ReportInstanceStatus" + +// ReportInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the ReportInstanceStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReportInstanceStatus for more information on using the ReportInstanceStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReportInstanceStatusRequest method. 
+// req, resp := client.ReportInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReportInstanceStatus +func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { + op := &request.Operation{ + Name: opReportInstanceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReportInstanceStatusInput{} + } + + output = &ReportInstanceStatusOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ReportInstanceStatus API operation for Amazon Elastic Compute Cloud. +// +// Submits feedback about the status of an instance. The instance must be in +// the running state. If your experience with the instance differs from the +// instance status returned by DescribeInstanceStatus, use ReportInstanceStatus +// to report your experience with the instance. Amazon EC2 collects this information +// to improve the accuracy of status checks. +// +// Use of this action does not change the value returned by DescribeInstanceStatus. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReportInstanceStatus for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReportInstanceStatus +func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + return out, req.Send() +} + +// ReportInstanceStatusWithContext is the same as ReportInstanceStatus with the addition of +// the ability to pass a context and additional request options. +// +// See ReportInstanceStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReportInstanceStatusWithContext(ctx aws.Context, input *ReportInstanceStatusInput, opts ...request.Option) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRequestSpotFleet = "RequestSpotFleet" + +// RequestSpotFleetRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotFleet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RequestSpotFleet for more information on using the RequestSpotFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RequestSpotFleetRequest method. 
+// req, resp := client.RequestSpotFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotFleet +func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { + op := &request.Operation{ + Name: opRequestSpotFleet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotFleetInput{} + } + + output = &RequestSpotFleetOutput{} + req = c.newRequest(op, input, output) + return +} + +// RequestSpotFleet API operation for Amazon Elastic Compute Cloud. +// +// Creates a Spot Fleet request. +// +// The Spot Fleet request specifies the total target capacity and the On-Demand +// target capacity. Amazon EC2 calculates the difference between the total capacity +// and On-Demand capacity, and launches the difference as Spot capacity. +// +// You can submit a single request that includes multiple launch specifications +// that vary by instance type, AMI, Availability Zone, or subnet. +// +// By default, the Spot Fleet requests Spot Instances in the Spot Instance pool +// where the price per unit is the lowest. Each launch specification can include +// its own instance weighting that reflects the value of the instance type to +// your application workload. +// +// Alternatively, you can specify that the Spot Fleet distribute the target +// capacity across the Spot pools included in its launch specifications. By +// ensuring that the Spot Instances in your Spot Fleet are in different Spot +// pools, you can improve the availability of your fleet. +// +// You can specify tags for the Spot Fleet request and instances launched by +// the fleet. You cannot tag other resource types in a Spot Fleet request because +// only the spot-fleet-request and instance resource types are supported. 
+// +// For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// in the Amazon EC2 User Guide for Linux Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RequestSpotFleet for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotFleet +func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + return out, req.Send() +} + +// RequestSpotFleetWithContext is the same as RequestSpotFleet with the addition of +// the ability to pass a context and additional request options. +// +// See RequestSpotFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RequestSpotFleetWithContext(ctx aws.Context, input *RequestSpotFleetInput, opts ...request.Option) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRequestSpotInstances = "RequestSpotInstances" + +// RequestSpotInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See RequestSpotInstances for more information on using the RequestSpotInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RequestSpotInstancesRequest method. +// req, resp := client.RequestSpotInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotInstances +func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { + op := &request.Operation{ + Name: opRequestSpotInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RequestSpotInstancesInput{} + } + + output = &RequestSpotInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// RequestSpotInstances API operation for Amazon Elastic Compute Cloud. +// +// Creates a Spot Instance request. +// +// For more information, see Spot Instance requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon EC2 User Guide for Linux Instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RequestSpotInstances for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotInstances +func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + return out, req.Send() +} + +// RequestSpotInstancesWithContext is the same as RequestSpotInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RequestSpotInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpotInstancesInput, opts ...request.Option) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetEbsDefaultKmsKeyId = "ResetEbsDefaultKmsKeyId" + +// ResetEbsDefaultKmsKeyIdRequest generates a "aws/request.Request" representing the +// client's request for the ResetEbsDefaultKmsKeyId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetEbsDefaultKmsKeyId for more information on using the ResetEbsDefaultKmsKeyId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetEbsDefaultKmsKeyIdRequest method. 
+// req, resp := client.ResetEbsDefaultKmsKeyIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetEbsDefaultKmsKeyId +func (c *EC2) ResetEbsDefaultKmsKeyIdRequest(input *ResetEbsDefaultKmsKeyIdInput) (req *request.Request, output *ResetEbsDefaultKmsKeyIdOutput) { + op := &request.Operation{ + Name: opResetEbsDefaultKmsKeyId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetEbsDefaultKmsKeyIdInput{} + } + + output = &ResetEbsDefaultKmsKeyIdOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. +// +// Resets the default customer master key (CMK) for EBS encryption for your +// account in this Region to the AWS managed CMK for EBS. +// +// After resetting the default CMK to the AWS managed CMK, you can continue +// to encrypt by a customer managed CMK by specifying it when you create the +// volume. For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetEbsDefaultKmsKeyId for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetEbsDefaultKmsKeyId +func (c *EC2) ResetEbsDefaultKmsKeyId(input *ResetEbsDefaultKmsKeyIdInput) (*ResetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ResetEbsDefaultKmsKeyIdRequest(input) + return out, req.Send() +} + +// ResetEbsDefaultKmsKeyIdWithContext is the same as ResetEbsDefaultKmsKeyId with the addition of +// the ability to pass a context and additional request options. +// +// See ResetEbsDefaultKmsKeyId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetEbsDefaultKmsKeyIdWithContext(ctx aws.Context, input *ResetEbsDefaultKmsKeyIdInput, opts ...request.Option) (*ResetEbsDefaultKmsKeyIdOutput, error) { + req, out := c.ResetEbsDefaultKmsKeyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" + +// ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetFpgaImageAttribute for more information on using the ResetFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ResetFpgaImageAttributeRequest method. +// req, resp := client.ResetFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttributeRequest(input *ResetFpgaImageAttributeInput) (req *request.Request, output *ResetFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opResetFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetFpgaImageAttributeInput{} + } + + output = &ResetFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets the specified attribute of the specified Amazon FPGA Image (AFI) to +// its default value. You can only reset the load permission attribute. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetFpgaImageAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttribute(input *ResetFpgaImageAttributeInput) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ResetFpgaImageAttributeWithContext is the same as ResetFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetFpgaImageAttributeWithContext(ctx aws.Context, input *ResetFpgaImageAttributeInput, opts ...request.Option) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetImageAttribute = "ResetImageAttribute" + +// ResetImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetImageAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetImageAttribute for more information on using the ResetImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetImageAttributeRequest method. 
+// req, resp := client.ResetImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttribute +func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { + op := &request.Operation{ + Name: opResetImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetImageAttributeInput{} + } + + output = &ResetImageAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets an attribute of an AMI to its default value. +// +// The productCodes attribute can't be reset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetImageAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttribute +func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + return out, req.Send() +} + +// ResetImageAttributeWithContext is the same as ResetImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetImageAttributeWithContext(ctx aws.Context, input *ResetImageAttributeInput, opts ...request.Option) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetInstanceAttribute = "ResetInstanceAttribute" + +// ResetInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetInstanceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetInstanceAttribute for more information on using the ResetInstanceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetInstanceAttributeRequest method. 
+// req, resp := client.ResetInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetInstanceAttribute +func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { + op := &request.Operation{ + Name: opResetInstanceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetInstanceAttributeInput{} + } + + output = &ResetInstanceAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetInstanceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets an attribute of an instance to its default value. To reset the kernel +// or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, +// the instance can be either running or stopped. +// +// The sourceDestCheck attribute controls whether source/destination checking +// is enabled. The default value is true, which means checking is enabled. This +// value must be false for a NAT instance to perform NAT. For more information, +// see NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// in the Amazon Virtual Private Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetInstanceAttribute for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetInstanceAttribute +func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + return out, req.Send() +} + +// ResetInstanceAttributeWithContext is the same as ResetInstanceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetInstanceAttributeWithContext(ctx aws.Context, input *ResetInstanceAttributeInput, opts ...request.Option) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" + +// ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetNetworkInterfaceAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetNetworkInterfaceAttribute for more information on using the ResetNetworkInterfaceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ResetNetworkInterfaceAttributeRequest method. +// req, resp := client.ResetNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetNetworkInterfaceAttribute +func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { + op := &request.Operation{ + Name: opResetNetworkInterfaceAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetNetworkInterfaceAttributeInput{} + } + + output = &ResetNetworkInterfaceAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetNetworkInterfaceAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets a network interface attribute. You can specify only one attribute +// at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetNetworkInterfaceAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetNetworkInterfaceAttribute +func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + return out, req.Send() +} + +// ResetNetworkInterfaceAttributeWithContext is the same as ResetNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. 
+// +// See ResetNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetNetworkInterfaceAttributeWithContext(ctx aws.Context, input *ResetNetworkInterfaceAttributeInput, opts ...request.Option) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetSnapshotAttribute = "ResetSnapshotAttribute" + +// ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetSnapshotAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetSnapshotAttribute for more information on using the ResetSnapshotAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetSnapshotAttributeRequest method. 
+// req, resp := client.ResetSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetSnapshotAttribute +func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opResetSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetSnapshotAttributeInput{} + } + + output = &ResetSnapshotAttributeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetSnapshotAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets permission settings for the specified snapshot. +// +// For more information about modifying snapshot permissions, see Sharing snapshots +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetSnapshotAttribute for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetSnapshotAttribute +func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + return out, req.Send() +} + +// ResetSnapshotAttributeWithContext is the same as ResetSnapshotAttribute with the addition of +// the ability to pass a context and additional request options. 
+// +// See ResetSnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetSnapshotAttributeWithContext(ctx aws.Context, input *ResetSnapshotAttributeInput, opts ...request.Option) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreAddressToClassic = "RestoreAddressToClassic" + +// RestoreAddressToClassicRequest generates a "aws/request.Request" representing the +// client's request for the RestoreAddressToClassic operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreAddressToClassic for more information on using the RestoreAddressToClassic +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreAddressToClassicRequest method. 
+// req, resp := client.RestoreAddressToClassicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreAddressToClassic +func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { + op := &request.Operation{ + Name: opRestoreAddressToClassic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreAddressToClassicInput{} + } + + output = &RestoreAddressToClassicOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreAddressToClassic API operation for Amazon Elastic Compute Cloud. +// +// Restores an Elastic IP address that was previously moved to the EC2-VPC platform +// back to the EC2-Classic platform. You cannot move an Elastic IP address that +// was originally allocated for use in EC2-VPC. The Elastic IP address must +// not be associated with an instance or network interface. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RestoreAddressToClassic for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreAddressToClassic +func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + return out, req.Send() +} + +// RestoreAddressToClassicWithContext is the same as RestoreAddressToClassic with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreAddressToClassic for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreAddressToClassicWithContext(ctx aws.Context, input *RestoreAddressToClassicInput, opts ...request.Option) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreManagedPrefixListVersion = "RestoreManagedPrefixListVersion" + +// RestoreManagedPrefixListVersionRequest generates a "aws/request.Request" representing the +// client's request for the RestoreManagedPrefixListVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreManagedPrefixListVersion for more information on using the RestoreManagedPrefixListVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreManagedPrefixListVersionRequest method. 
+// req, resp := client.RestoreManagedPrefixListVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersionRequest(input *RestoreManagedPrefixListVersionInput) (req *request.Request, output *RestoreManagedPrefixListVersionOutput) { + op := &request.Operation{ + Name: opRestoreManagedPrefixListVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreManagedPrefixListVersionInput{} + } + + output = &RestoreManagedPrefixListVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreManagedPrefixListVersion API operation for Amazon Elastic Compute Cloud. +// +// Restores the entries from a previous version of a managed prefix list to +// a new version of the prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RestoreManagedPrefixListVersion for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersion(input *RestoreManagedPrefixListVersionInput) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + return out, req.Send() +} + +// RestoreManagedPrefixListVersionWithContext is the same as RestoreManagedPrefixListVersion with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreManagedPrefixListVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreManagedPrefixListVersionWithContext(ctx aws.Context, input *RestoreManagedPrefixListVersionInput, opts ...request.Option) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRevokeClientVpnIngress = "RevokeClientVpnIngress" + +// RevokeClientVpnIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeClientVpnIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RevokeClientVpnIngress for more information on using the RevokeClientVpnIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RevokeClientVpnIngressRequest method. 
+// req, resp := client.RevokeClientVpnIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeClientVpnIngress +func (c *EC2) RevokeClientVpnIngressRequest(input *RevokeClientVpnIngressInput) (req *request.Request, output *RevokeClientVpnIngressOutput) { + op := &request.Operation{ + Name: opRevokeClientVpnIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeClientVpnIngressInput{} + } + + output = &RevokeClientVpnIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// RevokeClientVpnIngress API operation for Amazon Elastic Compute Cloud. +// +// Removes an ingress authorization rule from a Client VPN endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RevokeClientVpnIngress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeClientVpnIngress +func (c *EC2) RevokeClientVpnIngress(input *RevokeClientVpnIngressInput) (*RevokeClientVpnIngressOutput, error) { + req, out := c.RevokeClientVpnIngressRequest(input) + return out, req.Send() +} + +// RevokeClientVpnIngressWithContext is the same as RevokeClientVpnIngress with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeClientVpnIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) RevokeClientVpnIngressWithContext(ctx aws.Context, input *RevokeClientVpnIngressInput, opts ...request.Option) (*RevokeClientVpnIngressOutput, error) { + req, out := c.RevokeClientVpnIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" + +// RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupEgress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RevokeSecurityGroupEgress for more information on using the RevokeSecurityGroupEgress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RevokeSecurityGroupEgressRequest method. +// req, resp := client.RevokeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupEgress +func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupEgressInput{} + } + + output = &RevokeSecurityGroupEgressOutput{} + req = c.newRequest(op, input, output) + return +} + +// RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. 
+// +// [VPC only] Removes the specified egress rules from a security group for EC2-VPC. +// This action does not apply to security groups for use in EC2-Classic. To +// remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. +// +// [Default VPC] If the values you specify do not match the existing rule's +// values, no error is returned, and the output describes the security group +// rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. +// +// Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source +// security group. For the TCP and UDP protocols, you must also specify the +// destination port or range of ports. For the ICMP protocol, you must also +// specify the ICMP type and code. If the security group rule has a description, +// you do not have to specify the description to revoke the rule. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RevokeSecurityGroupEgress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupEgress +func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + return out, req.Send() +} + +// RevokeSecurityGroupEgressWithContext is the same as RevokeSecurityGroupEgress with the addition of +// the ability to pass a context and additional request options. 
+// +// See RevokeSecurityGroupEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RevokeSecurityGroupEgressWithContext(ctx aws.Context, input *RevokeSecurityGroupEgressInput, opts ...request.Option) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" + +// RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RevokeSecurityGroupIngress for more information on using the RevokeSecurityGroupIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RevokeSecurityGroupIngressRequest method. 
+// req, resp := client.RevokeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupIngress +func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeSecurityGroupIngressInput{} + } + + output = &RevokeSecurityGroupIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// RevokeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud. +// +// Removes the specified ingress rules from a security group. To remove a rule, +// the values that you specify (for example, ports) must match the existing +// rule's values exactly. +// +// [EC2-Classic , default VPC] If the values you specify do not match the existing +// rule's values, no error is returned, and the output describes the security +// group rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. +// +// Each rule consists of the protocol and the CIDR range or source security +// group. For the TCP and UDP protocols, you must also specify the destination +// port or range of ports. For the ICMP protocol, you must also specify the +// ICMP type and code. If the security group rule has a description, you do +// not have to specify the description to revoke the rule. +// +// Rule changes are propagated to instances within the security group as quickly +// as possible. However, a small delay might occur. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RevokeSecurityGroupIngress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupIngress +func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + return out, req.Send() +} + +// RevokeSecurityGroupIngressWithContext is the same as RevokeSecurityGroupIngress with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeSecurityGroupIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RevokeSecurityGroupIngressWithContext(ctx aws.Context, input *RevokeSecurityGroupIngressInput, opts ...request.Option) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRunInstances = "RunInstances" + +// RunInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RunInstances for more information on using the RunInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RunInstancesRequest method. +// req, resp := client.RunInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunInstances +func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { + op := &request.Operation{ + Name: opRunInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunInstancesInput{} + } + + output = &Reservation{} + req = c.newRequest(op, input, output) + return +} + +// RunInstances API operation for Amazon Elastic Compute Cloud. +// +// Launches the specified number of instances using an AMI for which you have +// permissions. +// +// You can specify a number of options, or leave the default options. The following +// rules apply: +// +// * [EC2-VPC] If you don't specify a subnet ID, we choose a default subnet +// from your default VPC for you. If you don't have a default VPC, you must +// specify a subnet ID in the request. +// +// * [EC2-Classic] If don't specify an Availability Zone, we choose one for +// you. +// +// * Some instance types must be launched into a VPC. If you do not have +// a default VPC, or if you do not specify a subnet ID, the request fails. +// For more information, see Instance types available only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). +// +// * [EC2-VPC] All instances have a network interface with a primary private +// IPv4 address. If you don't specify this address, we choose one from the +// IPv4 range of your subnet. +// +// * Not all instance types support IPv6 addresses. For more information, +// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). 
+// +// * If you don't specify a security group ID, we use the default security +// group. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). +// +// * If any of the AMIs have a product code attached for which the user has +// not subscribed, the request fails. +// +// You can create a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html), +// which is a resource that contains the parameters to launch an instance. When +// you launch an instance using RunInstances, you can specify the launch template +// instead of specifying the launch parameters. +// +// To ensure faster instance launches, break up large requests into smaller +// batches. For example, create five separate launch requests for 100 instances +// each instead of one launch request for 500 instances. +// +// An instance is ready for you to use when it's in the running state. You can +// check the state of your instance using DescribeInstances. You can tag instances +// and EBS volumes during launch, after launch, or both. For more information, +// see CreateTags and Tagging your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// +// Linux instances have access to the public key of the key pair at boot. You +// can use this key to provide secure access to the instance. Amazon EC2 public +// images use this feature to provide secure access without passwords. For more +// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// For troubleshooting, see What to do if an instance immediately terminates +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), +// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RunInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunInstances +func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + return out, req.Send() +} + +// RunInstancesWithContext is the same as RunInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RunInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RunInstancesWithContext(ctx aws.Context, input *RunInstancesInput, opts ...request.Option) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRunScheduledInstances = "RunScheduledInstances" + +// RunScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunScheduledInstances operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RunScheduledInstances for more information on using the RunScheduledInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RunScheduledInstancesRequest method. +// req, resp := client.RunScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunScheduledInstances +func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { + op := &request.Operation{ + Name: opRunScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunScheduledInstancesInput{} + } + + output = &RunScheduledInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// RunScheduledInstances API operation for Amazon Elastic Compute Cloud. +// +// Launches the specified Scheduled Instances. +// +// Before you can launch a Scheduled Instance, you must purchase it and obtain +// an identifier using PurchaseScheduledInstances. +// +// You must launch a Scheduled Instance during its scheduled time period. You +// can't stop or reboot a Scheduled Instance, but you can terminate it as needed. +// If you terminate a Scheduled Instance before the current scheduled time period +// ends, you can launch it again after a few minutes. 
For more information, +// see Scheduled Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RunScheduledInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunScheduledInstances +func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + return out, req.Send() +} + +// RunScheduledInstancesWithContext is the same as RunScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RunScheduledInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RunScheduledInstancesWithContext(ctx aws.Context, input *RunScheduledInstancesInput, opts ...request.Option) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSearchLocalGatewayRoutes = "SearchLocalGatewayRoutes" + +// SearchLocalGatewayRoutesRequest generates a "aws/request.Request" representing the +// client's request for the SearchLocalGatewayRoutes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SearchLocalGatewayRoutes for more information on using the SearchLocalGatewayRoutes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchLocalGatewayRoutesRequest method. +// req, resp := client.SearchLocalGatewayRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchLocalGatewayRoutes +func (c *EC2) SearchLocalGatewayRoutesRequest(input *SearchLocalGatewayRoutesInput) (req *request.Request, output *SearchLocalGatewayRoutesOutput) { + op := &request.Operation{ + Name: opSearchLocalGatewayRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &SearchLocalGatewayRoutesInput{} + } + + output = &SearchLocalGatewayRoutesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchLocalGatewayRoutes API operation for Amazon Elastic Compute Cloud. +// +// Searches for routes in the specified local gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SearchLocalGatewayRoutes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchLocalGatewayRoutes +func (c *EC2) SearchLocalGatewayRoutes(input *SearchLocalGatewayRoutesInput) (*SearchLocalGatewayRoutesOutput, error) { + req, out := c.SearchLocalGatewayRoutesRequest(input) + return out, req.Send() +} + +// SearchLocalGatewayRoutesWithContext is the same as SearchLocalGatewayRoutes with the addition of +// the ability to pass a context and additional request options. +// +// See SearchLocalGatewayRoutes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchLocalGatewayRoutesWithContext(ctx aws.Context, input *SearchLocalGatewayRoutesInput, opts ...request.Option) (*SearchLocalGatewayRoutesOutput, error) { + req, out := c.SearchLocalGatewayRoutesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SearchLocalGatewayRoutesPages iterates over the pages of a SearchLocalGatewayRoutes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchLocalGatewayRoutes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchLocalGatewayRoutes operation. 
+// pageNum := 0 +// err := client.SearchLocalGatewayRoutesPages(params, +// func(page *ec2.SearchLocalGatewayRoutesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) SearchLocalGatewayRoutesPages(input *SearchLocalGatewayRoutesInput, fn func(*SearchLocalGatewayRoutesOutput, bool) bool) error { + return c.SearchLocalGatewayRoutesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchLocalGatewayRoutesPagesWithContext same as SearchLocalGatewayRoutesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchLocalGatewayRoutesPagesWithContext(ctx aws.Context, input *SearchLocalGatewayRoutesInput, fn func(*SearchLocalGatewayRoutesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchLocalGatewayRoutesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchLocalGatewayRoutesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchLocalGatewayRoutesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opSearchTransitGatewayMulticastGroups = "SearchTransitGatewayMulticastGroups" + +// SearchTransitGatewayMulticastGroupsRequest generates a "aws/request.Request" representing the +// client's request for the SearchTransitGatewayMulticastGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See SearchTransitGatewayMulticastGroups for more information on using the SearchTransitGatewayMulticastGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchTransitGatewayMulticastGroupsRequest method. +// req, resp := client.SearchTransitGatewayMulticastGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchTransitGatewayMulticastGroups +func (c *EC2) SearchTransitGatewayMulticastGroupsRequest(input *SearchTransitGatewayMulticastGroupsInput) (req *request.Request, output *SearchTransitGatewayMulticastGroupsOutput) { + op := &request.Operation{ + Name: opSearchTransitGatewayMulticastGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &SearchTransitGatewayMulticastGroupsInput{} + } + + output = &SearchTransitGatewayMulticastGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchTransitGatewayMulticastGroups API operation for Amazon Elastic Compute Cloud. +// +// Searches one or more transit gateway multicast groups and returns the group +// membership information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SearchTransitGatewayMulticastGroups for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchTransitGatewayMulticastGroups +func (c *EC2) SearchTransitGatewayMulticastGroups(input *SearchTransitGatewayMulticastGroupsInput) (*SearchTransitGatewayMulticastGroupsOutput, error) { + req, out := c.SearchTransitGatewayMulticastGroupsRequest(input) + return out, req.Send() +} + +// SearchTransitGatewayMulticastGroupsWithContext is the same as SearchTransitGatewayMulticastGroups with the addition of +// the ability to pass a context and additional request options. +// +// See SearchTransitGatewayMulticastGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchTransitGatewayMulticastGroupsWithContext(ctx aws.Context, input *SearchTransitGatewayMulticastGroupsInput, opts ...request.Option) (*SearchTransitGatewayMulticastGroupsOutput, error) { + req, out := c.SearchTransitGatewayMulticastGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SearchTransitGatewayMulticastGroupsPages iterates over the pages of a SearchTransitGatewayMulticastGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchTransitGatewayMulticastGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchTransitGatewayMulticastGroups operation. 
+// pageNum := 0 +// err := client.SearchTransitGatewayMulticastGroupsPages(params, +// func(page *ec2.SearchTransitGatewayMulticastGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) SearchTransitGatewayMulticastGroupsPages(input *SearchTransitGatewayMulticastGroupsInput, fn func(*SearchTransitGatewayMulticastGroupsOutput, bool) bool) error { + return c.SearchTransitGatewayMulticastGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchTransitGatewayMulticastGroupsPagesWithContext same as SearchTransitGatewayMulticastGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchTransitGatewayMulticastGroupsPagesWithContext(ctx aws.Context, input *SearchTransitGatewayMulticastGroupsInput, fn func(*SearchTransitGatewayMulticastGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchTransitGatewayMulticastGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchTransitGatewayMulticastGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchTransitGatewayMulticastGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opSearchTransitGatewayRoutes = "SearchTransitGatewayRoutes" + +// SearchTransitGatewayRoutesRequest generates a "aws/request.Request" representing the +// client's request for the SearchTransitGatewayRoutes operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SearchTransitGatewayRoutes for more information on using the SearchTransitGatewayRoutes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchTransitGatewayRoutesRequest method. +// req, resp := client.SearchTransitGatewayRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchTransitGatewayRoutes +func (c *EC2) SearchTransitGatewayRoutesRequest(input *SearchTransitGatewayRoutesInput) (req *request.Request, output *SearchTransitGatewayRoutesOutput) { + op := &request.Operation{ + Name: opSearchTransitGatewayRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SearchTransitGatewayRoutesInput{} + } + + output = &SearchTransitGatewayRoutesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchTransitGatewayRoutes API operation for Amazon Elastic Compute Cloud. +// +// Searches for routes in the specified transit gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SearchTransitGatewayRoutes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SearchTransitGatewayRoutes +func (c *EC2) SearchTransitGatewayRoutes(input *SearchTransitGatewayRoutesInput) (*SearchTransitGatewayRoutesOutput, error) { + req, out := c.SearchTransitGatewayRoutesRequest(input) + return out, req.Send() +} + +// SearchTransitGatewayRoutesWithContext is the same as SearchTransitGatewayRoutes with the addition of +// the ability to pass a context and additional request options. +// +// See SearchTransitGatewayRoutes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchTransitGatewayRoutesWithContext(ctx aws.Context, input *SearchTransitGatewayRoutesInput, opts ...request.Option) (*SearchTransitGatewayRoutesOutput, error) { + req, out := c.SearchTransitGatewayRoutesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendDiagnosticInterrupt = "SendDiagnosticInterrupt" + +// SendDiagnosticInterruptRequest generates a "aws/request.Request" representing the +// client's request for the SendDiagnosticInterrupt operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendDiagnosticInterrupt for more information on using the SendDiagnosticInterrupt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the SendDiagnosticInterruptRequest method. +// req, resp := client.SendDiagnosticInterruptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput) (req *request.Request, output *SendDiagnosticInterruptOutput) { + op := &request.Operation{ + Name: opSendDiagnosticInterrupt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendDiagnosticInterruptInput{} + } + + output = &SendDiagnosticInterruptOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendDiagnosticInterrupt API operation for Amazon Elastic Compute Cloud. +// +// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger +// a kernel panic (on Linux instances), or a blue screen/stop error (on Windows +// instances). For instances based on Intel and AMD processors, the interrupt +// is received as a non-maskable interrupt (NMI). +// +// In general, the operating system crashes and reboots when a kernel panic +// or stop error is triggered. The operating system can also be configured to +// perform diagnostic tasks, such as generating a memory dump file, loading +// a secondary kernel, or obtaining a call trace. +// +// Before sending a diagnostic interrupt to your instance, ensure that its operating +// system is configured to perform the required diagnostic tasks. 
+// +// For more information about configuring your operating system to generate +// a crash dump when a kernel panic or stop error occurs, see Send a diagnostic +// interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) +// (Windows instances). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SendDiagnosticInterrupt for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterrupt(input *SendDiagnosticInterruptInput) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + return out, req.Send() +} + +// SendDiagnosticInterruptWithContext is the same as SendDiagnosticInterrupt with the addition of +// the ability to pass a context and additional request options. +// +// See SendDiagnosticInterrupt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SendDiagnosticInterruptWithContext(ctx aws.Context, input *SendDiagnosticInterruptInput, opts ...request.Option) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartInstances = "StartInstances" + +// StartInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StartInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartInstances for more information on using the StartInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartInstancesRequest method. +// req, resp := client.StartInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartInstances +func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) { + op := &request.Operation{ + Name: opStartInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstancesInput{} + } + + output = &StartInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartInstances API operation for Amazon Elastic Compute Cloud. +// +// Starts an Amazon EBS-backed instance that you've previously stopped. +// +// Instances that use Amazon EBS volumes as their root devices can be quickly +// stopped and started. When an instance is stopped, the compute resources are +// released and you are not billed for instance usage. However, your root partition +// Amazon EBS volume remains and continues to persist your data, and you are +// charged for Amazon EBS volume usage. 
You can restart your instance at any +// time. Every time you start your Windows instance, Amazon EC2 charges you +// for a full instance hour. If you stop and restart your Windows instance, +// a new instance hour begins and Amazon EC2 charges you for another full instance +// hour even if you are still within the same 60-minute period when it was stopped. +// Every time you start your Linux instance, Amazon EC2 charges a one-minute +// minimum for instance usage, and thereafter charges per second for instance +// usage. +// +// Before stopping an instance, make sure it is in a state from which it can +// be restarted. Stopping an instance does not preserve data stored in RAM. +// +// Performing this operation on an instance that uses an instance store as its +// root device returns an error. +// +// For more information, see Stopping instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation StartInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartInstances +func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + return out, req.Send() +} + +// StartInstancesWithContext is the same as StartInstances with the addition of +// the ability to pass a context and additional request options. +// +// See StartInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) StartInstancesWithContext(ctx aws.Context, input *StartInstancesInput, opts ...request.Option) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartVpcEndpointServicePrivateDnsVerification = "StartVpcEndpointServicePrivateDnsVerification" + +// StartVpcEndpointServicePrivateDnsVerificationRequest generates a "aws/request.Request" representing the +// client's request for the StartVpcEndpointServicePrivateDnsVerification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartVpcEndpointServicePrivateDnsVerification for more information on using the StartVpcEndpointServicePrivateDnsVerification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartVpcEndpointServicePrivateDnsVerificationRequest method. 
+// req, resp := client.StartVpcEndpointServicePrivateDnsVerificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartVpcEndpointServicePrivateDnsVerification +func (c *EC2) StartVpcEndpointServicePrivateDnsVerificationRequest(input *StartVpcEndpointServicePrivateDnsVerificationInput) (req *request.Request, output *StartVpcEndpointServicePrivateDnsVerificationOutput) { + op := &request.Operation{ + Name: opStartVpcEndpointServicePrivateDnsVerification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartVpcEndpointServicePrivateDnsVerificationInput{} + } + + output = &StartVpcEndpointServicePrivateDnsVerificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartVpcEndpointServicePrivateDnsVerification API operation for Amazon Elastic Compute Cloud. +// +// Initiates the verification process to prove that the service provider owns +// the private DNS name domain for the endpoint service. +// +// The service provider must successfully perform the verification before the +// consumer can use the name to access the service. +// +// Before the service provider runs this command, they must add a record to +// the DNS server. For more information, see Adding a TXT Record to Your Domain's +// DNS Server (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html#add-dns-txt-record) +// in the Amazon VPC User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation StartVpcEndpointServicePrivateDnsVerification for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartVpcEndpointServicePrivateDnsVerification +func (c *EC2) StartVpcEndpointServicePrivateDnsVerification(input *StartVpcEndpointServicePrivateDnsVerificationInput) (*StartVpcEndpointServicePrivateDnsVerificationOutput, error) { + req, out := c.StartVpcEndpointServicePrivateDnsVerificationRequest(input) + return out, req.Send() +} + +// StartVpcEndpointServicePrivateDnsVerificationWithContext is the same as StartVpcEndpointServicePrivateDnsVerification with the addition of +// the ability to pass a context and additional request options. +// +// See StartVpcEndpointServicePrivateDnsVerification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) StartVpcEndpointServicePrivateDnsVerificationWithContext(ctx aws.Context, input *StartVpcEndpointServicePrivateDnsVerificationInput, opts ...request.Option) (*StartVpcEndpointServicePrivateDnsVerificationOutput, error) { + req, out := c.StartVpcEndpointServicePrivateDnsVerificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopInstances = "StopInstances" + +// StopInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StopInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopInstances for more information on using the StopInstances +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopInstancesRequest method. +// req, resp := client.StopInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StopInstances +func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) { + op := &request.Operation{ + Name: opStopInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopInstancesInput{} + } + + output = &StopInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopInstances API operation for Amazon Elastic Compute Cloud. +// +// Stops an Amazon EBS-backed instance. +// +// You can use the Stop action to hibernate an instance if the instance is enabled +// for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation) +// and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// We don't charge usage for a stopped instance, or data transfer fees; however, +// your root partition Amazon EBS volume remains and continues to persist your +// data, and you are charged for Amazon EBS volume usage. Every time you start +// your Windows instance, Amazon EC2 charges you for a full instance hour. 
If +// you stop and restart your Windows instance, a new instance hour begins and +// Amazon EC2 charges you for another full instance hour even if you are still +// within the same 60-minute period when it was stopped. Every time you start +// your Linux instance, Amazon EC2 charges a one-minute minimum for instance +// usage, and thereafter charges per second for instance usage. +// +// You can't stop or hibernate instance store-backed instances. You can't use +// the Stop action to hibernate Spot Instances, but you can specify that Amazon +// EC2 should hibernate Spot Instances when they are interrupted. For more information, +// see Hibernating interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you stop or hibernate an instance, we shut it down. You can restart +// your instance at any time. Before stopping or hibernating an instance, make +// sure it is in a state from which it can be restarted. Stopping an instance +// does not preserve data stored in RAM, but hibernating an instance does preserve +// data stored in RAM. If an instance cannot hibernate successfully, a normal +// shutdown occurs. +// +// Stopping and hibernating an instance is different to rebooting or terminating +// it. For example, when you stop or hibernate an instance, the root device +// and any other devices attached to the instance persist. When you terminate +// an instance, the root device and any other devices attached during the instance +// launch are automatically deleted. For more information about the differences +// between rebooting, stopping, hibernating, and terminating instances, see +// Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// When you stop an instance, we attempt to shut it down forcibly after a short +// while. 
If your instance appears stuck in the stopping state after a period +// of time, there may be an issue with the underlying host computer. For more +// information, see Troubleshooting stopping your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation StopInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StopInstances +func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + return out, req.Send() +} + +// StopInstancesWithContext is the same as StopInstances with the addition of +// the ability to pass a context and additional request options. +// +// See StopInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) StopInstancesWithContext(ctx aws.Context, input *StopInstancesInput, opts ...request.Option) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTerminateClientVpnConnections = "TerminateClientVpnConnections" + +// TerminateClientVpnConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the TerminateClientVpnConnections operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TerminateClientVpnConnections for more information on using the TerminateClientVpnConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TerminateClientVpnConnectionsRequest method. +// req, resp := client.TerminateClientVpnConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateClientVpnConnections +func (c *EC2) TerminateClientVpnConnectionsRequest(input *TerminateClientVpnConnectionsInput) (req *request.Request, output *TerminateClientVpnConnectionsOutput) { + op := &request.Operation{ + Name: opTerminateClientVpnConnections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateClientVpnConnectionsInput{} + } + + output = &TerminateClientVpnConnectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// TerminateClientVpnConnections API operation for Amazon Elastic Compute Cloud. +// +// Terminates active Client VPN endpoint connections. This action can be used +// to terminate a specific client connection, or up to five connections established +// by a specific user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation TerminateClientVpnConnections for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateClientVpnConnections +func (c *EC2) TerminateClientVpnConnections(input *TerminateClientVpnConnectionsInput) (*TerminateClientVpnConnectionsOutput, error) { + req, out := c.TerminateClientVpnConnectionsRequest(input) + return out, req.Send() +} + +// TerminateClientVpnConnectionsWithContext is the same as TerminateClientVpnConnections with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateClientVpnConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) TerminateClientVpnConnectionsWithContext(ctx aws.Context, input *TerminateClientVpnConnectionsInput, opts ...request.Option) (*TerminateClientVpnConnectionsOutput, error) { + req, out := c.TerminateClientVpnConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTerminateInstances = "TerminateInstances" + +// TerminateInstancesRequest generates a "aws/request.Request" representing the +// client's request for the TerminateInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TerminateInstances for more information on using the TerminateInstances +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TerminateInstancesRequest method. +// req, resp := client.TerminateInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateInstances +func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { + op := &request.Operation{ + Name: opTerminateInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateInstancesInput{} + } + + output = &TerminateInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// TerminateInstances API operation for Amazon Elastic Compute Cloud. +// +// Shuts down the specified instances. This operation is idempotent; if you +// terminate an instance more than once, each call succeeds. +// +// If you specify multiple instances and the request fails (for example, because +// of a single incorrect instance ID), none of the instances are terminated. +// +// Terminated instances remain visible after termination (for approximately +// one hour). +// +// By default, Amazon EC2 deletes all EBS volumes that were attached when the +// instance launched. Volumes attached after instance launch continue running. +// +// You can stop, start, and terminate EBS-backed instances. You can only terminate +// instance store-backed instances. What happens to an instance differs if you +// stop it or terminate it. For example, when you stop an instance, the root +// device and any other devices attached to the instance persist. When you terminate +// an instance, any attached EBS volumes with the DeleteOnTermination block +// device mapping parameter set to true are automatically deleted. 
For more +// information about the differences between stopping and terminating instances, +// see Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For more information about troubleshooting, see Troubleshooting terminating +// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation TerminateInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateInstances +func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + return out, req.Send() +} + +// TerminateInstancesWithContext is the same as TerminateInstances with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) TerminateInstancesWithContext(ctx aws.Context, input *TerminateInstancesInput, opts ...request.Option) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUnassignIpv6Addresses = "UnassignIpv6Addresses" + +// UnassignIpv6AddressesRequest generates a "aws/request.Request" representing the +// client's request for the UnassignIpv6Addresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UnassignIpv6Addresses for more information on using the UnassignIpv6Addresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UnassignIpv6AddressesRequest method. +// req, resp := client.UnassignIpv6AddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignIpv6Addresses +func (c *EC2) UnassignIpv6AddressesRequest(input *UnassignIpv6AddressesInput) (req *request.Request, output *UnassignIpv6AddressesOutput) { + op := &request.Operation{ + Name: opUnassignIpv6Addresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignIpv6AddressesInput{} + } + + output = &UnassignIpv6AddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// UnassignIpv6Addresses API operation for Amazon Elastic Compute Cloud. +// +// Unassigns one or more IPv6 addresses from a network interface. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UnassignIpv6Addresses for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignIpv6Addresses +func (c *EC2) UnassignIpv6Addresses(input *UnassignIpv6AddressesInput) (*UnassignIpv6AddressesOutput, error) { + req, out := c.UnassignIpv6AddressesRequest(input) + return out, req.Send() +} + +// UnassignIpv6AddressesWithContext is the same as UnassignIpv6Addresses with the addition of +// the ability to pass a context and additional request options. +// +// See UnassignIpv6Addresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnassignIpv6AddressesWithContext(ctx aws.Context, input *UnassignIpv6AddressesInput, opts ...request.Option) (*UnassignIpv6AddressesOutput, error) { + req, out := c.UnassignIpv6AddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" + +// UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the UnassignPrivateIpAddresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UnassignPrivateIpAddresses for more information on using the UnassignPrivateIpAddresses +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UnassignPrivateIpAddressesRequest method. +// req, resp := client.UnassignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateIpAddresses +func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { + op := &request.Operation{ + Name: opUnassignPrivateIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnassignPrivateIpAddressesInput{} + } + + output = &UnassignPrivateIpAddressesOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UnassignPrivateIpAddresses API operation for Amazon Elastic Compute Cloud. +// +// Unassigns one or more secondary private IP addresses from a network interface. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UnassignPrivateIpAddresses for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateIpAddresses +func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + return out, req.Send() +} + +// UnassignPrivateIpAddressesWithContext is the same as UnassignPrivateIpAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See UnassignPrivateIpAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnassignPrivateIpAddressesWithContext(ctx aws.Context, input *UnassignPrivateIpAddressesInput, opts ...request.Option) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUnmonitorInstances = "UnmonitorInstances" + +// UnmonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the UnmonitorInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UnmonitorInstances for more information on using the UnmonitorInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UnmonitorInstancesRequest method. +// req, resp := client.UnmonitorInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnmonitorInstances +func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { + op := &request.Operation{ + Name: opUnmonitorInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnmonitorInstancesInput{} + } + + output = &UnmonitorInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// UnmonitorInstances API operation for Amazon Elastic Compute Cloud. +// +// Disables detailed monitoring for a running instance. For more information, +// see Monitoring your instances and volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UnmonitorInstances for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnmonitorInstances +func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + return out, req.Send() +} + +// UnmonitorInstancesWithContext is the same as UnmonitorInstances with the addition of +// the ability to pass a context and additional request options. +// +// See UnmonitorInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnmonitorInstancesWithContext(ctx aws.Context, input *UnmonitorInstancesInput, opts ...request.Option) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecurityGroupRuleDescriptionsEgress = "UpdateSecurityGroupRuleDescriptionsEgress" + +// UpdateSecurityGroupRuleDescriptionsEgressRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecurityGroupRuleDescriptionsEgress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecurityGroupRuleDescriptionsEgress for more information on using the UpdateSecurityGroupRuleDescriptionsEgress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecurityGroupRuleDescriptionsEgressRequest method. 
+// req, resp := client.UpdateSecurityGroupRuleDescriptionsEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgressRequest(input *UpdateSecurityGroupRuleDescriptionsEgressInput) (req *request.Request, output *UpdateSecurityGroupRuleDescriptionsEgressOutput) { + op := &request.Operation{ + Name: opUpdateSecurityGroupRuleDescriptionsEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecurityGroupRuleDescriptionsEgressInput{} + } + + output = &UpdateSecurityGroupRuleDescriptionsEgressOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecurityGroupRuleDescriptionsEgress API operation for Amazon Elastic Compute Cloud. +// +// [VPC only] Updates the description of an egress (outbound) security group +// rule. You can replace an existing description, or add a description to a +// rule that did not have one previously. +// +// You specify the description as part of the IP permissions structure. You +// can remove a description for a security group rule by omitting the description +// parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UpdateSecurityGroupRuleDescriptionsEgress for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgress(input *UpdateSecurityGroupRuleDescriptionsEgressInput) (*UpdateSecurityGroupRuleDescriptionsEgressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsEgressRequest(input) + return out, req.Send() +} + +// UpdateSecurityGroupRuleDescriptionsEgressWithContext is the same as UpdateSecurityGroupRuleDescriptionsEgress with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecurityGroupRuleDescriptionsEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgressWithContext(ctx aws.Context, input *UpdateSecurityGroupRuleDescriptionsEgressInput, opts ...request.Option) (*UpdateSecurityGroupRuleDescriptionsEgressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecurityGroupRuleDescriptionsIngress = "UpdateSecurityGroupRuleDescriptionsIngress" + +// UpdateSecurityGroupRuleDescriptionsIngressRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecurityGroupRuleDescriptionsIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateSecurityGroupRuleDescriptionsIngress for more information on using the UpdateSecurityGroupRuleDescriptionsIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecurityGroupRuleDescriptionsIngressRequest method. +// req, resp := client.UpdateSecurityGroupRuleDescriptionsIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressRequest(input *UpdateSecurityGroupRuleDescriptionsIngressInput) (req *request.Request, output *UpdateSecurityGroupRuleDescriptionsIngressOutput) { + op := &request.Operation{ + Name: opUpdateSecurityGroupRuleDescriptionsIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecurityGroupRuleDescriptionsIngressInput{} + } + + output = &UpdateSecurityGroupRuleDescriptionsIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecurityGroupRuleDescriptionsIngress API operation for Amazon Elastic Compute Cloud. +// +// Updates the description of an ingress (inbound) security group rule. You +// can replace an existing description, or add a description to a rule that +// did not have one previously. +// +// You specify the description as part of the IP permissions structure. You +// can remove a description for a security group rule by omitting the description +// parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UpdateSecurityGroupRuleDescriptionsIngress for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngress(input *UpdateSecurityGroupRuleDescriptionsIngressInput) (*UpdateSecurityGroupRuleDescriptionsIngressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsIngressRequest(input) + return out, req.Send() +} + +// UpdateSecurityGroupRuleDescriptionsIngressWithContext is the same as UpdateSecurityGroupRuleDescriptionsIngress with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecurityGroupRuleDescriptionsIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressWithContext(ctx aws.Context, input *UpdateSecurityGroupRuleDescriptionsIngressInput, opts ...request.Option) (*UpdateSecurityGroupRuleDescriptionsIngressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opWithdrawByoipCidr = "WithdrawByoipCidr" + +// WithdrawByoipCidrRequest generates a "aws/request.Request" representing the +// client's request for the WithdrawByoipCidr operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See WithdrawByoipCidr for more information on using the WithdrawByoipCidr +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the WithdrawByoipCidrRequest method. +// req, resp := client.WithdrawByoipCidrRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/WithdrawByoipCidr +func (c *EC2) WithdrawByoipCidrRequest(input *WithdrawByoipCidrInput) (req *request.Request, output *WithdrawByoipCidrOutput) { + op := &request.Operation{ + Name: opWithdrawByoipCidr, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &WithdrawByoipCidrInput{} + } + + output = &WithdrawByoipCidrOutput{} + req = c.newRequest(op, input, output) + return +} + +// WithdrawByoipCidr API operation for Amazon Elastic Compute Cloud. +// +// Stops advertising an address range that is provisioned as an address pool. +// +// You can perform this operation at most once every 10 seconds, even if you +// specify different address ranges each time. +// +// It can take a few minutes before traffic to the specified addresses stops +// routing to AWS because of BGP propagation delays. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation WithdrawByoipCidr for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/WithdrawByoipCidr +func (c *EC2) WithdrawByoipCidr(input *WithdrawByoipCidrInput) (*WithdrawByoipCidrOutput, error) { + req, out := c.WithdrawByoipCidrRequest(input) + return out, req.Send() +} + +// WithdrawByoipCidrWithContext is the same as WithdrawByoipCidr with the addition of +// the ability to pass a context and additional request options. +// +// See WithdrawByoipCidr for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WithdrawByoipCidrWithContext(ctx aws.Context, input *WithdrawByoipCidrInput, opts ...request.Option) (*WithdrawByoipCidrOutput, error) { + req, out := c.WithdrawByoipCidrRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Contains the parameters for accepting the quote. +type AcceptReservedInstancesExchangeQuoteInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the Convertible Reserved Instances to exchange for another Convertible + // Reserved Instance of the same or higher value. + // + // ReservedInstanceIds is a required field + ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` + + // The configuration of the target Convertible Reserved Instance to exchange + // for your current Convertible Reserved Instances. 
+ TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` +} + +// String returns the string representation +func (s AcceptReservedInstancesExchangeQuoteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptReservedInstancesExchangeQuoteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptReservedInstancesExchangeQuoteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptReservedInstancesExchangeQuoteInput"} + if s.ReservedInstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstanceIds")) + } + if s.TargetConfigurations != nil { + for i, v := range s.TargetConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptReservedInstancesExchangeQuoteInput) SetDryRun(v bool) *AcceptReservedInstancesExchangeQuoteInput { + s.DryRun = &v + return s +} + +// SetReservedInstanceIds sets the ReservedInstanceIds field's value. +func (s *AcceptReservedInstancesExchangeQuoteInput) SetReservedInstanceIds(v []*string) *AcceptReservedInstancesExchangeQuoteInput { + s.ReservedInstanceIds = v + return s +} + +// SetTargetConfigurations sets the TargetConfigurations field's value. +func (s *AcceptReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v []*TargetConfigurationRequest) *AcceptReservedInstancesExchangeQuoteInput { + s.TargetConfigurations = v + return s +} + +// The result of the exchange and whether it was successful. 
+type AcceptReservedInstancesExchangeQuoteOutput struct { + _ struct{} `type:"structure"` + + // The ID of the successful exchange. + ExchangeId *string `locationName:"exchangeId" type:"string"` +} + +// String returns the string representation +func (s AcceptReservedInstancesExchangeQuoteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptReservedInstancesExchangeQuoteOutput) GoString() string { + return s.String() +} + +// SetExchangeId sets the ExchangeId field's value. +func (s *AcceptReservedInstancesExchangeQuoteOutput) SetExchangeId(v string) *AcceptReservedInstancesExchangeQuoteOutput { + s.ExchangeId = &v + return s +} + +type AcceptTransitGatewayPeeringAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AcceptTransitGatewayPeeringAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptTransitGatewayPeeringAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AcceptTransitGatewayPeeringAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptTransitGatewayPeeringAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptTransitGatewayPeeringAttachmentInput) SetDryRun(v bool) *AcceptTransitGatewayPeeringAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *AcceptTransitGatewayPeeringAttachmentInput) SetTransitGatewayAttachmentId(v string) *AcceptTransitGatewayPeeringAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type AcceptTransitGatewayPeeringAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The transit gateway peering attachment. + TransitGatewayPeeringAttachment *TransitGatewayPeeringAttachment `locationName:"transitGatewayPeeringAttachment" type:"structure"` +} + +// String returns the string representation +func (s AcceptTransitGatewayPeeringAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptTransitGatewayPeeringAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPeeringAttachment sets the TransitGatewayPeeringAttachment field's value. +func (s *AcceptTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAttachment(v *TransitGatewayPeeringAttachment) *AcceptTransitGatewayPeeringAttachmentOutput { + s.TransitGatewayPeeringAttachment = v + return s +} + +type AcceptTransitGatewayVpcAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AcceptTransitGatewayVpcAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptTransitGatewayVpcAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptTransitGatewayVpcAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptTransitGatewayVpcAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptTransitGatewayVpcAttachmentInput) SetDryRun(v bool) *AcceptTransitGatewayVpcAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *AcceptTransitGatewayVpcAttachmentInput) SetTransitGatewayAttachmentId(v string) *AcceptTransitGatewayVpcAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type AcceptTransitGatewayVpcAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The VPC attachment. 
+ TransitGatewayVpcAttachment *TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachment" type:"structure"` +} + +// String returns the string representation +func (s AcceptTransitGatewayVpcAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptTransitGatewayVpcAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayVpcAttachment sets the TransitGatewayVpcAttachment field's value. +func (s *AcceptTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment(v *TransitGatewayVpcAttachment) *AcceptTransitGatewayVpcAttachmentOutput { + s.TransitGatewayVpcAttachment = v + return s +} + +type AcceptVpcEndpointConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the VPC endpoint service. + // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` + + // The IDs of one or more interface VPC endpoints. + // + // VpcEndpointIds is a required field + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s AcceptVpcEndpointConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcEndpointConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AcceptVpcEndpointConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptVpcEndpointConnectionsInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + if s.VpcEndpointIds == nil { + invalidParams.Add(request.NewErrParamRequired("VpcEndpointIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptVpcEndpointConnectionsInput) SetDryRun(v bool) *AcceptVpcEndpointConnectionsInput { + s.DryRun = &v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *AcceptVpcEndpointConnectionsInput) SetServiceId(v string) *AcceptVpcEndpointConnectionsInput { + s.ServiceId = &v + return s +} + +// SetVpcEndpointIds sets the VpcEndpointIds field's value. +func (s *AcceptVpcEndpointConnectionsInput) SetVpcEndpointIds(v []*string) *AcceptVpcEndpointConnectionsInput { + s.VpcEndpointIds = v + return s +} + +type AcceptVpcEndpointConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the interface endpoints that were not accepted, if applicable. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AcceptVpcEndpointConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcEndpointConnectionsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. 
+func (s *AcceptVpcEndpointConnectionsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *AcceptVpcEndpointConnectionsOutput { + s.Unsuccessful = v + return s +} + +type AcceptVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. You must specify this parameter in + // the request. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *AcceptVpcPeeringConnectionInput) SetDryRun(v bool) *AcceptVpcPeeringConnectionInput { + s.DryRun = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *AcceptVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *AcceptVpcPeeringConnectionInput { + s.VpcPeeringConnectionId = &v + return s +} + +type AcceptVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. 
+ VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s AcceptVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// SetVpcPeeringConnection sets the VpcPeeringConnection field's value. +func (s *AcceptVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeeringConnection) *AcceptVpcPeeringConnectionOutput { + s.VpcPeeringConnection = v + return s +} + +// Describes an account attribute. +type AccountAttribute struct { + _ struct{} `type:"structure"` + + // The name of the account attribute. + AttributeName *string `locationName:"attributeName" type:"string"` + + // The values for the account attribute. + AttributeValues []*AccountAttributeValue `locationName:"attributeValueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AccountAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttribute) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AccountAttribute) SetAttributeName(v string) *AccountAttribute { + s.AttributeName = &v + return s +} + +// SetAttributeValues sets the AttributeValues field's value. +func (s *AccountAttribute) SetAttributeValues(v []*AccountAttributeValue) *AccountAttribute { + s.AttributeValues = v + return s +} + +// Describes a value of an account attribute. +type AccountAttributeValue struct { + _ struct{} `type:"structure"` + + // The value of the attribute. 
+ AttributeValue *string `locationName:"attributeValue" type:"string"` +} + +// String returns the string representation +func (s AccountAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAttributeValue) GoString() string { + return s.String() +} + +// SetAttributeValue sets the AttributeValue field's value. +func (s *AccountAttributeValue) SetAttributeValue(v string) *AccountAttributeValue { + s.AttributeValue = &v + return s +} + +// Describes a running instance in a Spot Fleet. +type ActiveInstance struct { + _ struct{} `type:"structure"` + + // The health status of the instance. If the status of either the instance status + // check or the system status check is impaired, the health status of the instance + // is unhealthy. Otherwise, the health status is healthy. + InstanceHealth *string `locationName:"instanceHealth" type:"string" enum:"InstanceHealthStatus"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the Spot Instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` +} + +// String returns the string representation +func (s ActiveInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveInstance) GoString() string { + return s.String() +} + +// SetInstanceHealth sets the InstanceHealth field's value. +func (s *ActiveInstance) SetInstanceHealth(v string) *ActiveInstance { + s.InstanceHealth = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ActiveInstance) SetInstanceId(v string) *ActiveInstance { + s.InstanceId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. 
+func (s *ActiveInstance) SetInstanceType(v string) *ActiveInstance { + s.InstanceType = &v + return s +} + +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value. +func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance { + s.SpotInstanceRequestId = &v + return s +} + +// An entry for a prefix list. +type AddPrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // A description for the entry. + // + // Constraints: Up to 255 characters in length. + Description *string `type:"string"` +} + +// String returns the string representation +func (s AddPrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddPrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *AddPrefixListEntry) SetCidr(v string) *AddPrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AddPrefixListEntry) SetDescription(v string) *AddPrefixListEntry { + s.Description = &v + return s +} + +// Describes an Elastic IP address, or a carrier IP address. +type Address struct { + _ struct{} `type:"structure"` + + // The ID representing the allocation of the address for use with EC2-VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID representing the association of the address with an instance in a + // VPC. 
+ AssociationId *string `locationName:"associationId" type:"string"` + + // The carrier IP address associated. This option is only available for network + // interfaces which reside in a subnet in a Wavelength Zone (for example an + // EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + + // The customer-owned IP address. + CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` + + // The ID of the customer-owned address pool. + CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` + + // Indicates whether this Elastic IP address is for use with instances in EC2-Classic + // (standard) or instances in a VPC (vpc). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The ID of the instance that the address is associated with (if any). + InstanceId *string `locationName:"instanceId" type:"string"` + + // The name of the unique set of Availability Zones, Local Zones, or Wavelength + // Zones from which AWS advertises IP addresses. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the AWS account that owns the network interface. + NetworkInterfaceOwnerId *string `locationName:"networkInterfaceOwnerId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The ID of an address pool. + PublicIpv4Pool *string `locationName:"publicIpv4Pool" type:"string"` + + // Any tags assigned to the Elastic IP address. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Address) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *Address) SetAllocationId(v string) *Address { + s.AllocationId = &v + return s +} + +// SetAssociationId sets the AssociationId field's value. +func (s *Address) SetAssociationId(v string) *Address { + s.AssociationId = &v + return s +} + +// SetCarrierIp sets the CarrierIp field's value. +func (s *Address) SetCarrierIp(v string) *Address { + s.CarrierIp = &v + return s +} + +// SetCustomerOwnedIp sets the CustomerOwnedIp field's value. +func (s *Address) SetCustomerOwnedIp(v string) *Address { + s.CustomerOwnedIp = &v + return s +} + +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *Address) SetCustomerOwnedIpv4Pool(v string) *Address { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *Address) SetDomain(v string) *Address { + s.Domain = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *Address) SetInstanceId(v string) *Address { + s.InstanceId = &v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *Address) SetNetworkBorderGroup(v string) *Address { + s.NetworkBorderGroup = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *Address) SetNetworkInterfaceId(v string) *Address { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkInterfaceOwnerId sets the NetworkInterfaceOwnerId field's value. +func (s *Address) SetNetworkInterfaceOwnerId(v string) *Address { + s.NetworkInterfaceOwnerId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. 
+func (s *Address) SetPrivateIpAddress(v string) *Address { + s.PrivateIpAddress = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *Address) SetPublicIp(v string) *Address { + s.PublicIp = &v + return s +} + +// SetPublicIpv4Pool sets the PublicIpv4Pool field's value. +func (s *Address) SetPublicIpv4Pool(v string) *Address { + s.PublicIpv4Pool = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Address) SetTags(v []*Tag) *Address { + s.Tags = v + return s +} + +type AdvertiseByoipCidrInput struct { + _ struct{} `type:"structure"` + + // The address range, in CIDR notation. This must be the exact range that you + // provisioned. You can't advertise only a portion of the provisioned range. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s AdvertiseByoipCidrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvertiseByoipCidrInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdvertiseByoipCidrInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvertiseByoipCidrInput"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *AdvertiseByoipCidrInput) SetCidr(v string) *AdvertiseByoipCidrInput { + s.Cidr = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *AdvertiseByoipCidrInput) SetDryRun(v bool) *AdvertiseByoipCidrInput { + s.DryRun = &v + return s +} + +type AdvertiseByoipCidrOutput struct { + _ struct{} `type:"structure"` + + // Information about the address range. + ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` +} + +// String returns the string representation +func (s AdvertiseByoipCidrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvertiseByoipCidrOutput) GoString() string { + return s.String() +} + +// SetByoipCidr sets the ByoipCidr field's value. +func (s *AdvertiseByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *AdvertiseByoipCidrOutput { + s.ByoipCidr = v + return s +} + +type AllocateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address + // pool. + Address *string `type:"string"` + + // The ID of a customer-owned address pool. Use this parameter to let Amazon + // EC2 select an address from the address pool. Alternatively, specify a specific + // address from the address pool. + CustomerOwnedIpv4Pool *string `type:"string"` + + // Indicates whether the Elastic IP address is for use with instances in a VPC + // or instances in EC2-Classic. + // + // Default: If the Region supports EC2-Classic, the default is standard. Otherwise, + // the default is vpc. + Domain *string `type:"string" enum:"DomainType"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique set of Availability Zones, Local Zones, or Wavelength Zones from + // which AWS advertises IP addresses. Use this parameter to limit the IP address + // to this location. 
IP addresses cannot move between network border groups. + // + // Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) + // to view the network border groups. + // + // You cannot use a network border group with EC2 Classic. If you attempt this + // operation on EC2 classic, you will receive an InvalidParameterCombination + // error. For more information, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). + NetworkBorderGroup *string `type:"string"` + + // The ID of an address pool that you own. Use this parameter to let Amazon + // EC2 select an address from the address pool. To specify a specific address + // from the address pool, use the Address parameter instead. + PublicIpv4Pool *string `type:"string"` +} + +// String returns the string representation +func (s AllocateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressInput) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *AllocateAddressInput) SetAddress(v string) *AllocateAddressInput { + s.Address = &v + return s +} + +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *AllocateAddressInput) SetCustomerOwnedIpv4Pool(v string) *AllocateAddressInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *AllocateAddressInput) SetDomain(v string) *AllocateAddressInput { + s.Domain = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AllocateAddressInput) SetDryRun(v bool) *AllocateAddressInput { + s.DryRun = &v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. 
+func (s *AllocateAddressInput) SetNetworkBorderGroup(v string) *AllocateAddressInput { + s.NetworkBorderGroup = &v + return s +} + +// SetPublicIpv4Pool sets the PublicIpv4Pool field's value. +func (s *AllocateAddressInput) SetPublicIpv4Pool(v string) *AllocateAddressInput { + s.PublicIpv4Pool = &v + return s +} + +type AllocateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that AWS assigns to represent the allocation of the Elastic + // IP address for use with instances in a VPC. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The carrier IP address. This option is only available for network interfaces + // which reside in a subnet in a Wavelength Zone (for example an EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + + // The customer-owned IP address. + CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` + + // The ID of the customer-owned address pool. + CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` + + // Indicates whether the Elastic IP address is for use with instances in a VPC + // (vpc) or instances in EC2-Classic (standard). + Domain *string `locationName:"domain" type:"string" enum:"DomainType"` + + // The set of Availability Zones, Local Zones, or Wavelength Zones from which + // AWS advertises IP addresses. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` + + // The ID of an address pool. + PublicIpv4Pool *string `locationName:"publicIpv4Pool" type:"string"` +} + +// String returns the string representation +func (s AllocateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateAddressOutput) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. 
+func (s *AllocateAddressOutput) SetAllocationId(v string) *AllocateAddressOutput { + s.AllocationId = &v + return s +} + +// SetCarrierIp sets the CarrierIp field's value. +func (s *AllocateAddressOutput) SetCarrierIp(v string) *AllocateAddressOutput { + s.CarrierIp = &v + return s +} + +// SetCustomerOwnedIp sets the CustomerOwnedIp field's value. +func (s *AllocateAddressOutput) SetCustomerOwnedIp(v string) *AllocateAddressOutput { + s.CustomerOwnedIp = &v + return s +} + +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *AllocateAddressOutput) SetCustomerOwnedIpv4Pool(v string) *AllocateAddressOutput { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *AllocateAddressOutput) SetDomain(v string) *AllocateAddressOutput { + s.Domain = &v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *AllocateAddressOutput) SetNetworkBorderGroup(v string) *AllocateAddressOutput { + s.NetworkBorderGroup = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *AllocateAddressOutput) SetPublicIp(v string) *AllocateAddressOutput { + s.PublicIp = &v + return s +} + +// SetPublicIpv4Pool sets the PublicIpv4Pool field's value. +func (s *AllocateAddressOutput) SetPublicIpv4Pool(v string) *AllocateAddressOutput { + s.PublicIpv4Pool = &v + return s +} + +type AllocateHostsInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the host accepts any untargeted instance launches that + // match its instance type configuration, or if it only accepts Host tenancy + // instance launches that specify its unique host ID. For more information, + // see Understanding Instance Placement and Host Affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding) + // in the Amazon EC2 User Guide for Linux Instances. 
+ // + // Default: on + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone in which to allocate the Dedicated Host. + // + // AvailabilityZone is a required field + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether to enable or disable host recovery for the Dedicated Host. + // Host recovery is disabled by default. For more information, see Host Recovery + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: off + HostRecovery *string `type:"string" enum:"HostRecovery"` + + // Specifies the instance family to be supported by the Dedicated Hosts. If + // you specify an instance family, the Dedicated Hosts support multiple instance + // types within that instance family. + // + // If you want the Dedicated Hosts to support a specific instance type only, + // omit this parameter and specify InstanceType instead. You cannot specify + // InstanceFamily and InstanceType in the same request. + InstanceFamily *string `type:"string"` + + // Specifies the instance type to be supported by the Dedicated Hosts. If you + // specify an instance type, the Dedicated Hosts support instances of the specified + // instance type only. + // + // If you want the Dedicated Hosts to support multiple instance types in a specific + // instance family, omit this parameter and specify InstanceFamily instead. + // You cannot specify InstanceType and InstanceFamily in the same request. 
+ InstanceType *string `locationName:"instanceType" type:"string"` + + // The number of Dedicated Hosts to allocate to your account with these parameters. + // + // Quantity is a required field + Quantity *int64 `locationName:"quantity" type:"integer" required:"true"` + + // The tags to apply to the Dedicated Host during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AllocateHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocateHostsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocateHostsInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoPlacement sets the AutoPlacement field's value. +func (s *AllocateHostsInput) SetAutoPlacement(v string) *AllocateHostsInput { + s.AutoPlacement = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *AllocateHostsInput) SetAvailabilityZone(v string) *AllocateHostsInput { + s.AvailabilityZone = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *AllocateHostsInput) SetClientToken(v string) *AllocateHostsInput { + s.ClientToken = &v + return s +} + +// SetHostRecovery sets the HostRecovery field's value. +func (s *AllocateHostsInput) SetHostRecovery(v string) *AllocateHostsInput { + s.HostRecovery = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. 
+func (s *AllocateHostsInput) SetInstanceFamily(v string) *AllocateHostsInput { + s.InstanceFamily = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *AllocateHostsInput) SetInstanceType(v string) *AllocateHostsInput { + s.InstanceType = &v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *AllocateHostsInput) SetQuantity(v int64) *AllocateHostsInput { + s.Quantity = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *AllocateHostsInput) SetTagSpecifications(v []*TagSpecification) *AllocateHostsInput { + s.TagSpecifications = v + return s +} + +// Contains the output of AllocateHosts. +type AllocateHostsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the allocated Dedicated Host. This is used to launch an instance + // onto a specific host. + HostIds []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s AllocateHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostsOutput) GoString() string { + return s.String() +} + +// SetHostIds sets the HostIds field's value. +func (s *AllocateHostsOutput) SetHostIds(v []*string) *AllocateHostsOutput { + s.HostIds = v + return s +} + +// Describes a principal. +type AllowedPrincipal struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the principal. + Principal *string `locationName:"principal" type:"string"` + + // The type of principal. + PrincipalType *string `locationName:"principalType" type:"string" enum:"PrincipalType"` +} + +// String returns the string representation +func (s AllowedPrincipal) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllowedPrincipal) GoString() string { + return s.String() +} + +// SetPrincipal sets the Principal field's value. 
+func (s *AllowedPrincipal) SetPrincipal(v string) *AllowedPrincipal { + s.Principal = &v + return s +} + +// SetPrincipalType sets the PrincipalType field's value. +func (s *AllowedPrincipal) SetPrincipalType(v string) *AllowedPrincipal { + s.PrincipalType = &v + return s +} + +type ApplySecurityGroupsToClientVpnTargetNetworkInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the security groups to apply to the associated target network. + // Up to 5 security groups can be applied to an associated target network. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list" required:"true"` + + // The ID of the VPC in which the associated target network is located. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToClientVpnTargetNetworkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToClientVpnTargetNetworkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ApplySecurityGroupsToClientVpnTargetNetworkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplySecurityGroupsToClientVpnTargetNetworkInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.SecurityGroupIds == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ApplySecurityGroupsToClientVpnTargetNetworkInput) SetClientVpnEndpointId(v string) *ApplySecurityGroupsToClientVpnTargetNetworkInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ApplySecurityGroupsToClientVpnTargetNetworkInput) SetDryRun(v bool) *ApplySecurityGroupsToClientVpnTargetNetworkInput { + s.DryRun = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *ApplySecurityGroupsToClientVpnTargetNetworkInput) SetSecurityGroupIds(v []*string) *ApplySecurityGroupsToClientVpnTargetNetworkInput { + s.SecurityGroupIds = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ApplySecurityGroupsToClientVpnTargetNetworkInput) SetVpcId(v string) *ApplySecurityGroupsToClientVpnTargetNetworkInput { + s.VpcId = &v + return s +} + +type ApplySecurityGroupsToClientVpnTargetNetworkOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the applied security groups. 
+ SecurityGroupIds []*string `locationName:"securityGroupIds" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ApplySecurityGroupsToClientVpnTargetNetworkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplySecurityGroupsToClientVpnTargetNetworkOutput) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *ApplySecurityGroupsToClientVpnTargetNetworkOutput) SetSecurityGroupIds(v []*string) *ApplySecurityGroupsToClientVpnTargetNetworkOutput { + s.SecurityGroupIds = v + return s +} + +type AssignIpv6AddressesInput struct { + _ struct{} `type:"structure"` + + // The number of IPv6 addresses to assign to the network interface. Amazon EC2 + // automatically selects the IPv6 addresses from the subnet range. You can't + // use this option if specifying specific IPv6 addresses. + Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"` + + // One or more specific IPv6 addresses to be assigned to the network interface. + // You can't use this option if you're specifying a number of IPv6 addresses. + Ipv6Addresses []*string `locationName:"ipv6Addresses" locationNameList:"item" type:"list"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssignIpv6AddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignIpv6AddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssignIpv6AddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignIpv6AddressesInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *AssignIpv6AddressesInput) SetIpv6AddressCount(v int64) *AssignIpv6AddressesInput { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *AssignIpv6AddressesInput) SetIpv6Addresses(v []*string) *AssignIpv6AddressesInput { + s.Ipv6Addresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AssignIpv6AddressesInput) SetNetworkInterfaceId(v string) *AssignIpv6AddressesInput { + s.NetworkInterfaceId = &v + return s +} + +type AssignIpv6AddressesOutput struct { + _ struct{} `type:"structure"` + + // The IPv6 addresses assigned to the network interface. + AssignedIpv6Addresses []*string `locationName:"assignedIpv6Addresses" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` +} + +// String returns the string representation +func (s AssignIpv6AddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignIpv6AddressesOutput) GoString() string { + return s.String() +} + +// SetAssignedIpv6Addresses sets the AssignedIpv6Addresses field's value. +func (s *AssignIpv6AddressesOutput) SetAssignedIpv6Addresses(v []*string) *AssignIpv6AddressesOutput { + s.AssignedIpv6Addresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. 
+func (s *AssignIpv6AddressesOutput) SetNetworkInterfaceId(v string) *AssignIpv6AddressesOutput { + s.NetworkInterfaceId = &v + return s +} + +// Contains the parameters for AssignPrivateIpAddresses. +type AssignPrivateIpAddressesInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to allow an IP address that is already assigned to another + // network interface or instance to be reassigned to the specified network interface. + AllowReassignment *bool `locationName:"allowReassignment" type:"boolean"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // One or more IP addresses to be assigned as a secondary private IP address + // to the network interface. You can't specify this parameter when also specifying + // a number of secondary IP addresses. + // + // If you don't specify an IP address, Amazon EC2 automatically selects an IP + // address within the subnet range. + PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list"` + + // The number of secondary IP addresses to assign to the network interface. + // You can't specify this parameter when also specifying private IP addresses. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssignPrivateIpAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssignPrivateIpAddressesInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowReassignment sets the AllowReassignment field's value. +func (s *AssignPrivateIpAddressesInput) SetAllowReassignment(v bool) *AssignPrivateIpAddressesInput { + s.AllowReassignment = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AssignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *AssignPrivateIpAddressesInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. +func (s *AssignPrivateIpAddressesInput) SetPrivateIpAddresses(v []*string) *AssignPrivateIpAddressesInput { + s.PrivateIpAddresses = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. +func (s *AssignPrivateIpAddressesInput) SetSecondaryPrivateIpAddressCount(v int64) *AssignPrivateIpAddressesInput { + s.SecondaryPrivateIpAddressCount = &v + return s +} + +type AssignPrivateIpAddressesOutput struct { + _ struct{} `type:"structure"` + + // The private IP addresses assigned to the network interface. + AssignedPrivateIpAddresses []*AssignedPrivateIpAddress `locationName:"assignedPrivateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. 
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` +} + +// String returns the string representation +func (s AssignPrivateIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignPrivateIpAddressesOutput) GoString() string { + return s.String() +} + +// SetAssignedPrivateIpAddresses sets the AssignedPrivateIpAddresses field's value. +func (s *AssignPrivateIpAddressesOutput) SetAssignedPrivateIpAddresses(v []*AssignedPrivateIpAddress) *AssignPrivateIpAddressesOutput { + s.AssignedPrivateIpAddresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AssignPrivateIpAddressesOutput) SetNetworkInterfaceId(v string) *AssignPrivateIpAddressesOutput { + s.NetworkInterfaceId = &v + return s +} + +// Describes the private IP addresses assigned to a network interface. +type AssignedPrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The private IP address assigned to the network interface. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s AssignedPrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssignedPrivateIpAddress) GoString() string { + return s.String() +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *AssignedPrivateIpAddress) SetPrivateIpAddress(v string) *AssignedPrivateIpAddress { + s.PrivateIpAddress = &v + return s +} + +type AssociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. This is required for EC2-VPC. 
+ AllocationId *string `type:"string"` + + // [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic + // IP address that is already associated with an instance or network interface + // to be reassociated with the specified instance or network interface. Otherwise, + // the operation fails. In a VPC in an EC2-VPC-only account, reassociation is + // automatic, therefore you can specify false to ensure the operation fails + // if the Elastic IP address is already associated with another resource. + AllowReassociation *bool `locationName:"allowReassociation" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. This is required for EC2-Classic. For EC2-VPC, you + // can specify either the instance ID or the network interface ID, but not both. + // The operation fails if you specify an instance ID unless exactly one network + // interface is attached. + InstanceId *string `type:"string"` + + // [EC2-VPC] The ID of the network interface. If the instance has more than + // one network interface, you must specify a network interface ID. + // + // For EC2-VPC, you can specify either the instance ID or the network interface + // ID, but not both. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // [EC2-VPC] The primary or secondary private IP address to associate with the + // Elastic IP address. If no private IP address is specified, the Elastic IP + // address is associated with the primary private IP address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The Elastic IP address to associate with the instance. This is required for + // EC2-Classic. 
+ PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s AssociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressInput) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *AssociateAddressInput) SetAllocationId(v string) *AssociateAddressInput { + s.AllocationId = &v + return s +} + +// SetAllowReassociation sets the AllowReassociation field's value. +func (s *AssociateAddressInput) SetAllowReassociation(v bool) *AssociateAddressInput { + s.AllowReassociation = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateAddressInput) SetDryRun(v bool) *AssociateAddressInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AssociateAddressInput) SetInstanceId(v string) *AssociateAddressInput { + s.InstanceId = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AssociateAddressInput) SetNetworkInterfaceId(v string) *AssociateAddressInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *AssociateAddressInput) SetPrivateIpAddress(v string) *AssociateAddressInput { + s.PrivateIpAddress = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *AssociateAddressInput) SetPublicIp(v string) *AssociateAddressInput { + s.PublicIp = &v + return s +} + +type AssociateAddressOutput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The ID that represents the association of the Elastic IP address + // with an instance. 
+ AssociationId *string `locationName:"associationId" type:"string"` +} + +// String returns the string representation +func (s AssociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateAddressOutput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociateAddressOutput) SetAssociationId(v string) *AssociateAddressOutput { + s.AssociationId = &v + return s +} + +type AssociateClientVpnTargetNetworkInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the subnet to associate with the Client VPN endpoint. + // + // SubnetId is a required field + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateClientVpnTargetNetworkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateClientVpnTargetNetworkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateClientVpnTargetNetworkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateClientVpnTargetNetworkInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *AssociateClientVpnTargetNetworkInput) SetClientToken(v string) *AssociateClientVpnTargetNetworkInput { + s.ClientToken = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *AssociateClientVpnTargetNetworkInput) SetClientVpnEndpointId(v string) *AssociateClientVpnTargetNetworkInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateClientVpnTargetNetworkInput) SetDryRun(v bool) *AssociateClientVpnTargetNetworkInput { + s.DryRun = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *AssociateClientVpnTargetNetworkInput) SetSubnetId(v string) *AssociateClientVpnTargetNetworkInput { + s.SubnetId = &v + return s +} + +type AssociateClientVpnTargetNetworkOutput struct { + _ struct{} `type:"structure"` + + // The unique ID of the target network association. + AssociationId *string `locationName:"associationId" type:"string"` + + // The current state of the target network association. + Status *AssociationStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s AssociateClientVpnTargetNetworkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateClientVpnTargetNetworkOutput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. 
+func (s *AssociateClientVpnTargetNetworkOutput) SetAssociationId(v string) *AssociateClientVpnTargetNetworkOutput { + s.AssociationId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AssociateClientVpnTargetNetworkOutput) SetStatus(v *AssociationStatus) *AssociateClientVpnTargetNetworkOutput { + s.Status = v + return s +} + +type AssociateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set, or default to associate no DHCP options with + // the VPC. + // + // DhcpOptionsId is a required field + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateDhcpOptionsInput"} + if s.DhcpOptionsId == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDhcpOptionsId sets the DhcpOptionsId field's value. 
+func (s *AssociateDhcpOptionsInput) SetDhcpOptionsId(v string) *AssociateDhcpOptionsInput { + s.DhcpOptionsId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateDhcpOptionsInput) SetDryRun(v bool) *AssociateDhcpOptionsInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AssociateDhcpOptionsInput) SetVpcId(v string) *AssociateDhcpOptionsInput { + s.VpcId = &v + return s +} + +type AssociateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateDhcpOptionsOutput) GoString() string { + return s.String() +} + +type AssociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate with which to associate the IAM role. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to associate with the ACM certificate. You can associate + // up to 16 IAM roles with an ACM certificate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *AssociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type AssociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which the certificate was uploaded. + CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` + + // The Amazon S3 object key where the certificate, certificate chain, and encrypted + // private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn. + CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` + + // The ID of the AWS KMS CMK used to encrypt the private key of the certificate. 
+ EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetCertificateS3BucketName sets the CertificateS3BucketName field's value. +func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3BucketName(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.CertificateS3BucketName = &v + return s +} + +// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value. +func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3ObjectKey(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.CertificateS3ObjectKey = &v + return s +} + +// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value. +func (s *AssociateEnclaveCertificateIamRoleOutput) SetEncryptionKmsKeyId(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.EncryptionKmsKeyId = &v + return s +} + +type AssociateIamInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The IAM instance profile. + // + // IamInstanceProfile is a required field + IamInstanceProfile *IamInstanceProfileSpecification `type:"structure" required:"true"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateIamInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateIamInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateIamInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateIamInstanceProfileInput"} + if s.IamInstanceProfile == nil { + invalidParams.Add(request.NewErrParamRequired("IamInstanceProfile")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *AssociateIamInstanceProfileInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *AssociateIamInstanceProfileInput { + s.IamInstanceProfile = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AssociateIamInstanceProfileInput) SetInstanceId(v string) *AssociateIamInstanceProfileInput { + s.InstanceId = &v + return s +} + +type AssociateIamInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile association. + IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"` +} + +// String returns the string representation +func (s AssociateIamInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateIamInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value. +func (s *AssociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *AssociateIamInstanceProfileOutput { + s.IamInstanceProfileAssociation = v + return s +} + +type AssociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the internet gateway or virtual private gateway. + GatewayId *string `type:"string"` + + // The ID of the route table. + // + // RouteTableId is a required field + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s AssociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateRouteTableInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateRouteTableInput) SetDryRun(v bool) *AssociateRouteTableInput { + s.DryRun = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *AssociateRouteTableInput) SetGatewayId(v string) *AssociateRouteTableInput { + s.GatewayId = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *AssociateRouteTableInput) SetRouteTableId(v string) *AssociateRouteTableInput { + s.RouteTableId = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *AssociateRouteTableInput) SetSubnetId(v string) *AssociateRouteTableInput { + s.SubnetId = &v + return s +} + +type AssociateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // The route table association ID. 
This ID is required for disassociating the + // route table. + AssociationId *string `locationName:"associationId" type:"string"` + + // The state of the association. + AssociationState *RouteTableAssociationState `locationName:"associationState" type:"structure"` +} + +// String returns the string representation +func (s AssociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateRouteTableOutput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociateRouteTableOutput) SetAssociationId(v string) *AssociateRouteTableOutput { + s.AssociationId = &v + return s +} + +// SetAssociationState sets the AssociationState field's value. +func (s *AssociateRouteTableOutput) SetAssociationState(v *RouteTableAssociationState) *AssociateRouteTableOutput { + s.AssociationState = v + return s +} + +type AssociateSubnetCidrBlockInput struct { + _ struct{} `type:"structure"` + + // The IPv6 CIDR block for your subnet. The subnet must have a /64 prefix length. + // + // Ipv6CidrBlock is a required field + Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string" required:"true"` + + // The ID of your subnet. + // + // SubnetId is a required field + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateSubnetCidrBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateSubnetCidrBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateSubnetCidrBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateSubnetCidrBlockInput"} + if s.Ipv6CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("Ipv6CidrBlock")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *AssociateSubnetCidrBlockInput) SetIpv6CidrBlock(v string) *AssociateSubnetCidrBlockInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *AssociateSubnetCidrBlockInput) SetSubnetId(v string) *AssociateSubnetCidrBlockInput { + s.SubnetId = &v + return s +} + +type AssociateSubnetCidrBlockOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 CIDR block association. + Ipv6CidrBlockAssociation *SubnetIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s AssociateSubnetCidrBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateSubnetCidrBlockOutput) GoString() string { + return s.String() +} + +// SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. +func (s *AssociateSubnetCidrBlockOutput) SetIpv6CidrBlockAssociation(v *SubnetIpv6CidrBlockAssociation) *AssociateSubnetCidrBlockOutput { + s.Ipv6CidrBlockAssociation = v + return s +} + +// SetSubnetId sets the SubnetId field's value. 
+func (s *AssociateSubnetCidrBlockOutput) SetSubnetId(v string) *AssociateSubnetCidrBlockOutput { + s.SubnetId = &v + return s +} + +type AssociateTransitGatewayMulticastDomainInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the subnets to associate with the transit gateway multicast domain. + SubnetIds []*string `locationNameList:"item" type:"list"` + + // The ID of the transit gateway attachment to associate with the transit gateway + // multicast domain. + TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s AssociateTransitGatewayMulticastDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateTransitGatewayMulticastDomainInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateTransitGatewayMulticastDomainInput) SetDryRun(v bool) *AssociateTransitGatewayMulticastDomainInput { + s.DryRun = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *AssociateTransitGatewayMulticastDomainInput) SetSubnetIds(v []*string) *AssociateTransitGatewayMulticastDomainInput { + s.SubnetIds = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. 
+func (s *AssociateTransitGatewayMulticastDomainInput) SetTransitGatewayAttachmentId(v string) *AssociateTransitGatewayMulticastDomainInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *AssociateTransitGatewayMulticastDomainInput) SetTransitGatewayMulticastDomainId(v string) *AssociateTransitGatewayMulticastDomainInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type AssociateTransitGatewayMulticastDomainOutput struct { + _ struct{} `type:"structure"` + + // Information about the transit gateway multicast domain associations. + Associations *TransitGatewayMulticastDomainAssociations `locationName:"associations" type:"structure"` +} + +// String returns the string representation +func (s AssociateTransitGatewayMulticastDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateTransitGatewayMulticastDomainOutput) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *AssociateTransitGatewayMulticastDomainOutput) SetAssociations(v *TransitGatewayMulticastDomainAssociations) *AssociateTransitGatewayMulticastDomainOutput { + s.Associations = v + return s +} + +type AssociateTransitGatewayRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` + + // The ID of the transit gateway route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateTransitGatewayRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateTransitGatewayRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateTransitGatewayRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateTransitGatewayRouteTableInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateTransitGatewayRouteTableInput) SetDryRun(v bool) *AssociateTransitGatewayRouteTableInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *AssociateTransitGatewayRouteTableInput) SetTransitGatewayAttachmentId(v string) *AssociateTransitGatewayRouteTableInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *AssociateTransitGatewayRouteTableInput) SetTransitGatewayRouteTableId(v string) *AssociateTransitGatewayRouteTableInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type AssociateTransitGatewayRouteTableOutput struct { + _ struct{} `type:"structure"` + + // The ID of the association. 
+ Association *TransitGatewayAssociation `locationName:"association" type:"structure"` +} + +// String returns the string representation +func (s AssociateTransitGatewayRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateTransitGatewayRouteTableOutput) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *AssociateTransitGatewayRouteTableOutput) SetAssociation(v *TransitGatewayAssociation) *AssociateTransitGatewayRouteTableOutput { + s.Association = v + return s +} + +type AssociateVpcCidrBlockInput struct { + _ struct{} `type:"structure"` + + // Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for + // the VPC. You cannot specify the range of IPv6 addresses, or the size of the + // CIDR block. + AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` + + // An IPv4 CIDR block to associate with the VPC. + CidrBlock *string `type:"string"` + + // An IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool + // in the request. + // + // To let Amazon choose the IPv6 CIDR block for you, omit this parameter. + Ipv6CidrBlock *string `type:"string"` + + // The name of the location from which we advertise the IPV6 CIDR block. Use + // this parameter to limit the CIDR block to this location. + // + // You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. + // + // You can have one IPv6 CIDR block association per network border group. + Ipv6CidrBlockNetworkBorderGroup *string `type:"string"` + + // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. + Ipv6Pool *string `type:"string"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateVpcCidrBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVpcCidrBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateVpcCidrBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateVpcCidrBlockInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmazonProvidedIpv6CidrBlock sets the AmazonProvidedIpv6CidrBlock field's value. +func (s *AssociateVpcCidrBlockInput) SetAmazonProvidedIpv6CidrBlock(v bool) *AssociateVpcCidrBlockInput { + s.AmazonProvidedIpv6CidrBlock = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *AssociateVpcCidrBlockInput) SetCidrBlock(v string) *AssociateVpcCidrBlockInput { + s.CidrBlock = &v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *AssociateVpcCidrBlockInput) SetIpv6CidrBlock(v string) *AssociateVpcCidrBlockInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetIpv6CidrBlockNetworkBorderGroup sets the Ipv6CidrBlockNetworkBorderGroup field's value. +func (s *AssociateVpcCidrBlockInput) SetIpv6CidrBlockNetworkBorderGroup(v string) *AssociateVpcCidrBlockInput { + s.Ipv6CidrBlockNetworkBorderGroup = &v + return s +} + +// SetIpv6Pool sets the Ipv6Pool field's value. +func (s *AssociateVpcCidrBlockInput) SetIpv6Pool(v string) *AssociateVpcCidrBlockInput { + s.Ipv6Pool = &v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *AssociateVpcCidrBlockInput) SetVpcId(v string) *AssociateVpcCidrBlockInput { + s.VpcId = &v + return s +} + +type AssociateVpcCidrBlockOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv4 CIDR block association. + CidrBlockAssociation *VpcCidrBlockAssociation `locationName:"cidrBlockAssociation" type:"structure"` + + // Information about the IPv6 CIDR block association. + Ipv6CidrBlockAssociation *VpcIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s AssociateVpcCidrBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVpcCidrBlockOutput) GoString() string { + return s.String() +} + +// SetCidrBlockAssociation sets the CidrBlockAssociation field's value. +func (s *AssociateVpcCidrBlockOutput) SetCidrBlockAssociation(v *VpcCidrBlockAssociation) *AssociateVpcCidrBlockOutput { + s.CidrBlockAssociation = v + return s +} + +// SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. +func (s *AssociateVpcCidrBlockOutput) SetIpv6CidrBlockAssociation(v *VpcIpv6CidrBlockAssociation) *AssociateVpcCidrBlockOutput { + s.Ipv6CidrBlockAssociation = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AssociateVpcCidrBlockOutput) SetVpcId(v string) *AssociateVpcCidrBlockOutput { + s.VpcId = &v + return s +} + +// Information about the associated IAM roles. +type AssociatedRole struct { + _ struct{} `type:"structure"` + + // The ARN of the associated IAM role. + AssociatedRoleArn *string `locationName:"associatedRoleArn" min:"1" type:"string"` + + // The name of the Amazon S3 bucket in which the Amazon S3 object is stored. 
+ CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"`
+
+ // The key of the Amazon S3 object where the certificate, certificate chain,
+ // and encrypted private key bundle is stored. The object key is formatted as
+ // follows: certificate_arn/role_arn.
+ CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"`
+
+ // The ID of the KMS customer master key (CMK) used to encrypt the private key.
+ EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"`
+}
+
+// String returns the string representation
+func (s AssociatedRole) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociatedRole) GoString() string {
+ return s.String()
+}
+
+// SetAssociatedRoleArn sets the AssociatedRoleArn field's value.
+func (s *AssociatedRole) SetAssociatedRoleArn(v string) *AssociatedRole {
+ s.AssociatedRoleArn = &v
+ return s
+}
+
+// SetCertificateS3BucketName sets the CertificateS3BucketName field's value.
+func (s *AssociatedRole) SetCertificateS3BucketName(v string) *AssociatedRole {
+ s.CertificateS3BucketName = &v
+ return s
+}
+
+// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value.
+func (s *AssociatedRole) SetCertificateS3ObjectKey(v string) *AssociatedRole {
+ s.CertificateS3ObjectKey = &v
+ return s
+}
+
+// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value.
+func (s *AssociatedRole) SetEncryptionKmsKeyId(v string) *AssociatedRole {
+ s.EncryptionKmsKeyId = &v
+ return s
+}
+
+// Describes a target network that is associated with a Client VPN endpoint.
+// A target network is a subnet in a VPC.
+type AssociatedTargetNetwork struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the subnet.
+ NetworkId *string `locationName:"networkId" type:"string"`
+
+ // The target network type.
+ NetworkType *string `locationName:"networkType" type:"string" enum:"AssociatedNetworkType"` +} + +// String returns the string representation +func (s AssociatedTargetNetwork) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatedTargetNetwork) GoString() string { + return s.String() +} + +// SetNetworkId sets the NetworkId field's value. +func (s *AssociatedTargetNetwork) SetNetworkId(v string) *AssociatedTargetNetwork { + s.NetworkId = &v + return s +} + +// SetNetworkType sets the NetworkType field's value. +func (s *AssociatedTargetNetwork) SetNetworkType(v string) *AssociatedTargetNetwork { + s.NetworkType = &v + return s +} + +// Describes the state of a target network association. +type AssociationStatus struct { + _ struct{} `type:"structure"` + + // The state of the target network association. + Code *string `locationName:"code" type:"string" enum:"AssociationStatusCode"` + + // A message about the status of the target network association, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AssociationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *AssociationStatus) SetCode(v string) *AssociationStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *AssociationStatus) SetMessage(v string) *AssociationStatus { + s.Message = &v + return s +} + +type AttachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of one or more of the VPC's security groups. You cannot specify security + // groups from a different VPC. + // + // Groups is a required field + Groups []*string `locationName:"SecurityGroupId" locationNameList:"groupId" type:"list" required:"true"` + + // The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of a ClassicLink-enabled VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachClassicLinkVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachClassicLinkVpcInput"} + if s.Groups == nil { + invalidParams.Add(request.NewErrParamRequired("Groups")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AttachClassicLinkVpcInput) SetDryRun(v bool) *AttachClassicLinkVpcInput { + s.DryRun = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *AttachClassicLinkVpcInput) SetGroups(v []*string) *AttachClassicLinkVpcInput { + s.Groups = v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *AttachClassicLinkVpcInput) SetInstanceId(v string) *AttachClassicLinkVpcInput { + s.InstanceId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AttachClassicLinkVpcInput) SetVpcId(v string) *AttachClassicLinkVpcInput { + s.VpcId = &v + return s +} + +type AttachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s AttachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *AttachClassicLinkVpcOutput) SetReturn(v bool) *AttachClassicLinkVpcOutput { + s.Return = &v + return s +} + +type AttachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the internet gateway. + // + // InternetGatewayId is a required field + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AttachInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AttachInternetGatewayInput) SetDryRun(v bool) *AttachInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetInternetGatewayId sets the InternetGatewayId field's value. +func (s *AttachInternetGatewayInput) SetInternetGatewayId(v string) *AttachInternetGatewayInput { + s.InternetGatewayId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AttachInternetGatewayInput) SetVpcId(v string) *AttachInternetGatewayInput { + s.VpcId = &v + return s +} + +type AttachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachNetworkInterface. +type AttachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The index of the device for the network interface attachment. + // + // DeviceIndex is a required field + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. 
+ // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachNetworkInterfaceInput"} + if s.DeviceIndex == nil { + invalidParams.Add(request.NewErrParamRequired("DeviceIndex")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *AttachNetworkInterfaceInput) SetDeviceIndex(v int64) *AttachNetworkInterfaceInput { + s.DeviceIndex = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AttachNetworkInterfaceInput) SetDryRun(v bool) *AttachNetworkInterfaceInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *AttachNetworkInterfaceInput) SetInstanceId(v string) *AttachNetworkInterfaceInput { + s.InstanceId = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *AttachNetworkInterfaceInput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceInput { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AttachNetworkInterfaceInput) SetNetworkInterfaceId(v string) *AttachNetworkInterfaceInput { + s.NetworkInterfaceId = &v + return s +} + +// Contains the output of AttachNetworkInterface. +type AttachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` +} + +// String returns the string representation +func (s AttachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *AttachNetworkInterfaceOutput) SetAttachmentId(v string) *AttachNetworkInterfaceOutput { + s.AttachmentId = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *AttachNetworkInterfaceOutput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceOutput { + s.NetworkCardIndex = &v + return s +} + +type AttachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name (for example, /dev/sdh or xvdh). + // + // Device is a required field + Device *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The ID of the EBS volume. The volume and instance must be within the same + // Availability Zone. + // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachVolumeInput"} + if s.Device == nil { + invalidParams.Add(request.NewErrParamRequired("Device")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDevice sets the Device field's value. +func (s *AttachVolumeInput) SetDevice(v string) *AttachVolumeInput { + s.Device = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AttachVolumeInput) SetDryRun(v bool) *AttachVolumeInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AttachVolumeInput) SetInstanceId(v string) *AttachVolumeInput { + s.InstanceId = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *AttachVolumeInput) SetVolumeId(v string) *AttachVolumeInput { + s.VolumeId = &v + return s +} + +// Contains the parameters for AttachVpnGateway. 
+type AttachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + // + // VpnGatewayId is a required field + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachVpnGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *AttachVpnGatewayInput) SetDryRun(v bool) *AttachVpnGatewayInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AttachVpnGatewayInput) SetVpcId(v string) *AttachVpnGatewayInput { + s.VpcId = &v + return s +} + +// SetVpnGatewayId sets the VpnGatewayId field's value. +func (s *AttachVpnGatewayInput) SetVpnGatewayId(v string) *AttachVpnGatewayInput { + s.VpnGatewayId = &v + return s +} + +// Contains the output of AttachVpnGateway. 
+type AttachVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the attachment. + VpcAttachment *VpcAttachment `locationName:"attachment" type:"structure"` +} + +// String returns the string representation +func (s AttachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachVpnGatewayOutput) GoString() string { + return s.String() +} + +// SetVpcAttachment sets the VpcAttachment field's value. +func (s *AttachVpnGatewayOutput) SetVpcAttachment(v *VpcAttachment) *AttachVpnGatewayOutput { + s.VpcAttachment = v + return s +} + +// Describes a value for a resource attribute that is a Boolean value. +type AttributeBooleanValue struct { + _ struct{} `type:"structure"` + + // The attribute value. The valid values are true or false. + Value *bool `locationName:"value" type:"boolean"` +} + +// String returns the string representation +func (s AttributeBooleanValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeBooleanValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *AttributeBooleanValue) SetValue(v bool) *AttributeBooleanValue { + s.Value = &v + return s +} + +// Describes a value for a resource attribute that is a String. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // The attribute value. The value is case-sensitive. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *AttributeValue) SetValue(v string) *AttributeValue { + s.Value = &v + return s +} + +// Information about an authorization rule. 
+type AuthorizationRule struct { + _ struct{} `type:"structure"` + + // Indicates whether the authorization rule grants access to all clients. + AccessAll *bool `locationName:"accessAll" type:"boolean"` + + // The ID of the Client VPN endpoint with which the authorization rule is associated. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // A brief description of the authorization rule. + Description *string `locationName:"description" type:"string"` + + // The IPv4 address range, in CIDR notation, of the network to which the authorization + // rule applies. + DestinationCidr *string `locationName:"destinationCidr" type:"string"` + + // The ID of the Active Directory group to which the authorization rule grants + // access. + GroupId *string `locationName:"groupId" type:"string"` + + // The current state of the authorization rule. + Status *ClientVpnAuthorizationRuleStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s AuthorizationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizationRule) GoString() string { + return s.String() +} + +// SetAccessAll sets the AccessAll field's value. +func (s *AuthorizationRule) SetAccessAll(v bool) *AuthorizationRule { + s.AccessAll = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *AuthorizationRule) SetClientVpnEndpointId(v string) *AuthorizationRule { + s.ClientVpnEndpointId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AuthorizationRule) SetDescription(v string) *AuthorizationRule { + s.Description = &v + return s +} + +// SetDestinationCidr sets the DestinationCidr field's value. +func (s *AuthorizationRule) SetDestinationCidr(v string) *AuthorizationRule { + s.DestinationCidr = &v + return s +} + +// SetGroupId sets the GroupId field's value. 
+func (s *AuthorizationRule) SetGroupId(v string) *AuthorizationRule { + s.GroupId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AuthorizationRule) SetStatus(v *ClientVpnAuthorizationRuleStatus) *AuthorizationRule { + s.Status = v + return s +} + +type AuthorizeClientVpnIngressInput struct { + _ struct{} `type:"structure"` + + // The ID of the group to grant access to, for example, the Active Directory + // group or identity provider (IdP) group. Required if AuthorizeAllGroups is + // false or not specified. + AccessGroupId *string `type:"string"` + + // Indicates whether to grant access to all clients. Specify true to grant all + // clients who successfully establish a VPN connection access to the network. + // Must be set to true if AccessGroupId is not specified. + AuthorizeAllGroups *bool `type:"boolean"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // A brief description of the authorization rule. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IPv4 address range, in CIDR notation, of the network for which access + // is being authorized. 
+ // + // TargetNetworkCidr is a required field + TargetNetworkCidr *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeClientVpnIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeClientVpnIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthorizeClientVpnIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeClientVpnIngressInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.TargetNetworkCidr == nil { + invalidParams.Add(request.NewErrParamRequired("TargetNetworkCidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessGroupId sets the AccessGroupId field's value. +func (s *AuthorizeClientVpnIngressInput) SetAccessGroupId(v string) *AuthorizeClientVpnIngressInput { + s.AccessGroupId = &v + return s +} + +// SetAuthorizeAllGroups sets the AuthorizeAllGroups field's value. +func (s *AuthorizeClientVpnIngressInput) SetAuthorizeAllGroups(v bool) *AuthorizeClientVpnIngressInput { + s.AuthorizeAllGroups = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *AuthorizeClientVpnIngressInput) SetClientToken(v string) *AuthorizeClientVpnIngressInput { + s.ClientToken = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *AuthorizeClientVpnIngressInput) SetClientVpnEndpointId(v string) *AuthorizeClientVpnIngressInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AuthorizeClientVpnIngressInput) SetDescription(v string) *AuthorizeClientVpnIngressInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *AuthorizeClientVpnIngressInput) SetDryRun(v bool) *AuthorizeClientVpnIngressInput { + s.DryRun = &v + return s +} + +// SetTargetNetworkCidr sets the TargetNetworkCidr field's value. +func (s *AuthorizeClientVpnIngressInput) SetTargetNetworkCidr(v string) *AuthorizeClientVpnIngressInput { + s.TargetNetworkCidr = &v + return s +} + +type AuthorizeClientVpnIngressOutput struct { + _ struct{} `type:"structure"` + + // The current state of the authorization rule. + Status *ClientVpnAuthorizationRuleStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s AuthorizeClientVpnIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeClientVpnIngressOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AuthorizeClientVpnIngressOutput) SetStatus(v *ClientVpnAuthorizationRuleStatus) *AuthorizeClientVpnIngressOutput { + s.Status = v + return s +} + +type AuthorizeSecurityGroupEgressInput struct { + _ struct{} `type:"structure"` + + // Not supported. Use a set of IP permissions to specify the CIDR. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Not supported. Use a set of IP permissions to specify the port. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The ID of the security group. + // + // GroupId is a required field + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // The sets of IP permissions. You can't specify a destination security group + // and a CIDR IP address range in the same set of permissions. 
+ IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // Not supported. Use a set of IP permissions to specify the protocol name or + // number. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // Not supported. Use a set of IP permissions to specify a destination security + // group. + SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` + + // Not supported. Use a set of IP permissions to specify a destination security + // group. + SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` + + // Not supported. Use a set of IP permissions to specify the port. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthorizeSecurityGroupEgressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeSecurityGroupEgressInput"} + if s.GroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidrIp sets the CidrIp field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetCidrIp(v string) *AuthorizeSecurityGroupEgressInput { + s.CidrIp = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetDryRun(v bool) *AuthorizeSecurityGroupEgressInput { + s.DryRun = &v + return s +} + +// SetFromPort sets the FromPort field's value. 
+func (s *AuthorizeSecurityGroupEgressInput) SetFromPort(v int64) *AuthorizeSecurityGroupEgressInput { + s.FromPort = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetGroupId(v string) *AuthorizeSecurityGroupEgressInput { + s.GroupId = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetIpPermissions(v []*IpPermission) *AuthorizeSecurityGroupEgressInput { + s.IpPermissions = v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetIpProtocol(v string) *AuthorizeSecurityGroupEgressInput { + s.IpProtocol = &v + return s +} + +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetSourceSecurityGroupName(v string) *AuthorizeSecurityGroupEgressInput { + s.SourceSecurityGroupName = &v + return s +} + +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetSourceSecurityGroupOwnerId(v string) *AuthorizeSecurityGroupEgressInput { + s.SourceSecurityGroupOwnerId = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *AuthorizeSecurityGroupEgressInput) SetToPort(v int64) *AuthorizeSecurityGroupEgressInput { + s.ToPort = &v + return s +} + +type AuthorizeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +type AuthorizeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The IPv4 address range, in CIDR format. You can't specify this parameter + // when specifying a source security group. 
To specify an IPv6 address range, + // use a set of IP permissions. + // + // Alternatively, use a set of IP permissions to specify multiple rules and + // a description for the rule. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all types. If you specify all + // ICMP types, you must specify all codes. + // + // Alternatively, use a set of IP permissions to specify multiple rules and + // a description for the rule. + FromPort *int64 `type:"integer"` + + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You must specify + // either the security group ID or the security group name in the request. + GroupName *string `type:"string"` + + // The sets of IP permissions. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // To specify icmpv6, use a set of IP permissions. + // + // [VPC only] Use -1 to specify all protocols. If you specify -1 or a protocol + // other than tcp, udp, or icmp, traffic on all ports is allowed, regardless + // of any ports you specify. + // + // Alternatively, use a set of IP permissions to specify multiple rules and + // a description for the rule. 
+ IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. Creates rules that grant full ICMP, UDP, and TCP + // access. To create a rule with a specific IP protocol and port range, use + // a set of IP permissions instead. For EC2-VPC, the source security group must + // be in the same VPC. + SourceSecurityGroupName *string `type:"string"` + + // [nondefault VPC] The AWS account ID for the source security group, if the + // source security group is in a different account. You can't specify this parameter + // in combination with the following parameters: the CIDR IP address range, + // the IP protocol, the start of the port range, and the end of the port range. + // Creates rules that grant full ICMP, UDP, and TCP access. To create a rule + // with a specific IP protocol and port range, use a set of IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all codes. If you specify all + // ICMP types, you must specify all codes. + // + // Alternatively, use a set of IP permissions to specify multiple rules and + // a description for the rule. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// SetCidrIp sets the CidrIp field's value. 
+func (s *AuthorizeSecurityGroupIngressInput) SetCidrIp(v string) *AuthorizeSecurityGroupIngressInput { + s.CidrIp = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetDryRun(v bool) *AuthorizeSecurityGroupIngressInput { + s.DryRun = &v + return s +} + +// SetFromPort sets the FromPort field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetFromPort(v int64) *AuthorizeSecurityGroupIngressInput { + s.FromPort = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetGroupId(v string) *AuthorizeSecurityGroupIngressInput { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetGroupName(v string) *AuthorizeSecurityGroupIngressInput { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetIpPermissions(v []*IpPermission) *AuthorizeSecurityGroupIngressInput { + s.IpPermissions = v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetIpProtocol(v string) *AuthorizeSecurityGroupIngressInput { + s.IpProtocol = &v + return s +} + +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetSourceSecurityGroupName(v string) *AuthorizeSecurityGroupIngressInput { + s.SourceSecurityGroupName = &v + return s +} + +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value. +func (s *AuthorizeSecurityGroupIngressInput) SetSourceSecurityGroupOwnerId(v string) *AuthorizeSecurityGroupIngressInput { + s.SourceSecurityGroupOwnerId = &v + return s +} + +// SetToPort sets the ToPort field's value. 
+func (s *AuthorizeSecurityGroupIngressInput) SetToPort(v int64) *AuthorizeSecurityGroupIngressInput { + s.ToPort = &v + return s +} + +type AuthorizeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// Describes Availability Zones, Local Zones, and Wavelength Zones. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // For Availability Zones, this parameter has the same value as the Region name. + // + // For Local Zones, the name of the associated group, for example us-west-2-lax-1. + // + // For Wavelength Zones, the name of the associated group, for example us-east-1-wl1-bos-wlz-1. + GroupName *string `locationName:"groupName" type:"string"` + + // Any messages about the Availability Zone, Local Zone, or Wavelength Zone. + Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` + + // The name of the network border group. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + + // For Availability Zones, this parameter always has the value of opt-in-not-required. + // + // For Local Zones and Wavelength Zones, this parameter is the opt-in status. + // The possible values are opted-in, and not-opted-in. + OptInStatus *string `locationName:"optInStatus" type:"string" enum:"AvailabilityZoneOptInStatus"` + + // The ID of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. + ParentZoneId *string `locationName:"parentZoneId" type:"string"` + + // The name of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. 
+ ParentZoneName *string `locationName:"parentZoneName" type:"string"` + + // The name of the Region. + RegionName *string `locationName:"regionName" type:"string"` + + // The state of the Availability Zone, Local Zone, or Wavelength Zone. + State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` + + // The ID of the Availability Zone, Local Zone, or Wavelength Zone. + ZoneId *string `locationName:"zoneId" type:"string"` + + // The name of the Availability Zone, Local Zone, or Wavelength Zone. + ZoneName *string `locationName:"zoneName" type:"string"` + + // The type of zone. The valid values are availability-zone, local-zone, and + // wavelength-zone. + ZoneType *string `locationName:"zoneType" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// SetGroupName sets the GroupName field's value. +func (s *AvailabilityZone) SetGroupName(v string) *AvailabilityZone { + s.GroupName = &v + return s +} + +// SetMessages sets the Messages field's value. +func (s *AvailabilityZone) SetMessages(v []*AvailabilityZoneMessage) *AvailabilityZone { + s.Messages = v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *AvailabilityZone) SetNetworkBorderGroup(v string) *AvailabilityZone { + s.NetworkBorderGroup = &v + return s +} + +// SetOptInStatus sets the OptInStatus field's value. +func (s *AvailabilityZone) SetOptInStatus(v string) *AvailabilityZone { + s.OptInStatus = &v + return s +} + +// SetParentZoneId sets the ParentZoneId field's value. +func (s *AvailabilityZone) SetParentZoneId(v string) *AvailabilityZone { + s.ParentZoneId = &v + return s +} + +// SetParentZoneName sets the ParentZoneName field's value. 
+func (s *AvailabilityZone) SetParentZoneName(v string) *AvailabilityZone { + s.ParentZoneName = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *AvailabilityZone) SetRegionName(v string) *AvailabilityZone { + s.RegionName = &v + return s +} + +// SetState sets the State field's value. +func (s *AvailabilityZone) SetState(v string) *AvailabilityZone { + s.State = &v + return s +} + +// SetZoneId sets the ZoneId field's value. +func (s *AvailabilityZone) SetZoneId(v string) *AvailabilityZone { + s.ZoneId = &v + return s +} + +// SetZoneName sets the ZoneName field's value. +func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { + s.ZoneName = &v + return s +} + +// SetZoneType sets the ZoneType field's value. +func (s *AvailabilityZone) SetZoneType(v string) *AvailabilityZone { + s.ZoneType = &v + return s +} + +// Describes a message about an Availability Zone, Local Zone, or Wavelength +// Zone. +type AvailabilityZoneMessage struct { + _ struct{} `type:"structure"` + + // The message about the Availability Zone, Local Zone, or Wavelength Zone. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZoneMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZoneMessage) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *AvailabilityZoneMessage) SetMessage(v string) *AvailabilityZoneMessage { + s.Message = &v + return s +} + +// The capacity information for instances that can be launched onto the Dedicated +// Host. +type AvailableCapacity struct { + _ struct{} `type:"structure"` + + // The number of instances that can be launched onto the Dedicated Host depending + // on the host's available capacity. 
For Dedicated Hosts that support multiple + // instance types, this parameter represents the number of instances for each + // instance size that is supported on the host. + AvailableInstanceCapacity []*InstanceCapacity `locationName:"availableInstanceCapacity" locationNameList:"item" type:"list"` + + // The number of vCPUs available for launching instances onto the Dedicated + // Host. + AvailableVCpus *int64 `locationName:"availableVCpus" type:"integer"` +} + +// String returns the string representation +func (s AvailableCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailableCapacity) GoString() string { + return s.String() +} + +// SetAvailableInstanceCapacity sets the AvailableInstanceCapacity field's value. +func (s *AvailableCapacity) SetAvailableInstanceCapacity(v []*InstanceCapacity) *AvailableCapacity { + s.AvailableInstanceCapacity = v + return s +} + +// SetAvailableVCpus sets the AvailableVCpus field's value. +func (s *AvailableCapacity) SetAvailableVCpus(v int64) *AvailableCapacity { + s.AvailableVCpus = &v + return s +} + +type BlobAttributeValue struct { + _ struct{} `type:"structure"` + + // Value is automatically base64 encoded/decoded by the SDK. + Value []byte `locationName:"value" type:"blob"` +} + +// String returns the string representation +func (s BlobAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlobAttributeValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *BlobAttributeValue) SetValue(v []byte) *BlobAttributeValue { + s.Value = v + return s +} + +// Describes a block device mapping. +type BlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name (for example, /dev/sdh or xvdh). 
+ DeviceName *string `locationName:"deviceName" type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsBlockDevice `locationName:"ebs" type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1. The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // NVMe instance store volumes are automatically enumerated and assigned a device + // name. Including them in your block device mapping has no effect. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. + VirtualName *string `locationName:"virtualName" type:"string"` +} + +// String returns the string representation +func (s BlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockDeviceMapping) GoString() string { + return s.String() +} + +// SetDeviceName sets the DeviceName field's value. +func (s *BlockDeviceMapping) SetDeviceName(v string) *BlockDeviceMapping { + s.DeviceName = &v + return s +} + +// SetEbs sets the Ebs field's value. +func (s *BlockDeviceMapping) SetEbs(v *EbsBlockDevice) *BlockDeviceMapping { + s.Ebs = v + return s +} + +// SetNoDevice sets the NoDevice field's value. +func (s *BlockDeviceMapping) SetNoDevice(v string) *BlockDeviceMapping { + s.NoDevice = &v + return s +} + +// SetVirtualName sets the VirtualName field's value. 
+func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping { + s.VirtualName = &v + return s +} + +// Contains the parameters for BundleInstance. +type BundleInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to bundle. + // + // Type: String + // + // Default: None + // + // Required: Yes + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + // + // Storage is a required field + Storage *Storage `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BundleInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BundleInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BundleInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Storage == nil { + invalidParams.Add(request.NewErrParamRequired("Storage")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *BundleInstanceInput) SetDryRun(v bool) *BundleInstanceInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *BundleInstanceInput) SetInstanceId(v string) *BundleInstanceInput { + s.InstanceId = &v + return s +} + +// SetStorage sets the Storage field's value. +func (s *BundleInstanceInput) SetStorage(v *Storage) *BundleInstanceInput { + s.Storage = v + return s +} + +// Contains the output of BundleInstance. +type BundleInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. + BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s BundleInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleInstanceOutput) GoString() string { + return s.String() +} + +// SetBundleTask sets the BundleTask field's value. +func (s *BundleInstanceOutput) SetBundleTask(v *BundleTask) *BundleInstanceOutput { + s.BundleTask = v + return s +} + +// Describes a bundle task. +type BundleTask struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + BundleId *string `locationName:"bundleId" type:"string"` + + // If the task fails, a description of the error. + BundleTaskError *BundleTaskError `locationName:"error" type:"structure"` + + // The ID of the instance associated with this bundle task. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The level of task completion, as a percent (for example, 20%). + Progress *string `locationName:"progress" type:"string"` + + // The time this task started. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // The state of the task. + State *string `locationName:"state" type:"string" enum:"BundleTaskState"` + + // The Amazon S3 storage locations. 
+ Storage *Storage `locationName:"storage" type:"structure"` + + // The time of the most recent update for the task. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` +} + +// String returns the string representation +func (s BundleTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTask) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *BundleTask) SetBundleId(v string) *BundleTask { + s.BundleId = &v + return s +} + +// SetBundleTaskError sets the BundleTaskError field's value. +func (s *BundleTask) SetBundleTaskError(v *BundleTaskError) *BundleTask { + s.BundleTaskError = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *BundleTask) SetInstanceId(v string) *BundleTask { + s.InstanceId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *BundleTask) SetProgress(v string) *BundleTask { + s.Progress = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *BundleTask) SetStartTime(v time.Time) *BundleTask { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *BundleTask) SetState(v string) *BundleTask { + s.State = &v + return s +} + +// SetStorage sets the Storage field's value. +func (s *BundleTask) SetStorage(v *Storage) *BundleTask { + s.Storage = v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *BundleTask) SetUpdateTime(v time.Time) *BundleTask { + s.UpdateTime = &v + return s +} + +// Describes an error for BundleInstance. +type BundleTaskError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BundleTaskError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BundleTaskError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *BundleTaskError) SetCode(v string) *BundleTaskError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BundleTaskError) SetMessage(v string) *BundleTaskError { + s.Message = &v + return s +} + +// Information about an address range that is provisioned for use with your +// AWS resources through bring your own IP addresses (BYOIP). +type ByoipCidr struct { + _ struct{} `type:"structure"` + + // The address range, in CIDR notation. + Cidr *string `locationName:"cidr" type:"string"` + + // The description of the address range. + Description *string `locationName:"description" type:"string"` + + // The state of the address pool. + State *string `locationName:"state" type:"string" enum:"ByoipCidrState"` + + // Upon success, contains the ID of the address pool. Otherwise, contains an + // error message. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ByoipCidr) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByoipCidr) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *ByoipCidr) SetCidr(v string) *ByoipCidr { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ByoipCidr) SetDescription(v string) *ByoipCidr { + s.Description = &v + return s +} + +// SetState sets the State field's value. +func (s *ByoipCidr) SetState(v string) *ByoipCidr { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. 
+func (s *ByoipCidr) SetStatusMessage(v string) *ByoipCidr { + s.StatusMessage = &v + return s +} + +// Contains the parameters for CancelBundleTask. +type CancelBundleTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the bundle task. + // + // BundleId is a required field + BundleId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s CancelBundleTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelBundleTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelBundleTaskInput"} + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBundleId sets the BundleId field's value. +func (s *CancelBundleTaskInput) SetBundleId(v string) *CancelBundleTaskInput { + s.BundleId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelBundleTaskInput) SetDryRun(v bool) *CancelBundleTaskInput { + s.DryRun = &v + return s +} + +// Contains the output of CancelBundleTask. +type CancelBundleTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle task. 
+ BundleTask *BundleTask `locationName:"bundleInstanceTask" type:"structure"` +} + +// String returns the string representation +func (s CancelBundleTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelBundleTaskOutput) GoString() string { + return s.String() +} + +// SetBundleTask sets the BundleTask field's value. +func (s *CancelBundleTaskOutput) SetBundleTask(v *BundleTask) *CancelBundleTaskOutput { + s.BundleTask = v + return s +} + +type CancelCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation to be cancelled. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s CancelCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelCapacityReservationInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. 
+func (s *CancelCapacityReservationInput) SetCapacityReservationId(v string) *CancelCapacityReservationInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelCapacityReservationInput) SetDryRun(v bool) *CancelCapacityReservationInput { + s.DryRun = &v + return s +} + +type CancelCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s CancelCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *CancelCapacityReservationOutput) SetReturn(v bool) *CancelCapacityReservationOutput { + s.Return = &v + return s +} + +type CancelConversionTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. + // + // ConversionTaskId is a required field + ConversionTaskId *string `locationName:"conversionTaskId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The reason for canceling the conversion task. 
+ ReasonMessage *string `locationName:"reasonMessage" type:"string"` +} + +// String returns the string representation +func (s CancelConversionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelConversionTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelConversionTaskInput"} + if s.ConversionTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("ConversionTaskId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConversionTaskId sets the ConversionTaskId field's value. +func (s *CancelConversionTaskInput) SetConversionTaskId(v string) *CancelConversionTaskInput { + s.ConversionTaskId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelConversionTaskInput) SetDryRun(v bool) *CancelConversionTaskInput { + s.DryRun = &v + return s +} + +// SetReasonMessage sets the ReasonMessage field's value. +func (s *CancelConversionTaskInput) SetReasonMessage(v string) *CancelConversionTaskInput { + s.ReasonMessage = &v + return s +} + +type CancelConversionTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelConversionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelConversionTaskOutput) GoString() string { + return s.String() +} + +type CancelExportTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the export task. This is the ID returned by CreateInstanceExportTask. 
+ // + // ExportTaskId is a required field + ExportTaskId *string `locationName:"exportTaskId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} + if s.ExportTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("ExportTaskId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportTaskId sets the ExportTaskId field's value. +func (s *CancelExportTaskInput) SetExportTaskId(v string) *CancelExportTaskInput { + s.ExportTaskId = &v + return s +} + +type CancelExportTaskOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelExportTaskOutput) GoString() string { + return s.String() +} + +type CancelImportTaskInput struct { + _ struct{} `type:"structure"` + + // The reason for canceling the task. + CancelReason *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the import image or import snapshot task to be canceled. 
+ ImportTaskId *string `type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskInput) GoString() string { + return s.String() +} + +// SetCancelReason sets the CancelReason field's value. +func (s *CancelImportTaskInput) SetCancelReason(v string) *CancelImportTaskInput { + s.CancelReason = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelImportTaskInput) SetDryRun(v bool) *CancelImportTaskInput { + s.DryRun = &v + return s +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *CancelImportTaskInput) SetImportTaskId(v string) *CancelImportTaskInput { + s.ImportTaskId = &v + return s +} + +type CancelImportTaskOutput struct { + _ struct{} `type:"structure"` + + // The ID of the task being canceled. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The current state of the task being canceled. + PreviousState *string `locationName:"previousState" type:"string"` + + // The current state of the task being canceled. + State *string `locationName:"state" type:"string"` +} + +// String returns the string representation +func (s CancelImportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelImportTaskOutput) GoString() string { + return s.String() +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *CancelImportTaskOutput) SetImportTaskId(v string) *CancelImportTaskOutput { + s.ImportTaskId = &v + return s +} + +// SetPreviousState sets the PreviousState field's value. +func (s *CancelImportTaskOutput) SetPreviousState(v string) *CancelImportTaskOutput { + s.PreviousState = &v + return s +} + +// SetState sets the State field's value. 
+func (s *CancelImportTaskOutput) SetState(v string) *CancelImportTaskOutput { + s.State = &v + return s +} + +// Contains the parameters for CancelReservedInstancesListing. +type CancelReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance listing. + // + // ReservedInstancesListingId is a required field + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelReservedInstancesListingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelReservedInstancesListingInput"} + if s.ReservedInstancesListingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesListingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value. +func (s *CancelReservedInstancesListingInput) SetReservedInstancesListingId(v string) *CancelReservedInstancesListingInput { + s.ReservedInstancesListingId = &v + return s +} + +// Contains the output of CancelReservedInstancesListing. +type CancelReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // The Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// SetReservedInstancesListings sets the ReservedInstancesListings field's value. +func (s *CancelReservedInstancesListingOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *CancelReservedInstancesListingOutput { + s.ReservedInstancesListings = v + return s +} + +// Describes a Spot Fleet error. +type CancelSpotFleetRequestsError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" enum:"CancelBatchErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *CancelSpotFleetRequestsError) SetCode(v string) *CancelSpotFleetRequestsError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *CancelSpotFleetRequestsError) SetMessage(v string) *CancelSpotFleetRequestsError { + s.Message = &v + return s +} + +// Describes a Spot Fleet request that was not successfully canceled. +type CancelSpotFleetRequestsErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *CancelSpotFleetRequestsError `locationName:"error" type:"structure"` + + // The ID of the Spot Fleet request. 
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsErrorItem) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *CancelSpotFleetRequestsErrorItem) SetError(v *CancelSpotFleetRequestsError) *CancelSpotFleetRequestsErrorItem { + s.Error = v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *CancelSpotFleetRequestsErrorItem) SetSpotFleetRequestId(v string) *CancelSpotFleetRequestsErrorItem { + s.SpotFleetRequestId = &v + return s +} + +// Contains the parameters for CancelSpotFleetRequests. +type CancelSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the Spot Fleet requests. + // + // SpotFleetRequestIds is a required field + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to terminate instances for a Spot Fleet request if it is + // canceled successfully. 
+ // + // TerminateInstances is a required field + TerminateInstances *bool `locationName:"terminateInstances" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelSpotFleetRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelSpotFleetRequestsInput"} + if s.SpotFleetRequestIds == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestIds")) + } + if s.TerminateInstances == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelSpotFleetRequestsInput) SetDryRun(v bool) *CancelSpotFleetRequestsInput { + s.DryRun = &v + return s +} + +// SetSpotFleetRequestIds sets the SpotFleetRequestIds field's value. +func (s *CancelSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *CancelSpotFleetRequestsInput { + s.SpotFleetRequestIds = v + return s +} + +// SetTerminateInstances sets the TerminateInstances field's value. +func (s *CancelSpotFleetRequestsInput) SetTerminateInstances(v bool) *CancelSpotFleetRequestsInput { + s.TerminateInstances = &v + return s +} + +// Contains the output of CancelSpotFleetRequests. +type CancelSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Spot Fleet requests that are successfully canceled. + SuccessfulFleetRequests []*CancelSpotFleetRequestsSuccessItem `locationName:"successfulFleetRequestSet" locationNameList:"item" type:"list"` + + // Information about the Spot Fleet requests that are not successfully canceled. 
+ UnsuccessfulFleetRequests []*CancelSpotFleetRequestsErrorItem `locationName:"unsuccessfulFleetRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// SetSuccessfulFleetRequests sets the SuccessfulFleetRequests field's value. +func (s *CancelSpotFleetRequestsOutput) SetSuccessfulFleetRequests(v []*CancelSpotFleetRequestsSuccessItem) *CancelSpotFleetRequestsOutput { + s.SuccessfulFleetRequests = v + return s +} + +// SetUnsuccessfulFleetRequests sets the UnsuccessfulFleetRequests field's value. +func (s *CancelSpotFleetRequestsOutput) SetUnsuccessfulFleetRequests(v []*CancelSpotFleetRequestsErrorItem) *CancelSpotFleetRequestsOutput { + s.UnsuccessfulFleetRequests = v + return s +} + +// Describes a Spot Fleet request that was successfully canceled. +type CancelSpotFleetRequestsSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the Spot Fleet request. + CurrentSpotFleetRequestState *string `locationName:"currentSpotFleetRequestState" type:"string" enum:"BatchState"` + + // The previous state of the Spot Fleet request. + PreviousSpotFleetRequestState *string `locationName:"previousSpotFleetRequestState" type:"string" enum:"BatchState"` + + // The ID of the Spot Fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` +} + +// String returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotFleetRequestsSuccessItem) GoString() string { + return s.String() +} + +// SetCurrentSpotFleetRequestState sets the CurrentSpotFleetRequestState field's value. 
+func (s *CancelSpotFleetRequestsSuccessItem) SetCurrentSpotFleetRequestState(v string) *CancelSpotFleetRequestsSuccessItem { + s.CurrentSpotFleetRequestState = &v + return s +} + +// SetPreviousSpotFleetRequestState sets the PreviousSpotFleetRequestState field's value. +func (s *CancelSpotFleetRequestsSuccessItem) SetPreviousSpotFleetRequestState(v string) *CancelSpotFleetRequestsSuccessItem { + s.PreviousSpotFleetRequestState = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *CancelSpotFleetRequestsSuccessItem) SetSpotFleetRequestId(v string) *CancelSpotFleetRequestsSuccessItem { + s.SpotFleetRequestId = &v + return s +} + +// Contains the parameters for CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more Spot Instance request IDs. + // + // SpotInstanceRequestIds is a required field + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list" required:"true"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelSpotInstanceRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelSpotInstanceRequestsInput"} + if s.SpotInstanceRequestIds == nil { + invalidParams.Add(request.NewErrParamRequired("SpotInstanceRequestIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CancelSpotInstanceRequestsInput) SetDryRun(v bool) *CancelSpotInstanceRequestsInput { + s.DryRun = &v + return s +} + +// SetSpotInstanceRequestIds sets the SpotInstanceRequestIds field's value. +func (s *CancelSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) *CancelSpotInstanceRequestsInput { + s.SpotInstanceRequestIds = v + return s +} + +// Contains the output of CancelSpotInstanceRequests. +type CancelSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot Instance requests. + CancelledSpotInstanceRequests []*CancelledSpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CancelSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// SetCancelledSpotInstanceRequests sets the CancelledSpotInstanceRequests field's value. +func (s *CancelSpotInstanceRequestsOutput) SetCancelledSpotInstanceRequests(v []*CancelledSpotInstanceRequest) *CancelSpotInstanceRequestsOutput { + s.CancelledSpotInstanceRequests = v + return s +} + +// Describes a request to cancel a Spot Instance. +type CancelledSpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // The ID of the Spot Instance request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The state of the Spot Instance request. 
+ State *string `locationName:"state" type:"string" enum:"CancelSpotInstanceRequestState"` +} + +// String returns the string representation +func (s CancelledSpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelledSpotInstanceRequest) GoString() string { + return s.String() +} + +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value. +func (s *CancelledSpotInstanceRequest) SetSpotInstanceRequestId(v string) *CancelledSpotInstanceRequest { + s.SpotInstanceRequestId = &v + return s +} + +// SetState sets the State field's value. +func (s *CancelledSpotInstanceRequest) SetState(v string) *CancelledSpotInstanceRequest { + s.State = &v + return s +} + +// Describes a Capacity Reservation. +type CapacityReservation struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the capacity is reserved. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The Availability Zone ID of the Capacity Reservation. + AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The Amazon Resource Name (ARN) of the Capacity Reservation. + CapacityReservationArn *string `locationName:"capacityReservationArn" type:"string"` + + // The ID of the Capacity Reservation. + CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The date and time at which the Capacity Reservation was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` + + // Indicates whether the Capacity Reservation supports EBS-optimized instances. + // This optimization provides dedicated throughput to Amazon EBS and an optimized + // configuration stack to provide optimal I/O performance. 
This optimization + // isn't available with all instance types. Additional usage charges apply when + // using an EBS- optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The date and time at which the Capacity Reservation expires. When a Capacity + // Reservation expires, the reserved capacity is released and you can no longer + // launch instances into it. The Capacity Reservation's state changes to expired + // when it reaches its end date and time. + EndDate *time.Time `locationName:"endDate" type:"timestamp"` + + // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation + // can have one of the following end types: + // + // * unlimited - The Capacity Reservation remains active until you explicitly + // cancel it. + // + // * limited - The Capacity Reservation expires automatically at a specified + // date and time. + EndDateType *string `locationName:"endDateType" type:"string" enum:"EndDateType"` + + // Indicates whether the Capacity Reservation supports instances with temporary, + // block-level storage. + EphemeralStorage *bool `locationName:"ephemeralStorage" type:"boolean"` + + // Indicates the type of instance launches that the Capacity Reservation accepts. + // The options include: + // + // * open - The Capacity Reservation accepts all instances that have matching + // attributes (instance type, platform, and Availability Zone). Instances + // that have matching attributes launch into the Capacity Reservation automatically + // without specifying any additional parameters. + // + // * targeted - The Capacity Reservation only accepts instances that have + // matching attributes (instance type, platform, and Availability Zone), + // and explicitly target the Capacity Reservation. This ensures that only + // permitted instances can use the reserved capacity. 
+ InstanceMatchCriteria *string `locationName:"instanceMatchCriteria" type:"string" enum:"InstanceMatchCriteria"` + + // The type of operating system for which the Capacity Reservation reserves + // capacity. + InstancePlatform *string `locationName:"instancePlatform" type:"string" enum:"CapacityReservationInstancePlatform"` + + // The type of instance for which the Capacity Reservation reserves capacity. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the AWS account that owns the Capacity Reservation. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. + State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + + // Any tags assigned to the Capacity Reservation. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Indicates the tenancy of the Capacity Reservation. A Capacity Reservation + // can have one of the following tenancy settings: + // + // * default - The Capacity Reservation is created on hardware that is shared + // with other AWS accounts. 
+ // + // * dedicated - The Capacity Reservation is created on single-tenant hardware + // that is dedicated to a single AWS account. + Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` + + // The total number of instances for which the Capacity Reservation reserves + // capacity. + TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` +} + +// String returns the string representation +func (s CapacityReservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservation) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CapacityReservation) SetAvailabilityZone(v string) *CapacityReservation { + s.AvailabilityZone = &v + return s +} + +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *CapacityReservation) SetAvailabilityZoneId(v string) *CapacityReservation { + s.AvailabilityZoneId = &v + return s +} + +// SetAvailableInstanceCount sets the AvailableInstanceCount field's value. +func (s *CapacityReservation) SetAvailableInstanceCount(v int64) *CapacityReservation { + s.AvailableInstanceCount = &v + return s +} + +// SetCapacityReservationArn sets the CapacityReservationArn field's value. +func (s *CapacityReservation) SetCapacityReservationArn(v string) *CapacityReservation { + s.CapacityReservationArn = &v + return s +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *CapacityReservation) SetCapacityReservationId(v string) *CapacityReservation { + s.CapacityReservationId = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *CapacityReservation) SetCreateDate(v time.Time) *CapacityReservation { + s.CreateDate = &v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. 
+func (s *CapacityReservation) SetEbsOptimized(v bool) *CapacityReservation { + s.EbsOptimized = &v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *CapacityReservation) SetEndDate(v time.Time) *CapacityReservation { + s.EndDate = &v + return s +} + +// SetEndDateType sets the EndDateType field's value. +func (s *CapacityReservation) SetEndDateType(v string) *CapacityReservation { + s.EndDateType = &v + return s +} + +// SetEphemeralStorage sets the EphemeralStorage field's value. +func (s *CapacityReservation) SetEphemeralStorage(v bool) *CapacityReservation { + s.EphemeralStorage = &v + return s +} + +// SetInstanceMatchCriteria sets the InstanceMatchCriteria field's value. +func (s *CapacityReservation) SetInstanceMatchCriteria(v string) *CapacityReservation { + s.InstanceMatchCriteria = &v + return s +} + +// SetInstancePlatform sets the InstancePlatform field's value. +func (s *CapacityReservation) SetInstancePlatform(v string) *CapacityReservation { + s.InstancePlatform = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CapacityReservation) SetInstanceType(v string) *CapacityReservation { + s.InstanceType = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CapacityReservation) SetOwnerId(v string) *CapacityReservation { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *CapacityReservation) SetState(v string) *CapacityReservation { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CapacityReservation) SetTags(v []*Tag) *CapacityReservation { + s.Tags = v + return s +} + +// SetTenancy sets the Tenancy field's value. +func (s *CapacityReservation) SetTenancy(v string) *CapacityReservation { + s.Tenancy = &v + return s +} + +// SetTotalInstanceCount sets the TotalInstanceCount field's value. 
+func (s *CapacityReservation) SetTotalInstanceCount(v int64) *CapacityReservation { + s.TotalInstanceCount = &v + return s +} + +// Describes a resource group to which a Capacity Reservation has been added. +type CapacityReservationGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the resource group. + GroupArn *string `locationName:"groupArn" type:"string"` + + // The ID of the AWS account that owns the resource group. + OwnerId *string `locationName:"ownerId" type:"string"` +} + +// String returns the string representation +func (s CapacityReservationGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationGroup) GoString() string { + return s.String() +} + +// SetGroupArn sets the GroupArn field's value. +func (s *CapacityReservationGroup) SetGroupArn(v string) *CapacityReservationGroup { + s.GroupArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CapacityReservationGroup) SetOwnerId(v string) *CapacityReservationGroup { + s.OwnerId = &v + return s +} + +// Describes the strategy for using unused Capacity Reservations for fulfilling +// On-Demand capacity. +// +// This strategy can only be used if the EC2 Fleet is of type instant. +// +// For more information about Capacity Reservations, see On-Demand Capacity +// Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) +// in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity +// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) +// in the Amazon Elastic Compute Cloud User Guide. +type CapacityReservationOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether to use unused Capacity Reservations for fulfilling On-Demand + // capacity. 
+ // + // If you specify use-capacity-reservations-first, the fleet uses unused Capacity + // Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. + // If multiple instance pools have unused Capacity Reservations, the On-Demand + // allocation strategy (lowest-price or prioritized) is applied. If the number + // of unused Capacity Reservations is less than the On-Demand target capacity, + // the remaining On-Demand target capacity is launched according to the On-Demand + // allocation strategy (lowest-price or prioritized). + // + // If you do not specify a value, the fleet fulfils the On-Demand capacity according + // to the chosen On-Demand allocation strategy. + UsageStrategy *string `locationName:"usageStrategy" type:"string" enum:"FleetCapacityReservationUsageStrategy"` +} + +// String returns the string representation +func (s CapacityReservationOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationOptions) GoString() string { + return s.String() +} + +// SetUsageStrategy sets the UsageStrategy field's value. +func (s *CapacityReservationOptions) SetUsageStrategy(v string) *CapacityReservationOptions { + s.UsageStrategy = &v + return s +} + +// Describes the strategy for using unused Capacity Reservations for fulfilling +// On-Demand capacity. +// +// This strategy can only be used if the EC2 Fleet is of type instant. +// +// For more information about Capacity Reservations, see On-Demand Capacity +// Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) +// in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity +// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+type CapacityReservationOptionsRequest struct { + _ struct{} `type:"structure"` + + // Indicates whether to use unused Capacity Reservations for fulfilling On-Demand + // capacity. + // + // If you specify use-capacity-reservations-first, the fleet uses unused Capacity + // Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. + // If multiple instance pools have unused Capacity Reservations, the On-Demand + // allocation strategy (lowest-price or prioritized) is applied. If the number + // of unused Capacity Reservations is less than the On-Demand target capacity, + // the remaining On-Demand target capacity is launched according to the On-Demand + // allocation strategy (lowest-price or prioritized). + // + // If you do not specify a value, the fleet fulfils the On-Demand capacity according + // to the chosen On-Demand allocation strategy. + UsageStrategy *string `type:"string" enum:"FleetCapacityReservationUsageStrategy"` +} + +// String returns the string representation +func (s CapacityReservationOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationOptionsRequest) GoString() string { + return s.String() +} + +// SetUsageStrategy sets the UsageStrategy field's value. +func (s *CapacityReservationOptionsRequest) SetUsageStrategy(v string) *CapacityReservationOptionsRequest { + s.UsageStrategy = &v + return s +} + +// Describes an instance's Capacity Reservation targeting option. You can specify +// only one parameter at a time. If you specify CapacityReservationPreference +// and CapacityReservationTarget, the request fails. +// +// Use the CapacityReservationPreference parameter to configure the instance +// to run as an On-Demand Instance or to run in any open Capacity Reservation +// that has matching attributes (instance type, platform, Availability Zone). 
+// Use the CapacityReservationTarget parameter to explicitly target a specific +// Capacity Reservation or a Capacity Reservation group. +type CapacityReservationSpecification struct { + _ struct{} `type:"structure"` + + // Indicates the instance's Capacity Reservation preferences. Possible preferences + // include: + // + // * open - The instance can run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). + // + // * none - The instance avoids running in a Capacity Reservation even if + // one is available. The instance runs as an On-Demand Instance. + CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` + + // Information about the target Capacity Reservation or Capacity Reservation + // group. + CapacityReservationTarget *CapacityReservationTarget `type:"structure"` +} + +// String returns the string representation +func (s CapacityReservationSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationSpecification) GoString() string { + return s.String() +} + +// SetCapacityReservationPreference sets the CapacityReservationPreference field's value. +func (s *CapacityReservationSpecification) SetCapacityReservationPreference(v string) *CapacityReservationSpecification { + s.CapacityReservationPreference = &v + return s +} + +// SetCapacityReservationTarget sets the CapacityReservationTarget field's value. +func (s *CapacityReservationSpecification) SetCapacityReservationTarget(v *CapacityReservationTarget) *CapacityReservationSpecification { + s.CapacityReservationTarget = v + return s +} + +// Describes the instance's Capacity Reservation targeting preferences. 
The +// action returns the capacityReservationPreference response element if the +// instance is configured to run in On-Demand capacity, or if it is configured +// in run in any open Capacity Reservation that has matching attributes (instance +// type, platform, Availability Zone). The action returns the capacityReservationTarget +// response element if the instance explicily targets a specific Capacity Reservation +// or Capacity Reservation group. +type CapacityReservationSpecificationResponse struct { + _ struct{} `type:"structure"` + + // Describes the instance's Capacity Reservation preferences. Possible preferences + // include: + // + // * open - The instance can run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). + // + // * none - The instance avoids running in a Capacity Reservation even if + // one is available. The instance runs in On-Demand capacity. + CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"` + + // Information about the targeted Capacity Reservation or Capacity Reservation + // group. + CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"` +} + +// String returns the string representation +func (s CapacityReservationSpecificationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationSpecificationResponse) GoString() string { + return s.String() +} + +// SetCapacityReservationPreference sets the CapacityReservationPreference field's value. +func (s *CapacityReservationSpecificationResponse) SetCapacityReservationPreference(v string) *CapacityReservationSpecificationResponse { + s.CapacityReservationPreference = &v + return s +} + +// SetCapacityReservationTarget sets the CapacityReservationTarget field's value. 
+func (s *CapacityReservationSpecificationResponse) SetCapacityReservationTarget(v *CapacityReservationTargetResponse) *CapacityReservationSpecificationResponse { + s.CapacityReservationTarget = v + return s +} + +// Describes a target Capacity Reservation or Capacity Reservation group. +type CapacityReservationTarget struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation in which to run the instance. + CapacityReservationId *string `type:"string"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `type:"string"` +} + +// String returns the string representation +func (s CapacityReservationTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationTarget) GoString() string { + return s.String() +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *CapacityReservationTarget) SetCapacityReservationId(v string) *CapacityReservationTarget { + s.CapacityReservationId = &v + return s +} + +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTarget) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTarget { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a target Capacity Reservation or Capacity Reservation group. +type CapacityReservationTargetResponse struct { + _ struct{} `type:"structure"` + + // The ID of the targeted Capacity Reservation. + CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The ARN of the targeted Capacity Reservation group. 
+ CapacityReservationResourceGroupArn *string `locationName:"capacityReservationResourceGroupArn" type:"string"` +} + +// String returns the string representation +func (s CapacityReservationTargetResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationTargetResponse) GoString() string { + return s.String() +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *CapacityReservationTargetResponse) SetCapacityReservationId(v string) *CapacityReservationTargetResponse { + s.CapacityReservationId = &v + return s +} + +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTargetResponse) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTargetResponse { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a carrier gateway. +type CarrierGateway struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + + // The AWS account ID of the owner of the carrier gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the carrier gateway. + State *string `locationName:"state" type:"string" enum:"CarrierGatewayState"` + + // The tags assigned to the carrier gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC associated with the carrier gateway. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s CarrierGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CarrierGateway) GoString() string { + return s.String() +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. 
+func (s *CarrierGateway) SetCarrierGatewayId(v string) *CarrierGateway { + s.CarrierGatewayId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CarrierGateway) SetOwnerId(v string) *CarrierGateway { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *CarrierGateway) SetState(v string) *CarrierGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CarrierGateway) SetTags(v []*Tag) *CarrierGateway { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CarrierGateway) SetVpcId(v string) *CarrierGateway { + s.VpcId = &v + return s +} + +// Information about the client certificate used for authentication. +type CertificateAuthentication struct { + _ struct{} `type:"structure"` + + // The ARN of the client certificate. + ClientRootCertificateChain *string `locationName:"clientRootCertificateChain" type:"string"` +} + +// String returns the string representation +func (s CertificateAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateAuthentication) GoString() string { + return s.String() +} + +// SetClientRootCertificateChain sets the ClientRootCertificateChain field's value. +func (s *CertificateAuthentication) SetClientRootCertificateChain(v string) *CertificateAuthentication { + s.ClientRootCertificateChain = &v + return s +} + +// Information about the client certificate to be used for authentication. +type CertificateAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // The ARN of the client certificate. The certificate must be signed by a certificate + // authority (CA) and it must be provisioned in AWS Certificate Manager (ACM). 
+ ClientRootCertificateChainArn *string `type:"string"` +} + +// String returns the string representation +func (s CertificateAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CertificateAuthenticationRequest) GoString() string { + return s.String() +} + +// SetClientRootCertificateChainArn sets the ClientRootCertificateChainArn field's value. +func (s *CertificateAuthenticationRequest) SetClientRootCertificateChainArn(v string) *CertificateAuthenticationRequest { + s.ClientRootCertificateChainArn = &v + return s +} + +// Provides authorization for Amazon to bring a specific IP address range to +// a specific AWS account using bring your own IP addresses (BYOIP). For more +// information, see Prepare to Bring Your Address Range to Your AWS Account +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#prepare-for-byoip) +// in the Amazon Elastic Compute Cloud User Guide. +type CidrAuthorizationContext struct { + _ struct{} `type:"structure"` + + // The plain-text authorization message for the prefix and account. + // + // Message is a required field + Message *string `type:"string" required:"true"` + + // The signed authorization message for the prefix and account. + // + // Signature is a required field + Signature *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CidrAuthorizationContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CidrAuthorizationContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CidrAuthorizationContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CidrAuthorizationContext"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Signature == nil { + invalidParams.Add(request.NewErrParamRequired("Signature")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMessage sets the Message field's value. +func (s *CidrAuthorizationContext) SetMessage(v string) *CidrAuthorizationContext { + s.Message = &v + return s +} + +// SetSignature sets the Signature field's value. +func (s *CidrAuthorizationContext) SetSignature(v string) *CidrAuthorizationContext { + s.Signature = &v + return s +} + +// Describes an IPv4 CIDR block. +type CidrBlock struct { + _ struct{} `type:"structure"` + + // The IPv4 CIDR block. + CidrBlock *string `locationName:"cidrBlock" type:"string"` +} + +// String returns the string representation +func (s CidrBlock) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CidrBlock) GoString() string { + return s.String() +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CidrBlock) SetCidrBlock(v string) *CidrBlock { + s.CidrBlock = &v + return s +} + +// Describes the ClassicLink DNS support status of a VPC. +type ClassicLinkDnsSupport struct { + _ struct{} `type:"structure"` + + // Indicates whether ClassicLink DNS support is enabled for the VPC. + ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkDnsSupport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkDnsSupport) GoString() string { + return s.String() +} + +// SetClassicLinkDnsSupported sets the ClassicLinkDnsSupported field's value. 
+func (s *ClassicLinkDnsSupport) SetClassicLinkDnsSupported(v bool) *ClassicLinkDnsSupport { + s.ClassicLinkDnsSupported = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ClassicLinkDnsSupport) SetVpcId(v string) *ClassicLinkDnsSupport { + s.VpcId = &v + return s +} + +// Describes a linked EC2-Classic instance. +type ClassicLinkInstance struct { + _ struct{} `type:"structure"` + + // A list of security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkInstance) GoString() string { + return s.String() +} + +// SetGroups sets the Groups field's value. +func (s *ClassicLinkInstance) SetGroups(v []*GroupIdentifier) *ClassicLinkInstance { + s.Groups = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ClassicLinkInstance) SetInstanceId(v string) *ClassicLinkInstance { + s.InstanceId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ClassicLinkInstance) SetTags(v []*Tag) *ClassicLinkInstance { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ClassicLinkInstance) SetVpcId(v string) *ClassicLinkInstance { + s.VpcId = &v + return s +} + +// Describes a Classic Load Balancer. +type ClassicLoadBalancer struct { + _ struct{} `type:"structure"` + + // The name of the load balancer. 
+ Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s ClassicLoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLoadBalancer) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ClassicLoadBalancer) SetName(v string) *ClassicLoadBalancer { + s.Name = &v + return s +} + +// Describes the Classic Load Balancers to attach to a Spot Fleet. Spot Fleet +// registers the running Spot Instances with these Classic Load Balancers. +type ClassicLoadBalancersConfig struct { + _ struct{} `type:"structure"` + + // One or more Classic Load Balancers. + ClassicLoadBalancers []*ClassicLoadBalancer `locationName:"classicLoadBalancers" locationNameList:"item" min:"1" type:"list"` +} + +// String returns the string representation +func (s ClassicLoadBalancersConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLoadBalancersConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ClassicLoadBalancersConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ClassicLoadBalancersConfig"} + if s.ClassicLoadBalancers != nil && len(s.ClassicLoadBalancers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClassicLoadBalancers", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClassicLoadBalancers sets the ClassicLoadBalancers field's value. +func (s *ClassicLoadBalancersConfig) SetClassicLoadBalancers(v []*ClassicLoadBalancer) *ClassicLoadBalancersConfig { + s.ClassicLoadBalancers = v + return s +} + +// Describes the state of a client certificate revocation list. 
+type ClientCertificateRevocationListStatus struct { + _ struct{} `type:"structure"` + + // The state of the client certificate revocation list. + Code *string `locationName:"code" type:"string" enum:"ClientCertificateRevocationListStatusCode"` + + // A message about the status of the client certificate revocation list, if + // applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientCertificateRevocationListStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientCertificateRevocationListStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientCertificateRevocationListStatus) SetCode(v string) *ClientCertificateRevocationListStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientCertificateRevocationListStatus) SetMessage(v string) *ClientCertificateRevocationListStatus { + s.Message = &v + return s +} + +// The options for managing connection authorization for new client connections. +type ClientConnectOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. The default is false + // (not enabled). + Enabled *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `type:"string"` +} + +// String returns the string representation +func (s ClientConnectOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ClientConnectOptions) SetEnabled(v bool) *ClientConnectOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. 
+func (s *ClientConnectOptions) SetLambdaFunctionArn(v string) *ClientConnectOptions { + s.LambdaFunctionArn = &v + return s +} + +// The options for managing connection authorization for new client connections. +type ClientConnectResponseOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `locationName:"lambdaFunctionArn" type:"string"` + + // The status of any updates to the client connect options. + Status *ClientVpnEndpointAttributeStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s ClientConnectResponseOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectResponseOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ClientConnectResponseOptions) SetEnabled(v bool) *ClientConnectResponseOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *ClientConnectResponseOptions) SetLambdaFunctionArn(v string) *ClientConnectResponseOptions { + s.LambdaFunctionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientConnectResponseOptions) SetStatus(v *ClientVpnEndpointAttributeStatus) *ClientConnectResponseOptions { + s.Status = v + return s +} + +// Describes the client-specific data. +type ClientData struct { + _ struct{} `type:"structure"` + + // A user-defined comment about the disk upload. + Comment *string `type:"string"` + + // The time that the disk upload ends. + UploadEnd *time.Time `type:"timestamp"` + + // The size of the uploaded disk image, in GiB. + UploadSize *float64 `type:"double"` + + // The time that the disk upload starts. 
+ UploadStart *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ClientData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientData) GoString() string { + return s.String() +} + +// SetComment sets the Comment field's value. +func (s *ClientData) SetComment(v string) *ClientData { + s.Comment = &v + return s +} + +// SetUploadEnd sets the UploadEnd field's value. +func (s *ClientData) SetUploadEnd(v time.Time) *ClientData { + s.UploadEnd = &v + return s +} + +// SetUploadSize sets the UploadSize field's value. +func (s *ClientData) SetUploadSize(v float64) *ClientData { + s.UploadSize = &v + return s +} + +// SetUploadStart sets the UploadStart field's value. +func (s *ClientData) SetUploadStart(v time.Time) *ClientData { + s.UploadStart = &v + return s +} + +// Describes the authentication methods used by a Client VPN endpoint. For more +// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/client-authentication.html) +// in the AWS Client VPN Administrator Guide. +type ClientVpnAuthentication struct { + _ struct{} `type:"structure"` + + // Information about the Active Directory, if applicable. + ActiveDirectory *DirectoryServiceAuthentication `locationName:"activeDirectory" type:"structure"` + + // Information about the IAM SAML identity provider, if applicable. + FederatedAuthentication *FederatedAuthentication `locationName:"federatedAuthentication" type:"structure"` + + // Information about the authentication certificates, if applicable. + MutualAuthentication *CertificateAuthentication `locationName:"mutualAuthentication" type:"structure"` + + // The authentication type used. 
+ Type *string `locationName:"type" type:"string" enum:"ClientVpnAuthenticationType"` +} + +// String returns the string representation +func (s ClientVpnAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnAuthentication) GoString() string { + return s.String() +} + +// SetActiveDirectory sets the ActiveDirectory field's value. +func (s *ClientVpnAuthentication) SetActiveDirectory(v *DirectoryServiceAuthentication) *ClientVpnAuthentication { + s.ActiveDirectory = v + return s +} + +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthentication) SetFederatedAuthentication(v *FederatedAuthentication) *ClientVpnAuthentication { + s.FederatedAuthentication = v + return s +} + +// SetMutualAuthentication sets the MutualAuthentication field's value. +func (s *ClientVpnAuthentication) SetMutualAuthentication(v *CertificateAuthentication) *ClientVpnAuthentication { + s.MutualAuthentication = v + return s +} + +// SetType sets the Type field's value. +func (s *ClientVpnAuthentication) SetType(v string) *ClientVpnAuthentication { + s.Type = &v + return s +} + +// Describes the authentication method to be used by a Client VPN endpoint. +// For more information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// in the AWS Client VPN Administrator Guide. +type ClientVpnAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // Information about the Active Directory to be used, if applicable. You must + // provide this information if Type is directory-service-authentication. + ActiveDirectory *DirectoryServiceAuthenticationRequest `type:"structure"` + + // Information about the IAM SAML identity provider to be used, if applicable. + // You must provide this information if Type is federated-authentication. 
+ FederatedAuthentication *FederatedAuthenticationRequest `type:"structure"` + + // Information about the authentication certificates to be used, if applicable. + // You must provide this information if Type is certificate-authentication. + MutualAuthentication *CertificateAuthenticationRequest `type:"structure"` + + // The type of client authentication to be used. + Type *string `type:"string" enum:"ClientVpnAuthenticationType"` +} + +// String returns the string representation +func (s ClientVpnAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnAuthenticationRequest) GoString() string { + return s.String() +} + +// SetActiveDirectory sets the ActiveDirectory field's value. +func (s *ClientVpnAuthenticationRequest) SetActiveDirectory(v *DirectoryServiceAuthenticationRequest) *ClientVpnAuthenticationRequest { + s.ActiveDirectory = v + return s +} + +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthenticationRequest) SetFederatedAuthentication(v *FederatedAuthenticationRequest) *ClientVpnAuthenticationRequest { + s.FederatedAuthentication = v + return s +} + +// SetMutualAuthentication sets the MutualAuthentication field's value. +func (s *ClientVpnAuthenticationRequest) SetMutualAuthentication(v *CertificateAuthenticationRequest) *ClientVpnAuthenticationRequest { + s.MutualAuthentication = v + return s +} + +// SetType sets the Type field's value. +func (s *ClientVpnAuthenticationRequest) SetType(v string) *ClientVpnAuthenticationRequest { + s.Type = &v + return s +} + +// Describes the state of an authorization rule. +type ClientVpnAuthorizationRuleStatus struct { + _ struct{} `type:"structure"` + + // The state of the authorization rule. + Code *string `locationName:"code" type:"string" enum:"ClientVpnAuthorizationRuleStatusCode"` + + // A message about the status of the authorization rule, if applicable. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnAuthorizationRuleStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnAuthorizationRuleStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnAuthorizationRuleStatus) SetCode(v string) *ClientVpnAuthorizationRuleStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnAuthorizationRuleStatus) SetMessage(v string) *ClientVpnAuthorizationRuleStatus { + s.Message = &v + return s +} + +// Describes a client connection. +type ClientVpnConnection struct { + _ struct{} `type:"structure"` + + // The IP address of the client. + ClientIp *string `locationName:"clientIp" type:"string"` + + // The ID of the Client VPN endpoint to which the client is connected. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // The common name associated with the client. This is either the name of the + // client certificate, or the Active Directory user name. + CommonName *string `locationName:"commonName" type:"string"` + + // The date and time the client connection was terminated. + ConnectionEndTime *string `locationName:"connectionEndTime" type:"string"` + + // The date and time the client connection was established. + ConnectionEstablishedTime *string `locationName:"connectionEstablishedTime" type:"string"` + + // The ID of the client connection. + ConnectionId *string `locationName:"connectionId" type:"string"` + + // The number of bytes received by the client. + EgressBytes *string `locationName:"egressBytes" type:"string"` + + // The number of packets received by the client. + EgressPackets *string `locationName:"egressPackets" type:"string"` + + // The number of bytes sent by the client. 
+ IngressBytes *string `locationName:"ingressBytes" type:"string"` + + // The number of packets sent by the client. + IngressPackets *string `locationName:"ingressPackets" type:"string"` + + // The statuses returned by the client connect handler for posture compliance, + // if applicable. + PostureComplianceStatuses []*string `locationName:"postureComplianceStatusSet" locationNameList:"item" type:"list"` + + // The current state of the client connection. + Status *ClientVpnConnectionStatus `locationName:"status" type:"structure"` + + // The current date and time. + Timestamp *string `locationName:"timestamp" type:"string"` + + // The username of the client who established the client connection. This information + // is only provided if Active Directory client authentication is used. + Username *string `locationName:"username" type:"string"` +} + +// String returns the string representation +func (s ClientVpnConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnConnection) GoString() string { + return s.String() +} + +// SetClientIp sets the ClientIp field's value. +func (s *ClientVpnConnection) SetClientIp(v string) *ClientVpnConnection { + s.ClientIp = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ClientVpnConnection) SetClientVpnEndpointId(v string) *ClientVpnConnection { + s.ClientVpnEndpointId = &v + return s +} + +// SetCommonName sets the CommonName field's value. +func (s *ClientVpnConnection) SetCommonName(v string) *ClientVpnConnection { + s.CommonName = &v + return s +} + +// SetConnectionEndTime sets the ConnectionEndTime field's value. +func (s *ClientVpnConnection) SetConnectionEndTime(v string) *ClientVpnConnection { + s.ConnectionEndTime = &v + return s +} + +// SetConnectionEstablishedTime sets the ConnectionEstablishedTime field's value. 
+func (s *ClientVpnConnection) SetConnectionEstablishedTime(v string) *ClientVpnConnection { + s.ConnectionEstablishedTime = &v + return s +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *ClientVpnConnection) SetConnectionId(v string) *ClientVpnConnection { + s.ConnectionId = &v + return s +} + +// SetEgressBytes sets the EgressBytes field's value. +func (s *ClientVpnConnection) SetEgressBytes(v string) *ClientVpnConnection { + s.EgressBytes = &v + return s +} + +// SetEgressPackets sets the EgressPackets field's value. +func (s *ClientVpnConnection) SetEgressPackets(v string) *ClientVpnConnection { + s.EgressPackets = &v + return s +} + +// SetIngressBytes sets the IngressBytes field's value. +func (s *ClientVpnConnection) SetIngressBytes(v string) *ClientVpnConnection { + s.IngressBytes = &v + return s +} + +// SetIngressPackets sets the IngressPackets field's value. +func (s *ClientVpnConnection) SetIngressPackets(v string) *ClientVpnConnection { + s.IngressPackets = &v + return s +} + +// SetPostureComplianceStatuses sets the PostureComplianceStatuses field's value. +func (s *ClientVpnConnection) SetPostureComplianceStatuses(v []*string) *ClientVpnConnection { + s.PostureComplianceStatuses = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientVpnConnection) SetStatus(v *ClientVpnConnectionStatus) *ClientVpnConnection { + s.Status = v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *ClientVpnConnection) SetTimestamp(v string) *ClientVpnConnection { + s.Timestamp = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *ClientVpnConnection) SetUsername(v string) *ClientVpnConnection { + s.Username = &v + return s +} + +// Describes the status of a client connection. +type ClientVpnConnectionStatus struct { + _ struct{} `type:"structure"` + + // The state of the client connection. 
+ Code *string `locationName:"code" type:"string" enum:"ClientVpnConnectionStatusCode"` + + // A message about the status of the client connection, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnConnectionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnConnectionStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnConnectionStatus) SetCode(v string) *ClientVpnConnectionStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnConnectionStatus) SetMessage(v string) *ClientVpnConnectionStatus { + s.Message = &v + return s +} + +// Describes a Client VPN endpoint. +type ClientVpnEndpoint struct { + _ struct{} `type:"structure"` + + // Information about the associated target networks. A target network is a subnet + // in a VPC. + // + // Deprecated: This property is deprecated. To view the target networks associated with a Client VPN endpoint, call DescribeClientVpnTargetNetworks and inspect the clientVpnTargetNetworks response element. + AssociatedTargetNetworks []*AssociatedTargetNetwork `locationName:"associatedTargetNetwork" locationNameList:"item" deprecated:"true" type:"list"` + + // Information about the authentication method used by the Client VPN endpoint. + AuthenticationOptions []*ClientVpnAuthentication `locationName:"authenticationOptions" locationNameList:"item" type:"list"` + + // The IPv4 address range, in CIDR notation, from which client IP addresses + // are assigned. + ClientCidrBlock *string `locationName:"clientCidrBlock" type:"string"` + + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectResponseOptions `locationName:"clientConnectOptions" type:"structure"` + + // The ID of the Client VPN endpoint. 
+ ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // Information about the client connection logging options for the Client VPN + // endpoint. + ConnectionLogOptions *ConnectionLogResponseOptions `locationName:"connectionLogOptions" type:"structure"` + + // The date and time the Client VPN endpoint was created. + CreationTime *string `locationName:"creationTime" type:"string"` + + // The date and time the Client VPN endpoint was deleted, if applicable. + DeletionTime *string `locationName:"deletionTime" type:"string"` + + // A brief description of the endpoint. + Description *string `locationName:"description" type:"string"` + + // The DNS name to be used by clients when connecting to the Client VPN endpoint. + DnsName *string `locationName:"dnsName" type:"string"` + + // Information about the DNS servers to be used for DNS resolution. + DnsServers []*string `locationName:"dnsServer" locationNameList:"item" type:"list"` + + // The IDs of the security groups for the target network. + SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"` + + // The URL of the self-service portal. + SelfServicePortalUrl *string `locationName:"selfServicePortalUrl" type:"string"` + + // The ARN of the server certificate. + ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` + + // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. + SplitTunnel *bool `locationName:"splitTunnel" type:"boolean"` + + // The current state of the Client VPN endpoint. + Status *ClientVpnEndpointStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the Client VPN endpoint. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The transport protocol used by the Client VPN endpoint. + TransportProtocol *string `locationName:"transportProtocol" type:"string" enum:"TransportProtocol"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + // The port number for the Client VPN endpoint. + VpnPort *int64 `locationName:"vpnPort" type:"integer"` + + // The protocol used by the VPN session. + VpnProtocol *string `locationName:"vpnProtocol" type:"string" enum:"VpnProtocol"` +} + +// String returns the string representation +func (s ClientVpnEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnEndpoint) GoString() string { + return s.String() +} + +// SetAssociatedTargetNetworks sets the AssociatedTargetNetworks field's value. +func (s *ClientVpnEndpoint) SetAssociatedTargetNetworks(v []*AssociatedTargetNetwork) *ClientVpnEndpoint { + s.AssociatedTargetNetworks = v + return s +} + +// SetAuthenticationOptions sets the AuthenticationOptions field's value. +func (s *ClientVpnEndpoint) SetAuthenticationOptions(v []*ClientVpnAuthentication) *ClientVpnEndpoint { + s.AuthenticationOptions = v + return s +} + +// SetClientCidrBlock sets the ClientCidrBlock field's value. +func (s *ClientVpnEndpoint) SetClientCidrBlock(v string) *ClientVpnEndpoint { + s.ClientCidrBlock = &v + return s +} + +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ClientVpnEndpoint) SetClientConnectOptions(v *ClientConnectResponseOptions) *ClientVpnEndpoint { + s.ClientConnectOptions = v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ClientVpnEndpoint) SetClientVpnEndpointId(v string) *ClientVpnEndpoint { + s.ClientVpnEndpointId = &v + return s +} + +// SetConnectionLogOptions sets the ConnectionLogOptions field's value. 
+func (s *ClientVpnEndpoint) SetConnectionLogOptions(v *ConnectionLogResponseOptions) *ClientVpnEndpoint { + s.ConnectionLogOptions = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ClientVpnEndpoint) SetCreationTime(v string) *ClientVpnEndpoint { + s.CreationTime = &v + return s +} + +// SetDeletionTime sets the DeletionTime field's value. +func (s *ClientVpnEndpoint) SetDeletionTime(v string) *ClientVpnEndpoint { + s.DeletionTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ClientVpnEndpoint) SetDescription(v string) *ClientVpnEndpoint { + s.Description = &v + return s +} + +// SetDnsName sets the DnsName field's value. +func (s *ClientVpnEndpoint) SetDnsName(v string) *ClientVpnEndpoint { + s.DnsName = &v + return s +} + +// SetDnsServers sets the DnsServers field's value. +func (s *ClientVpnEndpoint) SetDnsServers(v []*string) *ClientVpnEndpoint { + s.DnsServers = v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *ClientVpnEndpoint) SetSecurityGroupIds(v []*string) *ClientVpnEndpoint { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortalUrl sets the SelfServicePortalUrl field's value. +func (s *ClientVpnEndpoint) SetSelfServicePortalUrl(v string) *ClientVpnEndpoint { + s.SelfServicePortalUrl = &v + return s +} + +// SetServerCertificateArn sets the ServerCertificateArn field's value. +func (s *ClientVpnEndpoint) SetServerCertificateArn(v string) *ClientVpnEndpoint { + s.ServerCertificateArn = &v + return s +} + +// SetSplitTunnel sets the SplitTunnel field's value. +func (s *ClientVpnEndpoint) SetSplitTunnel(v bool) *ClientVpnEndpoint { + s.SplitTunnel = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientVpnEndpoint) SetStatus(v *ClientVpnEndpointStatus) *ClientVpnEndpoint { + s.Status = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *ClientVpnEndpoint) SetTags(v []*Tag) *ClientVpnEndpoint { + s.Tags = v + return s +} + +// SetTransportProtocol sets the TransportProtocol field's value. +func (s *ClientVpnEndpoint) SetTransportProtocol(v string) *ClientVpnEndpoint { + s.TransportProtocol = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ClientVpnEndpoint) SetVpcId(v string) *ClientVpnEndpoint { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *ClientVpnEndpoint) SetVpnPort(v int64) *ClientVpnEndpoint { + s.VpnPort = &v + return s +} + +// SetVpnProtocol sets the VpnProtocol field's value. +func (s *ClientVpnEndpoint) SetVpnProtocol(v string) *ClientVpnEndpoint { + s.VpnProtocol = &v + return s +} + +// Describes the status of the Client VPN endpoint attribute. +type ClientVpnEndpointAttributeStatus struct { + _ struct{} `type:"structure"` + + // The status code. + Code *string `locationName:"code" type:"string" enum:"ClientVpnEndpointAttributeStatusCode"` + + // The status message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnEndpointAttributeStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnEndpointAttributeStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnEndpointAttributeStatus) SetCode(v string) *ClientVpnEndpointAttributeStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnEndpointAttributeStatus) SetMessage(v string) *ClientVpnEndpointAttributeStatus { + s.Message = &v + return s +} + +// Describes the state of a Client VPN endpoint. +type ClientVpnEndpointStatus struct { + _ struct{} `type:"structure"` + + // The state of the Client VPN endpoint. 
Possible states include: + // + // * pending-associate - The Client VPN endpoint has been created but no + // target networks have been associated. The Client VPN endpoint cannot accept + // connections. + // + // * available - The Client VPN endpoint has been created and a target network + // has been associated. The Client VPN endpoint can accept connections. + // + // * deleting - The Client VPN endpoint is being deleted. The Client VPN + // endpoint cannot accept connections. + // + // * deleted - The Client VPN endpoint has been deleted. The Client VPN endpoint + // cannot accept connections. + Code *string `locationName:"code" type:"string" enum:"ClientVpnEndpointStatusCode"` + + // A message about the status of the Client VPN endpoint. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnEndpointStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnEndpointStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnEndpointStatus) SetCode(v string) *ClientVpnEndpointStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnEndpointStatus) SetMessage(v string) *ClientVpnEndpointStatus { + s.Message = &v + return s +} + +// Information about a Client VPN endpoint route. +type ClientVpnRoute struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint with which the route is associated. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // A brief description of the route. + Description *string `locationName:"description" type:"string"` + + // The IPv4 address range, in CIDR notation, of the route destination. + DestinationCidr *string `locationName:"destinationCidr" type:"string"` + + // Indicates how the route was associated with the Client VPN endpoint. 
associate + // indicates that the route was automatically added when the target network + // was associated with the Client VPN endpoint. add-route indicates that the + // route was manually added using the CreateClientVpnRoute action. + Origin *string `locationName:"origin" type:"string"` + + // The current state of the route. + Status *ClientVpnRouteStatus `locationName:"status" type:"structure"` + + // The ID of the subnet through which traffic is routed. + TargetSubnet *string `locationName:"targetSubnet" type:"string"` + + // The route type. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s ClientVpnRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnRoute) GoString() string { + return s.String() +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ClientVpnRoute) SetClientVpnEndpointId(v string) *ClientVpnRoute { + s.ClientVpnEndpointId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ClientVpnRoute) SetDescription(v string) *ClientVpnRoute { + s.Description = &v + return s +} + +// SetDestinationCidr sets the DestinationCidr field's value. +func (s *ClientVpnRoute) SetDestinationCidr(v string) *ClientVpnRoute { + s.DestinationCidr = &v + return s +} + +// SetOrigin sets the Origin field's value. +func (s *ClientVpnRoute) SetOrigin(v string) *ClientVpnRoute { + s.Origin = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientVpnRoute) SetStatus(v *ClientVpnRouteStatus) *ClientVpnRoute { + s.Status = v + return s +} + +// SetTargetSubnet sets the TargetSubnet field's value. +func (s *ClientVpnRoute) SetTargetSubnet(v string) *ClientVpnRoute { + s.TargetSubnet = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *ClientVpnRoute) SetType(v string) *ClientVpnRoute { + s.Type = &v + return s +} + +// Describes the state of a Client VPN endpoint route. +type ClientVpnRouteStatus struct { + _ struct{} `type:"structure"` + + // The state of the Client VPN endpoint route. + Code *string `locationName:"code" type:"string" enum:"ClientVpnRouteStatusCode"` + + // A message about the status of the Client VPN endpoint route, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnRouteStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnRouteStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnRouteStatus) SetCode(v string) *ClientVpnRouteStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnRouteStatus) SetMessage(v string) *ClientVpnRouteStatus { + s.Message = &v + return s +} + +// Describes address usage for a customer-owned address pool. +type CoipAddressUsage struct { + _ struct{} `type:"structure"` + + // The allocation ID of the address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The AWS account ID. + AwsAccountId *string `locationName:"awsAccountId" type:"string"` + + // The AWS service. + AwsService *string `locationName:"awsService" type:"string"` + + // The customer-owned IP address. + CoIp *string `locationName:"coIp" type:"string"` +} + +// String returns the string representation +func (s CoipAddressUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CoipAddressUsage) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. 
+func (s *CoipAddressUsage) SetAllocationId(v string) *CoipAddressUsage { + s.AllocationId = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CoipAddressUsage) SetAwsAccountId(v string) *CoipAddressUsage { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *CoipAddressUsage) SetAwsService(v string) *CoipAddressUsage { + s.AwsService = &v + return s +} + +// SetCoIp sets the CoIp field's value. +func (s *CoipAddressUsage) SetCoIp(v string) *CoipAddressUsage { + s.CoIp = &v + return s +} + +// Describes a customer-owned address pool. +type CoipPool struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + + // The ARN of the address pool. + PoolArn *string `locationName:"poolArn" min:"1" type:"string"` + + // The address ranges of the address pool. + PoolCidrs []*string `locationName:"poolCidrSet" locationNameList:"item" type:"list"` + + // The ID of the address pool. + PoolId *string `locationName:"poolId" type:"string"` + + // The tags. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CoipPool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CoipPool) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *CoipPool) SetLocalGatewayRouteTableId(v string) *CoipPool { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetPoolArn sets the PoolArn field's value. +func (s *CoipPool) SetPoolArn(v string) *CoipPool { + s.PoolArn = &v + return s +} + +// SetPoolCidrs sets the PoolCidrs field's value. +func (s *CoipPool) SetPoolCidrs(v []*string) *CoipPool { + s.PoolCidrs = v + return s +} + +// SetPoolId sets the PoolId field's value. 
+func (s *CoipPool) SetPoolId(v string) *CoipPool { + s.PoolId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CoipPool) SetTags(v []*Tag) *CoipPool { + s.Tags = v + return s +} + +type ConfirmProductInstanceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The product code. This must be a product code that you own. + // + // ProductCode is a required field + ProductCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmProductInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmProductInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmProductInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.ProductCode == nil { + invalidParams.Add(request.NewErrParamRequired("ProductCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ConfirmProductInstanceInput) SetDryRun(v bool) *ConfirmProductInstanceInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *ConfirmProductInstanceInput) SetInstanceId(v string) *ConfirmProductInstanceInput { + s.InstanceId = &v + return s +} + +// SetProductCode sets the ProductCode field's value. +func (s *ConfirmProductInstanceInput) SetProductCode(v string) *ConfirmProductInstanceInput { + s.ProductCode = &v + return s +} + +type ConfirmProductInstanceOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the instance owner. This is only present if the product + // code is attached to the instance. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The return value of the request. Returns true if the specified product code + // is owned by the requester and associated with the specified instance. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ConfirmProductInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmProductInstanceOutput) GoString() string { + return s.String() +} + +// SetOwnerId sets the OwnerId field's value. +func (s *ConfirmProductInstanceOutput) SetOwnerId(v string) *ConfirmProductInstanceOutput { + s.OwnerId = &v + return s +} + +// SetReturn sets the Return field's value. +func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstanceOutput { + s.Return = &v + return s +} + +// Describes the client connection logging options for the Client VPN endpoint. +type ConnectionLogOptions struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch Logs log group. Required if connection logging + // is enabled. + CloudwatchLogGroup *string `type:"string"` + + // The name of the CloudWatch Logs log stream to which the connection data is + // published. + CloudwatchLogStream *string `type:"string"` + + // Indicates whether connection logging is enabled. 
+ Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ConnectionLogOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionLogOptions) GoString() string { + return s.String() +} + +// SetCloudwatchLogGroup sets the CloudwatchLogGroup field's value. +func (s *ConnectionLogOptions) SetCloudwatchLogGroup(v string) *ConnectionLogOptions { + s.CloudwatchLogGroup = &v + return s +} + +// SetCloudwatchLogStream sets the CloudwatchLogStream field's value. +func (s *ConnectionLogOptions) SetCloudwatchLogStream(v string) *ConnectionLogOptions { + s.CloudwatchLogStream = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *ConnectionLogOptions) SetEnabled(v bool) *ConnectionLogOptions { + s.Enabled = &v + return s +} + +// Information about the client connection logging options for a Client VPN +// endpoint. +type ConnectionLogResponseOptions struct { + _ struct{} `type:"structure"` + + // The name of the Amazon CloudWatch Logs log group to which connection logging + // data is published. + CloudwatchLogGroup *string `type:"string"` + + // The name of the Amazon CloudWatch Logs log stream to which connection logging + // data is published. + CloudwatchLogStream *string `type:"string"` + + // Indicates whether client connection logging is enabled for the Client VPN + // endpoint. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ConnectionLogResponseOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionLogResponseOptions) GoString() string { + return s.String() +} + +// SetCloudwatchLogGroup sets the CloudwatchLogGroup field's value. 
+func (s *ConnectionLogResponseOptions) SetCloudwatchLogGroup(v string) *ConnectionLogResponseOptions { + s.CloudwatchLogGroup = &v + return s +} + +// SetCloudwatchLogStream sets the CloudwatchLogStream field's value. +func (s *ConnectionLogResponseOptions) SetCloudwatchLogStream(v string) *ConnectionLogResponseOptions { + s.CloudwatchLogStream = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *ConnectionLogResponseOptions) SetEnabled(v bool) *ConnectionLogResponseOptions { + s.Enabled = &v + return s +} + +// Describes a connection notification for a VPC endpoint or VPC endpoint service. +type ConnectionNotification struct { + _ struct{} `type:"structure"` + + // The events for the notification. Valid values are Accept, Connect, Delete, + // and Reject. + ConnectionEvents []*string `locationName:"connectionEvents" locationNameList:"item" type:"list"` + + // The ARN of the SNS topic for the notification. + ConnectionNotificationArn *string `locationName:"connectionNotificationArn" type:"string"` + + // The ID of the notification. + ConnectionNotificationId *string `locationName:"connectionNotificationId" type:"string"` + + // The state of the notification. + ConnectionNotificationState *string `locationName:"connectionNotificationState" type:"string" enum:"ConnectionNotificationState"` + + // The type of notification. + ConnectionNotificationType *string `locationName:"connectionNotificationType" type:"string" enum:"ConnectionNotificationType"` + + // The ID of the endpoint service. + ServiceId *string `locationName:"serviceId" type:"string"` + + // The ID of the VPC endpoint. 
+ VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` +} + +// String returns the string representation +func (s ConnectionNotification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionNotification) GoString() string { + return s.String() +} + +// SetConnectionEvents sets the ConnectionEvents field's value. +func (s *ConnectionNotification) SetConnectionEvents(v []*string) *ConnectionNotification { + s.ConnectionEvents = v + return s +} + +// SetConnectionNotificationArn sets the ConnectionNotificationArn field's value. +func (s *ConnectionNotification) SetConnectionNotificationArn(v string) *ConnectionNotification { + s.ConnectionNotificationArn = &v + return s +} + +// SetConnectionNotificationId sets the ConnectionNotificationId field's value. +func (s *ConnectionNotification) SetConnectionNotificationId(v string) *ConnectionNotification { + s.ConnectionNotificationId = &v + return s +} + +// SetConnectionNotificationState sets the ConnectionNotificationState field's value. +func (s *ConnectionNotification) SetConnectionNotificationState(v string) *ConnectionNotification { + s.ConnectionNotificationState = &v + return s +} + +// SetConnectionNotificationType sets the ConnectionNotificationType field's value. +func (s *ConnectionNotification) SetConnectionNotificationType(v string) *ConnectionNotification { + s.ConnectionNotificationType = &v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *ConnectionNotification) SetServiceId(v string) *ConnectionNotification { + s.ServiceId = &v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *ConnectionNotification) SetVpcEndpointId(v string) *ConnectionNotification { + s.VpcEndpointId = &v + return s +} + +// Describes a conversion task. +type ConversionTask struct { + _ struct{} `type:"structure"` + + // The ID of the conversion task. 
+ ConversionTaskId *string `locationName:"conversionTaskId" type:"string"` + + // The time when the task expires. If the upload isn't complete before the expiration + // time, we automatically cancel the task. + ExpirationTime *string `locationName:"expirationTime" type:"string"` + + // If the task is for importing an instance, this contains information about + // the import instance task. + ImportInstance *ImportInstanceTaskDetails `locationName:"importInstance" type:"structure"` + + // If the task is for importing a volume, this contains information about the + // import volume task. + ImportVolume *ImportVolumeTaskDetails `locationName:"importVolume" type:"structure"` + + // The state of the conversion task. + State *string `locationName:"state" type:"string" enum:"ConversionTaskState"` + + // The status message related to the conversion task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ConversionTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConversionTask) GoString() string { + return s.String() +} + +// SetConversionTaskId sets the ConversionTaskId field's value. +func (s *ConversionTask) SetConversionTaskId(v string) *ConversionTask { + s.ConversionTaskId = &v + return s +} + +// SetExpirationTime sets the ExpirationTime field's value. +func (s *ConversionTask) SetExpirationTime(v string) *ConversionTask { + s.ExpirationTime = &v + return s +} + +// SetImportInstance sets the ImportInstance field's value. +func (s *ConversionTask) SetImportInstance(v *ImportInstanceTaskDetails) *ConversionTask { + s.ImportInstance = v + return s +} + +// SetImportVolume sets the ImportVolume field's value. 
+func (s *ConversionTask) SetImportVolume(v *ImportVolumeTaskDetails) *ConversionTask { + s.ImportVolume = v + return s +} + +// SetState sets the State field's value. +func (s *ConversionTask) SetState(v string) *ConversionTask { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ConversionTask) SetStatusMessage(v string) *ConversionTask { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask { + s.Tags = v + return s +} + +type CopyFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The description for the new AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name for the new AFI. The default is the name of the source AFI. + Name *string `type:"string"` + + // The ID of the source AFI. + // + // SourceFpgaImageId is a required field + SourceFpgaImageId *string `type:"string" required:"true"` + + // The Region that contains the source AFI. 
+ // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyFpgaImageInput"} + if s.SourceFpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceFpgaImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CopyFpgaImageInput) SetClientToken(v string) *CopyFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CopyFpgaImageInput) SetDescription(v string) *CopyFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopyFpgaImageInput) SetDryRun(v bool) *CopyFpgaImageInput { + s.DryRun = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyFpgaImageInput) SetName(v string) *CopyFpgaImageInput { + s.Name = &v + return s +} + +// SetSourceFpgaImageId sets the SourceFpgaImageId field's value. +func (s *CopyFpgaImageInput) SetSourceFpgaImageId(v string) *CopyFpgaImageInput { + s.SourceFpgaImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyFpgaImageInput) SetSourceRegion(v string) *CopyFpgaImageInput { + s.SourceRegion = &v + return s +} + +type CopyFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AFI. 
+ FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CopyFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CopyFpgaImageOutput) SetFpgaImageId(v string) *CopyFpgaImageOutput { + s.FpgaImageId = &v + return s +} + +// Contains the parameters for CopyImage. +type CopyImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon Elastic Compute Cloud User Guide. + ClientToken *string `type:"string"` + + // A description for the new AMI in the destination Region. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the destination snapshots of the copied image should be + // encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot + // create an unencrypted copy of an encrypted snapshot. The default CMK for + // EBS is used unless you specify a non-default AWS Key Management Service (AWS + // KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The identifier of the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) to use when creating encrypted volumes. If this parameter + // is not specified, your AWS managed CMK for EBS is used. If you specify a + // CMK, you must also set the encrypted state to true. + // + // You can specify a CMK using any of the following: + // + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Key alias. For example, alias/ExampleAlias. + // + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier + // that is not valid, the action can appear to complete, but eventually fails. + // + // The specified CMK must exist in the destination Region. + // + // Amazon EBS does not support asymmetric CMKs. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The name of the new AMI in the destination Region. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The ID of the AMI to copy. + // + // SourceImageId is a required field + SourceImageId *string `type:"string" required:"true"` + + // The name of the Region that contains the AMI to copy. + // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CopyImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyImageInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SourceImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CopyImageInput) SetClientToken(v string) *CopyImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CopyImageInput) SetDescription(v string) *CopyImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopyImageInput) SetDryRun(v bool) *CopyImageInput { + s.DryRun = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *CopyImageInput) SetEncrypted(v bool) *CopyImageInput { + s.Encrypted = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CopyImageInput) SetKmsKeyId(v string) *CopyImageInput { + s.KmsKeyId = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyImageInput) SetName(v string) *CopyImageInput { + s.Name = &v + return s +} + +// SetSourceImageId sets the SourceImageId field's value. +func (s *CopyImageInput) SetSourceImageId(v string) *CopyImageInput { + s.SourceImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyImageInput) SetSourceRegion(v string) *CopyImageInput { + s.SourceRegion = &v + return s +} + +// Contains the output of CopyImage. +type CopyImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. 
+ ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CopyImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyImageOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *CopyImageOutput) SetImageId(v string) *CopyImageOutput { + s.ImageId = &v + return s +} + +type CopySnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the EBS snapshot. + Description *string `type:"string"` + + // The destination Region to use in the PresignedUrl parameter of a snapshot + // copy operation. This parameter is only valid for specifying the destination + // Region in a PresignedUrl parameter, where it is required. + // + // The snapshot copy is sent to the regional endpoint that you sent the HTTP + // request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, + // this is specified using the --region parameter or the default Region in your + // AWS configuration file. + DestinationRegion *string `locationName:"destinationRegion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // To encrypt a copy of an unencrypted snapshot if encryption by default is + // not enabled, enable encryption using this parameter. Otherwise, omit this + // parameter. Encrypted snapshots are encrypted, even if you omit this parameter + // and encryption by default is not enabled. You cannot set this parameter to + // false. 
For more information, see Amazon EBS encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The identifier of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, + // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted + // state must be true. + // + // You can specify the CMK using any of the following: + // + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Key alias. For example, alias/ExampleAlias. + // + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, + // alias, or ARN that is not valid, the action can appear to complete, but eventually + // fails. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // When you copy an encrypted source snapshot using the Amazon EC2 Query API, + // you must supply a pre-signed URL. This parameter is optional for unencrypted + // snapshots. For more information, see Query requests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html). + // + // The PresignedUrl should use the snapshot source endpoint, the CopySnapshot + // action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion + // parameters. The PresignedUrl must be signed using AWS Signature Version 4. 
+ // Because EBS snapshots are stored in Amazon S3, the signing algorithm for + // this parameter uses the same logic that is described in Authenticating Requests: + // Using Query Parameters (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + // in the Amazon Simple Storage Service API Reference. An invalid or improperly + // signed PresignedUrl will cause the copy operation to fail asynchronously, + // and the snapshot will move to an error state. + PresignedUrl *string `locationName:"presignedUrl" type:"string"` + + // The ID of the Region that contains the snapshot to be copied. + // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` + + // The ID of the EBS snapshot to copy. + // + // SourceSnapshotId is a required field + SourceSnapshotId *string `type:"string" required:"true"` + + // The tags to apply to the new snapshot. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopySnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + if s.SourceSnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
+func (s *CopySnapshotInput) SetDescription(v string) *CopySnapshotInput { + s.Description = &v + return s +} + +// SetDestinationRegion sets the DestinationRegion field's value. +func (s *CopySnapshotInput) SetDestinationRegion(v string) *CopySnapshotInput { + s.DestinationRegion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopySnapshotInput) SetDryRun(v bool) *CopySnapshotInput { + s.DryRun = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *CopySnapshotInput) SetEncrypted(v bool) *CopySnapshotInput { + s.Encrypted = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CopySnapshotInput) SetKmsKeyId(v string) *CopySnapshotInput { + s.KmsKeyId = &v + return s +} + +// SetPresignedUrl sets the PresignedUrl field's value. +func (s *CopySnapshotInput) SetPresignedUrl(v string) *CopySnapshotInput { + s.PresignedUrl = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput { + s.SourceRegion = &v + return s +} + +// SetSourceSnapshotId sets the SourceSnapshotId field's value. +func (s *CopySnapshotInput) SetSourceSnapshotId(v string) *CopySnapshotInput { + s.SourceSnapshotId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CopySnapshotInput) SetTagSpecifications(v []*TagSpecification) *CopySnapshotInput { + s.TagSpecifications = v + return s +} + +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // Any tags applied to the new snapshot. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *CopySnapshotOutput) SetSnapshotId(v string) *CopySnapshotOutput { + s.SnapshotId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CopySnapshotOutput) SetTags(v []*Tag) *CopySnapshotOutput { + s.Tags = v + return s +} + +// The CPU options for the instance. +type CpuOptions struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `locationName:"coreCount" type:"integer"` + + // The number of threads per CPU core. + ThreadsPerCore *int64 `locationName:"threadsPerCore" type:"integer"` +} + +// String returns the string representation +func (s CpuOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CpuOptions) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *CpuOptions) SetCoreCount(v int64) *CpuOptions { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *CpuOptions) SetThreadsPerCore(v int64) *CpuOptions { + s.ThreadsPerCore = &v + return s +} + +// The CPU options for the instance. Both the core count and threads per core +// must be specified in the request. +type CpuOptionsRequest struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `type:"integer"` + + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. 
+ ThreadsPerCore *int64 `type:"integer"` +} + +// String returns the string representation +func (s CpuOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CpuOptionsRequest) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *CpuOptionsRequest) SetCoreCount(v int64) *CpuOptionsRequest { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *CpuOptionsRequest) SetThreadsPerCore(v int64) *CpuOptionsRequest { + s.ThreadsPerCore = &v + return s +} + +type CreateCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to create the Capacity Reservation. + AvailabilityZone *string `type:"string"` + + // The ID of the Availability Zone in which to create the Capacity Reservation. + AvailabilityZoneId *string `type:"string"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether the Capacity Reservation supports EBS-optimized instances. + // This optimization provides dedicated throughput to Amazon EBS and an optimized + // configuration stack to provide optimal I/O performance. This optimization + // isn't available with all instance types. Additional usage charges apply when + // using an EBS- optimized instance. 
+ EbsOptimized *bool `type:"boolean"` + + // The date and time at which the Capacity Reservation expires. When a Capacity + // Reservation expires, the reserved capacity is released and you can no longer + // launch instances into it. The Capacity Reservation's state changes to expired + // when it reaches its end date and time. + // + // You must provide an EndDate value if EndDateType is limited. Omit EndDate + // if EndDateType is unlimited. + // + // If the EndDateType is limited, the Capacity Reservation is cancelled within + // an hour from the specified time. For example, if you specify 5/31/2019, 13:30:55, + // the Capacity Reservation is guaranteed to end between 13:30:55 and 14:30:55 + // on 5/31/2019. + EndDate *time.Time `type:"timestamp"` + + // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation + // can have one of the following end types: + // + // * unlimited - The Capacity Reservation remains active until you explicitly + // cancel it. Do not provide an EndDate if the EndDateType is unlimited. + // + // * limited - The Capacity Reservation expires automatically at a specified + // date and time. You must provide an EndDate value if the EndDateType value + // is limited. + EndDateType *string `type:"string" enum:"EndDateType"` + + // Indicates whether the Capacity Reservation supports instances with temporary, + // block-level storage. + EphemeralStorage *bool `type:"boolean"` + + // The number of instances for which to reserve capacity. + // + // InstanceCount is a required field + InstanceCount *int64 `type:"integer" required:"true"` + + // Indicates the type of instance launches that the Capacity Reservation accepts. + // The options include: + // + // * open - The Capacity Reservation automatically matches all instances + // that have matching attributes (instance type, platform, and Availability + // Zone). 
Instances that have matching attributes run in the Capacity Reservation + // automatically without specifying any additional parameters. + // + // * targeted - The Capacity Reservation only accepts instances that have + // matching attributes (instance type, platform, and Availability Zone), + // and explicitly target the Capacity Reservation. This ensures that only + // permitted instances can use the reserved capacity. + // + // Default: open + InstanceMatchCriteria *string `type:"string" enum:"InstanceMatchCriteria"` + + // The type of operating system for which to reserve capacity. + // + // InstancePlatform is a required field + InstancePlatform *string `type:"string" required:"true" enum:"CapacityReservationInstancePlatform"` + + // The instance type for which to reserve capacity. For more information, see + // Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // InstanceType is a required field + InstanceType *string `type:"string" required:"true"` + + // The tags to apply to the Capacity Reservation during launch. + TagSpecifications []*TagSpecification `locationNameList:"item" type:"list"` + + // Indicates the tenancy of the Capacity Reservation. A Capacity Reservation + // can have one of the following tenancy settings: + // + // * default - The Capacity Reservation is created on hardware that is shared + // with other AWS accounts. + // + // * dedicated - The Capacity Reservation is created on single-tenant hardware + // that is dedicated to a single AWS account. 
+ Tenancy *string `type:"string" enum:"CapacityReservationTenancy"` +} + +// String returns the string representation +func (s CreateCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCapacityReservationInput"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.InstancePlatform == nil { + invalidParams.Add(request.NewErrParamRequired("InstancePlatform")) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateCapacityReservationInput) SetAvailabilityZone(v string) *CreateCapacityReservationInput { + s.AvailabilityZone = &v + return s +} + +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *CreateCapacityReservationInput) SetAvailabilityZoneId(v string) *CreateCapacityReservationInput { + s.AvailabilityZoneId = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateCapacityReservationInput) SetClientToken(v string) *CreateCapacityReservationInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateCapacityReservationInput) SetDryRun(v bool) *CreateCapacityReservationInput { + s.DryRun = &v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *CreateCapacityReservationInput) SetEbsOptimized(v bool) *CreateCapacityReservationInput { + s.EbsOptimized = &v + return s +} + +// SetEndDate sets the EndDate field's value. 
+func (s *CreateCapacityReservationInput) SetEndDate(v time.Time) *CreateCapacityReservationInput { + s.EndDate = &v + return s +} + +// SetEndDateType sets the EndDateType field's value. +func (s *CreateCapacityReservationInput) SetEndDateType(v string) *CreateCapacityReservationInput { + s.EndDateType = &v + return s +} + +// SetEphemeralStorage sets the EphemeralStorage field's value. +func (s *CreateCapacityReservationInput) SetEphemeralStorage(v bool) *CreateCapacityReservationInput { + s.EphemeralStorage = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *CreateCapacityReservationInput) SetInstanceCount(v int64) *CreateCapacityReservationInput { + s.InstanceCount = &v + return s +} + +// SetInstanceMatchCriteria sets the InstanceMatchCriteria field's value. +func (s *CreateCapacityReservationInput) SetInstanceMatchCriteria(v string) *CreateCapacityReservationInput { + s.InstanceMatchCriteria = &v + return s +} + +// SetInstancePlatform sets the InstancePlatform field's value. +func (s *CreateCapacityReservationInput) SetInstancePlatform(v string) *CreateCapacityReservationInput { + s.InstancePlatform = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CreateCapacityReservationInput) SetInstanceType(v string) *CreateCapacityReservationInput { + s.InstanceType = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCapacityReservationInput) SetTagSpecifications(v []*TagSpecification) *CreateCapacityReservationInput { + s.TagSpecifications = v + return s +} + +// SetTenancy sets the Tenancy field's value. +func (s *CreateCapacityReservationInput) SetTenancy(v string) *CreateCapacityReservationInput { + s.Tenancy = &v + return s +} + +type CreateCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Information about the Capacity Reservation. 
+ CapacityReservation *CapacityReservation `locationName:"capacityReservation" type:"structure"` +} + +// String returns the string representation +func (s CreateCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetCapacityReservation sets the CapacityReservation field's value. +func (s *CreateCapacityReservationOutput) SetCapacityReservation(v *CapacityReservation) *CreateCapacityReservationOutput { + s.CapacityReservation = v + return s +} + +type CreateCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to associate with the carrier gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC to associate with the carrier gateway. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCarrierGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateCarrierGatewayInput) SetClientToken(v string) *CreateCarrierGatewayInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateCarrierGatewayInput) SetDryRun(v bool) *CreateCarrierGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCarrierGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCarrierGatewayInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateCarrierGatewayInput) SetVpcId(v string) *CreateCarrierGatewayInput { + s.VpcId = &v + return s +} + +type CreateCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. +func (s *CreateCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *CreateCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + +type CreateClientVpnEndpointInput struct { + _ struct{} `type:"structure"` + + // Information about the authentication method to be used to authenticate clients. 
+ // + // AuthenticationOptions is a required field + AuthenticationOptions []*ClientVpnAuthenticationRequest `locationName:"Authentication" type:"list" required:"true"` + + // The IPv4 address range, in CIDR notation, from which to assign client IP + // addresses. The address range cannot overlap with the local CIDR of the VPC + // in which the associated subnet is located, or the routes that you add manually. + // The address range cannot be changed after the Client VPN endpoint has been + // created. The CIDR block should be /22 or greater. + // + // ClientCidrBlock is a required field + ClientCidrBlock *string `type:"string" required:"true"` + + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Information about the client connection logging options. + // + // If you enable client connection logging, data about client connections is + // sent to a Cloudwatch Logs log stream. The following information is logged: + // + // * Client connection requests + // + // * Client connection results (successful and unsuccessful) + // + // * Reasons for unsuccessful client connection requests + // + // * Client connection termination time + // + // ConnectionLogOptions is a required field + ConnectionLogOptions *ConnectionLogOptions `type:"structure" required:"true"` + + // A brief description of the Client VPN endpoint. + Description *string `type:"string"` + + // Information about the DNS servers to be used for DNS resolution. A Client + // VPN endpoint can have up to two DNS servers. 
If no DNS server is specified, + // the DNS address configured on the device is used for the DNS server. + DnsServers []*string `locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more security groups to apply to the target network. You + // must also specify the ID of the VPC that contains the security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + // Specify whether to enable the self-service portal for the Client VPN endpoint. + // + // Default Value: enabled + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + + // The ARN of the server certificate. For more information, see the AWS Certificate + // Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). + // + // ServerCertificateArn is a required field + ServerCertificateArn *string `type:"string" required:"true"` + + // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. + // + // By default, split-tunnel on a VPN endpoint is disabled. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. + SplitTunnel *bool `type:"boolean"` + + // The tags to apply to the Client VPN endpoint during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The transport protocol to be used by the VPN session. 
+ // + // Default value: udp + TransportProtocol *string `type:"string" enum:"TransportProtocol"` + + // The ID of the VPC to associate with the Client VPN endpoint. If no security + // group IDs are specified in the request, the default security group for the + // VPC is applied. + VpcId *string `type:"string"` + + // The port number to assign to the Client VPN endpoint for TCP and UDP traffic. + // + // Valid Values: 443 | 1194 + // + // Default Value: 443 + VpnPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s CreateClientVpnEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClientVpnEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClientVpnEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClientVpnEndpointInput"} + if s.AuthenticationOptions == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationOptions")) + } + if s.ClientCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("ClientCidrBlock")) + } + if s.ConnectionLogOptions == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionLogOptions")) + } + if s.ServerCertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationOptions sets the AuthenticationOptions field's value. +func (s *CreateClientVpnEndpointInput) SetAuthenticationOptions(v []*ClientVpnAuthenticationRequest) *CreateClientVpnEndpointInput { + s.AuthenticationOptions = v + return s +} + +// SetClientCidrBlock sets the ClientCidrBlock field's value. 
+func (s *CreateClientVpnEndpointInput) SetClientCidrBlock(v string) *CreateClientVpnEndpointInput { + s.ClientCidrBlock = &v + return s +} + +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *CreateClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *CreateClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateClientVpnEndpointInput) SetClientToken(v string) *CreateClientVpnEndpointInput { + s.ClientToken = &v + return s +} + +// SetConnectionLogOptions sets the ConnectionLogOptions field's value. +func (s *CreateClientVpnEndpointInput) SetConnectionLogOptions(v *ConnectionLogOptions) *CreateClientVpnEndpointInput { + s.ConnectionLogOptions = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateClientVpnEndpointInput) SetDescription(v string) *CreateClientVpnEndpointInput { + s.Description = &v + return s +} + +// SetDnsServers sets the DnsServers field's value. +func (s *CreateClientVpnEndpointInput) SetDnsServers(v []*string) *CreateClientVpnEndpointInput { + s.DnsServers = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateClientVpnEndpointInput) SetDryRun(v bool) *CreateClientVpnEndpointInput { + s.DryRun = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *CreateClientVpnEndpointInput { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *CreateClientVpnEndpointInput) SetSelfServicePortal(v string) *CreateClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + +// SetServerCertificateArn sets the ServerCertificateArn field's value. 
+func (s *CreateClientVpnEndpointInput) SetServerCertificateArn(v string) *CreateClientVpnEndpointInput { + s.ServerCertificateArn = &v + return s +} + +// SetSplitTunnel sets the SplitTunnel field's value. +func (s *CreateClientVpnEndpointInput) SetSplitTunnel(v bool) *CreateClientVpnEndpointInput { + s.SplitTunnel = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateClientVpnEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateClientVpnEndpointInput { + s.TagSpecifications = v + return s +} + +// SetTransportProtocol sets the TransportProtocol field's value. +func (s *CreateClientVpnEndpointInput) SetTransportProtocol(v string) *CreateClientVpnEndpointInput { + s.TransportProtocol = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateClientVpnEndpointInput) SetVpcId(v string) *CreateClientVpnEndpointInput { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *CreateClientVpnEndpointInput) SetVpnPort(v int64) *CreateClientVpnEndpointInput { + s.VpnPort = &v + return s +} + +type CreateClientVpnEndpointOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // The DNS name to be used by clients when establishing their VPN session. + DnsName *string `locationName:"dnsName" type:"string"` + + // The current state of the Client VPN endpoint. + Status *ClientVpnEndpointStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s CreateClientVpnEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClientVpnEndpointOutput) GoString() string { + return s.String() +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. 
+func (s *CreateClientVpnEndpointOutput) SetClientVpnEndpointId(v string) *CreateClientVpnEndpointOutput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDnsName sets the DnsName field's value. +func (s *CreateClientVpnEndpointOutput) SetDnsName(v string) *CreateClientVpnEndpointOutput { + s.DnsName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateClientVpnEndpointOutput) SetStatus(v *ClientVpnEndpointStatus) *CreateClientVpnEndpointOutput { + s.Status = v + return s +} + +type CreateClientVpnRouteInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The ID of the Client VPN endpoint to which to add the route. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // A brief description of the route. + Description *string `type:"string"` + + // The IPv4 address range, in CIDR notation, of the route destination. For example: + // + // * To add a route for Internet access, enter 0.0.0.0/0 + // + // * To add a route for a peered VPC, enter the peered VPC's IPv4 CIDR range + // + // * To add a route for an on-premises network, enter the AWS Site-to-Site + // VPN connection's IPv4 CIDR range + // + // * To add a route for the local network, enter the client CIDR range + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The ID of the subnet through which you want to route traffic. The specified + // subnet must be an existing target network of the Client VPN endpoint. + // + // Alternatively, if you're adding a route for the local network, specify local. + // + // TargetVpcSubnetId is a required field + TargetVpcSubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateClientVpnRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClientVpnRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateClientVpnRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateClientVpnRouteInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.TargetVpcSubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetVpcSubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateClientVpnRouteInput) SetClientToken(v string) *CreateClientVpnRouteInput { + s.ClientToken = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *CreateClientVpnRouteInput) SetClientVpnEndpointId(v string) *CreateClientVpnRouteInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateClientVpnRouteInput) SetDescription(v string) *CreateClientVpnRouteInput { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. 
+func (s *CreateClientVpnRouteInput) SetDestinationCidrBlock(v string) *CreateClientVpnRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateClientVpnRouteInput) SetDryRun(v bool) *CreateClientVpnRouteInput { + s.DryRun = &v + return s +} + +// SetTargetVpcSubnetId sets the TargetVpcSubnetId field's value. +func (s *CreateClientVpnRouteInput) SetTargetVpcSubnetId(v string) *CreateClientVpnRouteInput { + s.TargetVpcSubnetId = &v + return s +} + +type CreateClientVpnRouteOutput struct { + _ struct{} `type:"structure"` + + // The current state of the route. + Status *ClientVpnRouteStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s CreateClientVpnRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateClientVpnRouteOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *CreateClientVpnRouteOutput) SetStatus(v *ClientVpnRouteStatus) *CreateClientVpnRouteOutput { + s.Status = v + return s +} + +// Contains the parameters for CreateCustomerGateway. +type CreateCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // For devices that support BGP, the customer gateway's BGP ASN. + // + // Default: 65000 + // + // BgpAsn is a required field + BgpAsn *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `type:"string"` + + // A name for the customer gateway device. + // + // Length Constraints: Up to 255 characters. + DeviceName *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Internet-routable IP address for the customer gateway's outside interface. + // The address must be static. + PublicIp *string `locationName:"IpAddress" type:"string"` + + // The tags to apply to the customer gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The type of VPN connection that this customer gateway supports (ipsec.1). + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomerGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomerGatewayInput"} + if s.BgpAsn == nil { + invalidParams.Add(request.NewErrParamRequired("BgpAsn")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBgpAsn sets the BgpAsn field's value. +func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayInput { + s.BgpAsn = &v + return s +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { + s.CertificateArn = &v + return s +} + +// SetDeviceName sets the DeviceName field's value. +func (s *CreateCustomerGatewayInput) SetDeviceName(v string) *CreateCustomerGatewayInput { + s.DeviceName = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput { + s.DryRun = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *CreateCustomerGatewayInput) SetPublicIp(v string) *CreateCustomerGatewayInput { + s.PublicIp = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCustomerGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCustomerGatewayInput { + s.TagSpecifications = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateCustomerGatewayInput) SetType(v string) *CreateCustomerGatewayInput { + s.Type = &v + return s +} + +// Contains the output of CreateCustomerGateway. +type CreateCustomerGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the customer gateway. + CustomerGateway *CustomerGateway `locationName:"customerGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomerGatewayOutput) GoString() string { + return s.String() +} + +// SetCustomerGateway sets the CustomerGateway field's value. +func (s *CreateCustomerGatewayOutput) SetCustomerGateway(v *CustomerGateway) *CreateCustomerGatewayOutput { + s.CustomerGateway = v + return s +} + +type CreateDefaultSubnetInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to create the default subnet. + // + // AvailabilityZone is a required field + AvailabilityZone *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreateDefaultSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultSubnetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDefaultSubnetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDefaultSubnetInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateDefaultSubnetInput) SetAvailabilityZone(v string) *CreateDefaultSubnetInput { + s.AvailabilityZone = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateDefaultSubnetInput) SetDryRun(v bool) *CreateDefaultSubnetInput { + s.DryRun = &v + return s +} + +type CreateDefaultSubnetOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet. + Subnet *Subnet `locationName:"subnet" type:"structure"` +} + +// String returns the string representation +func (s CreateDefaultSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultSubnetOutput) GoString() string { + return s.String() +} + +// SetSubnet sets the Subnet field's value. +func (s *CreateDefaultSubnetOutput) SetSubnet(v *Subnet) *CreateDefaultSubnetOutput { + s.Subnet = v + return s +} + +type CreateDefaultVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreateDefaultVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultVpcInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateDefaultVpcInput) SetDryRun(v bool) *CreateDefaultVpcInput { + s.DryRun = &v + return s +} + +type CreateDefaultVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. + Vpc *Vpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s CreateDefaultVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultVpcOutput) GoString() string { + return s.String() +} + +// SetVpc sets the Vpc field's value. +func (s *CreateDefaultVpcOutput) SetVpc(v *Vpc) *CreateDefaultVpcOutput { + s.Vpc = v + return s +} + +type CreateDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // A DHCP configuration option. + // + // DhcpConfigurations is a required field + DhcpConfigurations []*NewDhcpConfiguration `locationName:"dhcpConfiguration" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the DHCP option. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDhcpOptionsInput"} + if s.DhcpConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpConfigurations")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDhcpConfigurations sets the DhcpConfigurations field's value. +func (s *CreateDhcpOptionsInput) SetDhcpConfigurations(v []*NewDhcpConfiguration) *CreateDhcpOptionsInput { + s.DhcpConfigurations = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateDhcpOptionsInput) SetTagSpecifications(v []*TagSpecification) *CreateDhcpOptionsInput { + s.TagSpecifications = v + return s +} + +type CreateDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // A set of DHCP options. + DhcpOptions *DhcpOptions `locationName:"dhcpOptions" type:"structure"` +} + +// String returns the string representation +func (s CreateDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDhcpOptionsOutput) GoString() string { + return s.String() +} + +// SetDhcpOptions sets the DhcpOptions field's value. 
+func (s *CreateDhcpOptionsOutput) SetDhcpOptions(v *DhcpOptions) *CreateDhcpOptionsOutput { + s.DhcpOptions = v + return s +} + +type CreateEgressOnlyInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to assign to the egress-only internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC for which to create the egress-only internet gateway. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateEgressOnlyInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEgressOnlyInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateEgressOnlyInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateEgressOnlyInternetGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *CreateEgressOnlyInternetGatewayInput) SetClientToken(v string) *CreateEgressOnlyInternetGatewayInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateEgressOnlyInternetGatewayInput) SetDryRun(v bool) *CreateEgressOnlyInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateEgressOnlyInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateEgressOnlyInternetGatewayInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateEgressOnlyInternetGatewayInput) SetVpcId(v string) *CreateEgressOnlyInternetGatewayInput { + s.VpcId = &v + return s +} + +type CreateEgressOnlyInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the egress-only internet gateway. + EgressOnlyInternetGateway *EgressOnlyInternetGateway `locationName:"egressOnlyInternetGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateEgressOnlyInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateEgressOnlyInternetGatewayOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateEgressOnlyInternetGatewayOutput) SetClientToken(v string) *CreateEgressOnlyInternetGatewayOutput { + s.ClientToken = &v + return s +} + +// SetEgressOnlyInternetGateway sets the EgressOnlyInternetGateway field's value. 
+func (s *CreateEgressOnlyInternetGatewayOutput) SetEgressOnlyInternetGateway(v *EgressOnlyInternetGateway) *CreateEgressOnlyInternetGatewayOutput { + s.EgressOnlyInternetGateway = v + return s +} + +// Describes the instances that could not be launched by the fleet. +type CreateFleetError struct { + _ struct{} `type:"structure"` + + // The error code that indicates why the instance could not be launched. For + // more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + ErrorCode *string `locationName:"errorCode" type:"string"` + + // The error message that describes why the instance could not be launched. + // For more information about error messages, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The launch templates and overrides that were used for launching the instances. + // The values that you specify in the Overrides replace the values in the launch + // template. + LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse `locationName:"launchTemplateAndOverrides" type:"structure"` + + // Indicates if the instance that could not be launched was a Spot Instance + // or On-Demand Instance. + Lifecycle *string `locationName:"lifecycle" type:"string" enum:"InstanceLifecycle"` +} + +// String returns the string representation +func (s CreateFleetError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *CreateFleetError) SetErrorCode(v string) *CreateFleetError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. 
+func (s *CreateFleetError) SetErrorMessage(v string) *CreateFleetError { + s.ErrorMessage = &v + return s +} + +// SetLaunchTemplateAndOverrides sets the LaunchTemplateAndOverrides field's value. +func (s *CreateFleetError) SetLaunchTemplateAndOverrides(v *LaunchTemplateAndOverridesResponse) *CreateFleetError { + s.LaunchTemplateAndOverrides = v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *CreateFleetError) SetLifecycle(v string) *CreateFleetError { + s.Lifecycle = &v + return s +} + +type CreateFleetInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether running instances should be terminated if the total target + // capacity of the EC2 Fleet is decreased below the current size of the EC2 + // Fleet. + ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The configuration for the EC2 Fleet. + // + // LaunchTemplateConfigs is a required field + LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationNameList:"item" type:"list" required:"true"` + + // Describes the configuration of On-Demand Instances in an EC2 Fleet. + OnDemandOptions *OnDemandOptionsRequest `type:"structure"` + + // Indicates whether EC2 Fleet should replace unhealthy instances. + ReplaceUnhealthyInstances *bool `type:"boolean"` + + // Describes the configuration of Spot Instances in an EC2 Fleet. 
+ SpotOptions *SpotOptionsRequest `type:"structure"` + + // The key-value pair for tagging the EC2 Fleet request on creation. The value + // for ResourceType must be fleet, otherwise the fleet request fails. To tag + // instances at launch, specify the tags in the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template). + // For information about tagging after launch, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The number of units to request. + // + // TargetCapacitySpecification is a required field + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` + + // Indicates whether running instances should be terminated when the EC2 Fleet + // expires. + TerminateInstancesWithExpiration *bool `type:"boolean"` + + // The type of request. The default value is maintain. + // + // * maintain - The EC2 Fleet places an asynchronous request for your desired + // capacity, and continues to maintain your desired Spot capacity by replenishing + // interrupted Spot Instances. + // + // * request - The EC2 Fleet places an asynchronous one-time request for + // your desired capacity, but does submit Spot requests in alternative capacity + // pools if Spot capacity is unavailable, and does not maintain Spot capacity + // if Spot Instances are interrupted. + // + // * instant - The EC2 Fleet places a synchronous one-time request for your + // desired capacity, and returns errors for any instances that could not + // be launched. + // + // For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type) + // in the Amazon Elastic Compute Cloud User Guide. 
+ Type *string `type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `type:"timestamp"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new EC2 Fleet requests are placed or able to fulfill the + // request. If no value is specified, the request remains until you cancel it. + ValidUntil *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CreateFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFleetInput"} + if s.LaunchTemplateConfigs == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplateConfigs")) + } + if s.TargetCapacitySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) + } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TargetCapacitySpecification != nil { + if err := s.TargetCapacitySpecification.Validate(); err != nil { + invalidParams.AddNested("TargetCapacitySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *CreateFleetInput) SetClientToken(v string) *CreateFleetInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateFleetInput) SetDryRun(v bool) *CreateFleetInput { + s.DryRun = &v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *CreateFleetInput) SetExcessCapacityTerminationPolicy(v string) *CreateFleetInput { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *CreateFleetInput) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfigRequest) *CreateFleetInput { + s.LaunchTemplateConfigs = v + return s +} + +// SetOnDemandOptions sets the OnDemandOptions field's value. +func (s *CreateFleetInput) SetOnDemandOptions(v *OnDemandOptionsRequest) *CreateFleetInput { + s.OnDemandOptions = v + return s +} + +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *CreateFleetInput) SetReplaceUnhealthyInstances(v bool) *CreateFleetInput { + s.ReplaceUnhealthyInstances = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. +func (s *CreateFleetInput) SetSpotOptions(v *SpotOptionsRequest) *CreateFleetInput { + s.SpotOptions = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFleetInput) SetTagSpecifications(v []*TagSpecification) *CreateFleetInput { + s.TagSpecifications = v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *CreateFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *CreateFleetInput { + s.TargetCapacitySpecification = v + return s +} + +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value. 
+func (s *CreateFleetInput) SetTerminateInstancesWithExpiration(v bool) *CreateFleetInput { + s.TerminateInstancesWithExpiration = &v + return s +} + +// SetType sets the Type field's value. +func (s *CreateFleetInput) SetType(v string) *CreateFleetInput { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *CreateFleetInput) SetValidFrom(v time.Time) *CreateFleetInput { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *CreateFleetInput) SetValidUntil(v time.Time) *CreateFleetInput { + s.ValidUntil = &v + return s +} + +// Describes the instances that were launched by the fleet. +type CreateFleetInstance struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + InstanceIds []*string `locationName:"instanceIds" locationNameList:"item" type:"list"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The launch templates and overrides that were used for launching the instances. + // The values that you specify in the Overrides replace the values in the launch + // template. + LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse `locationName:"launchTemplateAndOverrides" type:"structure"` + + // Indicates if the instance that was launched is a Spot Instance or On-Demand + // Instance. + Lifecycle *string `locationName:"lifecycle" type:"string" enum:"InstanceLifecycle"` + + // The value is Windows for Windows instances. Otherwise, the value is blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` +} + +// String returns the string representation +func (s CreateFleetInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetInstance) GoString() string { + return s.String() +} + +// SetInstanceIds sets the InstanceIds field's value. 
+func (s *CreateFleetInstance) SetInstanceIds(v []*string) *CreateFleetInstance { + s.InstanceIds = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *CreateFleetInstance) SetInstanceType(v string) *CreateFleetInstance { + s.InstanceType = &v + return s +} + +// SetLaunchTemplateAndOverrides sets the LaunchTemplateAndOverrides field's value. +func (s *CreateFleetInstance) SetLaunchTemplateAndOverrides(v *LaunchTemplateAndOverridesResponse) *CreateFleetInstance { + s.LaunchTemplateAndOverrides = v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *CreateFleetInstance) SetLifecycle(v string) *CreateFleetInstance { + s.Lifecycle = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *CreateFleetInstance) SetPlatform(v string) *CreateFleetInstance { + s.Platform = &v + return s +} + +type CreateFleetOutput struct { + _ struct{} `type:"structure"` + + // Information about the instances that could not be launched by the fleet. + // Valid only when Type is set to instant. + Errors []*CreateFleetError `locationName:"errorSet" locationNameList:"item" type:"list"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // Information about the instances that were launched by the fleet. Valid only + // when Type is set to instant. + Instances []*CreateFleetInstance `locationName:"fleetInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFleetOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *CreateFleetOutput) SetErrors(v []*CreateFleetError) *CreateFleetOutput { + s.Errors = v + return s +} + +// SetFleetId sets the FleetId field's value. 
+func (s *CreateFleetOutput) SetFleetId(v string) *CreateFleetOutput { + s.FleetId = &v + return s +} + +// SetInstances sets the Instances field's value. +func (s *CreateFleetOutput) SetInstances(v []*CreateFleetInstance) *CreateFleetOutput { + s.Instances = v + return s +} + +type CreateFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The ARN for the IAM role that permits Amazon EC2 to publish flow logs to + // a CloudWatch Logs log group in your account. + // + // If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn + // or LogGroupName. + DeliverLogsPermissionArn *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specifies the destination to which the flow log data is to be published. + // Flow log data can be published to a CloudWatch Logs log group or an Amazon + // S3 bucket. The value specified for this parameter depends on the value specified + // for LogDestinationType. + // + // If LogDestinationType is not specified or cloud-watch-logs, specify the Amazon + // Resource Name (ARN) of the CloudWatch Logs log group. For example, to publish + // to a log group called my-logs, specify arn:aws:logs:us-east-1:123456789012:log-group:my-logs. + // Alternatively, use LogGroupName instead. + // + // If LogDestinationType is s3, specify the ARN of the Amazon S3 bucket. You + // can also specify a subfolder in the bucket. 
To specify a subfolder in the + // bucket, use the following ARN format: bucket_ARN/subfolder_name/. For example, + // to specify a subfolder named my-logs in a bucket named my-bucket, use the + // following ARN: arn:aws:s3:::my-bucket/my-logs/. You cannot use AWSLogs as + // a subfolder name. This is a reserved term. + LogDestination *string `type:"string"` + + // Specifies the type of destination to which the flow log data is to be published. + // Flow log data can be published to CloudWatch Logs or Amazon S3. To publish + // flow log data to CloudWatch Logs, specify cloud-watch-logs. To publish flow + // log data to Amazon S3, specify s3. + // + // If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn + // or LogGroupName. + // + // Default: cloud-watch-logs + LogDestinationType *string `type:"string" enum:"LogDestinationType"` + + // The fields to include in the flow log record, in the order in which they + // should appear. For a list of available fields, see Flow Log Records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records). + // If you omit this parameter, the flow log is created using the default format. + // If you specify this parameter, you must specify at least one field. + // + // Specify the fields using the ${field-id} format, separated by spaces. For + // the AWS CLI, use single quotation marks (' ') to surround the parameter value. + LogFormat *string `type:"string"` + + // The name of a new or existing CloudWatch Logs log group where Amazon EC2 + // publishes your flow logs. + // + // If you specify LogDestinationType as s3, do not specify DeliverLogsPermissionArn + // or LogGroupName. + LogGroupName *string `type:"string"` + + // The maximum interval of time during which a flow of packets is captured and + // aggregated into a flow log record. You can specify 60 seconds (1 minute) + // or 600 seconds (10 minutes). 
+ // + // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), + // the aggregation interval is always 60 seconds or less, regardless of the + // value that you specify. + // + // Default: 600 + MaxAggregationInterval *int64 `type:"integer"` + + // The ID of the subnet, network interface, or VPC for which you want to create + // a flow log. + // + // Constraints: Maximum of 1000 resources + // + // ResourceIds is a required field + ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` + + // The type of resource for which to create the flow log. For example, if you + // specified a VPC ID for the ResourceId property, specify VPC for this property. + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + + // The tags to apply to the flow logs. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The type of traffic to log. You can log traffic that the resource accepts + // or rejects, or all traffic. + // + // TrafficType is a required field + TrafficType *string `type:"string" required:"true" enum:"TrafficType"` +} + +// String returns the string representation +func (s CreateFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateFlowLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFlowLogsInput"} + if s.ResourceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIds")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.TrafficType == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFlowLogsInput) SetClientToken(v string) *CreateFlowLogsInput { + s.ClientToken = &v + return s +} + +// SetDeliverLogsPermissionArn sets the DeliverLogsPermissionArn field's value. +func (s *CreateFlowLogsInput) SetDeliverLogsPermissionArn(v string) *CreateFlowLogsInput { + s.DeliverLogsPermissionArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateFlowLogsInput) SetDryRun(v bool) *CreateFlowLogsInput { + s.DryRun = &v + return s +} + +// SetLogDestination sets the LogDestination field's value. +func (s *CreateFlowLogsInput) SetLogDestination(v string) *CreateFlowLogsInput { + s.LogDestination = &v + return s +} + +// SetLogDestinationType sets the LogDestinationType field's value. +func (s *CreateFlowLogsInput) SetLogDestinationType(v string) *CreateFlowLogsInput { + s.LogDestinationType = &v + return s +} + +// SetLogFormat sets the LogFormat field's value. +func (s *CreateFlowLogsInput) SetLogFormat(v string) *CreateFlowLogsInput { + s.LogFormat = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *CreateFlowLogsInput) SetLogGroupName(v string) *CreateFlowLogsInput { + s.LogGroupName = &v + return s +} + +// SetMaxAggregationInterval sets the MaxAggregationInterval field's value. 
+func (s *CreateFlowLogsInput) SetMaxAggregationInterval(v int64) *CreateFlowLogsInput { + s.MaxAggregationInterval = &v + return s +} + +// SetResourceIds sets the ResourceIds field's value. +func (s *CreateFlowLogsInput) SetResourceIds(v []*string) *CreateFlowLogsInput { + s.ResourceIds = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *CreateFlowLogsInput) SetResourceType(v string) *CreateFlowLogsInput { + s.ResourceType = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFlowLogsInput) SetTagSpecifications(v []*TagSpecification) *CreateFlowLogsInput { + s.TagSpecifications = v + return s +} + +// SetTrafficType sets the TrafficType field's value. +func (s *CreateFlowLogsInput) SetTrafficType(v string) *CreateFlowLogsInput { + s.TrafficType = &v + return s +} + +type CreateFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the flow logs. + FlowLogIds []*string `locationName:"flowLogIdSet" locationNameList:"item" type:"list"` + + // Information about the flow logs that could not be created successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowLogsOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFlowLogsOutput) SetClientToken(v string) *CreateFlowLogsOutput { + s.ClientToken = &v + return s +} + +// SetFlowLogIds sets the FlowLogIds field's value. 
+func (s *CreateFlowLogsOutput) SetFlowLogIds(v []*string) *CreateFlowLogsOutput { + s.FlowLogIds = v + return s +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *CreateFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *CreateFlowLogsOutput { + s.Unsuccessful = v + return s +} + +type CreateFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // A description for the AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The location of the encrypted design checkpoint in Amazon S3. The input must + // be a tarball. + // + // InputStorageLocation is a required field + InputStorageLocation *StorageLocation `type:"structure" required:"true"` + + // The location in Amazon S3 for the output logs. + LogsStorageLocation *StorageLocation `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` + + // The tags to apply to the FPGA image during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFpgaImageInput"} + if s.InputStorageLocation == nil { + invalidParams.Add(request.NewErrParamRequired("InputStorageLocation")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFpgaImageInput) SetClientToken(v string) *CreateFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateFpgaImageInput) SetDescription(v string) *CreateFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateFpgaImageInput) SetDryRun(v bool) *CreateFpgaImageInput { + s.DryRun = &v + return s +} + +// SetInputStorageLocation sets the InputStorageLocation field's value. +func (s *CreateFpgaImageInput) SetInputStorageLocation(v *StorageLocation) *CreateFpgaImageInput { + s.InputStorageLocation = v + return s +} + +// SetLogsStorageLocation sets the LogsStorageLocation field's value. +func (s *CreateFpgaImageInput) SetLogsStorageLocation(v *StorageLocation) *CreateFpgaImageInput { + s.LogsStorageLocation = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { + s.Name = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFpgaImageInput) SetTagSpecifications(v []*TagSpecification) *CreateFpgaImageInput { + s.TagSpecifications = v + return s +} + +type CreateFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The global FPGA image identifier (AGFI ID). + FpgaImageGlobalId *string `locationName:"fpgaImageGlobalId" type:"string"` + + // The FPGA image identifier (AFI ID). 
+ FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CreateFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageGlobalId sets the FpgaImageGlobalId field's value. +func (s *CreateFpgaImageOutput) SetFpgaImageGlobalId(v string) *CreateFpgaImageOutput { + s.FpgaImageGlobalId = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CreateFpgaImageOutput) SetFpgaImageId(v string) *CreateFpgaImageOutput { + s.FpgaImageId = &v + return s +} + +type CreateImageInput struct { + _ struct{} `type:"structure"` + + // The block device mappings. This parameter cannot be used to modify the encryption + // status of existing volumes or snapshots. To create an AMI with encrypted + // snapshots, use the CopyImage action. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for the new image. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // A name for the new image. 
+ // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // By default, Amazon EC2 attempts to shut down and reboot the instance before + // creating the image. If the 'No Reboot' option is set, Amazon EC2 doesn't + // shut down the instance before creating the image. When this option is used, + // file system integrity on the created image can't be guaranteed. + NoReboot *bool `locationName:"noReboot" type:"boolean"` +} + +// String returns the string representation +func (s CreateImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateImageInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *CreateImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *CreateImageInput { + s.BlockDeviceMappings = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateImageInput) SetDescription(v string) *CreateImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateImageInput) SetDryRun(v bool) *CreateImageInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *CreateImageInput) SetInstanceId(v string) *CreateImageInput { + s.InstanceId = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateImageInput) SetName(v string) *CreateImageInput { + s.Name = &v + return s +} + +// SetNoReboot sets the NoReboot field's value. +func (s *CreateImageInput) SetNoReboot(v bool) *CreateImageInput { + s.NoReboot = &v + return s +} + +type CreateImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s CreateImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateImageOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *CreateImageOutput) SetImageId(v string) *CreateImageOutput { + s.ImageId = &v + return s +} + +type CreateInstanceExportTaskInput struct { + _ struct{} `type:"structure"` + + // A description for the conversion task or the resource being exported. The + // maximum length is 255 characters. + Description *string `locationName:"description" type:"string"` + + // The format and location for an instance export task. + // + // ExportToS3Task is a required field + ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure" required:"true"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The tags to apply to the instance export task during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The target virtualization environment. 
+ // + // TargetEnvironment is a required field + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" required:"true" enum:"ExportEnvironment"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceExportTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceExportTaskInput"} + if s.ExportToS3Task == nil { + invalidParams.Add(request.NewErrParamRequired("ExportToS3Task")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.TargetEnvironment == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEnvironment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateInstanceExportTaskInput) SetDescription(v string) *CreateInstanceExportTaskInput { + s.Description = &v + return s +} + +// SetExportToS3Task sets the ExportToS3Task field's value. +func (s *CreateInstanceExportTaskInput) SetExportToS3Task(v *ExportToS3TaskSpecification) *CreateInstanceExportTaskInput { + s.ExportToS3Task = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *CreateInstanceExportTaskInput) SetInstanceId(v string) *CreateInstanceExportTaskInput { + s.InstanceId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInstanceExportTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceExportTaskInput { + s.TagSpecifications = v + return s +} + +// SetTargetEnvironment sets the TargetEnvironment field's value. 
+func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateInstanceExportTaskInput { + s.TargetEnvironment = &v + return s +} + +type CreateInstanceExportTaskOutput struct { + _ struct{} `type:"structure"` + + // Information about the instance export task. + ExportTask *ExportTask `locationName:"exportTask" type:"structure"` +} + +// String returns the string representation +func (s CreateInstanceExportTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceExportTaskOutput) GoString() string { + return s.String() +} + +// SetExportTask sets the ExportTask field's value. +func (s *CreateInstanceExportTaskOutput) SetExportTask(v *ExportTask) *CreateInstanceExportTaskOutput { + s.ExportTask = v + return s +} + +type CreateInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateInternetGatewayInput { + s.TagSpecifications = v + return s +} + +type CreateInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the internet gateway. + InternetGateway *InternetGateway `locationName:"internetGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInternetGatewayOutput) GoString() string { + return s.String() +} + +// SetInternetGateway sets the InternetGateway field's value. +func (s *CreateInternetGatewayOutput) SetInternetGateway(v *InternetGateway) *CreateInternetGatewayOutput { + s.InternetGateway = v + return s +} + +type CreateKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // Constraints: Up to 255 ASCII characters + // + // KeyName is a required field + KeyName *string `type:"string" required:"true"` + + // The tags to apply to the new key pair. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"} + if s.KeyName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateKeyPairInput) SetDryRun(v bool) *CreateKeyPairInput { + s.DryRun = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput { + s.KeyName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateKeyPairInput) SetTagSpecifications(v []*TagSpecification) *CreateKeyPairInput { + s.TagSpecifications = v + return s +} + +// Describes a key pair. +type CreateKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The SHA-1 digest of the DER encoded private key. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // An unencrypted PEM encoded RSA private key. + KeyMaterial *string `locationName:"keyMaterial" type:"string" sensitive:"true"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The ID of the key pair. + KeyPairId *string `locationName:"keyPairId" type:"string"` + + // Any tags applied to the key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateKeyPairOutput) GoString() string { + return s.String() +} + +// SetKeyFingerprint sets the KeyFingerprint field's value. +func (s *CreateKeyPairOutput) SetKeyFingerprint(v string) *CreateKeyPairOutput { + s.KeyFingerprint = &v + return s +} + +// SetKeyMaterial sets the KeyMaterial field's value. 
+func (s *CreateKeyPairOutput) SetKeyMaterial(v string) *CreateKeyPairOutput { + s.KeyMaterial = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *CreateKeyPairOutput) SetKeyName(v string) *CreateKeyPairOutput { + s.KeyName = &v + return s +} + +// SetKeyPairId sets the KeyPairId field's value. +func (s *CreateKeyPairOutput) SetKeyPairId(v string) *CreateKeyPairOutput { + s.KeyPairId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateKeyPairOutput) SetTags(v []*Tag) *CreateKeyPairOutput { + s.Tags = v + return s +} + +type CreateLaunchTemplateInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 128 ASCII characters. + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The information for the launch template. + // + // LaunchTemplateData is a required field + LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true"` + + // A name for the launch template. + // + // LaunchTemplateName is a required field + LaunchTemplateName *string `min:"3" type:"string" required:"true"` + + // The tags to apply to the launch template during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // A description for the first version of the launch template. 
+ VersionDescription *string `type:"string"` +} + +// String returns the string representation +func (s CreateLaunchTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLaunchTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLaunchTemplateInput"} + if s.LaunchTemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplateData")) + } + if s.LaunchTemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplateName")) + } + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + if s.LaunchTemplateData != nil { + if err := s.LaunchTemplateData.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplateData", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateLaunchTemplateInput) SetClientToken(v string) *CreateLaunchTemplateInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateLaunchTemplateInput) SetDryRun(v bool) *CreateLaunchTemplateInput { + s.DryRun = &v + return s +} + +// SetLaunchTemplateData sets the LaunchTemplateData field's value. +func (s *CreateLaunchTemplateInput) SetLaunchTemplateData(v *RequestLaunchTemplateData) *CreateLaunchTemplateInput { + s.LaunchTemplateData = v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. 
+func (s *CreateLaunchTemplateInput) SetLaunchTemplateName(v string) *CreateLaunchTemplateInput { + s.LaunchTemplateName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateLaunchTemplateInput) SetTagSpecifications(v []*TagSpecification) *CreateLaunchTemplateInput { + s.TagSpecifications = v + return s +} + +// SetVersionDescription sets the VersionDescription field's value. +func (s *CreateLaunchTemplateInput) SetVersionDescription(v string) *CreateLaunchTemplateInput { + s.VersionDescription = &v + return s +} + +type CreateLaunchTemplateOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template. + LaunchTemplate *LaunchTemplate `locationName:"launchTemplate" type:"structure"` + + // If the launch template contains parameters or parameter combinations that + // are not valid, an error code and an error message are returned for each issue + // that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` +} + +// String returns the string representation +func (s CreateLaunchTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchTemplateOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *CreateLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *CreateLaunchTemplateOutput { + s.LaunchTemplate = v + return s +} + +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateOutput { + s.Warning = v + return s +} + +type CreateLaunchTemplateVersionInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
+ // + // Constraint: Maximum 128 ASCII characters. + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The information for the launch template. + // + // LaunchTemplateData is a required field + LaunchTemplateData *RequestLaunchTemplateData `type:"structure" required:"true"` + + // The ID of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateName *string `min:"3" type:"string"` + + // The version number of the launch template version on which to base the new + // version. The new version inherits the same launch parameters as the source + // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // applied to the block device mapping are ignored when creating a new version + // unless they are explicitly included. + SourceVersion *string `type:"string"` + + // A description for the version of the launch template. + VersionDescription *string `type:"string"` +} + +// String returns the string representation +func (s CreateLaunchTemplateVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchTemplateVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateLaunchTemplateVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLaunchTemplateVersionInput"} + if s.LaunchTemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplateData")) + } + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + if s.LaunchTemplateData != nil { + if err := s.LaunchTemplateData.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplateData", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateLaunchTemplateVersionInput) SetClientToken(v string) *CreateLaunchTemplateVersionInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateLaunchTemplateVersionInput) SetDryRun(v bool) *CreateLaunchTemplateVersionInput { + s.DryRun = &v + return s +} + +// SetLaunchTemplateData sets the LaunchTemplateData field's value. +func (s *CreateLaunchTemplateVersionInput) SetLaunchTemplateData(v *RequestLaunchTemplateData) *CreateLaunchTemplateVersionInput { + s.LaunchTemplateData = v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *CreateLaunchTemplateVersionInput) SetLaunchTemplateId(v string) *CreateLaunchTemplateVersionInput { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *CreateLaunchTemplateVersionInput) SetLaunchTemplateName(v string) *CreateLaunchTemplateVersionInput { + s.LaunchTemplateName = &v + return s +} + +// SetSourceVersion sets the SourceVersion field's value. +func (s *CreateLaunchTemplateVersionInput) SetSourceVersion(v string) *CreateLaunchTemplateVersionInput { + s.SourceVersion = &v + return s +} + +// SetVersionDescription sets the VersionDescription field's value. 
+func (s *CreateLaunchTemplateVersionInput) SetVersionDescription(v string) *CreateLaunchTemplateVersionInput { + s.VersionDescription = &v + return s +} + +type CreateLaunchTemplateVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template version. + LaunchTemplateVersion *LaunchTemplateVersion `locationName:"launchTemplateVersion" type:"structure"` + + // If the new version of the launch template contains parameters or parameter + // combinations that are not valid, an error code and an error message are returned + // for each issue that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` +} + +// String returns the string representation +func (s CreateLaunchTemplateVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLaunchTemplateVersionOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplateVersion sets the LaunchTemplateVersion field's value. +func (s *CreateLaunchTemplateVersionOutput) SetLaunchTemplateVersion(v *LaunchTemplateVersion) *CreateLaunchTemplateVersionOutput { + s.LaunchTemplateVersion = v + return s +} + +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateVersionOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateVersionOutput { + s.Warning = v + return s +} + +type CreateLocalGatewayRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range used for destination matches. Routing decisions are based + // on the most specific match. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The ID of the local gateway route table. + // + // LocalGatewayRouteTableId is a required field + LocalGatewayRouteTableId *string `type:"string" required:"true"` + + // The ID of the virtual interface group. + // + // LocalGatewayVirtualInterfaceGroupId is a required field + LocalGatewayVirtualInterfaceGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLocalGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocalGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocalGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocalGatewayRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.LocalGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId")) + } + if s.LocalGatewayVirtualInterfaceGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayVirtualInterfaceGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *CreateLocalGatewayRouteInput) SetDestinationCidrBlock(v string) *CreateLocalGatewayRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateLocalGatewayRouteInput) SetDryRun(v bool) *CreateLocalGatewayRouteInput { + s.DryRun = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. 
+func (s *CreateLocalGatewayRouteInput) SetLocalGatewayRouteTableId(v string) *CreateLocalGatewayRouteInput { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceGroupId sets the LocalGatewayVirtualInterfaceGroupId field's value. +func (s *CreateLocalGatewayRouteInput) SetLocalGatewayVirtualInterfaceGroupId(v string) *CreateLocalGatewayRouteInput { + s.LocalGatewayVirtualInterfaceGroupId = &v + return s +} + +type CreateLocalGatewayRouteOutput struct { + _ struct{} `type:"structure"` + + // Information about the route. + Route *LocalGatewayRoute `locationName:"route" type:"structure"` +} + +// String returns the string representation +func (s CreateLocalGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocalGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetRoute sets the Route field's value. +func (s *CreateLocalGatewayRouteOutput) SetRoute(v *LocalGatewayRoute) *CreateLocalGatewayRouteOutput { + s.Route = v + return s +} + +type CreateLocalGatewayRouteTableVpcAssociationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the local gateway route table. + // + // LocalGatewayRouteTableId is a required field + LocalGatewayRouteTableId *string `type:"string" required:"true"` + + // The tags to assign to the local gateway route table VPC association. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLocalGatewayRouteTableVpcAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocalGatewayRouteTableVpcAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocalGatewayRouteTableVpcAssociationInput"} + if s.LocalGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetDryRun(v bool) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.DryRun = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetLocalGatewayRouteTableId(v string) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetTagSpecifications(v []*TagSpecification) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetVpcId(v string) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.VpcId = &v + return s +} + +type CreateLocalGatewayRouteTableVpcAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + LocalGatewayRouteTableVpcAssociation *LocalGatewayRouteTableVpcAssociation `locationName:"localGatewayRouteTableVpcAssociation" type:"structure"` +} + +// String returns the string representation +func (s CreateLocalGatewayRouteTableVpcAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocalGatewayRouteTableVpcAssociationOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTableVpcAssociation sets the LocalGatewayRouteTableVpcAssociation field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteTableVpcAssociation(v *LocalGatewayRouteTableVpcAssociation) *CreateLocalGatewayRouteTableVpcAssociationOutput { + s.LocalGatewayRouteTableVpcAssociation = v + return s +} + +type CreateManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // The IP address type. + // + // Valid Values: IPv4 | IPv6 + // + // AddressFamily is a required field + AddressFamily *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Up to 255 UTF-8 characters in length. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // One or more entries for the prefix list. + Entries []*AddPrefixListEntry `locationName:"Entry" type:"list"` + + // The maximum number of entries for the prefix list. + // + // MaxEntries is a required field + MaxEntries *int64 `type:"integer" required:"true"` + + // A name for the prefix list. + // + // Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws. + // + // PrefixListName is a required field + PrefixListName *string `type:"string" required:"true"` + + // The tags to apply to the prefix list during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateManagedPrefixListInput"} + if s.AddressFamily == nil { + invalidParams.Add(request.NewErrParamRequired("AddressFamily")) + } + if s.MaxEntries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxEntries")) + } + if s.PrefixListName == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListName")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. 
+func (s *CreateManagedPrefixListInput) SetAddressFamily(v string) *CreateManagedPrefixListInput { + s.AddressFamily = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateManagedPrefixListInput) SetClientToken(v string) *CreateManagedPrefixListInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateManagedPrefixListInput) SetDryRun(v bool) *CreateManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *CreateManagedPrefixListInput) SetEntries(v []*AddPrefixListEntry) *CreateManagedPrefixListInput { + s.Entries = v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *CreateManagedPrefixListInput) SetMaxEntries(v int64) *CreateManagedPrefixListInput { + s.MaxEntries = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *CreateManagedPrefixListInput) SetPrefixListName(v string) *CreateManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateManagedPrefixListInput) SetTagSpecifications(v []*TagSpecification) *CreateManagedPrefixListInput { + s.TagSpecifications = v + return s +} + +type CreateManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s CreateManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. 
+func (s *CreateManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *CreateManagedPrefixListOutput { + s.PrefixList = v + return s +} + +type CreateNatGatewayInput struct { + _ struct{} `type:"structure"` + + // The allocation ID of an Elastic IP address to associate with the NAT gateway. + // If the Elastic IP address is associated with another resource, you must first + // disassociate it. + // + // AllocationId is a required field + AllocationId *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 64 ASCII characters. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The subnet in which to create the NAT gateway. + // + // SubnetId is a required field + SubnetId *string `type:"string" required:"true"` + + // The tags to assign to the NAT gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateNatGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNatGatewayInput"} + if s.AllocationId == nil { + invalidParams.Add(request.NewErrParamRequired("AllocationId")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllocationId sets the AllocationId field's value. +func (s *CreateNatGatewayInput) SetAllocationId(v string) *CreateNatGatewayInput { + s.AllocationId = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateNatGatewayInput) SetClientToken(v string) *CreateNatGatewayInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNatGatewayInput) SetDryRun(v bool) *CreateNatGatewayInput { + s.DryRun = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput { + s.SubnetId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNatGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateNatGatewayInput { + s.TagSpecifications = v + return s +} + +type CreateNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the NAT gateway. + NatGateway *NatGateway `locationName:"natGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNatGatewayOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *CreateNatGatewayOutput) SetClientToken(v string) *CreateNatGatewayOutput { + s.ClientToken = &v + return s +} + +// SetNatGateway sets the NatGateway field's value. +func (s *CreateNatGatewayOutput) SetNatGateway(v *NatGateway) *CreateNatGatewayOutput { + s.NatGateway = v + return s +} + +type CreateNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether this is an egress rule (rule is applied to traffic leaving + // the subnet). + // + // Egress is a required field + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol + // 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block. + IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"` + + // The IPv6 network range to allow or deny, in CIDR notation (for example 2001:db8:1234:1a00::/64). + Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"` + + // The ID of the network ACL. + // + // NetworkAclId is a required field + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // TCP or UDP protocols: The range of ports the rule applies to. Required if + // specifying protocol 6 (TCP) or 17 (UDP). + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol number. 
A value of "-1" means all protocols. If you specify + // "-1" or a protocol number other than "6" (TCP), "17" (UDP), or "1" (ICMP), + // traffic on all ports is allowed, regardless of any ports or ICMP types or + // codes that you specify. If you specify protocol "58" (ICMPv6) and specify + // an IPv4 CIDR block, traffic for all ICMP types and codes allowed, regardless + // of any that you specify. If you specify protocol "58" (ICMPv6) and specify + // an IPv6 CIDR block, you must specify an ICMP type and code. + // + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true"` + + // Indicates whether to allow or deny the traffic that matches the rule. + // + // RuleAction is a required field + RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"` + + // The rule number for the entry (for example, 100). ACL entries are processed + // in ascending order by rule number. + // + // Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is + // reserved for internal use. + // + // RuleNumber is a required field + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclEntryInput"} + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CreateNetworkAclEntryInput) SetCidrBlock(v string) *CreateNetworkAclEntryInput { + s.CidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNetworkAclEntryInput) SetDryRun(v bool) *CreateNetworkAclEntryInput { + s.DryRun = &v + return s +} + +// SetEgress sets the Egress field's value. +func (s *CreateNetworkAclEntryInput) SetEgress(v bool) *CreateNetworkAclEntryInput { + s.Egress = &v + return s +} + +// SetIcmpTypeCode sets the IcmpTypeCode field's value. +func (s *CreateNetworkAclEntryInput) SetIcmpTypeCode(v *IcmpTypeCode) *CreateNetworkAclEntryInput { + s.IcmpTypeCode = v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *CreateNetworkAclEntryInput) SetIpv6CidrBlock(v string) *CreateNetworkAclEntryInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *CreateNetworkAclEntryInput) SetNetworkAclId(v string) *CreateNetworkAclEntryInput { + s.NetworkAclId = &v + return s +} + +// SetPortRange sets the PortRange field's value. +func (s *CreateNetworkAclEntryInput) SetPortRange(v *PortRange) *CreateNetworkAclEntryInput { + s.PortRange = v + return s +} + +// SetProtocol sets the Protocol field's value. 
+func (s *CreateNetworkAclEntryInput) SetProtocol(v string) *CreateNetworkAclEntryInput { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *CreateNetworkAclEntryInput) SetRuleAction(v string) *CreateNetworkAclEntryInput { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *CreateNetworkAclEntryInput) SetRuleNumber(v int64) *CreateNetworkAclEntryInput { + s.RuleNumber = &v + return s +} + +type CreateNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type CreateNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the network ACL. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateNetworkAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkAclInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNetworkAclInput) SetDryRun(v bool) *CreateNetworkAclInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkAclInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkAclInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput { + s.VpcId = &v + return s +} + +type CreateNetworkAclOutput struct { + _ struct{} `type:"structure"` + + // Information about the network ACL. + NetworkAcl *NetworkAcl `locationName:"networkAcl" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkAclOutput) GoString() string { + return s.String() +} + +// SetNetworkAcl sets the NetworkAcl field's value. +func (s *CreateNetworkAclOutput) SetNetworkAcl(v *NetworkAcl) *CreateNetworkAclOutput { + s.NetworkAcl = v + return s +} + +// Contains the parameters for CreateNetworkInterface. +type CreateNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // Indicates the type of network interface. To create an Elastic Fabric Adapter + // (EFA), specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // in the Amazon Elastic Compute Cloud User Guide. + InterfaceType *string `type:"string" enum:"NetworkInterfaceCreationType"` + + // The number of IPv6 addresses to assign to a network interface. Amazon EC2 + // automatically selects the IPv6 addresses from the subnet range. You can't + // use this option if specifying specific IPv6 addresses. If your subnet has + // the AssignIpv6AddressOnCreation attribute set to true, you can specify 0 + // to override this setting. + Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"` + + // One or more specific IPv6 addresses from the IPv6 CIDR block range of your + // subnet. You can't use this option if you're specifying a number of IPv6 addresses. + Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6Addresses" locationNameList:"item" type:"list"` + + // The primary private IPv4 address of the network interface. If you don't specify + // an IPv4 address, Amazon EC2 selects one for you from the subnet's IPv4 CIDR + // range. If you specify an IP address, you cannot indicate any IP addresses + // specified in privateIpAddresses as primary (only one IP address can be designated + // as primary). + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IPv4 addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddresses" locationNameList:"item" type:"list"` + + // The number of secondary private IPv4 addresses to assign to a network interface. 
+ // When you specify a number of secondary IPv4 addresses, Amazon EC2 selects + // these IP addresses within the subnet's IPv4 CIDR range. You can't specify + // this option and specify more than one private IP address using privateIpAddresses. + // + // The number of IP addresses you can assign to a network interface varies by + // instance type. For more information, see IP Addresses Per ENI Per Instance + // Type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) + // in the Amazon Virtual Private Cloud User Guide. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet to associate with the network interface. + // + // SubnetId is a required field + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + // The tags to apply to the new network interface. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfaceInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateNetworkInterfaceInput) SetDescription(v string) *CreateNetworkInterfaceInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *CreateNetworkInterfaceInput) SetDryRun(v bool) *CreateNetworkInterfaceInput { + s.DryRun = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *CreateNetworkInterfaceInput) SetGroups(v []*string) *CreateNetworkInterfaceInput { + s.Groups = v + return s +} + +// SetInterfaceType sets the InterfaceType field's value. +func (s *CreateNetworkInterfaceInput) SetInterfaceType(v string) *CreateNetworkInterfaceInput { + s.InterfaceType = &v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *CreateNetworkInterfaceInput) SetIpv6AddressCount(v int64) *CreateNetworkInterfaceInput { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *CreateNetworkInterfaceInput) SetIpv6Addresses(v []*InstanceIpv6Address) *CreateNetworkInterfaceInput { + s.Ipv6Addresses = v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *CreateNetworkInterfaceInput) SetPrivateIpAddress(v string) *CreateNetworkInterfaceInput { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. +func (s *CreateNetworkInterfaceInput) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *CreateNetworkInterfaceInput { + s.PrivateIpAddresses = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. +func (s *CreateNetworkInterfaceInput) SetSecondaryPrivateIpAddressCount(v int64) *CreateNetworkInterfaceInput { + s.SecondaryPrivateIpAddressCount = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterfaceInput { + s.SubnetId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateNetworkInterfaceInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkInterfaceInput { + s.TagSpecifications = v + return s +} + +// Contains the output of CreateNetworkInterface. +type CreateNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` + + // Information about the network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// SetNetworkInterface sets the NetworkInterface field's value. +func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) *CreateNetworkInterfaceOutput { + s.NetworkInterface = v + return s +} + +// Contains the parameters for CreateNetworkInterfacePermission. +type CreateNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `type:"string"` + + // The AWS service. Currently not supported. + AwsService *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `type:"string" required:"true"` + + // The type of permission to grant. 
+ // + // Permission is a required field + Permission *string `type:"string" required:"true" enum:"InterfacePermissionType"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfacePermissionInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.Permission == nil { + invalidParams.Add(request.NewErrParamRequired("Permission")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateNetworkInterfacePermissionInput) SetAwsAccountId(v string) *CreateNetworkInterfacePermissionInput { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *CreateNetworkInterfacePermissionInput) SetAwsService(v string) *CreateNetworkInterfacePermissionInput { + s.AwsService = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNetworkInterfacePermissionInput) SetDryRun(v bool) *CreateNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateNetworkInterfacePermissionInput) SetNetworkInterfaceId(v string) *CreateNetworkInterfacePermissionInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPermission sets the Permission field's value. 
+func (s *CreateNetworkInterfacePermissionInput) SetPermission(v string) *CreateNetworkInterfacePermissionInput { + s.Permission = &v + return s +} + +// Contains the output of CreateNetworkInterfacePermission. +type CreateNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Information about the permission for the network interface. + InterfacePermission *NetworkInterfacePermission `locationName:"interfacePermission" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetInterfacePermission sets the InterfacePermission field's value. +func (s *CreateNetworkInterfacePermissionOutput) SetInterfacePermission(v *NetworkInterfacePermission) *CreateNetworkInterfacePermissionOutput { + s.InterfacePermission = v + return s +} + +type CreatePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A name for the placement group. Must be unique within the scope of your account + // for the Region. + // + // Constraints: Up to 255 ASCII characters + GroupName *string `locationName:"groupName" type:"string"` + + // The number of partitions. Valid only when Strategy is set to partition. + PartitionCount *int64 `type:"integer"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` + + // The tags to apply to the new placement group. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreatePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *CreatePlacementGroupInput) SetDryRun(v bool) *CreatePlacementGroupInput { + s.DryRun = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *CreatePlacementGroupInput) SetGroupName(v string) *CreatePlacementGroupInput { + s.GroupName = &v + return s +} + +// SetPartitionCount sets the PartitionCount field's value. +func (s *CreatePlacementGroupInput) SetPartitionCount(v int64) *CreatePlacementGroupInput { + s.PartitionCount = &v + return s +} + +// SetStrategy sets the Strategy field's value. +func (s *CreatePlacementGroupInput) SetStrategy(v string) *CreatePlacementGroupInput { + s.Strategy = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreatePlacementGroupInput) SetTagSpecifications(v []*TagSpecification) *CreatePlacementGroupInput { + s.TagSpecifications = v + return s +} + +type CreatePlacementGroupOutput struct { + _ struct{} `type:"structure"` + + // Describes a placement group. + PlacementGroup *PlacementGroup `locationName:"placementGroup" type:"structure"` +} + +// String returns the string representation +func (s CreatePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePlacementGroupOutput) GoString() string { + return s.String() +} + +// SetPlacementGroup sets the PlacementGroup field's value. 
+func (s *CreatePlacementGroupOutput) SetPlacementGroup(v *PlacementGroup) *CreatePlacementGroupOutput { + s.PlacementGroup = v + return s +} + +// Contains the parameters for CreateReservedInstancesListing. +type CreateReservedInstancesListingInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of your + // listings. This helps avoid duplicate listings. For more information, see + // Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // ClientToken is a required field + ClientToken *string `locationName:"clientToken" type:"string" required:"true"` + + // The number of instances that are a part of a Reserved Instance account to + // be listed in the Reserved Instance Marketplace. This number should be less + // than or equal to the instance count associated with the Reserved Instance + // ID specified in this call. + // + // InstanceCount is a required field + InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` + + // A list specifying the price of the Standard Reserved Instance for each month + // remaining in the Reserved Instance term. + // + // PriceSchedules is a required field + PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` + + // The ID of the active Standard Reserved Instance. + // + // ReservedInstancesId is a required field + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateReservedInstancesListingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReservedInstancesListingInput"} + if s.ClientToken == nil { + invalidParams.Add(request.NewErrParamRequired("ClientToken")) + } + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.PriceSchedules == nil { + invalidParams.Add(request.NewErrParamRequired("PriceSchedules")) + } + if s.ReservedInstancesId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateReservedInstancesListingInput) SetClientToken(v string) *CreateReservedInstancesListingInput { + s.ClientToken = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *CreateReservedInstancesListingInput) SetInstanceCount(v int64) *CreateReservedInstancesListingInput { + s.InstanceCount = &v + return s +} + +// SetPriceSchedules sets the PriceSchedules field's value. +func (s *CreateReservedInstancesListingInput) SetPriceSchedules(v []*PriceScheduleSpecification) *CreateReservedInstancesListingInput { + s.PriceSchedules = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *CreateReservedInstancesListingInput) SetReservedInstancesId(v string) *CreateReservedInstancesListingInput { + s.ReservedInstancesId = &v + return s +} + +// Contains the output of CreateReservedInstancesListing. +type CreateReservedInstancesListingOutput struct { + _ struct{} `type:"structure"` + + // Information about the Standard Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateReservedInstancesListingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReservedInstancesListingOutput) GoString() string { + return s.String() +} + +// SetReservedInstancesListings sets the ReservedInstancesListings field's value. +func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *CreateReservedInstancesListingOutput { + s.ReservedInstancesListings = v + return s +} + +type CreateRouteInput struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + // + // You can only use this option when the VPC contains a subnet which is associated + // with a Wavelength Zone. + CarrierGatewayId *string `type:"string"` + + // The IPv4 CIDR address block used for the destination match. Routing decisions + // are based on the most specific match. We modify the specified CIDR block + // to its canonical form; for example, if you specify 100.68.0.18/18, we modify + // it to 100.68.0.0/18. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The IPv6 CIDR block used for the destination match. Routing decisions are + // based on the most specific match. + DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + + // The ID of a prefix list used for the destination match. + DestinationPrefixListId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [IPv6 traffic only] The ID of an egress-only internet gateway. + EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` + + // The ID of an internet gateway or virtual private gateway attached to your + // VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. The operation fails if you specify + // an instance ID unless exactly one network interface is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The ID of the local gateway. + LocalGatewayId *string `type:"string"` + + // [IPv4 traffic only] The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of a network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the route table for the route. + // + // RouteTableId is a required field + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` + + // The ID of a transit gateway. + TransitGatewayId *string `type:"string"` + + // The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only. + VpcEndpointId *string `type:"string"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s CreateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRouteInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *CreateRouteInput) SetCarrierGatewayId(v string) *CreateRouteInput { + s.CarrierGatewayId = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *CreateRouteInput) SetDestinationCidrBlock(v string) *CreateRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationIpv6CidrBlock sets the DestinationIpv6CidrBlock field's value. +func (s *CreateRouteInput) SetDestinationIpv6CidrBlock(v string) *CreateRouteInput { + s.DestinationIpv6CidrBlock = &v + return s +} + +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *CreateRouteInput) SetDestinationPrefixListId(v string) *CreateRouteInput { + s.DestinationPrefixListId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateRouteInput) SetDryRun(v bool) *CreateRouteInput { + s.DryRun = &v + return s +} + +// SetEgressOnlyInternetGatewayId sets the EgressOnlyInternetGatewayId field's value. +func (s *CreateRouteInput) SetEgressOnlyInternetGatewayId(v string) *CreateRouteInput { + s.EgressOnlyInternetGatewayId = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *CreateRouteInput) SetGatewayId(v string) *CreateRouteInput { + s.GatewayId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *CreateRouteInput) SetInstanceId(v string) *CreateRouteInput { + s.InstanceId = &v + return s +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. 
+func (s *CreateRouteInput) SetLocalGatewayId(v string) *CreateRouteInput { + s.LocalGatewayId = &v + return s +} + +// SetNatGatewayId sets the NatGatewayId field's value. +func (s *CreateRouteInput) SetNatGatewayId(v string) *CreateRouteInput { + s.NatGatewayId = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateRouteInput) SetNetworkInterfaceId(v string) *CreateRouteInput { + s.NetworkInterfaceId = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *CreateRouteInput) SetRouteTableId(v string) *CreateRouteInput { + s.RouteTableId = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *CreateRouteInput) SetTransitGatewayId(v string) *CreateRouteInput { + s.TransitGatewayId = &v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *CreateRouteInput) SetVpcEndpointId(v string) *CreateRouteInput { + s.VpcEndpointId = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *CreateRouteInput) SetVpcPeeringConnectionId(v string) *CreateRouteInput { + s.VpcPeeringConnectionId = &v + return s +} + +type CreateRouteOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s CreateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *CreateRouteOutput) SetReturn(v bool) *CreateRouteOutput { + s.Return = &v + return s +} + +type CreateRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the route table. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRouteTableInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateRouteTableInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput { + s.VpcId = &v + return s +} + +type CreateRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the route table. + RouteTable *RouteTable `locationName:"routeTable" type:"structure"` +} + +// String returns the string representation +func (s CreateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRouteTableOutput) GoString() string { + return s.String() +} + +// SetRouteTable sets the RouteTable field's value. +func (s *CreateRouteTableOutput) SetRouteTable(v *RouteTable) *CreateRouteTableOutput { + s.RouteTable = v + return s +} + +type CreateSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the security group. This is informational only. + // + // Constraints: Up to 255 characters in length + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + // + // Description is a required field + Description *string `locationName:"GroupDescription" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the security group. + // + // Constraints: Up to 255 characters in length. Cannot start with sg-. + // + // Constraints for EC2-Classic: ASCII characters + // + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + // + // GroupName is a required field + GroupName *string `type:"string" required:"true"` + + // The tags to assign to the security group. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CreateSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSecurityGroupInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateSecurityGroupInput) SetDescription(v string) *CreateSecurityGroupInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSecurityGroupInput) SetDryRun(v bool) *CreateSecurityGroupInput { + s.DryRun = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *CreateSecurityGroupInput) SetGroupName(v string) *CreateSecurityGroupInput { + s.GroupName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSecurityGroupInput) SetTagSpecifications(v []*TagSpecification) *CreateSecurityGroupInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput { + s.VpcId = &v + return s +} + +type CreateSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the security group. 
+ GroupId *string `locationName:"groupId" type:"string"` + + // The tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecurityGroupOutput) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOutput { + s.GroupId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateSecurityGroupOutput) SetTags(v []*Tag) *CreateSecurityGroupOutput { + s.Tags = v + return s +} + +type CreateSnapshotInput struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to apply to the snapshot during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the EBS volume. + // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateSnapshotInput) SetDescription(v string) *CreateSnapshotInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSnapshotInput) SetDryRun(v bool) *CreateSnapshotInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSnapshotInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotInput { + s.TagSpecifications = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput { + s.VolumeId = &v + return s +} + +type CreateSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Copies the tags from the specified volume to corresponding snapshot. + CopyTagsFromSource *string `type:"string" enum:"CopyTagsFromSource"` + + // A description propagated to every snapshot specified by the instance. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance to specify which volumes should be included in the snapshots. + // + // InstanceSpecification is a required field + InstanceSpecification *InstanceSpecification `type:"structure" required:"true"` + + // Tags to apply to every snapshot specified by the instance. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotsInput"} + if s.InstanceSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceSpecification")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCopyTagsFromSource sets the CopyTagsFromSource field's value. +func (s *CreateSnapshotsInput) SetCopyTagsFromSource(v string) *CreateSnapshotsInput { + s.CopyTagsFromSource = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateSnapshotsInput) SetDescription(v string) *CreateSnapshotsInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSnapshotsInput) SetDryRun(v bool) *CreateSnapshotsInput { + s.DryRun = &v + return s +} + +// SetInstanceSpecification sets the InstanceSpecification field's value. +func (s *CreateSnapshotsInput) SetInstanceSpecification(v *InstanceSpecification) *CreateSnapshotsInput { + s.InstanceSpecification = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSnapshotsInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotsInput { + s.TagSpecifications = v + return s +} + +type CreateSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // List of snapshots. 
+ Snapshots []*SnapshotInfo `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsOutput) GoString() string { + return s.String() +} + +// SetSnapshots sets the Snapshots field's value. +func (s *CreateSnapshotsOutput) SetSnapshots(v []*SnapshotInfo) *CreateSnapshotsOutput { + s.Snapshots = v + return s +} + +// Contains the parameters for CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket in which to store the Spot Instance data + // feed. For more information about bucket names, see Rules for bucket naming + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) + // in the Amazon S3 Developer Guide. + // + // Bucket is a required field + Bucket *string `locationName:"bucket" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The prefix for the data feed file names. + Prefix *string `locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateSpotDatafeedSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSpotDatafeedSubscriptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateSpotDatafeedSubscriptionInput) SetBucket(v string) *CreateSpotDatafeedSubscriptionInput { + s.Bucket = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSpotDatafeedSubscriptionInput) SetDryRun(v bool) *CreateSpotDatafeedSubscriptionInput { + s.DryRun = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *CreateSpotDatafeedSubscriptionInput) SetPrefix(v string) *CreateSpotDatafeedSubscriptionInput { + s.Prefix = &v + return s +} + +// Contains the output of CreateSpotDatafeedSubscription. +type CreateSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot Instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// SetSpotDatafeedSubscription sets the SpotDatafeedSubscription field's value. +func (s *CreateSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *SpotDatafeedSubscription) *CreateSpotDatafeedSubscriptionOutput { + s.SpotDatafeedSubscription = v + return s +} + +type CreateSubnetInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone or Local Zone for the subnet. + // + // Default: AWS selects one for you. 
If you create more than one subnet in your + // VPC, we do not necessarily select a different zone for each subnet. + // + // To create a subnet in a Local Zone, set this value to the Local Zone ID, + // for example us-west-2-lax-1a. For information about the Regions that support + // Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) + // in the Amazon Elastic Compute Cloud User Guide. + // + // To create a subnet in an Outpost, set this value to the Availability Zone + // for the Outpost and specify the Outpost ARN. + AvailabilityZone *string `type:"string"` + + // The AZ ID or the Local Zone ID of the subnet. + AvailabilityZoneId *string `type:"string"` + + // The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. + // + // CidrBlock is a required field + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IPv6 network range for the subnet, in CIDR notation. The subnet size + // must use a /64 prefix length. + Ipv6CidrBlock *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost + // ARN, you must also specify the Availability Zone of the Outpost subnet. + OutpostArn *string `type:"string"` + + // The tags to assign to the subnet. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSubnetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSubnetInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateSubnetInput) SetAvailabilityZone(v string) *CreateSubnetInput { + s.AvailabilityZone = &v + return s +} + +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *CreateSubnetInput) SetAvailabilityZoneId(v string) *CreateSubnetInput { + s.AvailabilityZoneId = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CreateSubnetInput) SetCidrBlock(v string) *CreateSubnetInput { + s.CidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSubnetInput) SetDryRun(v bool) *CreateSubnetInput { + s.DryRun = &v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *CreateSubnetInput) SetIpv6CidrBlock(v string) *CreateSubnetInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *CreateSubnetInput) SetOutpostArn(v string) *CreateSubnetInput { + s.OutpostArn = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateSubnetInput) SetTagSpecifications(v []*TagSpecification) *CreateSubnetInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput { + s.VpcId = &v + return s +} + +type CreateSubnetOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet. + Subnet *Subnet `locationName:"subnet" type:"structure"` +} + +// String returns the string representation +func (s CreateSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubnetOutput) GoString() string { + return s.String() +} + +// SetSubnet sets the Subnet field's value. +func (s *CreateSubnetOutput) SetSubnet(v *Subnet) *CreateSubnetOutput { + s.Subnet = v + return s +} + +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the resources, separated by spaces. + // + // Constraints: Up to 1000 resource IDs. We recommend breaking up this request + // into smaller batches. + // + // Resources is a required field + Resources []*string `locationName:"ResourceId" type:"list" required:"true"` + + // The tags. The value parameter is required, but if you don't want the tag + // to have a value, specify the parameter with no value, and we set the value + // to an empty string. 
+ // + // Tags is a required field + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.Resources == nil { + invalidParams.Add(request.NewErrParamRequired("Resources")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTagsInput) SetDryRun(v bool) *CreateTagsInput { + s.DryRun = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *CreateTagsInput) SetResources(v []*string) *CreateTagsInput { + s.Resources = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput { + s.Tags = v + return s +} + +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +type CreateTrafficMirrorFilterInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror filter. 
+ Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to assign to a Traffic Mirror filter. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterInput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterInput) SetClientToken(v string) *CreateTrafficMirrorFilterInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorFilterInput) SetDescription(v string) *CreateTrafficMirrorFilterInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorFilterInput) SetDryRun(v bool) *CreateTrafficMirrorFilterInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTrafficMirrorFilterInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorFilterInput { + s.TagSpecifications = v + return s +} + +type CreateTrafficMirrorFilterOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror filter. + TrafficMirrorFilter *TrafficMirrorFilter `locationName:"trafficMirrorFilter" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterOutput) SetClientToken(v string) *CreateTrafficMirrorFilterOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorFilter sets the TrafficMirrorFilter field's value. +func (s *CreateTrafficMirrorFilterOutput) SetTrafficMirrorFilter(v *TrafficMirrorFilter) *CreateTrafficMirrorFilterOutput { + s.TrafficMirrorFilter = v + return s +} + +type CreateTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror rule. + Description *string `type:"string"` + + // The destination CIDR block to assign to the Traffic Mirror rule. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // The destination port range. + DestinationPortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The protocol, for example UDP, to assign to the Traffic Mirror rule. + // + // For information about the protocol value, see Protocol Numbers (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + // on the Internet Assigned Numbers Authority (IANA) website. + Protocol *int64 `type:"integer"` + + // The action to take (accept | reject) on the filtered traffic. + // + // RuleAction is a required field + RuleAction *string `type:"string" required:"true" enum:"TrafficMirrorRuleAction"` + + // The number of the Traffic Mirror rule. This number must be unique for each + // Traffic Mirror rule in a given direction. The rules are processed in ascending + // order by rule number. + // + // RuleNumber is a required field + RuleNumber *int64 `type:"integer" required:"true"` + + // The source CIDR block to assign to the Traffic Mirror rule. + // + // SourceCidrBlock is a required field + SourceCidrBlock *string `type:"string" required:"true"` + + // The source port range. + SourcePortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // The type of traffic (ingress | egress). + // + // TrafficDirection is a required field + TrafficDirection *string `type:"string" required:"true" enum:"TrafficDirection"` + + // The ID of the filter that this rule is associated with. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficMirrorFilterRuleInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + if s.SourceCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("SourceCidrBlock")) + } + if s.TrafficDirection == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficDirection")) + } + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetClientToken(v string) *CreateTrafficMirrorFilterRuleInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDescription(v string) *CreateTrafficMirrorFilterRuleInput { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDestinationCidrBlock(v string) *CreateTrafficMirrorFilterRuleInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationPortRange sets the DestinationPortRange field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDestinationPortRange(v *TrafficMirrorPortRangeRequest) *CreateTrafficMirrorFilterRuleInput { + s.DestinationPortRange = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetDryRun(v bool) *CreateTrafficMirrorFilterRuleInput { + s.DryRun = &v + return s +} + +// SetProtocol sets the Protocol field's value. 
+func (s *CreateTrafficMirrorFilterRuleInput) SetProtocol(v int64) *CreateTrafficMirrorFilterRuleInput { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetRuleAction(v string) *CreateTrafficMirrorFilterRuleInput { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetRuleNumber(v int64) *CreateTrafficMirrorFilterRuleInput { + s.RuleNumber = &v + return s +} + +// SetSourceCidrBlock sets the SourceCidrBlock field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetSourceCidrBlock(v string) *CreateTrafficMirrorFilterRuleInput { + s.SourceCidrBlock = &v + return s +} + +// SetSourcePortRange sets the SourcePortRange field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetSourcePortRange(v *TrafficMirrorPortRangeRequest) *CreateTrafficMirrorFilterRuleInput { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetTrafficDirection(v string) *CreateTrafficMirrorFilterRuleInput { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterId(v string) *CreateTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterId = &v + return s +} + +type CreateTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The Traffic Mirror rule. 
+ TrafficMirrorFilterRule *TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRule" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorFilterRuleOutput) SetClientToken(v string) *CreateTrafficMirrorFilterRuleOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorFilterRule sets the TrafficMirrorFilterRule field's value. +func (s *CreateTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRule(v *TrafficMirrorFilterRule) *CreateTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRule = v + return s +} + +type CreateTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror session. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the source network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `type:"string" required:"true"` + + // The number of bytes in each packet to mirror. These are bytes after the VXLAN + // header. Do not specify this parameter when you want to mirror the entire + // packet. 
To mirror a subset of the packet, set this to the length (in bytes) + // that you want to mirror. For example, if you set this value to 100, then + // the first 100 bytes that meet the filter criteria are copied to the target. + // + // If you do not want to mirror the entire packet, use the PacketLength parameter + // to specify the number of bytes in each packet to mirror. + PacketLength *int64 `type:"integer"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + // + // SessionNumber is a required field + SessionNumber *int64 `type:"integer" required:"true"` + + // The tags to assign to a Traffic Mirror session. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` + + // The ID of the Traffic Mirror target. + // + // TrafficMirrorTargetId is a required field + TrafficMirrorTargetId *string `type:"string" required:"true"` + + // The VXLAN ID for the Traffic Mirror session. For more information about the + // VXLAN protocol, see RFC 7348 (https://tools.ietf.org/html/rfc7348). If you + // do not specify a VirtualNetworkId, an account-wide unique id is chosen at + // random. + VirtualNetworkId *int64 `type:"integer"` +} + +// String returns the string representation +func (s CreateTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTrafficMirrorSessionInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.SessionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SessionNumber")) + } + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + if s.TrafficMirrorTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorTargetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorSessionInput) SetClientToken(v string) *CreateTrafficMirrorSessionInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateTrafficMirrorSessionInput) SetDescription(v string) *CreateTrafficMirrorSessionInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorSessionInput) SetDryRun(v bool) *CreateTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateTrafficMirrorSessionInput) SetNetworkInterfaceId(v string) *CreateTrafficMirrorSessionInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *CreateTrafficMirrorSessionInput) SetPacketLength(v int64) *CreateTrafficMirrorSessionInput { + s.PacketLength = &v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *CreateTrafficMirrorSessionInput) SetSessionNumber(v int64) *CreateTrafficMirrorSessionInput { + s.SessionNumber = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateTrafficMirrorSessionInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorSessionInput { + s.TagSpecifications = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *CreateTrafficMirrorSessionInput) SetTrafficMirrorFilterId(v string) *CreateTrafficMirrorSessionInput { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *CreateTrafficMirrorSessionInput) SetTrafficMirrorTargetId(v string) *CreateTrafficMirrorSessionInput { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *CreateTrafficMirrorSessionInput) SetVirtualNetworkId(v int64) *CreateTrafficMirrorSessionInput { + s.VirtualNetworkId = &v + return s +} + +type CreateTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror session. + TrafficMirrorSession *TrafficMirrorSession `locationName:"trafficMirrorSession" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorSessionOutput) SetClientToken(v string) *CreateTrafficMirrorSessionOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorSession sets the TrafficMirrorSession field's value. 
+func (s *CreateTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMirrorSession) *CreateTrafficMirrorSessionOutput { + s.TrafficMirrorSession = v + return s +} + +type CreateTrafficMirrorTargetInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The description of the Traffic Mirror target. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The network interface ID that is associated with the target. + NetworkInterfaceId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Network Load Balancer that is associated + // with the target. + NetworkLoadBalancerArn *string `type:"string"` + + // The tags to assign to the Traffic Mirror target. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateTrafficMirrorTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorTargetInput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateTrafficMirrorTargetInput) SetClientToken(v string) *CreateTrafficMirrorTargetInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *CreateTrafficMirrorTargetInput) SetDescription(v string) *CreateTrafficMirrorTargetInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTrafficMirrorTargetInput) SetDryRun(v bool) *CreateTrafficMirrorTargetInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateTrafficMirrorTargetInput) SetNetworkInterfaceId(v string) *CreateTrafficMirrorTargetInput { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkLoadBalancerArn sets the NetworkLoadBalancerArn field's value. +func (s *CreateTrafficMirrorTargetInput) SetNetworkLoadBalancerArn(v string) *CreateTrafficMirrorTargetInput { + s.NetworkLoadBalancerArn = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTrafficMirrorTargetInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorTargetInput { + s.TagSpecifications = v + return s +} + +type CreateTrafficMirrorTargetOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the Traffic Mirror target. + TrafficMirrorTarget *TrafficMirrorTarget `locationName:"trafficMirrorTarget" type:"structure"` +} + +// String returns the string representation +func (s CreateTrafficMirrorTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTrafficMirrorTargetOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *CreateTrafficMirrorTargetOutput) SetClientToken(v string) *CreateTrafficMirrorTargetOutput { + s.ClientToken = &v + return s +} + +// SetTrafficMirrorTarget sets the TrafficMirrorTarget field's value. +func (s *CreateTrafficMirrorTargetOutput) SetTrafficMirrorTarget(v *TrafficMirrorTarget) *CreateTrafficMirrorTargetOutput { + s.TrafficMirrorTarget = v + return s +} + +type CreateTransitGatewayInput struct { + _ struct{} `type:"structure"` + + // A description of the transit gateway. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The transit gateway options. + Options *TransitGatewayRequestOptions `type:"structure"` + + // The tags to apply to the transit gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateTransitGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayInput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *CreateTransitGatewayInput) SetDescription(v string) *CreateTransitGatewayInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTransitGatewayInput) SetDryRun(v bool) *CreateTransitGatewayInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *CreateTransitGatewayInput) SetOptions(v *TransitGatewayRequestOptions) *CreateTransitGatewayInput { + s.Options = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateTransitGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateTransitGatewayInput { + s.TagSpecifications = v + return s +} + +type CreateTransitGatewayMulticastDomainInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags for the transit gateway multicast domain. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the transit gateway. + // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTransitGatewayMulticastDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayMulticastDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTransitGatewayMulticastDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayMulticastDomainInput"} + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTransitGatewayMulticastDomainInput) SetDryRun(v bool) *CreateTransitGatewayMulticastDomainInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateTransitGatewayMulticastDomainInput) SetTagSpecifications(v []*TagSpecification) *CreateTransitGatewayMulticastDomainInput { + s.TagSpecifications = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *CreateTransitGatewayMulticastDomainInput) SetTransitGatewayId(v string) *CreateTransitGatewayMulticastDomainInput { + s.TransitGatewayId = &v + return s +} + +type CreateTransitGatewayMulticastDomainOutput struct { + _ struct{} `type:"structure"` + + // Information about the transit gateway multicast domain. + TransitGatewayMulticastDomain *TransitGatewayMulticastDomain `locationName:"transitGatewayMulticastDomain" type:"structure"` +} + +// String returns the string representation +func (s CreateTransitGatewayMulticastDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayMulticastDomainOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayMulticastDomain sets the TransitGatewayMulticastDomain field's value. +func (s *CreateTransitGatewayMulticastDomainOutput) SetTransitGatewayMulticastDomain(v *TransitGatewayMulticastDomain) *CreateTransitGatewayMulticastDomainOutput { + s.TransitGatewayMulticastDomain = v + return s +} + +type CreateTransitGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the transit gateway. + TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateTransitGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayOutput) GoString() string { + return s.String() +} + +// SetTransitGateway sets the TransitGateway field's value. 
+func (s *CreateTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *CreateTransitGatewayOutput { + s.TransitGateway = v + return s +} + +type CreateTransitGatewayPeeringAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The AWS account ID of the owner of the peer transit gateway. + // + // PeerAccountId is a required field + PeerAccountId *string `type:"string" required:"true"` + + // The Region where the peer transit gateway is located. + // + // PeerRegion is a required field + PeerRegion *string `type:"string" required:"true"` + + // The ID of the peer transit gateway with which to create the peering attachment. + // + // PeerTransitGatewayId is a required field + PeerTransitGatewayId *string `type:"string" required:"true"` + + // The tags to apply to the transit gateway peering attachment. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the transit gateway. + // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTransitGatewayPeeringAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayPeeringAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateTransitGatewayPeeringAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayPeeringAttachmentInput"} + if s.PeerAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerAccountId")) + } + if s.PeerRegion == nil { + invalidParams.Add(request.NewErrParamRequired("PeerRegion")) + } + if s.PeerTransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("PeerTransitGatewayId")) + } + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTransitGatewayPeeringAttachmentInput) SetDryRun(v bool) *CreateTransitGatewayPeeringAttachmentInput { + s.DryRun = &v + return s +} + +// SetPeerAccountId sets the PeerAccountId field's value. +func (s *CreateTransitGatewayPeeringAttachmentInput) SetPeerAccountId(v string) *CreateTransitGatewayPeeringAttachmentInput { + s.PeerAccountId = &v + return s +} + +// SetPeerRegion sets the PeerRegion field's value. +func (s *CreateTransitGatewayPeeringAttachmentInput) SetPeerRegion(v string) *CreateTransitGatewayPeeringAttachmentInput { + s.PeerRegion = &v + return s +} + +// SetPeerTransitGatewayId sets the PeerTransitGatewayId field's value. +func (s *CreateTransitGatewayPeeringAttachmentInput) SetPeerTransitGatewayId(v string) *CreateTransitGatewayPeeringAttachmentInput { + s.PeerTransitGatewayId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTransitGatewayPeeringAttachmentInput) SetTagSpecifications(v []*TagSpecification) *CreateTransitGatewayPeeringAttachmentInput { + s.TagSpecifications = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. 
func (s *CreateTransitGatewayPeeringAttachmentInput) SetTransitGatewayId(v string) *CreateTransitGatewayPeeringAttachmentInput {
	s.TransitGatewayId = &v
	return s
}

type CreateTransitGatewayPeeringAttachmentOutput struct {
	_ struct{} `type:"structure"`

	// The transit gateway peering attachment.
	TransitGatewayPeeringAttachment *TransitGatewayPeeringAttachment `locationName:"transitGatewayPeeringAttachment" type:"structure"`
}

// String returns the string representation
func (s CreateTransitGatewayPeeringAttachmentOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayPeeringAttachmentOutput) GoString() string {
	return s.String()
}

// SetTransitGatewayPeeringAttachment sets the TransitGatewayPeeringAttachment field's value.
func (s *CreateTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAttachment(v *TransitGatewayPeeringAttachment) *CreateTransitGatewayPeeringAttachmentOutput {
	s.TransitGatewayPeeringAttachment = v
	return s
}

type CreateTransitGatewayPrefixListReferenceInput struct {
	_ struct{} `type:"structure"`

	// Indicates whether to drop traffic that matches this route.
	Blackhole *bool `type:"boolean"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the prefix list that is used for destination matches.
	//
	// PrefixListId is a required field
	PrefixListId *string `type:"string" required:"true"`

	// The ID of the attachment to which traffic is routed.
	TransitGatewayAttachmentId *string `type:"string"`

	// The ID of the transit gateway route table.
	//
	// TransitGatewayRouteTableId is a required field
	TransitGatewayRouteTableId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateTransitGatewayPrefixListReferenceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayPrefixListReferenceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateTransitGatewayPrefixListReferenceInput) Validate() error {
	// NOTE(review): presence-only validation — only nil checks on required fields.
	invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayPrefixListReferenceInput"}
	if s.PrefixListId == nil {
		invalidParams.Add(request.NewErrParamRequired("PrefixListId"))
	}
	if s.TransitGatewayRouteTableId == nil {
		invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBlackhole sets the Blackhole field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *CreateTransitGatewayPrefixListReferenceInput {
	s.Blackhole = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *CreateTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *CreateTransitGatewayPrefixListReferenceInput {
	s.DryRun = &v
	return s
}

// SetPrefixListId sets the PrefixListId field's value.
func (s *CreateTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *CreateTransitGatewayPrefixListReferenceInput {
	s.PrefixListId = &v
	return s
}

// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value.
func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *CreateTransitGatewayPrefixListReferenceInput {
	s.TransitGatewayAttachmentId = &v
	return s
}

// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value.
func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *CreateTransitGatewayPrefixListReferenceInput {
	s.TransitGatewayRouteTableId = &v
	return s
}

type CreateTransitGatewayPrefixListReferenceOutput struct {
	_ struct{} `type:"structure"`

	// Information about the prefix list reference.
	TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"`
}

// String returns the string representation
func (s CreateTransitGatewayPrefixListReferenceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayPrefixListReferenceOutput) GoString() string {
	return s.String()
}

// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value.
func (s *CreateTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *CreateTransitGatewayPrefixListReferenceOutput {
	s.TransitGatewayPrefixListReference = v
	return s
}

type CreateTransitGatewayRouteInput struct {
	_ struct{} `type:"structure"`

	// Indicates whether to drop traffic that matches this route.
	Blackhole *bool `type:"boolean"`

	// The CIDR range used for destination matches. Routing decisions are based
	// on the most specific match.
	//
	// DestinationCidrBlock is a required field
	DestinationCidrBlock *string `type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the attachment.
	TransitGatewayAttachmentId *string `type:"string"`

	// The ID of the transit gateway route table.
	//
	// TransitGatewayRouteTableId is a required field
	TransitGatewayRouteTableId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateTransitGatewayRouteInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayRouteInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateTransitGatewayRouteInput) Validate() error {
	// NOTE(review): presence-only validation — the CIDR string itself is not
	// parsed client-side; format errors surface from the service.
	invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayRouteInput"}
	if s.DestinationCidrBlock == nil {
		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
	}
	if s.TransitGatewayRouteTableId == nil {
		invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBlackhole sets the Blackhole field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateTransitGatewayRouteInput) SetBlackhole(v bool) *CreateTransitGatewayRouteInput {
	s.Blackhole = &v
	return s
}

// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
func (s *CreateTransitGatewayRouteInput) SetDestinationCidrBlock(v string) *CreateTransitGatewayRouteInput {
	s.DestinationCidrBlock = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *CreateTransitGatewayRouteInput) SetDryRun(v bool) *CreateTransitGatewayRouteInput {
	s.DryRun = &v
	return s
}

// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value.
func (s *CreateTransitGatewayRouteInput) SetTransitGatewayAttachmentId(v string) *CreateTransitGatewayRouteInput {
	s.TransitGatewayAttachmentId = &v
	return s
}

// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value.
func (s *CreateTransitGatewayRouteInput) SetTransitGatewayRouteTableId(v string) *CreateTransitGatewayRouteInput {
	s.TransitGatewayRouteTableId = &v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

type CreateTransitGatewayRouteOutput struct {
	_ struct{} `type:"structure"`

	// Information about the route.
	Route *TransitGatewayRoute `locationName:"route" type:"structure"`
}

// String returns the string representation
func (s CreateTransitGatewayRouteOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayRouteOutput) GoString() string {
	return s.String()
}

// SetRoute sets the Route field's value.
func (s *CreateTransitGatewayRouteOutput) SetRoute(v *TransitGatewayRoute) *CreateTransitGatewayRouteOutput {
	s.Route = v
	return s
}

type CreateTransitGatewayRouteTableInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The tags to apply to the transit gateway route table.
	TagSpecifications []*TagSpecification `locationNameList:"item" type:"list"`

	// The ID of the transit gateway.
	//
	// TransitGatewayId is a required field
	TransitGatewayId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateTransitGatewayRouteTableInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayRouteTableInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateTransitGatewayRouteTableInput) Validate() error {
	// NOTE(review): presence-only validation — only nil checks on required fields.
	invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayRouteTableInput"}
	if s.TransitGatewayId == nil {
		invalidParams.Add(request.NewErrParamRequired("TransitGatewayId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateTransitGatewayRouteTableInput) SetDryRun(v bool) *CreateTransitGatewayRouteTableInput {
	s.DryRun = &v
	return s
}

// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateTransitGatewayRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateTransitGatewayRouteTableInput {
	s.TagSpecifications = v
	return s
}

// SetTransitGatewayId sets the TransitGatewayId field's value.
func (s *CreateTransitGatewayRouteTableInput) SetTransitGatewayId(v string) *CreateTransitGatewayRouteTableInput {
	s.TransitGatewayId = &v
	return s
}

type CreateTransitGatewayRouteTableOutput struct {
	_ struct{} `type:"structure"`

	// Information about the transit gateway route table.
	TransitGatewayRouteTable *TransitGatewayRouteTable `locationName:"transitGatewayRouteTable" type:"structure"`
}

// String returns the string representation
func (s CreateTransitGatewayRouteTableOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayRouteTableOutput) GoString() string {
	return s.String()
}

// SetTransitGatewayRouteTable sets the TransitGatewayRouteTable field's value.
func (s *CreateTransitGatewayRouteTableOutput) SetTransitGatewayRouteTable(v *TransitGatewayRouteTable) *CreateTransitGatewayRouteTableOutput {
	s.TransitGatewayRouteTable = v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

type CreateTransitGatewayVpcAttachmentInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The VPC attachment options.
	Options *CreateTransitGatewayVpcAttachmentRequestOptions `type:"structure"`

	// The IDs of one or more subnets. You can specify only one subnet per Availability
	// Zone. You must specify at least one subnet, but we recommend that you specify
	// two subnets for better availability. The transit gateway uses one IP address
	// from each specified subnet.
	//
	// SubnetIds is a required field
	SubnetIds []*string `locationNameList:"item" type:"list" required:"true"`

	// The tags to apply to the VPC attachment.
	TagSpecifications []*TagSpecification `locationNameList:"item" type:"list"`

	// The ID of the transit gateway.
	//
	// TransitGatewayId is a required field
	TransitGatewayId *string `type:"string" required:"true"`

	// The ID of the VPC.
	//
	// VpcId is a required field
	VpcId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateTransitGatewayVpcAttachmentInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayVpcAttachmentInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateTransitGatewayVpcAttachmentInput) Validate() error {
	// NOTE(review): presence-only validation — SubnetIds is checked for nil but
	// not for emptiness or the one-subnet-per-AZ constraint documented above.
	invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayVpcAttachmentInput"}
	if s.SubnetIds == nil {
		invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
	}
	if s.TransitGatewayId == nil {
		invalidParams.Add(request.NewErrParamRequired("TransitGatewayId"))
	}
	if s.VpcId == nil {
		invalidParams.Add(request.NewErrParamRequired("VpcId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateTransitGatewayVpcAttachmentInput) SetDryRun(v bool) *CreateTransitGatewayVpcAttachmentInput {
	s.DryRun = &v
	return s
}

// SetOptions sets the Options field's value.
func (s *CreateTransitGatewayVpcAttachmentInput) SetOptions(v *CreateTransitGatewayVpcAttachmentRequestOptions) *CreateTransitGatewayVpcAttachmentInput {
	s.Options = v
	return s
}

// SetSubnetIds sets the SubnetIds field's value.
func (s *CreateTransitGatewayVpcAttachmentInput) SetSubnetIds(v []*string) *CreateTransitGatewayVpcAttachmentInput {
	s.SubnetIds = v
	return s
}

// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateTransitGatewayVpcAttachmentInput) SetTagSpecifications(v []*TagSpecification) *CreateTransitGatewayVpcAttachmentInput {
	s.TagSpecifications = v
	return s
}

// SetTransitGatewayId sets the TransitGatewayId field's value.
func (s *CreateTransitGatewayVpcAttachmentInput) SetTransitGatewayId(v string) *CreateTransitGatewayVpcAttachmentInput {
	s.TransitGatewayId = &v
	return s
}

// SetVpcId sets the VpcId field's value.
func (s *CreateTransitGatewayVpcAttachmentInput) SetVpcId(v string) *CreateTransitGatewayVpcAttachmentInput {
	s.VpcId = &v
	return s
}

type CreateTransitGatewayVpcAttachmentOutput struct {
	_ struct{} `type:"structure"`

	// Information about the VPC attachment.
	TransitGatewayVpcAttachment *TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachment" type:"structure"`
}

// String returns the string representation
func (s CreateTransitGatewayVpcAttachmentOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayVpcAttachmentOutput) GoString() string {
	return s.String()
}

// SetTransitGatewayVpcAttachment sets the TransitGatewayVpcAttachment field's value.
func (s *CreateTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment(v *TransitGatewayVpcAttachment) *CreateTransitGatewayVpcAttachmentOutput {
	s.TransitGatewayVpcAttachment = v
	return s
}

// Describes the options for a VPC attachment.
type CreateTransitGatewayVpcAttachmentRequestOptions struct {
	_ struct{} `type:"structure"`

	// Enable or disable support for appliance mode. If enabled, a traffic flow
	// between a source and destination uses the same Availability Zone for the
	// VPC attachment for the lifetime of that flow. The default is disable.
	ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"`

	// Enable or disable DNS support. The default is enable.
	DnsSupport *string `type:"string" enum:"DnsSupportValue"`

	// Enable or disable IPv6 support.
	Ipv6Support *string `type:"string" enum:"Ipv6SupportValue"`
}

// String returns the string representation
func (s CreateTransitGatewayVpcAttachmentRequestOptions) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateTransitGatewayVpcAttachmentRequestOptions) GoString() string {
	return s.String()
}

// SetApplianceModeSupport sets the ApplianceModeSupport field's value.
func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions {
	s.ApplianceModeSupport = &v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

// SetDnsSupport sets the DnsSupport field's value.
func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions {
	s.DnsSupport = &v
	return s
}

// SetIpv6Support sets the Ipv6Support field's value.
func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v string) *CreateTransitGatewayVpcAttachmentRequestOptions {
	s.Ipv6Support = &v
	return s
}

type CreateVolumeInput struct {
	_ struct{} `type:"structure"`

	// The Availability Zone in which to create the volume.
	//
	// AvailabilityZone is a required field
	AvailabilityZone *string `type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// Indicates whether the volume should be encrypted. The effect of setting the
	// encryption state to true depends on the volume origin (new or from a snapshot),
	// starting encryption state, ownership, and whether encryption by default is
	// enabled. For more information, see Encryption by default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default)
	// in the Amazon Elastic Compute Cloud User Guide.
	//
	// Encrypted Amazon EBS volumes must be attached to instances that support Amazon
	// EBS encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances).
	Encrypted *bool `locationName:"encrypted" type:"boolean"`

	// The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes,
	// this represents the number of IOPS that are provisioned for the volume. For
	// gp2 volumes, this represents the baseline performance of the volume and the
	// rate at which the volume accumulates I/O credits for bursting.
	//
	// The following are the supported values for each volume type:
	//
	//    * gp3: 3,000-16,000 IOPS
	//
	//    * io1: 100-64,000 IOPS
	//
	//    * io2: 100-64,000 IOPS
	//
	// For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built
	// on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
	// Other instance families guarantee performance up to 32,000 IOPS.
	//
	// This parameter is required for io1 and io2 volumes. The default for gp3 volumes
	// is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard
	// volumes.
	Iops *int64 `type:"integer"`

	// The identifier of the AWS Key Management Service (AWS KMS) customer master
	// key (CMK) to use for Amazon EBS encryption. If this parameter is not specified,
	// your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted
	// state must be true.
	//
	// You can specify the CMK using any of the following:
	//
	//    * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
	//
	//    * Key alias. For example, alias/ExampleAlias.
	//
	//    * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
	//
	//    * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
	//
	// AWS authenticates the CMK asynchronously. Therefore, if you specify an ID,
	// alias, or ARN that is not valid, the action can appear to complete, but eventually
	// fails.
	KmsKeyId *string `type:"string"`

	// Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach,
	// you can attach the volume to up to 16 Instances built on the Nitro System
	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances)
	// in the same Availability Zone. This parameter is supported with io1 volumes
	// only. For more information, see Amazon EBS Multi-Attach (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	MultiAttachEnabled *bool `type:"boolean"`

	// The Amazon Resource Name (ARN) of the Outpost.
	OutpostArn *string `type:"string"`

	// The size of the volume, in GiBs. You must specify either a snapshot ID or
	// a volume size. If you specify a snapshot, the default is the snapshot size.
	// You can specify a volume size that is equal to or larger than the snapshot
	// size.
	//
	// The following are the supported volumes sizes for each volume type:
	//
	//    * gp2 and gp3: 1-16,384
	//
	//    * io1 and io2: 4-16,384
	//
	//    * st1 and sc1: 125-16,384
	//
	//    * standard: 1-1,024
	Size *int64 `type:"integer"`

	// The snapshot from which to create the volume. You must specify either a snapshot
	// ID or a volume size.
	SnapshotId *string `type:"string"`

	// The tags to apply to the volume during creation.
	TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`

	// The throughput to provision for a volume, with a maximum of 1,000 MiB/s.
	//
	// This parameter is valid only for gp3 volumes.
	//
	// Valid Range: Minimum value of 125. Maximum value of 1000.
	Throughput *int64 `type:"integer"`

	// The volume type. This parameter can be one of the following values:
	//
	//    * General Purpose SSD: gp2 | gp3
	//
	//    * Provisioned IOPS SSD: io1 | io2
	//
	//    * Throughput Optimized HDD: st1
	//
	//    * Cold HDD: sc1
	//
	//    * Magnetic: standard
	//
	// For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	//
	// Default: gp2
	VolumeType *string `type:"string" enum:"VolumeType"`
}

// String returns the string representation
func (s CreateVolumeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVolumeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateVolumeInput) Validate() error {
	// NOTE(review): only AvailabilityZone is checked client-side. The
	// either-Size-or-SnapshotId requirement documented on the fields above is
	// not enforced here — presumably validated by the service; confirm if
	// callers rely on early failure.
	invalidParams := request.ErrInvalidParams{Context: "CreateVolumeInput"}
	if s.AvailabilityZone == nil {
		invalidParams.Add(request.NewErrParamRequired("AvailabilityZone"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateVolumeInput) SetAvailabilityZone(v string) *CreateVolumeInput {
	s.AvailabilityZone = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *CreateVolumeInput) SetDryRun(v bool) *CreateVolumeInput {
	s.DryRun = &v
	return s
}

// SetEncrypted sets the Encrypted field's value.
func (s *CreateVolumeInput) SetEncrypted(v bool) *CreateVolumeInput {
	s.Encrypted = &v
	return s
}

// SetIops sets the Iops field's value.
func (s *CreateVolumeInput) SetIops(v int64) *CreateVolumeInput {
	s.Iops = &v
	return s
}

// SetKmsKeyId sets the KmsKeyId field's value.
func (s *CreateVolumeInput) SetKmsKeyId(v string) *CreateVolumeInput {
	s.KmsKeyId = &v
	return s
}

// SetMultiAttachEnabled sets the MultiAttachEnabled field's value.
func (s *CreateVolumeInput) SetMultiAttachEnabled(v bool) *CreateVolumeInput {
	s.MultiAttachEnabled = &v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

// SetOutpostArn sets the OutpostArn field's value.
func (s *CreateVolumeInput) SetOutpostArn(v string) *CreateVolumeInput {
	s.OutpostArn = &v
	return s
}

// SetSize sets the Size field's value.
func (s *CreateVolumeInput) SetSize(v int64) *CreateVolumeInput {
	s.Size = &v
	return s
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *CreateVolumeInput) SetSnapshotId(v string) *CreateVolumeInput {
	s.SnapshotId = &v
	return s
}

// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateVolumeInput) SetTagSpecifications(v []*TagSpecification) *CreateVolumeInput {
	s.TagSpecifications = v
	return s
}

// SetThroughput sets the Throughput field's value.
func (s *CreateVolumeInput) SetThroughput(v int64) *CreateVolumeInput {
	s.Throughput = &v
	return s
}

// SetVolumeType sets the VolumeType field's value.
func (s *CreateVolumeInput) SetVolumeType(v string) *CreateVolumeInput {
	s.VolumeType = &v
	return s
}

// Describes the user or group to be added or removed from the list of create
// volume permissions for a volume.
type CreateVolumePermission struct {
	_ struct{} `type:"structure"`

	// The group to be added or removed. The possible value is all.
	Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`

	// The AWS account ID to be added or removed.
	UserId *string `locationName:"userId" type:"string"`
}

// String returns the string representation
func (s CreateVolumePermission) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVolumePermission) GoString() string {
	return s.String()
}

// SetGroup sets the Group field's value.
func (s *CreateVolumePermission) SetGroup(v string) *CreateVolumePermission {
	s.Group = &v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

// SetUserId sets the UserId field's value.
func (s *CreateVolumePermission) SetUserId(v string) *CreateVolumePermission {
	s.UserId = &v
	return s
}

// Describes modifications to the list of create volume permissions for a volume.
type CreateVolumePermissionModifications struct {
	_ struct{} `type:"structure"`

	// Adds the specified AWS account ID or group to the list.
	Add []*CreateVolumePermission `locationNameList:"item" type:"list"`

	// Removes the specified AWS account ID or group from the list.
	Remove []*CreateVolumePermission `locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s CreateVolumePermissionModifications) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVolumePermissionModifications) GoString() string {
	return s.String()
}

// SetAdd sets the Add field's value.
func (s *CreateVolumePermissionModifications) SetAdd(v []*CreateVolumePermission) *CreateVolumePermissionModifications {
	s.Add = v
	return s
}

// SetRemove sets the Remove field's value.
func (s *CreateVolumePermissionModifications) SetRemove(v []*CreateVolumePermission) *CreateVolumePermissionModifications {
	s.Remove = v
	return s
}

type CreateVpcEndpointConnectionNotificationInput struct {
	_ struct{} `type:"structure"`

	// Unique, case-sensitive identifier that you provide to ensure the idempotency
	// of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
	ClientToken *string `type:"string"`

	// One or more endpoint events for which to receive notifications. Valid values
	// are Accept, Connect, Delete, and Reject.
	//
	// ConnectionEvents is a required field
	ConnectionEvents []*string `locationNameList:"item" type:"list" required:"true"`

	// The ARN of the SNS topic for the notifications.
	//
	// ConnectionNotificationArn is a required field
	ConnectionNotificationArn *string `type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the endpoint service.
	ServiceId *string `type:"string"`

	// The ID of the endpoint.
	VpcEndpointId *string `type:"string"`
}

// String returns the string representation
func (s CreateVpcEndpointConnectionNotificationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVpcEndpointConnectionNotificationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateVpcEndpointConnectionNotificationInput) Validate() error {
	// NOTE(review): presence-only validation — event names and the SNS ARN are
	// not checked for format client-side.
	invalidParams := request.ErrInvalidParams{Context: "CreateVpcEndpointConnectionNotificationInput"}
	if s.ConnectionEvents == nil {
		invalidParams.Add(request.NewErrParamRequired("ConnectionEvents"))
	}
	if s.ConnectionNotificationArn == nil {
		invalidParams.Add(request.NewErrParamRequired("ConnectionNotificationArn"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientToken sets the ClientToken field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetClientToken(v string) *CreateVpcEndpointConnectionNotificationInput {
	s.ClientToken = &v
	return s
}

// SetConnectionEvents sets the ConnectionEvents field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetConnectionEvents(v []*string) *CreateVpcEndpointConnectionNotificationInput {
	s.ConnectionEvents = v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

// SetConnectionNotificationArn sets the ConnectionNotificationArn field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetConnectionNotificationArn(v string) *CreateVpcEndpointConnectionNotificationInput {
	s.ConnectionNotificationArn = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetDryRun(v bool) *CreateVpcEndpointConnectionNotificationInput {
	s.DryRun = &v
	return s
}

// SetServiceId sets the ServiceId field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetServiceId(v string) *CreateVpcEndpointConnectionNotificationInput {
	s.ServiceId = &v
	return s
}

// SetVpcEndpointId sets the VpcEndpointId field's value.
func (s *CreateVpcEndpointConnectionNotificationInput) SetVpcEndpointId(v string) *CreateVpcEndpointConnectionNotificationInput {
	s.VpcEndpointId = &v
	return s
}

type CreateVpcEndpointConnectionNotificationOutput struct {
	_ struct{} `type:"structure"`

	// Unique, case-sensitive identifier that you provide to ensure the idempotency
	// of the request.
	ClientToken *string `locationName:"clientToken" type:"string"`

	// Information about the notification.
	ConnectionNotification *ConnectionNotification `locationName:"connectionNotification" type:"structure"`
}

// String returns the string representation
func (s CreateVpcEndpointConnectionNotificationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVpcEndpointConnectionNotificationOutput) GoString() string {
	return s.String()
}

// SetClientToken sets the ClientToken field's value.
func (s *CreateVpcEndpointConnectionNotificationOutput) SetClientToken(v string) *CreateVpcEndpointConnectionNotificationOutput {
	s.ClientToken = &v
	// Returning the receiver lets callers chain setter invocations.
	return s
}

// SetConnectionNotification sets the ConnectionNotification field's value.
func (s *CreateVpcEndpointConnectionNotificationOutput) SetConnectionNotification(v *ConnectionNotification) *CreateVpcEndpointConnectionNotificationOutput {
	s.ConnectionNotification = v
	return s
}

// Contains the parameters for CreateVpcEndpoint.
type CreateVpcEndpointInput struct {
	_ struct{} `type:"structure"`

	// Unique, case-sensitive identifier that you provide to ensure the idempotency
	// of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
	ClientToken *string `type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// (Interface and gateway endpoints) A policy to attach to the endpoint that
	// controls access to the service. The policy must be in valid JSON format.
	// If this parameter is not specified, we attach a default policy that allows
	// full access to the service.
	PolicyDocument *string `type:"string"`

	// (Interface endpoint) Indicates whether to associate a private hosted zone
	// with the specified VPC. The private hosted zone contains a record set for
	// the default public DNS name for the service for the Region (for example,
	// kinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses
	// of the endpoint network interfaces in the VPC. This enables you to make requests
	// to the default public DNS name for the service instead of the public DNS
	// names that are automatically generated by the VPC endpoint service.
	//
	// To use a private hosted zone, you must set the following VPC attributes to
	// true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to
	// set the VPC attributes.
	//
	// Default: true
	PrivateDnsEnabled *bool `type:"boolean"`

	// (Gateway endpoint) One or more route table IDs.
	RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"`

	// (Interface endpoint) The ID of one or more security groups to associate with
	// the endpoint network interface.
	SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`

	// The service name. To get a list of available services, use the DescribeVpcEndpointServices
	// request, or get the name from the service provider.
	//
	// ServiceName is a required field
	ServiceName *string `type:"string" required:"true"`

	// (Interface and Gateway Load Balancer endpoints) The ID of one or more subnets
	// in which to create an endpoint network interface. For a Gateway Load Balancer
	// endpoint, you can specify one subnet only.
	SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"`

	// The tags to associate with the endpoint.
	TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`

	// The type of endpoint.
	//
	// Default: Gateway
	VpcEndpointType *string `type:"string" enum:"VpcEndpointType"`

	// The ID of the VPC in which the endpoint will be used.
	//
	// VpcId is a required field
	VpcId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s CreateVpcEndpointInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateVpcEndpointInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateVpcEndpointInput) Validate() error {
	// NOTE(review): presence-only validation — PolicyDocument is not checked
	// for valid JSON client-side despite the requirement documented above.
	invalidParams := request.ErrInvalidParams{Context: "CreateVpcEndpointInput"}
	if s.ServiceName == nil {
		invalidParams.Add(request.NewErrParamRequired("ServiceName"))
	}
	if s.VpcId == nil {
		invalidParams.Add(request.NewErrParamRequired("VpcId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientToken sets the ClientToken field's value.
// Each setter below returns the receiver so calls can be chained.
func (s *CreateVpcEndpointInput) SetClientToken(v string) *CreateVpcEndpointInput {
	s.ClientToken = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *CreateVpcEndpointInput) SetDryRun(v bool) *CreateVpcEndpointInput {
	s.DryRun = &v
	return s
}

// SetPolicyDocument sets the PolicyDocument field's value.
func (s *CreateVpcEndpointInput) SetPolicyDocument(v string) *CreateVpcEndpointInput {
	s.PolicyDocument = &v
	return s
}

// SetPrivateDnsEnabled sets the PrivateDnsEnabled field's value.
func (s *CreateVpcEndpointInput) SetPrivateDnsEnabled(v bool) *CreateVpcEndpointInput {
	s.PrivateDnsEnabled = &v
	return s
}

// SetRouteTableIds sets the RouteTableIds field's value.
func (s *CreateVpcEndpointInput) SetRouteTableIds(v []*string) *CreateVpcEndpointInput {
	s.RouteTableIds = v
	return s
}

// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *CreateVpcEndpointInput) SetSecurityGroupIds(v []*string) *CreateVpcEndpointInput {
	s.SecurityGroupIds = v
	return s
}

// SetServiceName sets the ServiceName field's value.
+func (s *CreateVpcEndpointInput) SetServiceName(v string) *CreateVpcEndpointInput { + s.ServiceName = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *CreateVpcEndpointInput) SetSubnetIds(v []*string) *CreateVpcEndpointInput { + s.SubnetIds = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcEndpointInput { + s.TagSpecifications = v + return s +} + +// SetVpcEndpointType sets the VpcEndpointType field's value. +func (s *CreateVpcEndpointInput) SetVpcEndpointType(v string) *CreateVpcEndpointInput { + s.VpcEndpointType = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateVpcEndpointInput) SetVpcId(v string) *CreateVpcEndpointInput { + s.VpcId = &v + return s +} + +// Contains the output of CreateVpcEndpoint. +type CreateVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the endpoint. + VpcEndpoint *VpcEndpoint `locationName:"vpcEndpoint" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateVpcEndpointOutput) SetClientToken(v string) *CreateVpcEndpointOutput { + s.ClientToken = &v + return s +} + +// SetVpcEndpoint sets the VpcEndpoint field's value. 
+func (s *CreateVpcEndpointOutput) SetVpcEndpoint(v *VpcEndpoint) *CreateVpcEndpointOutput { + s.VpcEndpoint = v + return s +} + +type CreateVpcEndpointServiceConfigurationInput struct { + _ struct{} `type:"structure"` + + // Indicates whether requests from service consumers to create an endpoint to + // your service must be accepted. To accept a request, use AcceptVpcEndpointConnections. + AcceptanceRequired *bool `type:"boolean"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers. + GatewayLoadBalancerArns []*string `locationName:"GatewayLoadBalancerArn" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of one or more Network Load Balancers for + // your service. + NetworkLoadBalancerArns []*string `locationName:"NetworkLoadBalancerArn" locationNameList:"item" type:"list"` + + // (Interface endpoint configuration) The private DNS name to assign to the + // VPC endpoint service. + PrivateDnsName *string `type:"string"` + + // The tags to associate with the service. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateVpcEndpointServiceConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointServiceConfigurationInput) GoString() string { + return s.String() +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetAcceptanceRequired(v bool) *CreateVpcEndpointServiceConfigurationInput { + s.AcceptanceRequired = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetClientToken(v string) *CreateVpcEndpointServiceConfigurationInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetDryRun(v bool) *CreateVpcEndpointServiceConfigurationInput { + s.DryRun = &v + return s +} + +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetGatewayLoadBalancerArns(v []*string) *CreateVpcEndpointServiceConfigurationInput { + s.GatewayLoadBalancerArns = v + return s +} + +// SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetNetworkLoadBalancerArns(v []*string) *CreateVpcEndpointServiceConfigurationInput { + s.NetworkLoadBalancerArns = v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetPrivateDnsName(v string) *CreateVpcEndpointServiceConfigurationInput { + s.PrivateDnsName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateVpcEndpointServiceConfigurationInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcEndpointServiceConfigurationInput { + s.TagSpecifications = v + return s +} + +type CreateVpcEndpointServiceConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string `locationName:"clientToken" type:"string"` + + // Information about the service configuration. + ServiceConfiguration *ServiceConfiguration `locationName:"serviceConfiguration" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcEndpointServiceConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcEndpointServiceConfigurationOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateVpcEndpointServiceConfigurationOutput) SetClientToken(v string) *CreateVpcEndpointServiceConfigurationOutput { + s.ClientToken = &v + return s +} + +// SetServiceConfiguration sets the ServiceConfiguration field's value. +func (s *CreateVpcEndpointServiceConfigurationOutput) SetServiceConfiguration(v *ServiceConfiguration) *CreateVpcEndpointServiceConfigurationOutput { + s.ServiceConfiguration = v + return s +} + +type CreateVpcInput struct { + _ struct{} `type:"structure"` + + // Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for + // the VPC. You cannot specify the range of IP addresses, or the size of the + // CIDR block. + AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` + + // The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. 
+ // + // CidrBlock is a required field + CidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tenancy options for instances launched into the VPC. For default, instances + // are launched with shared tenancy by default. You can launch instances with + // any tenancy into a shared tenancy VPC. For dedicated, instances are launched + // as dedicated tenancy instances by default. You can only launch instances + // with a tenancy of dedicated or host into a dedicated tenancy VPC. + // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool + // in the request. + // + // To let Amazon choose the IPv6 CIDR block for you, omit this parameter. + Ipv6CidrBlock *string `type:"string"` + + // The name of the location from which we advertise the IPV6 CIDR block. Use + // this parameter to limit the address to this location. + // + // You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. + Ipv6CidrBlockNetworkBorderGroup *string `type:"string"` + + // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. + Ipv6Pool *string `type:"string"` + + // The tags to assign to the VPC. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpcInput"} + if s.CidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("CidrBlock")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmazonProvidedIpv6CidrBlock sets the AmazonProvidedIpv6CidrBlock field's value. +func (s *CreateVpcInput) SetAmazonProvidedIpv6CidrBlock(v bool) *CreateVpcInput { + s.AmazonProvidedIpv6CidrBlock = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CreateVpcInput) SetCidrBlock(v string) *CreateVpcInput { + s.CidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateVpcInput) SetDryRun(v bool) *CreateVpcInput { + s.DryRun = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *CreateVpcInput) SetInstanceTenancy(v string) *CreateVpcInput { + s.InstanceTenancy = &v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *CreateVpcInput) SetIpv6CidrBlock(v string) *CreateVpcInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetIpv6CidrBlockNetworkBorderGroup sets the Ipv6CidrBlockNetworkBorderGroup field's value. +func (s *CreateVpcInput) SetIpv6CidrBlockNetworkBorderGroup(v string) *CreateVpcInput { + s.Ipv6CidrBlockNetworkBorderGroup = &v + return s +} + +// SetIpv6Pool sets the Ipv6Pool field's value. 
+func (s *CreateVpcInput) SetIpv6Pool(v string) *CreateVpcInput { + s.Ipv6Pool = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcInput { + s.TagSpecifications = v + return s +} + +type CreateVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. + Vpc *Vpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcOutput) GoString() string { + return s.String() +} + +// SetVpc sets the Vpc field's value. +func (s *CreateVpcOutput) SetVpc(v *Vpc) *CreateVpcOutput { + s.Vpc = v + return s +} + +type CreateVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The AWS account ID of the owner of the accepter VPC. + // + // Default: Your AWS account ID + PeerOwnerId *string `locationName:"peerOwnerId" type:"string"` + + // The Region code for the accepter VPC, if the accepter VPC is located in a + // Region other than the Region in which you make the request. + // + // Default: The Region in which you make the request. + PeerRegion *string `type:"string"` + + // The ID of the VPC with which you are creating the VPC peering connection. + // You must specify this parameter in the request. + PeerVpcId *string `locationName:"peerVpcId" type:"string"` + + // The tags to assign to the peering connection. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the requester VPC. You must specify this parameter in the request. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateVpcPeeringConnectionInput) SetDryRun(v bool) *CreateVpcPeeringConnectionInput { + s.DryRun = &v + return s +} + +// SetPeerOwnerId sets the PeerOwnerId field's value. +func (s *CreateVpcPeeringConnectionInput) SetPeerOwnerId(v string) *CreateVpcPeeringConnectionInput { + s.PeerOwnerId = &v + return s +} + +// SetPeerRegion sets the PeerRegion field's value. +func (s *CreateVpcPeeringConnectionInput) SetPeerRegion(v string) *CreateVpcPeeringConnectionInput { + s.PeerRegion = &v + return s +} + +// SetPeerVpcId sets the PeerVpcId field's value. +func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeringConnectionInput { + s.PeerVpcId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcPeeringConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcPeeringConnectionInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput { + s.VpcId = &v + return s +} + +type CreateVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection. 
+ VpcPeeringConnection *VpcPeeringConnection `locationName:"vpcPeeringConnection" type:"structure"` +} + +// String returns the string representation +func (s CreateVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// SetVpcPeeringConnection sets the VpcPeeringConnection field's value. +func (s *CreateVpcPeeringConnectionOutput) SetVpcPeeringConnection(v *VpcPeeringConnection) *CreateVpcPeeringConnectionOutput { + s.VpcPeeringConnection = v + return s +} + +// Contains the parameters for CreateVpnConnection. +type CreateVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + // + // CustomerGatewayId is a required field + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The options for the VPN connection. + Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` + + // The tags to apply to the VPN connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the transit gateway. If you specify a transit gateway, you cannot + // specify a virtual private gateway. + TransitGatewayId *string `type:"string"` + + // The type of VPN connection (ipsec.1). + // + // Type is a required field + Type *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. If you specify a virtual private gateway, + // you cannot specify a transit gateway. 
+ VpnGatewayId *string `type:"string"` +} + +// String returns the string representation +func (s CreateVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpnConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionInput"} + if s.CustomerGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *CreateVpnConnectionInput) SetCustomerGatewayId(v string) *CreateVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateVpnConnectionInput) SetDryRun(v bool) *CreateVpnConnectionInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *CreateVpnConnectionInput) SetOptions(v *VpnConnectionOptionsSpecification) *CreateVpnConnectionInput { + s.Options = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnConnectionInput { + s.TagSpecifications = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *CreateVpnConnectionInput) SetTransitGatewayId(v string) *CreateVpnConnectionInput { + s.TransitGatewayId = &v + return s +} + +// SetType sets the Type field's value. +func (s *CreateVpnConnectionInput) SetType(v string) *CreateVpnConnectionInput { + s.Type = &v + return s +} + +// SetVpnGatewayId sets the VpnGatewayId field's value. 
+func (s *CreateVpnConnectionInput) SetVpnGatewayId(v string) *CreateVpnConnectionInput { + s.VpnGatewayId = &v + return s +} + +// Contains the output of CreateVpnConnection. +type CreateVpnConnectionOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *CreateVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *CreateVpnConnectionOutput { + s.VpnConnection = v + return s +} + +// Contains the parameters for CreateVpnConnectionRoute. +type CreateVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateVpnConnectionRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpnConnectionRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *CreateVpnConnectionRouteInput) SetDestinationCidrBlock(v string) *CreateVpnConnectionRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *CreateVpnConnectionRouteInput) SetVpnConnectionId(v string) *CreateVpnConnectionRouteInput { + s.VpnConnectionId = &v + return s +} + +type CreateVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for CreateVpnGateway. +type CreateVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + // If you're using a 16-bit ASN, it must be in the 64512 to 65534 range. If + // you're using a 32-bit ASN, it must be in the 4200000000 to 4294967294 range. + // + // Default: 64512 + AmazonSideAsn *int64 `type:"long"` + + // The Availability Zone for the virtual private gateway. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to apply to the virtual private gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The type of VPN connection this virtual private gateway supports. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"GatewayType"` +} + +// String returns the string representation +func (s CreateVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVpnGatewayInput"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *CreateVpnGatewayInput) SetAmazonSideAsn(v int64) *CreateVpnGatewayInput { + s.AmazonSideAsn = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateVpnGatewayInput) SetAvailabilityZone(v string) *CreateVpnGatewayInput { + s.AvailabilityZone = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateVpnGatewayInput) SetDryRun(v bool) *CreateVpnGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnGatewayInput { + s.TagSpecifications = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateVpnGatewayInput) SetType(v string) *CreateVpnGatewayInput { + s.Type = &v + return s +} + +// Contains the output of CreateVpnGateway. 
+type CreateVpnGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual private gateway. + VpnGateway *VpnGateway `locationName:"vpnGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVpnGatewayOutput) GoString() string { + return s.String() +} + +// SetVpnGateway sets the VpnGateway field's value. +func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayOutput { + s.VpnGateway = v + return s +} + +// Describes the credit option for CPU usage of a T2, T3, or T3a instance. +type CreditSpecification struct { + _ struct{} `type:"structure"` + + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. + CpuCredits *string `locationName:"cpuCredits" type:"string"` +} + +// String returns the string representation +func (s CreditSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreditSpecification) GoString() string { + return s.String() +} + +// SetCpuCredits sets the CpuCredits field's value. +func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification { + s.CpuCredits = &v + return s +} + +// The credit option for CPU usage of a T2, T3, or T3a instance. +type CreditSpecificationRequest struct { + _ struct{} `type:"structure"` + + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. 
+ // + // CpuCredits is a required field + CpuCredits *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreditSpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreditSpecificationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreditSpecificationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreditSpecificationRequest"} + if s.CpuCredits == nil { + invalidParams.Add(request.NewErrParamRequired("CpuCredits")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCpuCredits sets the CpuCredits field's value. +func (s *CreditSpecificationRequest) SetCpuCredits(v string) *CreditSpecificationRequest { + s.CpuCredits = &v + return s +} + +// Describes a customer gateway. +type CustomerGateway struct { + _ struct{} `type:"structure"` + + // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number + // (ASN). + BgpAsn *string `locationName:"bgpAsn" type:"string"` + + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the customer gateway. + CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + + // The name of customer gateway device. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The Internet-routable IP address of the customer gateway's outside interface. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // The current state of the customer gateway (pending | available | deleting + // | deleted). + State *string `locationName:"state" type:"string"` + + // Any tags assigned to the customer gateway. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of VPN connection the customer gateway supports (ipsec.1). + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s CustomerGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerGateway) GoString() string { + return s.String() +} + +// SetBgpAsn sets the BgpAsn field's value. +func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { + s.BgpAsn = &v + return s +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { + s.CertificateArn = &v + return s +} + +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway { + s.CustomerGatewayId = &v + return s +} + +// SetDeviceName sets the DeviceName field's value. +func (s *CustomerGateway) SetDeviceName(v string) *CustomerGateway { + s.DeviceName = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *CustomerGateway) SetIpAddress(v string) *CustomerGateway { + s.IpAddress = &v + return s +} + +// SetState sets the State field's value. +func (s *CustomerGateway) SetState(v string) *CustomerGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CustomerGateway) SetTags(v []*Tag) *CustomerGateway { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CustomerGateway) SetType(v string) *CustomerGateway { + s.Type = &v + return s +} + +type DeleteCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. 
+ // + // CarrierGatewayId is a required field + CarrierGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCarrierGatewayInput"} + if s.CarrierGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CarrierGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *DeleteCarrierGatewayInput) SetCarrierGatewayId(v string) *DeleteCarrierGatewayInput { + s.CarrierGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteCarrierGatewayInput) SetDryRun(v bool) *DeleteCarrierGatewayInput { + s.DryRun = &v + return s +} + +type DeleteCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. 
+func (s *DeleteCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *DeleteCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + +type DeleteClientVpnEndpointInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN to be deleted. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteClientVpnEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientVpnEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteClientVpnEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClientVpnEndpointInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DeleteClientVpnEndpointInput) SetClientVpnEndpointId(v string) *DeleteClientVpnEndpointInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteClientVpnEndpointInput) SetDryRun(v bool) *DeleteClientVpnEndpointInput { + s.DryRun = &v + return s +} + +type DeleteClientVpnEndpointOutput struct { + _ struct{} `type:"structure"` + + // The current state of the Client VPN endpoint. 
+ Status *ClientVpnEndpointStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s DeleteClientVpnEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientVpnEndpointOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *DeleteClientVpnEndpointOutput) SetStatus(v *ClientVpnEndpointStatus) *DeleteClientVpnEndpointOutput { + s.Status = v + return s +} + +type DeleteClientVpnRouteInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint from which the route is to be deleted. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // The IPv4 address range, in CIDR notation, of the route to be deleted. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the target subnet used by the route. + TargetVpcSubnetId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteClientVpnRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientVpnRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteClientVpnRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteClientVpnRouteInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DeleteClientVpnRouteInput) SetClientVpnEndpointId(v string) *DeleteClientVpnRouteInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *DeleteClientVpnRouteInput) SetDestinationCidrBlock(v string) *DeleteClientVpnRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteClientVpnRouteInput) SetDryRun(v bool) *DeleteClientVpnRouteInput { + s.DryRun = &v + return s +} + +// SetTargetVpcSubnetId sets the TargetVpcSubnetId field's value. +func (s *DeleteClientVpnRouteInput) SetTargetVpcSubnetId(v string) *DeleteClientVpnRouteInput { + s.TargetVpcSubnetId = &v + return s +} + +type DeleteClientVpnRouteOutput struct { + _ struct{} `type:"structure"` + + // The current state of the route. + Status *ClientVpnRouteStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s DeleteClientVpnRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteClientVpnRouteOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *DeleteClientVpnRouteOutput) SetStatus(v *ClientVpnRouteStatus) *DeleteClientVpnRouteOutput { + s.Status = v + return s +} + +// Contains the parameters for DeleteCustomerGateway. 
+type DeleteCustomerGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway. + // + // CustomerGatewayId is a required field + CustomerGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomerGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomerGatewayInput"} + if s.CustomerGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *DeleteCustomerGatewayInput) SetCustomerGatewayId(v string) *DeleteCustomerGatewayInput { + s.CustomerGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteCustomerGatewayInput) SetDryRun(v bool) *DeleteCustomerGatewayInput { + s.DryRun = &v + return s +} + +type DeleteCustomerGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomerGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomerGatewayOutput) GoString() string { + return s.String() +} + +type DeleteDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the DHCP options set. + // + // DhcpOptionsId is a required field + DhcpOptionsId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDhcpOptionsInput"} + if s.DhcpOptionsId == nil { + invalidParams.Add(request.NewErrParamRequired("DhcpOptionsId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDhcpOptionsId sets the DhcpOptionsId field's value. +func (s *DeleteDhcpOptionsInput) SetDhcpOptionsId(v string) *DeleteDhcpOptionsInput { + s.DhcpOptionsId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteDhcpOptionsInput) SetDryRun(v bool) *DeleteDhcpOptionsInput { + s.DryRun = &v + return s +} + +type DeleteDhcpOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDhcpOptionsOutput) GoString() string { + return s.String() +} + +type DeleteEgressOnlyInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the egress-only internet gateway. + // + // EgressOnlyInternetGatewayId is a required field + EgressOnlyInternetGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEgressOnlyInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEgressOnlyInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteEgressOnlyInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteEgressOnlyInternetGatewayInput"} + if s.EgressOnlyInternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("EgressOnlyInternetGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteEgressOnlyInternetGatewayInput) SetDryRun(v bool) *DeleteEgressOnlyInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetEgressOnlyInternetGatewayId sets the EgressOnlyInternetGatewayId field's value. +func (s *DeleteEgressOnlyInternetGatewayInput) SetEgressOnlyInternetGatewayId(v string) *DeleteEgressOnlyInternetGatewayInput { + s.EgressOnlyInternetGatewayId = &v + return s +} + +type DeleteEgressOnlyInternetGatewayOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + ReturnCode *bool `locationName:"returnCode" type:"boolean"` +} + +// String returns the string representation +func (s DeleteEgressOnlyInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteEgressOnlyInternetGatewayOutput) GoString() string { + return s.String() +} + +// SetReturnCode sets the ReturnCode field's value. +func (s *DeleteEgressOnlyInternetGatewayOutput) SetReturnCode(v bool) *DeleteEgressOnlyInternetGatewayOutput { + s.ReturnCode = &v + return s +} + +// Describes an EC2 Fleet error. +type DeleteFleetError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" enum:"DeleteFleetErrorCode"` + + // The description for the error code. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteFleetError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *DeleteFleetError) SetCode(v string) *DeleteFleetError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *DeleteFleetError) SetMessage(v string) *DeleteFleetError { + s.Message = &v + return s +} + +// Describes an EC2 Fleet that was not successfully deleted. +type DeleteFleetErrorItem struct { + _ struct{} `type:"structure"` + + // The error. + Error *DeleteFleetError `locationName:"error" type:"structure"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` +} + +// String returns the string representation +func (s DeleteFleetErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetErrorItem) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *DeleteFleetErrorItem) SetError(v *DeleteFleetError) *DeleteFleetErrorItem { + s.Error = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteFleetErrorItem) SetFleetId(v string) *DeleteFleetErrorItem { + s.FleetId = &v + return s +} + +// Describes an EC2 Fleet that was successfully deleted. +type DeleteFleetSuccessItem struct { + _ struct{} `type:"structure"` + + // The current state of the EC2 Fleet. + CurrentFleetState *string `locationName:"currentFleetState" type:"string" enum:"FleetStateCode"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The previous state of the EC2 Fleet. + PreviousFleetState *string `locationName:"previousFleetState" type:"string" enum:"FleetStateCode"` +} + +// String returns the string representation +func (s DeleteFleetSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetSuccessItem) GoString() string { + return s.String() +} + +// SetCurrentFleetState sets the CurrentFleetState field's value. +func (s *DeleteFleetSuccessItem) SetCurrentFleetState(v string) *DeleteFleetSuccessItem { + s.CurrentFleetState = &v + return s +} + +// SetFleetId sets the FleetId field's value. 
+func (s *DeleteFleetSuccessItem) SetFleetId(v string) *DeleteFleetSuccessItem { + s.FleetId = &v + return s +} + +// SetPreviousFleetState sets the PreviousFleetState field's value. +func (s *DeleteFleetSuccessItem) SetPreviousFleetState(v string) *DeleteFleetSuccessItem { + s.PreviousFleetState = &v + return s +} + +type DeleteFleetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the EC2 Fleets. + // + // FleetIds is a required field + FleetIds []*string `locationName:"FleetId" type:"list" required:"true"` + + // Indicates whether to terminate the instances when the EC2 Fleet is deleted. + // The default is to terminate the instances. + // + // To let the instances continue to run after the EC2 Fleet is deleted, specify + // NoTerminateInstances. Supported only for fleets of type maintain and request. + // + // For instant fleets, you cannot specify NoTerminateInstances. A deleted instant + // fleet with running instances is not supported. + // + // TerminateInstances is a required field + TerminateInstances *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetsInput"} + if s.FleetIds == nil { + invalidParams.Add(request.NewErrParamRequired("FleetIds")) + } + if s.TerminateInstances == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateInstances")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFleetsInput) SetDryRun(v bool) *DeleteFleetsInput { + s.DryRun = &v + return s +} + +// SetFleetIds sets the FleetIds field's value. +func (s *DeleteFleetsInput) SetFleetIds(v []*string) *DeleteFleetsInput { + s.FleetIds = v + return s +} + +// SetTerminateInstances sets the TerminateInstances field's value. +func (s *DeleteFleetsInput) SetTerminateInstances(v bool) *DeleteFleetsInput { + s.TerminateInstances = &v + return s +} + +type DeleteFleetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Fleets that are successfully deleted. + SuccessfulFleetDeletions []*DeleteFleetSuccessItem `locationName:"successfulFleetDeletionSet" locationNameList:"item" type:"list"` + + // Information about the EC2 Fleets that are not successfully deleted. + UnsuccessfulFleetDeletions []*DeleteFleetErrorItem `locationName:"unsuccessfulFleetDeletionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetsOutput) GoString() string { + return s.String() +} + +// SetSuccessfulFleetDeletions sets the SuccessfulFleetDeletions field's value. +func (s *DeleteFleetsOutput) SetSuccessfulFleetDeletions(v []*DeleteFleetSuccessItem) *DeleteFleetsOutput { + s.SuccessfulFleetDeletions = v + return s +} + +// SetUnsuccessfulFleetDeletions sets the UnsuccessfulFleetDeletions field's value. 
+func (s *DeleteFleetsOutput) SetUnsuccessfulFleetDeletions(v []*DeleteFleetErrorItem) *DeleteFleetsOutput { + s.UnsuccessfulFleetDeletions = v + return s +} + +type DeleteFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more flow log IDs. + // + // Constraint: Maximum of 1000 flow log IDs. + // + // FlowLogIds is a required field + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFlowLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFlowLogsInput"} + if s.FlowLogIds == nil { + invalidParams.Add(request.NewErrParamRequired("FlowLogIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFlowLogsInput) SetDryRun(v bool) *DeleteFlowLogsInput { + s.DryRun = &v + return s +} + +// SetFlowLogIds sets the FlowLogIds field's value. +func (s *DeleteFlowLogsInput) SetFlowLogIds(v []*string) *DeleteFlowLogsInput { + s.FlowLogIds = v + return s +} + +type DeleteFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs that could not be deleted successfully. 
+ Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowLogsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlowLogsOutput { + s.Unsuccessful = v + return s +} + +type DeleteFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFpgaImageInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFpgaImageInput) SetDryRun(v bool) *DeleteFpgaImageInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. 
+func (s *DeleteFpgaImageInput) SetFpgaImageId(v string) *DeleteFpgaImageInput { + s.FpgaImageId = &v + return s +} + +type DeleteFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { + s.Return = &v + return s +} + +type DeleteInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the internet gateway. + // + // InternetGatewayId is a required field + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteInternetGatewayInput) SetDryRun(v bool) *DeleteInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetInternetGatewayId sets the InternetGatewayId field's value. +func (s *DeleteInternetGatewayInput) SetInternetGatewayId(v string) *DeleteInternetGatewayInput { + s.InternetGatewayId = &v + return s +} + +type DeleteInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInternetGatewayOutput) GoString() string { + return s.String() +} + +type DeleteKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // The ID of the key pair. + KeyPairId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteKeyPairInput) SetDryRun(v bool) *DeleteKeyPairInput { + s.DryRun = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *DeleteKeyPairInput) SetKeyName(v string) *DeleteKeyPairInput { + s.KeyName = &v + return s +} + +// SetKeyPairId sets the KeyPairId field's value. +func (s *DeleteKeyPairInput) SetKeyPairId(v string) *DeleteKeyPairInput { + s.KeyPairId = &v + return s +} + +type DeleteKeyPairOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteKeyPairOutput) GoString() string { + return s.String() +} + +type DeleteLaunchTemplateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteLaunchTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLaunchTemplateInput"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteLaunchTemplateInput) SetDryRun(v bool) *DeleteLaunchTemplateInput { + s.DryRun = &v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *DeleteLaunchTemplateInput) SetLaunchTemplateId(v string) *DeleteLaunchTemplateInput { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *DeleteLaunchTemplateInput) SetLaunchTemplateName(v string) *DeleteLaunchTemplateInput { + s.LaunchTemplateName = &v + return s +} + +type DeleteLaunchTemplateOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template. + LaunchTemplate *LaunchTemplate `locationName:"launchTemplate" type:"structure"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *DeleteLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *DeleteLaunchTemplateOutput { + s.LaunchTemplate = v + return s +} + +type DeleteLaunchTemplateVersionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The ID of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateName *string `min:"3" type:"string"` + + // The version numbers of one or more launch template versions to delete. + // + // Versions is a required field + Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLaunchTemplateVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLaunchTemplateVersionsInput"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + if s.Versions == nil { + invalidParams.Add(request.NewErrParamRequired("Versions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteLaunchTemplateVersionsInput) SetDryRun(v bool) *DeleteLaunchTemplateVersionsInput { + s.DryRun = &v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *DeleteLaunchTemplateVersionsInput) SetLaunchTemplateId(v string) *DeleteLaunchTemplateVersionsInput { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. 
+func (s *DeleteLaunchTemplateVersionsInput) SetLaunchTemplateName(v string) *DeleteLaunchTemplateVersionsInput { + s.LaunchTemplateName = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *DeleteLaunchTemplateVersionsInput) SetVersions(v []*string) *DeleteLaunchTemplateVersionsInput { + s.Versions = v + return s +} + +type DeleteLaunchTemplateVersionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template versions that were successfully deleted. + SuccessfullyDeletedLaunchTemplateVersions []*DeleteLaunchTemplateVersionsResponseSuccessItem `locationName:"successfullyDeletedLaunchTemplateVersionSet" locationNameList:"item" type:"list"` + + // Information about the launch template versions that could not be deleted. + UnsuccessfullyDeletedLaunchTemplateVersions []*DeleteLaunchTemplateVersionsResponseErrorItem `locationName:"unsuccessfullyDeletedLaunchTemplateVersionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateVersionsOutput) GoString() string { + return s.String() +} + +// SetSuccessfullyDeletedLaunchTemplateVersions sets the SuccessfullyDeletedLaunchTemplateVersions field's value. +func (s *DeleteLaunchTemplateVersionsOutput) SetSuccessfullyDeletedLaunchTemplateVersions(v []*DeleteLaunchTemplateVersionsResponseSuccessItem) *DeleteLaunchTemplateVersionsOutput { + s.SuccessfullyDeletedLaunchTemplateVersions = v + return s +} + +// SetUnsuccessfullyDeletedLaunchTemplateVersions sets the UnsuccessfullyDeletedLaunchTemplateVersions field's value. 
+func (s *DeleteLaunchTemplateVersionsOutput) SetUnsuccessfullyDeletedLaunchTemplateVersions(v []*DeleteLaunchTemplateVersionsResponseErrorItem) *DeleteLaunchTemplateVersionsOutput { + s.UnsuccessfullyDeletedLaunchTemplateVersions = v + return s +} + +// Describes a launch template version that could not be deleted. +type DeleteLaunchTemplateVersionsResponseErrorItem struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. + LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` + + // The name of the launch template. + LaunchTemplateName *string `locationName:"launchTemplateName" type:"string"` + + // Information about the error. + ResponseError *ResponseError `locationName:"responseError" type:"structure"` + + // The version number of the launch template. + VersionNumber *int64 `locationName:"versionNumber" type:"long"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateVersionsResponseErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateVersionsResponseErrorItem) GoString() string { + return s.String() +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *DeleteLaunchTemplateVersionsResponseErrorItem) SetLaunchTemplateId(v string) *DeleteLaunchTemplateVersionsResponseErrorItem { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *DeleteLaunchTemplateVersionsResponseErrorItem) SetLaunchTemplateName(v string) *DeleteLaunchTemplateVersionsResponseErrorItem { + s.LaunchTemplateName = &v + return s +} + +// SetResponseError sets the ResponseError field's value. +func (s *DeleteLaunchTemplateVersionsResponseErrorItem) SetResponseError(v *ResponseError) *DeleteLaunchTemplateVersionsResponseErrorItem { + s.ResponseError = v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. 
+func (s *DeleteLaunchTemplateVersionsResponseErrorItem) SetVersionNumber(v int64) *DeleteLaunchTemplateVersionsResponseErrorItem { + s.VersionNumber = &v + return s +} + +// Describes a launch template version that was successfully deleted. +type DeleteLaunchTemplateVersionsResponseSuccessItem struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. + LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` + + // The name of the launch template. + LaunchTemplateName *string `locationName:"launchTemplateName" type:"string"` + + // The version number of the launch template. + VersionNumber *int64 `locationName:"versionNumber" type:"long"` +} + +// String returns the string representation +func (s DeleteLaunchTemplateVersionsResponseSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLaunchTemplateVersionsResponseSuccessItem) GoString() string { + return s.String() +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *DeleteLaunchTemplateVersionsResponseSuccessItem) SetLaunchTemplateId(v string) *DeleteLaunchTemplateVersionsResponseSuccessItem { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *DeleteLaunchTemplateVersionsResponseSuccessItem) SetLaunchTemplateName(v string) *DeleteLaunchTemplateVersionsResponseSuccessItem { + s.LaunchTemplateName = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DeleteLaunchTemplateVersionsResponseSuccessItem) SetVersionNumber(v int64) *DeleteLaunchTemplateVersionsResponseSuccessItem { + s.VersionNumber = &v + return s +} + +type DeleteLocalGatewayRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range for the route. This must match the CIDR for the route exactly. 
+ // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the local gateway route table. + // + // LocalGatewayRouteTableId is a required field + LocalGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLocalGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLocalGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLocalGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLocalGatewayRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.LocalGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *DeleteLocalGatewayRouteInput) SetDestinationCidrBlock(v string) *DeleteLocalGatewayRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteLocalGatewayRouteInput) SetDryRun(v bool) *DeleteLocalGatewayRouteInput { + s.DryRun = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. 
+func (s *DeleteLocalGatewayRouteInput) SetLocalGatewayRouteTableId(v string) *DeleteLocalGatewayRouteInput { + s.LocalGatewayRouteTableId = &v + return s +} + +type DeleteLocalGatewayRouteOutput struct { + _ struct{} `type:"structure"` + + // Information about the route. + Route *LocalGatewayRoute `locationName:"route" type:"structure"` +} + +// String returns the string representation +func (s DeleteLocalGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLocalGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetRoute sets the Route field's value. +func (s *DeleteLocalGatewayRouteOutput) SetRoute(v *LocalGatewayRoute) *DeleteLocalGatewayRouteOutput { + s.Route = v + return s +} + +type DeleteLocalGatewayRouteTableVpcAssociationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the association. + // + // LocalGatewayRouteTableVpcAssociationId is a required field + LocalGatewayRouteTableVpcAssociationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLocalGatewayRouteTableVpcAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLocalGatewayRouteTableVpcAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteLocalGatewayRouteTableVpcAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLocalGatewayRouteTableVpcAssociationInput"} + if s.LocalGatewayRouteTableVpcAssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableVpcAssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteLocalGatewayRouteTableVpcAssociationInput) SetDryRun(v bool) *DeleteLocalGatewayRouteTableVpcAssociationInput { + s.DryRun = &v + return s +} + +// SetLocalGatewayRouteTableVpcAssociationId sets the LocalGatewayRouteTableVpcAssociationId field's value. +func (s *DeleteLocalGatewayRouteTableVpcAssociationInput) SetLocalGatewayRouteTableVpcAssociationId(v string) *DeleteLocalGatewayRouteTableVpcAssociationInput { + s.LocalGatewayRouteTableVpcAssociationId = &v + return s +} + +type DeleteLocalGatewayRouteTableVpcAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + LocalGatewayRouteTableVpcAssociation *LocalGatewayRouteTableVpcAssociation `locationName:"localGatewayRouteTableVpcAssociation" type:"structure"` +} + +// String returns the string representation +func (s DeleteLocalGatewayRouteTableVpcAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLocalGatewayRouteTableVpcAssociationOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTableVpcAssociation sets the LocalGatewayRouteTableVpcAssociation field's value. 
+func (s *DeleteLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteTableVpcAssociation(v *LocalGatewayRouteTableVpcAssociation) *DeleteLocalGatewayRouteTableVpcAssociationOutput { + s.LocalGatewayRouteTableVpcAssociation = v + return s +} + +type DeleteManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteManagedPrefixListInput) SetDryRun(v bool) *DeleteManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteManagedPrefixListInput) SetPrefixListId(v string) *DeleteManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +type DeleteManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. 
+ PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *DeleteManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *DeleteManagedPrefixListOutput { + s.PrefixList = v + return s +} + +type DeleteNatGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the NAT gateway. + // + // NatGatewayId is a required field + NatGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNatGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNatGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNatGatewayInput"} + if s.NatGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("NatGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNatGatewayInput) SetDryRun(v bool) *DeleteNatGatewayInput { + s.DryRun = &v + return s +} + +// SetNatGatewayId sets the NatGatewayId field's value. 
+func (s *DeleteNatGatewayInput) SetNatGatewayId(v string) *DeleteNatGatewayInput { + s.NatGatewayId = &v + return s +} + +type DeleteNatGatewayOutput struct { + _ struct{} `type:"structure"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` +} + +// String returns the string representation +func (s DeleteNatGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNatGatewayOutput) GoString() string { + return s.String() +} + +// SetNatGatewayId sets the NatGatewayId field's value. +func (s *DeleteNatGatewayOutput) SetNatGatewayId(v string) *DeleteNatGatewayOutput { + s.NatGatewayId = &v + return s +} + +type DeleteNetworkAclEntryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the rule is an egress rule. + // + // Egress is a required field + Egress *bool `locationName:"egress" type:"boolean" required:"true"` + + // The ID of the network ACL. + // + // NetworkAclId is a required field + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` + + // The rule number of the entry to delete. + // + // RuleNumber is a required field + RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclEntryInput"} + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkAclEntryInput) SetDryRun(v bool) *DeleteNetworkAclEntryInput { + s.DryRun = &v + return s +} + +// SetEgress sets the Egress field's value. +func (s *DeleteNetworkAclEntryInput) SetEgress(v bool) *DeleteNetworkAclEntryInput { + s.Egress = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *DeleteNetworkAclEntryInput) SetNetworkAclId(v string) *DeleteNetworkAclEntryInput { + s.NetworkAclId = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *DeleteNetworkAclEntryInput) SetRuleNumber(v int64) *DeleteNetworkAclEntryInput { + s.RuleNumber = &v + return s +} + +type DeleteNetworkAclEntryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclEntryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclEntryOutput) GoString() string { + return s.String() +} + +type DeleteNetworkAclInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network ACL. 
+ // + // NetworkAclId is a required field + NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkAclInput"} + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkAclInput) SetDryRun(v bool) *DeleteNetworkAclInput { + s.DryRun = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *DeleteNetworkAclInput) SetNetworkAclId(v string) *DeleteNetworkAclInput { + s.NetworkAclId = &v + return s +} + +type DeleteNetworkAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkAclOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNetworkInterface. +type DeleteNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
+ // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfaceInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkInterfaceInput) SetDryRun(v bool) *DeleteNetworkInterfaceInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *DeleteNetworkInterfaceInput) SetNetworkInterfaceId(v string) *DeleteNetworkInterfaceInput { + s.NetworkInterfaceId = &v + return s +} + +type DeleteNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfaceOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteNetworkInterfacePermission. +type DeleteNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // Specify true to remove the permission even if the network interface is attached + // to an instance. + Force *bool `type:"boolean"` + + // The ID of the network interface permission. + // + // NetworkInterfacePermissionId is a required field + NetworkInterfacePermissionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfacePermissionInput"} + if s.NetworkInterfacePermissionId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfacePermissionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetDryRun(v bool) *DeleteNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetForce(v bool) *DeleteNetworkInterfacePermissionInput { + s.Force = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetNetworkInterfacePermissionId(v string) *DeleteNetworkInterfacePermissionInput { + s.NetworkInterfacePermissionId = &v + return s +} + +// Contains the output for DeleteNetworkInterfacePermission. +type DeleteNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds, otherwise returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteNetworkInterfacePermissionOutput) SetReturn(v bool) *DeleteNetworkInterfacePermissionOutput { + s.Return = &v + return s +} + +type DeletePlacementGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The name of the placement group. + // + // GroupName is a required field + GroupName *string `locationName:"groupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePlacementGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePlacementGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePlacementGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeletePlacementGroupInput) SetDryRun(v bool) *DeletePlacementGroupInput { + s.DryRun = &v + return s +} + +// SetGroupName sets the GroupName field's value. 
+func (s *DeletePlacementGroupInput) SetGroupName(v string) *DeletePlacementGroupInput { + s.GroupName = &v + return s +} + +type DeletePlacementGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePlacementGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePlacementGroupOutput) GoString() string { + return s.String() +} + +// Describes the error for a Reserved Instance whose queued purchase could not +// be deleted. +type DeleteQueuedReservedInstancesError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string" enum:"DeleteQueuedReservedInstancesErrorCode"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *DeleteQueuedReservedInstancesError) SetCode(v string) *DeleteQueuedReservedInstancesError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DeleteQueuedReservedInstancesError) SetMessage(v string) *DeleteQueuedReservedInstancesError { + s.Message = &v + return s +} + +type DeleteQueuedReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the Reserved Instances. 
+ // + // ReservedInstancesIds is a required field + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"item" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteQueuedReservedInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteQueuedReservedInstancesInput"} + if s.ReservedInstancesIds == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesIds")) + } + if s.ReservedInstancesIds != nil && len(s.ReservedInstancesIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReservedInstancesIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteQueuedReservedInstancesInput) SetDryRun(v bool) *DeleteQueuedReservedInstancesInput { + s.DryRun = &v + return s +} + +// SetReservedInstancesIds sets the ReservedInstancesIds field's value. +func (s *DeleteQueuedReservedInstancesInput) SetReservedInstancesIds(v []*string) *DeleteQueuedReservedInstancesInput { + s.ReservedInstancesIds = v + return s +} + +type DeleteQueuedReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the queued purchases that could not be deleted. + FailedQueuedPurchaseDeletions []*FailedQueuedPurchaseDeletion `locationName:"failedQueuedPurchaseDeletionSet" locationNameList:"item" type:"list"` + + // Information about the queued purchases that were successfully deleted. 
+ SuccessfulQueuedPurchaseDeletions []*SuccessfulQueuedPurchaseDeletion `locationName:"successfulQueuedPurchaseDeletionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteQueuedReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueuedReservedInstancesOutput) GoString() string { + return s.String() +} + +// SetFailedQueuedPurchaseDeletions sets the FailedQueuedPurchaseDeletions field's value. +func (s *DeleteQueuedReservedInstancesOutput) SetFailedQueuedPurchaseDeletions(v []*FailedQueuedPurchaseDeletion) *DeleteQueuedReservedInstancesOutput { + s.FailedQueuedPurchaseDeletions = v + return s +} + +// SetSuccessfulQueuedPurchaseDeletions sets the SuccessfulQueuedPurchaseDeletions field's value. +func (s *DeleteQueuedReservedInstancesOutput) SetSuccessfulQueuedPurchaseDeletions(v []*SuccessfulQueuedPurchaseDeletion) *DeleteQueuedReservedInstancesOutput { + s.SuccessfulQueuedPurchaseDeletions = v + return s +} + +type DeleteRouteInput struct { + _ struct{} `type:"structure"` + + // The IPv4 CIDR range for the route. The value you specify must match the CIDR + // for the route exactly. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The IPv6 CIDR range for the route. The value you specify must match the CIDR + // for the route exactly. + DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. 
+ // + // RouteTableId is a required field + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRouteInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *DeleteRouteInput) SetDestinationCidrBlock(v string) *DeleteRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationIpv6CidrBlock sets the DestinationIpv6CidrBlock field's value. +func (s *DeleteRouteInput) SetDestinationIpv6CidrBlock(v string) *DeleteRouteInput { + s.DestinationIpv6CidrBlock = &v + return s +} + +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *DeleteRouteInput) SetDestinationPrefixListId(v string) *DeleteRouteInput { + s.DestinationPrefixListId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteRouteInput) SetDryRun(v bool) *DeleteRouteInput { + s.DryRun = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. 
+func (s *DeleteRouteInput) SetRouteTableId(v string) *DeleteRouteInput { + s.RouteTableId = &v + return s +} + +type DeleteRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteOutput) GoString() string { + return s.String() +} + +type DeleteRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the route table. + // + // RouteTableId is a required field + RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRouteTableInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteRouteTableInput) SetDryRun(v bool) *DeleteRouteTableInput { + s.DryRun = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. 
+func (s *DeleteRouteTableInput) SetRouteTableId(v string) *DeleteRouteTableInput { + s.RouteTableId = &v + return s +} + +type DeleteRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRouteTableOutput) GoString() string { + return s.String() +} + +type DeleteSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the security group. Required for a nondefault VPC. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You can specify + // either the security group name or the security group ID. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s DeleteSecurityGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteSecurityGroupInput) SetDryRun(v bool) *DeleteSecurityGroupInput { + s.DryRun = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *DeleteSecurityGroupInput) SetGroupId(v string) *DeleteSecurityGroupInput { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. 
+func (s *DeleteSecurityGroupInput) SetGroupName(v string) *DeleteSecurityGroupInput { + s.GroupName = &v + return s +} + +type DeleteSecurityGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecurityGroupOutput) GoString() string { + return s.String() +} + +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + // + // SnapshotId is a required field + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteSnapshotInput) SetDryRun(v bool) *DeleteSnapshotInput { + s.DryRun = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. 
+func (s *DeleteSnapshotInput) SetSnapshotId(v string) *DeleteSnapshotInput { + s.SnapshotId = &v + return s +} + +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteSpotDatafeedSubscription. +type DeleteSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DeleteSpotDatafeedSubscriptionInput { + s.DryRun = &v + return s +} + +type DeleteSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +type DeleteSubnetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the subnet. + // + // SubnetId is a required field + SubnetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSubnetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSubnetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubnetInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteSubnetInput) SetDryRun(v bool) *DeleteSubnetInput { + s.DryRun = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *DeleteSubnetInput) SetSubnetId(v string) *DeleteSubnetInput { + s.SubnetId = &v + return s +} + +type DeleteSubnetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubnetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubnetOutput) GoString() string { + return s.String() +} + +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the resources, separated by spaces. 
+ // + // Constraints: Up to 1000 resource IDs. We recommend breaking up this request + // into smaller batches. + // + // Resources is a required field + Resources []*string `locationName:"resourceId" type:"list" required:"true"` + + // The tags to delete. Specify a tag key and an optional tag value to delete + // specific tags. If you specify a tag key without a tag value, we delete any + // tag with this key regardless of its value. If you specify a tag key with + // an empty string as the tag value, we delete the tag only if its value is + // an empty string. + // + // If you omit this parameter, we delete all user-defined tags for the specified + // resources. We do not delete AWS-generated tags (tags that have the aws: prefix). + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.Resources == nil { + invalidParams.Add(request.NewErrParamRequired("Resources")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTagsInput) SetDryRun(v bool) *DeleteTagsInput { + s.DryRun = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *DeleteTagsInput) SetResources(v []*string) *DeleteTagsInput { + s.Resources = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *DeleteTagsInput) SetTags(v []*Tag) *DeleteTagsInput { + s.Tags = v + return s +} + +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DeleteTrafficMirrorFilterInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror filter. + // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorFilterInput"} + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorFilterInput) SetDryRun(v bool) *DeleteTrafficMirrorFilterInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. 
+func (s *DeleteTrafficMirrorFilterInput) SetTrafficMirrorFilterId(v string) *DeleteTrafficMirrorFilterInput { + s.TrafficMirrorFilterId = &v + return s +} + +type DeleteTrafficMirrorFilterOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *DeleteTrafficMirrorFilterOutput) SetTrafficMirrorFilterId(v string) *DeleteTrafficMirrorFilterOutput { + s.TrafficMirrorFilterId = &v + return s +} + +type DeleteTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror rule. + // + // TrafficMirrorFilterRuleId is a required field + TrafficMirrorFilterRuleId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorFilterRuleInput"} + if s.TrafficMirrorFilterRuleId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterRuleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorFilterRuleInput) SetDryRun(v bool) *DeleteTrafficMirrorFilterRuleInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *DeleteTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterRuleId(v string) *DeleteTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type DeleteTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror rule. + TrafficMirrorFilterRuleId *string `locationName:"trafficMirrorFilterRuleId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *DeleteTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRuleId(v string) *DeleteTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type DeleteTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror session. 
+ // + // TrafficMirrorSessionId is a required field + TrafficMirrorSessionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorSessionInput"} + if s.TrafficMirrorSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorSessionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorSessionInput) SetDryRun(v bool) *DeleteTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *DeleteTrafficMirrorSessionInput) SetTrafficMirrorSessionId(v string) *DeleteTrafficMirrorSessionInput { + s.TrafficMirrorSessionId = &v + return s +} + +type DeleteTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror session. + TrafficMirrorSessionId *string `locationName:"trafficMirrorSessionId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. 
+func (s *DeleteTrafficMirrorSessionOutput) SetTrafficMirrorSessionId(v string) *DeleteTrafficMirrorSessionOutput { + s.TrafficMirrorSessionId = &v + return s +} + +type DeleteTrafficMirrorTargetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the Traffic Mirror target. + // + // TrafficMirrorTargetId is a required field + TrafficMirrorTargetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTrafficMirrorTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTrafficMirrorTargetInput"} + if s.TrafficMirrorTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorTargetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTrafficMirrorTargetInput) SetDryRun(v bool) *DeleteTrafficMirrorTargetInput { + s.DryRun = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *DeleteTrafficMirrorTargetInput) SetTrafficMirrorTargetId(v string) *DeleteTrafficMirrorTargetInput { + s.TrafficMirrorTargetId = &v + return s +} + +type DeleteTrafficMirrorTargetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted Traffic Mirror target. 
+ TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` +} + +// String returns the string representation +func (s DeleteTrafficMirrorTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTrafficMirrorTargetOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *DeleteTrafficMirrorTargetOutput) SetTrafficMirrorTargetId(v string) *DeleteTrafficMirrorTargetOutput { + s.TrafficMirrorTargetId = &v + return s +} + +type DeleteTransitGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway. + // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayInput"} + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteTransitGatewayInput) SetDryRun(v bool) *DeleteTransitGatewayInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *DeleteTransitGatewayInput) SetTransitGatewayId(v string) *DeleteTransitGatewayInput { + s.TransitGatewayId = &v + return s +} + +type DeleteTransitGatewayMulticastDomainInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway multicast domain. + // + // TransitGatewayMulticastDomainId is a required field + TransitGatewayMulticastDomainId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayMulticastDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayMulticastDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayMulticastDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayMulticastDomainInput"} + if s.TransitGatewayMulticastDomainId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayMulticastDomainId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayMulticastDomainInput) SetDryRun(v bool) *DeleteTransitGatewayMulticastDomainInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. 
+func (s *DeleteTransitGatewayMulticastDomainInput) SetTransitGatewayMulticastDomainId(v string) *DeleteTransitGatewayMulticastDomainInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type DeleteTransitGatewayMulticastDomainOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted transit gateway multicast domain. + TransitGatewayMulticastDomain *TransitGatewayMulticastDomain `locationName:"transitGatewayMulticastDomain" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayMulticastDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayMulticastDomainOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayMulticastDomain sets the TransitGatewayMulticastDomain field's value. +func (s *DeleteTransitGatewayMulticastDomainOutput) SetTransitGatewayMulticastDomain(v *TransitGatewayMulticastDomain) *DeleteTransitGatewayMulticastDomainOutput { + s.TransitGatewayMulticastDomain = v + return s +} + +type DeleteTransitGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted transit gateway. + TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayOutput) GoString() string { + return s.String() +} + +// SetTransitGateway sets the TransitGateway field's value. 
+func (s *DeleteTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *DeleteTransitGatewayOutput { + s.TransitGateway = v + return s +} + +type DeleteTransitGatewayPeeringAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway peering attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPeeringAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPeeringAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayPeeringAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayPeeringAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayPeeringAttachmentInput) SetDryRun(v bool) *DeleteTransitGatewayPeeringAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. 
+func (s *DeleteTransitGatewayPeeringAttachmentInput) SetTransitGatewayAttachmentId(v string) *DeleteTransitGatewayPeeringAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type DeleteTransitGatewayPeeringAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The transit gateway peering attachment. + TransitGatewayPeeringAttachment *TransitGatewayPeeringAttachment `locationName:"transitGatewayPeeringAttachment" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPeeringAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPeeringAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPeeringAttachment sets the TransitGatewayPeeringAttachment field's value. +func (s *DeleteTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAttachment(v *TransitGatewayPeeringAttachment) *DeleteTransitGatewayPeeringAttachmentOutput { + s.TransitGatewayPeeringAttachment = v + return s +} + +type DeleteTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *DeleteTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DeleteTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted prefix list reference. 
+ TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *DeleteTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *DeleteTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + +type DeleteTransitGatewayRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR range for the route. This must match the CIDR for the route exactly. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteTransitGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *DeleteTransitGatewayRouteInput) SetDestinationCidrBlock(v string) *DeleteTransitGatewayRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayRouteInput) SetDryRun(v bool) *DeleteTransitGatewayRouteInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DeleteTransitGatewayRouteInput) SetTransitGatewayRouteTableId(v string) *DeleteTransitGatewayRouteInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DeleteTransitGatewayRouteOutput struct { + _ struct{} `type:"structure"` + + // Information about the route. + Route *TransitGatewayRoute `locationName:"route" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetRoute sets the Route field's value. +func (s *DeleteTransitGatewayRouteOutput) SetRoute(v *TransitGatewayRoute) *DeleteTransitGatewayRouteOutput { + s.Route = v + return s +} + +type DeleteTransitGatewayRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayRouteTableInput"} + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayRouteTableInput) SetDryRun(v bool) *DeleteTransitGatewayRouteTableInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DeleteTransitGatewayRouteTableInput) SetTransitGatewayRouteTableId(v string) *DeleteTransitGatewayRouteTableInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DeleteTransitGatewayRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted transit gateway route table. 
+ TransitGatewayRouteTable *TransitGatewayRouteTable `locationName:"transitGatewayRouteTable" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayRouteTableOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayRouteTable sets the TransitGatewayRouteTable field's value. +func (s *DeleteTransitGatewayRouteTableOutput) SetTransitGatewayRouteTable(v *TransitGatewayRouteTable) *DeleteTransitGatewayRouteTableOutput { + s.TransitGatewayRouteTable = v + return s +} + +type DeleteTransitGatewayVpcAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayVpcAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayVpcAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteTransitGatewayVpcAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayVpcAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayVpcAttachmentInput) SetDryRun(v bool) *DeleteTransitGatewayVpcAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *DeleteTransitGatewayVpcAttachmentInput) SetTransitGatewayAttachmentId(v string) *DeleteTransitGatewayVpcAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type DeleteTransitGatewayVpcAttachmentOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted VPC attachment. + TransitGatewayVpcAttachment *TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachment" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayVpcAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayVpcAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayVpcAttachment sets the TransitGatewayVpcAttachment field's value. +func (s *DeleteTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment(v *TransitGatewayVpcAttachment) *DeleteTransitGatewayVpcAttachmentOutput { + s.TransitGatewayVpcAttachment = v + return s +} + +type DeleteVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVolumeInput) SetDryRun(v bool) *DeleteVolumeInput { + s.DryRun = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *DeleteVolumeInput) SetVolumeId(v string) *DeleteVolumeInput { + s.VolumeId = &v + return s +} + +type DeleteVolumeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVolumeOutput) GoString() string { + return s.String() +} + +type DeleteVpcEndpointConnectionNotificationsInput struct { + _ struct{} `type:"structure"` + + // One or more notification IDs. + // + // ConnectionNotificationIds is a required field + ConnectionNotificationIds []*string `locationName:"ConnectionNotificationId" locationNameList:"item" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteVpcEndpointConnectionNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointConnectionNotificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcEndpointConnectionNotificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcEndpointConnectionNotificationsInput"} + if s.ConnectionNotificationIds == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionNotificationIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionNotificationIds sets the ConnectionNotificationIds field's value. +func (s *DeleteVpcEndpointConnectionNotificationsInput) SetConnectionNotificationIds(v []*string) *DeleteVpcEndpointConnectionNotificationsInput { + s.ConnectionNotificationIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpcEndpointConnectionNotificationsInput) SetDryRun(v bool) *DeleteVpcEndpointConnectionNotificationsInput { + s.DryRun = &v + return s +} + +type DeleteVpcEndpointConnectionNotificationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the notifications that could not be deleted successfully. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointConnectionNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointConnectionNotificationsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. 
+func (s *DeleteVpcEndpointConnectionNotificationsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteVpcEndpointConnectionNotificationsOutput { + s.Unsuccessful = v + return s +} + +type DeleteVpcEndpointServiceConfigurationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more services. + // + // ServiceIds is a required field + ServiceIds []*string `locationName:"ServiceId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcEndpointServiceConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointServiceConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcEndpointServiceConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcEndpointServiceConfigurationsInput"} + if s.ServiceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpcEndpointServiceConfigurationsInput) SetDryRun(v bool) *DeleteVpcEndpointServiceConfigurationsInput { + s.DryRun = &v + return s +} + +// SetServiceIds sets the ServiceIds field's value. 
+func (s *DeleteVpcEndpointServiceConfigurationsInput) SetServiceIds(v []*string) *DeleteVpcEndpointServiceConfigurationsInput { + s.ServiceIds = v + return s +} + +type DeleteVpcEndpointServiceConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the service configurations that were not deleted, if applicable. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointServiceConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointServiceConfigurationsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *DeleteVpcEndpointServiceConfigurationsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteVpcEndpointServiceConfigurationsOutput { + s.Unsuccessful = v + return s +} + +// Contains the parameters for DeleteVpcEndpoints. +type DeleteVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more VPC endpoint IDs. + // + // VpcEndpointIds is a required field + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteVpcEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcEndpointsInput"} + if s.VpcEndpointIds == nil { + invalidParams.Add(request.NewErrParamRequired("VpcEndpointIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpcEndpointsInput) SetDryRun(v bool) *DeleteVpcEndpointsInput { + s.DryRun = &v + return s +} + +// SetVpcEndpointIds sets the VpcEndpointIds field's value. +func (s *DeleteVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DeleteVpcEndpointsInput { + s.VpcEndpointIds = v + return s +} + +// Contains the output of DeleteVpcEndpoints. +type DeleteVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC endpoints that were not successfully deleted. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeleteVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcEndpointsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *DeleteVpcEndpointsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteVpcEndpointsOutput { + s.Unsuccessful = v + return s +} + +type DeleteVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpcInput) SetDryRun(v bool) *DeleteVpcInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DeleteVpcInput) SetVpcId(v string) *DeleteVpcInput { + s.VpcId = &v + return s +} + +type DeleteVpcOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcOutput) GoString() string { + return s.String() +} + +type DeleteVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. 
+ // + // VpcPeeringConnectionId is a required field + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpcPeeringConnectionInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpcPeeringConnectionInput) SetDryRun(v bool) *DeleteVpcPeeringConnectionInput { + s.DryRun = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *DeleteVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *DeleteVpcPeeringConnectionInput { + s.VpcPeeringConnectionId = &v + return s +} + +type DeleteVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteVpcPeeringConnectionOutput) SetReturn(v bool) *DeleteVpcPeeringConnectionOutput { + s.Return = &v + return s +} + +// Contains the parameters for DeleteVpnConnection. 
+type DeleteVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpnConnectionInput) SetDryRun(v bool) *DeleteVpnConnectionInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *DeleteVpnConnectionInput) SetVpnConnectionId(v string) *DeleteVpnConnectionInput { + s.VpnConnectionId = &v + return s +} + +type DeleteVpnConnectionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpnConnectionRoute. 
+type DeleteVpnConnectionRouteInput struct { + _ struct{} `type:"structure"` + + // The CIDR block associated with the local subnet of the customer network. + // + // DestinationCidrBlock is a required field + DestinationCidrBlock *string `type:"string" required:"true"` + + // The ID of the VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnConnectionRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnConnectionRouteInput"} + if s.DestinationCidrBlock == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *DeleteVpnConnectionRouteInput) SetDestinationCidrBlock(v string) *DeleteVpnConnectionRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. 
+func (s *DeleteVpnConnectionRouteInput) SetVpnConnectionId(v string) *DeleteVpnConnectionRouteInput { + s.VpnConnectionId = &v + return s +} + +type DeleteVpnConnectionRouteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnConnectionRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnConnectionRouteOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DeleteVpnGateway. +type DeleteVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the virtual private gateway. + // + // VpnGatewayId is a required field + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVpnGatewayInput"} + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteVpnGatewayInput) SetDryRun(v bool) *DeleteVpnGatewayInput { + s.DryRun = &v + return s +} + +// SetVpnGatewayId sets the VpnGatewayId field's value. 
+func (s *DeleteVpnGatewayInput) SetVpnGatewayId(v string) *DeleteVpnGatewayInput { + s.VpnGatewayId = &v + return s +} + +type DeleteVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVpnGatewayOutput) GoString() string { + return s.String() +} + +type DeprovisionByoipCidrInput struct { + _ struct{} `type:"structure"` + + // The address range, in CIDR notation. The prefix must be the same prefix that + // you specified when you provisioned the address range. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeprovisionByoipCidrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprovisionByoipCidrInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeprovisionByoipCidrInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeprovisionByoipCidrInput"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *DeprovisionByoipCidrInput) SetCidr(v string) *DeprovisionByoipCidrInput { + s.Cidr = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeprovisionByoipCidrInput) SetDryRun(v bool) *DeprovisionByoipCidrInput { + s.DryRun = &v + return s +} + +type DeprovisionByoipCidrOutput struct { + _ struct{} `type:"structure"` + + // Information about the address range. + ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` +} + +// String returns the string representation +func (s DeprovisionByoipCidrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeprovisionByoipCidrOutput) GoString() string { + return s.String() +} + +// SetByoipCidr sets the ByoipCidr field's value. +func (s *DeprovisionByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *DeprovisionByoipCidrOutput { + s.ByoipCidr = v + return s +} + +// Contains the parameters for DeregisterImage. +type DeregisterImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterImageInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeregisterImageInput) SetDryRun(v bool) *DeregisterImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *DeregisterImageInput) SetImageId(v string) *DeregisterImageInput { + s.ImageId = &v + return s +} + +type DeregisterImageOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterImageOutput) GoString() string { + return s.String() +} + +type DeregisterInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the tag keys to deregister. + InstanceTagAttribute *DeregisterInstanceTagAttributeRequest `type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DeregisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DeregisterInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. 
+func (s *DeregisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *DeregisterInstanceTagAttributeRequest) *DeregisterInstanceEventNotificationAttributesInput {
+	s.InstanceTagAttribute = v
+	return s
+}
+
+type DeregisterInstanceEventNotificationAttributesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The resulting set of tag keys.
+	InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesOutput) GoString() string {
+	return s.String()
+}
+
+// SetInstanceTagAttribute sets the InstanceTagAttribute field's value.
+func (s *DeregisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DeregisterInstanceEventNotificationAttributesOutput {
+	s.InstanceTagAttribute = v
+	return s
+}
+
+// Information about the tag keys to deregister for the current Region. You
+// can either specify individual tag keys or deregister all tag keys in the
+// current Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys
+// in the request.
+type DeregisterInstanceTagAttributeRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to deregister all tag keys in the current Region. Specify
+	// true to deregister all tag keys.
+	IncludeAllTagsOfInstance *bool `type:"boolean"`
+
+	// Information about the tag keys to deregister.
+ InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeregisterInstanceTagAttributeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceTagAttributeRequest) GoString() string { + return s.String() +} + +// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value. +func (s *DeregisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *DeregisterInstanceTagAttributeRequest { + s.IncludeAllTagsOfInstance = &v + return s +} + +// SetInstanceTagKeys sets the InstanceTagKeys field's value. +func (s *DeregisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *DeregisterInstanceTagAttributeRequest { + s.InstanceTagKeys = v + return s +} + +type DeregisterTransitGatewayMulticastGroupMembersInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `type:"string"` + + // The IDs of the group members' network interfaces. + NetworkInterfaceIds []*string `locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s DeregisterTransitGatewayMulticastGroupMembersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTransitGatewayMulticastGroupMembersInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeregisterTransitGatewayMulticastGroupMembersInput) SetDryRun(v bool) *DeregisterTransitGatewayMulticastGroupMembersInput { + s.DryRun = &v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *DeregisterTransitGatewayMulticastGroupMembersInput) SetGroupIpAddress(v string) *DeregisterTransitGatewayMulticastGroupMembersInput { + s.GroupIpAddress = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *DeregisterTransitGatewayMulticastGroupMembersInput) SetNetworkInterfaceIds(v []*string) *DeregisterTransitGatewayMulticastGroupMembersInput { + s.NetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *DeregisterTransitGatewayMulticastGroupMembersInput) SetTransitGatewayMulticastDomainId(v string) *DeregisterTransitGatewayMulticastGroupMembersInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type DeregisterTransitGatewayMulticastGroupMembersOutput struct { + _ struct{} `type:"structure"` + + // Information about the deregistered members. + DeregisteredMulticastGroupMembers *TransitGatewayMulticastDeregisteredGroupMembers `locationName:"deregisteredMulticastGroupMembers" type:"structure"` +} + +// String returns the string representation +func (s DeregisterTransitGatewayMulticastGroupMembersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTransitGatewayMulticastGroupMembersOutput) GoString() string { + return s.String() +} + +// SetDeregisteredMulticastGroupMembers sets the DeregisteredMulticastGroupMembers field's value. 
+func (s *DeregisterTransitGatewayMulticastGroupMembersOutput) SetDeregisteredMulticastGroupMembers(v *TransitGatewayMulticastDeregisteredGroupMembers) *DeregisterTransitGatewayMulticastGroupMembersOutput { + s.DeregisteredMulticastGroupMembers = v + return s +} + +type DeregisterTransitGatewayMulticastGroupSourcesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `type:"string"` + + // The IDs of the group sources' network interfaces. + NetworkInterfaceIds []*string `locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s DeregisterTransitGatewayMulticastGroupSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTransitGatewayMulticastGroupSourcesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DeregisterTransitGatewayMulticastGroupSourcesInput) SetDryRun(v bool) *DeregisterTransitGatewayMulticastGroupSourcesInput { + s.DryRun = &v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *DeregisterTransitGatewayMulticastGroupSourcesInput) SetGroupIpAddress(v string) *DeregisterTransitGatewayMulticastGroupSourcesInput { + s.GroupIpAddress = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. 
+func (s *DeregisterTransitGatewayMulticastGroupSourcesInput) SetNetworkInterfaceIds(v []*string) *DeregisterTransitGatewayMulticastGroupSourcesInput { + s.NetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *DeregisterTransitGatewayMulticastGroupSourcesInput) SetTransitGatewayMulticastDomainId(v string) *DeregisterTransitGatewayMulticastGroupSourcesInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type DeregisterTransitGatewayMulticastGroupSourcesOutput struct { + _ struct{} `type:"structure"` + + // Information about the deregistered group sources. + DeregisteredMulticastGroupSources *TransitGatewayMulticastDeregisteredGroupSources `locationName:"deregisteredMulticastGroupSources" type:"structure"` +} + +// String returns the string representation +func (s DeregisterTransitGatewayMulticastGroupSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTransitGatewayMulticastGroupSourcesOutput) GoString() string { + return s.String() +} + +// SetDeregisteredMulticastGroupSources sets the DeregisteredMulticastGroupSources field's value. +func (s *DeregisterTransitGatewayMulticastGroupSourcesOutput) SetDeregisteredMulticastGroupSources(v *TransitGatewayMulticastDeregisteredGroupSources) *DeregisterTransitGatewayMulticastGroupSourcesOutput { + s.DeregisteredMulticastGroupSources = v + return s +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` + + // The account attribute names. + AttributeNames []*string `locationName:"attributeName" locationNameList:"attributeName" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesInput) GoString() string { + return s.String() +} + +// SetAttributeNames sets the AttributeNames field's value. +func (s *DescribeAccountAttributesInput) SetAttributeNames(v []*string) *DescribeAccountAttributesInput { + s.AttributeNames = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAccountAttributesInput) SetDryRun(v bool) *DescribeAccountAttributesInput { + s.DryRun = &v + return s +} + +type DescribeAccountAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the account attributes. + AccountAttributes []*AccountAttribute `locationName:"accountAttributeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAccountAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountAttributesOutput) GoString() string { + return s.String() +} + +// SetAccountAttributes sets the AccountAttributes field's value. +func (s *DescribeAccountAttributesOutput) SetAccountAttributes(v []*AccountAttribute) *DescribeAccountAttributesOutput { + s.AccountAttributes = v + return s +} + +type DescribeAddressesInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] Information about the allocation IDs. + AllocationIds []*string `locationName:"AllocationId" locationNameList:"AllocationId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * allocation-id - [EC2-VPC] The allocation ID for the address. + // + // * association-id - [EC2-VPC] The association ID for the address. + // + // * domain - Indicates whether the address is for use in EC2-Classic (standard) + // or in a VPC (vpc). + // + // * instance-id - The ID of the instance the address is associated with, + // if any. + // + // * network-border-group - A unique set of Availability Zones, Local Zones, + // or Wavelength Zones from where AWS advertises IP addresses. + // + // * network-interface-id - [EC2-VPC] The ID of the network interface that + // the address is associated with, if any. + // + // * network-interface-owner-id - The AWS account ID of the owner. + // + // * private-ip-address - [EC2-VPC] The private IP address associated with + // the Elastic IP address. + // + // * public-ip - The Elastic IP address, or the carrier IP address. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Elastic IP addresses. + // + // Default: Describes all your Elastic IP addresses. 
+ PublicIps []*string `locationName:"PublicIp" locationNameList:"PublicIp" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesInput) GoString() string { + return s.String() +} + +// SetAllocationIds sets the AllocationIds field's value. +func (s *DescribeAddressesInput) SetAllocationIds(v []*string) *DescribeAddressesInput { + s.AllocationIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAddressesInput) SetDryRun(v bool) *DescribeAddressesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAddressesInput) SetFilters(v []*Filter) *DescribeAddressesInput { + s.Filters = v + return s +} + +// SetPublicIps sets the PublicIps field's value. +func (s *DescribeAddressesInput) SetPublicIps(v []*string) *DescribeAddressesInput { + s.PublicIps = v + return s +} + +type DescribeAddressesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Elastic IP addresses. + Addresses []*Address `locationName:"addressesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAddressesOutput) GoString() string { + return s.String() +} + +// SetAddresses sets the Addresses field's value. +func (s *DescribeAddressesOutput) SetAddresses(v []*Address) *DescribeAddressesOutput { + s.Addresses = v + return s +} + +type DescribeAggregateIdFormatInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s DescribeAggregateIdFormatInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAggregateIdFormatInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DescribeAggregateIdFormatInput) SetDryRun(v bool) *DescribeAggregateIdFormatInput {
+	s.DryRun = &v
+	return s
+}
+
+type DescribeAggregateIdFormatOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Information about each resource's ID format.
+	Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`
+
+	// Indicates whether all resource types in the Region are configured to use
+	// longer IDs. This value is only true if all users are configured to use longer
+	// IDs for all resource types in the Region.
+	UseLongIdsAggregated *bool `locationName:"useLongIdsAggregated" type:"boolean"`
+}
+
+// String returns the string representation
+func (s DescribeAggregateIdFormatOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAggregateIdFormatOutput) GoString() string {
+	return s.String()
+}
+
+// SetStatuses sets the Statuses field's value.
+func (s *DescribeAggregateIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeAggregateIdFormatOutput {
+	s.Statuses = v
+	return s
+}
+
+// SetUseLongIdsAggregated sets the UseLongIdsAggregated field's value.
+func (s *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *DescribeAggregateIdFormatOutput {
+	s.UseLongIdsAggregated = &v
+	return s
+}
+
+type DescribeAvailabilityZonesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Include all Availability Zones, Local Zones, and Wavelength Zones regardless
+	// of your opt-in status.
+	//
+	// If you do not use this parameter, the results include only the zones for
+	// the Regions where you have chosen the option to opt in.
+	AllAvailabilityZones *bool `type:"boolean"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `locationName:"dryRun" type:"boolean"`
+
+	// The filters.
+	//
+	//    * group-name - For Availability Zones, use the Region name. For Local
+	//    Zones, use the name of the group associated with the Local Zone (for example,
+	//    us-west-2-lax-1). For Wavelength Zones, use the name of the group associated
+	//    with the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).
+	//
+	//    * message - The Zone message.
+	//
+	//    * opt-in-status - The opt-in status (opted-in | not-opted-in | opt-in-not-required).
+	//
+	//    * parent-zoneID - The ID of the zone that handles some of the Local Zone
+	//    and Wavelength Zone control plane operations, such as API calls.
+	//
+	//    * parent-zoneName - The ID of the zone that handles some of the Local
+	//    Zone and Wavelength Zone control plane operations, such as API calls.
+	//
+	//    * region-name - The name of the Region for the Zone (for example, us-east-1).
+	//
+	//    * state - The state of the Availability Zone, the Local Zone, or the Wavelength
+	//    Zone (available | information | impaired | unavailable).
+	//
+	//    * zone-id - The ID of the Availability Zone (for example, use1-az1), the
+	//    Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example,
+	//    us-east-1-wl1-bos-wlz-1).
+	//
+	//    * zone-type - The type of zone, for example, local-zone.
+ // + // * zone-name - The name of the Availability Zone (for example, us-east-1a), + // the Local Zone (for example, us-west-2-lax-1a), or the Wavelength Zone + // (for example, us-east-1-wl1-bos-wlz-1). + // + // * zone-type - The type of zone, for example, local-zone. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the Availability Zones, Local Zones, and Wavelength Zones. + ZoneIds []*string `locationName:"ZoneId" locationNameList:"ZoneId" type:"list"` + + // The names of the Availability Zones, Local Zones, and Wavelength Zones. + ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesInput) GoString() string { + return s.String() +} + +// SetAllAvailabilityZones sets the AllAvailabilityZones field's value. +func (s *DescribeAvailabilityZonesInput) SetAllAvailabilityZones(v bool) *DescribeAvailabilityZonesInput { + s.AllAvailabilityZones = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeAvailabilityZonesInput) SetDryRun(v bool) *DescribeAvailabilityZonesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAvailabilityZonesInput) SetFilters(v []*Filter) *DescribeAvailabilityZonesInput { + s.Filters = v + return s +} + +// SetZoneIds sets the ZoneIds field's value. +func (s *DescribeAvailabilityZonesInput) SetZoneIds(v []*string) *DescribeAvailabilityZonesInput { + s.ZoneIds = v + return s +} + +// SetZoneNames sets the ZoneNames field's value. 
+func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvailabilityZonesInput { + s.ZoneNames = v + return s +} + +type DescribeAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Availability Zones, Local Zones, and Wavelength Zones. + AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailabilityZonesOutput) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *DescribeAvailabilityZonesOutput) SetAvailabilityZones(v []*AvailabilityZone) *DescribeAvailabilityZonesOutput { + s.AvailabilityZones = v + return s +} + +type DescribeBundleTasksInput struct { + _ struct{} `type:"structure"` + + // The bundle task IDs. + // + // Default: Describes all your bundle tasks. + BundleIds []*string `locationName:"BundleId" locationNameList:"BundleId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * bundle-id - The ID of the bundle task. + // + // * error-code - If the task failed, the error code returned. + // + // * error-message - If the task failed, the error message returned. + // + // * instance-id - The ID of the instance. + // + // * progress - The level of task completion, as a percentage (for example, + // 20%). + // + // * s3-bucket - The Amazon S3 bucket to store the AMI. + // + // * s3-prefix - The beginning of the AMI name. 
+ // + // * start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). + // + // * state - The state of the task (pending | waiting-for-shutdown | bundling + // | storing | cancelling | complete | failed). + // + // * update-time - The time of the most recent update for the task. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksInput) GoString() string { + return s.String() +} + +// SetBundleIds sets the BundleIds field's value. +func (s *DescribeBundleTasksInput) SetBundleIds(v []*string) *DescribeBundleTasksInput { + s.BundleIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeBundleTasksInput) SetDryRun(v bool) *DescribeBundleTasksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeBundleTasksInput) SetFilters(v []*Filter) *DescribeBundleTasksInput { + s.Filters = v + return s +} + +type DescribeBundleTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the bundle tasks. + BundleTasks []*BundleTask `locationName:"bundleInstanceTasksSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeBundleTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBundleTasksOutput) GoString() string { + return s.String() +} + +// SetBundleTasks sets the BundleTasks field's value. 
+func (s *DescribeBundleTasksOutput) SetBundleTasks(v []*BundleTask) *DescribeBundleTasksOutput { + s.BundleTasks = v + return s +} + +type DescribeByoipCidrsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + // + // MaxResults is a required field + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeByoipCidrsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeByoipCidrsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeByoipCidrsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeByoipCidrsInput"} + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeByoipCidrsInput) SetDryRun(v bool) *DescribeByoipCidrsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeByoipCidrsInput) SetMaxResults(v int64) *DescribeByoipCidrsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeByoipCidrsInput) SetNextToken(v string) *DescribeByoipCidrsInput { + s.NextToken = &v + return s +} + +type DescribeByoipCidrsOutput struct { + _ struct{} `type:"structure"` + + // Information about your address ranges. + ByoipCidrs []*ByoipCidr `locationName:"byoipCidrSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeByoipCidrsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeByoipCidrsOutput) GoString() string { + return s.String() +} + +// SetByoipCidrs sets the ByoipCidrs field's value. +func (s *DescribeByoipCidrsOutput) SetByoipCidrs(v []*ByoipCidr) *DescribeByoipCidrsOutput { + s.ByoipCidrs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeByoipCidrsOutput) SetNextToken(v string) *DescribeByoipCidrsOutput { + s.NextToken = &v + return s +} + +type DescribeCapacityReservationsInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + CapacityReservationIds []*string `locationName:"CapacityReservationId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * instance-type - The type of instance for which the Capacity Reservation + // reserves capacity. + // + // * owner-id - The ID of the AWS account that owns the Capacity Reservation. + // + // * availability-zone-id - The Availability Zone ID of the Capacity Reservation. 
+ // + // * instance-platform - The type of operating system for which the Capacity + // Reservation reserves capacity. + // + // * availability-zone - The Availability Zone ID of the Capacity Reservation. + // + // * tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity + // Reservation can have one of the following tenancy settings: default - + // The Capacity Reservation is created on hardware that is shared with other + // AWS accounts. dedicated - The Capacity Reservation is created on single-tenant + // hardware that is dedicated to a single AWS account. + // + // * state - The current state of the Capacity Reservation. A Capacity Reservation + // can be in one of the following states: active- The Capacity Reservation + // is active and the capacity is available for your use. expired - The Capacity + // Reservation expired automatically at the date and time specified in your + // request. The reserved capacity is no longer available for your use. cancelled + // - The Capacity Reservation was manually cancelled. The reserved capacity + // is no longer available for your use. pending - The Capacity Reservation + // request was successful but the capacity provisioning is still pending. + // failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. + // + // * end-date - The date and time at which the Capacity Reservation expires. + // When a Capacity Reservation expires, the reserved capacity is released + // and you can no longer launch instances into it. The Capacity Reservation's + // state changes to expired when it reaches its end date and time. + // + // * end-date-type - Indicates the way in which the Capacity Reservation + // ends. 
A Capacity Reservation can have one of the following end types: + // unlimited - The Capacity Reservation remains active until you explicitly + // cancel it. limited - The Capacity Reservation expires automatically at + // a specified date and time. + // + // * instance-match-criteria - Indicates the type of instance launches that + // the Capacity Reservation accepts. The options include: open - The Capacity + // Reservation accepts all instances that have matching attributes (instance + // type, platform, and Availability Zone). Instances that have matching attributes + // launch into the Capacity Reservation automatically without specifying + // any additional parameters. targeted - The Capacity Reservation only accepts + // instances that have matching attributes (instance type, platform, and + // Availability Zone), and explicitly target the Capacity Reservation. This + // ensures that only permitted instances can use the reserved capacity. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCapacityReservationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCapacityReservationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeCapacityReservationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCapacityReservationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationIds sets the CapacityReservationIds field's value. +func (s *DescribeCapacityReservationsInput) SetCapacityReservationIds(v []*string) *DescribeCapacityReservationsInput { + s.CapacityReservationIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCapacityReservationsInput) SetDryRun(v bool) *DescribeCapacityReservationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCapacityReservationsInput) SetFilters(v []*Filter) *DescribeCapacityReservationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeCapacityReservationsInput) SetMaxResults(v int64) *DescribeCapacityReservationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCapacityReservationsInput) SetNextToken(v string) *DescribeCapacityReservationsInput { + s.NextToken = &v + return s +} + +type DescribeCapacityReservationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Capacity Reservations. + CapacityReservations []*CapacityReservation `locationName:"capacityReservationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCapacityReservationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCapacityReservationsOutput) GoString() string { + return s.String() +} + +// SetCapacityReservations sets the CapacityReservations field's value. +func (s *DescribeCapacityReservationsOutput) SetCapacityReservations(v []*CapacityReservation) *DescribeCapacityReservationsOutput { + s.CapacityReservations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCapacityReservationsOutput) SetNextToken(v string) *DescribeCapacityReservationsOutput { + s.NextToken = &v + return s +} + +type DescribeCarrierGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more carrier gateway IDs. + CarrierGatewayIds []*string `locationName:"CarrierGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * carrier-gateway-id - The ID of the carrier gateway. + // + // * state - The state of the carrier gateway (pending | failed | available + // | deleting | deleted). + // + // * owner-id - The AWS account ID of the owner of the carrier gateway. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. 
Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC associated with the carrier gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCarrierGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCarrierGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayIds sets the CarrierGatewayIds field's value. +func (s *DescribeCarrierGatewaysInput) SetCarrierGatewayIds(v []*string) *DescribeCarrierGatewaysInput { + s.CarrierGatewayIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCarrierGatewaysInput) SetDryRun(v bool) *DescribeCarrierGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCarrierGatewaysInput) SetFilters(v []*Filter) *DescribeCarrierGatewaysInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeCarrierGatewaysInput) SetMaxResults(v int64) *DescribeCarrierGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysInput) SetNextToken(v string) *DescribeCarrierGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeCarrierGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateways []*CarrierGateway `locationName:"carrierGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysOutput) GoString() string { + return s.String() +} + +// SetCarrierGateways sets the CarrierGateways field's value. +func (s *DescribeCarrierGatewaysOutput) SetCarrierGateways(v []*CarrierGateway) *DescribeCarrierGatewaysOutput { + s.CarrierGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysOutput) SetNextToken(v string) *DescribeCarrierGatewaysOutput { + s.NextToken = &v + return s +} + +type DescribeClassicLinkInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * group-id - The ID of a VPC security group that's associated with the + // instance. + // + // * instance-id - The ID of the instance. 
+ // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC to which the instance is linked. vpc-id - + // The ID of the VPC that the instance is linked to. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + // + // Constraint: If the value is greater than 1000, we return only 1000 items. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeClassicLinkInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClassicLinkInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeClassicLinkInstancesInput) SetDryRun(v bool) *DescribeClassicLinkInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeClassicLinkInstancesInput) SetFilters(v []*Filter) *DescribeClassicLinkInstancesInput { + s.Filters = v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeClassicLinkInstancesInput) SetInstanceIds(v []*string) *DescribeClassicLinkInstancesInput { + s.InstanceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeClassicLinkInstancesInput) SetMaxResults(v int64) *DescribeClassicLinkInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClassicLinkInstancesInput) SetNextToken(v string) *DescribeClassicLinkInstancesInput { + s.NextToken = &v + return s +} + +type DescribeClassicLinkInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more linked EC2-Classic instances. + Instances []*ClassicLinkInstance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClassicLinkInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClassicLinkInstancesOutput) GoString() string { + return s.String() +} + +// SetInstances sets the Instances field's value. +func (s *DescribeClassicLinkInstancesOutput) SetInstances(v []*ClassicLinkInstance) *DescribeClassicLinkInstancesOutput { + s.Instances = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClassicLinkInstancesOutput) SetNextToken(v string) *DescribeClassicLinkInstancesOutput { + s.NextToken = &v + return s +} + +type DescribeClientVpnAuthorizationRulesInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * description - The description of the authorization rule. + // + // * destination-cidr - The CIDR of the network to which the authorization + // rule applies. + // + // * group-id - The ID of the Active Directory group to which the authorization + // rule grants access. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the nextToken + // value. 
+ MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnAuthorizationRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnAuthorizationRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClientVpnAuthorizationRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClientVpnAuthorizationRulesInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DescribeClientVpnAuthorizationRulesInput) SetClientVpnEndpointId(v string) *DescribeClientVpnAuthorizationRulesInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeClientVpnAuthorizationRulesInput) SetDryRun(v bool) *DescribeClientVpnAuthorizationRulesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeClientVpnAuthorizationRulesInput) SetFilters(v []*Filter) *DescribeClientVpnAuthorizationRulesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeClientVpnAuthorizationRulesInput) SetMaxResults(v int64) *DescribeClientVpnAuthorizationRulesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeClientVpnAuthorizationRulesInput) SetNextToken(v string) *DescribeClientVpnAuthorizationRulesInput { + s.NextToken = &v + return s +} + +type DescribeClientVpnAuthorizationRulesOutput struct { + _ struct{} `type:"structure"` + + // Information about the authorization rules. + AuthorizationRules []*AuthorizationRule `locationName:"authorizationRule" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnAuthorizationRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnAuthorizationRulesOutput) GoString() string { + return s.String() +} + +// SetAuthorizationRules sets the AuthorizationRules field's value. +func (s *DescribeClientVpnAuthorizationRulesOutput) SetAuthorizationRules(v []*AuthorizationRule) *DescribeClientVpnAuthorizationRulesOutput { + s.AuthorizationRules = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnAuthorizationRulesOutput) SetNextToken(v string) *DescribeClientVpnAuthorizationRulesOutput { + s.NextToken = &v + return s +} + +type DescribeClientVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. 
+ // + // * connection-id - The ID of the connection. + // + // * username - For Active Directory client authentication, the user name + // of the client who established the client connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the nextToken + // value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClientVpnConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClientVpnConnectionsInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DescribeClientVpnConnectionsInput) SetClientVpnEndpointId(v string) *DescribeClientVpnConnectionsInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeClientVpnConnectionsInput) SetDryRun(v bool) *DescribeClientVpnConnectionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeClientVpnConnectionsInput) SetFilters(v []*Filter) *DescribeClientVpnConnectionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeClientVpnConnectionsInput) SetMaxResults(v int64) *DescribeClientVpnConnectionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnConnectionsInput) SetNextToken(v string) *DescribeClientVpnConnectionsInput { + s.NextToken = &v + return s +} + +type DescribeClientVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the active and terminated client connections. + Connections []*ClientVpnConnection `locationName:"connections" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnConnectionsOutput) GoString() string { + return s.String() +} + +// SetConnections sets the Connections field's value. +func (s *DescribeClientVpnConnectionsOutput) SetConnections(v []*ClientVpnConnection) *DescribeClientVpnConnectionsOutput { + s.Connections = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnConnectionsOutput) SetNextToken(v string) *DescribeClientVpnConnectionsOutput { + s.NextToken = &v + return s +} + +type DescribeClientVpnEndpointsInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. 
+ ClientVpnEndpointIds []*string `locationName:"ClientVpnEndpointId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * endpoint-id - The ID of the Client VPN endpoint. + // + // * transport-protocol - The transport protocol (tcp | udp). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the nextToken + // value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClientVpnEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClientVpnEndpointsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointIds sets the ClientVpnEndpointIds field's value. +func (s *DescribeClientVpnEndpointsInput) SetClientVpnEndpointIds(v []*string) *DescribeClientVpnEndpointsInput { + s.ClientVpnEndpointIds = v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeClientVpnEndpointsInput) SetDryRun(v bool) *DescribeClientVpnEndpointsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeClientVpnEndpointsInput) SetFilters(v []*Filter) *DescribeClientVpnEndpointsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeClientVpnEndpointsInput) SetMaxResults(v int64) *DescribeClientVpnEndpointsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnEndpointsInput) SetNextToken(v string) *DescribeClientVpnEndpointsInput { + s.NextToken = &v + return s +} + +type DescribeClientVpnEndpointsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Client VPN endpoints. + ClientVpnEndpoints []*ClientVpnEndpoint `locationName:"clientVpnEndpoint" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnEndpointsOutput) GoString() string { + return s.String() +} + +// SetClientVpnEndpoints sets the ClientVpnEndpoints field's value. +func (s *DescribeClientVpnEndpointsOutput) SetClientVpnEndpoints(v []*ClientVpnEndpoint) *DescribeClientVpnEndpointsOutput { + s.ClientVpnEndpoints = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnEndpointsOutput) SetNextToken(v string) *DescribeClientVpnEndpointsOutput { + s.NextToken = &v + return s +} + +type DescribeClientVpnRoutesInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. 
+ // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * destination-cidr - The CIDR of the route destination. + // + // * origin - How the route was associated with the Client VPN endpoint (associate + // | add-route). + // + // * target-subnet - The ID of the subnet through which traffic is routed. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the nextToken + // value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeClientVpnRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClientVpnRoutesInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DescribeClientVpnRoutesInput) SetClientVpnEndpointId(v string) *DescribeClientVpnRoutesInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeClientVpnRoutesInput) SetDryRun(v bool) *DescribeClientVpnRoutesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeClientVpnRoutesInput) SetFilters(v []*Filter) *DescribeClientVpnRoutesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeClientVpnRoutesInput) SetMaxResults(v int64) *DescribeClientVpnRoutesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnRoutesInput) SetNextToken(v string) *DescribeClientVpnRoutesInput { + s.NextToken = &v + return s +} + +type DescribeClientVpnRoutesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Client VPN endpoint routes. 
+ Routes []*ClientVpnRoute `locationName:"routes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeClientVpnRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnRoutesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnRoutesOutput) SetNextToken(v string) *DescribeClientVpnRoutesOutput { + s.NextToken = &v + return s +} + +// SetRoutes sets the Routes field's value. +func (s *DescribeClientVpnRoutesOutput) SetRoutes(v []*ClientVpnRoute) *DescribeClientVpnRoutesOutput { + s.Routes = v + return s +} + +type DescribeClientVpnTargetNetworksInput struct { + _ struct{} `type:"structure"` + + // The IDs of the target network associations. + AssociationIds []*string `locationNameList:"item" type:"list"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * association-id - The ID of the association. + // + // * target-network-id - The ID of the subnet specified as the target network. + // + // * vpc-id - The ID of the VPC in which the target network is located. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the nextToken + // value. 
+ MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnTargetNetworksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnTargetNetworksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClientVpnTargetNetworksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClientVpnTargetNetworksInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationIds sets the AssociationIds field's value. +func (s *DescribeClientVpnTargetNetworksInput) SetAssociationIds(v []*string) *DescribeClientVpnTargetNetworksInput { + s.AssociationIds = v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DescribeClientVpnTargetNetworksInput) SetClientVpnEndpointId(v string) *DescribeClientVpnTargetNetworksInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeClientVpnTargetNetworksInput) SetDryRun(v bool) *DescribeClientVpnTargetNetworksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeClientVpnTargetNetworksInput) SetFilters(v []*Filter) *DescribeClientVpnTargetNetworksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeClientVpnTargetNetworksInput) SetMaxResults(v int64) *DescribeClientVpnTargetNetworksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnTargetNetworksInput) SetNextToken(v string) *DescribeClientVpnTargetNetworksInput { + s.NextToken = &v + return s +} + +type DescribeClientVpnTargetNetworksOutput struct { + _ struct{} `type:"structure"` + + // Information about the associated target networks. + ClientVpnTargetNetworks []*TargetNetwork `locationName:"clientVpnTargetNetworks" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeClientVpnTargetNetworksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClientVpnTargetNetworksOutput) GoString() string { + return s.String() +} + +// SetClientVpnTargetNetworks sets the ClientVpnTargetNetworks field's value. +func (s *DescribeClientVpnTargetNetworksOutput) SetClientVpnTargetNetworks(v []*TargetNetwork) *DescribeClientVpnTargetNetworksOutput { + s.ClientVpnTargetNetworks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeClientVpnTargetNetworksOutput) SetNextToken(v string) *DescribeClientVpnTargetNetworksOutput { + s.NextToken = &v + return s +} + +type DescribeCoipPoolsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. 
The following are the possible values: + // + // * coip-pool.pool-id + // + // * coip-pool.local-gateway-route-table-id + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the address pools. + PoolIds []*string `locationName:"PoolId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeCoipPoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCoipPoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCoipPoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCoipPoolsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCoipPoolsInput) SetDryRun(v bool) *DescribeCoipPoolsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCoipPoolsInput) SetFilters(v []*Filter) *DescribeCoipPoolsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeCoipPoolsInput) SetMaxResults(v int64) *DescribeCoipPoolsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCoipPoolsInput) SetNextToken(v string) *DescribeCoipPoolsInput { + s.NextToken = &v + return s +} + +// SetPoolIds sets the PoolIds field's value. 
+func (s *DescribeCoipPoolsInput) SetPoolIds(v []*string) *DescribeCoipPoolsInput { + s.PoolIds = v + return s +} + +type DescribeCoipPoolsOutput struct { + _ struct{} `type:"structure"` + + // Information about the address pools. + CoipPools []*CoipPool `locationName:"coipPoolSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCoipPoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCoipPoolsOutput) GoString() string { + return s.String() +} + +// SetCoipPools sets the CoipPools field's value. +func (s *DescribeCoipPoolsOutput) SetCoipPools(v []*CoipPool) *DescribeCoipPoolsOutput { + s.CoipPools = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCoipPoolsOutput) SetNextToken(v string) *DescribeCoipPoolsOutput { + s.NextToken = &v + return s +} + +type DescribeConversionTasksInput struct { + _ struct{} `type:"structure"` + + // The conversion task IDs. + ConversionTaskIds []*string `locationName:"conversionTaskId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeConversionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksInput) GoString() string { + return s.String() +} + +// SetConversionTaskIds sets the ConversionTaskIds field's value. 
+func (s *DescribeConversionTasksInput) SetConversionTaskIds(v []*string) *DescribeConversionTasksInput { + s.ConversionTaskIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeConversionTasksInput) SetDryRun(v bool) *DescribeConversionTasksInput { + s.DryRun = &v + return s +} + +type DescribeConversionTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion tasks. + ConversionTasks []*ConversionTask `locationName:"conversionTasks" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeConversionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConversionTasksOutput) GoString() string { + return s.String() +} + +// SetConversionTasks sets the ConversionTasks field's value. +func (s *DescribeConversionTasksOutput) SetConversionTasks(v []*ConversionTask) *DescribeConversionTasksOutput { + s.ConversionTasks = v + return s +} + +// Contains the parameters for DescribeCustomerGateways. +type DescribeCustomerGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more customer gateway IDs. + // + // Default: Describes all your customer gateways. + CustomerGatewayIds []*string `locationName:"CustomerGatewayId" locationNameList:"CustomerGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // System Number (ASN). + // + // * customer-gateway-id - The ID of the customer gateway. 
+ // + // * ip-address - The IP address of the customer gateway's Internet-routable + // external interface. + // + // * state - The state of the customer gateway (pending | available | deleting + // | deleted). + // + // * type - The type of customer gateway. Currently, the only supported type + // is ipsec.1. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysInput) GoString() string { + return s.String() +} + +// SetCustomerGatewayIds sets the CustomerGatewayIds field's value. +func (s *DescribeCustomerGatewaysInput) SetCustomerGatewayIds(v []*string) *DescribeCustomerGatewaysInput { + s.CustomerGatewayIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCustomerGatewaysInput) SetDryRun(v bool) *DescribeCustomerGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCustomerGatewaysInput) SetFilters(v []*Filter) *DescribeCustomerGatewaysInput { + s.Filters = v + return s +} + +// Contains the output of DescribeCustomerGateways. +type DescribeCustomerGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more customer gateways. 
+ CustomerGateways []*CustomerGateway `locationName:"customerGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeCustomerGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomerGatewaysOutput) GoString() string { + return s.String() +} + +// SetCustomerGateways sets the CustomerGateways field's value. +func (s *DescribeCustomerGatewaysOutput) SetCustomerGateways(v []*CustomerGateway) *DescribeCustomerGatewaysOutput { + s.CustomerGateways = v + return s +} + +type DescribeDhcpOptionsInput struct { + _ struct{} `type:"structure"` + + // The IDs of one or more DHCP options sets. + // + // Default: Describes all your DHCP options sets. + DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * dhcp-options-id - The ID of a DHCP options set. + // + // * key - The key for one of the options (for example, domain-name). + // + // * value - The value for one of the options. + // + // * owner-id - The ID of the AWS account that owns the DHCP options set. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. 
Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDhcpOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDhcpOptionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDhcpOptionsIds sets the DhcpOptionsIds field's value. +func (s *DescribeDhcpOptionsInput) SetDhcpOptionsIds(v []*string) *DescribeDhcpOptionsInput { + s.DhcpOptionsIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeDhcpOptionsInput) SetDryRun(v bool) *DescribeDhcpOptionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeDhcpOptionsInput) SetFilters(v []*Filter) *DescribeDhcpOptionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDhcpOptionsInput) SetMaxResults(v int64) *DescribeDhcpOptionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeDhcpOptionsInput) SetNextToken(v string) *DescribeDhcpOptionsInput { + s.NextToken = &v + return s +} + +type DescribeDhcpOptionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more DHCP options sets. + DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeDhcpOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDhcpOptionsOutput) GoString() string { + return s.String() +} + +// SetDhcpOptions sets the DhcpOptions field's value. +func (s *DescribeDhcpOptionsOutput) SetDhcpOptions(v []*DhcpOptions) *DescribeDhcpOptionsOutput { + s.DhcpOptions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDhcpOptionsOutput) SetNextToken(v string) *DescribeDhcpOptionsOutput { + s.NextToken = &v + return s +} + +type DescribeEgressOnlyInternetGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more egress-only internet gateway IDs. + EgressOnlyInternetGatewayIds []*string `locationName:"EgressOnlyInternetGatewayId" locationNameList:"item" type:"list"` + + // One or more filters. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEgressOnlyInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEgressOnlyInternetGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEgressOnlyInternetGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEgressOnlyInternetGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeEgressOnlyInternetGatewaysInput) SetDryRun(v bool) *DescribeEgressOnlyInternetGatewaysInput { + s.DryRun = &v + return s +} + +// SetEgressOnlyInternetGatewayIds sets the EgressOnlyInternetGatewayIds field's value. +func (s *DescribeEgressOnlyInternetGatewaysInput) SetEgressOnlyInternetGatewayIds(v []*string) *DescribeEgressOnlyInternetGatewaysInput { + s.EgressOnlyInternetGatewayIds = v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeEgressOnlyInternetGatewaysInput) SetFilters(v []*Filter) *DescribeEgressOnlyInternetGatewaysInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeEgressOnlyInternetGatewaysInput) SetMaxResults(v int64) *DescribeEgressOnlyInternetGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEgressOnlyInternetGatewaysInput) SetNextToken(v string) *DescribeEgressOnlyInternetGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeEgressOnlyInternetGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the egress-only internet gateways. + EgressOnlyInternetGateways []*EgressOnlyInternetGateway `locationName:"egressOnlyInternetGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeEgressOnlyInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEgressOnlyInternetGatewaysOutput) GoString() string { + return s.String() +} + +// SetEgressOnlyInternetGateways sets the EgressOnlyInternetGateways field's value. +func (s *DescribeEgressOnlyInternetGatewaysOutput) SetEgressOnlyInternetGateways(v []*EgressOnlyInternetGateway) *DescribeEgressOnlyInternetGatewaysOutput { + s.EgressOnlyInternetGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeEgressOnlyInternetGatewaysOutput) SetNextToken(v string) *DescribeEgressOnlyInternetGatewaysOutput { + s.NextToken = &v + return s +} + +type DescribeElasticGpusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The Elastic Graphics accelerator IDs. + ElasticGpuIds []*string `locationName:"ElasticGpuId" locationNameList:"item" type:"list"` + + // The filters. + // + // * availability-zone - The Availability Zone in which the Elastic Graphics + // accelerator resides. + // + // * elastic-gpu-health - The status of the Elastic Graphics accelerator + // (OK | IMPAIRED). + // + // * elastic-gpu-state - The state of the Elastic Graphics accelerator (ATTACHED). + // + // * elastic-gpu-type - The type of Elastic Graphics accelerator; for example, + // eg1.medium. + // + // * instance-id - The ID of the instance to which the Elastic Graphics accelerator + // is associated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. + MaxResults *int64 `min:"10" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticGpusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticGpusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeElasticGpusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticGpusInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeElasticGpusInput) SetDryRun(v bool) *DescribeElasticGpusInput { + s.DryRun = &v + return s +} + +// SetElasticGpuIds sets the ElasticGpuIds field's value. +func (s *DescribeElasticGpusInput) SetElasticGpuIds(v []*string) *DescribeElasticGpusInput { + s.ElasticGpuIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeElasticGpusInput) SetFilters(v []*Filter) *DescribeElasticGpusInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeElasticGpusInput) SetMaxResults(v int64) *DescribeElasticGpusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeElasticGpusInput) SetNextToken(v string) *DescribeElasticGpusInput { + s.NextToken = &v + return s +} + +type DescribeElasticGpusOutput struct { + _ struct{} `type:"structure"` + + // Information about the Elastic Graphics accelerators. + ElasticGpuSet []*ElasticGpus `locationName:"elasticGpuSet" locationNameList:"item" type:"list"` + + // The total number of items to return. If the total number of items available + // is more than the value specified in max-items then a Next-Token will be provided + // in the output that you can use to resume pagination. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeElasticGpusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticGpusOutput) GoString() string { + return s.String() +} + +// SetElasticGpuSet sets the ElasticGpuSet field's value. +func (s *DescribeElasticGpusOutput) SetElasticGpuSet(v []*ElasticGpus) *DescribeElasticGpusOutput { + s.ElasticGpuSet = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeElasticGpusOutput) SetMaxResults(v int64) *DescribeElasticGpusOutput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusOutput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the export image tasks. + ExportImageTaskIds []*string `locationName:"ExportImageTaskId" locationNameList:"ExportImageTaskId" type:"list"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeExportImageTasksInput) SetDryRun(v bool) *DescribeExportImageTasksInput { + s.DryRun = &v + return s +} + +// SetExportImageTaskIds sets the ExportImageTaskIds field's value. +func (s *DescribeExportImageTasksInput) SetExportImageTaskIds(v []*string) *DescribeExportImageTasksInput { + s.ExportImageTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportImageTasksInput) SetFilters(v []*Filter) *DescribeExportImageTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeExportImageTasksInput) SetMaxResults(v int64) *DescribeExportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksInput) SetNextToken(v string) *DescribeExportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export image tasks. + ExportImageTasks []*ExportImageTask `locationName:"exportImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. 
This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksOutput) GoString() string { + return s.String() +} + +// SetExportImageTasks sets the ExportImageTasks field's value. +func (s *DescribeExportImageTasksOutput) SetExportImageTasks(v []*ExportImageTask) *DescribeExportImageTasksOutput { + s.ExportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksOutput) SetNextToken(v string) *DescribeExportImageTasksOutput { + s.NextToken = &v + return s +} + +type DescribeExportTasksInput struct { + _ struct{} `type:"structure"` + + // The export task IDs. + ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"` + + // the filters for the export tasks. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksInput) GoString() string { + return s.String() +} + +// SetExportTaskIds sets the ExportTaskIds field's value. +func (s *DescribeExportTasksInput) SetExportTaskIds(v []*string) *DescribeExportTasksInput { + s.ExportTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportTasksInput) SetFilters(v []*Filter) *DescribeExportTasksInput { + s.Filters = v + return s +} + +type DescribeExportTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export tasks. 
+ ExportTasks []*ExportTask `locationName:"exportTaskSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeExportTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportTasksOutput) GoString() string { + return s.String() +} + +// SetExportTasks sets the ExportTasks field's value. +func (s *DescribeExportTasksOutput) SetExportTasks(v []*ExportTask) *DescribeExportTasksOutput { + s.ExportTasks = v + return s +} + +// Describes fast snapshot restores for a snapshot. +type DescribeFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. + DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores. + State *string `locationName:"state" type:"string" enum:"FastSnapshotRestoreStateCode"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. + // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. + StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFastSnapshotRestoreSuccessItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetAvailabilityZone(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.AvailabilityZone = &v + return s +} + +// SetDisabledTime sets the DisabledTime field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetDisabledTime(v time.Time) *DescribeFastSnapshotRestoreSuccessItem { + s.DisabledTime = &v + return s +} + +// SetDisablingTime sets the DisablingTime field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetDisablingTime(v time.Time) *DescribeFastSnapshotRestoreSuccessItem { + s.DisablingTime = &v + return s +} + +// SetEnabledTime sets the EnabledTime field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetEnabledTime(v time.Time) *DescribeFastSnapshotRestoreSuccessItem { + s.EnabledTime = &v + return s +} + +// SetEnablingTime sets the EnablingTime field's value. 
+func (s *DescribeFastSnapshotRestoreSuccessItem) SetEnablingTime(v time.Time) *DescribeFastSnapshotRestoreSuccessItem { + s.EnablingTime = &v + return s +} + +// SetOptimizingTime sets the OptimizingTime field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetOptimizingTime(v time.Time) *DescribeFastSnapshotRestoreSuccessItem { + s.OptimizingTime = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetOwnerAlias(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetOwnerId(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.OwnerId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetSnapshotId(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.SnapshotId = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetState(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.State = &v + return s +} + +// SetStateTransitionReason sets the StateTransitionReason field's value. +func (s *DescribeFastSnapshotRestoreSuccessItem) SetStateTransitionReason(v string) *DescribeFastSnapshotRestoreSuccessItem { + s.StateTransitionReason = &v + return s +} + +type DescribeFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. The possible values are: + // + // * availability-zone: The Availability Zone of the snapshot. 
+ // + // * owner-id: The ID of the AWS account that enabled fast snapshot restore + // on the snapshot. + // + // * snapshot-id: The ID of the snapshot. + // + // * state: The state of fast snapshot restores for the snapshot (enabling + // | optimizing | enabled | disabling | disabled). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFastSnapshotRestoresInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFastSnapshotRestoresInput) SetDryRun(v bool) *DescribeFastSnapshotRestoresInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFastSnapshotRestoresInput) SetFilters(v []*Filter) *DescribeFastSnapshotRestoresInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFastSnapshotRestoresInput) SetMaxResults(v int64) *DescribeFastSnapshotRestoresInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFastSnapshotRestoresInput) SetNextToken(v string) *DescribeFastSnapshotRestoresInput { + s.NextToken = &v + return s +} + +type DescribeFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the state of fast snapshot restores. 
+ FastSnapshotRestores []*DescribeFastSnapshotRestoreSuccessItem `locationName:"fastSnapshotRestoreSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFastSnapshotRestoresOutput) GoString() string { + return s.String() +} + +// SetFastSnapshotRestores sets the FastSnapshotRestores field's value. +func (s *DescribeFastSnapshotRestoresOutput) SetFastSnapshotRestores(v []*DescribeFastSnapshotRestoreSuccessItem) *DescribeFastSnapshotRestoresOutput { + s.FastSnapshotRestores = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFastSnapshotRestoresOutput) SetNextToken(v string) *DescribeFastSnapshotRestoresOutput { + s.NextToken = &v + return s +} + +// Describes the instances that could not be launched by the fleet. +type DescribeFleetError struct { + _ struct{} `type:"structure"` + + // The error code that indicates why the instance could not be launched. For + // more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). + ErrorCode *string `locationName:"errorCode" type:"string"` + + // The error message that describes why the instance could not be launched. + // For more information about error messages, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The launch templates and overrides that were used for launching the instances. + // The values that you specify in the Overrides replace the values in the launch + // template. 
+ LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse `locationName:"launchTemplateAndOverrides" type:"structure"` + + // Indicates if the instance that could not be launched was a Spot Instance + // or On-Demand Instance. + Lifecycle *string `locationName:"lifecycle" type:"string" enum:"InstanceLifecycle"` +} + +// String returns the string representation +func (s DescribeFleetError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *DescribeFleetError) SetErrorCode(v string) *DescribeFleetError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *DescribeFleetError) SetErrorMessage(v string) *DescribeFleetError { + s.ErrorMessage = &v + return s +} + +// SetLaunchTemplateAndOverrides sets the LaunchTemplateAndOverrides field's value. +func (s *DescribeFleetError) SetLaunchTemplateAndOverrides(v *LaunchTemplateAndOverridesResponse) *DescribeFleetError { + s.LaunchTemplateAndOverrides = v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *DescribeFleetError) SetLifecycle(v string) *DescribeFleetError { + s.Lifecycle = &v + return s +} + +type DescribeFleetHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `type:"string" enum:"FleetEventType"` + + // The ID of the EC2 Fleet. 
+ // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s DescribeFleetHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetHistoryInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetHistoryInput) SetDryRun(v bool) *DescribeFleetHistoryInput { + s.DryRun = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *DescribeFleetHistoryInput) SetEventType(v string) *DescribeFleetHistoryInput { + s.EventType = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetHistoryInput) SetFleetId(v string) *DescribeFleetHistoryInput { + s.FleetId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeFleetHistoryInput) SetMaxResults(v int64) *DescribeFleetHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetHistoryInput) SetNextToken(v string) *DescribeFleetHistoryInput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeFleetHistoryInput) SetStartTime(v time.Time) *DescribeFleetHistoryInput { + s.StartTime = &v + return s +} + +type DescribeFleetHistoryOutput struct { + _ struct{} `type:"structure"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // Information about the events in the history of the EC2 Fleet. + HistoryRecords []*HistoryRecordEntry `locationName:"historyRecordSet" locationNameList:"item" type:"list"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The start date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s DescribeFleetHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetHistoryOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetHistoryOutput) SetFleetId(v string) *DescribeFleetHistoryOutput { + s.FleetId = &v + return s +} + +// SetHistoryRecords sets the HistoryRecords field's value. 
+func (s *DescribeFleetHistoryOutput) SetHistoryRecords(v []*HistoryRecordEntry) *DescribeFleetHistoryOutput { + s.HistoryRecords = v + return s +} + +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value. +func (s *DescribeFleetHistoryOutput) SetLastEvaluatedTime(v time.Time) *DescribeFleetHistoryOutput { + s.LastEvaluatedTime = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetHistoryOutput) SetNextToken(v string) *DescribeFleetHistoryOutput { + s.NextToken = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeFleetHistoryOutput) SetStartTime(v time.Time) *DescribeFleetHistoryOutput { + s.StartTime = &v + return s +} + +type DescribeFleetInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * instance-type - The instance type. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The ID of the EC2 Fleet. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFleetInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFleetInstancesInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetInstancesInput) SetDryRun(v bool) *DescribeFleetInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFleetInstancesInput) SetFilters(v []*Filter) *DescribeFleetInstancesInput { + s.Filters = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetInstancesInput) SetFleetId(v string) *DescribeFleetInstancesInput { + s.FleetId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFleetInstancesInput) SetMaxResults(v int64) *DescribeFleetInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetInstancesInput) SetNextToken(v string) *DescribeFleetInstancesInput { + s.NextToken = &v + return s +} + +type DescribeFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. This list is refreshed periodically and might be out + // of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The token for the next set of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetInstancesOutput) GoString() string { + return s.String() +} + +// SetActiveInstances sets the ActiveInstances field's value. +func (s *DescribeFleetInstancesOutput) SetActiveInstances(v []*ActiveInstance) *DescribeFleetInstancesOutput { + s.ActiveInstances = v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *DescribeFleetInstancesOutput) SetFleetId(v string) *DescribeFleetInstancesOutput { + s.FleetId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetInstancesOutput) SetNextToken(v string) *DescribeFleetInstancesOutput { + s.NextToken = &v + return s +} + +type DescribeFleetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * activity-status - The progress of the EC2 Fleet ( error | pending-fulfillment + // | pending-termination | fulfilled). + // + // * excess-capacity-termination-policy - Indicates whether to terminate + // running instances if the target capacity is decreased below the current + // EC2 Fleet size (true | false). + // + // * fleet-state - The state of the EC2 Fleet (submitted | active | deleted + // | failed | deleted-running | deleted-terminating | modifying). + // + // * replace-unhealthy-instances - Indicates whether EC2 Fleet should replace + // unhealthy instances (true | false). + // + // * type - The type of request (instant | request | maintain). 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The ID of the EC2 Fleets. + FleetIds []*string `locationName:"FleetId" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFleetsInput) SetDryRun(v bool) *DescribeFleetsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFleetsInput) SetFilters(v []*Filter) *DescribeFleetsInput { + s.Filters = v + return s +} + +// SetFleetIds sets the FleetIds field's value. +func (s *DescribeFleetsInput) SetFleetIds(v []*string) *DescribeFleetsInput { + s.FleetIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFleetsInput) SetMaxResults(v int64) *DescribeFleetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsInput) SetNextToken(v string) *DescribeFleetsInput { + s.NextToken = &v + return s +} + +// Describes the instances that were launched by the fleet. +type DescribeFleetsInstances struct { + _ struct{} `type:"structure"` + + // The IDs of the instances. + InstanceIds []*string `locationName:"instanceIds" locationNameList:"item" type:"list"` + + // The instance type. 
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The launch templates and overrides that were used for launching the instances. + // The values that you specify in the Overrides replace the values in the launch + // template. + LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse `locationName:"launchTemplateAndOverrides" type:"structure"` + + // Indicates if the instance that was launched is a Spot Instance or On-Demand + // Instance. + Lifecycle *string `locationName:"lifecycle" type:"string" enum:"InstanceLifecycle"` + + // The value is Windows for Windows instances. Otherwise, the value is blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` +} + +// String returns the string representation +func (s DescribeFleetsInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsInstances) GoString() string { + return s.String() +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeFleetsInstances) SetInstanceIds(v []*string) *DescribeFleetsInstances { + s.InstanceIds = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *DescribeFleetsInstances) SetInstanceType(v string) *DescribeFleetsInstances { + s.InstanceType = &v + return s +} + +// SetLaunchTemplateAndOverrides sets the LaunchTemplateAndOverrides field's value. +func (s *DescribeFleetsInstances) SetLaunchTemplateAndOverrides(v *LaunchTemplateAndOverridesResponse) *DescribeFleetsInstances { + s.LaunchTemplateAndOverrides = v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *DescribeFleetsInstances) SetLifecycle(v string) *DescribeFleetsInstances { + s.Lifecycle = &v + return s +} + +// SetPlatform sets the Platform field's value. 
+func (s *DescribeFleetsInstances) SetPlatform(v string) *DescribeFleetsInstances { + s.Platform = &v + return s +} + +type DescribeFleetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Fleets. + Fleets []*FleetData `locationName:"fleetSet" locationNameList:"item" type:"list"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetsOutput) GoString() string { + return s.String() +} + +// SetFleets sets the Fleets field's value. +func (s *DescribeFleetsOutput) SetFleets(v []*FleetData) *DescribeFleetsOutput { + s.Fleets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetsOutput) SetNextToken(v string) *DescribeFleetsOutput { + s.NextToken = &v + return s +} + +type DescribeFlowLogsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). + // + // * log-destination-type - The type of destination to which the flow log + // publishes data. Possible destination types include cloud-watch-logs and + // s3. + // + // * flow-log-id - The ID of the flow log. + // + // * log-group-name - The name of the log group. + // + // * resource-id - The ID of the VPC, subnet, or network interface. + // + // * traffic-type - The type of traffic (ACCEPT | REJECT | ALL). + // + // * tag: - The key/value combination of a tag assigned to the resource. 
+ // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // One or more flow log IDs. + // + // Constraint: Maximum of 1000 flow log IDs. + FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFlowLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFlowLogsInput) SetDryRun(v bool) *DescribeFlowLogsInput { + s.DryRun = &v + return s +} + +// SetFilter sets the Filter field's value. +func (s *DescribeFlowLogsInput) SetFilter(v []*Filter) *DescribeFlowLogsInput { + s.Filter = v + return s +} + +// SetFlowLogIds sets the FlowLogIds field's value. +func (s *DescribeFlowLogsInput) SetFlowLogIds(v []*string) *DescribeFlowLogsInput { + s.FlowLogIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFlowLogsInput) SetMaxResults(v int64) *DescribeFlowLogsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeFlowLogsInput) SetNextToken(v string) *DescribeFlowLogsInput { + s.NextToken = &v + return s +} + +type DescribeFlowLogsOutput struct { + _ struct{} `type:"structure"` + + // Information about the flow logs. + FlowLogs []*FlowLog `locationName:"flowLogSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFlowLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowLogsOutput) GoString() string { + return s.String() +} + +// SetFlowLogs sets the FlowLogs field's value. +func (s *DescribeFlowLogsOutput) SetFlowLogs(v []*FlowLog) *DescribeFlowLogsOutput { + s.FlowLogs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput { + s.NextToken = &v + return s +} + +type DescribeFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AFI attribute. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"FpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. 
+ // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeFpgaImageAttributeInput) SetAttribute(v string) *DescribeFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImageAttributeInput) SetDryRun(v bool) *DescribeFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DescribeFpgaImageAttributeInput) SetFpgaImageId(v string) *DescribeFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +type DescribeFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. 
+func (s *DescribeFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *DescribeFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + +type DescribeFpgaImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * create-time - The creation time of the AFI. + // + // * fpga-image-id - The FPGA image identifier (AFI ID). + // + // * fpga-image-global-id - The global FPGA image identifier (AGFI ID). + // + // * name - The name of the AFI. + // + // * owner-id - The AWS account ID of the AFI owner. + // + // * product-code - The product code. + // + // * shell-version - The version of the AWS Shell that was used to create + // the bitstream. + // + // * state - The state of the AFI (pending | failed | available | unavailable). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * update-time - The time of the most recent update. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The AFI IDs. + FpgaImageIds []*string `locationName:"FpgaImageId" locationNameList:"item" type:"list"` + + // The maximum number of results to return in a single call. 
+ MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + + // Filters the AFI by owner. Specify an AWS account ID, self (owner is the sender + // of the request), or an AWS owner alias (valid values are amazon | aws-marketplace). + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeFpgaImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFpgaImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImagesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImagesInput) SetDryRun(v bool) *DescribeFpgaImagesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFpgaImagesInput) SetFilters(v []*Filter) *DescribeFpgaImagesInput { + s.Filters = v + return s +} + +// SetFpgaImageIds sets the FpgaImageIds field's value. +func (s *DescribeFpgaImagesInput) SetFpgaImageIds(v []*string) *DescribeFpgaImagesInput { + s.FpgaImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFpgaImagesInput) SetMaxResults(v int64) *DescribeFpgaImagesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFpgaImagesInput) SetNextToken(v string) *DescribeFpgaImagesInput { + s.NextToken = &v + return s +} + +// SetOwners sets the Owners field's value. 
+func (s *DescribeFpgaImagesInput) SetOwners(v []*string) *DescribeFpgaImagesInput { + s.Owners = v + return s +} + +type DescribeFpgaImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about the FPGA images. + FpgaImages []*FpgaImage `locationName:"fpgaImageSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFpgaImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImagesOutput) GoString() string { + return s.String() +} + +// SetFpgaImages sets the FpgaImages field's value. +func (s *DescribeFpgaImagesOutput) SetFpgaImages(v []*FpgaImage) *DescribeFpgaImagesOutput { + s.FpgaImages = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFpgaImagesOutput) SetNextToken(v string) *DescribeFpgaImagesOutput { + s.NextToken = &v + return s +} + +type DescribeHostReservationOfferingsInput struct { + _ struct{} `type:"structure"` + + // The filters. + // + // * instance-family - The instance family of the offering (for example, + // m4). + // + // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // This is the maximum duration of the reservation to purchase, specified in + // seconds. Reservations are available in one-year and three-year terms. The + // number of seconds specified must be the number of seconds in a year (365x24x60x60) + // times one of the supported durations (1 or 3). For example, specify 94608000 + // for three years. + MaxDuration *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. 
+ // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"5" type:"integer"` + + // This is the minimum duration of the reservation you'd like to purchase, specified + // in seconds. Reservations are available in one-year and three-year terms. + // The number of seconds specified must be the number of seconds in a year (365x24x60x60) + // times one of the supported durations (1 or 3). For example, specify 31536000 + // for one year. + MinDuration *int64 `type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` + + // The ID of the reservation offering. + OfferingId *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHostReservationOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostReservationOfferingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeHostReservationOfferingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeHostReservationOfferingsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *DescribeHostReservationOfferingsInput) SetFilter(v []*Filter) *DescribeHostReservationOfferingsInput { + s.Filter = v + return s +} + +// SetMaxDuration sets the MaxDuration field's value. 
+func (s *DescribeHostReservationOfferingsInput) SetMaxDuration(v int64) *DescribeHostReservationOfferingsInput { + s.MaxDuration = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeHostReservationOfferingsInput) SetMaxResults(v int64) *DescribeHostReservationOfferingsInput { + s.MaxResults = &v + return s +} + +// SetMinDuration sets the MinDuration field's value. +func (s *DescribeHostReservationOfferingsInput) SetMinDuration(v int64) *DescribeHostReservationOfferingsInput { + s.MinDuration = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostReservationOfferingsInput) SetNextToken(v string) *DescribeHostReservationOfferingsInput { + s.NextToken = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *DescribeHostReservationOfferingsInput) SetOfferingId(v string) *DescribeHostReservationOfferingsInput { + s.OfferingId = &v + return s +} + +type DescribeHostReservationOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the offerings. + OfferingSet []*HostOffering `locationName:"offeringSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeHostReservationOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostReservationOfferingsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostReservationOfferingsOutput) SetNextToken(v string) *DescribeHostReservationOfferingsOutput { + s.NextToken = &v + return s +} + +// SetOfferingSet sets the OfferingSet field's value. 
+func (s *DescribeHostReservationOfferingsOutput) SetOfferingSet(v []*HostOffering) *DescribeHostReservationOfferingsOutput { + s.OfferingSet = v + return s +} + +type DescribeHostReservationsInput struct { + _ struct{} `type:"structure"` + + // The filters. + // + // * instance-family - The instance family (for example, m4). + // + // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). + // + // * state - The state of the reservation (payment-pending | payment-failed + // | active | retired). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // The host reservation IDs. + HostReservationIdSet []*string `locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHostReservationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostReservationsInput) GoString() string { + return s.String() +} + +// SetFilter sets the Filter field's value. 
+func (s *DescribeHostReservationsInput) SetFilter(v []*Filter) *DescribeHostReservationsInput { + s.Filter = v + return s +} + +// SetHostReservationIdSet sets the HostReservationIdSet field's value. +func (s *DescribeHostReservationsInput) SetHostReservationIdSet(v []*string) *DescribeHostReservationsInput { + s.HostReservationIdSet = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeHostReservationsInput) SetMaxResults(v int64) *DescribeHostReservationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostReservationsInput) SetNextToken(v string) *DescribeHostReservationsInput { + s.NextToken = &v + return s +} + +type DescribeHostReservationsOutput struct { + _ struct{} `type:"structure"` + + // Details about the reservation's configuration. + HostReservationSet []*HostReservation `locationName:"hostReservationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostReservationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostReservationsOutput) GoString() string { + return s.String() +} + +// SetHostReservationSet sets the HostReservationSet field's value. +func (s *DescribeHostReservationsOutput) SetHostReservationSet(v []*HostReservation) *DescribeHostReservationsOutput { + s.HostReservationSet = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostReservationsOutput) SetNextToken(v string) *DescribeHostReservationsOutput { + s.NextToken = &v + return s +} + +type DescribeHostsInput struct { + _ struct{} `type:"structure"` + + // The filters. 
+ // + // * auto-placement - Whether auto-placement is enabled or disabled (on | + // off). + // + // * availability-zone - The Availability Zone of the host. + // + // * client-token - The idempotency token that you provided when you allocated + // the host. + // + // * host-reservation-id - The ID of the reservation assigned to this host. + // + // * instance-type - The instance type size that the Dedicated Host is configured + // to support. + // + // * state - The allocation state of the Dedicated Host (available | under-assessment + // | permanent-failure | released | released-permanent-failure). + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The IDs of the Dedicated Hosts. The IDs are used for targeted instance launches. + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + // + // You cannot specify this parameter and the host IDs parameter in the same + // request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsInput) GoString() string { + return s.String() +} + +// SetFilter sets the Filter field's value. 
+func (s *DescribeHostsInput) SetFilter(v []*Filter) *DescribeHostsInput { + s.Filter = v + return s +} + +// SetHostIds sets the HostIds field's value. +func (s *DescribeHostsInput) SetHostIds(v []*string) *DescribeHostsInput { + s.HostIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeHostsInput) SetMaxResults(v int64) *DescribeHostsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostsInput) SetNextToken(v string) *DescribeHostsInput { + s.NextToken = &v + return s +} + +type DescribeHostsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Dedicated Hosts. + Hosts []*Host `locationName:"hostSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeHostsOutput) GoString() string { + return s.String() +} + +// SetHosts sets the Hosts field's value. +func (s *DescribeHostsOutput) SetHosts(v []*Host) *DescribeHostsOutput { + s.Hosts = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeHostsOutput) SetNextToken(v string) *DescribeHostsOutput { + s.NextToken = &v + return s +} + +type DescribeIamInstanceProfileAssociationsInput struct { + _ struct{} `type:"structure"` + + // The IAM instance profile associations. + AssociationIds []*string `locationName:"AssociationId" locationNameList:"AssociationId" type:"list"` + + // The filters. + // + // * instance-id - The ID of the instance. + // + // * state - The state of the association (associating | associated | disassociating). 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeIamInstanceProfileAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIamInstanceProfileAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIamInstanceProfileAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIamInstanceProfileAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationIds sets the AssociationIds field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetAssociationIds(v []*string) *DescribeIamInstanceProfileAssociationsInput { + s.AssociationIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetFilters(v []*Filter) *DescribeIamInstanceProfileAssociationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetMaxResults(v int64) *DescribeIamInstanceProfileAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeIamInstanceProfileAssociationsInput) SetNextToken(v string) *DescribeIamInstanceProfileAssociationsInput { + s.NextToken = &v + return s +} + +type DescribeIamInstanceProfileAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile associations. + IamInstanceProfileAssociations []*IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeIamInstanceProfileAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIamInstanceProfileAssociationsOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociations sets the IamInstanceProfileAssociations field's value. +func (s *DescribeIamInstanceProfileAssociationsOutput) SetIamInstanceProfileAssociations(v []*IamInstanceProfileAssociation) *DescribeIamInstanceProfileAssociationsOutput { + s.IamInstanceProfileAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeIamInstanceProfileAssociationsOutput) SetNextToken(v string) *DescribeIamInstanceProfileAssociationsOutput { + s.NextToken = &v + return s +} + +type DescribeIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway + Resource *string `type:"string"` +} + +// String returns the string representation +func (s DescribeIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatInput) GoString() string { + return s.String() +} + +// SetResource sets the Resource field's value. +func (s *DescribeIdFormatInput) SetResource(v string) *DescribeIdFormatInput { + s.Resource = &v + return s +} + +type DescribeIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resource. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdFormatOutput) GoString() string { + return s.String() +} + +// SetStatuses sets the Statuses field's value. 
+func (s *DescribeIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIdFormatOutput { + s.Statuses = v + return s +} + +type DescribeIdentityIdFormatInput struct { + _ struct{} `type:"structure"` + + // The ARN of the principal, which can be an IAM role, IAM user, or the root + // user. + // + // PrincipalArn is a required field + PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway + Resource *string `locationName:"resource" type:"string"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *DescribeIdentityIdFormatInput) SetPrincipalArn(v string) *DescribeIdentityIdFormatInput { + s.PrincipalArn = &v + return s +} + +// SetResource sets the Resource field's value. 
+func (s *DescribeIdentityIdFormatInput) SetResource(v string) *DescribeIdentityIdFormatInput { + s.Resource = &v + return s +} + +type DescribeIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resources. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatOutput) GoString() string { + return s.String() +} + +// SetStatuses sets the Statuses field's value. +func (s *DescribeIdentityIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeIdentityIdFormatOutput { + s.Statuses = v + return s +} + +// Contains the parameters for DescribeImageAttribute. +type DescribeImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AMI attribute. + // + // Note: Depending on your account privileges, the blockDeviceMapping attribute + // may return a Client.AuthFailure error. If this happens, use DescribeImages + // to get information about the block device mapping for the AMI. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. 
+ // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeImageAttributeInput) SetAttribute(v string) *DescribeImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeImageAttributeInput) SetDryRun(v bool) *DescribeImageAttributeInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageAttributeInput) SetImageId(v string) *DescribeImageAttributeInput { + s.ImageId = &v + return s +} + +// Describes an image attribute. +type DescribeImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // The block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // A description for the AMI. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The kernel ID. + KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // The launch permissions. 
+ LaunchPermissions []*LaunchPermission `locationName:"launchPermission" locationNameList:"item" type:"list"` + + // The product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` +} + +// String returns the string representation +func (s DescribeImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageAttributeOutput) GoString() string { + return s.String() +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *DescribeImageAttributeOutput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *DescribeImageAttributeOutput { + s.BlockDeviceMappings = v + return s +} + +// SetDescription sets the Description field's value. +func (s *DescribeImageAttributeOutput) SetDescription(v *AttributeValue) *DescribeImageAttributeOutput { + s.Description = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageAttributeOutput) SetImageId(v string) *DescribeImageAttributeOutput { + s.ImageId = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *DescribeImageAttributeOutput) SetKernelId(v *AttributeValue) *DescribeImageAttributeOutput { + s.KernelId = v + return s +} + +// SetLaunchPermissions sets the LaunchPermissions field's value. +func (s *DescribeImageAttributeOutput) SetLaunchPermissions(v []*LaunchPermission) *DescribeImageAttributeOutput { + s.LaunchPermissions = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. 
+func (s *DescribeImageAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeImageAttributeOutput { + s.ProductCodes = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *DescribeImageAttributeOutput) SetRamdiskId(v *AttributeValue) *DescribeImageAttributeOutput { + s.RamdiskId = v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *DescribeImageAttributeOutput) SetSriovNetSupport(v *AttributeValue) *DescribeImageAttributeOutput { + s.SriovNetSupport = v + return s +} + +type DescribeImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Scopes the images by users with explicit launch permissions. Specify an AWS + // account ID, self (the sender of the request), or all (public AMIs). + ExecutableUsers []*string `locationName:"ExecutableBy" locationNameList:"ExecutableBy" type:"list"` + + // The filters. + // + // * architecture - The image architecture (i386 | x86_64 | arm64). + // + // * block-device-mapping.delete-on-termination - A Boolean value that indicates + // whether the Amazon EBS volume is deleted on instance termination. + // + // * block-device-mapping.device-name - The device name specified in the + // block device mapping (for example, /dev/sdh or xvdh). + // + // * block-device-mapping.snapshot-id - The ID of the snapshot used for the + // EBS volume. + // + // * block-device-mapping.volume-size - The volume size of the EBS volume, + // in GiB. + // + // * block-device-mapping.volume-type - The volume type of the EBS volume + // (gp2 | io1 | io2 | st1 | sc1 | standard). 
+ // + // * block-device-mapping.encrypted - A Boolean that indicates whether the + // EBS volume is encrypted. + // + // * description - The description of the image (provided during image creation). + // + // * ena-support - A Boolean that indicates whether enhanced networking with + // ENA is enabled. + // + // * hypervisor - The hypervisor type (ovm | xen). + // + // * image-id - The ID of the image. + // + // * image-type - The image type (machine | kernel | ramdisk). + // + // * is-public - A Boolean that indicates whether the image is public. + // + // * kernel-id - The kernel ID. + // + // * manifest-location - The location of the image manifest. + // + // * name - The name of the AMI (provided during image creation). + // + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon + // | aws-marketplace). This is not the user-configured AWS account alias + // set using the IAM console. We recommend that you use the related parameter + // instead of this filter. + // + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. + // + // * platform - The platform. To only list Windows-based AMIs, use windows. + // + // * product-code - The product code. + // + // * product-code.type - The type of the product code (devpay | marketplace). + // + // * ramdisk-id - The RAM disk ID. + // + // * root-device-name - The device name of the root device volume (for example, + // /dev/sda1). + // + // * root-device-type - The type of the root device volume (ebs | instance-store). + // + // * state - The state of the image (available | pending | failed). + // + // * state-reason-code - The reason code for the state change. + // + // * state-reason-message - The message for the state change. + // + // * sriov-net-support - A value of simple indicates that enhanced networking + // with the Intel 82599 VF interface is enabled. 
+ // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * virtualization-type - The virtualization type (paravirtual | hvm). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The image IDs. + // + // Default: Describes all images available to you. + ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` + + // Scopes the results to images with the specified owners. You can specify a + // combination of AWS account IDs, self, amazon, and aws-marketplace. If you + // omit this parameter, the results include all images for which you have launch + // permissions, regardless of ownership. + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeImagesInput) SetDryRun(v bool) *DescribeImagesInput { + s.DryRun = &v + return s +} + +// SetExecutableUsers sets the ExecutableUsers field's value. +func (s *DescribeImagesInput) SetExecutableUsers(v []*string) *DescribeImagesInput { + s.ExecutableUsers = v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeImagesInput) SetFilters(v []*Filter) *DescribeImagesInput { + s.Filters = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *DescribeImagesInput) SetImageIds(v []*string) *DescribeImagesInput { + s.ImageIds = v + return s +} + +// SetOwners sets the Owners field's value. +func (s *DescribeImagesInput) SetOwners(v []*string) *DescribeImagesInput { + s.Owners = v + return s +} + +type DescribeImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about the images. + Images []*Image `locationName:"imagesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImagesOutput) GoString() string { + return s.String() +} + +// SetImages sets the Images field's value. +func (s *DescribeImagesOutput) SetImages(v []*Image) *DescribeImagesOutput { + s.Images = v + return s +} + +type DescribeImportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The IDs of the import image tasks. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeImportImageTasksInput) SetDryRun(v bool) *DescribeImportImageTasksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeImportImageTasksInput) SetFilters(v []*Filter) *DescribeImportImageTasksInput { + s.Filters = v + return s +} + +// SetImportTaskIds sets the ImportTaskIds field's value. +func (s *DescribeImportImageTasksInput) SetImportTaskIds(v []*string) *DescribeImportImageTasksInput { + s.ImportTaskIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImportImageTasksInput) SetMaxResults(v int64) *DescribeImportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImportImageTasksInput) SetNextToken(v string) *DescribeImportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeImportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import image tasks that are currently active or were + // completed or canceled in the previous 7 days. + ImportImageTasks []*ImportImageTask `locationName:"importImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportImageTasksOutput) GoString() string { + return s.String() +} + +// SetImportImageTasks sets the ImportImageTasks field's value. +func (s *DescribeImportImageTasksOutput) SetImportImageTasks(v []*ImportImageTask) *DescribeImportImageTasksOutput { + s.ImportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImportImageTasksOutput) SetNextToken(v string) *DescribeImportImageTasksOutput { + s.NextToken = &v + return s +} + +type DescribeImportSnapshotTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // A list of import snapshot task IDs. + ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeImportSnapshotTasksInput) SetDryRun(v bool) *DescribeImportSnapshotTasksInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeImportSnapshotTasksInput) SetFilters(v []*Filter) *DescribeImportSnapshotTasksInput { + s.Filters = v + return s +} + +// SetImportTaskIds sets the ImportTaskIds field's value. +func (s *DescribeImportSnapshotTasksInput) SetImportTaskIds(v []*string) *DescribeImportSnapshotTasksInput { + s.ImportTaskIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImportSnapshotTasksInput) SetMaxResults(v int64) *DescribeImportSnapshotTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImportSnapshotTasksInput) SetNextToken(v string) *DescribeImportSnapshotTasksInput { + s.NextToken = &v + return s +} + +type DescribeImportSnapshotTasksOutput struct { + _ struct{} `type:"structure"` + + // A list of zero or more import snapshot tasks that are currently active or + // were completed or canceled in the previous 7 days. + ImportSnapshotTasks []*ImportSnapshotTask `locationName:"importSnapshotTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeImportSnapshotTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImportSnapshotTasksOutput) GoString() string { + return s.String() +} + +// SetImportSnapshotTasks sets the ImportSnapshotTasks field's value. +func (s *DescribeImportSnapshotTasksOutput) SetImportSnapshotTasks(v []*ImportSnapshotTask) *DescribeImportSnapshotTasksOutput { + s.ImportSnapshotTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeImportSnapshotTasksOutput) SetNextToken(v string) *DescribeImportSnapshotTasksOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The instance attribute. + // + // Note: The enaSupport attribute is not supported at this time. + // + // Attribute is a required field + Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeInstanceAttributeInput) SetAttribute(v string) *DescribeInstanceAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeInstanceAttributeInput) SetDryRun(v bool) *DescribeInstanceAttributeInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeInstanceAttributeInput) SetInstanceId(v string) *DescribeInstanceAttributeInput { + s.InstanceId = &v + return s +} + +// Describes an instance attribute. +type DescribeInstanceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The block device mapping of the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance through the Amazon + // EC2 console, CLI, or API; otherwise, you can. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // Indicates whether enhanced networking with ENA is enabled. + EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true; + // otherwise, set it to false. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + + // The security groups associated with the instance. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // The instance type. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // The kernel ID. 
+ KernelId *AttributeValue `locationName:"kernel" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The RAM disk ID. + RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` + + // The device name of the root device volume (for example, /dev/sda1). + RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` + + // Indicates whether source/destination checking is enabled. A value of true + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` + + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // The user data. + UserData *AttributeValue `locationName:"userData" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAttributeOutput) GoString() string { + return s.String() +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *DescribeInstanceAttributeOutput) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *DescribeInstanceAttributeOutput { + s.BlockDeviceMappings = v + return s +} + +// SetDisableApiTermination sets the DisableApiTermination field's value. +func (s *DescribeInstanceAttributeOutput) SetDisableApiTermination(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput { + s.DisableApiTermination = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. 
+func (s *DescribeInstanceAttributeOutput) SetEbsOptimized(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput { + s.EbsOptimized = v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *DescribeInstanceAttributeOutput) SetEnaSupport(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput { + s.EnaSupport = v + return s +} + +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *DescribeInstanceAttributeOutput) SetEnclaveOptions(v *EnclaveOptions) *DescribeInstanceAttributeOutput { + s.EnclaveOptions = v + return s +} + +// SetGroups sets the Groups field's value. +func (s *DescribeInstanceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeInstanceAttributeOutput { + s.Groups = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeInstanceAttributeOutput) SetInstanceId(v string) *DescribeInstanceAttributeOutput { + s.InstanceId = &v + return s +} + +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value. +func (s *DescribeInstanceAttributeOutput) SetInstanceInitiatedShutdownBehavior(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.InstanceInitiatedShutdownBehavior = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *DescribeInstanceAttributeOutput) SetInstanceType(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.InstanceType = v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *DescribeInstanceAttributeOutput) SetKernelId(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.KernelId = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *DescribeInstanceAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeInstanceAttributeOutput { + s.ProductCodes = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. 
+func (s *DescribeInstanceAttributeOutput) SetRamdiskId(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.RamdiskId = v + return s +} + +// SetRootDeviceName sets the RootDeviceName field's value. +func (s *DescribeInstanceAttributeOutput) SetRootDeviceName(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.RootDeviceName = v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *DescribeInstanceAttributeOutput) SetSourceDestCheck(v *AttributeBooleanValue) *DescribeInstanceAttributeOutput { + s.SourceDestCheck = v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *DescribeInstanceAttributeOutput) SetSriovNetSupport(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.SriovNetSupport = v + return s +} + +// SetUserData sets the UserData field's value. +func (s *DescribeInstanceAttributeOutput) SetUserData(v *AttributeValue) *DescribeInstanceAttributeOutput { + s.UserData = v + return s +} + +type DescribeInstanceCreditSpecificationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * instance-id - The ID of the instance. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 1000 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. 
You cannot specify this parameter and the + // instance IDs parameter in the same call. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceCreditSpecificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceCreditSpecificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceCreditSpecificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceCreditSpecificationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceCreditSpecificationsInput) SetDryRun(v bool) *DescribeInstanceCreditSpecificationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceCreditSpecificationsInput) SetFilters(v []*Filter) *DescribeInstanceCreditSpecificationsInput { + s.Filters = v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeInstanceCreditSpecificationsInput) SetInstanceIds(v []*string) *DescribeInstanceCreditSpecificationsInput { + s.InstanceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceCreditSpecificationsInput) SetMaxResults(v int64) *DescribeInstanceCreditSpecificationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeInstanceCreditSpecificationsInput) SetNextToken(v string) *DescribeInstanceCreditSpecificationsInput { + s.NextToken = &v + return s +} + +type DescribeInstanceCreditSpecificationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the credit option for CPU usage of an instance. + InstanceCreditSpecifications []*InstanceCreditSpecification `locationName:"instanceCreditSpecificationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceCreditSpecificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceCreditSpecificationsOutput) GoString() string { + return s.String() +} + +// SetInstanceCreditSpecifications sets the InstanceCreditSpecifications field's value. +func (s *DescribeInstanceCreditSpecificationsOutput) SetInstanceCreditSpecifications(v []*InstanceCreditSpecification) *DescribeInstanceCreditSpecificationsOutput { + s.InstanceCreditSpecifications = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceCreditSpecificationsOutput) SetNextToken(v string) *DescribeInstanceCreditSpecificationsOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DescribeInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +type DescribeInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the registered tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *DescribeInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DescribeInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + +type DescribeInstanceStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * availability-zone - The Availability Zone of the instance. 
+ // + // * event.code - The code for the scheduled event (instance-reboot | system-reboot + // | system-maintenance | instance-retirement | instance-stop). + // + // * event.description - A description of the event. + // + // * event.instance-event-id - The ID of the event whose date and time you + // are modifying. + // + // * event.not-after - The latest end time for the scheduled event (for example, + // 2014-09-15T17:15:20.000Z). + // + // * event.not-before - The earliest start time for the scheduled event (for + // example, 2014-09-15T17:15:20.000Z). + // + // * event.not-before-deadline - The deadline for starting the event (for + // example, 2014-09-15T17:15:20.000Z). + // + // * instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is used for internal purposes and should be ignored. + // The low byte is set based on the state represented. The valid values are + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // * instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). + // + // * instance-status.reachability - Filters on instance status where the + // name is reachability (passed | failed | initializing | insufficient-data). + // + // * instance-status.status - The status of the instance (ok | impaired | + // initializing | insufficient-data | not-applicable). + // + // * system-status.reachability - Filters on system status where the name + // is reachability (passed | failed | initializing | insufficient-data). + // + // * system-status.status - The system status of the instance (ok | impaired + // | initializing | insufficient-data | not-applicable). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // When true, includes the health status for all instances. When false, includes + // the health status for running instances only. 
+ // + // Default: false + IncludeAllInstances *bool `locationName:"includeAllInstances" type:"boolean"` + + // The instance IDs. + // + // Default: Describes all your instances. + // + // Constraints: Maximum 100 explicitly specified instance IDs. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. You cannot specify this parameter and the + // instance IDs parameter in the same call. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceStatusInput) SetDryRun(v bool) *DescribeInstanceStatusInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceStatusInput) SetFilters(v []*Filter) *DescribeInstanceStatusInput { + s.Filters = v + return s +} + +// SetIncludeAllInstances sets the IncludeAllInstances field's value. +func (s *DescribeInstanceStatusInput) SetIncludeAllInstances(v bool) *DescribeInstanceStatusInput { + s.IncludeAllInstances = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeInstanceStatusInput) SetInstanceIds(v []*string) *DescribeInstanceStatusInput { + s.InstanceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeInstanceStatusInput) SetMaxResults(v int64) *DescribeInstanceStatusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceStatusInput) SetNextToken(v string) *DescribeInstanceStatusInput { + s.NextToken = &v + return s +} + +type DescribeInstanceStatusOutput struct { + _ struct{} `type:"structure"` + + // Information about the status of the instances. + InstanceStatuses []*InstanceStatus `locationName:"instanceStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceStatusOutput) GoString() string { + return s.String() +} + +// SetInstanceStatuses sets the InstanceStatuses field's value. +func (s *DescribeInstanceStatusOutput) SetInstanceStatuses(v []*InstanceStatus) *DescribeInstanceStatusOutput { + s.InstanceStatuses = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceStatusOutput) SetNextToken(v string) *DescribeInstanceStatusOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceTypeOfferingsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * location - This depends on the location type. 
For example, if the location + // type is region (default), the location is the Region code (for example, + // us-east-2.) + // + // * instance-type - The instance type. For example, c5.2xlarge. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The location type. + LocationType *string `type:"string" enum:"LocationType"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the next + // token value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceTypeOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceTypeOfferingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceTypeOfferingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceTypeOfferingsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceTypeOfferingsInput) SetDryRun(v bool) *DescribeInstanceTypeOfferingsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceTypeOfferingsInput) SetFilters(v []*Filter) *DescribeInstanceTypeOfferingsInput { + s.Filters = v + return s +} + +// SetLocationType sets the LocationType field's value. 
+func (s *DescribeInstanceTypeOfferingsInput) SetLocationType(v string) *DescribeInstanceTypeOfferingsInput { + s.LocationType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceTypeOfferingsInput) SetMaxResults(v int64) *DescribeInstanceTypeOfferingsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceTypeOfferingsInput) SetNextToken(v string) *DescribeInstanceTypeOfferingsInput { + s.NextToken = &v + return s +} + +type DescribeInstanceTypeOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The instance types offered. + InstanceTypeOfferings []*InstanceTypeOffering `locationName:"instanceTypeOfferingSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceTypeOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceTypeOfferingsOutput) GoString() string { + return s.String() +} + +// SetInstanceTypeOfferings sets the InstanceTypeOfferings field's value. +func (s *DescribeInstanceTypeOfferingsOutput) SetInstanceTypeOfferings(v []*InstanceTypeOffering) *DescribeInstanceTypeOfferingsOutput { + s.InstanceTypeOfferings = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceTypeOfferingsOutput) SetNextToken(v string) *DescribeInstanceTypeOfferingsOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceTypesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Filter names and values are case-sensitive. + // + // * auto-recovery-supported - Indicates whether auto recovery is supported + // (true | false). + // + // * bare-metal - Indicates whether it is a bare metal instance type (true + // | false). + // + // * burstable-performance-supported - Indicates whether it is a burstable + // performance instance type (true | false). + // + // * current-generation - Indicates whether this instance type is the latest + // generation instance type of an instance family (true | false). + // + // * ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output + // storage operations per second for an EBS-optimized instance type. + // + // * ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline + // throughput performance for an EBS-optimized instance type, in MB/s. + // + // * ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output + // storage operations per second for an EBS-optimized instance type. + // + // * ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum + // throughput performance for an EBS-optimized instance type, in MB/s. + // + // * ebs-info.ebs-optimized-support - Indicates whether the instance type + // is EBS-optimized (supported | unsupported | default). + // + // * ebs-info.encryption-support - Indicates whether EBS encryption is supported + // (supported | unsupported). 
+ // + // * ebs-info.nvme-support - Indicates whether non-volatile memory express + // (NVMe) is supported for EBS volumes (required | supported | unsupported). + // + // * free-tier-eligible - Indicates whether the instance type is eligible + // to use in the free tier (true | false). + // + // * hibernation-supported - Indicates whether On-Demand hibernation is supported + // (true | false). + // + // * hypervisor - The hypervisor (nitro | xen). + // + // * instance-storage-info.disk.count - The number of local disks. + // + // * instance-storage-info.disk.size-in-gb - The storage size of each instance + // storage disk, in GB. + // + // * instance-storage-info.disk.type - The storage technology for the local + // instance storage disks (hdd | ssd). + // + // * instance-storage-info.nvme-support - Indicates whether non-volatile + // memory express (NVMe) is supported for instance store (required | supported) + // | unsupported). + // + // * instance-storage-info.total-size-in-gb - The total amount of storage + // available from all local instance storage, in GB. + // + // * instance-storage-supported - Indicates whether the instance type has + // local instance storage (true | false). + // + // * instance-type - The instance type (for example c5.2xlarge or c5*). + // + // * memory-info.size-in-mib - The memory size. + // + // * network-info.efa-supported - Indicates whether the instance type supports + // Elastic Fabric Adapter (EFA) (true | false). + // + // * network-info.ena-support - Indicates whether Elastic Network Adapter + // (ENA) is supported or required (required | supported | unsupported). + // + // * network-info.ipv4-addresses-per-interface - The maximum number of private + // IPv4 addresses per network interface. + // + // * network-info.ipv6-addresses-per-interface - The maximum number of private + // IPv6 addresses per network interface. 
+ // + // * network-info.ipv6-supported - Indicates whether the instance type supports + // IPv6 (true | false). + // + // * network-info.maximum-network-interfaces - The maximum number of network + // interfaces per instance. + // + // * network-info.network-performance - The network performance (for example, + // "25 Gigabit"). + // + // * processor-info.supported-architecture - The CPU architecture (arm64 + // | i386 | x86_64). + // + // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in + // GHz. + // + // * supported-root-device-type - The root device type (ebs | instance-store). + // + // * supported-usage-class - The usage class (on-demand | spot). + // + // * supported-virtualization-type - The virtualization type (hvm | paravirtual). + // + // * vcpu-info.default-cores - The default number of cores for the instance + // type. + // + // * vcpu-info.default-threads-per-core - The default number of threads per + // core for the instance type. + // + // * vcpu-info.default-vcpus - The default number of vCPUs for the instance + // type. + // + // * vcpu-info.valid-cores - The number of cores that can be configured for + // the instance type. + // + // * vcpu-info.valid-threads-per-core - The number of threads per core that + // can be configured for the instance type. For example, "1" or "1,2". + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The instance types. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the next + // token value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceTypesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceTypesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceTypesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceTypesInput) SetDryRun(v bool) *DescribeInstanceTypesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceTypesInput) SetFilters(v []*Filter) *DescribeInstanceTypesInput { + s.Filters = v + return s +} + +// SetInstanceTypes sets the InstanceTypes field's value. +func (s *DescribeInstanceTypesInput) SetInstanceTypes(v []*string) *DescribeInstanceTypesInput { + s.InstanceTypes = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceTypesInput) SetMaxResults(v int64) *DescribeInstanceTypesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceTypesInput) SetNextToken(v string) *DescribeInstanceTypesInput { + s.NextToken = &v + return s +} + +type DescribeInstanceTypesOutput struct { + _ struct{} `type:"structure"` + + // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ InstanceTypes []*InstanceTypeInfo `locationName:"instanceTypeSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceTypesOutput) GoString() string { + return s.String() +} + +// SetInstanceTypes sets the InstanceTypes field's value. +func (s *DescribeInstanceTypesOutput) SetInstanceTypes(v []*InstanceTypeInfo) *DescribeInstanceTypesOutput { + s.InstanceTypes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceTypesOutput) SetNextToken(v string) *DescribeInstanceTypesOutput { + s.NextToken = &v + return s +} + +type DescribeInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * affinity - The affinity setting for an instance running on a Dedicated + // Host (default | host). + // + // * architecture - The instance architecture (i386 | x86_64 | arm64). + // + // * availability-zone - The Availability Zone of the instance. + // + // * block-device-mapping.attach-time - The attach time for an EBS volume + // mapped to the instance, for example, 2010-09-15T17:15:20.000Z. + // + // * block-device-mapping.delete-on-termination - A Boolean that indicates + // whether the EBS volume is deleted on instance termination. 
+ // + // * block-device-mapping.device-name - The device name specified in the + // block device mapping (for example, /dev/sdh or xvdh). + // + // * block-device-mapping.status - The status for the EBS volume (attaching + // | attached | detaching | detached). + // + // * block-device-mapping.volume-id - The volume ID of the EBS volume. + // + // * client-token - The idempotency token you provided when you launched + // the instance. + // + // * dns-name - The public DNS name of the instance. + // + // * group-id - The ID of the security group for the instance. EC2-Classic + // only. + // + // * group-name - The name of the security group for the instance. EC2-Classic + // only. + // + // * hibernation-options.configured - A Boolean that indicates whether the + // instance is enabled for hibernation. A value of true means that the instance + // is enabled for hibernation. + // + // * host-id - The ID of the Dedicated Host on which the instance is running, + // if applicable. + // + // * hypervisor - The hypervisor type of the instance (ovm | xen). The value + // xen is used for both Xen and Nitro hypervisors. + // + // * iam-instance-profile.arn - The instance profile associated with the + // instance. Specified as an ARN. + // + // * image-id - The ID of the image used to launch the instance. + // + // * instance-id - The ID of the instance. + // + // * instance-lifecycle - Indicates whether this is a Spot Instance or a + // Scheduled Instance (spot | scheduled). + // + // * instance-state-code - The state of the instance, as a 16-bit unsigned + // integer. The high byte is used for internal purposes and should be ignored. + // The low byte is set based on the state represented. The valid values are: + // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), + // and 80 (stopped). + // + // * instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). 
+ // + // * instance-type - The type of instance (for example, t2.micro). + // + // * instance.group-id - The ID of the security group for the instance. + // + // * instance.group-name - The name of the security group for the instance. + // + // * ip-address - The public IPv4 address of the instance. + // + // * kernel-id - The kernel ID. + // + // * key-name - The name of the key pair used when the instance was launched. + // + // * launch-index - When launching multiple instances, this is the index + // for the instance in the launch group (for example, 0, 1, 2, and so on). + // + // * launch-time - The time when the instance was launched. + // + // * metadata-options.http-tokens - The metadata request authorization state + // (optional | required) + // + // * metadata-options.http-put-response-hop-limit - The http metadata request + // put response hop limit (integer, possible values 1 to 64) + // + // * metadata-options.http-endpoint - Enable or disable metadata access on + // http endpoint (enabled | disabled) + // + // * monitoring-state - Indicates whether detailed monitoring is enabled + // (disabled | enabled). + // + // * network-interface.addresses.private-ip-address - The private IPv4 address + // associated with the network interface. + // + // * network-interface.addresses.primary - Specifies whether the IPv4 address + // of the network interface is the primary private IPv4 address. + // + // * network-interface.addresses.association.public-ip - The ID of the association + // of an Elastic IP address (IPv4) with a network interface. + // + // * network-interface.addresses.association.ip-owner-id - The owner ID of + // the private IPv4 address associated with the network interface. + // + // * network-interface.association.public-ip - The address of the Elastic + // IP address (IPv4) bound to the network interface. 
+ // + // * network-interface.association.ip-owner-id - The owner of the Elastic + // IP address (IPv4) associated with the network interface. + // + // * network-interface.association.allocation-id - The allocation ID returned + // when you allocated the Elastic IP address (IPv4) for your network interface. + // + // * network-interface.association.association-id - The association ID returned + // when the network interface was associated with an IPv4 address. + // + // * network-interface.attachment.attachment-id - The ID of the interface + // attachment. + // + // * network-interface.attachment.instance-id - The ID of the instance to + // which the network interface is attached. + // + // * network-interface.attachment.instance-owner-id - The owner ID of the + // instance to which the network interface is attached. + // + // * network-interface.attachment.device-index - The device index to which + // the network interface is attached. + // + // * network-interface.attachment.status - The status of the attachment (attaching + // | attached | detaching | detached). + // + // * network-interface.attachment.attach-time - The time that the network + // interface was attached to an instance. + // + // * network-interface.attachment.delete-on-termination - Specifies whether + // the attachment is deleted when an instance is terminated. + // + // * network-interface.availability-zone - The Availability Zone for the + // network interface. + // + // * network-interface.description - The description of the network interface. + // + // * network-interface.group-id - The ID of a security group associated with + // the network interface. + // + // * network-interface.group-name - The name of a security group associated + // with the network interface. + // + // * network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated + // with the network interface. + // + // * network-interface.mac-address - The MAC address of the network interface. 
+ //
+ // * network-interface.network-interface-id - The ID of the network interface.
+ //
+ // * network-interface.owner-id - The ID of the owner of the network interface.
+ //
+ // * network-interface.private-dns-name - The private DNS name of the network
+ // interface.
+ //
+ // * network-interface.requester-id - The requester ID for the network interface.
+ //
+ // * network-interface.requester-managed - Indicates whether the network
+ // interface is being managed by AWS.
+ //
+ // * network-interface.status - The status of the network interface (available
+ // | in-use).
+ //
+ // * network-interface.source-dest-check - Whether the network interface
+ // performs source/destination checking. A value of true means that checking
+ // is enabled, and false means that checking is disabled. The value must
+ // be false for the network interface to perform network address translation
+ // (NAT) in your VPC.
+ //
+ // * network-interface.subnet-id - The ID of the subnet for the network interface.
+ //
+ // * network-interface.vpc-id - The ID of the VPC for the network interface.
+ //
+ // * owner-id - The AWS account ID of the instance owner.
+ //
+ // * placement-group-name - The name of the placement group for the instance.
+ //
+ // * placement-partition-number - The partition in which the instance is
+ // located.
+ //
+ // * platform - The platform. To list only Windows instances, use windows.
+ //
+ // * private-dns-name - The private IPv4 DNS name of the instance.
+ //
+ // * private-ip-address - The private IPv4 address of the instance.
+ //
+ // * product-code - The product code associated with the AMI used to launch
+ // the instance.
+ //
+ // * product-code.type - The type of product code (devpay | marketplace).
+ //
+ // * ramdisk-id - The RAM disk ID.
+ //
+ // * reason - The reason for the current state of the instance (for example,
+ // shows "User Initiated [date]" when you stop or terminate the instance).
+ // Similar to the state-reason-code filter. + // + // * requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // * reservation-id - The ID of the instance's reservation. A reservation + // ID is created any time you launch an instance. A reservation ID has a + // one-to-one relationship with an instance launch request, but can be associated + // with more than one instance if you launch multiple instances using the + // same launch request. For example, if you launch one instance, you get + // one reservation ID. If you launch ten instances using the same launch + // request, you also get one reservation ID. + // + // * root-device-name - The device name of the root device volume (for example, + // /dev/sda1). + // + // * root-device-type - The type of the root device volume (ebs | instance-store). + // + // * source-dest-check - Indicates whether the instance performs source/destination + // checking. A value of true means that checking is enabled, and false means + // that checking is disabled. The value must be false for the instance to + // perform network address translation (NAT) in your VPC. + // + // * spot-instance-request-id - The ID of the Spot Instance request. + // + // * state-reason-code - The reason code for the state change. + // + // * state-reason-message - A message that describes the state change. + // + // * subnet-id - The ID of the subnet for the instance. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. 
Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. + // + // * tenancy - The tenancy of an instance (dedicated | default | host). + // + // * virtualization-type - The virtualization type of the instance (paravirtual + // | hvm). + // + // * vpc-id - The ID of the VPC that the instance is running in. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The instance IDs. + // + // Default: Describes all your instances. + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. You cannot specify this parameter and the + // instance IDs parameter in the same call. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstancesInput) SetDryRun(v bool) *DescribeInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstancesInput) SetFilters(v []*Filter) *DescribeInstancesInput { + s.Filters = v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeInstancesInput) SetInstanceIds(v []*string) *DescribeInstancesInput { + s.InstanceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeInstancesInput) SetMaxResults(v int64) *DescribeInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput { + s.NextToken = &v + return s +} + +type DescribeInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the reservations. + Reservations []*Reservation `locationName:"reservationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancesOutput) SetNextToken(v string) *DescribeInstancesOutput { + s.NextToken = &v + return s +} + +// SetReservations sets the Reservations field's value. +func (s *DescribeInstancesOutput) SetReservations(v []*Reservation) *DescribeInstancesOutput { + s.Reservations = v + return s +} + +type DescribeInternetGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * attachment.state - The current state of the attachment between the gateway + // and the VPC (available). Present only if a VPC is attached. + // + // * attachment.vpc-id - The ID of an attached VPC. 
+ // + // * internet-gateway-id - The ID of the Internet gateway. + // + // * owner-id - The ID of the AWS account that owns the internet gateway. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more internet gateway IDs. + // + // Default: Describes all your internet gateways. + InternetGatewayIds []*string `locationName:"internetGatewayId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInternetGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInternetGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeInternetGatewaysInput) SetDryRun(v bool) *DescribeInternetGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInternetGatewaysInput) SetFilters(v []*Filter) *DescribeInternetGatewaysInput { + s.Filters = v + return s +} + +// SetInternetGatewayIds sets the InternetGatewayIds field's value. +func (s *DescribeInternetGatewaysInput) SetInternetGatewayIds(v []*string) *DescribeInternetGatewaysInput { + s.InternetGatewayIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInternetGatewaysInput) SetMaxResults(v int64) *DescribeInternetGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInternetGatewaysInput) SetNextToken(v string) *DescribeInternetGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeInternetGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more internet gateways. + InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeInternetGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInternetGatewaysOutput) GoString() string { + return s.String() +} + +// SetInternetGateways sets the InternetGateways field's value. +func (s *DescribeInternetGatewaysOutput) SetInternetGateways(v []*InternetGateway) *DescribeInternetGatewaysOutput { + s.InternetGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeInternetGatewaysOutput) SetNextToken(v string) *DescribeInternetGatewaysOutput { + s.NextToken = &v + return s +} + +type DescribeIpv6PoolsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the IPv6 address pools. + PoolIds []*string `locationName:"PoolId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIpv6PoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIpv6PoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeIpv6PoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIpv6PoolsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeIpv6PoolsInput) SetDryRun(v bool) *DescribeIpv6PoolsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeIpv6PoolsInput) SetFilters(v []*Filter) *DescribeIpv6PoolsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeIpv6PoolsInput) SetMaxResults(v int64) *DescribeIpv6PoolsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIpv6PoolsInput) SetNextToken(v string) *DescribeIpv6PoolsInput { + s.NextToken = &v + return s +} + +// SetPoolIds sets the PoolIds field's value. +func (s *DescribeIpv6PoolsInput) SetPoolIds(v []*string) *DescribeIpv6PoolsInput { + s.PoolIds = v + return s +} + +type DescribeIpv6PoolsOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 address pools. + Ipv6Pools []*Ipv6Pool `locationName:"ipv6PoolSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeIpv6PoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIpv6PoolsOutput) GoString() string { + return s.String() +} + +// SetIpv6Pools sets the Ipv6Pools field's value. 
+func (s *DescribeIpv6PoolsOutput) SetIpv6Pools(v []*Ipv6Pool) *DescribeIpv6PoolsOutput { + s.Ipv6Pools = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIpv6PoolsOutput) SetNextToken(v string) *DescribeIpv6PoolsOutput { + s.NextToken = &v + return s +} + +type DescribeKeyPairsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * key-pair-id - The ID of the key pair. + // + // * fingerprint - The fingerprint of the key pair. + // + // * key-name - The name of the key pair. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The key pair names. + // + // Default: Describes all your key pairs. + KeyNames []*string `locationName:"KeyName" locationNameList:"KeyName" type:"list"` + + // The IDs of the key pairs. 
+ KeyPairIds []*string `locationName:"KeyPairId" locationNameList:"KeyPairId" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeKeyPairsInput) SetDryRun(v bool) *DescribeKeyPairsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeKeyPairsInput) SetFilters(v []*Filter) *DescribeKeyPairsInput { + s.Filters = v + return s +} + +// SetKeyNames sets the KeyNames field's value. +func (s *DescribeKeyPairsInput) SetKeyNames(v []*string) *DescribeKeyPairsInput { + s.KeyNames = v + return s +} + +// SetKeyPairIds sets the KeyPairIds field's value. +func (s *DescribeKeyPairsInput) SetKeyPairIds(v []*string) *DescribeKeyPairsInput { + s.KeyPairIds = v + return s +} + +type DescribeKeyPairsOutput struct { + _ struct{} `type:"structure"` + + // Information about the key pairs. + KeyPairs []*KeyPairInfo `locationName:"keySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeKeyPairsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeKeyPairsOutput) GoString() string { + return s.String() +} + +// SetKeyPairs sets the KeyPairs field's value. +func (s *DescribeKeyPairsOutput) SetKeyPairs(v []*KeyPairInfo) *DescribeKeyPairsOutput { + s.KeyPairs = v + return s +} + +type DescribeLaunchTemplateVersionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * create-time - The time the launch template version was created. + // + // * ebs-optimized - A boolean that indicates whether the instance is optimized + // for Amazon EBS I/O. + // + // * iam-instance-profile - The ARN of the IAM instance profile. + // + // * image-id - The ID of the AMI. + // + // * instance-type - The instance type. + // + // * is-default-version - A boolean that indicates whether the launch template + // version is the default version. + // + // * kernel-id - The kernel ID. + // + // * ram-disk-id - The RAM disk ID. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The ID of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. + LaunchTemplateName *string `min:"3" type:"string"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 1 and 200. + MaxResults *int64 `type:"integer"` + + // The version number up to which to describe launch template versions. + MaxVersion *string `type:"string"` + + // The version number after which to describe launch template versions. + MinVersion *string `type:"string"` + + // The token to request the next page of results. 
+ NextToken *string `type:"string"` + + // One or more versions of the launch template. Valid values depend on whether + // you are describing a specified launch template (by ID or name) or all launch + // templates in your account. + // + // To describe one or more versions of a specified launch template, valid values + // are $Latest, $Default, and numbers. + // + // To describe all launch templates in your account that are defined as the + // latest version, the valid value is $Latest. To describe all launch templates + // in your account that are defined as the default version, the valid value + // is $Default. You can specify $Latest and $Default in the same call. You cannot + // specify numbers. + Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeLaunchTemplateVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchTemplateVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLaunchTemplateVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLaunchTemplateVersionsInput"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetDryRun(v bool) *DescribeLaunchTemplateVersionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetFilters(v []*Filter) *DescribeLaunchTemplateVersionsInput { + s.Filters = v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. 
+func (s *DescribeLaunchTemplateVersionsInput) SetLaunchTemplateId(v string) *DescribeLaunchTemplateVersionsInput { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetLaunchTemplateName(v string) *DescribeLaunchTemplateVersionsInput { + s.LaunchTemplateName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetMaxResults(v int64) *DescribeLaunchTemplateVersionsInput { + s.MaxResults = &v + return s +} + +// SetMaxVersion sets the MaxVersion field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetMaxVersion(v string) *DescribeLaunchTemplateVersionsInput { + s.MaxVersion = &v + return s +} + +// SetMinVersion sets the MinVersion field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetMinVersion(v string) *DescribeLaunchTemplateVersionsInput { + s.MinVersion = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetNextToken(v string) *DescribeLaunchTemplateVersionsInput { + s.NextToken = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *DescribeLaunchTemplateVersionsInput) SetVersions(v []*string) *DescribeLaunchTemplateVersionsInput { + s.Versions = v + return s +} + +type DescribeLaunchTemplateVersionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template versions. + LaunchTemplateVersions []*LaunchTemplateVersion `locationName:"launchTemplateVersionSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchTemplateVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchTemplateVersionsOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplateVersions sets the LaunchTemplateVersions field's value. +func (s *DescribeLaunchTemplateVersionsOutput) SetLaunchTemplateVersions(v []*LaunchTemplateVersion) *DescribeLaunchTemplateVersionsOutput { + s.LaunchTemplateVersions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLaunchTemplateVersionsOutput) SetNextToken(v string) *DescribeLaunchTemplateVersionsOutput { + s.NextToken = &v + return s +} + +type DescribeLaunchTemplatesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * create-time - The time the launch template was created. + // + // * launch-template-name - The name of the launch template. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more launch template IDs. 
+ LaunchTemplateIds []*string `locationName:"LaunchTemplateId" locationNameList:"item" type:"list"` + + // One or more launch template names. + LaunchTemplateNames []*string `locationName:"LaunchTemplateName" locationNameList:"item" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 1 and 200. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchTemplatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLaunchTemplatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLaunchTemplatesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLaunchTemplatesInput) SetDryRun(v bool) *DescribeLaunchTemplatesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLaunchTemplatesInput) SetFilters(v []*Filter) *DescribeLaunchTemplatesInput { + s.Filters = v + return s +} + +// SetLaunchTemplateIds sets the LaunchTemplateIds field's value. +func (s *DescribeLaunchTemplatesInput) SetLaunchTemplateIds(v []*string) *DescribeLaunchTemplatesInput { + s.LaunchTemplateIds = v + return s +} + +// SetLaunchTemplateNames sets the LaunchTemplateNames field's value. 
+func (s *DescribeLaunchTemplatesInput) SetLaunchTemplateNames(v []*string) *DescribeLaunchTemplatesInput { + s.LaunchTemplateNames = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLaunchTemplatesInput) SetMaxResults(v int64) *DescribeLaunchTemplatesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLaunchTemplatesInput) SetNextToken(v string) *DescribeLaunchTemplatesInput { + s.NextToken = &v + return s +} + +type DescribeLaunchTemplatesOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch templates. + LaunchTemplates []*LaunchTemplate `locationName:"launchTemplates" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLaunchTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLaunchTemplatesOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplates sets the LaunchTemplates field's value. +func (s *DescribeLaunchTemplatesOutput) SetLaunchTemplates(v []*LaunchTemplate) *DescribeLaunchTemplatesOutput { + s.LaunchTemplates = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLaunchTemplatesOutput) SetNextToken(v string) *DescribeLaunchTemplatesOutput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-virtual-interface-group-association-id - The + // ID of the association. + // + // * local-gateway-route-table-virtual-interface-group-id - The ID of the + // virtual interface group. + // + // * state - The state of the association. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the associations. + LocalGatewayRouteTableVirtualInterfaceGroupAssociationIds []*string `locationName:"LocalGatewayRouteTableVirtualInterfaceGroupAssociationId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) SetDryRun(v bool) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) SetFilters(v []*Filter) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput { + s.Filters = v + return s +} + +// SetLocalGatewayRouteTableVirtualInterfaceGroupAssociationIds sets the LocalGatewayRouteTableVirtualInterfaceGroupAssociationIds field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) SetLocalGatewayRouteTableVirtualInterfaceGroupAssociationIds(v []*string) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput { + s.LocalGatewayRouteTableVirtualInterfaceGroupAssociationIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) SetMaxResults(v int64) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) SetNextToken(v string) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations. + LocalGatewayRouteTableVirtualInterfaceGroupAssociations []*LocalGatewayRouteTableVirtualInterfaceGroupAssociation `locationName:"localGatewayRouteTableVirtualInterfaceGroupAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTableVirtualInterfaceGroupAssociations sets the LocalGatewayRouteTableVirtualInterfaceGroupAssociations field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) SetLocalGatewayRouteTableVirtualInterfaceGroupAssociations(v []*LocalGatewayRouteTableVirtualInterfaceGroupAssociation) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput { + s.LocalGatewayRouteTableVirtualInterfaceGroupAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput) SetNextToken(v string) *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTableVpcAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-vpc-association-id - The ID of the association. + // + // * state - The state of the association. + // + // * vpc-id - The ID of the VPC. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the associations. + LocalGatewayRouteTableVpcAssociationIds []*string `locationName:"LocalGatewayRouteTableVpcAssociationId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTableVpcAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTableVpcAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewayRouteTableVpcAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) SetDryRun(v bool) *DescribeLocalGatewayRouteTableVpcAssociationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) SetFilters(v []*Filter) *DescribeLocalGatewayRouteTableVpcAssociationsInput { + s.Filters = v + return s +} + +// SetLocalGatewayRouteTableVpcAssociationIds sets the LocalGatewayRouteTableVpcAssociationIds field's value. 
+func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) SetLocalGatewayRouteTableVpcAssociationIds(v []*string) *DescribeLocalGatewayRouteTableVpcAssociationsInput { + s.LocalGatewayRouteTableVpcAssociationIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) SetMaxResults(v int64) *DescribeLocalGatewayRouteTableVpcAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsInput) SetNextToken(v string) *DescribeLocalGatewayRouteTableVpcAssociationsInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTableVpcAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations. + LocalGatewayRouteTableVpcAssociations []*LocalGatewayRouteTableVpcAssociation `locationName:"localGatewayRouteTableVpcAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTableVpcAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTableVpcAssociationsOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTableVpcAssociations sets the LocalGatewayRouteTableVpcAssociations field's value. +func (s *DescribeLocalGatewayRouteTableVpcAssociationsOutput) SetLocalGatewayRouteTableVpcAssociations(v []*LocalGatewayRouteTableVpcAssociation) *DescribeLocalGatewayRouteTableVpcAssociationsOutput { + s.LocalGatewayRouteTableVpcAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeLocalGatewayRouteTableVpcAssociationsOutput) SetNextToken(v string) *DescribeLocalGatewayRouteTableVpcAssociationsOutput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of a local gateway route table. + // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // + // * state - The state of the local gateway route table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the local gateway route tables. + LocalGatewayRouteTableIds []*string `locationName:"LocalGatewayRouteTableId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeLocalGatewayRouteTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewayRouteTablesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLocalGatewayRouteTablesInput) SetDryRun(v bool) *DescribeLocalGatewayRouteTablesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewayRouteTablesInput) SetFilters(v []*Filter) *DescribeLocalGatewayRouteTablesInput { + s.Filters = v + return s +} + +// SetLocalGatewayRouteTableIds sets the LocalGatewayRouteTableIds field's value. +func (s *DescribeLocalGatewayRouteTablesInput) SetLocalGatewayRouteTableIds(v []*string) *DescribeLocalGatewayRouteTablesInput { + s.LocalGatewayRouteTableIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewayRouteTablesInput) SetMaxResults(v int64) *DescribeLocalGatewayRouteTablesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayRouteTablesInput) SetNextToken(v string) *DescribeLocalGatewayRouteTablesInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayRouteTablesOutput struct { + _ struct{} `type:"structure"` + + // Information about the local gateway route tables. + LocalGatewayRouteTables []*LocalGatewayRouteTable `locationName:"localGatewayRouteTableSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayRouteTablesOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayRouteTables sets the LocalGatewayRouteTables field's value. +func (s *DescribeLocalGatewayRouteTablesOutput) SetLocalGatewayRouteTables(v []*LocalGatewayRouteTable) *DescribeLocalGatewayRouteTablesOutput { + s.LocalGatewayRouteTables = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayRouteTablesOutput) SetNextToken(v string) *DescribeLocalGatewayRouteTablesOutput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayVirtualInterfaceGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-virtual-interface-id - The ID of the virtual interface. + // + // * local-gateway-virtual-interface-group-id - The ID of the virtual interface + // group. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the virtual interface groups. + LocalGatewayVirtualInterfaceGroupIds []*string `locationName:"LocalGatewayVirtualInterfaceGroupId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. 
+ MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayVirtualInterfaceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayVirtualInterfaceGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewayVirtualInterfaceGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) SetDryRun(v bool) *DescribeLocalGatewayVirtualInterfaceGroupsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) SetFilters(v []*Filter) *DescribeLocalGatewayVirtualInterfaceGroupsInput { + s.Filters = v + return s +} + +// SetLocalGatewayVirtualInterfaceGroupIds sets the LocalGatewayVirtualInterfaceGroupIds field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) SetLocalGatewayVirtualInterfaceGroupIds(v []*string) *DescribeLocalGatewayVirtualInterfaceGroupsInput { + s.LocalGatewayVirtualInterfaceGroupIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) SetMaxResults(v int64) *DescribeLocalGatewayVirtualInterfaceGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeLocalGatewayVirtualInterfaceGroupsInput) SetNextToken(v string) *DescribeLocalGatewayVirtualInterfaceGroupsInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayVirtualInterfaceGroupsOutput struct { + _ struct{} `type:"structure"` + + // The virtual interface groups. + LocalGatewayVirtualInterfaceGroups []*LocalGatewayVirtualInterfaceGroup `locationName:"localGatewayVirtualInterfaceGroupSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayVirtualInterfaceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayVirtualInterfaceGroupsOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayVirtualInterfaceGroups sets the LocalGatewayVirtualInterfaceGroups field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsOutput) SetLocalGatewayVirtualInterfaceGroups(v []*LocalGatewayVirtualInterfaceGroup) *DescribeLocalGatewayVirtualInterfaceGroupsOutput { + s.LocalGatewayVirtualInterfaceGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayVirtualInterfaceGroupsOutput) SetNextToken(v string) *DescribeLocalGatewayVirtualInterfaceGroupsOutput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayVirtualInterfacesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the virtual interfaces. + LocalGatewayVirtualInterfaceIds []*string `locationName:"LocalGatewayVirtualInterfaceId" locationNameList:"item" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayVirtualInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayVirtualInterfacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocalGatewayVirtualInterfacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewayVirtualInterfacesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLocalGatewayVirtualInterfacesInput) SetDryRun(v bool) *DescribeLocalGatewayVirtualInterfacesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewayVirtualInterfacesInput) SetFilters(v []*Filter) *DescribeLocalGatewayVirtualInterfacesInput { + s.Filters = v + return s +} + +// SetLocalGatewayVirtualInterfaceIds sets the LocalGatewayVirtualInterfaceIds field's value. 
+func (s *DescribeLocalGatewayVirtualInterfacesInput) SetLocalGatewayVirtualInterfaceIds(v []*string) *DescribeLocalGatewayVirtualInterfacesInput { + s.LocalGatewayVirtualInterfaceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewayVirtualInterfacesInput) SetMaxResults(v int64) *DescribeLocalGatewayVirtualInterfacesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewayVirtualInterfacesInput) SetNextToken(v string) *DescribeLocalGatewayVirtualInterfacesInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewayVirtualInterfacesOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual interfaces. + LocalGatewayVirtualInterfaces []*LocalGatewayVirtualInterface `locationName:"localGatewayVirtualInterfaceSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewayVirtualInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewayVirtualInterfacesOutput) GoString() string { + return s.String() +} + +// SetLocalGatewayVirtualInterfaces sets the LocalGatewayVirtualInterfaces field's value. +func (s *DescribeLocalGatewayVirtualInterfacesOutput) SetLocalGatewayVirtualInterfaces(v []*LocalGatewayVirtualInterface) *DescribeLocalGatewayVirtualInterfacesOutput { + s.LocalGatewayVirtualInterfaces = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeLocalGatewayVirtualInterfacesOutput) SetNextToken(v string) *DescribeLocalGatewayVirtualInterfacesOutput {
+	s.NextToken = &v
+	return s
+}
+
+type DescribeLocalGatewaysInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// One or more filters.
+	Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
+
+	// The IDs of the local gateways.
+	LocalGatewayIds []*string `locationName:"LocalGatewayId" locationNameList:"item" type:"list"`
+
+	// The maximum number of results to return with a single call. To retrieve the
+	// remaining results, make another call with the returned nextToken value.
+	MaxResults *int64 `min:"5" type:"integer"`
+
+	// The token for the next page of results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeLocalGatewaysInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLocalGatewaysInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeLocalGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocalGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeLocalGatewaysInput) SetDryRun(v bool) *DescribeLocalGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeLocalGatewaysInput) SetFilters(v []*Filter) *DescribeLocalGatewaysInput { + s.Filters = v + return s +} + +// SetLocalGatewayIds sets the LocalGatewayIds field's value. +func (s *DescribeLocalGatewaysInput) SetLocalGatewayIds(v []*string) *DescribeLocalGatewaysInput { + s.LocalGatewayIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeLocalGatewaysInput) SetMaxResults(v int64) *DescribeLocalGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewaysInput) SetNextToken(v string) *DescribeLocalGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeLocalGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the local gateways. + LocalGateways []*LocalGateway `locationName:"localGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeLocalGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocalGatewaysOutput) GoString() string { + return s.String() +} + +// SetLocalGateways sets the LocalGateways field's value. 
+func (s *DescribeLocalGatewaysOutput) SetLocalGateways(v []*LocalGateway) *DescribeLocalGatewaysOutput { + s.LocalGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeLocalGatewaysOutput) SetNextToken(v string) *DescribeLocalGatewaysOutput { + s.NextToken = &v + return s +} + +type DescribeManagedPrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * owner-id - The ID of the prefix list owner. + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-name - The name of the prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeManagedPrefixListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeManagedPrefixListsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeManagedPrefixListsInput) SetDryRun(v bool) *DescribeManagedPrefixListsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeManagedPrefixListsInput) SetFilters(v []*Filter) *DescribeManagedPrefixListsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeManagedPrefixListsInput) SetMaxResults(v int64) *DescribeManagedPrefixListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsInput) SetNextToken(v string) *DescribeManagedPrefixListsInput { + s.NextToken = &v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *DescribeManagedPrefixListsInput) SetPrefixListIds(v []*string) *DescribeManagedPrefixListsInput { + s.PrefixListIds = v + return s +} + +type DescribeManagedPrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix lists. 
+ PrefixLists []*ManagedPrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsOutput) SetNextToken(v string) *DescribeManagedPrefixListsOutput { + s.NextToken = &v + return s +} + +// SetPrefixLists sets the PrefixLists field's value. +func (s *DescribeManagedPrefixListsOutput) SetPrefixLists(v []*ManagedPrefixList) *DescribeManagedPrefixListsOutput { + s.PrefixLists = v + return s +} + +type DescribeMovingAddressesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * moving-status - The status of the Elastic IP address (MovingToVpc | + // RestoringToClassic). + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1000; if MaxResults is given a value outside of this range, an error is returned. + // + // Default: If no value is provided, the default is 1000. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next page of results. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Elastic IP addresses. + PublicIps []*string `locationName:"publicIp" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeMovingAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMovingAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMovingAddressesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeMovingAddressesInput) SetDryRun(v bool) *DescribeMovingAddressesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMovingAddressesInput) SetFilters(v []*Filter) *DescribeMovingAddressesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMovingAddressesInput) SetMaxResults(v int64) *DescribeMovingAddressesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMovingAddressesInput) SetNextToken(v string) *DescribeMovingAddressesInput { + s.NextToken = &v + return s +} + +// SetPublicIps sets the PublicIps field's value. +func (s *DescribeMovingAddressesInput) SetPublicIps(v []*string) *DescribeMovingAddressesInput { + s.PublicIps = v + return s +} + +type DescribeMovingAddressesOutput struct { + _ struct{} `type:"structure"` + + // The status for each Elastic IP address. 
+ MovingAddressStatuses []*MovingAddressStatus `locationName:"movingAddressStatusSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeMovingAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMovingAddressesOutput) GoString() string { + return s.String() +} + +// SetMovingAddressStatuses sets the MovingAddressStatuses field's value. +func (s *DescribeMovingAddressesOutput) SetMovingAddressStatuses(v []*MovingAddressStatus) *DescribeMovingAddressesOutput { + s.MovingAddressStatuses = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMovingAddressesOutput) SetNextToken(v string) *DescribeMovingAddressesOutput { + s.NextToken = &v + return s +} + +type DescribeNatGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * nat-gateway-id - The ID of the NAT gateway. + // + // * state - The state of the NAT gateway (pending | failed | available | + // deleting | deleted). + // + // * subnet-id - The ID of the subnet in which the NAT gateway resides. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. 
+ // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC in which the NAT gateway resides. + Filter []*Filter `locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // One or more NAT gateway IDs. + NatGatewayIds []*string `locationName:"NatGatewayId" locationNameList:"item" type:"list"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNatGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNatGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeNatGatewaysInput) SetDryRun(v bool) *DescribeNatGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilter sets the Filter field's value. +func (s *DescribeNatGatewaysInput) SetFilter(v []*Filter) *DescribeNatGatewaysInput { + s.Filter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNatGatewaysInput) SetMaxResults(v int64) *DescribeNatGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNatGatewayIds sets the NatGatewayIds field's value. 
+func (s *DescribeNatGatewaysInput) SetNatGatewayIds(v []*string) *DescribeNatGatewaysInput { + s.NatGatewayIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNatGatewaysInput) SetNextToken(v string) *DescribeNatGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeNatGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the NAT gateways. + NatGateways []*NatGateway `locationName:"natGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNatGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNatGatewaysOutput) GoString() string { + return s.String() +} + +// SetNatGateways sets the NatGateways field's value. +func (s *DescribeNatGatewaysOutput) SetNatGateways(v []*NatGateway) *DescribeNatGatewaysOutput { + s.NatGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNatGatewaysOutput) SetNextToken(v string) *DescribeNatGatewaysOutput { + s.NextToken = &v + return s +} + +type DescribeNetworkAclsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * association.association-id - The ID of an association ID for the ACL. + // + // * association.network-acl-id - The ID of the network ACL involved in the + // association. 
+ // + // * association.subnet-id - The ID of the subnet involved in the association. + // + // * default - Indicates whether the ACL is the default network ACL for the + // VPC. + // + // * entry.cidr - The IPv4 CIDR range specified in the entry. + // + // * entry.icmp.code - The ICMP code specified in the entry, if any. + // + // * entry.icmp.type - The ICMP type specified in the entry, if any. + // + // * entry.ipv6-cidr - The IPv6 CIDR range specified in the entry. + // + // * entry.port-range.from - The start of the port range specified in the + // entry. + // + // * entry.port-range.to - The end of the port range specified in the entry. + // + // * entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // or a protocol number). + // + // * entry.rule-action - Allows or denies the matching traffic (allow | deny). + // + // * entry.rule-number - The number of an entry (in other words, rule) in + // the set of ACL entries. + // + // * network-acl-id - The ID of the network ACL. + // + // * owner-id - The ID of the AWS account that owns the network ACL. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC for the network ACL. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // One or more network ACL IDs. 
+ // + // Default: Describes all your network ACLs. + NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkAclsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkAclsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkAclsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeNetworkAclsInput) SetDryRun(v bool) *DescribeNetworkAclsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeNetworkAclsInput) SetFilters(v []*Filter) *DescribeNetworkAclsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNetworkAclsInput) SetMaxResults(v int64) *DescribeNetworkAclsInput { + s.MaxResults = &v + return s +} + +// SetNetworkAclIds sets the NetworkAclIds field's value. +func (s *DescribeNetworkAclsInput) SetNetworkAclIds(v []*string) *DescribeNetworkAclsInput { + s.NetworkAclIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkAclsInput) SetNextToken(v string) *DescribeNetworkAclsInput { + s.NextToken = &v + return s +} + +type DescribeNetworkAclsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network ACLs. 
+ NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkAclsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkAclsOutput) GoString() string { + return s.String() +} + +// SetNetworkAcls sets the NetworkAcls field's value. +func (s *DescribeNetworkAclsOutput) SetNetworkAcls(v []*NetworkAcl) *DescribeNetworkAclsOutput { + s.NetworkAcls = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkAclsOutput) SetNextToken(v string) *DescribeNetworkAclsOutput { + s.NextToken = &v + return s +} + +// Contains the parameters for DescribeNetworkInterfaceAttribute. +type DescribeNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute of the network interface. This parameter is required. + Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the network interface. 
+ // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfaceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfaceAttributeInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeNetworkInterfaceAttributeInput) SetAttribute(v string) *DescribeNetworkInterfaceAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeNetworkInterfaceAttributeInput) SetDryRun(v bool) *DescribeNetworkInterfaceAttributeInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *DescribeNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *DescribeNetworkInterfaceAttributeInput { + s.NetworkInterfaceId = &v + return s +} + +// Contains the output of DescribeNetworkInterfaceAttribute. +type DescribeNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` + + // The attachment (if any) of the network interface. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The description of the network interface. + Description *AttributeValue `locationName:"description" type:"structure"` + + // The security groups associated with the network interface. 
+ Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Indicates whether source/destination checking is enabled. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +// SetAttachment sets the Attachment field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetAttachment(v *NetworkInterfaceAttachment) *DescribeNetworkInterfaceAttributeOutput { + s.Attachment = v + return s +} + +// SetDescription sets the Description field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetDescription(v *AttributeValue) *DescribeNetworkInterfaceAttributeOutput { + s.Description = v + return s +} + +// SetGroups sets the Groups field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeNetworkInterfaceAttributeOutput { + s.Groups = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetNetworkInterfaceId(v string) *DescribeNetworkInterfaceAttributeOutput { + s.NetworkInterfaceId = &v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *AttributeBooleanValue) *DescribeNetworkInterfaceAttributeOutput { + s.SourceDestCheck = v + return s +} + +// Contains the parameters for DescribeNetworkInterfacePermissions. +type DescribeNetworkInterfacePermissionsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. 
+ // + // * network-interface-permission.network-interface-permission-id - The ID + // of the permission. + // + // * network-interface-permission.network-interface-id - The ID of the network + // interface. + // + // * network-interface-permission.aws-account-id - The AWS account ID. + // + // * network-interface-permission.aws-service - The AWS service. + // + // * network-interface-permission.permission - The type of permission (INSTANCE-ATTACH + // | EIP-ASSOCIATE). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. If + // this parameter is not specified, up to 50 results are returned by default. + MaxResults *int64 `min:"5" type:"integer"` + + // One or more network interface permission IDs. + NetworkInterfacePermissionIds []*string `locationName:"NetworkInterfacePermissionId" type:"list"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfacePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfacePermissionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeNetworkInterfacePermissionsInput) SetFilters(v []*Filter) *DescribeNetworkInterfacePermissionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetMaxResults(v int64) *DescribeNetworkInterfacePermissionsInput { + s.MaxResults = &v + return s +} + +// SetNetworkInterfacePermissionIds sets the NetworkInterfacePermissionIds field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNetworkInterfacePermissionIds(v []*string) *DescribeNetworkInterfacePermissionsInput { + s.NetworkInterfacePermissionIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsInput { + s.NextToken = &v + return s +} + +// Contains the output for DescribeNetworkInterfacePermissions. +type DescribeNetworkInterfacePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The network interface permissions. + NetworkInterfacePermissions []*NetworkInterfacePermission `locationName:"networkInterfacePermissions" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) GoString() string { + return s.String() +} + +// SetNetworkInterfacePermissions sets the NetworkInterfacePermissions field's value. +func (s *DescribeNetworkInterfacePermissionsOutput) SetNetworkInterfacePermissions(v []*NetworkInterfacePermission) *DescribeNetworkInterfacePermissionsOutput { + s.NetworkInterfacePermissions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeNetworkInterfacePermissionsOutput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsOutput { + s.NextToken = &v + return s +} + +// Contains the parameters for DescribeNetworkInterfaces. +type DescribeNetworkInterfacesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * addresses.private-ip-address - The private IPv4 addresses associated + // with the network interface. + // + // * addresses.primary - Whether the private IPv4 address is the primary + // IP address associated with the network interface. + // + // * addresses.association.public-ip - The association ID returned when the + // network interface was associated with the Elastic IP address (IPv4). + // + // * addresses.association.owner-id - The owner ID of the addresses associated + // with the network interface. + // + // * association.association-id - The association ID returned when the network + // interface was associated with an IPv4 address. + // + // * association.allocation-id - The allocation ID returned when you allocated + // the Elastic IP address (IPv4) for your network interface. + // + // * association.ip-owner-id - The owner of the Elastic IP address (IPv4) + // associated with the network interface. + // + // * association.public-ip - The address of the Elastic IP address (IPv4) + // bound to the network interface. + // + // * association.public-dns-name - The public DNS name for the network interface + // (IPv4). + // + // * attachment.attachment-id - The ID of the interface attachment. + // + // * attachment.attach-time - The time that the network interface was attached + // to an instance. 
+ // + // * attachment.delete-on-termination - Indicates whether the attachment + // is deleted when an instance is terminated. + // + // * attachment.device-index - The device index to which the network interface + // is attached. + // + // * attachment.instance-id - The ID of the instance to which the network + // interface is attached. + // + // * attachment.instance-owner-id - The owner ID of the instance to which + // the network interface is attached. + // + // * attachment.status - The status of the attachment (attaching | attached + // | detaching | detached). + // + // * availability-zone - The Availability Zone of the network interface. + // + // * description - The description of the network interface. + // + // * group-id - The ID of a security group associated with the network interface. + // + // * group-name - The name of a security group associated with the network + // interface. + // + // * ipv6-addresses.ipv6-address - An IPv6 address associated with the network + // interface. + // + // * mac-address - The MAC address of the network interface. + // + // * network-interface-id - The ID of the network interface. + // + // * owner-id - The AWS account ID of the network interface owner. + // + // * private-ip-address - The private IPv4 address or addresses of the network + // interface. + // + // * private-dns-name - The private DNS name of the network interface (IPv4). + // + // * requester-id - The ID of the entity that launched the instance on your + // behalf (for example, AWS Management Console, Auto Scaling, and so on). + // + // * requester-managed - Indicates whether the network interface is being + // managed by an AWS service (for example, AWS Management Console, Auto Scaling, + // and so on). + // + // * source-dest-check - Indicates whether the network interface performs + // source/destination checking. A value of true means checking is enabled, + // and false means checking is disabled. 
The value must be false for the + // network interface to perform network address translation (NAT) in your + // VPC. + // + // * status - The status of the network interface. If the network interface + // is not attached to an instance, the status is available; if a network + // interface is attached to an instance the status is in-use. + // + // * subnet-id - The ID of the subnet for the network interface. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC for the network interface. + Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. You cannot specify this parameter and the network interface IDs + // parameter in the same request. + MaxResults *int64 `min:"5" type:"integer"` + + // One or more network interface IDs. + // + // Default: Describes all your network interfaces. + NetworkInterfaceIds []*string `locationName:"NetworkInterfaceId" locationNameList:"item" type:"list"` + + // The token to retrieve the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNetworkInterfacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNetworkInterfacesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeNetworkInterfacesInput) SetDryRun(v bool) *DescribeNetworkInterfacesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeNetworkInterfacesInput) SetFilters(v []*Filter) *DescribeNetworkInterfacesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNetworkInterfacesInput) SetMaxResults(v int64) *DescribeNetworkInterfacesInput { + s.MaxResults = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *DescribeNetworkInterfacesInput) SetNetworkInterfaceIds(v []*string) *DescribeNetworkInterfacesInput { + s.NetworkInterfaceIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacesInput) SetNextToken(v string) *DescribeNetworkInterfacesInput { + s.NextToken = &v + return s +} + +// Contains the output of DescribeNetworkInterfaces. +type DescribeNetworkInterfacesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more network interfaces. 
+ NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacesOutput) GoString() string { + return s.String() +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *DescribeNetworkInterfacesOutput) SetNetworkInterfaces(v []*NetworkInterface) *DescribeNetworkInterfacesOutput { + s.NetworkInterfaces = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacesOutput) SetNextToken(v string) *DescribeNetworkInterfacesOutput { + s.NextToken = &v + return s +} + +type DescribePlacementGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * group-name - The name of the placement group. + // + // * state - The state of the placement group (pending | available | deleting + // | deleted). + // + // * strategy - The strategy of the placement group (cluster | spread | partition). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the placement groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"GroupId" type:"list"` + + // The names of the placement groups. + // + // Default: Describes all your placement groups, or only those otherwise specified. + GroupNames []*string `locationName:"groupName" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribePlacementGroupsInput) SetDryRun(v bool) *DescribePlacementGroupsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribePlacementGroupsInput) SetFilters(v []*Filter) *DescribePlacementGroupsInput { + s.Filters = v + return s +} + +// SetGroupIds sets the GroupIds field's value. +func (s *DescribePlacementGroupsInput) SetGroupIds(v []*string) *DescribePlacementGroupsInput { + s.GroupIds = v + return s +} + +// SetGroupNames sets the GroupNames field's value. +func (s *DescribePlacementGroupsInput) SetGroupNames(v []*string) *DescribePlacementGroupsInput { + s.GroupNames = v + return s +} + +type DescribePlacementGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about the placement groups. 
+ PlacementGroups []*PlacementGroup `locationName:"placementGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePlacementGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePlacementGroupsOutput) GoString() string { + return s.String() +} + +// SetPlacementGroups sets the PlacementGroups field's value. +func (s *DescribePlacementGroupsOutput) SetPlacementGroups(v []*PlacementGroup) *DescribePlacementGroupsOutput { + s.PlacementGroups = v + return s +} + +type DescribePrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * prefix-list-id: The ID of a prefix list. + // + // * prefix-list-name: The name of a prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribePrefixListsInput) SetDryRun(v bool) *DescribePrefixListsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribePrefixListsInput) SetFilters(v []*Filter) *DescribePrefixListsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePrefixListsInput) SetMaxResults(v int64) *DescribePrefixListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePrefixListsInput) SetNextToken(v string) *DescribePrefixListsInput { + s.NextToken = &v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *DescribePrefixListsInput) SetPrefixListIds(v []*string) *DescribePrefixListsInput { + s.PrefixListIds = v + return s +} + +type DescribePrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // All available prefix lists. + PrefixLists []*PrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrefixListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePrefixListsOutput) SetNextToken(v string) *DescribePrefixListsOutput { + s.NextToken = &v + return s +} + +// SetPrefixLists sets the PrefixLists field's value. 
+func (s *DescribePrefixListsOutput) SetPrefixLists(v []*PrefixList) *DescribePrefixListsOutput { + s.PrefixLists = v + return s +} + +type DescribePrincipalIdFormatInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | instance | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | reservation + // | route-table | route-table-association | security-group | snapshot | subnet + // | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association + // | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway + Resources []*string `locationName:"Resource" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrincipalIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrincipalIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePrincipalIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePrincipalIdFormatInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribePrincipalIdFormatInput) SetDryRun(v bool) *DescribePrincipalIdFormatInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePrincipalIdFormatInput) SetMaxResults(v int64) *DescribePrincipalIdFormatInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePrincipalIdFormatInput) SetNextToken(v string) *DescribePrincipalIdFormatInput { + s.NextToken = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *DescribePrincipalIdFormatInput) SetResources(v []*string) *DescribePrincipalIdFormatInput { + s.Resources = v + return s +} + +type DescribePrincipalIdFormatOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the ID format settings for the ARN. + Principals []*PrincipalIdFormat `locationName:"principalSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePrincipalIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePrincipalIdFormatOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribePrincipalIdFormatOutput) SetNextToken(v string) *DescribePrincipalIdFormatOutput { + s.NextToken = &v + return s +} + +// SetPrincipals sets the Principals field's value. +func (s *DescribePrincipalIdFormatOutput) SetPrincipals(v []*PrincipalIdFormat) *DescribePrincipalIdFormatOutput { + s.Principals = v + return s +} + +type DescribePublicIpv4PoolsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the address pools. + PoolIds []*string `locationName:"PoolId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePublicIpv4PoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePublicIpv4PoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribePublicIpv4PoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePublicIpv4PoolsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribePublicIpv4PoolsInput) SetFilters(v []*Filter) *DescribePublicIpv4PoolsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePublicIpv4PoolsInput) SetMaxResults(v int64) *DescribePublicIpv4PoolsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePublicIpv4PoolsInput) SetNextToken(v string) *DescribePublicIpv4PoolsInput { + s.NextToken = &v + return s +} + +// SetPoolIds sets the PoolIds field's value. +func (s *DescribePublicIpv4PoolsInput) SetPoolIds(v []*string) *DescribePublicIpv4PoolsInput { + s.PoolIds = v + return s +} + +type DescribePublicIpv4PoolsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the address pools. + PublicIpv4Pools []*PublicIpv4Pool `locationName:"publicIpv4PoolSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribePublicIpv4PoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePublicIpv4PoolsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePublicIpv4PoolsOutput) SetNextToken(v string) *DescribePublicIpv4PoolsOutput { + s.NextToken = &v + return s +} + +// SetPublicIpv4Pools sets the PublicIpv4Pools field's value. 
+func (s *DescribePublicIpv4PoolsOutput) SetPublicIpv4Pools(v []*PublicIpv4Pool) *DescribePublicIpv4PoolsOutput { + s.PublicIpv4Pools = v + return s +} + +type DescribeRegionsInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to display all Regions, including Regions that are disabled + // for your account. + AllRegions *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com). + // + // * opt-in-status - The opt-in status of the Region (opt-in-not-required + // | opted-in | not-opted-in). + // + // * region-name - The name of the Region (for example, us-east-1). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The names of the Regions. You can specify any Regions, whether they are enabled + // and disabled for your account. + RegionNames []*string `locationName:"RegionName" locationNameList:"RegionName" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsInput) GoString() string { + return s.String() +} + +// SetAllRegions sets the AllRegions field's value. +func (s *DescribeRegionsInput) SetAllRegions(v bool) *DescribeRegionsInput { + s.AllRegions = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeRegionsInput) SetDryRun(v bool) *DescribeRegionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeRegionsInput) SetFilters(v []*Filter) *DescribeRegionsInput { + s.Filters = v + return s +} + +// SetRegionNames sets the RegionNames field's value. +func (s *DescribeRegionsInput) SetRegionNames(v []*string) *DescribeRegionsInput { + s.RegionNames = v + return s +} + +type DescribeRegionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Regions. + Regions []*Region `locationName:"regionInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRegionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionsOutput) GoString() string { + return s.String() +} + +// SetRegions sets the Regions field's value. +func (s *DescribeRegionsOutput) SetRegions(v []*Region) *DescribeRegionsOutput { + s.Regions = v + return s +} + +// Contains the parameters for DescribeReservedInstances. +type DescribeReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * availability-zone - The Availability Zone where the Reserved Instance + // can be used. + // + // * duration - The duration of the Reserved Instance (one year or three + // years), in seconds (31536000 | 94608000). + // + // * end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // + // * fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // * instance-type - The instance type that is covered by the reservation. + // + // * scope - The scope of the Reserved Instance (Region or Availability Zone). 
+ // + // * product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description + // will only be displayed to EC2-Classic account holders and are for use + // with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE + // Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux + // (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server + // Standard | Windows with SQL Server Standard (Amazon VPC) | Windows with + // SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows with + // SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon VPC)). + // + // * reserved-instances-id - The ID of the Reserved Instance. + // + // * start - The time at which the Reserved Instance purchase request was + // placed (for example, 2014-08-07T11:54:42.000Z). + // + // * state - The state of the Reserved Instance (payment-pending | active + // | payment-failed | retired). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Describes whether the Reserved Instance is Standard or Convertible. + OfferingClass *string `type:"string" enum:"OfferingClassType"` + + // The Reserved Instance offering type. 
If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // One or more Reserved Instance IDs. + // + // Default: Describes all your Reserved Instances, or only those otherwise specified. + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeReservedInstancesInput) SetDryRun(v bool) *DescribeReservedInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReservedInstancesInput) SetFilters(v []*Filter) *DescribeReservedInstancesInput { + s.Filters = v + return s +} + +// SetOfferingClass sets the OfferingClass field's value. +func (s *DescribeReservedInstancesInput) SetOfferingClass(v string) *DescribeReservedInstancesInput { + s.OfferingClass = &v + return s +} + +// SetOfferingType sets the OfferingType field's value. +func (s *DescribeReservedInstancesInput) SetOfferingType(v string) *DescribeReservedInstancesInput { + s.OfferingType = &v + return s +} + +// SetReservedInstancesIds sets the ReservedInstancesIds field's value. +func (s *DescribeReservedInstancesInput) SetReservedInstancesIds(v []*string) *DescribeReservedInstancesInput { + s.ReservedInstancesIds = v + return s +} + +// Contains the parameters for DescribeReservedInstancesListings. +type DescribeReservedInstancesListingsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. 
+ // + // * reserved-instances-id - The ID of the Reserved Instances. + // + // * reserved-instances-listing-id - The ID of the Reserved Instances listing. + // + // * status - The status of the Reserved Instance listing (pending | active + // | cancelled | closed). + // + // * status-message - The reason for the status. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more Reserved Instance IDs. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // One or more Reserved Instance listing IDs. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsInput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReservedInstancesListingsInput) SetFilters(v []*Filter) *DescribeReservedInstancesListingsInput { + s.Filters = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *DescribeReservedInstancesListingsInput) SetReservedInstancesId(v string) *DescribeReservedInstancesListingsInput { + s.ReservedInstancesId = &v + return s +} + +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value. +func (s *DescribeReservedInstancesListingsInput) SetReservedInstancesListingId(v string) *DescribeReservedInstancesListingsInput { + s.ReservedInstancesListingId = &v + return s +} + +// Contains the output of DescribeReservedInstancesListings. +type DescribeReservedInstancesListingsOutput struct { + _ struct{} `type:"structure"` + + // Information about the Reserved Instance listing. 
+ ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesListingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesListingsOutput) GoString() string { + return s.String() +} + +// SetReservedInstancesListings sets the ReservedInstancesListings field's value. +func (s *DescribeReservedInstancesListingsOutput) SetReservedInstancesListings(v []*ReservedInstancesListing) *DescribeReservedInstancesListingsOutput { + s.ReservedInstancesListings = v + return s +} + +// Contains the parameters for DescribeReservedInstancesModifications. +type DescribeReservedInstancesModificationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // * client-token - The idempotency token for the modification request. + // + // * create-date - The time when the modification request was created. + // + // * effective-date - The time when the modification becomes effective. + // + // * modification-result.reserved-instances-id - The ID for the Reserved + // Instances created as part of the modification request. This ID is only + // available when the status of the modification is fulfilled. + // + // * modification-result.target-configuration.availability-zone - The Availability + // Zone for the new Reserved Instances. + // + // * modification-result.target-configuration.instance-count - The number + // of new Reserved Instances. + // + // * modification-result.target-configuration.instance-type - The instance + // type of the new Reserved Instances. + // + // * modification-result.target-configuration.platform - The network platform + // of the new Reserved Instances (EC2-Classic | EC2-VPC). + // + // * reserved-instances-id - The ID of the Reserved Instances modified. 
+ // + // * reserved-instances-modification-id - The ID of the modification request. + // + // * status - The status of the Reserved Instances modification request (processing + // | fulfilled | failed). + // + // * status-message - The reason for the status. + // + // * update-date - The time when the modification request was last updated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // IDs for the submitted modification request. + ReservedInstancesModificationIds []*string `locationName:"ReservedInstancesModificationId" locationNameList:"ReservedInstancesModificationId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsInput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReservedInstancesModificationsInput) SetFilters(v []*Filter) *DescribeReservedInstancesModificationsInput { + s.Filters = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReservedInstancesModificationsInput) SetNextToken(v string) *DescribeReservedInstancesModificationsInput { + s.NextToken = &v + return s +} + +// SetReservedInstancesModificationIds sets the ReservedInstancesModificationIds field's value. +func (s *DescribeReservedInstancesModificationsInput) SetReservedInstancesModificationIds(v []*string) *DescribeReservedInstancesModificationsInput { + s.ReservedInstancesModificationIds = v + return s +} + +// Contains the output of DescribeReservedInstancesModifications. +type DescribeReservedInstancesModificationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Reserved Instance modification information. + ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesModificationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReservedInstancesModificationsOutput) SetNextToken(v string) *DescribeReservedInstancesModificationsOutput { + s.NextToken = &v + return s +} + +// SetReservedInstancesModifications sets the ReservedInstancesModifications field's value. +func (s *DescribeReservedInstancesModificationsOutput) SetReservedInstancesModifications(v []*ReservedInstancesModification) *DescribeReservedInstancesModificationsOutput { + s.ReservedInstancesModifications = v + return s +} + +// Contains the parameters for DescribeReservedInstancesOfferings. +type DescribeReservedInstancesOfferingsInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * availability-zone - The Availability Zone where the Reserved Instance + // can be used. 
+ // + // * duration - The duration of the Reserved Instance (for example, one year + // or three years), in seconds (31536000 | 94608000). + // + // * fixed-price - The purchase price of the Reserved Instance (for example, + // 9800.0). + // + // * instance-type - The instance type that is covered by the reservation. + // + // * marketplace - Set to true to show only Reserved Instance Marketplace + // offerings. When this filter is not used, which is the default behavior, + // all offerings from both AWS and the Reserved Instance Marketplace are + // listed. + // + // * product-description - The Reserved Instance product platform description. + // Instances that include (Amazon VPC) in the product platform description + // will only be displayed to EC2-Classic account holders and are for use + // with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | + // SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise + // Linux (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL + // Server Standard | Windows with SQL Server Standard (Amazon VPC) | Windows + // with SQL Server Web | Windows with SQL Server Web (Amazon VPC) | Windows + // with SQL Server Enterprise | Windows with SQL Server Enterprise (Amazon + // VPC)) + // + // * reserved-instances-offering-id - The Reserved Instances offering ID. + // + // * scope - The scope of the Reserved Instance (Availability Zone or Region). + // + // * usage-price - The usage price of the Reserved Instance, per hour (for + // example, 0.84). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Include Reserved Instance Marketplace offerings in the response. + IncludeMarketplace *bool `type:"boolean"` + + // The tenancy of the instances covered by the reservation. A Reserved Instance + // with a tenancy of dedicated is applied to instances that run in a VPC on + // single-tenant hardware (i.e., Dedicated Instances). 
+ // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // + // Default: default + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type that the reservation will cover (for example, m1.small). + // For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum duration (in seconds) to filter when searching for offerings. + // + // Default: 94608000 (3 years) + MaxDuration *int64 `type:"long"` + + // The maximum number of instances to filter when searching for offerings. + // + // Default: 20 + MaxInstanceCount *int64 `type:"integer"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. The maximum is 100. + // + // Default: 100 + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The minimum duration (in seconds) to filter when searching for offerings. + // + // Default: 2592000 (1 month) + MinDuration *int64 `type:"long"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The offering class of the Reserved Instance. Can be standard or convertible. + OfferingClass *string `type:"string" enum:"OfferingClassType"` + + // The Reserved Instance offering type. If you are using tools that predate + // the 2011-11-01 API version, you only have access to the Medium Utilization + // Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. 
Instances that include + // (Amazon VPC) in the description are for use with Amazon VPC. + ProductDescription *string `type:"string" enum:"RIProductDescription"` + + // One or more Reserved Instances offering IDs. + ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsInput) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetAvailabilityZone(v string) *DescribeReservedInstancesOfferingsInput { + s.AvailabilityZone = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetDryRun(v bool) *DescribeReservedInstancesOfferingsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetFilters(v []*Filter) *DescribeReservedInstancesOfferingsInput { + s.Filters = v + return s +} + +// SetIncludeMarketplace sets the IncludeMarketplace field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetIncludeMarketplace(v bool) *DescribeReservedInstancesOfferingsInput { + s.IncludeMarketplace = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetInstanceTenancy(v string) *DescribeReservedInstancesOfferingsInput { + s.InstanceTenancy = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetInstanceType(v string) *DescribeReservedInstancesOfferingsInput { + s.InstanceType = &v + return s +} + +// SetMaxDuration sets the MaxDuration field's value. 
+func (s *DescribeReservedInstancesOfferingsInput) SetMaxDuration(v int64) *DescribeReservedInstancesOfferingsInput { + s.MaxDuration = &v + return s +} + +// SetMaxInstanceCount sets the MaxInstanceCount field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetMaxInstanceCount(v int64) *DescribeReservedInstancesOfferingsInput { + s.MaxInstanceCount = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetMaxResults(v int64) *DescribeReservedInstancesOfferingsInput { + s.MaxResults = &v + return s +} + +// SetMinDuration sets the MinDuration field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetMinDuration(v int64) *DescribeReservedInstancesOfferingsInput { + s.MinDuration = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetNextToken(v string) *DescribeReservedInstancesOfferingsInput { + s.NextToken = &v + return s +} + +// SetOfferingClass sets the OfferingClass field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetOfferingClass(v string) *DescribeReservedInstancesOfferingsInput { + s.OfferingClass = &v + return s +} + +// SetOfferingType sets the OfferingType field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetOfferingType(v string) *DescribeReservedInstancesOfferingsInput { + s.OfferingType = &v + return s +} + +// SetProductDescription sets the ProductDescription field's value. +func (s *DescribeReservedInstancesOfferingsInput) SetProductDescription(v string) *DescribeReservedInstancesOfferingsInput { + s.ProductDescription = &v + return s +} + +// SetReservedInstancesOfferingIds sets the ReservedInstancesOfferingIds field's value. 
+func (s *DescribeReservedInstancesOfferingsInput) SetReservedInstancesOfferingIds(v []*string) *DescribeReservedInstancesOfferingsInput { + s.ReservedInstancesOfferingIds = v + return s +} + +// Contains the output of DescribeReservedInstancesOfferings. +type DescribeReservedInstancesOfferingsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of Reserved Instances offerings. + ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOfferingsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReservedInstancesOfferingsOutput) SetNextToken(v string) *DescribeReservedInstancesOfferingsOutput { + s.NextToken = &v + return s +} + +// SetReservedInstancesOfferings sets the ReservedInstancesOfferings field's value. +func (s *DescribeReservedInstancesOfferingsOutput) SetReservedInstancesOfferings(v []*ReservedInstancesOffering) *DescribeReservedInstancesOfferingsOutput { + s.ReservedInstancesOfferings = v + return s +} + +// Contains the output for DescribeReservedInstances. +type DescribeReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // A list of Reserved Instances. 
+ ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservedInstancesOutput) GoString() string { + return s.String() +} + +// SetReservedInstances sets the ReservedInstances field's value. +func (s *DescribeReservedInstancesOutput) SetReservedInstances(v []*ReservedInstances) *DescribeReservedInstancesOutput { + s.ReservedInstances = v + return s +} + +type DescribeRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * association.route-table-association-id - The ID of an association ID + // for the route table. + // + // * association.route-table-id - The ID of the route table involved in the + // association. + // + // * association.subnet-id - The ID of the subnet involved in the association. + // + // * association.main - Indicates whether the route table is the main route + // table for the VPC (true | false). Route tables that do not have an association + // ID are not returned in the response. + // + // * owner-id - The ID of the AWS account that owns the route table. + // + // * route-table-id - The ID of the route table. + // + // * route.destination-cidr-block - The IPv4 CIDR range specified in a route + // in the table. + // + // * route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in + // a route in the route table. 
+ // + // * route.destination-prefix-list-id - The ID (prefix) of the AWS service + // specified in a route in the table. + // + // * route.egress-only-internet-gateway-id - The ID of an egress-only Internet + // gateway specified in a route in the route table. + // + // * route.gateway-id - The ID of a gateway specified in a route in the table. + // + // * route.instance-id - The ID of an instance specified in a route in the + // table. + // + // * route.nat-gateway-id - The ID of a NAT gateway. + // + // * route.transit-gateway-id - The ID of a transit gateway. + // + // * route.origin - Describes how the route was created. CreateRouteTable + // indicates that the route was automatically created when the route table + // was created; CreateRoute indicates that the route was manually added to + // the route table; EnableVgwRoutePropagation indicates that the route was + // propagated by route propagation. + // + // * route.state - The state of a route in the route table (active | blackhole). + // The blackhole state indicates that the route's target isn't available + // (for example, the specified gateway isn't attached to the VPC, the specified + // NAT instance has been terminated, and so on). + // + // * route.vpc-peering-connection-id - The ID of a VPC peering connection + // specified in a route in the table. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC for the route table. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more route table IDs. + // + // Default: Describes all your route tables. + RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRouteTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRouteTablesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeRouteTablesInput) SetDryRun(v bool) *DescribeRouteTablesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeRouteTablesInput) SetFilters(v []*Filter) *DescribeRouteTablesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeRouteTablesInput) SetMaxResults(v int64) *DescribeRouteTablesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRouteTablesInput) SetNextToken(v string) *DescribeRouteTablesInput { + s.NextToken = &v + return s +} + +// SetRouteTableIds sets the RouteTableIds field's value. 
+func (s *DescribeRouteTablesInput) SetRouteTableIds(v []*string) *DescribeRouteTablesInput { + s.RouteTableIds = v + return s +} + +// Contains the output of DescribeRouteTables. +type DescribeRouteTablesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more route tables. + RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRouteTablesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeRouteTablesOutput) SetNextToken(v string) *DescribeRouteTablesOutput { + s.NextToken = &v + return s +} + +// SetRouteTables sets the RouteTables field's value. +func (s *DescribeRouteTablesOutput) SetRouteTables(v []*RouteTable) *DescribeRouteTablesOutput { + s.RouteTables = v + return s +} + +// Contains the parameters for DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * availability-zone - The Availability Zone (for example, us-west-2a). + // + // * instance-type - The instance type (for example, c4.large). + // + // * network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // * platform - The platform (Linux/UNIX or Windows). 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The time period for the first schedule to start. + // + // FirstSlotStartTimeRange is a required field + FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 300. The default value is 300. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The maximum available duration, in hours. This value must be greater than + // MinSlotDurationInHours and less than 1,720. + MaxSlotDurationInHours *int64 `type:"integer"` + + // The minimum available duration, in hours. The minimum required duration is + // 1,200 hours per year. For example, the minimum daily schedule is 4 hours, + // the minimum weekly schedule is 24 hours, and the minimum monthly schedule + // is 100 hours. + MinSlotDurationInHours *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The schedule recurrence. + // + // Recurrence is a required field + Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeScheduledInstanceAvailabilityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScheduledInstanceAvailabilityInput"} + if s.FirstSlotStartTimeRange == nil { + invalidParams.Add(request.NewErrParamRequired("FirstSlotStartTimeRange")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.Recurrence == nil { + invalidParams.Add(request.NewErrParamRequired("Recurrence")) + } + if s.FirstSlotStartTimeRange != nil { + if err := s.FirstSlotStartTimeRange.Validate(); err != nil { + invalidParams.AddNested("FirstSlotStartTimeRange", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetDryRun(v bool) *DescribeScheduledInstanceAvailabilityInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetFilters(v []*Filter) *DescribeScheduledInstanceAvailabilityInput { + s.Filters = v + return s +} + +// SetFirstSlotStartTimeRange sets the FirstSlotStartTimeRange field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetFirstSlotStartTimeRange(v *SlotDateTimeRangeRequest) *DescribeScheduledInstanceAvailabilityInput { + s.FirstSlotStartTimeRange = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetMaxResults(v int64) *DescribeScheduledInstanceAvailabilityInput { + s.MaxResults = &v + return s +} + +// SetMaxSlotDurationInHours sets the MaxSlotDurationInHours field's value. 
+func (s *DescribeScheduledInstanceAvailabilityInput) SetMaxSlotDurationInHours(v int64) *DescribeScheduledInstanceAvailabilityInput { + s.MaxSlotDurationInHours = &v + return s +} + +// SetMinSlotDurationInHours sets the MinSlotDurationInHours field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetMinSlotDurationInHours(v int64) *DescribeScheduledInstanceAvailabilityInput { + s.MinSlotDurationInHours = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetNextToken(v string) *DescribeScheduledInstanceAvailabilityInput { + s.NextToken = &v + return s +} + +// SetRecurrence sets the Recurrence field's value. +func (s *DescribeScheduledInstanceAvailabilityInput) SetRecurrence(v *ScheduledInstanceRecurrenceRequest) *DescribeScheduledInstanceAvailabilityInput { + s.Recurrence = v + return s +} + +// Contains the output of DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the available Scheduled Instances. + ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeScheduledInstanceAvailabilityOutput) SetNextToken(v string) *DescribeScheduledInstanceAvailabilityOutput { + s.NextToken = &v + return s +} + +// SetScheduledInstanceAvailabilitySet sets the ScheduledInstanceAvailabilitySet field's value. +func (s *DescribeScheduledInstanceAvailabilityOutput) SetScheduledInstanceAvailabilitySet(v []*ScheduledInstanceAvailability) *DescribeScheduledInstanceAvailabilityOutput { + s.ScheduledInstanceAvailabilitySet = v + return s +} + +// Contains the parameters for DescribeScheduledInstances. +type DescribeScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * availability-zone - The Availability Zone (for example, us-west-2a). + // + // * instance-type - The instance type (for example, c4.large). + // + // * network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // * platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 300. The default value is 100. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The Scheduled Instance IDs. + ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"` + + // The time period for the first schedule to start. 
+ SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeScheduledInstancesInput) SetDryRun(v bool) *DescribeScheduledInstancesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeScheduledInstancesInput) SetFilters(v []*Filter) *DescribeScheduledInstancesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeScheduledInstancesInput) SetMaxResults(v int64) *DescribeScheduledInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScheduledInstancesInput) SetNextToken(v string) *DescribeScheduledInstancesInput { + s.NextToken = &v + return s +} + +// SetScheduledInstanceIds sets the ScheduledInstanceIds field's value. +func (s *DescribeScheduledInstancesInput) SetScheduledInstanceIds(v []*string) *DescribeScheduledInstancesInput { + s.ScheduledInstanceIds = v + return s +} + +// SetSlotStartTimeRange sets the SlotStartTimeRange field's value. +func (s *DescribeScheduledInstancesInput) SetSlotStartTimeRange(v *SlotStartTimeRangeRequest) *DescribeScheduledInstancesInput { + s.SlotStartTimeRange = v + return s +} + +// Contains the output of DescribeScheduledInstances. +type DescribeScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Scheduled Instances. 
+ ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeScheduledInstancesOutput) SetNextToken(v string) *DescribeScheduledInstancesOutput { + s.NextToken = &v + return s +} + +// SetScheduledInstanceSet sets the ScheduledInstanceSet field's value. +func (s *DescribeScheduledInstancesOutput) SetScheduledInstanceSet(v []*ScheduledInstance) *DescribeScheduledInstancesOutput { + s.ScheduledInstanceSet = v + return s +} + +type DescribeSecurityGroupReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the security groups in your account. + // + // GroupId is a required field + GroupId []*string `locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSecurityGroupReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupReferencesInput"} + if s.GroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSecurityGroupReferencesInput) SetDryRun(v bool) *DescribeSecurityGroupReferencesInput { + s.DryRun = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *DescribeSecurityGroupReferencesInput) SetGroupId(v []*string) *DescribeSecurityGroupReferencesInput { + s.GroupId = v + return s +} + +type DescribeSecurityGroupReferencesOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPCs with the referencing security groups. + SecurityGroupReferenceSet []*SecurityGroupReference `locationName:"securityGroupReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesOutput) GoString() string { + return s.String() +} + +// SetSecurityGroupReferenceSet sets the SecurityGroupReferenceSet field's value. +func (s *DescribeSecurityGroupReferencesOutput) SetSecurityGroupReferenceSet(v []*SecurityGroupReference) *DescribeSecurityGroupReferencesOutput { + s.SecurityGroupReferenceSet = v + return s +} + +type DescribeSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. 
If using multiple filters for rules, the results include security + // groups for which any combination of rules - not necessarily a single rule + // - match all filters. + // + // * description - The description of the security group. + // + // * egress.ip-permission.cidr - An IPv4 CIDR block for an outbound security + // group rule. + // + // * egress.ip-permission.from-port - For an outbound rule, the start of + // port range for the TCP and UDP protocols, or an ICMP type number. + // + // * egress.ip-permission.group-id - The ID of a security group that has + // been referenced in an outbound security group rule. + // + // * egress.ip-permission.group-name - The name of a security group that + // has been referenced in an outbound security group rule. + // + // * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound + // security group rule. + // + // * egress.ip-permission.prefix-list-id - The ID of a prefix list to which + // a security group rule allows outbound access. + // + // * egress.ip-permission.protocol - The IP protocol for an outbound security + // group rule (tcp | udp | icmp or a protocol number). + // + // * egress.ip-permission.to-port - For an outbound rule, the end of port + // range for the TCP and UDP protocols, or an ICMP code. + // + // * egress.ip-permission.user-id - The ID of an AWS account that has been + // referenced in an outbound security group rule. + // + // * group-id - The ID of the security group. + // + // * group-name - The name of the security group. + // + // * ip-permission.cidr - An IPv4 CIDR block for an inbound security group + // rule. + // + // * ip-permission.from-port - For an inbound rule, the start of port range + // for the TCP and UDP protocols, or an ICMP type number. + // + // * ip-permission.group-id - The ID of a security group that has been referenced + // in an inbound security group rule. 
+ // + // * ip-permission.group-name - The name of a security group that has been + // referenced in an inbound security group rule. + // + // * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security + // group rule. + // + // * ip-permission.prefix-list-id - The ID of a prefix list from which a + // security group rule allows inbound access. + // + // * ip-permission.protocol - The IP protocol for an inbound security group + // rule (tcp | udp | icmp or a protocol number). + // + // * ip-permission.to-port - For an inbound rule, the end of port range for + // the TCP and UDP protocols, or an ICMP code. + // + // * ip-permission.user-id - The ID of an AWS account that has been referenced + // in an inbound security group rule. + // + // * owner-id - The AWS account ID of the owner of the security group. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC specified when the security group was created. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the security groups. Required for security groups in a nondefault + // VPC. + // + // Default: Describes all your security groups. + GroupIds []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // [EC2-Classic and default VPC only] The names of the security groups. You + // can specify either the security group name or the security group ID. 
For + // security groups in a nondefault VPC, use the group-name filter to describe + // security groups by name. + // + // Default: Describes all your security groups. + GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another request with the returned NextToken value. + // This value can be between 5 and 1000. If this parameter is not specified, + // then all results are returned. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSecurityGroupsInput) SetDryRun(v bool) *DescribeSecurityGroupsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeSecurityGroupsInput) SetFilters(v []*Filter) *DescribeSecurityGroupsInput { + s.Filters = v + return s +} + +// SetGroupIds sets the GroupIds field's value. +func (s *DescribeSecurityGroupsInput) SetGroupIds(v []*string) *DescribeSecurityGroupsInput { + s.GroupIds = v + return s +} + +// SetGroupNames sets the GroupNames field's value. 
+func (s *DescribeSecurityGroupsInput) SetGroupNames(v []*string) *DescribeSecurityGroupsInput { + s.GroupNames = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSecurityGroupsInput) SetMaxResults(v int64) *DescribeSecurityGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGroupsInput { + s.NextToken = &v + return s +} + +type DescribeSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the security groups. + SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsOutput) SetNextToken(v string) *DescribeSecurityGroupsOutput { + s.NextToken = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *DescribeSecurityGroupsOutput { + s.SecurityGroups = v + return s +} + +type DescribeSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute you would like to view. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the EBS snapshot. + // + // SnapshotId is a required field + SnapshotId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSnapshotAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeSnapshotAttributeInput) SetAttribute(v string) *DescribeSnapshotAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSnapshotAttributeInput) SetDryRun(v bool) *DescribeSnapshotAttributeInput { + s.DryRun = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *DescribeSnapshotAttributeInput) SetSnapshotId(v string) *DescribeSnapshotAttributeInput { + s.SnapshotId = &v + return s +} + +type DescribeSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // The users and groups that have the permissions for creating volumes from + // the snapshot. + CreateVolumePermissions []*CreateVolumePermission `locationName:"createVolumePermission" locationNameList:"item" type:"list"` + + // The product codes. 
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the EBS snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s DescribeSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotAttributeOutput) GoString() string { + return s.String() +} + +// SetCreateVolumePermissions sets the CreateVolumePermissions field's value. +func (s *DescribeSnapshotAttributeOutput) SetCreateVolumePermissions(v []*CreateVolumePermission) *DescribeSnapshotAttributeOutput { + s.CreateVolumePermissions = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *DescribeSnapshotAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeSnapshotAttributeOutput { + s.ProductCodes = v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *DescribeSnapshotAttributeOutput) SetSnapshotId(v string) *DescribeSnapshotAttributeOutput { + s.SnapshotId = &v + return s +} + +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * description - A description of the snapshot. + // + // * encrypted - Indicates whether the snapshot is encrypted (true | false) + // + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon). + // This is not the user-configured AWS account alias set using the IAM console. + // We recommend that you use the related parameter instead of this filter. + // + // * owner-id - The AWS account ID of the owner. 
We recommend that you use + // the related parameter instead of this filter. + // + // * progress - The progress of the snapshot, as a percentage (for example, + // 80%). + // + // * snapshot-id - The snapshot ID. + // + // * start-time - The time stamp when the snapshot was initiated. + // + // * status - The status of the snapshot (pending | completed | error). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * volume-id - The ID of the volume the snapshot is for. + // + // * volume-size - The size of the volume, in GiB. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of snapshot results returned by DescribeSnapshots in paginated + // output. When this parameter is used, DescribeSnapshots only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeSnapshots + // request with the returned NextToken value. This value can be between 5 and + // 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results + // are returned. If this parameter is not used, then DescribeSnapshots returns + // all results. You cannot specify this parameter and the snapshot IDs parameter + // in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeSnapshots + // request where MaxResults was used and the results exceeded the value of that + // parameter. 
Pagination continues from the end of the previous results that + // returned the NextToken value. This value is null when there are no more results + // to return. + NextToken *string `type:"string"` + + // Scopes the results to snapshots with the specified owners. You can specify + // a combination of AWS account IDs, self, and amazon. + OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` + + // The IDs of the AWS accounts that can create volumes from the snapshot. + RestorableByUserIds []*string `locationName:"RestorableBy" type:"list"` + + // The snapshot IDs. + // + // Default: Describes the snapshots for which you have create volume permissions. + SnapshotIds []*string `locationName:"SnapshotId" locationNameList:"SnapshotId" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSnapshotsInput) SetDryRun(v bool) *DescribeSnapshotsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeSnapshotsInput) SetFilters(v []*Filter) *DescribeSnapshotsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSnapshotsInput) SetMaxResults(v int64) *DescribeSnapshotsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSnapshotsInput) SetNextToken(v string) *DescribeSnapshotsInput { + s.NextToken = &v + return s +} + +// SetOwnerIds sets the OwnerIds field's value. +func (s *DescribeSnapshotsInput) SetOwnerIds(v []*string) *DescribeSnapshotsInput { + s.OwnerIds = v + return s +} + +// SetRestorableByUserIds sets the RestorableByUserIds field's value. 
+func (s *DescribeSnapshotsInput) SetRestorableByUserIds(v []*string) *DescribeSnapshotsInput { + s.RestorableByUserIds = v + return s +} + +// SetSnapshotIds sets the SnapshotIds field's value. +func (s *DescribeSnapshotsInput) SetSnapshotIds(v []*string) *DescribeSnapshotsInput { + s.SnapshotIds = v + return s +} + +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeSnapshots request. When + // the results of a DescribeSnapshots request exceed MaxResults, this value + // can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the snapshots. + Snapshots []*Snapshot `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSnapshotsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSnapshotsOutput) SetNextToken(v string) *DescribeSnapshotsOutput { + s.NextToken = &v + return s +} + +// SetSnapshots sets the Snapshots field's value. +func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshotsOutput { + s.Snapshots = v + return s +} + +// Contains the parameters for DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotDatafeedSubscriptionInput) SetDryRun(v bool) *DescribeSpotDatafeedSubscriptionInput { + s.DryRun = &v + return s +} + +// Contains the output of DescribeSpotDatafeedSubscription. +type DescribeSpotDatafeedSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The Spot Instance data feed subscription. + SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"` +} + +// String returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotDatafeedSubscriptionOutput) GoString() string { + return s.String() +} + +// SetSpotDatafeedSubscription sets the SpotDatafeedSubscription field's value. +func (s *DescribeSpotDatafeedSubscriptionOutput) SetSpotDatafeedSubscription(v *SpotDatafeedSubscription) *DescribeSpotDatafeedSubscriptionOutput { + s.SpotDatafeedSubscription = v + return s +} + +// Contains the parameters for DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. 
The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot Fleet request. + // + // SpotFleetRequestId is a required field + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSpotFleetInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetInstancesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SpotFleetRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotFleetInstancesInput) SetDryRun(v bool) *DescribeSpotFleetInstancesInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSpotFleetInstancesInput) SetMaxResults(v int64) *DescribeSpotFleetInstancesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetInstancesInput) SetNextToken(v string) *DescribeSpotFleetInstancesInput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. 
+func (s *DescribeSpotFleetInstancesInput) SetSpotFleetRequestId(v string) *DescribeSpotFleetInstancesInput { + s.SpotFleetRequestId = &v + return s +} + +// Contains the output of DescribeSpotFleetInstances. +type DescribeSpotFleetInstancesOutput struct { + _ struct{} `type:"structure"` + + // The running instances. This list is refreshed periodically and might be out + // of date. + ActiveInstances []*ActiveInstance `locationName:"activeInstanceSet" locationNameList:"item" type:"list"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot Fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` +} + +// String returns the string representation +func (s DescribeSpotFleetInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetInstancesOutput) GoString() string { + return s.String() +} + +// SetActiveInstances sets the ActiveInstances field's value. +func (s *DescribeSpotFleetInstancesOutput) SetActiveInstances(v []*ActiveInstance) *DescribeSpotFleetInstancesOutput { + s.ActiveInstances = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetInstancesOutput) SetNextToken(v string) *DescribeSpotFleetInstancesOutput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *DescribeSpotFleetInstancesOutput) SetSpotFleetRequestId(v string) *DescribeSpotFleetInstancesOutput { + s.SpotFleetRequestId = &v + return s +} + +// Contains the parameters for DescribeSpotFleetRequestHistory. 
+type DescribeSpotFleetRequestHistoryInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The type of events to describe. By default, all events are described. + EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot Fleet request. + // + // SpotFleetRequestId is a required field + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // + // StartTime is a required field + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSpotFleetRequestHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSpotFleetRequestHistoryInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SpotFleetRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetDryRun(v bool) *DescribeSpotFleetRequestHistoryInput { + s.DryRun = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetEventType(v string) *DescribeSpotFleetRequestHistoryInput { + s.EventType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetMaxResults(v int64) *DescribeSpotFleetRequestHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetNextToken(v string) *DescribeSpotFleetRequestHistoryInput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetSpotFleetRequestId(v string) *DescribeSpotFleetRequestHistoryInput { + s.SpotFleetRequestId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeSpotFleetRequestHistoryInput) SetStartTime(v time.Time) *DescribeSpotFleetRequestHistoryInput { + s.StartTime = &v + return s +} + +// Contains the output of DescribeSpotFleetRequestHistory. +type DescribeSpotFleetRequestHistoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the events in the history of the Spot Fleet request. 
+ HistoryRecords []*HistoryRecord `locationName:"historyRecordSet" locationNameList:"item" type:"list"` + + // The last date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // All records up to this time were retrieved. + // + // If nextToken indicates that there are more results, this value is not present. + LastEvaluatedTime *time.Time `locationName:"lastEvaluatedTime" type:"timestamp"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the Spot Fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` + + // The starting date and time for the events, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestHistoryOutput) GoString() string { + return s.String() +} + +// SetHistoryRecords sets the HistoryRecords field's value. +func (s *DescribeSpotFleetRequestHistoryOutput) SetHistoryRecords(v []*HistoryRecord) *DescribeSpotFleetRequestHistoryOutput { + s.HistoryRecords = v + return s +} + +// SetLastEvaluatedTime sets the LastEvaluatedTime field's value. +func (s *DescribeSpotFleetRequestHistoryOutput) SetLastEvaluatedTime(v time.Time) *DescribeSpotFleetRequestHistoryOutput { + s.LastEvaluatedTime = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetRequestHistoryOutput) SetNextToken(v string) *DescribeSpotFleetRequestHistoryOutput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. 
+func (s *DescribeSpotFleetRequestHistoryOutput) SetSpotFleetRequestId(v string) *DescribeSpotFleetRequestHistoryOutput { + s.SpotFleetRequestId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeSpotFleetRequestHistoryOutput) SetStartTime(v time.Time) *DescribeSpotFleetRequestHistoryOutput { + s.StartTime = &v + return s +} + +// Contains the parameters for DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The IDs of the Spot Fleet requests. + SpotFleetRequestIds []*string `locationName:"spotFleetRequestId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotFleetRequestsInput) SetDryRun(v bool) *DescribeSpotFleetRequestsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeSpotFleetRequestsInput) SetMaxResults(v int64) *DescribeSpotFleetRequestsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetRequestsInput) SetNextToken(v string) *DescribeSpotFleetRequestsInput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestIds sets the SpotFleetRequestIds field's value. +func (s *DescribeSpotFleetRequestsInput) SetSpotFleetRequestIds(v []*string) *DescribeSpotFleetRequestsInput { + s.SpotFleetRequestIds = v + return s +} + +// Contains the output of DescribeSpotFleetRequests. +type DescribeSpotFleetRequestsOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the configuration of your Spot Fleet. + SpotFleetRequestConfigs []*SpotFleetRequestConfig `locationName:"spotFleetRequestConfigSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotFleetRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotFleetRequestsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotFleetRequestsOutput) SetNextToken(v string) *DescribeSpotFleetRequestsOutput { + s.NextToken = &v + return s +} + +// SetSpotFleetRequestConfigs sets the SpotFleetRequestConfigs field's value. +func (s *DescribeSpotFleetRequestsOutput) SetSpotFleetRequestConfigs(v []*SpotFleetRequestConfig) *DescribeSpotFleetRequestsOutput { + s.SpotFleetRequestConfigs = v + return s +} + +// Contains the parameters for DescribeSpotInstanceRequests. 
+type DescribeSpotInstanceRequestsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * availability-zone-group - The Availability Zone group. + // + // * create-time - The time stamp when the Spot Instance request was created. + // + // * fault-code - The fault code related to the request. + // + // * fault-message - The fault message related to the request. + // + // * instance-id - The ID of the instance that fulfilled the request. + // + // * launch-group - The Spot Instance launch group. + // + // * launch.block-device-mapping.delete-on-termination - Indicates whether + // the EBS volume is deleted on instance termination. + // + // * launch.block-device-mapping.device-name - The device name for the volume + // in the block device mapping (for example, /dev/sdh or xvdh). + // + // * launch.block-device-mapping.snapshot-id - The ID of the snapshot for + // the EBS volume. + // + // * launch.block-device-mapping.volume-size - The size of the EBS volume, + // in GiB. + // + // * launch.block-device-mapping.volume-type - The type of EBS volume: gp2 + // for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for + // Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic. + // + // * launch.group-id - The ID of the security group for the instance. + // + // * launch.group-name - The name of the security group for the instance. + // + // * launch.image-id - The ID of the AMI. + // + // * launch.instance-type - The type of instance (for example, m3.medium). + // + // * launch.kernel-id - The kernel ID. + // + // * launch.key-name - The name of the key pair the instance launched with. 
+ // + // * launch.monitoring-enabled - Whether detailed monitoring is enabled for + // the Spot Instance. + // + // * launch.ramdisk-id - The RAM disk ID. + // + // * launched-availability-zone - The Availability Zone in which the request + // is launched. + // + // * network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. + // + // * network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. + // + // * network-interface.description - A description of the network interface. + // + // * network-interface.device-index - The index of the device for the network + // interface attachment on the instance. + // + // * network-interface.group-id - The ID of the security group associated + // with the network interface. + // + // * network-interface.network-interface-id - The ID of the network interface. + // + // * network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // * network-interface.subnet-id - The ID of the subnet for the instance. + // + // * product-description - The product description associated with the instance + // (Linux/UNIX | Windows). + // + // * spot-instance-request-id - The Spot Instance request ID. + // + // * spot-price - The maximum hourly price for any Spot Instance launched + // to fulfill the request. + // + // * state - The state of the Spot Instance request (open | active | closed + // | cancelled | failed). Spot request status information can help you track + // your Amazon EC2 Spot Instance requests. For more information, see Spot + // request status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon EC2 User Guide for Linux Instances. + // + // * status-code - The short code describing the most recent evaluation of + // your Spot Instance request. 
+ // + // * status-message - The message explaining the status of the Spot Instance + // request. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * type - The type of Spot Instance request (one-time | persistent). + // + // * valid-from - The start date of the request. + // + // * valid-until - The end date of the request. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 5 and 1000. To retrieve the remaining results, make another call + // with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token to request the next set of results. This value is null when there + // are no more results to return. + NextToken *string `type:"string"` + + // One or more Spot Instance request IDs. + SpotInstanceRequestIds []*string `locationName:"SpotInstanceRequestId" locationNameList:"SpotInstanceRequestId" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotInstanceRequestsInput) SetDryRun(v bool) *DescribeSpotInstanceRequestsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeSpotInstanceRequestsInput) SetFilters(v []*Filter) *DescribeSpotInstanceRequestsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSpotInstanceRequestsInput) SetMaxResults(v int64) *DescribeSpotInstanceRequestsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotInstanceRequestsInput) SetNextToken(v string) *DescribeSpotInstanceRequestsInput { + s.NextToken = &v + return s +} + +// SetSpotInstanceRequestIds sets the SpotInstanceRequestIds field's value. +func (s *DescribeSpotInstanceRequestsInput) SetSpotInstanceRequestIds(v []*string) *DescribeSpotInstanceRequestsInput { + s.SpotInstanceRequestIds = v + return s +} + +// Contains the output of DescribeSpotInstanceRequests. +type DescribeSpotInstanceRequestsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // One or more Spot Instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotInstanceRequestsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotInstanceRequestsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotInstanceRequestsOutput) SetNextToken(v string) *DescribeSpotInstanceRequestsOutput { + s.NextToken = &v + return s +} + +// SetSpotInstanceRequests sets the SpotInstanceRequests field's value. 
+func (s *DescribeSpotInstanceRequestsOutput) SetSpotInstanceRequests(v []*SpotInstanceRequest) *DescribeSpotInstanceRequestsOutput { + s.SpotInstanceRequests = v + return s +} + +// Contains the parameters for DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryInput struct { + _ struct{} `type:"structure"` + + // Filters the results by the specified Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The date and time, up to the current date, from which to stop retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // One or more filters. + // + // * availability-zone - The Availability Zone for which prices should be + // returned. + // + // * instance-type - The type of instance (for example, m3.medium). + // + // * product-description - The product description for the Spot price (Linux/UNIX + // | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon + // VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). + // + // * spot-price - The Spot price. The value must match exactly (or use wildcards; + // greater than or less than comparison is not supported). + // + // * timestamp - The time stamp of the Spot price history, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). + // Greater than or less than comparison is not supported. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Filters the results by the specified instance types. 
+ InstanceTypes []*string `locationName:"InstanceType" type:"list"` + + // The maximum number of results to return in a single call. Specify a value + // between 1 and 1000. The default value is 1000. To retrieve the remaining + // results, make another call with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Filters the results by the specified basic product descriptions. + ProductDescriptions []*string `locationName:"ProductDescription" type:"list"` + + // The date and time, up to the past 90 days, from which to start retrieving + // the price history data, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryInput) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DescribeSpotPriceHistoryInput) SetAvailabilityZone(v string) *DescribeSpotPriceHistoryInput { + s.AvailabilityZone = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSpotPriceHistoryInput) SetDryRun(v bool) *DescribeSpotPriceHistoryInput { + s.DryRun = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeSpotPriceHistoryInput) SetEndTime(v time.Time) *DescribeSpotPriceHistoryInput { + s.EndTime = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeSpotPriceHistoryInput) SetFilters(v []*Filter) *DescribeSpotPriceHistoryInput { + s.Filters = v + return s +} + +// SetInstanceTypes sets the InstanceTypes field's value. 
+func (s *DescribeSpotPriceHistoryInput) SetInstanceTypes(v []*string) *DescribeSpotPriceHistoryInput { + s.InstanceTypes = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSpotPriceHistoryInput) SetMaxResults(v int64) *DescribeSpotPriceHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotPriceHistoryInput) SetNextToken(v string) *DescribeSpotPriceHistoryInput { + s.NextToken = &v + return s +} + +// SetProductDescriptions sets the ProductDescriptions field's value. +func (s *DescribeSpotPriceHistoryInput) SetProductDescriptions(v []*string) *DescribeSpotPriceHistoryInput { + s.ProductDescriptions = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeSpotPriceHistoryInput) SetStartTime(v time.Time) *DescribeSpotPriceHistoryInput { + s.StartTime = &v + return s +} + +// Contains the output of DescribeSpotPriceHistory. +type DescribeSpotPriceHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // or an empty string when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The historical Spot prices. + SpotPriceHistory []*SpotPrice `locationName:"spotPriceHistorySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSpotPriceHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSpotPriceHistoryOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSpotPriceHistoryOutput) SetNextToken(v string) *DescribeSpotPriceHistoryOutput { + s.NextToken = &v + return s +} + +// SetSpotPriceHistory sets the SpotPriceHistory field's value. 
+func (s *DescribeSpotPriceHistoryOutput) SetSpotPriceHistory(v []*SpotPrice) *DescribeSpotPriceHistoryOutput { + s.SpotPriceHistory = v + return s +} + +type DescribeStaleSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `min:"1" type:"string"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStaleSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStaleSecurityGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeStaleSecurityGroupsInput) SetDryRun(v bool) *DescribeStaleSecurityGroupsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeStaleSecurityGroupsInput) SetMaxResults(v int64) *DescribeStaleSecurityGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeStaleSecurityGroupsInput) SetNextToken(v string) *DescribeStaleSecurityGroupsInput { + s.NextToken = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DescribeStaleSecurityGroupsInput) SetVpcId(v string) *DescribeStaleSecurityGroupsInput { + s.VpcId = &v + return s +} + +type DescribeStaleSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the stale security groups. + StaleSecurityGroupSet []*StaleSecurityGroup `locationName:"staleSecurityGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeStaleSecurityGroupsOutput) SetNextToken(v string) *DescribeStaleSecurityGroupsOutput { + s.NextToken = &v + return s +} + +// SetStaleSecurityGroupSet sets the StaleSecurityGroupSet field's value. 
+func (s *DescribeStaleSecurityGroupsOutput) SetStaleSecurityGroupSet(v []*StaleSecurityGroup) *DescribeStaleSecurityGroupsOutput { + s.StaleSecurityGroupSet = v + return s +} + +type DescribeSubnetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * availability-zone - The Availability Zone for the subnet. You can also + // use availabilityZone as the filter name. + // + // * availability-zone-id - The ID of the Availability Zone for the subnet. + // You can also use availabilityZoneId as the filter name. + // + // * available-ip-address-count - The number of IPv4 addresses in the subnet + // that are available. + // + // * cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you specify + // must exactly match the subnet's CIDR block for information to be returned + // for the subnet. You can also use cidr or cidrBlock as the filter names. + // + // * default-for-az - Indicates whether this is the default subnet for the + // Availability Zone. You can also use defaultForAz as the filter name. + // + // * ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated + // with the subnet. + // + // * ipv6-cidr-block-association.association-id - An association ID for an + // IPv6 CIDR block associated with the subnet. + // + // * ipv6-cidr-block-association.state - The state of an IPv6 CIDR block + // associated with the subnet. + // + // * owner-id - The ID of the AWS account that owns the subnet. + // + // * state - The state of the subnet (pending | available). + // + // * subnet-arn - The Amazon Resource Name (ARN) of the subnet. + // + // * subnet-id - The ID of the subnet. 
+ // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC for the subnet. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more subnet IDs. + // + // Default: Describes all your subnets. + SubnetIds []*string `locationName:"SubnetId" locationNameList:"SubnetId" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSubnetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubnetsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeSubnetsInput) SetDryRun(v bool) *DescribeSubnetsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeSubnetsInput) SetFilters(v []*Filter) *DescribeSubnetsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSubnetsInput) SetMaxResults(v int64) *DescribeSubnetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetsInput) SetNextToken(v string) *DescribeSubnetsInput { + s.NextToken = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *DescribeSubnetsInput) SetSubnetIds(v []*string) *DescribeSubnetsInput { + s.SubnetIds = v + return s +} + +type DescribeSubnetsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more subnets. + Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubnetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubnetsOutput) SetNextToken(v string) *DescribeSubnetsOutput { + s.NextToken = &v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *DescribeSubnetsOutput) SetSubnets(v []*Subnet) *DescribeSubnetsOutput { + s.Subnets = v + return s +} + +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. 
+ // + // * key - The tag key. + // + // * resource-id - The ID of the resource. + // + // * resource-type - The resource type (customer-gateway | dedicated-host + // | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | + // image | instance | internet-gateway | key-pair | launch-template | natgateway + // | network-acl | network-interface | placement-group | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection + // | vpn-connection | vpn-gateway). + // + // * tag: - The key/value combination of the tag. For example, specify + // "tag:Owner" for the filter name and "TeamA" for the filter value to find + // resources with the tag "Owner=TeamA". + // + // * value - The tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. This value can + // be between 5 and 1000. To retrieve the remaining results, make another call + // with the returned NextToken value. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTagsInput) SetDryRun(v bool) *DescribeTagsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTagsInput) SetFilters(v []*Filter) *DescribeTagsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeTagsInput) SetMaxResults(v int64) *DescribeTagsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTagsInput) SetNextToken(v string) *DescribeTagsInput { + s.NextToken = &v + return s +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The tags. + Tags []*TagDescription `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTagsOutput) SetNextToken(v string) *DescribeTagsOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput { + s.Tags = v + return s +} + +type DescribeTrafficMirrorFiltersInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror filter description. + // + // * traffic-mirror-filter-id: The ID of the Traffic Mirror filter. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. 
To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterIds []*string `locationName:"TrafficMirrorFilterId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrafficMirrorFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorFiltersInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetDryRun(v bool) *DescribeTrafficMirrorFiltersInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetFilters(v []*Filter) *DescribeTrafficMirrorFiltersInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetMaxResults(v int64) *DescribeTrafficMirrorFiltersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFiltersInput) SetNextToken(v string) *DescribeTrafficMirrorFiltersInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilterIds sets the TrafficMirrorFilterIds field's value. 
+func (s *DescribeTrafficMirrorFiltersInput) SetTrafficMirrorFilterIds(v []*string) *DescribeTrafficMirrorFiltersInput { + s.TrafficMirrorFilterIds = v + return s +} + +type DescribeTrafficMirrorFiltersOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more Traffic Mirror filters. + TrafficMirrorFilters []*TrafficMirrorFilter `locationName:"trafficMirrorFilterSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorFiltersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFiltersOutput) SetNextToken(v string) *DescribeTrafficMirrorFiltersOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilters sets the TrafficMirrorFilters field's value. +func (s *DescribeTrafficMirrorFiltersOutput) SetTrafficMirrorFilters(v []*TrafficMirrorFilter) *DescribeTrafficMirrorFiltersOutput { + s.TrafficMirrorFilters = v + return s +} + +type DescribeTrafficMirrorSessionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror session description. + // + // * network-interface-id: The ID of the Traffic Mirror session network interface. 
+ // + // * owner-id: The ID of the account that owns the Traffic Mirror session. + // + // * packet-length: The assigned number of packets to mirror. + // + // * session-number: The assigned session number. + // + // * traffic-mirror-filter-id: The ID of the Traffic Mirror filter. + // + // * traffic-mirror-session-id: The ID of the Traffic Mirror session. + // + // * traffic-mirror-target-id: The ID of the Traffic Mirror target. + // + // * virtual-network-id: The virtual network ID of the Traffic Mirror session. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror session. + TrafficMirrorSessionIds []*string `locationName:"TrafficMirrorSessionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTrafficMirrorSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorSessionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetDryRun(v bool) *DescribeTrafficMirrorSessionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeTrafficMirrorSessionsInput) SetFilters(v []*Filter) *DescribeTrafficMirrorSessionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetMaxResults(v int64) *DescribeTrafficMirrorSessionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetNextToken(v string) *DescribeTrafficMirrorSessionsInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorSessionIds sets the TrafficMirrorSessionIds field's value. +func (s *DescribeTrafficMirrorSessionsInput) SetTrafficMirrorSessionIds(v []*string) *DescribeTrafficMirrorSessionsInput { + s.TrafficMirrorSessionIds = v + return s +} + +type DescribeTrafficMirrorSessionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Describes one or more Traffic Mirror sessions. By default, all Traffic Mirror + // sessions are described. Alternatively, you can filter the results. + TrafficMirrorSessions []*TrafficMirrorSession `locationName:"trafficMirrorSessionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorSessionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorSessionsOutput) SetNextToken(v string) *DescribeTrafficMirrorSessionsOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorSessions sets the TrafficMirrorSessions field's value. 
+func (s *DescribeTrafficMirrorSessionsOutput) SetTrafficMirrorSessions(v []*TrafficMirrorSession) *DescribeTrafficMirrorSessionsOutput { + s.TrafficMirrorSessions = v + return s +} + +type DescribeTrafficMirrorTargetsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * description: The Traffic Mirror target description. + // + // * network-interface-id: The ID of the Traffic Mirror session network interface. + // + // * network-load-balancer-arn: The Amazon Resource Name (ARN) of the Network + // Load Balancer that is associated with the session. + // + // * owner-id: The ID of the account that owns the Traffic Mirror session. + // + // * traffic-mirror-target-id: The ID of the Traffic Mirror target. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the Traffic Mirror targets. + TrafficMirrorTargetIds []*string `locationName:"TrafficMirrorTargetId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTrafficMirrorTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorTargetsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetDryRun(v bool) *DescribeTrafficMirrorTargetsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetFilters(v []*Filter) *DescribeTrafficMirrorTargetsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetMaxResults(v int64) *DescribeTrafficMirrorTargetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetNextToken(v string) *DescribeTrafficMirrorTargetsInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorTargetIds sets the TrafficMirrorTargetIds field's value. +func (s *DescribeTrafficMirrorTargetsInput) SetTrafficMirrorTargetIds(v []*string) *DescribeTrafficMirrorTargetsInput { + s.TrafficMirrorTargetIds = v + return s +} + +type DescribeTrafficMirrorTargetsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more Traffic Mirror targets. 
+ TrafficMirrorTargets []*TrafficMirrorTarget `locationName:"trafficMirrorTargetSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTrafficMirrorTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTrafficMirrorTargetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorTargetsOutput) SetNextToken(v string) *DescribeTrafficMirrorTargetsOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorTargets sets the TrafficMirrorTargets field's value. +func (s *DescribeTrafficMirrorTargetsOutput) SetTrafficMirrorTargets(v []*TrafficMirrorTarget) *DescribeTrafficMirrorTargetsOutput { + s.TrafficMirrorTargets = v + return s +} + +type DescribeTransitGatewayAttachmentsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * association.state - The state of the association (associating | associated + // | disassociating). + // + // * association.transit-gateway-route-table-id - The ID of the route table + // for the transit gateway. + // + // * resource-id - The ID of the resource. + // + // * resource-owner-id - The ID of the AWS account that owns the resource. + // + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. + // + // * state - The state of the attachment. Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. 
+ // + // * transit-gateway-attachment-id - The ID of the attachment. + // + // * transit-gateway-id - The ID of the transit gateway. + // + // * transit-gateway-owner-id - The ID of the AWS account that owns the transit + // gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the attachments. + TransitGatewayAttachmentIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayAttachmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayAttachmentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTransitGatewayAttachmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewayAttachmentsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTransitGatewayAttachmentsInput) SetDryRun(v bool) *DescribeTransitGatewayAttachmentsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewayAttachmentsInput) SetFilters(v []*Filter) *DescribeTransitGatewayAttachmentsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeTransitGatewayAttachmentsInput) SetMaxResults(v int64) *DescribeTransitGatewayAttachmentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayAttachmentsInput) SetNextToken(v string) *DescribeTransitGatewayAttachmentsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachmentIds sets the TransitGatewayAttachmentIds field's value. +func (s *DescribeTransitGatewayAttachmentsInput) SetTransitGatewayAttachmentIds(v []*string) *DescribeTransitGatewayAttachmentsInput { + s.TransitGatewayAttachmentIds = v + return s +} + +type DescribeTransitGatewayAttachmentsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the attachments. + TransitGatewayAttachments []*TransitGatewayAttachment `locationName:"transitGatewayAttachments" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayAttachmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayAttachmentsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayAttachmentsOutput) SetNextToken(v string) *DescribeTransitGatewayAttachmentsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachments sets the TransitGatewayAttachments field's value. 
+func (s *DescribeTransitGatewayAttachmentsOutput) SetTransitGatewayAttachments(v []*TransitGatewayAttachment) *DescribeTransitGatewayAttachmentsOutput { + s.TransitGatewayAttachments = v + return s +} + +type DescribeTransitGatewayMulticastDomainsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * state - The state of the transit gateway multicast domain. Valid values + // are pending | available | deleting | deleted. + // + // * transit-gateway-id - The ID of the transit gateway. + // + // * transit-gateway-multicast-domain-id - The ID of the transit gateway + // multicast domain. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainIds []*string `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayMulticastDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayMulticastDomainsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTransitGatewayMulticastDomainsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewayMulticastDomainsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTransitGatewayMulticastDomainsInput) SetDryRun(v bool) *DescribeTransitGatewayMulticastDomainsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewayMulticastDomainsInput) SetFilters(v []*Filter) *DescribeTransitGatewayMulticastDomainsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTransitGatewayMulticastDomainsInput) SetMaxResults(v int64) *DescribeTransitGatewayMulticastDomainsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayMulticastDomainsInput) SetNextToken(v string) *DescribeTransitGatewayMulticastDomainsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayMulticastDomainIds sets the TransitGatewayMulticastDomainIds field's value. +func (s *DescribeTransitGatewayMulticastDomainsInput) SetTransitGatewayMulticastDomainIds(v []*string) *DescribeTransitGatewayMulticastDomainsInput { + s.TransitGatewayMulticastDomainIds = v + return s +} + +type DescribeTransitGatewayMulticastDomainsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the transit gateway multicast domains. 
+ TransitGatewayMulticastDomains []*TransitGatewayMulticastDomain `locationName:"transitGatewayMulticastDomains" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayMulticastDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayMulticastDomainsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayMulticastDomainsOutput) SetNextToken(v string) *DescribeTransitGatewayMulticastDomainsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayMulticastDomains sets the TransitGatewayMulticastDomains field's value. +func (s *DescribeTransitGatewayMulticastDomainsOutput) SetTransitGatewayMulticastDomains(v []*TransitGatewayMulticastDomain) *DescribeTransitGatewayMulticastDomainsOutput { + s.TransitGatewayMulticastDomains = v + return s +} + +type DescribeTransitGatewayPeeringAttachmentsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * transit-gateway-attachment-id - The ID of the transit gateway attachment. + // + // * local-owner-id - The ID of your AWS account. + // + // * remote-owner-id - The ID of the AWS account in the remote Region that + // owns the transit gateway. + // + // * state - The state of the peering attachment. Valid values are available + // | deleted | deleting | failed | failing | initiatingRequest | modifying + // | pendingAcceptance | pending | rollingBack | rejected | rejecting). 
+ // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. + // + // * transit-gateway-id - The ID of the transit gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more IDs of the transit gateway peering attachments. + TransitGatewayAttachmentIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayPeeringAttachmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayPeeringAttachmentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTransitGatewayPeeringAttachmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewayPeeringAttachmentsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeTransitGatewayPeeringAttachmentsInput) SetDryRun(v bool) *DescribeTransitGatewayPeeringAttachmentsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewayPeeringAttachmentsInput) SetFilters(v []*Filter) *DescribeTransitGatewayPeeringAttachmentsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTransitGatewayPeeringAttachmentsInput) SetMaxResults(v int64) *DescribeTransitGatewayPeeringAttachmentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayPeeringAttachmentsInput) SetNextToken(v string) *DescribeTransitGatewayPeeringAttachmentsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachmentIds sets the TransitGatewayAttachmentIds field's value. +func (s *DescribeTransitGatewayPeeringAttachmentsInput) SetTransitGatewayAttachmentIds(v []*string) *DescribeTransitGatewayPeeringAttachmentsInput { + s.TransitGatewayAttachmentIds = v + return s +} + +type DescribeTransitGatewayPeeringAttachmentsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The transit gateway peering attachments. + TransitGatewayPeeringAttachments []*TransitGatewayPeeringAttachment `locationName:"transitGatewayPeeringAttachments" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayPeeringAttachmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayPeeringAttachmentsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeTransitGatewayPeeringAttachmentsOutput) SetNextToken(v string) *DescribeTransitGatewayPeeringAttachmentsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayPeeringAttachments sets the TransitGatewayPeeringAttachments field's value. +func (s *DescribeTransitGatewayPeeringAttachmentsOutput) SetTransitGatewayPeeringAttachments(v []*TransitGatewayPeeringAttachment) *DescribeTransitGatewayPeeringAttachmentsOutput { + s.TransitGatewayPeeringAttachments = v + return s +} + +type DescribeTransitGatewayRouteTablesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * default-association-route-table - Indicates whether this is the default + // association route table for the transit gateway (true | false). + // + // * default-propagation-route-table - Indicates whether this is the default + // propagation route table for the transit gateway (true | false). + // + // * state - The state of the route table (available | deleting | deleted + // | pending). + // + // * transit-gateway-id - The ID of the transit gateway. + // + // * transit-gateway-route-table-id - The ID of the transit gateway route + // table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the transit gateway route tables. 
+ TransitGatewayRouteTableIds []*string `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayRouteTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayRouteTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTransitGatewayRouteTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewayRouteTablesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTransitGatewayRouteTablesInput) SetDryRun(v bool) *DescribeTransitGatewayRouteTablesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewayRouteTablesInput) SetFilters(v []*Filter) *DescribeTransitGatewayRouteTablesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTransitGatewayRouteTablesInput) SetMaxResults(v int64) *DescribeTransitGatewayRouteTablesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayRouteTablesInput) SetNextToken(v string) *DescribeTransitGatewayRouteTablesInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableIds sets the TransitGatewayRouteTableIds field's value. 
+func (s *DescribeTransitGatewayRouteTablesInput) SetTransitGatewayRouteTableIds(v []*string) *DescribeTransitGatewayRouteTablesInput { + s.TransitGatewayRouteTableIds = v + return s +} + +type DescribeTransitGatewayRouteTablesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the transit gateway route tables. + TransitGatewayRouteTables []*TransitGatewayRouteTable `locationName:"transitGatewayRouteTables" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayRouteTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayRouteTablesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayRouteTablesOutput) SetNextToken(v string) *DescribeTransitGatewayRouteTablesOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTables sets the TransitGatewayRouteTables field's value. +func (s *DescribeTransitGatewayRouteTablesOutput) SetTransitGatewayRouteTables(v []*TransitGatewayRouteTable) *DescribeTransitGatewayRouteTablesOutput { + s.TransitGatewayRouteTables = v + return s +} + +type DescribeTransitGatewayVpcAttachmentsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * state - The state of the attachment. 
Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. + // + // * transit-gateway-attachment-id - The ID of the attachment. + // + // * transit-gateway-id - The ID of the transit gateway. + // + // * vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the attachments. + TransitGatewayAttachmentIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayVpcAttachmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayVpcAttachmentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTransitGatewayVpcAttachmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewayVpcAttachmentsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTransitGatewayVpcAttachmentsInput) SetDryRun(v bool) *DescribeTransitGatewayVpcAttachmentsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewayVpcAttachmentsInput) SetFilters(v []*Filter) *DescribeTransitGatewayVpcAttachmentsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeTransitGatewayVpcAttachmentsInput) SetMaxResults(v int64) *DescribeTransitGatewayVpcAttachmentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayVpcAttachmentsInput) SetNextToken(v string) *DescribeTransitGatewayVpcAttachmentsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachmentIds sets the TransitGatewayAttachmentIds field's value. +func (s *DescribeTransitGatewayVpcAttachmentsInput) SetTransitGatewayAttachmentIds(v []*string) *DescribeTransitGatewayVpcAttachmentsInput { + s.TransitGatewayAttachmentIds = v + return s +} + +type DescribeTransitGatewayVpcAttachmentsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the VPC attachments. + TransitGatewayVpcAttachments []*TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachments" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewayVpcAttachmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewayVpcAttachmentsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewayVpcAttachmentsOutput) SetNextToken(v string) *DescribeTransitGatewayVpcAttachmentsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayVpcAttachments sets the TransitGatewayVpcAttachments field's value. 
+func (s *DescribeTransitGatewayVpcAttachmentsOutput) SetTransitGatewayVpcAttachments(v []*TransitGatewayVpcAttachment) *DescribeTransitGatewayVpcAttachmentsOutput { + s.TransitGatewayVpcAttachments = v + return s +} + +type DescribeTransitGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * options.propagation-default-route-table-id - The ID of the default propagation + // route table. + // + // * options.amazon-side-asn - The private ASN for the Amazon side of a BGP + // session. + // + // * options.association-default-route-table-id - The ID of the default association + // route table. + // + // * options.auto-accept-shared-attachments - Indicates whether there is + // automatic acceptance of attachment requests (enable | disable). + // + // * options.default-route-table-association - Indicates whether resource + // attachments are automatically associated with the default association + // route table (enable | disable). + // + // * options.default-route-table-propagation - Indicates whether resource + // attachments automatically propagate routes to the default propagation + // route table (enable | disable). + // + // * options.dns-support - Indicates whether DNS support is enabled (enable + // | disable). + // + // * options.vpn-ecmp-support - Indicates whether Equal Cost Multipath Protocol + // support is enabled (enable | disable). + // + // * owner-id - The ID of the AWS account that owns the transit gateway. + // + // * state - The state of the transit gateway (available | deleted | deleting + // | modifying | pending). + // + // * transit-gateway-id - The ID of the transit gateway. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the transit gateways. + TransitGatewayIds []*string `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTransitGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTransitGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTransitGatewaysInput) SetDryRun(v bool) *DescribeTransitGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTransitGatewaysInput) SetFilters(v []*Filter) *DescribeTransitGatewaysInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTransitGatewaysInput) SetMaxResults(v int64) *DescribeTransitGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewaysInput) SetNextToken(v string) *DescribeTransitGatewaysInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayIds sets the TransitGatewayIds field's value. 
+func (s *DescribeTransitGatewaysInput) SetTransitGatewayIds(v []*string) *DescribeTransitGatewaysInput { + s.TransitGatewayIds = v + return s +} + +type DescribeTransitGatewaysOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the transit gateways. + TransitGateways []*TransitGateway `locationName:"transitGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeTransitGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTransitGatewaysOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTransitGatewaysOutput) SetNextToken(v string) *DescribeTransitGatewaysOutput { + s.NextToken = &v + return s +} + +// SetTransitGateways sets the TransitGateways field's value. +func (s *DescribeTransitGatewaysOutput) SetTransitGateways(v []*TransitGateway) *DescribeTransitGatewaysOutput { + s.TransitGateways = v + return s +} + +type DescribeVolumeAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute of the volume. This parameter is required. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"VolumeAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. 
+ // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVolumeAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVolumeAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeVolumeAttributeInput) SetAttribute(v string) *DescribeVolumeAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVolumeAttributeInput) SetDryRun(v bool) *DescribeVolumeAttributeInput { + s.DryRun = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *DescribeVolumeAttributeInput) SetVolumeId(v string) *DescribeVolumeAttributeInput { + s.VolumeId = &v + return s +} + +type DescribeVolumeAttributeOutput struct { + _ struct{} `type:"structure"` + + // The state of autoEnableIO attribute. + AutoEnableIO *AttributeBooleanValue `locationName:"autoEnableIO" type:"structure"` + + // A list of product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // The ID of the volume. 
+ VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s DescribeVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeAttributeOutput) GoString() string { + return s.String() +} + +// SetAutoEnableIO sets the AutoEnableIO field's value. +func (s *DescribeVolumeAttributeOutput) SetAutoEnableIO(v *AttributeBooleanValue) *DescribeVolumeAttributeOutput { + s.AutoEnableIO = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *DescribeVolumeAttributeOutput) SetProductCodes(v []*ProductCode) *DescribeVolumeAttributeOutput { + s.ProductCodes = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *DescribeVolumeAttributeOutput) SetVolumeId(v string) *DescribeVolumeAttributeOutput { + s.VolumeId = &v + return s +} + +type DescribeVolumeStatusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * action.code - The action code for the event (for example, enable-volume-io). + // + // * action.description - A description of the action. + // + // * action.event-id - The event ID associated with the action. + // + // * availability-zone - The Availability Zone of the instance. + // + // * event.description - A description of the event. + // + // * event.event-id - The event ID. + // + // * event.event-type - The event type (for io-enabled: passed | failed; + // for io-performance: io-performance:degraded | io-performance:severely-degraded + // | io-performance:stalled). 
+ // + // * event.not-after - The latest end time for the event. + // + // * event.not-before - The earliest start time for the event. + // + // * volume-status.details-name - The cause for volume-status.status (io-enabled + // | io-performance). + // + // * volume-status.details-status - The status of volume-status.details-name + // (for io-enabled: passed | failed; for io-performance: normal | degraded + // | severely-degraded | stalled). + // + // * volume-status.status - The status of the volume (ok | impaired | warning + // | insufficient-data). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumeStatus in + // paginated output. When this parameter is used, the request only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another request with + // the returned NextToken value. This value can be between 5 and 1,000; if MaxResults + // is given a value larger than 1,000, only 1,000 results are returned. If this + // parameter is not used, then DescribeVolumeStatus returns all results. You + // cannot specify this parameter and the volume IDs parameter in the same request. + MaxResults *int64 `type:"integer"` + + // The NextToken value to include in a future DescribeVolumeStatus request. + // When the results of the request exceed MaxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `type:"string"` + + // The IDs of the volumes. + // + // Default: Describes all your volumes. 
+ VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVolumeStatusInput) SetDryRun(v bool) *DescribeVolumeStatusInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVolumeStatusInput) SetFilters(v []*Filter) *DescribeVolumeStatusInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVolumeStatusInput) SetMaxResults(v int64) *DescribeVolumeStatusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumeStatusInput) SetNextToken(v string) *DescribeVolumeStatusInput { + s.NextToken = &v + return s +} + +// SetVolumeIds sets the VolumeIds field's value. +func (s *DescribeVolumeStatusInput) SetVolumeIds(v []*string) *DescribeVolumeStatusInput { + s.VolumeIds = v + return s +} + +type DescribeVolumeStatusOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the status of the volumes. + VolumeStatuses []*VolumeStatusItem `locationName:"volumeStatusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumeStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumeStatusOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeVolumeStatusOutput) SetNextToken(v string) *DescribeVolumeStatusOutput { + s.NextToken = &v + return s +} + +// SetVolumeStatuses sets the VolumeStatuses field's value. +func (s *DescribeVolumeStatusOutput) SetVolumeStatuses(v []*VolumeStatusItem) *DescribeVolumeStatusOutput { + s.VolumeStatuses = v + return s +} + +type DescribeVolumesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The filters. + // + // * attachment.attach-time - The time stamp when the attachment initiated. + // + // * attachment.delete-on-termination - Whether the volume is deleted on + // instance termination. + // + // * attachment.device - The device name specified in the block device mapping + // (for example, /dev/sda1). + // + // * attachment.instance-id - The ID of the instance the volume is attached + // to. + // + // * attachment.status - The attachment state (attaching | attached | detaching). + // + // * availability-zone - The Availability Zone in which the volume was created. + // + // * create-time - The time stamp when the volume was created. + // + // * encrypted - Indicates whether the volume is encrypted (true | false) + // + // * multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach + // (true | false) + // + // * fast-restored - Indicates whether the volume was created from a snapshot + // that is enabled for fast snapshot restore (true | false). + // + // * size - The size of the volume, in GiB. + // + // * snapshot-id - The snapshot from which the volume was created. + // + // * status - The state of the volume (creating | available | in-use | deleting + // | deleted | error). 
+ // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * volume-id - The volume ID. + // + // * volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 + // | sc1| standard) + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of volume results returned by DescribeVolumes in paginated + // output. When this parameter is used, DescribeVolumes only returns MaxResults + // results in a single page along with a NextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeVolumes + // request with the returned NextToken value. This value can be between 5 and + // 500; if MaxResults is given a value larger than 500, only 500 results are + // returned. If this parameter is not used, then DescribeVolumes returns all + // results. You cannot specify this parameter and the volume IDs parameter in + // the same request. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The NextToken value returned from a previous paginated DescribeVolumes request + // where MaxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // NextToken value. This value is null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The volume IDs. 
+ VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVolumesInput) SetDryRun(v bool) *DescribeVolumesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVolumesInput) SetFilters(v []*Filter) *DescribeVolumesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVolumesInput) SetMaxResults(v int64) *DescribeVolumesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumesInput) SetNextToken(v string) *DescribeVolumesInput { + s.NextToken = &v + return s +} + +// SetVolumeIds sets the VolumeIds field's value. +func (s *DescribeVolumesInput) SetVolumeIds(v []*string) *DescribeVolumesInput { + s.VolumeIds = v + return s +} + +type DescribeVolumesModificationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. + // + // * modification-state - The current modification state (modifying | optimizing + // | completed | failed). + // + // * original-iops - The original IOPS rate of the volume. + // + // * original-size - The original size of the volume, in GiB. + // + // * original-volume-type - The original volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). 
+ // + // * originalMultiAttachEnabled - Indicates whether Multi-Attach support + // was enabled (true | false). + // + // * start-time - The modification start time. + // + // * target-iops - The target IOPS rate of the volume. + // + // * target-size - The target size of the volume, in GiB. + // + // * target-volume-type - The target volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). + // + // * targetMultiAttachEnabled - Indicates whether Multi-Attach support is + // to be enabled (true | false). + // + // * volume-id - The ID of the volume. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results (up to a limit of 500) to be returned in a + // paginated request. + MaxResults *int64 `type:"integer"` + + // The nextToken value returned by a previous paginated request. + NextToken *string `type:"string"` + + // The IDs of the volumes. + VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesModificationsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVolumesModificationsInput) SetDryRun(v bool) *DescribeVolumesModificationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVolumesModificationsInput) SetFilters(v []*Filter) *DescribeVolumesModificationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVolumesModificationsInput) SetMaxResults(v int64) *DescribeVolumesModificationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeVolumesModificationsInput) SetNextToken(v string) *DescribeVolumesModificationsInput { + s.NextToken = &v + return s +} + +// SetVolumeIds sets the VolumeIds field's value. +func (s *DescribeVolumesModificationsInput) SetVolumeIds(v []*string) *DescribeVolumesModificationsInput { + s.VolumeIds = v + return s +} + +type DescribeVolumesModificationsOutput struct { + _ struct{} `type:"structure"` + + // Token for pagination, null if there are no more results + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volume modifications. + VolumesModifications []*VolumeModification `locationName:"volumeModificationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesModificationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumesModificationsOutput) SetNextToken(v string) *DescribeVolumesModificationsOutput { + s.NextToken = &v + return s +} + +// SetVolumesModifications sets the VolumesModifications field's value. +func (s *DescribeVolumesModificationsOutput) SetVolumesModifications(v []*VolumeModification) *DescribeVolumesModificationsOutput { + s.VolumesModifications = v + return s +} + +type DescribeVolumesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeVolumes request. When + // the results of a DescribeVolumes request exceed MaxResults, this value can + // be used to retrieve the next page of results. This value is null when there + // are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the volumes. 
+ Volumes []*Volume `locationName:"volumeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumesOutput) SetNextToken(v string) *DescribeVolumesOutput { + s.NextToken = &v + return s +} + +// SetVolumes sets the Volumes field's value. +func (s *DescribeVolumesOutput) SetVolumes(v []*Volume) *DescribeVolumesOutput { + s.Volumes = v + return s +} + +type DescribeVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // The VPC attribute. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"VpcAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeVpcAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeVpcAttributeInput) SetAttribute(v string) *DescribeVpcAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcAttributeInput) SetDryRun(v bool) *DescribeVpcAttributeInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DescribeVpcAttributeInput) SetVpcId(v string) *DescribeVpcAttributeInput { + s.VpcId = &v + return s +} + +type DescribeVpcAttributeOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // this attribute is true, instances in the VPC get DNS hostnames; otherwise, + // they do not. + EnableDnsHostnames *AttributeBooleanValue `locationName:"enableDnsHostnames" type:"structure"` + + // Indicates whether DNS resolution is enabled for the VPC. If this attribute + // is true, the Amazon DNS server resolves DNS hostnames for your instances + // to their corresponding IP addresses; otherwise, it does not. + EnableDnsSupport *AttributeBooleanValue `locationName:"enableDnsSupport" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcAttributeOutput) GoString() string { + return s.String() +} + +// SetEnableDnsHostnames sets the EnableDnsHostnames field's value. 
+func (s *DescribeVpcAttributeOutput) SetEnableDnsHostnames(v *AttributeBooleanValue) *DescribeVpcAttributeOutput { + s.EnableDnsHostnames = v + return s +} + +// SetEnableDnsSupport sets the EnableDnsSupport field's value. +func (s *DescribeVpcAttributeOutput) SetEnableDnsSupport(v *AttributeBooleanValue) *DescribeVpcAttributeOutput { + s.EnableDnsSupport = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DescribeVpcAttributeOutput) SetVpcId(v string) *DescribeVpcAttributeOutput { + s.VpcId = &v + return s +} + +type DescribeVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // One or more VPC IDs. + VpcIds []*string `locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVpcClassicLinkDnsSupportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcClassicLinkDnsSupportInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeVpcClassicLinkDnsSupportInput) SetMaxResults(v int64) *DescribeVpcClassicLinkDnsSupportInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcClassicLinkDnsSupportInput) SetNextToken(v string) *DescribeVpcClassicLinkDnsSupportInput { + s.NextToken = &v + return s +} + +// SetVpcIds sets the VpcIds field's value. +func (s *DescribeVpcClassicLinkDnsSupportInput) SetVpcIds(v []*string) *DescribeVpcClassicLinkDnsSupportInput { + s.VpcIds = v + return s +} + +type DescribeVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Information about the ClassicLink DNS support status of the VPCs. + Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcClassicLinkDnsSupportOutput) SetNextToken(v string) *DescribeVpcClassicLinkDnsSupportOutput { + s.NextToken = &v + return s +} + +// SetVpcs sets the Vpcs field's value. +func (s *DescribeVpcClassicLinkDnsSupportOutput) SetVpcs(v []*ClassicLinkDnsSupport) *DescribeVpcClassicLinkDnsSupportOutput { + s.Vpcs = v + return s +} + +type DescribeVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * is-classic-link-enabled - Whether the VPC is enabled for ClassicLink + // (true | false). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPCs for which you want to describe the ClassicLink status. + VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcClassicLinkInput) SetDryRun(v bool) *DescribeVpcClassicLinkInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcClassicLinkInput) SetFilters(v []*Filter) *DescribeVpcClassicLinkInput { + s.Filters = v + return s +} + +// SetVpcIds sets the VpcIds field's value. +func (s *DescribeVpcClassicLinkInput) SetVpcIds(v []*string) *DescribeVpcClassicLinkInput { + s.VpcIds = v + return s +} + +type DescribeVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // The ClassicLink status of one or more VPCs. 
+ Vpcs []*VpcClassicLink `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// SetVpcs sets the Vpcs field's value. +func (s *DescribeVpcClassicLinkOutput) SetVpcs(v []*VpcClassicLink) *DescribeVpcClassicLinkOutput { + s.Vpcs = v + return s +} + +type DescribeVpcEndpointConnectionNotificationsInput struct { + _ struct{} `type:"structure"` + + // The ID of the notification. + ConnectionNotificationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * connection-notification-arn - The ARN of the SNS topic for the notification. + // + // * connection-notification-id - The ID of the notification. + // + // * connection-notification-state - The state of the notification (Enabled + // | Disabled). + // + // * connection-notification-type - The type of notification (Topic). + // + // * service-id - The ID of the endpoint service. + // + // * vpc-endpoint-id - The ID of the VPC endpoint. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another request with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointConnectionNotificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointConnectionNotificationsInput) GoString() string { + return s.String() +} + +// SetConnectionNotificationId sets the ConnectionNotificationId field's value. +func (s *DescribeVpcEndpointConnectionNotificationsInput) SetConnectionNotificationId(v string) *DescribeVpcEndpointConnectionNotificationsInput { + s.ConnectionNotificationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcEndpointConnectionNotificationsInput) SetDryRun(v bool) *DescribeVpcEndpointConnectionNotificationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointConnectionNotificationsInput) SetFilters(v []*Filter) *DescribeVpcEndpointConnectionNotificationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcEndpointConnectionNotificationsInput) SetMaxResults(v int64) *DescribeVpcEndpointConnectionNotificationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointConnectionNotificationsInput) SetNextToken(v string) *DescribeVpcEndpointConnectionNotificationsInput { + s.NextToken = &v + return s +} + +type DescribeVpcEndpointConnectionNotificationsOutput struct { + _ struct{} `type:"structure"` + + // One or more notifications. + ConnectionNotificationSet []*ConnectionNotification `locationName:"connectionNotificationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointConnectionNotificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointConnectionNotificationsOutput) GoString() string { + return s.String() +} + +// SetConnectionNotificationSet sets the ConnectionNotificationSet field's value. +func (s *DescribeVpcEndpointConnectionNotificationsOutput) SetConnectionNotificationSet(v []*ConnectionNotification) *DescribeVpcEndpointConnectionNotificationsOutput { + s.ConnectionNotificationSet = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointConnectionNotificationsOutput) SetNextToken(v string) *DescribeVpcEndpointConnectionNotificationsOutput { + s.NextToken = &v + return s +} + +type DescribeVpcEndpointConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * service-id - The ID of the service. + // + // * vpc-endpoint-owner - The AWS account number of the owner of the endpoint. + // + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). + // + // * vpc-endpoint-id - The ID of the endpoint. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. 
This value can be between 5 and + // 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results + // are returned. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointConnectionsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcEndpointConnectionsInput) SetDryRun(v bool) *DescribeVpcEndpointConnectionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointConnectionsInput) SetFilters(v []*Filter) *DescribeVpcEndpointConnectionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcEndpointConnectionsInput) SetMaxResults(v int64) *DescribeVpcEndpointConnectionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointConnectionsInput) SetNextToken(v string) *DescribeVpcEndpointConnectionsInput { + s.NextToken = &v + return s +} + +type DescribeVpcEndpointConnectionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more VPC endpoint connections. 
+ VpcEndpointConnections []*VpcEndpointConnection `locationName:"vpcEndpointConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointConnectionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointConnectionsOutput) SetNextToken(v string) *DescribeVpcEndpointConnectionsOutput { + s.NextToken = &v + return s +} + +// SetVpcEndpointConnections sets the VpcEndpointConnections field's value. +func (s *DescribeVpcEndpointConnectionsOutput) SetVpcEndpointConnections(v []*VpcEndpointConnection) *DescribeVpcEndpointConnectionsOutput { + s.VpcEndpointConnections = v + return s +} + +type DescribeVpcEndpointServiceConfigurationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * service-name - The name of the service. + // + // * service-id - The ID of the service. + // + // * service-state - The state of the service (Pending | Available | Deleting + // | Deleted | Failed). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. 
Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results + // are returned. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + + // The IDs of one or more services. + ServiceIds []*string `locationName:"ServiceId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServiceConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServiceConfigurationsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcEndpointServiceConfigurationsInput) SetDryRun(v bool) *DescribeVpcEndpointServiceConfigurationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointServiceConfigurationsInput) SetFilters(v []*Filter) *DescribeVpcEndpointServiceConfigurationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcEndpointServiceConfigurationsInput) SetMaxResults(v int64) *DescribeVpcEndpointServiceConfigurationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointServiceConfigurationsInput) SetNextToken(v string) *DescribeVpcEndpointServiceConfigurationsInput { + s.NextToken = &v + return s +} + +// SetServiceIds sets the ServiceIds field's value. 
+func (s *DescribeVpcEndpointServiceConfigurationsInput) SetServiceIds(v []*string) *DescribeVpcEndpointServiceConfigurationsInput { + s.ServiceIds = v + return s +} + +type DescribeVpcEndpointServiceConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more services. + ServiceConfigurations []*ServiceConfiguration `locationName:"serviceConfigurationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServiceConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServiceConfigurationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointServiceConfigurationsOutput) SetNextToken(v string) *DescribeVpcEndpointServiceConfigurationsOutput { + s.NextToken = &v + return s +} + +// SetServiceConfigurations sets the ServiceConfigurations field's value. +func (s *DescribeVpcEndpointServiceConfigurationsOutput) SetServiceConfigurations(v []*ServiceConfiguration) *DescribeVpcEndpointServiceConfigurationsOutput { + s.ServiceConfigurations = v + return s +} + +type DescribeVpcEndpointServicePermissionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * principal - The ARN of the principal. 
+ // + // * principal-type - The principal type (All | Service | OrganizationUnit + // | Account | User | Role). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results of the initial request can be seen by sending another + // request with the returned NextToken value. This value can be between 5 and + // 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results + // are returned. + MaxResults *int64 `type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `type:"string"` + + // The ID of the service. + // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVpcEndpointServicePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcEndpointServicePermissionsInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcEndpointServicePermissionsInput) SetDryRun(v bool) *DescribeVpcEndpointServicePermissionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointServicePermissionsInput) SetFilters(v []*Filter) *DescribeVpcEndpointServicePermissionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeVpcEndpointServicePermissionsInput) SetMaxResults(v int64) *DescribeVpcEndpointServicePermissionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointServicePermissionsInput) SetNextToken(v string) *DescribeVpcEndpointServicePermissionsInput { + s.NextToken = &v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *DescribeVpcEndpointServicePermissionsInput) SetServiceId(v string) *DescribeVpcEndpointServicePermissionsInput { + s.ServiceId = &v + return s +} + +type DescribeVpcEndpointServicePermissionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more allowed principals. + AllowedPrincipals []*AllowedPrincipal `locationName:"allowedPrincipals" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicePermissionsOutput) GoString() string { + return s.String() +} + +// SetAllowedPrincipals sets the AllowedPrincipals field's value. +func (s *DescribeVpcEndpointServicePermissionsOutput) SetAllowedPrincipals(v []*AllowedPrincipal) *DescribeVpcEndpointServicePermissionsOutput { + s.AllowedPrincipals = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointServicePermissionsOutput) SetNextToken(v string) *DescribeVpcEndpointServicePermissionsOutput { + s.NextToken = &v + return s +} + +// Contains the parameters for DescribeVpcEndpointServices. 
+type DescribeVpcEndpointServicesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * service-name - The name of the service. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1,000, we return only 1,000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more service names. + ServiceNames []*string `locationName:"ServiceName" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DescribeVpcEndpointServicesInput) SetDryRun(v bool) *DescribeVpcEndpointServicesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointServicesInput) SetFilters(v []*Filter) *DescribeVpcEndpointServicesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcEndpointServicesInput) SetMaxResults(v int64) *DescribeVpcEndpointServicesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointServicesInput) SetNextToken(v string) *DescribeVpcEndpointServicesInput { + s.NextToken = &v + return s +} + +// SetServiceNames sets the ServiceNames field's value. +func (s *DescribeVpcEndpointServicesInput) SetServiceNames(v []*string) *DescribeVpcEndpointServicesInput { + s.ServiceNames = v + return s +} + +// Contains the output of DescribeVpcEndpointServices. +type DescribeVpcEndpointServicesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the service. + ServiceDetails []*ServiceDetail `locationName:"serviceDetailSet" locationNameList:"item" type:"list"` + + // A list of supported services. + ServiceNames []*string `locationName:"serviceNameSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointServicesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeVpcEndpointServicesOutput) SetNextToken(v string) *DescribeVpcEndpointServicesOutput { + s.NextToken = &v + return s +} + +// SetServiceDetails sets the ServiceDetails field's value. +func (s *DescribeVpcEndpointServicesOutput) SetServiceDetails(v []*ServiceDetail) *DescribeVpcEndpointServicesOutput { + s.ServiceDetails = v + return s +} + +// SetServiceNames sets the ServiceNames field's value. +func (s *DescribeVpcEndpointServicesOutput) SetServiceNames(v []*string) *DescribeVpcEndpointServicesOutput { + s.ServiceNames = v + return s +} + +// Contains the parameters for DescribeVpcEndpoints. +type DescribeVpcEndpointsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * service-name - The name of the service. + // + // * vpc-id - The ID of the VPC in which the endpoint resides. + // + // * vpc-endpoint-id - The ID of the endpoint. + // + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). + // + // * vpc-endpoint-type - The type of VPC endpoint (Interface | Gateway | + // GatewayLoadBalancer). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + // + // Constraint: If the value is greater than 1,000, we return only 1,000 items. + MaxResults *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `type:"string"` + + // One or more endpoint IDs. + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcEndpointsInput) SetDryRun(v bool) *DescribeVpcEndpointsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcEndpointsInput) SetFilters(v []*Filter) *DescribeVpcEndpointsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcEndpointsInput) SetMaxResults(v int64) *DescribeVpcEndpointsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointsInput) SetNextToken(v string) *DescribeVpcEndpointsInput { + s.NextToken = &v + return s +} + +// SetVpcEndpointIds sets the VpcEndpointIds field's value. +func (s *DescribeVpcEndpointsInput) SetVpcEndpointIds(v []*string) *DescribeVpcEndpointsInput { + s.VpcEndpointIds = v + return s +} + +// Contains the output of DescribeVpcEndpoints. +type DescribeVpcEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. 
If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the endpoints. + VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcEndpointsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcEndpointsOutput) SetNextToken(v string) *DescribeVpcEndpointsOutput { + s.NextToken = &v + return s +} + +// SetVpcEndpoints sets the VpcEndpoints field's value. +func (s *DescribeVpcEndpointsOutput) SetVpcEndpoints(v []*VpcEndpoint) *DescribeVpcEndpointsOutput { + s.VpcEndpoints = v + return s +} + +type DescribeVpcPeeringConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC. + // + // * accepter-vpc-info.owner-id - The AWS account ID of the owner of the + // accepter VPC. + // + // * accepter-vpc-info.vpc-id - The ID of the accepter VPC. + // + // * expiration-time - The expiration date and time for the VPC peering connection. + // + // * requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's + // VPC. + // + // * requester-vpc-info.owner-id - The AWS account ID of the owner of the + // requester VPC. + // + // * requester-vpc-info.vpc-id - The ID of the requester VPC. 
+ // + // * status-code - The status of the VPC peering connection (pending-acceptance + // | failed | expired | provisioning | active | deleting | deleted | rejected). + // + // * status-message - A message that provides more information about the + // status of the VPC peering connection, if applicable. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-peering-connection-id - The ID of the VPC peering connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more VPC peering connection IDs. + // + // Default: Describes all your VPC peering connections. + VpcPeeringConnectionIds []*string `locationName:"VpcPeeringConnectionId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeVpcPeeringConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcPeeringConnectionsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetDryRun(v bool) *DescribeVpcPeeringConnectionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetFilters(v []*Filter) *DescribeVpcPeeringConnectionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetMaxResults(v int64) *DescribeVpcPeeringConnectionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetNextToken(v string) *DescribeVpcPeeringConnectionsInput { + s.NextToken = &v + return s +} + +// SetVpcPeeringConnectionIds sets the VpcPeeringConnectionIds field's value. +func (s *DescribeVpcPeeringConnectionsInput) SetVpcPeeringConnectionIds(v []*string) *DescribeVpcPeeringConnectionsInput { + s.VpcPeeringConnectionIds = v + return s +} + +type DescribeVpcPeeringConnectionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the VPC peering connections. 
+ VpcPeeringConnections []*VpcPeeringConnection `locationName:"vpcPeeringConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcPeeringConnectionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcPeeringConnectionsOutput) SetNextToken(v string) *DescribeVpcPeeringConnectionsOutput { + s.NextToken = &v + return s +} + +// SetVpcPeeringConnections sets the VpcPeeringConnections field's value. +func (s *DescribeVpcPeeringConnectionsOutput) SetVpcPeeringConnections(v []*VpcPeeringConnection) *DescribeVpcPeeringConnectionsOutput { + s.VpcPeeringConnections = v + return s +} + +type DescribeVpcsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify + // must exactly match the VPC's CIDR block for information to be returned + // for the VPC. Must contain the slash followed by one or two digits (for + // example, /28). + // + // * cidr-block-association.cidr-block - An IPv4 CIDR block associated with + // the VPC. + // + // * cidr-block-association.association-id - The association ID for an IPv4 + // CIDR block associated with the VPC. + // + // * cidr-block-association.state - The state of an IPv4 CIDR block associated + // with the VPC. + // + // * dhcp-options-id - The ID of a set of DHCP options. 
+ // + // * ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated + // with the VPC. + // + // * ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address pool + // from which the IPv6 CIDR block is allocated. + // + // * ipv6-cidr-block-association.association-id - The association ID for + // an IPv6 CIDR block associated with the VPC. + // + // * ipv6-cidr-block-association.state - The state of an IPv6 CIDR block + // associated with the VPC. + // + // * isDefault - Indicates whether the VPC is the default VPC. + // + // * owner-id - The ID of the AWS account that owns the VPC. + // + // * state - The state of the VPC (pending | available). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more VPC IDs. + // + // Default: Describes all your VPCs. 
+ VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVpcsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVpcsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpcsInput) SetDryRun(v bool) *DescribeVpcsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpcsInput) SetFilters(v []*Filter) *DescribeVpcsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVpcsInput) SetMaxResults(v int64) *DescribeVpcsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcsInput) SetNextToken(v string) *DescribeVpcsInput { + s.NextToken = &v + return s +} + +// SetVpcIds sets the VpcIds field's value. +func (s *DescribeVpcsInput) SetVpcIds(v []*string) *DescribeVpcsInput { + s.VpcIds = v + return s +} + +type DescribeVpcsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about one or more VPCs. 
+ Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVpcsOutput) SetNextToken(v string) *DescribeVpcsOutput { + s.NextToken = &v + return s +} + +// SetVpcs sets the Vpcs field's value. +func (s *DescribeVpcsOutput) SetVpcs(v []*Vpc) *DescribeVpcsOutput { + s.Vpcs = v + return s +} + +// Contains the parameters for DescribeVpnConnections. +type DescribeVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * customer-gateway-configuration - The configuration information for the + // customer gateway. + // + // * customer-gateway-id - The ID of a customer gateway associated with the + // VPN connection. + // + // * state - The state of the VPN connection (pending | available | deleting + // | deleted). + // + // * option.static-routes-only - Indicates whether the connection has static + // routes only. Used for devices that do not support Border Gateway Protocol + // (BGP). + // + // * route.destination-cidr-block - The destination CIDR block. This corresponds + // to the subnet used in a customer data center. + // + // * bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // device. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * type - The type of VPN connection. Currently the only supported type + // is ipsec.1. + // + // * vpn-connection-id - The ID of the VPN connection. + // + // * vpn-gateway-id - The ID of a virtual private gateway associated with + // the VPN connection. + // + // * transit-gateway-id - The ID of a transit gateway associated with the + // VPN connection. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more VPN connection IDs. + // + // Default: Describes your VPN connections. + VpnConnectionIds []*string `locationName:"VpnConnectionId" locationNameList:"VpnConnectionId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpnConnectionsInput) SetDryRun(v bool) *DescribeVpnConnectionsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpnConnectionsInput) SetFilters(v []*Filter) *DescribeVpnConnectionsInput { + s.Filters = v + return s +} + +// SetVpnConnectionIds sets the VpnConnectionIds field's value. +func (s *DescribeVpnConnectionsInput) SetVpnConnectionIds(v []*string) *DescribeVpnConnectionsInput { + s.VpnConnectionIds = v + return s +} + +// Contains the output of DescribeVpnConnections. 
+type DescribeVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more VPN connections. + VpnConnections []*VpnConnection `locationName:"vpnConnectionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnConnectionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnections sets the VpnConnections field's value. +func (s *DescribeVpnConnectionsOutput) SetVpnConnections(v []*VpnConnection) *DescribeVpnConnectionsOutput { + s.VpnConnections = v + return s +} + +// Contains the parameters for DescribeVpnGateways. +type DescribeVpnGatewaysInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // One or more filters. + // + // * amazon-side-asn - The Autonomous System Number (ASN) for the Amazon + // side of the gateway. + // + // * attachment.state - The current state of the attachment between the gateway + // and the VPC (attaching | attached | detaching | detached). + // + // * attachment.vpc-id - The ID of an attached VPC. + // + // * availability-zone - The Availability Zone for the virtual private gateway + // (if applicable). + // + // * state - The state of the virtual private gateway (pending | available + // | deleting | deleted). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. 
+ // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * type - The type of virtual private gateway. Currently the only supported + // type is ipsec.1. + // + // * vpn-gateway-id - The ID of the virtual private gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more virtual private gateway IDs. + // + // Default: Describes all your virtual private gateways. + VpnGatewayIds []*string `locationName:"VpnGatewayId" locationNameList:"VpnGatewayId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVpnGatewaysInput) SetDryRun(v bool) *DescribeVpnGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVpnGatewaysInput) SetFilters(v []*Filter) *DescribeVpnGatewaysInput { + s.Filters = v + return s +} + +// SetVpnGatewayIds sets the VpnGatewayIds field's value. +func (s *DescribeVpnGatewaysInput) SetVpnGatewayIds(v []*string) *DescribeVpnGatewaysInput { + s.VpnGatewayIds = v + return s +} + +// Contains the output of DescribeVpnGateways. +type DescribeVpnGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more virtual private gateways. 
+ VpnGateways []*VpnGateway `locationName:"vpnGatewaySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpnGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpnGatewaysOutput) GoString() string { + return s.String() +} + +// SetVpnGateways sets the VpnGateways field's value. +func (s *DescribeVpnGatewaysOutput) SetVpnGateways(v []*VpnGateway) *DescribeVpnGatewaysOutput { + s.VpnGateways = v + return s +} + +type DetachClassicLinkVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance to unlink from the VPC. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // The ID of the VPC to which the instance is linked. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DetachClassicLinkVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachClassicLinkVpcInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DetachClassicLinkVpcInput) SetDryRun(v bool) *DetachClassicLinkVpcInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DetachClassicLinkVpcInput) SetInstanceId(v string) *DetachClassicLinkVpcInput { + s.InstanceId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DetachClassicLinkVpcInput) SetVpcId(v string) *DetachClassicLinkVpcInput { + s.VpcId = &v + return s +} + +type DetachClassicLinkVpcOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DetachClassicLinkVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachClassicLinkVpcOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DetachClassicLinkVpcOutput) SetReturn(v bool) *DetachClassicLinkVpcOutput { + s.Return = &v + return s +} + +type DetachInternetGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the internet gateway. 
+ // + // InternetGatewayId is a required field + InternetGatewayId *string `locationName:"internetGatewayId" type:"string" required:"true"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachInternetGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachInternetGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachInternetGatewayInput"} + if s.InternetGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("InternetGatewayId")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DetachInternetGatewayInput) SetDryRun(v bool) *DetachInternetGatewayInput { + s.DryRun = &v + return s +} + +// SetInternetGatewayId sets the InternetGatewayId field's value. +func (s *DetachInternetGatewayInput) SetInternetGatewayId(v string) *DetachInternetGatewayInput { + s.InternetGatewayId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DetachInternetGatewayInput) SetVpcId(v string) *DetachInternetGatewayInput { + s.VpcId = &v + return s +} + +type DetachInternetGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachInternetGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachInternetGatewayOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachNetworkInterface. 
+type DetachNetworkInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the attachment. + // + // AttachmentId is a required field + AttachmentId *string `locationName:"attachmentId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether to force a detachment. + // + // * Use the Force parameter only as a last resort to detach a network interface + // from a failed instance. + // + // * If you use the Force parameter to detach a network interface, you might + // not be able to attach a different network interface to the same index + // on the instance without first stopping and starting the instance. + // + // * If you force the detachment of a network interface, the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // might not get updated. This means that the attributes associated with + // the detached network interface might still be visible. The instance metadata + // will get updated when you stop and start the instance. + Force *bool `locationName:"force" type:"boolean"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DetachNetworkInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachNetworkInterfaceInput"} + if s.AttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("AttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *DetachNetworkInterfaceInput) SetAttachmentId(v string) *DetachNetworkInterfaceInput { + s.AttachmentId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DetachNetworkInterfaceInput) SetDryRun(v bool) *DetachNetworkInterfaceInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DetachNetworkInterfaceInput) SetForce(v bool) *DetachNetworkInterfaceInput { + s.Force = &v + return s +} + +type DetachNetworkInterfaceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachNetworkInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachNetworkInterfaceOutput) GoString() string { + return s.String() +} + +type DetachVolumeInput struct { + _ struct{} `type:"structure"` + + // The device name. + Device *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces detachment if the previous detachment attempt did not occur cleanly + // (for example, logging into an instance, unmounting the volume, and detaching + // normally). This option can lead to data loss or a corrupted file system. + // Use this option only as a last resort to detach a volume from a failed instance. 
+ // The instance won't have an opportunity to flush file system caches or file + // system metadata. If you use this option, you must perform file system check + // and repair procedures. + Force *bool `type:"boolean"` + + // The ID of the instance. If you are detaching a Multi-Attach enabled volume, + // you must specify an instance ID. + InstanceId *string `type:"string"` + + // The ID of the volume. + // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDevice sets the Device field's value. +func (s *DetachVolumeInput) SetDevice(v string) *DetachVolumeInput { + s.Device = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DetachVolumeInput) SetDryRun(v bool) *DetachVolumeInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DetachVolumeInput) SetForce(v bool) *DetachVolumeInput { + s.Force = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DetachVolumeInput) SetInstanceId(v string) *DetachVolumeInput { + s.InstanceId = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *DetachVolumeInput) SetVolumeId(v string) *DetachVolumeInput { + s.VolumeId = &v + return s +} + +// Contains the parameters for DetachVpnGateway. 
+type DetachVpnGatewayInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway. + // + // VpnGatewayId is a required field + VpnGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachVpnGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachVpnGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachVpnGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + if s.VpnGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DetachVpnGatewayInput) SetDryRun(v bool) *DetachVpnGatewayInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DetachVpnGatewayInput) SetVpcId(v string) *DetachVpnGatewayInput { + s.VpcId = &v + return s +} + +// SetVpnGatewayId sets the VpnGatewayId field's value. 
+func (s *DetachVpnGatewayInput) SetVpnGatewayId(v string) *DetachVpnGatewayInput { + s.VpnGatewayId = &v + return s +} + +type DetachVpnGatewayOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachVpnGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachVpnGatewayOutput) GoString() string { + return s.String() +} + +// Describes a DHCP configuration option. +type DhcpConfiguration struct { + _ struct{} `type:"structure"` + + // The name of a DHCP option. + Key *string `locationName:"key" type:"string"` + + // One or more values for the DHCP option. + Values []*AttributeValue `locationName:"valueSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpConfiguration) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *DhcpConfiguration) SetKey(v string) *DhcpConfiguration { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *DhcpConfiguration) SetValues(v []*AttributeValue) *DhcpConfiguration { + s.Values = v + return s +} + +// Describes a set of DHCP options. +type DhcpOptions struct { + _ struct{} `type:"structure"` + + // One or more DHCP options in the set. + DhcpConfigurations []*DhcpConfiguration `locationName:"dhcpConfigurationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The ID of the AWS account that owns the DHCP options set. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the DHCP options set. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DhcpOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DhcpOptions) GoString() string { + return s.String() +} + +// SetDhcpConfigurations sets the DhcpConfigurations field's value. +func (s *DhcpOptions) SetDhcpConfigurations(v []*DhcpConfiguration) *DhcpOptions { + s.DhcpConfigurations = v + return s +} + +// SetDhcpOptionsId sets the DhcpOptionsId field's value. +func (s *DhcpOptions) SetDhcpOptionsId(v string) *DhcpOptions { + s.DhcpOptionsId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *DhcpOptions) SetOwnerId(v string) *DhcpOptions { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DhcpOptions) SetTags(v []*Tag) *DhcpOptions { + s.Tags = v + return s +} + +// Describes an Active Directory. +type DirectoryServiceAuthentication struct { + _ struct{} `type:"structure"` + + // The ID of the Active Directory used for authentication. + DirectoryId *string `locationName:"directoryId" type:"string"` +} + +// String returns the string representation +func (s DirectoryServiceAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryServiceAuthentication) GoString() string { + return s.String() +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *DirectoryServiceAuthentication) SetDirectoryId(v string) *DirectoryServiceAuthentication { + s.DirectoryId = &v + return s +} + +// Describes the Active Directory to be used for client authentication. +type DirectoryServiceAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // The ID of the Active Directory to be used for authentication. 
+ DirectoryId *string `type:"string"` +} + +// String returns the string representation +func (s DirectoryServiceAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryServiceAuthenticationRequest) GoString() string { + return s.String() +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *DirectoryServiceAuthenticationRequest) SetDirectoryId(v string) *DirectoryServiceAuthenticationRequest { + s.DirectoryId = &v + return s +} + +type DisableEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DisableEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableEbsEncryptionByDefaultInput) SetDryRun(v bool) *DisableEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type DisableEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // The updated status of encryption by default. + EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s DisableEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. 
+func (s *DisableEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *DisableEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + +// Contains information about the errors that occurred when disabling fast snapshot +// restores. +type DisableFastSnapshotRestoreErrorItem struct { + _ struct{} `type:"structure"` + + // The errors. + FastSnapshotRestoreStateErrors []*DisableFastSnapshotRestoreStateErrorItem `locationName:"fastSnapshotRestoreStateErrorSet" locationNameList:"item" type:"list"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoreErrorItem) GoString() string { + return s.String() +} + +// SetFastSnapshotRestoreStateErrors sets the FastSnapshotRestoreStateErrors field's value. +func (s *DisableFastSnapshotRestoreErrorItem) SetFastSnapshotRestoreStateErrors(v []*DisableFastSnapshotRestoreStateErrorItem) *DisableFastSnapshotRestoreErrorItem { + s.FastSnapshotRestoreStateErrors = v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *DisableFastSnapshotRestoreErrorItem) SetSnapshotId(v string) *DisableFastSnapshotRestoreErrorItem { + s.SnapshotId = &v + return s +} + +// Describes an error that occurred when disabling fast snapshot restores. +type DisableFastSnapshotRestoreStateError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreStateError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoreStateError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *DisableFastSnapshotRestoreStateError) SetCode(v string) *DisableFastSnapshotRestoreStateError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DisableFastSnapshotRestoreStateError) SetMessage(v string) *DisableFastSnapshotRestoreStateError { + s.Message = &v + return s +} + +// Contains information about an error that occurred when disabling fast snapshot +// restores. +type DisableFastSnapshotRestoreStateErrorItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The error. + Error *DisableFastSnapshotRestoreStateError `locationName:"error" type:"structure"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreStateErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoreStateErrorItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DisableFastSnapshotRestoreStateErrorItem) SetAvailabilityZone(v string) *DisableFastSnapshotRestoreStateErrorItem { + s.AvailabilityZone = &v + return s +} + +// SetError sets the Error field's value. +func (s *DisableFastSnapshotRestoreStateErrorItem) SetError(v *DisableFastSnapshotRestoreStateError) *DisableFastSnapshotRestoreStateErrorItem { + s.Error = v + return s +} + +// Describes fast snapshot restores that were successfully disabled. 
+type DisableFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. + DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores for the snapshot. + State *string `locationName:"state" type:"string" enum:"FastSnapshotRestoreStateCode"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. + // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. 
+ StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoreSuccessItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetAvailabilityZone(v string) *DisableFastSnapshotRestoreSuccessItem { + s.AvailabilityZone = &v + return s +} + +// SetDisabledTime sets the DisabledTime field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetDisabledTime(v time.Time) *DisableFastSnapshotRestoreSuccessItem { + s.DisabledTime = &v + return s +} + +// SetDisablingTime sets the DisablingTime field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetDisablingTime(v time.Time) *DisableFastSnapshotRestoreSuccessItem { + s.DisablingTime = &v + return s +} + +// SetEnabledTime sets the EnabledTime field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetEnabledTime(v time.Time) *DisableFastSnapshotRestoreSuccessItem { + s.EnabledTime = &v + return s +} + +// SetEnablingTime sets the EnablingTime field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetEnablingTime(v time.Time) *DisableFastSnapshotRestoreSuccessItem { + s.EnablingTime = &v + return s +} + +// SetOptimizingTime sets the OptimizingTime field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetOptimizingTime(v time.Time) *DisableFastSnapshotRestoreSuccessItem { + s.OptimizingTime = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetOwnerAlias(v string) *DisableFastSnapshotRestoreSuccessItem { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. 
+func (s *DisableFastSnapshotRestoreSuccessItem) SetOwnerId(v string) *DisableFastSnapshotRestoreSuccessItem { + s.OwnerId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetSnapshotId(v string) *DisableFastSnapshotRestoreSuccessItem { + s.SnapshotId = &v + return s +} + +// SetState sets the State field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetState(v string) *DisableFastSnapshotRestoreSuccessItem { + s.State = &v + return s +} + +// SetStateTransitionReason sets the StateTransitionReason field's value. +func (s *DisableFastSnapshotRestoreSuccessItem) SetStateTransitionReason(v string) *DisableFastSnapshotRestoreSuccessItem { + s.StateTransitionReason = &v + return s +} + +type DisableFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones. For example, us-east-2a. + // + // AvailabilityZones is a required field + AvailabilityZones []*string `locationName:"AvailabilityZone" locationNameList:"AvailabilityZone" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more snapshots. For example, snap-1234567890abcdef0. 
+ // + // SourceSnapshotIds is a required field + SourceSnapshotIds []*string `locationName:"SourceSnapshotId" locationNameList:"SnapshotId" type:"list" required:"true"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoresInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableFastSnapshotRestoresInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableFastSnapshotRestoresInput"} + if s.AvailabilityZones == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZones")) + } + if s.SourceSnapshotIds == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *DisableFastSnapshotRestoresInput) SetAvailabilityZones(v []*string) *DisableFastSnapshotRestoresInput { + s.AvailabilityZones = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableFastSnapshotRestoresInput) SetDryRun(v bool) *DisableFastSnapshotRestoresInput { + s.DryRun = &v + return s +} + +// SetSourceSnapshotIds sets the SourceSnapshotIds field's value. +func (s *DisableFastSnapshotRestoresInput) SetSourceSnapshotIds(v []*string) *DisableFastSnapshotRestoresInput { + s.SourceSnapshotIds = v + return s +} + +type DisableFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the snapshots for which fast snapshot restores were successfully + // disabled. + Successful []*DisableFastSnapshotRestoreSuccessItem `locationName:"successful" locationNameList:"item" type:"list"` + + // Information about the snapshots for which fast snapshot restores could not + // be disabled. 
+ Unsuccessful []*DisableFastSnapshotRestoreErrorItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableFastSnapshotRestoresOutput) GoString() string { + return s.String() +} + +// SetSuccessful sets the Successful field's value. +func (s *DisableFastSnapshotRestoresOutput) SetSuccessful(v []*DisableFastSnapshotRestoreSuccessItem) *DisableFastSnapshotRestoresOutput { + s.Successful = v + return s +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *DisableFastSnapshotRestoresOutput) SetUnsuccessful(v []*DisableFastSnapshotRestoreErrorItem) *DisableFastSnapshotRestoresOutput { + s.Unsuccessful = v + return s +} + +type DisableTransitGatewayRouteTablePropagationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` + + // The ID of the propagation route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableTransitGatewayRouteTablePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableTransitGatewayRouteTablePropagationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisableTransitGatewayRouteTablePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableTransitGatewayRouteTablePropagationInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableTransitGatewayRouteTablePropagationInput) SetDryRun(v bool) *DisableTransitGatewayRouteTablePropagationInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *DisableTransitGatewayRouteTablePropagationInput) SetTransitGatewayAttachmentId(v string) *DisableTransitGatewayRouteTablePropagationInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DisableTransitGatewayRouteTablePropagationInput) SetTransitGatewayRouteTableId(v string) *DisableTransitGatewayRouteTablePropagationInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DisableTransitGatewayRouteTablePropagationOutput struct { + _ struct{} `type:"structure"` + + // Information about route propagation. + Propagation *TransitGatewayPropagation `locationName:"propagation" type:"structure"` +} + +// String returns the string representation +func (s DisableTransitGatewayRouteTablePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableTransitGatewayRouteTablePropagationOutput) GoString() string { + return s.String() +} + +// SetPropagation sets the Propagation field's value. 
+func (s *DisableTransitGatewayRouteTablePropagationOutput) SetPropagation(v *TransitGatewayPropagation) *DisableTransitGatewayRouteTablePropagationOutput { + s.Propagation = v + return s +} + +// Contains the parameters for DisableVgwRoutePropagation. +type DisableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the virtual private gateway. + // + // GatewayId is a required field + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. + // + // RouteTableId is a required field + RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableVgwRoutePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableVgwRoutePropagationInput"} + if s.GatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayId")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableVgwRoutePropagationInput) SetDryRun(v bool) *DisableVgwRoutePropagationInput { + s.DryRun = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. 
+func (s *DisableVgwRoutePropagationInput) SetGatewayId(v string) *DisableVgwRoutePropagationInput { + s.GatewayId = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *DisableVgwRoutePropagationInput) SetRouteTableId(v string) *DisableVgwRoutePropagationInput { + s.RouteTableId = &v + return s +} + +type DisableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// SetVpcId sets the VpcId field's value. +func (s *DisableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *DisableVpcClassicLinkDnsSupportInput { + s.VpcId = &v + return s +} + +type DisableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *DisableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *DisableVpcClassicLinkDnsSupportOutput { + s.Return = &v + return s +} + +type DisableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableVpcClassicLinkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableVpcClassicLinkInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DisableVpcClassicLinkInput) SetDryRun(v bool) *DisableVpcClassicLinkInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DisableVpcClassicLinkInput) SetVpcId(v string) *DisableVpcClassicLinkInput { + s.VpcId = &v + return s +} + +type DisableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DisableVpcClassicLinkOutput) SetReturn(v bool) *DisableVpcClassicLinkOutput { + s.Return = &v + return s +} + +type DisassociateAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The association ID. Required for EC2-VPC. + AssociationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s DisassociateAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressInput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateAddressInput) SetAssociationId(v string) *DisassociateAddressInput { + s.AssociationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateAddressInput) SetDryRun(v bool) *DisassociateAddressInput { + s.DryRun = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. 
+func (s *DisassociateAddressInput) SetPublicIp(v string) *DisassociateAddressInput { + s.PublicIp = &v + return s +} + +type DisassociateAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateAddressOutput) GoString() string { + return s.String() +} + +type DisassociateClientVpnTargetNetworkInput struct { + _ struct{} `type:"structure"` + + // The ID of the target network association. + // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` + + // The ID of the Client VPN endpoint from which to disassociate the target network. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DisassociateClientVpnTargetNetworkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateClientVpnTargetNetworkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateClientVpnTargetNetworkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateClientVpnTargetNetworkInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateClientVpnTargetNetworkInput) SetAssociationId(v string) *DisassociateClientVpnTargetNetworkInput { + s.AssociationId = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *DisassociateClientVpnTargetNetworkInput) SetClientVpnEndpointId(v string) *DisassociateClientVpnTargetNetworkInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateClientVpnTargetNetworkInput) SetDryRun(v bool) *DisassociateClientVpnTargetNetworkInput { + s.DryRun = &v + return s +} + +type DisassociateClientVpnTargetNetworkOutput struct { + _ struct{} `type:"structure"` + + // The ID of the target network association. + AssociationId *string `locationName:"associationId" type:"string"` + + // The current state of the target network association. + Status *AssociationStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s DisassociateClientVpnTargetNetworkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateClientVpnTargetNetworkOutput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. 
+func (s *DisassociateClientVpnTargetNetworkOutput) SetAssociationId(v string) *DisassociateClientVpnTargetNetworkOutput { + s.AssociationId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DisassociateClientVpnTargetNetworkOutput) SetStatus(v *AssociationStatus) *DisassociateClientVpnTargetNetworkOutput { + s.Status = v + return s +} + +type DisassociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate from which to disassociate the IAM role. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to disassociate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. 
+func (s *DisassociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *DisassociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type DisassociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DisassociateEnclaveCertificateIamRoleOutput) SetReturn(v bool) *DisassociateEnclaveCertificateIamRoleOutput { + s.Return = &v + return s +} + +type DisassociateIamInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The ID of the IAM instance profile association. + // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateIamInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateIamInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateIamInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateIamInstanceProfileInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateIamInstanceProfileInput) SetAssociationId(v string) *DisassociateIamInstanceProfileInput { + s.AssociationId = &v + return s +} + +type DisassociateIamInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile association. + IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"` +} + +// String returns the string representation +func (s DisassociateIamInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateIamInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value. +func (s *DisassociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *DisassociateIamInstanceProfileOutput { + s.IamInstanceProfileAssociation = v + return s +} + +type DisassociateRouteTableInput struct { + _ struct{} `type:"structure"` + + // The association ID representing the current association between the route + // table and subnet or gateway. + // + // AssociationId is a required field + AssociationId *string `locationName:"associationId" type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateRouteTableInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateRouteTableInput) SetAssociationId(v string) *DisassociateRouteTableInput { + s.AssociationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateRouteTableInput) SetDryRun(v bool) *DisassociateRouteTableInput { + s.DryRun = &v + return s +} + +type DisassociateRouteTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateRouteTableOutput) GoString() string { + return s.String() +} + +type DisassociateSubnetCidrBlockInput struct { + _ struct{} `type:"structure"` + + // The association ID for the CIDR block. 
+ // + // AssociationId is a required field + AssociationId *string `locationName:"associationId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateSubnetCidrBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateSubnetCidrBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateSubnetCidrBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateSubnetCidrBlockInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateSubnetCidrBlockInput) SetAssociationId(v string) *DisassociateSubnetCidrBlockInput { + s.AssociationId = &v + return s +} + +type DisassociateSubnetCidrBlockOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 CIDR block association. + Ipv6CidrBlockAssociation *SubnetIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s DisassociateSubnetCidrBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateSubnetCidrBlockOutput) GoString() string { + return s.String() +} + +// SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. +func (s *DisassociateSubnetCidrBlockOutput) SetIpv6CidrBlockAssociation(v *SubnetIpv6CidrBlockAssociation) *DisassociateSubnetCidrBlockOutput { + s.Ipv6CidrBlockAssociation = v + return s +} + +// SetSubnetId sets the SubnetId field's value. 
+func (s *DisassociateSubnetCidrBlockOutput) SetSubnetId(v string) *DisassociateSubnetCidrBlockOutput {
+	s.SubnetId = &v
+	return s
+}
+
+type DisassociateTransitGatewayMulticastDomainInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// The IDs of the subnets.
+	SubnetIds []*string `locationNameList:"item" type:"list"`
+
+	// The ID of the attachment.
+	TransitGatewayAttachmentId *string `type:"string"`
+
+	// The ID of the transit gateway multicast domain.
+	TransitGatewayMulticastDomainId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DisassociateTransitGatewayMulticastDomainInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateTransitGatewayMulticastDomainInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DisassociateTransitGatewayMulticastDomainInput) SetDryRun(v bool) *DisassociateTransitGatewayMulticastDomainInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetSubnetIds sets the SubnetIds field's value.
+func (s *DisassociateTransitGatewayMulticastDomainInput) SetSubnetIds(v []*string) *DisassociateTransitGatewayMulticastDomainInput {
+	s.SubnetIds = v
+	return s
+}
+
+// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value.
+func (s *DisassociateTransitGatewayMulticastDomainInput) SetTransitGatewayAttachmentId(v string) *DisassociateTransitGatewayMulticastDomainInput {
+	s.TransitGatewayAttachmentId = &v
+	return s
+}
+
+// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value.
+func (s *DisassociateTransitGatewayMulticastDomainInput) SetTransitGatewayMulticastDomainId(v string) *DisassociateTransitGatewayMulticastDomainInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type DisassociateTransitGatewayMulticastDomainOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + Associations *TransitGatewayMulticastDomainAssociations `locationName:"associations" type:"structure"` +} + +// String returns the string representation +func (s DisassociateTransitGatewayMulticastDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateTransitGatewayMulticastDomainOutput) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *DisassociateTransitGatewayMulticastDomainOutput) SetAssociations(v *TransitGatewayMulticastDomainAssociations) *DisassociateTransitGatewayMulticastDomainOutput { + s.Associations = v + return s +} + +type DisassociateTransitGatewayRouteTableInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` + + // The ID of the transit gateway route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateTransitGatewayRouteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateTransitGatewayRouteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateTransitGatewayRouteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateTransitGatewayRouteTableInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateTransitGatewayRouteTableInput) SetDryRun(v bool) *DisassociateTransitGatewayRouteTableInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *DisassociateTransitGatewayRouteTableInput) SetTransitGatewayAttachmentId(v string) *DisassociateTransitGatewayRouteTableInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DisassociateTransitGatewayRouteTableInput) SetTransitGatewayRouteTableId(v string) *DisassociateTransitGatewayRouteTableInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DisassociateTransitGatewayRouteTableOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. 
+ Association *TransitGatewayAssociation `locationName:"association" type:"structure"` +} + +// String returns the string representation +func (s DisassociateTransitGatewayRouteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateTransitGatewayRouteTableOutput) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *DisassociateTransitGatewayRouteTableOutput) SetAssociation(v *TransitGatewayAssociation) *DisassociateTransitGatewayRouteTableOutput { + s.Association = v + return s +} + +type DisassociateVpcCidrBlockInput struct { + _ struct{} `type:"structure"` + + // The association ID for the CIDR block. + // + // AssociationId is a required field + AssociationId *string `locationName:"associationId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateVpcCidrBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVpcCidrBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateVpcCidrBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateVpcCidrBlockInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateVpcCidrBlockInput) SetAssociationId(v string) *DisassociateVpcCidrBlockInput { + s.AssociationId = &v + return s +} + +type DisassociateVpcCidrBlockOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv4 CIDR block association. 
+ CidrBlockAssociation *VpcCidrBlockAssociation `locationName:"cidrBlockAssociation" type:"structure"` + + // Information about the IPv6 CIDR block association. + Ipv6CidrBlockAssociation *VpcIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s DisassociateVpcCidrBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateVpcCidrBlockOutput) GoString() string { + return s.String() +} + +// SetCidrBlockAssociation sets the CidrBlockAssociation field's value. +func (s *DisassociateVpcCidrBlockOutput) SetCidrBlockAssociation(v *VpcCidrBlockAssociation) *DisassociateVpcCidrBlockOutput { + s.CidrBlockAssociation = v + return s +} + +// SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. +func (s *DisassociateVpcCidrBlockOutput) SetIpv6CidrBlockAssociation(v *VpcIpv6CidrBlockAssociation) *DisassociateVpcCidrBlockOutput { + s.Ipv6CidrBlockAssociation = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *DisassociateVpcCidrBlockOutput) SetVpcId(v string) *DisassociateVpcCidrBlockOutput { + s.VpcId = &v + return s +} + +// Describes a disk image. +type DiskImage struct { + _ struct{} `type:"structure"` + + // A description of the disk image. + Description *string `type:"string"` + + // Information about the disk image. + Image *DiskImageDetail `type:"structure"` + + // Information about the volume. + Volume *VolumeDetail `type:"structure"` +} + +// String returns the string representation +func (s DiskImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImage) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DiskImage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DiskImage"} + if s.Image != nil { + if err := s.Image.Validate(); err != nil { + invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) + } + } + if s.Volume != nil { + if err := s.Volume.Validate(); err != nil { + invalidParams.AddNested("Volume", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *DiskImage) SetDescription(v string) *DiskImage { + s.Description = &v + return s +} + +// SetImage sets the Image field's value. +func (s *DiskImage) SetImage(v *DiskImageDetail) *DiskImage { + s.Image = v + return s +} + +// SetVolume sets the Volume field's value. +func (s *DiskImage) SetVolume(v *VolumeDetail) *DiskImage { + s.Volume = v + return s +} + +// Describes a disk image. +type DiskImageDescription struct { + _ struct{} `type:"structure"` + + // The checksum computed for the disk image. + Checksum *string `locationName:"checksum" type:"string"` + + // The disk image format. + Format *string `locationName:"format" type:"string" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3. For information + // about creating a presigned URL for an Amazon S3 object, read the "Query String + // Request Authentication Alternative" section of the Authenticating REST Requests + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + // + // For information about the import manifest referenced by this API action, + // see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string"` + + // The size of the disk image, in GiB. 
+ Size *int64 `locationName:"size" type:"long"` +} + +// String returns the string representation +func (s DiskImageDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDescription) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *DiskImageDescription) SetChecksum(v string) *DiskImageDescription { + s.Checksum = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *DiskImageDescription) SetFormat(v string) *DiskImageDescription { + s.Format = &v + return s +} + +// SetImportManifestUrl sets the ImportManifestUrl field's value. +func (s *DiskImageDescription) SetImportManifestUrl(v string) *DiskImageDescription { + s.ImportManifestUrl = &v + return s +} + +// SetSize sets the Size field's value. +func (s *DiskImageDescription) SetSize(v int64) *DiskImageDescription { + s.Size = &v + return s +} + +// Describes a disk image. +type DiskImageDetail struct { + _ struct{} `type:"structure"` + + // The size of the disk image, in GiB. + // + // Bytes is a required field + Bytes *int64 `locationName:"bytes" type:"long" required:"true"` + + // The disk image format. + // + // Format is a required field + Format *string `locationName:"format" type:"string" required:"true" enum:"DiskImageFormat"` + + // A presigned URL for the import manifest stored in Amazon S3 and presented + // here as an Amazon S3 presigned URL. For information about creating a presigned + // URL for an Amazon S3 object, read the "Query String Request Authentication + // Alternative" section of the Authenticating REST Requests (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // topic in the Amazon Simple Storage Service Developer Guide. + // + // For information about the import manifest referenced by this API action, + // see VM Import Manifest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/manifest.html). 
+ // + // ImportManifestUrl is a required field + ImportManifestUrl *string `locationName:"importManifestUrl" type:"string" required:"true"` +} + +// String returns the string representation +func (s DiskImageDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageDetail) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DiskImageDetail) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DiskImageDetail"} + if s.Bytes == nil { + invalidParams.Add(request.NewErrParamRequired("Bytes")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.ImportManifestUrl == nil { + invalidParams.Add(request.NewErrParamRequired("ImportManifestUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBytes sets the Bytes field's value. +func (s *DiskImageDetail) SetBytes(v int64) *DiskImageDetail { + s.Bytes = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *DiskImageDetail) SetFormat(v string) *DiskImageDetail { + s.Format = &v + return s +} + +// SetImportManifestUrl sets the ImportManifestUrl field's value. +func (s *DiskImageDetail) SetImportManifestUrl(v string) *DiskImageDetail { + s.ImportManifestUrl = &v + return s +} + +// Describes a disk image volume. +type DiskImageVolumeDescription struct { + _ struct{} `type:"structure"` + + // The volume identifier. + Id *string `locationName:"id" type:"string"` + + // The size of the volume, in GiB. + Size *int64 `locationName:"size" type:"long"` +} + +// String returns the string representation +func (s DiskImageVolumeDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskImageVolumeDescription) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. 
+func (s *DiskImageVolumeDescription) SetId(v string) *DiskImageVolumeDescription { + s.Id = &v + return s +} + +// SetSize sets the Size field's value. +func (s *DiskImageVolumeDescription) SetSize(v int64) *DiskImageVolumeDescription { + s.Size = &v + return s +} + +// Describes the disk. +type DiskInfo struct { + _ struct{} `type:"structure"` + + // The number of disks with this configuration. + Count *int64 `locationName:"count" type:"integer"` + + // The size of the disk in GB. + SizeInGB *int64 `locationName:"sizeInGB" type:"long"` + + // The type of disk. + Type *string `locationName:"type" type:"string" enum:"DiskType"` +} + +// String returns the string representation +func (s DiskInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DiskInfo) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *DiskInfo) SetCount(v int64) *DiskInfo { + s.Count = &v + return s +} + +// SetSizeInGB sets the SizeInGB field's value. +func (s *DiskInfo) SetSizeInGB(v int64) *DiskInfo { + s.SizeInGB = &v + return s +} + +// SetType sets the Type field's value. +func (s *DiskInfo) SetType(v string) *DiskInfo { + s.Type = &v + return s +} + +// Describes a DNS entry. +type DnsEntry struct { + _ struct{} `type:"structure"` + + // The DNS name. + DnsName *string `locationName:"dnsName" type:"string"` + + // The ID of the private hosted zone. + HostedZoneId *string `locationName:"hostedZoneId" type:"string"` +} + +// String returns the string representation +func (s DnsEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DnsEntry) GoString() string { + return s.String() +} + +// SetDnsName sets the DnsName field's value. +func (s *DnsEntry) SetDnsName(v string) *DnsEntry { + s.DnsName = &v + return s +} + +// SetHostedZoneId sets the HostedZoneId field's value. 
+func (s *DnsEntry) SetHostedZoneId(v string) *DnsEntry { + s.HostedZoneId = &v + return s +} + +// Information about the DNS server to be used. +type DnsServersOptionsModifyStructure struct { + _ struct{} `type:"structure"` + + // The IPv4 address range, in CIDR notation, of the DNS servers to be used. + // You can specify up to two DNS servers. Ensure that the DNS servers can be + // reached by the clients. The specified values overwrite the existing values. + CustomDnsServers []*string `locationNameList:"item" type:"list"` + + // Indicates whether DNS servers should be used. Specify False to delete the + // existing DNS servers. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s DnsServersOptionsModifyStructure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DnsServersOptionsModifyStructure) GoString() string { + return s.String() +} + +// SetCustomDnsServers sets the CustomDnsServers field's value. +func (s *DnsServersOptionsModifyStructure) SetCustomDnsServers(v []*string) *DnsServersOptionsModifyStructure { + s.CustomDnsServers = v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *DnsServersOptionsModifyStructure) SetEnabled(v bool) *DnsServersOptionsModifyStructure { + s.Enabled = &v + return s +} + +// Describes a block device for an EBS volume. +type EbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Indicates whether the EBS volume is deleted on instance termination. For + // more information, see Preserving Amazon EBS volumes on instance termination + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) + // in the Amazon Elastic Compute Cloud User Guide. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // Indicates whether the encryption state of an EBS volume is changed while + // being restored from a backing snapshot. 
The effect of setting the encryption + // state to true depends on the volume origin (new or from a snapshot), starting + // encryption state, ownership, and whether encryption by default is enabled. + // For more information, see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-parameters) + // in the Amazon Elastic Compute Cloud User Guide. + // + // In no case can you remove encryption from an encrypted volume. + // + // Encrypted volumes can only be attached to instances that support Amazon EBS + // encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // + // This parameter is not returned by . + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, + // this represents the number of IOPS that are provisioned for the volume. For + // gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + // + // The following are the supported values for each volume type: + // + // * gp3: 3,000-16,000 IOPS + // + // * io1: 100-64,000 IOPS + // + // * io2: 100-64,000 IOPS + // + // For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built + // on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. + // + // This parameter is required for io1 and io2 volumes. The default for gp3 volumes + // is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard + // volumes. + Iops *int64 `locationName:"iops" type:"integer"` + + // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed + // CMK under which the EBS volume is encrypted. 
+ // + // This parameter is only supported on BlockDeviceMapping objects called by + // RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), + // RequestSpotFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html), + // and RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html). + KmsKeyId *string `type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The throughput that the volume supports, in MiB/s. + // + // This parameter is valid only for gp3 volumes. + // + // Valid Range: Minimum value of 125. Maximum value of 1000. + Throughput *int64 `locationName:"throughput" type:"integer"` + + // The size of the volume, in GiBs. You must specify either a snapshot ID or + // a volume size. If you specify a snapshot, the default is the snapshot size. + // You can specify a volume size that is equal to or larger than the snapshot + // size. + // + // The following are the supported volumes sizes for each volume type: + // + // * gp2 and gp3:1-16,384 + // + // * io1 and io2: 4-16,384 + // + // * st1: 500-16,384 + // + // * sc1: 500-16,384 + // + // * standard: 1-1,024 + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. If the volume type is io1 + // or io2, you must specify the IOPS that the volume supports. 
+ VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s EbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsBlockDevice) GoString() string { + return s.String() +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *EbsBlockDevice) SetDeleteOnTermination(v bool) *EbsBlockDevice { + s.DeleteOnTermination = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *EbsBlockDevice) SetEncrypted(v bool) *EbsBlockDevice { + s.Encrypted = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *EbsBlockDevice) SetIops(v int64) *EbsBlockDevice { + s.Iops = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *EbsBlockDevice) SetKmsKeyId(v string) *EbsBlockDevice { + s.KmsKeyId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *EbsBlockDevice) SetSnapshotId(v string) *EbsBlockDevice { + s.SnapshotId = &v + return s +} + +// SetThroughput sets the Throughput field's value. +func (s *EbsBlockDevice) SetThroughput(v int64) *EbsBlockDevice { + s.Throughput = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *EbsBlockDevice) SetVolumeSize(v int64) *EbsBlockDevice { + s.VolumeSize = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *EbsBlockDevice) SetVolumeType(v string) *EbsBlockDevice { + s.VolumeType = &v + return s +} + +// Describes the Amazon EBS features supported by the instance type. +type EbsInfo struct { + _ struct{} `type:"structure"` + + // Describes the optimized EBS performance for the instance type. + EbsOptimizedInfo *EbsOptimizedInfo `locationName:"ebsOptimizedInfo" type:"structure"` + + // Indicates whether the instance type is Amazon EBS-optimized. 
For more information, + // see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // in Amazon EC2 User Guide for Linux Instances. + EbsOptimizedSupport *string `locationName:"ebsOptimizedSupport" type:"string" enum:"EbsOptimizedSupport"` + + // Indicates whether Amazon EBS encryption is supported. + EncryptionSupport *string `locationName:"encryptionSupport" type:"string" enum:"EbsEncryptionSupport"` + + // Indicates whether non-volatile memory express (NVMe) is supported. + NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EbsNvmeSupport"` +} + +// String returns the string representation +func (s EbsInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInfo) GoString() string { + return s.String() +} + +// SetEbsOptimizedInfo sets the EbsOptimizedInfo field's value. +func (s *EbsInfo) SetEbsOptimizedInfo(v *EbsOptimizedInfo) *EbsInfo { + s.EbsOptimizedInfo = v + return s +} + +// SetEbsOptimizedSupport sets the EbsOptimizedSupport field's value. +func (s *EbsInfo) SetEbsOptimizedSupport(v string) *EbsInfo { + s.EbsOptimizedSupport = &v + return s +} + +// SetEncryptionSupport sets the EncryptionSupport field's value. +func (s *EbsInfo) SetEncryptionSupport(v string) *EbsInfo { + s.EncryptionSupport = &v + return s +} + +// SetNvmeSupport sets the NvmeSupport field's value. +func (s *EbsInfo) SetNvmeSupport(v string) *EbsInfo { + s.NvmeSupport = &v + return s +} + +// Describes a parameter used to set up an EBS volume in a block device mapping. +type EbsInstanceBlockDevice struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The attachment state. 
+ Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDevice) GoString() string { + return s.String() +} + +// SetAttachTime sets the AttachTime field's value. +func (s *EbsInstanceBlockDevice) SetAttachTime(v time.Time) *EbsInstanceBlockDevice { + s.AttachTime = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *EbsInstanceBlockDevice) SetDeleteOnTermination(v bool) *EbsInstanceBlockDevice { + s.DeleteOnTermination = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *EbsInstanceBlockDevice) SetStatus(v string) *EbsInstanceBlockDevice { + s.Status = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *EbsInstanceBlockDevice) SetVolumeId(v string) *EbsInstanceBlockDevice { + s.VolumeId = &v + return s +} + +// Describes information used to set up an EBS volume specified in a block device +// mapping. +type EbsInstanceBlockDeviceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The ID of the EBS volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s EbsInstanceBlockDeviceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsInstanceBlockDeviceSpecification) GoString() string { + return s.String() +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. 
+func (s *EbsInstanceBlockDeviceSpecification) SetDeleteOnTermination(v bool) *EbsInstanceBlockDeviceSpecification { + s.DeleteOnTermination = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *EbsInstanceBlockDeviceSpecification) SetVolumeId(v string) *EbsInstanceBlockDeviceSpecification { + s.VolumeId = &v + return s +} + +// Describes the optimized EBS performance for supported instance types. +type EbsOptimizedInfo struct { + _ struct{} `type:"structure"` + + // The baseline bandwidth performance for an EBS-optimized instance type, in + // Mbps. + BaselineBandwidthInMbps *int64 `locationName:"baselineBandwidthInMbps" type:"integer"` + + // The baseline input/output storage operations per seconds for an EBS-optimized + // instance type. + BaselineIops *int64 `locationName:"baselineIops" type:"integer"` + + // The baseline throughput performance for an EBS-optimized instance type, in + // MB/s. + BaselineThroughputInMBps *float64 `locationName:"baselineThroughputInMBps" type:"double"` + + // The maximum bandwidth performance for an EBS-optimized instance type, in + // Mbps. + MaximumBandwidthInMbps *int64 `locationName:"maximumBandwidthInMbps" type:"integer"` + + // The maximum input/output storage operations per second for an EBS-optimized + // instance type. + MaximumIops *int64 `locationName:"maximumIops" type:"integer"` + + // The maximum throughput performance for an EBS-optimized instance type, in + // MB/s. + MaximumThroughputInMBps *float64 `locationName:"maximumThroughputInMBps" type:"double"` +} + +// String returns the string representation +func (s EbsOptimizedInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsOptimizedInfo) GoString() string { + return s.String() +} + +// SetBaselineBandwidthInMbps sets the BaselineBandwidthInMbps field's value. 
+func (s *EbsOptimizedInfo) SetBaselineBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.BaselineBandwidthInMbps = &v + return s +} + +// SetBaselineIops sets the BaselineIops field's value. +func (s *EbsOptimizedInfo) SetBaselineIops(v int64) *EbsOptimizedInfo { + s.BaselineIops = &v + return s +} + +// SetBaselineThroughputInMBps sets the BaselineThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetBaselineThroughputInMBps(v float64) *EbsOptimizedInfo { + s.BaselineThroughputInMBps = &v + return s +} + +// SetMaximumBandwidthInMbps sets the MaximumBandwidthInMbps field's value. +func (s *EbsOptimizedInfo) SetMaximumBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.MaximumBandwidthInMbps = &v + return s +} + +// SetMaximumIops sets the MaximumIops field's value. +func (s *EbsOptimizedInfo) SetMaximumIops(v int64) *EbsOptimizedInfo { + s.MaximumIops = &v + return s +} + +// SetMaximumThroughputInMBps sets the MaximumThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedInfo { + s.MaximumThroughputInMBps = &v + return s +} + +// Describes an egress-only internet gateway. +type EgressOnlyInternetGateway struct { + _ struct{} `type:"structure"` + + // Information about the attachment of the egress-only internet gateway. + Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The ID of the egress-only internet gateway. + EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` + + // The tags assigned to the egress-only internet gateway. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s EgressOnlyInternetGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EgressOnlyInternetGateway) GoString() string { + return s.String() +} + +// SetAttachments sets the Attachments field's value. +func (s *EgressOnlyInternetGateway) SetAttachments(v []*InternetGatewayAttachment) *EgressOnlyInternetGateway { + s.Attachments = v + return s +} + +// SetEgressOnlyInternetGatewayId sets the EgressOnlyInternetGatewayId field's value. +func (s *EgressOnlyInternetGateway) SetEgressOnlyInternetGatewayId(v string) *EgressOnlyInternetGateway { + s.EgressOnlyInternetGatewayId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *EgressOnlyInternetGateway) SetTags(v []*Tag) *EgressOnlyInternetGateway { + s.Tags = v + return s +} + +// Describes the association between an instance and an Elastic Graphics accelerator. +type ElasticGpuAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association. + ElasticGpuAssociationId *string `locationName:"elasticGpuAssociationId" type:"string"` + + // The state of the association between the instance and the Elastic Graphics + // accelerator. + ElasticGpuAssociationState *string `locationName:"elasticGpuAssociationState" type:"string"` + + // The time the Elastic Graphics accelerator was associated with the instance. + ElasticGpuAssociationTime *string `locationName:"elasticGpuAssociationTime" type:"string"` + + // The ID of the Elastic Graphics accelerator. 
+ ElasticGpuId *string `locationName:"elasticGpuId" type:"string"` +} + +// String returns the string representation +func (s ElasticGpuAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuAssociation) GoString() string { + return s.String() +} + +// SetElasticGpuAssociationId sets the ElasticGpuAssociationId field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationId(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationId = &v + return s +} + +// SetElasticGpuAssociationState sets the ElasticGpuAssociationState field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationState(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationState = &v + return s +} + +// SetElasticGpuAssociationTime sets the ElasticGpuAssociationTime field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationTime(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationTime = &v + return s +} + +// SetElasticGpuId sets the ElasticGpuId field's value. +func (s *ElasticGpuAssociation) SetElasticGpuId(v string) *ElasticGpuAssociation { + s.ElasticGpuId = &v + return s +} + +// Describes the status of an Elastic Graphics accelerator. +type ElasticGpuHealth struct { + _ struct{} `type:"structure"` + + // The health status. + Status *string `locationName:"status" type:"string" enum:"ElasticGpuStatus"` +} + +// String returns the string representation +func (s ElasticGpuHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuHealth) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ElasticGpuHealth) SetStatus(v string) *ElasticGpuHealth { + s.Status = &v + return s +} + +// A specification for an Elastic Graphics accelerator. +type ElasticGpuSpecification struct { + _ struct{} `type:"structure"` + + // The type of Elastic Graphics accelerator. 
For more information about the + // values to specify for Type, see Elastic Graphics Basics (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html#elastic-graphics-basics), + // specifically the Elastic Graphics accelerator column, in the Amazon Elastic + // Compute Cloud User Guide for Windows Instances. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ElasticGpuSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ElasticGpuSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticGpuSpecification"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetType sets the Type field's value. +func (s *ElasticGpuSpecification) SetType(v string) *ElasticGpuSpecification { + s.Type = &v + return s +} + +// Describes an elastic GPU. +type ElasticGpuSpecificationResponse struct { + _ struct{} `type:"structure"` + + // The elastic GPU type. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s ElasticGpuSpecificationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuSpecificationResponse) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *ElasticGpuSpecificationResponse) SetType(v string) *ElasticGpuSpecificationResponse { + s.Type = &v + return s +} + +// Describes an Elastic Graphics accelerator. 
+type ElasticGpus struct { + _ struct{} `type:"structure"` + + // The Availability Zone in the which the Elastic Graphics accelerator resides. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The status of the Elastic Graphics accelerator. + ElasticGpuHealth *ElasticGpuHealth `locationName:"elasticGpuHealth" type:"structure"` + + // The ID of the Elastic Graphics accelerator. + ElasticGpuId *string `locationName:"elasticGpuId" type:"string"` + + // The state of the Elastic Graphics accelerator. + ElasticGpuState *string `locationName:"elasticGpuState" type:"string" enum:"ElasticGpuState"` + + // The type of Elastic Graphics accelerator. + ElasticGpuType *string `locationName:"elasticGpuType" type:"string"` + + // The ID of the instance to which the Elastic Graphics accelerator is attached. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The tags assigned to the Elastic Graphics accelerator. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ElasticGpus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpus) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ElasticGpus) SetAvailabilityZone(v string) *ElasticGpus { + s.AvailabilityZone = &v + return s +} + +// SetElasticGpuHealth sets the ElasticGpuHealth field's value. +func (s *ElasticGpus) SetElasticGpuHealth(v *ElasticGpuHealth) *ElasticGpus { + s.ElasticGpuHealth = v + return s +} + +// SetElasticGpuId sets the ElasticGpuId field's value. +func (s *ElasticGpus) SetElasticGpuId(v string) *ElasticGpus { + s.ElasticGpuId = &v + return s +} + +// SetElasticGpuState sets the ElasticGpuState field's value. 
+func (s *ElasticGpus) SetElasticGpuState(v string) *ElasticGpus { + s.ElasticGpuState = &v + return s +} + +// SetElasticGpuType sets the ElasticGpuType field's value. +func (s *ElasticGpus) SetElasticGpuType(v string) *ElasticGpus { + s.ElasticGpuType = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ElasticGpus) SetInstanceId(v string) *ElasticGpus { + s.InstanceId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ElasticGpus) SetTags(v []*Tag) *ElasticGpus { + s.Tags = v + return s +} + +// Describes an elastic inference accelerator. +type ElasticInferenceAccelerator struct { + _ struct{} `type:"structure"` + + // The number of elastic inference accelerators to attach to the instance. + // + // Default: 1 + Count *int64 `min:"1" type:"integer"` + + // The type of elastic inference accelerator. The possible values are eia1.medium, + // eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ElasticInferenceAccelerator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticInferenceAccelerator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ElasticInferenceAccelerator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticInferenceAccelerator"} + if s.Count != nil && *s.Count < 1 { + invalidParams.Add(request.NewErrParamMinValue("Count", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCount sets the Count field's value. 
+func (s *ElasticInferenceAccelerator) SetCount(v int64) *ElasticInferenceAccelerator { + s.Count = &v + return s +} + +// SetType sets the Type field's value. +func (s *ElasticInferenceAccelerator) SetType(v string) *ElasticInferenceAccelerator { + s.Type = &v + return s +} + +// Describes the association between an instance and an elastic inference accelerator. +type ElasticInferenceAcceleratorAssociation struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the elastic inference accelerator. + ElasticInferenceAcceleratorArn *string `locationName:"elasticInferenceAcceleratorArn" type:"string"` + + // The ID of the association. + ElasticInferenceAcceleratorAssociationId *string `locationName:"elasticInferenceAcceleratorAssociationId" type:"string"` + + // The state of the elastic inference accelerator. + ElasticInferenceAcceleratorAssociationState *string `locationName:"elasticInferenceAcceleratorAssociationState" type:"string"` + + // The time at which the elastic inference accelerator is associated with an + // instance. + ElasticInferenceAcceleratorAssociationTime *time.Time `locationName:"elasticInferenceAcceleratorAssociationTime" type:"timestamp"` +} + +// String returns the string representation +func (s ElasticInferenceAcceleratorAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticInferenceAcceleratorAssociation) GoString() string { + return s.String() +} + +// SetElasticInferenceAcceleratorArn sets the ElasticInferenceAcceleratorArn field's value. +func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorArn(v string) *ElasticInferenceAcceleratorAssociation { + s.ElasticInferenceAcceleratorArn = &v + return s +} + +// SetElasticInferenceAcceleratorAssociationId sets the ElasticInferenceAcceleratorAssociationId field's value. 
+func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorAssociationId(v string) *ElasticInferenceAcceleratorAssociation { + s.ElasticInferenceAcceleratorAssociationId = &v + return s +} + +// SetElasticInferenceAcceleratorAssociationState sets the ElasticInferenceAcceleratorAssociationState field's value. +func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorAssociationState(v string) *ElasticInferenceAcceleratorAssociation { + s.ElasticInferenceAcceleratorAssociationState = &v + return s +} + +// SetElasticInferenceAcceleratorAssociationTime sets the ElasticInferenceAcceleratorAssociationTime field's value. +func (s *ElasticInferenceAcceleratorAssociation) SetElasticInferenceAcceleratorAssociationTime(v time.Time) *ElasticInferenceAcceleratorAssociation { + s.ElasticInferenceAcceleratorAssociationTime = &v + return s +} + +type EnableEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnableEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableEbsEncryptionByDefaultInput) SetDryRun(v bool) *EnableEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type EnableEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // The updated status of encryption by default. 
+ EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s EnableEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. +func (s *EnableEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *EnableEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + +// Contains information about the errors that occurred when enabling fast snapshot +// restores. +type EnableFastSnapshotRestoreErrorItem struct { + _ struct{} `type:"structure"` + + // The errors. + FastSnapshotRestoreStateErrors []*EnableFastSnapshotRestoreStateErrorItem `locationName:"fastSnapshotRestoreStateErrorSet" locationNameList:"item" type:"list"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoreErrorItem) GoString() string { + return s.String() +} + +// SetFastSnapshotRestoreStateErrors sets the FastSnapshotRestoreStateErrors field's value. +func (s *EnableFastSnapshotRestoreErrorItem) SetFastSnapshotRestoreStateErrors(v []*EnableFastSnapshotRestoreStateErrorItem) *EnableFastSnapshotRestoreErrorItem { + s.FastSnapshotRestoreStateErrors = v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *EnableFastSnapshotRestoreErrorItem) SetSnapshotId(v string) *EnableFastSnapshotRestoreErrorItem { + s.SnapshotId = &v + return s +} + +// Describes an error that occurred when enabling fast snapshot restores. 
+type EnableFastSnapshotRestoreStateError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreStateError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoreStateError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *EnableFastSnapshotRestoreStateError) SetCode(v string) *EnableFastSnapshotRestoreStateError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *EnableFastSnapshotRestoreStateError) SetMessage(v string) *EnableFastSnapshotRestoreStateError { + s.Message = &v + return s +} + +// Contains information about an error that occurred when enabling fast snapshot +// restores. +type EnableFastSnapshotRestoreStateErrorItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The error. + Error *EnableFastSnapshotRestoreStateError `locationName:"error" type:"structure"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreStateErrorItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoreStateErrorItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *EnableFastSnapshotRestoreStateErrorItem) SetAvailabilityZone(v string) *EnableFastSnapshotRestoreStateErrorItem { + s.AvailabilityZone = &v + return s +} + +// SetError sets the Error field's value. 
+func (s *EnableFastSnapshotRestoreStateErrorItem) SetError(v *EnableFastSnapshotRestoreStateError) *EnableFastSnapshotRestoreStateErrorItem { + s.Error = v + return s +} + +// Describes fast snapshot restores that were successfully enabled. +type EnableFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. + DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores. + State *string `locationName:"state" type:"string" enum:"FastSnapshotRestoreStateCode"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. 
+ // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. + StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoreSuccessItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetAvailabilityZone(v string) *EnableFastSnapshotRestoreSuccessItem { + s.AvailabilityZone = &v + return s +} + +// SetDisabledTime sets the DisabledTime field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetDisabledTime(v time.Time) *EnableFastSnapshotRestoreSuccessItem { + s.DisabledTime = &v + return s +} + +// SetDisablingTime sets the DisablingTime field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetDisablingTime(v time.Time) *EnableFastSnapshotRestoreSuccessItem { + s.DisablingTime = &v + return s +} + +// SetEnabledTime sets the EnabledTime field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetEnabledTime(v time.Time) *EnableFastSnapshotRestoreSuccessItem { + s.EnabledTime = &v + return s +} + +// SetEnablingTime sets the EnablingTime field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetEnablingTime(v time.Time) *EnableFastSnapshotRestoreSuccessItem { + s.EnablingTime = &v + return s +} + +// SetOptimizingTime sets the OptimizingTime field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetOptimizingTime(v time.Time) *EnableFastSnapshotRestoreSuccessItem { + s.OptimizingTime = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. 
+func (s *EnableFastSnapshotRestoreSuccessItem) SetOwnerAlias(v string) *EnableFastSnapshotRestoreSuccessItem { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetOwnerId(v string) *EnableFastSnapshotRestoreSuccessItem { + s.OwnerId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetSnapshotId(v string) *EnableFastSnapshotRestoreSuccessItem { + s.SnapshotId = &v + return s +} + +// SetState sets the State field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetState(v string) *EnableFastSnapshotRestoreSuccessItem { + s.State = &v + return s +} + +// SetStateTransitionReason sets the StateTransitionReason field's value. +func (s *EnableFastSnapshotRestoreSuccessItem) SetStateTransitionReason(v string) *EnableFastSnapshotRestoreSuccessItem { + s.StateTransitionReason = &v + return s +} + +type EnableFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones. For example, us-east-2a. + // + // AvailabilityZones is a required field + AvailabilityZones []*string `locationName:"AvailabilityZone" locationNameList:"AvailabilityZone" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You + // can specify a snapshot that was shared with you from another AWS account. 
+ // + // SourceSnapshotIds is a required field + SourceSnapshotIds []*string `locationName:"SourceSnapshotId" locationNameList:"SnapshotId" type:"list" required:"true"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoresInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableFastSnapshotRestoresInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableFastSnapshotRestoresInput"} + if s.AvailabilityZones == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZones")) + } + if s.SourceSnapshotIds == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *EnableFastSnapshotRestoresInput) SetAvailabilityZones(v []*string) *EnableFastSnapshotRestoresInput { + s.AvailabilityZones = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableFastSnapshotRestoresInput) SetDryRun(v bool) *EnableFastSnapshotRestoresInput { + s.DryRun = &v + return s +} + +// SetSourceSnapshotIds sets the SourceSnapshotIds field's value. +func (s *EnableFastSnapshotRestoresInput) SetSourceSnapshotIds(v []*string) *EnableFastSnapshotRestoresInput { + s.SourceSnapshotIds = v + return s +} + +type EnableFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the snapshots for which fast snapshot restores were successfully + // enabled. + Successful []*EnableFastSnapshotRestoreSuccessItem `locationName:"successful" locationNameList:"item" type:"list"` + + // Information about the snapshots for which fast snapshot restores could not + // be enabled. 
+ Unsuccessful []*EnableFastSnapshotRestoreErrorItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableFastSnapshotRestoresOutput) GoString() string { + return s.String() +} + +// SetSuccessful sets the Successful field's value. +func (s *EnableFastSnapshotRestoresOutput) SetSuccessful(v []*EnableFastSnapshotRestoreSuccessItem) *EnableFastSnapshotRestoresOutput { + s.Successful = v + return s +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *EnableFastSnapshotRestoresOutput) SetUnsuccessful(v []*EnableFastSnapshotRestoreErrorItem) *EnableFastSnapshotRestoresOutput { + s.Unsuccessful = v + return s +} + +type EnableTransitGatewayRouteTablePropagationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` + + // The ID of the propagation route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableTransitGatewayRouteTablePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableTransitGatewayRouteTablePropagationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EnableTransitGatewayRouteTablePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableTransitGatewayRouteTablePropagationInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableTransitGatewayRouteTablePropagationInput) SetDryRun(v bool) *EnableTransitGatewayRouteTablePropagationInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *EnableTransitGatewayRouteTablePropagationInput) SetTransitGatewayAttachmentId(v string) *EnableTransitGatewayRouteTablePropagationInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *EnableTransitGatewayRouteTablePropagationInput) SetTransitGatewayRouteTableId(v string) *EnableTransitGatewayRouteTablePropagationInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type EnableTransitGatewayRouteTablePropagationOutput struct { + _ struct{} `type:"structure"` + + // Information about route propagation. + Propagation *TransitGatewayPropagation `locationName:"propagation" type:"structure"` +} + +// String returns the string representation +func (s EnableTransitGatewayRouteTablePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableTransitGatewayRouteTablePropagationOutput) GoString() string { + return s.String() +} + +// SetPropagation sets the Propagation field's value. 
+func (s *EnableTransitGatewayRouteTablePropagationOutput) SetPropagation(v *TransitGatewayPropagation) *EnableTransitGatewayRouteTablePropagationOutput { + s.Propagation = v + return s +} + +// Contains the parameters for EnableVgwRoutePropagation. +type EnableVgwRoutePropagationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the virtual private gateway that is attached to a VPC. The virtual + // private gateway must be attached to the same VPC that the routing tables + // are associated with. + // + // GatewayId is a required field + GatewayId *string `type:"string" required:"true"` + + // The ID of the route table. The routing table must be associated with the + // same VPC that the virtual private gateway is attached to. + // + // RouteTableId is a required field + RouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableVgwRoutePropagationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVgwRoutePropagationInput"} + if s.GatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayId")) + } + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *EnableVgwRoutePropagationInput) SetDryRun(v bool) *EnableVgwRoutePropagationInput { + s.DryRun = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *EnableVgwRoutePropagationInput) SetGatewayId(v string) *EnableVgwRoutePropagationInput { + s.GatewayId = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *EnableVgwRoutePropagationInput) SetRouteTableId(v string) *EnableVgwRoutePropagationInput { + s.RouteTableId = &v + return s +} + +type EnableVgwRoutePropagationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVgwRoutePropagationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVgwRoutePropagationOutput) GoString() string { + return s.String() +} + +type EnableVolumeIOInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. + // + // VolumeId is a required field + VolumeId *string `locationName:"volumeId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVolumeIOInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EnableVolumeIOInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVolumeIOInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableVolumeIOInput) SetDryRun(v bool) *EnableVolumeIOInput { + s.DryRun = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *EnableVolumeIOInput) SetVolumeId(v string) *EnableVolumeIOInput { + s.VolumeId = &v + return s +} + +type EnableVolumeIOOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableVolumeIOOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVolumeIOOutput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +// SetVpcId sets the VpcId field's value. +func (s *EnableVpcClassicLinkDnsSupportInput) SetVpcId(v string) *EnableVpcClassicLinkDnsSupportInput { + s.VpcId = &v + return s +} + +type EnableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *EnableVpcClassicLinkDnsSupportOutput) SetReturn(v bool) *EnableVpcClassicLinkDnsSupportOutput { + s.Return = &v + return s +} + +type EnableVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableVpcClassicLinkInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableVpcClassicLinkInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *EnableVpcClassicLinkInput) SetDryRun(v bool) *EnableVpcClassicLinkInput { + s.DryRun = &v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *EnableVpcClassicLinkInput) SetVpcId(v string) *EnableVpcClassicLinkInput { + s.VpcId = &v + return s +} + +type EnableVpcClassicLinkOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutput { + s.Return = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. +type EnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptions) SetEnabled(v bool) *EnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type EnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. 
+ Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptionsRequest) SetEnabled(v bool) *EnclaveOptionsRequest { + s.Enabled = &v + return s +} + +// Describes an EC2 Fleet or Spot Fleet event. +type EventInformation struct { + _ struct{} `type:"structure"` + + // The description of the event. + EventDescription *string `locationName:"eventDescription" type:"string"` + + // The event. + // + // The following are the error events: + // + // * iamFleetRoleInvalid - The EC2 Fleet or Spot Fleet did not have the required + // permissions either to launch or terminate an instance. + // + // * spotFleetRequestConfigurationInvalid - The configuration is not valid. + // For more information, see the description of the event. + // + // * spotInstanceCountLimitExceeded - You've reached the limit on the number + // of Spot Instances that you can launch. + // + // The following are the fleetRequestChange events: + // + // * active - The EC2 Fleet or Spot Fleet request has been validated and + // Amazon EC2 is attempting to maintain the target number of running Spot + // Instances. + // + // * cancelled - The EC2 Fleet or Spot Fleet request is canceled and has + // no running Spot Instances. The EC2 Fleet or Spot Fleet will be deleted + // two days after its instances were terminated. + // + // * cancelled_running - The EC2 Fleet or Spot Fleet request is canceled + // and does not launch additional Spot Instances. Existing Spot Instances + // continue to run until they are interrupted or terminated. + // + // * cancelled_terminating - The EC2 Fleet or Spot Fleet request is canceled + // and its Spot Instances are terminating. 
+ // + // * expired - The EC2 Fleet or Spot Fleet request has expired. A subsequent + // event indicates that the instances were terminated, if the request was + // created with TerminateInstancesWithExpiration set. + // + // * modify_in_progress - A request to modify the EC2 Fleet or Spot Fleet + // request was accepted and is in progress. + // + // * modify_successful - The EC2 Fleet or Spot Fleet request was modified. + // + // * price_update - The price for a launch configuration was adjusted because + // it was too high. This change is permanent. + // + // * submitted - The EC2 Fleet or Spot Fleet request is being evaluated and + // Amazon EC2 is preparing to launch the target number of Spot Instances. + // + // The following are the instanceChange events: + // + // * launched - A request was fulfilled and a new instance was launched. + // + // * terminated - An instance was terminated by the user. + // + // The following are the Information events: + // + // * launchSpecTemporarilyBlacklisted - The configuration is not valid and + // several attempts to launch instances have failed. For more information, + // see the description of the event. + // + // * launchSpecUnusable - The price in a launch specification is not valid + // because it is below the Spot price or the Spot price is above the On-Demand + // price. + // + // * fleetProgressHalted - The price in every launch specification is not + // valid. A launch specification might become valid if the Spot price changes. + EventSubType *string `locationName:"eventSubType" type:"string"` + + // The ID of the instance. This information is available only for instanceChange + // events. 
+ InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s EventInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventInformation) GoString() string { + return s.String() +} + +// SetEventDescription sets the EventDescription field's value. +func (s *EventInformation) SetEventDescription(v string) *EventInformation { + s.EventDescription = &v + return s +} + +// SetEventSubType sets the EventSubType field's value. +func (s *EventInformation) SetEventSubType(v string) *EventInformation { + s.EventSubType = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *EventInformation) SetInstanceId(v string) *EventInformation { + s.InstanceId = &v + return s +} + +type ExportClientVpnClientCertificateRevocationListInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ExportClientVpnClientCertificateRevocationListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportClientVpnClientCertificateRevocationListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExportClientVpnClientCertificateRevocationListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportClientVpnClientCertificateRevocationListInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ExportClientVpnClientCertificateRevocationListInput) SetClientVpnEndpointId(v string) *ExportClientVpnClientCertificateRevocationListInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportClientVpnClientCertificateRevocationListInput) SetDryRun(v bool) *ExportClientVpnClientCertificateRevocationListInput { + s.DryRun = &v + return s +} + +type ExportClientVpnClientCertificateRevocationListOutput struct { + _ struct{} `type:"structure"` + + // Information about the client certificate revocation list. + CertificateRevocationList *string `locationName:"certificateRevocationList" type:"string"` + + // The current state of the client certificate revocation list. + Status *ClientCertificateRevocationListStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s ExportClientVpnClientCertificateRevocationListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportClientVpnClientCertificateRevocationListOutput) GoString() string { + return s.String() +} + +// SetCertificateRevocationList sets the CertificateRevocationList field's value. +func (s *ExportClientVpnClientCertificateRevocationListOutput) SetCertificateRevocationList(v string) *ExportClientVpnClientCertificateRevocationListOutput { + s.CertificateRevocationList = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *ExportClientVpnClientCertificateRevocationListOutput) SetStatus(v *ClientCertificateRevocationListStatus) *ExportClientVpnClientCertificateRevocationListOutput { + s.Status = v + return s +} + +type ExportClientVpnClientConfigurationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ExportClientVpnClientConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportClientVpnClientConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportClientVpnClientConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportClientVpnClientConfigurationInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ExportClientVpnClientConfigurationInput) SetClientVpnEndpointId(v string) *ExportClientVpnClientConfigurationInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ExportClientVpnClientConfigurationInput) SetDryRun(v bool) *ExportClientVpnClientConfigurationInput { + s.DryRun = &v + return s +} + +type ExportClientVpnClientConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The contents of the Client VPN endpoint configuration file. + ClientConfiguration *string `locationName:"clientConfiguration" type:"string"` +} + +// String returns the string representation +func (s ExportClientVpnClientConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportClientVpnClientConfigurationOutput) GoString() string { + return s.String() +} + +// SetClientConfiguration sets the ClientConfiguration field's value. +func (s *ExportClientVpnClientConfigurationOutput) SetClientConfiguration(v string) *ExportClientVpnClientConfigurationOutput { + s.ClientConfiguration = &v + return s +} + +type ExportImageInput struct { + _ struct{} `type:"structure"` + + // Token to enable idempotency for export image requests. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A description of the image being exported. The maximum length is 255 characters. + Description *string `type:"string"` + + // The disk image format. + // + // DiskImageFormat is a required field + DiskImageFormat *string `type:"string" required:"true" enum:"DiskImageFormat"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The name of the role that grants VM Import/Export permission to export images + // to your Amazon S3 bucket. 
If this parameter is not specified, the default + // role is named 'vmimport'. + RoleName *string `type:"string"` + + // Information about the destination Amazon S3 bucket. The bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // + // S3ExportLocation is a required field + S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` + + // The tags to apply to the image being exported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ExportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportImageInput"} + if s.DiskImageFormat == nil { + invalidParams.Add(request.NewErrParamRequired("DiskImageFormat")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.S3ExportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportLocation")) + } + if s.S3ExportLocation != nil { + if err := s.S3ExportLocation.Validate(); err != nil { + invalidParams.AddNested("S3ExportLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportImageInput) SetClientToken(v string) *ExportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ExportImageInput) SetDescription(v string) *ExportImageInput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. 
+func (s *ExportImageInput) SetDiskImageFormat(v string) *ExportImageInput { + s.DiskImageFormat = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportImageInput) SetDryRun(v bool) *ExportImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageInput) SetImageId(v string) *ExportImageInput { + s.ImageId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageInput) SetRoleName(v string) *ExportImageInput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) *ExportImageInput { + s.S3ExportLocation = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ExportImageInput) SetTagSpecifications(v []*TagSpecification) *ExportImageInput { + s.TagSpecifications = v + return s +} + +type ExportImageOutput struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The disk image format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // The name of the role that grants VM Import/Export permission to export images + // to your Amazon S3 bucket. + RoleName *string `locationName:"roleName" type:"string"` + + // Information about the destination Amazon S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. 
The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ExportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageOutput) SetDescription(v string) *ExportImageOutput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageOutput) SetDiskImageFormat(v string) *ExportImageOutput { + s.DiskImageFormat = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageOutput) SetExportImageTaskId(v string) *ExportImageOutput { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageOutput) SetImageId(v string) *ExportImageOutput { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageOutput) SetProgress(v string) *ExportImageOutput { + s.Progress = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageOutput) SetRoleName(v string) *ExportImageOutput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageOutput) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageOutput { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *ExportImageOutput) SetStatus(v string) *ExportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ExportImageOutput) SetTags(v []*Tag) *ExportImageOutput { + s.Tags = v + return s +} + +// Describes an export image task. +type ExportImageTask struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the destination Amazon S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ExportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageTask) SetDescription(v string) *ExportImageTask { + s.Description = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. 
+func (s *ExportImageTask) SetExportImageTaskId(v string) *ExportImageTask { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageTask) SetImageId(v string) *ExportImageTask { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageTask) SetProgress(v string) *ExportImageTask { + s.Progress = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageTask) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageTask { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageTask) SetStatus(v string) *ExportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask { + s.Tags = v + return s +} + +// Describes an instance export task. +type ExportTask struct { + _ struct{} `type:"structure"` + + // A description of the resource being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export task. + ExportTaskId *string `locationName:"exportTaskId" type:"string"` + + // Information about the export task. + ExportToS3Task *ExportToS3Task `locationName:"exportToS3" type:"structure"` + + // Information about the instance to export. + InstanceExportDetails *InstanceExportDetails `locationName:"instanceExport" type:"structure"` + + // The state of the export task. + State *string `locationName:"state" type:"string" enum:"ExportTaskState"` + + // The status message related to the export task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The tags for the export task. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ExportTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportTask) SetDescription(v string) *ExportTask { + s.Description = &v + return s +} + +// SetExportTaskId sets the ExportTaskId field's value. +func (s *ExportTask) SetExportTaskId(v string) *ExportTask { + s.ExportTaskId = &v + return s +} + +// SetExportToS3Task sets the ExportToS3Task field's value. +func (s *ExportTask) SetExportToS3Task(v *ExportToS3Task) *ExportTask { + s.ExportToS3Task = v + return s +} + +// SetInstanceExportDetails sets the InstanceExportDetails field's value. +func (s *ExportTask) SetInstanceExportDetails(v *InstanceExportDetails) *ExportTask { + s.InstanceExportDetails = v + return s +} + +// SetState sets the State field's value. +func (s *ExportTask) SetState(v string) *ExportTask { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportTask) SetStatusMessage(v string) *ExportTask { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ExportTask) SetTags(v []*Tag) *ExportTask { + s.Tags = v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3Location struct { + _ struct{} `type:"structure"` + + // The destination Amazon S3 bucket. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The prefix (logical hierarchy) in the bucket. 
+ S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3Location) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3Location) SetS3Bucket(v string) *ExportTaskS3Location { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { + s.S3Prefix = &v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3LocationRequest struct { + _ struct{} `type:"structure"` + + // The destination Amazon S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3LocationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3LocationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTaskS3LocationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTaskS3LocationRequest"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3LocationRequest) SetS3Bucket(v string) *ExportTaskS3LocationRequest { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. 
+func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3LocationRequest { + s.S3Prefix = &v + return s +} + +// Describes the format and location for an instance export task. +type ExportToS3Task struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The encryption key for your S3 bucket. + S3Key *string `locationName:"s3Key" type:"string"` +} + +// String returns the string representation +func (s ExportToS3Task) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3Task) GoString() string { + return s.String() +} + +// SetContainerFormat sets the ContainerFormat field's value. +func (s *ExportToS3Task) SetContainerFormat(v string) *ExportToS3Task { + s.ContainerFormat = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportToS3Task) SetDiskImageFormat(v string) *ExportToS3Task { + s.DiskImageFormat = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportToS3Task) SetS3Bucket(v string) *ExportToS3Task { + s.S3Bucket = &v + return s +} + +// SetS3Key sets the S3Key field's value. +func (s *ExportToS3Task) SetS3Key(v string) *ExportToS3Task { + s.S3Key = &v + return s +} + +// Describes an instance export task. 
+type ExportToS3TaskSpecification struct { + _ struct{} `type:"structure"` + + // The container format used to combine disk images with metadata (such as OVF). + // If absent, only the disk image is exported. + ContainerFormat *string `locationName:"containerFormat" type:"string" enum:"ContainerFormat"` + + // The format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The image is written to a single object in the Amazon S3 bucket at the S3 + // key s3prefix + exportTaskId + '.' + diskImageFormat. + S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportToS3TaskSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportToS3TaskSpecification) GoString() string { + return s.String() +} + +// SetContainerFormat sets the ContainerFormat field's value. +func (s *ExportToS3TaskSpecification) SetContainerFormat(v string) *ExportToS3TaskSpecification { + s.ContainerFormat = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportToS3TaskSpecification) SetDiskImageFormat(v string) *ExportToS3TaskSpecification { + s.DiskImageFormat = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportToS3TaskSpecification) SetS3Bucket(v string) *ExportToS3TaskSpecification { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. 
+func (s *ExportToS3TaskSpecification) SetS3Prefix(v string) *ExportToS3TaskSpecification { + s.S3Prefix = &v + return s +} + +type ExportTransitGatewayRoutesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * attachment.transit-gateway-attachment-id - The id of the transit gateway + // attachment. + // + // * attachment.resource-id - The resource id of the transit gateway attachment. + // + // * route-search.exact-match - The exact match of the specified filter. + // + // * route-search.longest-prefix-match - The longest prefix that matches + // the route. + // + // * route-search.subnet-of-match - The routes with a subnet that match the + // specified CIDR filter. + // + // * route-search.supernet-of-match - The routes with a CIDR that encompass + // the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 + // routes in your route table and you specify supernet-of-match as 10.0.1.0/30, + // then the result returns 10.0.1.0/29. + // + // * state - The state of the route (active | blackhole). + // + // * transit-gateway-route-destination-cidr-block - The CIDR range. + // + // * type - The type of route (propagated | static). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The name of the S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The ID of the route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportTransitGatewayRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTransitGatewayRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTransitGatewayRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTransitGatewayRoutesInput"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportTransitGatewayRoutesInput) SetDryRun(v bool) *ExportTransitGatewayRoutesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ExportTransitGatewayRoutesInput) SetFilters(v []*Filter) *ExportTransitGatewayRoutesInput { + s.Filters = v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTransitGatewayRoutesInput) SetS3Bucket(v string) *ExportTransitGatewayRoutesInput { + s.S3Bucket = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *ExportTransitGatewayRoutesInput) SetTransitGatewayRouteTableId(v string) *ExportTransitGatewayRoutesInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type ExportTransitGatewayRoutesOutput struct { + _ struct{} `type:"structure"` + + // The URL of the exported file in Amazon S3. For example, s3://bucket_name/VPCTransitGateway/TransitGatewayRouteTables/file_name. 
+ S3Location *string `locationName:"s3Location" type:"string"` +} + +// String returns the string representation +func (s ExportTransitGatewayRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTransitGatewayRoutesOutput) GoString() string { + return s.String() +} + +// SetS3Location sets the S3Location field's value. +func (s *ExportTransitGatewayRoutesOutput) SetS3Location(v string) *ExportTransitGatewayRoutesOutput { + s.S3Location = &v + return s +} + +// Describes a Reserved Instance whose queued purchase was not deleted. +type FailedQueuedPurchaseDeletion struct { + _ struct{} `type:"structure"` + + // The error. + Error *DeleteQueuedReservedInstancesError `locationName:"error" type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s FailedQueuedPurchaseDeletion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedQueuedPurchaseDeletion) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *FailedQueuedPurchaseDeletion) SetError(v *DeleteQueuedReservedInstancesError) *FailedQueuedPurchaseDeletion { + s.Error = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *FailedQueuedPurchaseDeletion) SetReservedInstancesId(v string) *FailedQueuedPurchaseDeletion { + s.ReservedInstancesId = &v + return s +} + +// Describes the IAM SAML identity providers used for federated authentication. +type FederatedAuthentication struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SamlProviderArn *string `locationName:"samlProviderArn" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. 
+ SelfServiceSamlProviderArn *string `locationName:"selfServiceSamlProviderArn" type:"string"` +} + +// String returns the string representation +func (s FederatedAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthentication) GoString() string { + return s.String() +} + +// SetSamlProviderArn sets the SamlProviderArn field's value. +func (s *FederatedAuthentication) SetSamlProviderArn(v string) *FederatedAuthentication { + s.SamlProviderArn = &v + return s +} + +// SetSelfServiceSamlProviderArn sets the SelfServiceSamlProviderArn field's value. +func (s *FederatedAuthentication) SetSelfServiceSamlProviderArn(v string) *FederatedAuthentication { + s.SelfServiceSamlProviderArn = &v + return s +} + +// The IAM SAML identity provider used for federated authentication. +type FederatedAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SAMLProviderArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. + SelfServiceSAMLProviderArn *string `type:"string"` +} + +// String returns the string representation +func (s FederatedAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthenticationRequest) GoString() string { + return s.String() +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *FederatedAuthenticationRequest) SetSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SAMLProviderArn = &v + return s +} + +// SetSelfServiceSAMLProviderArn sets the SelfServiceSAMLProviderArn field's value. 
+func (s *FederatedAuthenticationRequest) SetSelfServiceSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SelfServiceSAMLProviderArn = &v + return s +} + +// A filter name and value pair that is used to return a more specific list +// of results from a describe operation. Filters can be used to match a set +// of resources by specific criteria, such as tags, attributes, or IDs. The +// filters supported by a describe operation are documented with the describe +// operation. For example: +// +// * DescribeAvailabilityZones +// +// * DescribeImages +// +// * DescribeInstances +// +// * DescribeKeyPairs +// +// * DescribeSecurityGroups +// +// * DescribeSnapshots +// +// * DescribeSubnets +// +// * DescribeTags +// +// * DescribeVolumes +// +// * DescribeVpcs +type Filter struct { + _ struct{} `type:"structure"` + + // The name of the filter. Filter names are case-sensitive. + Name *string `type:"string"` + + // The filter values. Filter values are case-sensitive. + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *Filter) SetValues(v []*string) *Filter { + s.Values = v + return s +} + +// Describes an EC2 Fleet. +type FleetData struct { + _ struct{} `type:"structure"` + + // The progress of the EC2 Fleet. If there is an error, the status is error. + // After all requests are placed, the status is pending_fulfillment. If the + // size of the EC2 Fleet is equal to or greater than its target capacity, the + // status is fulfilled. 
If the size of the EC2 Fleet is decreased, the status + // is pending_termination while instances are terminating. + ActivityStatus *string `locationName:"activityStatus" type:"string" enum:"FleetActivityStatus"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string"` + + // The creation date and time of the EC2 Fleet. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // Information about the instances that could not be launched by the fleet. + // Valid only when Type is set to instant. + Errors []*DescribeFleetError `locationName:"errorSet" locationNameList:"item" type:"list"` + + // Indicates whether running instances should be terminated if the target capacity + // of the EC2 Fleet is decreased below the current size of the EC2 Fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The ID of the EC2 Fleet. + FleetId *string `locationName:"fleetId" type:"string"` + + // The state of the EC2 Fleet. + FleetState *string `locationName:"fleetState" type:"string" enum:"FleetStateCode"` + + // The number of units fulfilled by this request compared to the set target + // capacity. + FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` + + // The number of units fulfilled by this request compared to the set target + // On-Demand capacity. + FulfilledOnDemandCapacity *float64 `locationName:"fulfilledOnDemandCapacity" type:"double"` + + // Information about the instances that were launched by the fleet. Valid only + // when Type is set to instant. 
+ Instances []*DescribeFleetsInstances `locationName:"fleetInstanceSet" locationNameList:"item" type:"list"` + + // The launch template and overrides. + LaunchTemplateConfigs []*FleetLaunchTemplateConfig `locationName:"launchTemplateConfigs" locationNameList:"item" type:"list"` + + // The allocation strategy of On-Demand Instances in an EC2 Fleet. + OnDemandOptions *OnDemandOptions `locationName:"onDemandOptions" type:"structure"` + + // Indicates whether EC2 Fleet should replace unhealthy instances. + ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + + // The configuration of Spot Instances in an EC2 Fleet. + SpotOptions *SpotOptions `locationName:"spotOptions" type:"structure"` + + // The tags for an EC2 Fleet resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The number of units to request. You can choose to set the target capacity + // in terms of instances or a performance characteristic that is important to + // your application workload, such as vCPUs, memory, or I/O. If the request + // type is maintain, you can specify a target capacity of 0 and add capacity + // later. + TargetCapacitySpecification *TargetCapacitySpecification `locationName:"targetCapacitySpecification" type:"structure"` + + // Indicates whether running instances should be terminated when the EC2 Fleet + // expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The type of request. Indicates whether the EC2 Fleet only requests the target + // capacity, or also attempts to maintain it. If you request a certain target + // capacity, EC2 Fleet only places the required requests; it does not attempt + // to replenish instances if capacity is diminished, and it does not submit + // requests in alternative capacity pools if capacity is unavailable. 
To maintain + // a certain target capacity, EC2 Fleet places the required requests to meet + // this target capacity. It also automatically replenishes any interrupted Spot + // Instances. Default: maintain. + Type *string `locationName:"type" type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The default is to start fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` + + // The end date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // At this point, no new instance requests are placed or able to fulfill the + // request. The default end date is 7 days from the current date. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` +} + +// String returns the string representation +func (s FleetData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetData) GoString() string { + return s.String() +} + +// SetActivityStatus sets the ActivityStatus field's value. +func (s *FleetData) SetActivityStatus(v string) *FleetData { + s.ActivityStatus = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *FleetData) SetClientToken(v string) *FleetData { + s.ClientToken = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *FleetData) SetCreateTime(v time.Time) *FleetData { + s.CreateTime = &v + return s +} + +// SetErrors sets the Errors field's value. +func (s *FleetData) SetErrors(v []*DescribeFleetError) *FleetData { + s.Errors = v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *FleetData) SetExcessCapacityTerminationPolicy(v string) *FleetData { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetFleetId sets the FleetId field's value. 
+func (s *FleetData) SetFleetId(v string) *FleetData { + s.FleetId = &v + return s +} + +// SetFleetState sets the FleetState field's value. +func (s *FleetData) SetFleetState(v string) *FleetData { + s.FleetState = &v + return s +} + +// SetFulfilledCapacity sets the FulfilledCapacity field's value. +func (s *FleetData) SetFulfilledCapacity(v float64) *FleetData { + s.FulfilledCapacity = &v + return s +} + +// SetFulfilledOnDemandCapacity sets the FulfilledOnDemandCapacity field's value. +func (s *FleetData) SetFulfilledOnDemandCapacity(v float64) *FleetData { + s.FulfilledOnDemandCapacity = &v + return s +} + +// SetInstances sets the Instances field's value. +func (s *FleetData) SetInstances(v []*DescribeFleetsInstances) *FleetData { + s.Instances = v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *FleetData) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfig) *FleetData { + s.LaunchTemplateConfigs = v + return s +} + +// SetOnDemandOptions sets the OnDemandOptions field's value. +func (s *FleetData) SetOnDemandOptions(v *OnDemandOptions) *FleetData { + s.OnDemandOptions = v + return s +} + +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *FleetData) SetReplaceUnhealthyInstances(v bool) *FleetData { + s.ReplaceUnhealthyInstances = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. +func (s *FleetData) SetSpotOptions(v *SpotOptions) *FleetData { + s.SpotOptions = v + return s +} + +// SetTags sets the Tags field's value. +func (s *FleetData) SetTags(v []*Tag) *FleetData { + s.Tags = v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *FleetData) SetTargetCapacitySpecification(v *TargetCapacitySpecification) *FleetData { + s.TargetCapacitySpecification = v + return s +} + +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value. 
+func (s *FleetData) SetTerminateInstancesWithExpiration(v bool) *FleetData { + s.TerminateInstancesWithExpiration = &v + return s +} + +// SetType sets the Type field's value. +func (s *FleetData) SetType(v string) *FleetData { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *FleetData) SetValidFrom(v time.Time) *FleetData { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *FleetData) SetValidUntil(v time.Time) *FleetData { + s.ValidUntil = &v + return s +} + +// Describes a launch template and overrides. +type FleetLaunchTemplateConfig struct { + _ struct{} `type:"structure"` + + // The launch template. + LaunchTemplateSpecification *FleetLaunchTemplateSpecification `locationName:"launchTemplateSpecification" type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. + Overrides []*FleetLaunchTemplateOverrides `locationName:"overrides" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FleetLaunchTemplateConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateConfig) GoString() string { + return s.String() +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *FleetLaunchTemplateConfig) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecification) *FleetLaunchTemplateConfig { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *FleetLaunchTemplateConfig) SetOverrides(v []*FleetLaunchTemplateOverrides) *FleetLaunchTemplateConfig { + s.Overrides = v + return s +} + +// Describes a launch template and overrides. +type FleetLaunchTemplateConfigRequest struct { + _ struct{} `type:"structure"` + + // The launch template to use. 
You must specify either the launch template ID + // or launch template name in the request. + LaunchTemplateSpecification *FleetLaunchTemplateSpecificationRequest `type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. + Overrides []*FleetLaunchTemplateOverridesRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FleetLaunchTemplateConfigRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateConfigRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FleetLaunchTemplateConfigRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FleetLaunchTemplateConfigRequest"} + if s.LaunchTemplateSpecification != nil { + if err := s.LaunchTemplateSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplateSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *FleetLaunchTemplateConfigRequest) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecificationRequest) *FleetLaunchTemplateConfigRequest { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *FleetLaunchTemplateConfigRequest) SetOverrides(v []*FleetLaunchTemplateOverridesRequest) *FleetLaunchTemplateConfigRequest { + s.Overrides = v + return s +} + +// Describes overrides for a launch template. +type FleetLaunchTemplateOverrides struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to launch the instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. 
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The maximum price per unit hour that you are willing to pay for a Spot Instance. + MaxPrice *string `locationName:"maxPrice" type:"string"` + + // The location where the instance launched, if applicable. + Placement *PlacementResponse `locationName:"placement" type:"structure"` + + // The priority for the launch template override. If AllocationStrategy is set + // to prioritized, EC2 Fleet uses priority to determine which launch template + // override to use first in fulfilling On-Demand capacity. The highest priority + // is launched first. Valid values are whole numbers starting at 0. The lower + // the number, the higher the priority. If no number is set, the override has + // the lowest priority. + Priority *float64 `locationName:"priority" type:"double"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The number of units provided by the specified instance type. + WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` +} + +// String returns the string representation +func (s FleetLaunchTemplateOverrides) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateOverrides) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *FleetLaunchTemplateOverrides) SetAvailabilityZone(v string) *FleetLaunchTemplateOverrides { + s.AvailabilityZone = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetLaunchTemplateOverrides) SetInstanceType(v string) *FleetLaunchTemplateOverrides { + s.InstanceType = &v + return s +} + +// SetMaxPrice sets the MaxPrice field's value. 
+func (s *FleetLaunchTemplateOverrides) SetMaxPrice(v string) *FleetLaunchTemplateOverrides { + s.MaxPrice = &v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *FleetLaunchTemplateOverrides) SetPlacement(v *PlacementResponse) *FleetLaunchTemplateOverrides { + s.Placement = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *FleetLaunchTemplateOverrides) SetPriority(v float64) *FleetLaunchTemplateOverrides { + s.Priority = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *FleetLaunchTemplateOverrides) SetSubnetId(v string) *FleetLaunchTemplateOverrides { + s.SubnetId = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. +func (s *FleetLaunchTemplateOverrides) SetWeightedCapacity(v float64) *FleetLaunchTemplateOverrides { + s.WeightedCapacity = &v + return s +} + +// Describes overrides for a launch template. +type FleetLaunchTemplateOverridesRequest struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which to launch the instances. + AvailabilityZone *string `type:"string"` + + // The instance type. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The maximum price per unit hour that you are willing to pay for a Spot Instance. + MaxPrice *string `type:"string"` + + // The location where the instance launched, if applicable. + Placement *Placement `type:"structure"` + + // The priority for the launch template override. If AllocationStrategy is set + // to prioritized, EC2 Fleet uses priority to determine which launch template + // override to use first in fulfilling On-Demand capacity. The highest priority + // is launched first. Valid values are whole numbers starting at 0. The lower + // the number, the higher the priority. If no number is set, the launch template + // override has the lowest priority. + Priority *float64 `type:"double"` + + // The IDs of the subnets in which to launch the instances. 
Separate multiple + // subnet IDs using commas (for example, subnet-1234abcdeexample1, subnet-0987cdef6example2). + // A request of type instant can have only one subnet ID. + SubnetId *string `type:"string"` + + // The number of units provided by the specified instance type. + WeightedCapacity *float64 `type:"double"` +} + +// String returns the string representation +func (s FleetLaunchTemplateOverridesRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateOverridesRequest) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetAvailabilityZone(v string) *FleetLaunchTemplateOverridesRequest { + s.AvailabilityZone = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetInstanceType(v string) *FleetLaunchTemplateOverridesRequest { + s.InstanceType = &v + return s +} + +// SetMaxPrice sets the MaxPrice field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetMaxPrice(v string) *FleetLaunchTemplateOverridesRequest { + s.MaxPrice = &v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetPlacement(v *Placement) *FleetLaunchTemplateOverridesRequest { + s.Placement = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetPriority(v float64) *FleetLaunchTemplateOverridesRequest { + s.Priority = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *FleetLaunchTemplateOverridesRequest) SetSubnetId(v string) *FleetLaunchTemplateOverridesRequest { + s.SubnetId = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. 
+func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *FleetLaunchTemplateOverridesRequest { + s.WeightedCapacity = &v + return s +} + +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by a Spot Fleet request to configure Amazon EC2 instances. +// For information about launch templates, see Launching an instance from a +// launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon EC2 User Guide for Linux Instances. +type FleetLaunchTemplateSpecification struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. + LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` + + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. + LaunchTemplateName *string `locationName:"launchTemplateName" min:"3" type:"string"` + + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. + // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s FleetLaunchTemplateSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FleetLaunchTemplateSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FleetLaunchTemplateSpecification"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *FleetLaunchTemplateSpecification) SetLaunchTemplateId(v string) *FleetLaunchTemplateSpecification { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *FleetLaunchTemplateSpecification) SetLaunchTemplateName(v string) *FleetLaunchTemplateSpecification { + s.LaunchTemplateName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *FleetLaunchTemplateSpecification) SetVersion(v string) *FleetLaunchTemplateSpecification { + s.Version = &v + return s +} + +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by an EC2 Fleet to configure Amazon EC2 instances. For information +// about launch templates, see Launching an instance from a launch template +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon Elastic Compute Cloud User Guide. +type FleetLaunchTemplateSpecificationRequest struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. + LaunchTemplateName *string `min:"3" type:"string"` + + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. 
+ // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. + Version *string `type:"string"` +} + +// String returns the string representation +func (s FleetLaunchTemplateSpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetLaunchTemplateSpecificationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FleetLaunchTemplateSpecificationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FleetLaunchTemplateSpecificationRequest"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetLaunchTemplateId(v string) *FleetLaunchTemplateSpecificationRequest { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetLaunchTemplateName(v string) *FleetLaunchTemplateSpecificationRequest { + s.LaunchTemplateName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *FleetLaunchTemplateSpecificationRequest) SetVersion(v string) *FleetLaunchTemplateSpecificationRequest { + s.Version = &v + return s +} + +// The strategy to use when Amazon EC2 emits a signal that your Spot Instance +// is at an elevated risk of being interrupted. 
+type FleetSpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. Only available for fleets of type maintain. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for both instances while they are running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalance) SetReplacementStrategy(v string) *FleetSpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-capacity-rebalance) +// in the Amazon Elastic Compute Cloud User Guide. +type FleetSpotCapacityRebalanceRequest struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. You must specify a value, otherwise you get an error. 
+ // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for all instances while they are running. + ReplacementStrategy *string `type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalanceRequest) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalanceRequest) SetReplacementStrategy(v string) *FleetSpotCapacityRebalanceRequest { + s.ReplacementStrategy = &v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type FleetSpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategies) SetCapacityRebalance(v *FleetSpotCapacityRebalance) *FleetSpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. 
+type FleetSpotMaintenanceStrategiesRequest struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalanceRequest `type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategiesRequest) SetCapacityRebalance(v *FleetSpotCapacityRebalanceRequest) *FleetSpotMaintenanceStrategiesRequest { + s.CapacityRebalance = v + return s +} + +// Describes a flow log. +type FlowLog struct { + _ struct{} `type:"structure"` + + // The date and time the flow log was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // Information about the error that occurred. Rate limited indicates that CloudWatch + // Logs throttling has been applied for one or more network interfaces, or that + // you've reached the limit on the number of log groups that you can create. + // Access error indicates that the IAM role associated with the flow log does + // not have sufficient permissions to publish to CloudWatch Logs. Unknown error + // indicates an internal error. + DeliverLogsErrorMessage *string `locationName:"deliverLogsErrorMessage" type:"string"` + + // The ARN of the IAM role that posts logs to CloudWatch Logs. + DeliverLogsPermissionArn *string `locationName:"deliverLogsPermissionArn" type:"string"` + + // The status of the logs delivery (SUCCESS | FAILED). + DeliverLogsStatus *string `locationName:"deliverLogsStatus" type:"string"` + + // The flow log ID. 
+ FlowLogId *string `locationName:"flowLogId" type:"string"` + + // The status of the flow log (ACTIVE). + FlowLogStatus *string `locationName:"flowLogStatus" type:"string"` + + // Specifies the destination to which the flow log data is published. Flow log + // data can be published to an CloudWatch Logs log group or an Amazon S3 bucket. + // If the flow log publishes to CloudWatch Logs, this element indicates the + // Amazon Resource Name (ARN) of the CloudWatch Logs log group to which the + // data is published. If the flow log publishes to Amazon S3, this element indicates + // the ARN of the Amazon S3 bucket to which the data is published. + LogDestination *string `locationName:"logDestination" type:"string"` + + // Specifies the type of destination to which the flow log data is published. + // Flow log data can be published to CloudWatch Logs or Amazon S3. + LogDestinationType *string `locationName:"logDestinationType" type:"string" enum:"LogDestinationType"` + + // The format of the flow log record. + LogFormat *string `locationName:"logFormat" type:"string"` + + // The name of the flow log group. + LogGroupName *string `locationName:"logGroupName" type:"string"` + + // The maximum interval of time, in seconds, during which a flow of packets + // is captured and aggregated into a flow log record. + // + // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), + // the aggregation interval is always 60 seconds (1 minute) or less, regardless + // of the specified value. + // + // Valid Values: 60 | 600 + MaxAggregationInterval *int64 `locationName:"maxAggregationInterval" type:"integer"` + + // The ID of the resource on which the flow log was created. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The tags for the flow log. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The type of traffic captured for the flow log. + TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"` +} + +// String returns the string representation +func (s FlowLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlowLog) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *FlowLog) SetCreationTime(v time.Time) *FlowLog { + s.CreationTime = &v + return s +} + +// SetDeliverLogsErrorMessage sets the DeliverLogsErrorMessage field's value. +func (s *FlowLog) SetDeliverLogsErrorMessage(v string) *FlowLog { + s.DeliverLogsErrorMessage = &v + return s +} + +// SetDeliverLogsPermissionArn sets the DeliverLogsPermissionArn field's value. +func (s *FlowLog) SetDeliverLogsPermissionArn(v string) *FlowLog { + s.DeliverLogsPermissionArn = &v + return s +} + +// SetDeliverLogsStatus sets the DeliverLogsStatus field's value. +func (s *FlowLog) SetDeliverLogsStatus(v string) *FlowLog { + s.DeliverLogsStatus = &v + return s +} + +// SetFlowLogId sets the FlowLogId field's value. +func (s *FlowLog) SetFlowLogId(v string) *FlowLog { + s.FlowLogId = &v + return s +} + +// SetFlowLogStatus sets the FlowLogStatus field's value. +func (s *FlowLog) SetFlowLogStatus(v string) *FlowLog { + s.FlowLogStatus = &v + return s +} + +// SetLogDestination sets the LogDestination field's value. +func (s *FlowLog) SetLogDestination(v string) *FlowLog { + s.LogDestination = &v + return s +} + +// SetLogDestinationType sets the LogDestinationType field's value. +func (s *FlowLog) SetLogDestinationType(v string) *FlowLog { + s.LogDestinationType = &v + return s +} + +// SetLogFormat sets the LogFormat field's value. +func (s *FlowLog) SetLogFormat(v string) *FlowLog { + s.LogFormat = &v + return s +} + +// SetLogGroupName sets the LogGroupName field's value. 
+func (s *FlowLog) SetLogGroupName(v string) *FlowLog { + s.LogGroupName = &v + return s +} + +// SetMaxAggregationInterval sets the MaxAggregationInterval field's value. +func (s *FlowLog) SetMaxAggregationInterval(v int64) *FlowLog { + s.MaxAggregationInterval = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *FlowLog) SetResourceId(v string) *FlowLog { + s.ResourceId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *FlowLog) SetTags(v []*Tag) *FlowLog { + s.Tags = v + return s +} + +// SetTrafficType sets the TrafficType field's value. +func (s *FlowLog) SetTrafficType(v string) *FlowLog { + s.TrafficType = &v + return s +} + +// Describes the FPGA accelerator for the instance type. +type FpgaDeviceInfo struct { + _ struct{} `type:"structure"` + + // The count of FPGA accelerators for the instance type. + Count *int64 `locationName:"count" type:"integer"` + + // The manufacturer of the FPGA accelerator. + Manufacturer *string `locationName:"manufacturer" type:"string"` + + // Describes the memory for the FPGA accelerator for the instance type. + MemoryInfo *FpgaDeviceMemoryInfo `locationName:"memoryInfo" type:"structure"` + + // The name of the FPGA accelerator. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s FpgaDeviceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaDeviceInfo) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *FpgaDeviceInfo) SetCount(v int64) *FpgaDeviceInfo { + s.Count = &v + return s +} + +// SetManufacturer sets the Manufacturer field's value. +func (s *FpgaDeviceInfo) SetManufacturer(v string) *FpgaDeviceInfo { + s.Manufacturer = &v + return s +} + +// SetMemoryInfo sets the MemoryInfo field's value. 
+func (s *FpgaDeviceInfo) SetMemoryInfo(v *FpgaDeviceMemoryInfo) *FpgaDeviceInfo { + s.MemoryInfo = v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaDeviceInfo) SetName(v string) *FpgaDeviceInfo { + s.Name = &v + return s +} + +// Describes the memory for the FPGA accelerator for the instance type. +type FpgaDeviceMemoryInfo struct { + _ struct{} `type:"structure"` + + // The size of the memory available to the FPGA accelerator, in MiB. + SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` +} + +// String returns the string representation +func (s FpgaDeviceMemoryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaDeviceMemoryInfo) GoString() string { + return s.String() +} + +// SetSizeInMiB sets the SizeInMiB field's value. +func (s *FpgaDeviceMemoryInfo) SetSizeInMiB(v int64) *FpgaDeviceMemoryInfo { + s.SizeInMiB = &v + return s +} + +// Describes an Amazon FPGA image (AFI). +type FpgaImage struct { + _ struct{} `type:"structure"` + + // The date and time the AFI was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // Indicates whether data retention support is enabled for the AFI. + DataRetentionSupport *bool `locationName:"dataRetentionSupport" type:"boolean"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The global FPGA image identifier (AGFI ID). + FpgaImageGlobalId *string `locationName:"fpgaImageGlobalId" type:"string"` + + // The FPGA image identifier (AFI ID). + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // The alias of the AFI owner. Possible values include self, amazon, and aws-marketplace. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the AFI owner. 
+ OwnerId *string `locationName:"ownerId" type:"string"` + + // Information about the PCI bus. + PciId *PciId `locationName:"pciId" type:"structure"` + + // The product codes for the AFI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the AFI is public. + Public *bool `locationName:"public" type:"boolean"` + + // The version of the AWS Shell that was used to create the bitstream. + ShellVersion *string `locationName:"shellVersion" type:"string"` + + // Information about the state of the AFI. + State *FpgaImageState `locationName:"state" type:"structure"` + + // Any tags assigned to the AFI. + Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` + + // The time of the most recent update to the AFI. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` +} + +// String returns the string representation +func (s FpgaImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImage) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *FpgaImage) SetCreateTime(v time.Time) *FpgaImage { + s.CreateTime = &v + return s +} + +// SetDataRetentionSupport sets the DataRetentionSupport field's value. +func (s *FpgaImage) SetDataRetentionSupport(v bool) *FpgaImage { + s.DataRetentionSupport = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *FpgaImage) SetDescription(v string) *FpgaImage { + s.Description = &v + return s +} + +// SetFpgaImageGlobalId sets the FpgaImageGlobalId field's value. +func (s *FpgaImage) SetFpgaImageGlobalId(v string) *FpgaImage { + s.FpgaImageGlobalId = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImage) SetFpgaImageId(v string) *FpgaImage { + s.FpgaImageId = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *FpgaImage) SetName(v string) *FpgaImage { + s.Name = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *FpgaImage) SetOwnerAlias(v string) *FpgaImage { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *FpgaImage) SetOwnerId(v string) *FpgaImage { + s.OwnerId = &v + return s +} + +// SetPciId sets the PciId field's value. +func (s *FpgaImage) SetPciId(v *PciId) *FpgaImage { + s.PciId = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImage) SetProductCodes(v []*ProductCode) *FpgaImage { + s.ProductCodes = v + return s +} + +// SetPublic sets the Public field's value. +func (s *FpgaImage) SetPublic(v bool) *FpgaImage { + s.Public = &v + return s +} + +// SetShellVersion sets the ShellVersion field's value. +func (s *FpgaImage) SetShellVersion(v string) *FpgaImage { + s.ShellVersion = &v + return s +} + +// SetState sets the State field's value. +func (s *FpgaImage) SetState(v *FpgaImageState) *FpgaImage { + s.State = v + return s +} + +// SetTags sets the Tags field's value. +func (s *FpgaImage) SetTags(v []*Tag) *FpgaImage { + s.Tags = v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage { + s.UpdateTime = &v + return s +} + +// Describes an Amazon FPGA image (AFI) attribute. +type FpgaImageAttribute struct { + _ struct{} `type:"structure"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The ID of the AFI. + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // The load permissions. + LoadPermissions []*LoadPermission `locationName:"loadPermissions" locationNameList:"item" type:"list"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // The product codes. 
+ ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FpgaImageAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageAttribute) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *FpgaImageAttribute) SetDescription(v string) *FpgaImageAttribute { + s.Description = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImageAttribute) SetFpgaImageId(v string) *FpgaImageAttribute { + s.FpgaImageId = &v + return s +} + +// SetLoadPermissions sets the LoadPermissions field's value. +func (s *FpgaImageAttribute) SetLoadPermissions(v []*LoadPermission) *FpgaImageAttribute { + s.LoadPermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaImageAttribute) SetName(v string) *FpgaImageAttribute { + s.Name = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImageAttribute) SetProductCodes(v []*ProductCode) *FpgaImageAttribute { + s.ProductCodes = v + return s +} + +// Describes the state of the bitstream generation process for an Amazon FPGA +// image (AFI). +type FpgaImageState struct { + _ struct{} `type:"structure"` + + // The state. The following are the possible values: + // + // * pending - AFI bitstream generation is in progress. + // + // * available - The AFI is available for use. + // + // * failed - AFI bitstream generation failed. + // + // * unavailable - The AFI is no longer available for use. + Code *string `locationName:"code" type:"string" enum:"FpgaImageStateCode"` + + // If the state is failed, this is the error message. 
+ Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s FpgaImageState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageState) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *FpgaImageState) SetCode(v string) *FpgaImageState { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { + s.Message = &v + return s +} + +// Describes the FPGAs for the instance type. +type FpgaInfo struct { + _ struct{} `type:"structure"` + + // Describes the FPGAs for the instance type. + Fpgas []*FpgaDeviceInfo `locationName:"fpgas" locationNameList:"item" type:"list"` + + // The total memory of all FPGA accelerators for the instance type. + TotalFpgaMemoryInMiB *int64 `locationName:"totalFpgaMemoryInMiB" type:"integer"` +} + +// String returns the string representation +func (s FpgaInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaInfo) GoString() string { + return s.String() +} + +// SetFpgas sets the Fpgas field's value. +func (s *FpgaInfo) SetFpgas(v []*FpgaDeviceInfo) *FpgaInfo { + s.Fpgas = v + return s +} + +// SetTotalFpgaMemoryInMiB sets the TotalFpgaMemoryInMiB field's value. +func (s *FpgaInfo) SetTotalFpgaMemoryInMiB(v int64) *FpgaInfo { + s.TotalFpgaMemoryInMiB = &v + return s +} + +type GetAssociatedEnclaveCertificateIamRolesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate for which to view the associated IAM roles, + // encryption keys, and Amazon S3 object information. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAssociatedEnclaveCertificateIamRolesInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetCertificateArn(v string) *GetAssociatedEnclaveCertificateIamRolesInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetDryRun(v bool) *GetAssociatedEnclaveCertificateIamRolesInput { + s.DryRun = &v + return s +} + +type GetAssociatedEnclaveCertificateIamRolesOutput struct { + _ struct{} `type:"structure"` + + // Information about the associated IAM roles. + AssociatedRoles []*AssociatedRole `locationName:"associatedRoleSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) GoString() string { + return s.String() +} + +// SetAssociatedRoles sets the AssociatedRoles field's value. 
+func (s *GetAssociatedEnclaveCertificateIamRolesOutput) SetAssociatedRoles(v []*AssociatedRole) *GetAssociatedEnclaveCertificateIamRolesOutput { + s.AssociatedRoles = v + return s +} + +type GetAssociatedIpv6PoolCidrsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the IPv6 address pool. + // + // PoolId is a required field + PoolId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAssociatedIpv6PoolCidrsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedIpv6PoolCidrsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssociatedIpv6PoolCidrsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAssociatedIpv6PoolCidrsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PoolId == nil { + invalidParams.Add(request.NewErrParamRequired("PoolId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetDryRun(v bool) *GetAssociatedIpv6PoolCidrsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *GetAssociatedIpv6PoolCidrsInput) SetMaxResults(v int64) *GetAssociatedIpv6PoolCidrsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetNextToken(v string) *GetAssociatedIpv6PoolCidrsInput { + s.NextToken = &v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetPoolId(v string) *GetAssociatedIpv6PoolCidrsInput { + s.PoolId = &v + return s +} + +type GetAssociatedIpv6PoolCidrsOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 CIDR block associations. + Ipv6CidrAssociations []*Ipv6CidrAssociation `locationName:"ipv6CidrAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetAssociatedIpv6PoolCidrsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedIpv6PoolCidrsOutput) GoString() string { + return s.String() +} + +// SetIpv6CidrAssociations sets the Ipv6CidrAssociations field's value. +func (s *GetAssociatedIpv6PoolCidrsOutput) SetIpv6CidrAssociations(v []*Ipv6CidrAssociation) *GetAssociatedIpv6PoolCidrsOutput { + s.Ipv6CidrAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAssociatedIpv6PoolCidrsOutput) SetNextToken(v string) *GetAssociatedIpv6PoolCidrsOutput { + s.NextToken = &v + return s +} + +type GetCapacityReservationUsageInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. 
+ // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + // + // Valid range: Minimum value of 1. Maximum value of 1000. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetCapacityReservationUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCapacityReservationUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCapacityReservationUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCapacityReservationUsageInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. 
+func (s *GetCapacityReservationUsageInput) SetCapacityReservationId(v string) *GetCapacityReservationUsageInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetCapacityReservationUsageInput) SetDryRun(v bool) *GetCapacityReservationUsageInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetCapacityReservationUsageInput) SetMaxResults(v int64) *GetCapacityReservationUsageInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCapacityReservationUsageInput) SetNextToken(v string) *GetCapacityReservationUsageInput { + s.NextToken = &v + return s +} + +type GetCapacityReservationUsageOutput struct { + _ struct{} `type:"structure"` + + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The ID of the Capacity Reservation. + CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The type of instance for which the Capacity Reservation reserves capacity. + InstanceType *string `locationName:"instanceType" type:"string"` + + // Information about the Capacity Reservation usage. + InstanceUsages []*InstanceUsage `locationName:"instanceUsageSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. 
The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. + State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + + // The number of instances for which the Capacity Reservation reserves capacity. + TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` +} + +// String returns the string representation +func (s GetCapacityReservationUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCapacityReservationUsageOutput) GoString() string { + return s.String() +} + +// SetAvailableInstanceCount sets the AvailableInstanceCount field's value. +func (s *GetCapacityReservationUsageOutput) SetAvailableInstanceCount(v int64) *GetCapacityReservationUsageOutput { + s.AvailableInstanceCount = &v + return s +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetCapacityReservationUsageOutput) SetCapacityReservationId(v string) *GetCapacityReservationUsageOutput { + s.CapacityReservationId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *GetCapacityReservationUsageOutput) SetInstanceType(v string) *GetCapacityReservationUsageOutput { + s.InstanceType = &v + return s +} + +// SetInstanceUsages sets the InstanceUsages field's value. 
+func (s *GetCapacityReservationUsageOutput) SetInstanceUsages(v []*InstanceUsage) *GetCapacityReservationUsageOutput { + s.InstanceUsages = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCapacityReservationUsageOutput) SetNextToken(v string) *GetCapacityReservationUsageOutput { + s.NextToken = &v + return s +} + +// SetState sets the State field's value. +func (s *GetCapacityReservationUsageOutput) SetState(v string) *GetCapacityReservationUsageOutput { + s.State = &v + return s +} + +// SetTotalInstanceCount sets the TotalInstanceCount field's value. +func (s *GetCapacityReservationUsageOutput) SetTotalInstanceCount(v int64) *GetCapacityReservationUsageOutput { + s.TotalInstanceCount = &v + return s +} + +type GetCoipPoolUsageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. The following are the possible values: + // + // * coip-address-usage.allocation-id + // + // * coip-address-usage.aws-account-id + // + // * coip-address-usage.aws-service + // + // * coip-address-usage.co-ip + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the address pool. 
+ // + // PoolId is a required field + PoolId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetCoipPoolUsageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCoipPoolUsageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCoipPoolUsageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCoipPoolUsageInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.PoolId == nil { + invalidParams.Add(request.NewErrParamRequired("PoolId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetCoipPoolUsageInput) SetDryRun(v bool) *GetCoipPoolUsageInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetCoipPoolUsageInput) SetFilters(v []*Filter) *GetCoipPoolUsageInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetCoipPoolUsageInput) SetMaxResults(v int64) *GetCoipPoolUsageInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCoipPoolUsageInput) SetNextToken(v string) *GetCoipPoolUsageInput { + s.NextToken = &v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *GetCoipPoolUsageInput) SetPoolId(v string) *GetCoipPoolUsageInput { + s.PoolId = &v + return s +} + +type GetCoipPoolUsageOutput struct { + _ struct{} `type:"structure"` + + // Information about the address usage. + CoipAddressUsages []*CoipAddressUsage `locationName:"coipAddressUsageSet" locationNameList:"item" type:"list"` + + // The ID of the customer-owned address pool. 
+ CoipPoolId *string `locationName:"coipPoolId" type:"string"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s GetCoipPoolUsageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCoipPoolUsageOutput) GoString() string { + return s.String() +} + +// SetCoipAddressUsages sets the CoipAddressUsages field's value. +func (s *GetCoipPoolUsageOutput) SetCoipAddressUsages(v []*CoipAddressUsage) *GetCoipPoolUsageOutput { + s.CoipAddressUsages = v + return s +} + +// SetCoipPoolId sets the CoipPoolId field's value. +func (s *GetCoipPoolUsageOutput) SetCoipPoolId(v string) *GetCoipPoolUsageOutput { + s.CoipPoolId = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *GetCoipPoolUsageOutput) SetLocalGatewayRouteTableId(v string) *GetCoipPoolUsageOutput { + s.LocalGatewayRouteTableId = &v + return s +} + +type GetConsoleOutputInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // When enabled, retrieves the latest console output for the instance. 
+ // + // Default: disabled (false) + Latest *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetConsoleOutputInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConsoleOutputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConsoleOutputInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetConsoleOutputInput) SetDryRun(v bool) *GetConsoleOutputInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetConsoleOutputInput) SetInstanceId(v string) *GetConsoleOutputInput { + s.InstanceId = &v + return s +} + +// SetLatest sets the Latest field's value. +func (s *GetConsoleOutputInput) SetLatest(v bool) *GetConsoleOutputInput { + s.Latest = &v + return s +} + +type GetConsoleOutputOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The console output, base64-encoded. If you are using a command line tool, + // the tool decodes the output for you. + Output *string `locationName:"output" type:"string"` + + // The time at which the output was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s GetConsoleOutputOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleOutputOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *GetConsoleOutputOutput) SetInstanceId(v string) *GetConsoleOutputOutput { + s.InstanceId = &v + return s +} + +// SetOutput sets the Output field's value. +func (s *GetConsoleOutputOutput) SetOutput(v string) *GetConsoleOutputOutput { + s.Output = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *GetConsoleOutputOutput) SetTimestamp(v time.Time) *GetConsoleOutputOutput { + s.Timestamp = &v + return s +} + +type GetConsoleScreenshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // When set to true, acts as keystroke input and wakes up an instance that's + // in standby or "sleep" mode. + WakeUp *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetConsoleScreenshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConsoleScreenshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConsoleScreenshotInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetConsoleScreenshotInput) SetDryRun(v bool) *GetConsoleScreenshotInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *GetConsoleScreenshotInput) SetInstanceId(v string) *GetConsoleScreenshotInput { + s.InstanceId = &v + return s +} + +// SetWakeUp sets the WakeUp field's value. +func (s *GetConsoleScreenshotInput) SetWakeUp(v bool) *GetConsoleScreenshotInput { + s.WakeUp = &v + return s +} + +type GetConsoleScreenshotOutput struct { + _ struct{} `type:"structure"` + + // The data that comprises the image. + ImageData *string `locationName:"imageData" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s GetConsoleScreenshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotOutput) GoString() string { + return s.String() +} + +// SetImageData sets the ImageData field's value. +func (s *GetConsoleScreenshotOutput) SetImageData(v string) *GetConsoleScreenshotOutput { + s.ImageData = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetConsoleScreenshotOutput) SetInstanceId(v string) *GetConsoleScreenshotOutput { + s.InstanceId = &v + return s +} + +type GetDefaultCreditSpecificationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance family. 
+ // + // InstanceFamily is a required field + InstanceFamily *string `type:"string" required:"true" enum:"UnlimitedSupportedInstanceFamily"` +} + +// String returns the string representation +func (s GetDefaultCreditSpecificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultCreditSpecificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDefaultCreditSpecificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDefaultCreditSpecificationInput"} + if s.InstanceFamily == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceFamily")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetDefaultCreditSpecificationInput) SetDryRun(v bool) *GetDefaultCreditSpecificationInput { + s.DryRun = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *GetDefaultCreditSpecificationInput) SetInstanceFamily(v string) *GetDefaultCreditSpecificationInput { + s.InstanceFamily = &v + return s +} + +type GetDefaultCreditSpecificationOutput struct { + _ struct{} `type:"structure"` + + // The default credit option for CPU usage of the instance family. + InstanceFamilyCreditSpecification *InstanceFamilyCreditSpecification `locationName:"instanceFamilyCreditSpecification" type:"structure"` +} + +// String returns the string representation +func (s GetDefaultCreditSpecificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultCreditSpecificationOutput) GoString() string { + return s.String() +} + +// SetInstanceFamilyCreditSpecification sets the InstanceFamilyCreditSpecification field's value. 
+func (s *GetDefaultCreditSpecificationOutput) SetInstanceFamilyCreditSpecification(v *InstanceFamilyCreditSpecification) *GetDefaultCreditSpecificationOutput { + s.InstanceFamilyCreditSpecification = v + return s +} + +type GetEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *GetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *GetEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +type GetEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for encryption by default. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s GetEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *GetEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *GetEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + +type GetEbsEncryptionByDefaultInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetEbsEncryptionByDefaultInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsEncryptionByDefaultInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *GetEbsEncryptionByDefaultInput) SetDryRun(v bool) *GetEbsEncryptionByDefaultInput { + s.DryRun = &v + return s +} + +type GetEbsEncryptionByDefaultOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether encryption by default is enabled. + EbsEncryptionByDefault *bool `locationName:"ebsEncryptionByDefault" type:"boolean"` +} + +// String returns the string representation +func (s GetEbsEncryptionByDefaultOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetEbsEncryptionByDefaultOutput) GoString() string { + return s.String() +} + +// SetEbsEncryptionByDefault sets the EbsEncryptionByDefault field's value. +func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *GetEbsEncryptionByDefaultOutput { + s.EbsEncryptionByDefault = &v + return s +} + +type GetGroupsForCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. 
This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupsForCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupsForCapacityReservationInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetGroupsForCapacityReservationInput) SetCapacityReservationId(v string) *GetGroupsForCapacityReservationInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetGroupsForCapacityReservationInput) SetDryRun(v bool) *GetGroupsForCapacityReservationInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetGroupsForCapacityReservationInput) SetMaxResults(v int64) *GetGroupsForCapacityReservationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetGroupsForCapacityReservationInput) SetNextToken(v string) *GetGroupsForCapacityReservationInput { + s.NextToken = &v + return s +} + +type GetGroupsForCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Information about the resource groups to which the Capacity Reservation has + // been added. + CapacityReservationGroups []*CapacityReservationGroup `locationName:"capacityReservationGroupSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetCapacityReservationGroups sets the CapacityReservationGroups field's value. +func (s *GetGroupsForCapacityReservationOutput) SetCapacityReservationGroups(v []*CapacityReservationGroup) *GetGroupsForCapacityReservationOutput { + s.CapacityReservationGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetGroupsForCapacityReservationOutput) SetNextToken(v string) *GetGroupsForCapacityReservationOutput { + s.NextToken = &v + return s +} + +type GetHostReservationPurchasePreviewInput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated Hosts with which the reservation is associated. + // + // HostIdSet is a required field + HostIdSet []*string `locationNameList:"item" type:"list" required:"true"` + + // The offering ID of the reservation. 
+ // + // OfferingId is a required field + OfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetHostReservationPurchasePreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostReservationPurchasePreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetHostReservationPurchasePreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetHostReservationPurchasePreviewInput"} + if s.HostIdSet == nil { + invalidParams.Add(request.NewErrParamRequired("HostIdSet")) + } + if s.OfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("OfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostIdSet sets the HostIdSet field's value. +func (s *GetHostReservationPurchasePreviewInput) SetHostIdSet(v []*string) *GetHostReservationPurchasePreviewInput { + s.HostIdSet = v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *GetHostReservationPurchasePreviewInput) SetOfferingId(v string) *GetHostReservationPurchasePreviewInput { + s.OfferingId = &v + return s +} + +type GetHostReservationPurchasePreviewOutput struct { + _ struct{} `type:"structure"` + + // The currency in which the totalUpfrontPrice and totalHourlyPrice amounts + // are specified. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The purchase information of the Dedicated Host reservation and the Dedicated + // Hosts associated with it. + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` + + // The potential total hourly price of the reservation per hour. 
+ TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` + + // The potential total upfront price. This is billed immediately. + TotalUpfrontPrice *string `locationName:"totalUpfrontPrice" type:"string"` +} + +// String returns the string representation +func (s GetHostReservationPurchasePreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetHostReservationPurchasePreviewOutput) GoString() string { + return s.String() +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *GetHostReservationPurchasePreviewOutput) SetCurrencyCode(v string) *GetHostReservationPurchasePreviewOutput { + s.CurrencyCode = &v + return s +} + +// SetPurchase sets the Purchase field's value. +func (s *GetHostReservationPurchasePreviewOutput) SetPurchase(v []*Purchase) *GetHostReservationPurchasePreviewOutput { + s.Purchase = v + return s +} + +// SetTotalHourlyPrice sets the TotalHourlyPrice field's value. +func (s *GetHostReservationPurchasePreviewOutput) SetTotalHourlyPrice(v string) *GetHostReservationPurchasePreviewOutput { + s.TotalHourlyPrice = &v + return s +} + +// SetTotalUpfrontPrice sets the TotalUpfrontPrice field's value. +func (s *GetHostReservationPurchasePreviewOutput) SetTotalUpfrontPrice(v string) *GetHostReservationPurchasePreviewOutput { + s.TotalUpfrontPrice = &v + return s +} + +type GetLaunchTemplateDataInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. 
+ // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLaunchTemplateDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLaunchTemplateDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLaunchTemplateDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLaunchTemplateDataInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetLaunchTemplateDataInput) SetDryRun(v bool) *GetLaunchTemplateDataInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetLaunchTemplateDataInput) SetInstanceId(v string) *GetLaunchTemplateDataInput { + s.InstanceId = &v + return s +} + +type GetLaunchTemplateDataOutput struct { + _ struct{} `type:"structure"` + + // The instance data. + LaunchTemplateData *ResponseLaunchTemplateData `locationName:"launchTemplateData" type:"structure"` +} + +// String returns the string representation +func (s GetLaunchTemplateDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLaunchTemplateDataOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplateData sets the LaunchTemplateData field's value. 
+func (s *GetLaunchTemplateDataOutput) SetLaunchTemplateData(v *ResponseLaunchTemplateData) *GetLaunchTemplateDataOutput { + s.LaunchTemplateData = v + return s +} + +type GetManagedPrefixListAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *GetManagedPrefixListAssociationsInput) SetDryRun(v bool) *GetManagedPrefixListAssociationsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListAssociationsInput) SetMaxResults(v int64) *GetManagedPrefixListAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsInput) SetNextToken(v string) *GetManagedPrefixListAssociationsInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListAssociationsInput) SetPrefixListId(v string) *GetManagedPrefixListAssociationsInput { + s.PrefixListId = &v + return s +} + +type GetManagedPrefixListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the associations. + PrefixListAssociations []*PrefixListAssociation `locationName:"prefixListAssociationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetNextToken(v string) *GetManagedPrefixListAssociationsOutput { + s.NextToken = &v + return s +} + +// SetPrefixListAssociations sets the PrefixListAssociations field's value. 
+func (s *GetManagedPrefixListAssociationsOutput) SetPrefixListAssociations(v []*PrefixListAssociation) *GetManagedPrefixListAssociationsOutput { + s.PrefixListAssociations = v + return s +} + +type GetManagedPrefixListEntriesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version of the prefix list for which to return the entries. The default + // is the current version. + TargetVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListEntriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListEntriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *GetManagedPrefixListEntriesInput) SetDryRun(v bool) *GetManagedPrefixListEntriesInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListEntriesInput) SetMaxResults(v int64) *GetManagedPrefixListEntriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListEntriesInput) SetNextToken(v string) *GetManagedPrefixListEntriesInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListEntriesInput) SetPrefixListId(v string) *GetManagedPrefixListEntriesInput { + s.PrefixListId = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *GetManagedPrefixListEntriesInput) SetTargetVersion(v int64) *GetManagedPrefixListEntriesInput { + s.TargetVersion = &v + return s +} + +type GetManagedPrefixListEntriesOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list entries. + Entries []*PrefixListEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *GetManagedPrefixListEntriesOutput) SetEntries(v []*PrefixListEntry) *GetManagedPrefixListEntriesOutput { + s.Entries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetManagedPrefixListEntriesOutput) SetNextToken(v string) *GetManagedPrefixListEntriesOutput { + s.NextToken = &v + return s +} + +type GetPasswordDataInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the Windows instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPasswordDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPasswordDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPasswordDataInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetPasswordDataInput) SetDryRun(v bool) *GetPasswordDataInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetPasswordDataInput) SetInstanceId(v string) *GetPasswordDataInput { + s.InstanceId = &v + return s +} + +type GetPasswordDataOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Windows instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The password of the instance. Returns an empty string if the password is + // not available. 
+ PasswordData *string `locationName:"passwordData" type:"string"` + + // The time the data was last updated. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s GetPasswordDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPasswordDataOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetPasswordDataOutput) SetInstanceId(v string) *GetPasswordDataOutput { + s.InstanceId = &v + return s +} + +// SetPasswordData sets the PasswordData field's value. +func (s *GetPasswordDataOutput) SetPasswordData(v string) *GetPasswordDataOutput { + s.PasswordData = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *GetPasswordDataOutput) SetTimestamp(v time.Time) *GetPasswordDataOutput { + s.Timestamp = &v + return s +} + +// Contains the parameters for GetReservedInstanceExchangeQuote. +type GetReservedInstancesExchangeQuoteInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the Convertible Reserved Instances to exchange. + // + // ReservedInstanceIds is a required field + ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` + + // The configuration of the target Convertible Reserved Instance to exchange + // for your current Convertible Reserved Instances. 
+ TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` +} + +// String returns the string representation +func (s GetReservedInstancesExchangeQuoteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReservedInstancesExchangeQuoteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetReservedInstancesExchangeQuoteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetReservedInstancesExchangeQuoteInput"} + if s.ReservedInstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstanceIds")) + } + if s.TargetConfigurations != nil { + for i, v := range s.TargetConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetReservedInstancesExchangeQuoteInput) SetDryRun(v bool) *GetReservedInstancesExchangeQuoteInput { + s.DryRun = &v + return s +} + +// SetReservedInstanceIds sets the ReservedInstanceIds field's value. +func (s *GetReservedInstancesExchangeQuoteInput) SetReservedInstanceIds(v []*string) *GetReservedInstancesExchangeQuoteInput { + s.ReservedInstanceIds = v + return s +} + +// SetTargetConfigurations sets the TargetConfigurations field's value. +func (s *GetReservedInstancesExchangeQuoteInput) SetTargetConfigurations(v []*TargetConfigurationRequest) *GetReservedInstancesExchangeQuoteInput { + s.TargetConfigurations = v + return s +} + +// Contains the output of GetReservedInstancesExchangeQuote. 
+type GetReservedInstancesExchangeQuoteOutput struct { + _ struct{} `type:"structure"` + + // The currency of the transaction. + CurrencyCode *string `locationName:"currencyCode" type:"string"` + + // If true, the exchange is valid. If false, the exchange cannot be completed. + IsValidExchange *bool `locationName:"isValidExchange" type:"boolean"` + + // The new end date of the reservation term. + OutputReservedInstancesWillExpireAt *time.Time `locationName:"outputReservedInstancesWillExpireAt" type:"timestamp"` + + // The total true upfront charge for the exchange. + PaymentDue *string `locationName:"paymentDue" type:"string"` + + // The cost associated with the Reserved Instance. + ReservedInstanceValueRollup *ReservationValue `locationName:"reservedInstanceValueRollup" type:"structure"` + + // The configuration of your Convertible Reserved Instances. + ReservedInstanceValueSet []*ReservedInstanceReservationValue `locationName:"reservedInstanceValueSet" locationNameList:"item" type:"list"` + + // The cost associated with the Reserved Instance. + TargetConfigurationValueRollup *ReservationValue `locationName:"targetConfigurationValueRollup" type:"structure"` + + // The values of the target Convertible Reserved Instances. + TargetConfigurationValueSet []*TargetReservationValue `locationName:"targetConfigurationValueSet" locationNameList:"item" type:"list"` + + // Describes the reason why the exchange cannot be completed. + ValidationFailureReason *string `locationName:"validationFailureReason" type:"string"` +} + +// String returns the string representation +func (s GetReservedInstancesExchangeQuoteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetReservedInstancesExchangeQuoteOutput) GoString() string { + return s.String() +} + +// SetCurrencyCode sets the CurrencyCode field's value. 
+func (s *GetReservedInstancesExchangeQuoteOutput) SetCurrencyCode(v string) *GetReservedInstancesExchangeQuoteOutput { + s.CurrencyCode = &v + return s +} + +// SetIsValidExchange sets the IsValidExchange field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetIsValidExchange(v bool) *GetReservedInstancesExchangeQuoteOutput { + s.IsValidExchange = &v + return s +} + +// SetOutputReservedInstancesWillExpireAt sets the OutputReservedInstancesWillExpireAt field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetOutputReservedInstancesWillExpireAt(v time.Time) *GetReservedInstancesExchangeQuoteOutput { + s.OutputReservedInstancesWillExpireAt = &v + return s +} + +// SetPaymentDue sets the PaymentDue field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetPaymentDue(v string) *GetReservedInstancesExchangeQuoteOutput { + s.PaymentDue = &v + return s +} + +// SetReservedInstanceValueRollup sets the ReservedInstanceValueRollup field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetReservedInstanceValueRollup(v *ReservationValue) *GetReservedInstancesExchangeQuoteOutput { + s.ReservedInstanceValueRollup = v + return s +} + +// SetReservedInstanceValueSet sets the ReservedInstanceValueSet field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetReservedInstanceValueSet(v []*ReservedInstanceReservationValue) *GetReservedInstancesExchangeQuoteOutput { + s.ReservedInstanceValueSet = v + return s +} + +// SetTargetConfigurationValueRollup sets the TargetConfigurationValueRollup field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetTargetConfigurationValueRollup(v *ReservationValue) *GetReservedInstancesExchangeQuoteOutput { + s.TargetConfigurationValueRollup = v + return s +} + +// SetTargetConfigurationValueSet sets the TargetConfigurationValueSet field's value. 
+func (s *GetReservedInstancesExchangeQuoteOutput) SetTargetConfigurationValueSet(v []*TargetReservationValue) *GetReservedInstancesExchangeQuoteOutput { + s.TargetConfigurationValueSet = v + return s +} + +// SetValidationFailureReason sets the ValidationFailureReason field's value. +func (s *GetReservedInstancesExchangeQuoteOutput) SetValidationFailureReason(v string) *GetReservedInstancesExchangeQuoteOutput { + s.ValidationFailureReason = &v + return s +} + +type GetTransitGatewayAttachmentPropagationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * transit-gateway-route-table-id - The ID of the transit gateway route + // table. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayAttachmentPropagationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayAttachmentPropagationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetTransitGatewayAttachmentPropagationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayAttachmentPropagationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayAttachmentPropagationsInput) SetDryRun(v bool) *GetTransitGatewayAttachmentPropagationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayAttachmentPropagationsInput) SetFilters(v []*Filter) *GetTransitGatewayAttachmentPropagationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayAttachmentPropagationsInput) SetMaxResults(v int64) *GetTransitGatewayAttachmentPropagationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayAttachmentPropagationsInput) SetNextToken(v string) *GetTransitGatewayAttachmentPropagationsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *GetTransitGatewayAttachmentPropagationsInput) SetTransitGatewayAttachmentId(v string) *GetTransitGatewayAttachmentPropagationsInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type GetTransitGatewayAttachmentPropagationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the propagation route tables. 
+ TransitGatewayAttachmentPropagations []*TransitGatewayAttachmentPropagation `locationName:"transitGatewayAttachmentPropagations" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetTransitGatewayAttachmentPropagationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayAttachmentPropagationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayAttachmentPropagationsOutput) SetNextToken(v string) *GetTransitGatewayAttachmentPropagationsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayAttachmentPropagations sets the TransitGatewayAttachmentPropagations field's value. +func (s *GetTransitGatewayAttachmentPropagationsOutput) SetTransitGatewayAttachmentPropagations(v []*TransitGatewayAttachmentPropagation) *GetTransitGatewayAttachmentPropagationsOutput { + s.TransitGatewayAttachmentPropagations = v + return s +} + +type GetTransitGatewayMulticastDomainAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * resource-id - The ID of the resource. + // + // * resource-type - The type of resource. The valid value is: vpc. + // + // * state - The state of the subnet association. Valid values are associated + // | associating | disassociated | disassociating. + // + // * subnet-id - The ID of the subnet. + // + // * transit-gateway-attachment-id - The id of the transit gateway attachment. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s GetTransitGatewayMulticastDomainAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayMulticastDomainAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTransitGatewayMulticastDomainAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayMulticastDomainAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayMulticastDomainAssociationsInput) SetDryRun(v bool) *GetTransitGatewayMulticastDomainAssociationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayMulticastDomainAssociationsInput) SetFilters(v []*Filter) *GetTransitGatewayMulticastDomainAssociationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayMulticastDomainAssociationsInput) SetMaxResults(v int64) *GetTransitGatewayMulticastDomainAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetTransitGatewayMulticastDomainAssociationsInput) SetNextToken(v string) *GetTransitGatewayMulticastDomainAssociationsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *GetTransitGatewayMulticastDomainAssociationsInput) SetTransitGatewayMulticastDomainId(v string) *GetTransitGatewayMulticastDomainAssociationsInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type GetTransitGatewayMulticastDomainAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the multicast domain associations. + MulticastDomainAssociations []*TransitGatewayMulticastDomainAssociation `locationName:"multicastDomainAssociations" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetTransitGatewayMulticastDomainAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayMulticastDomainAssociationsOutput) GoString() string { + return s.String() +} + +// SetMulticastDomainAssociations sets the MulticastDomainAssociations field's value. +func (s *GetTransitGatewayMulticastDomainAssociationsOutput) SetMulticastDomainAssociations(v []*TransitGatewayMulticastDomainAssociation) *GetTransitGatewayMulticastDomainAssociationsOutput { + s.MulticastDomainAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetTransitGatewayMulticastDomainAssociationsOutput) SetNextToken(v string) *GetTransitGatewayMulticastDomainAssociationsOutput { + s.NextToken = &v + return s +} + +type GetTransitGatewayPrefixListReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * attachment.resource-id - The ID of the resource for the attachment. + // + // * attachment.resource-type - The type of resource for the attachment. + // Valid values are vpc | vpn | direct-connect-gateway | peering. + // + // * attachment.transit-gateway-attachment-id - The ID of the attachment. + // + // * is-blackhole - Whether traffic matching the route is blocked (true | + // false). + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-owner-id - The ID of the owner of the prefix list. + // + // * state - The state of the prefix list reference (pending | available + // | modifying | deleting). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTransitGatewayPrefixListReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayPrefixListReferencesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetDryRun(v bool) *GetTransitGatewayPrefixListReferencesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetFilters(v []*Filter) *GetTransitGatewayPrefixListReferencesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetMaxResults(v int64) *GetTransitGatewayPrefixListReferencesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. 
+func (s *GetTransitGatewayPrefixListReferencesInput) SetTransitGatewayRouteTableId(v string) *GetTransitGatewayPrefixListReferencesInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type GetTransitGatewayPrefixListReferencesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix list references. + TransitGatewayPrefixListReferences []*TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayPrefixListReferencesOutput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayPrefixListReferences sets the TransitGatewayPrefixListReferences field's value. +func (s *GetTransitGatewayPrefixListReferencesOutput) SetTransitGatewayPrefixListReferences(v []*TransitGatewayPrefixListReference) *GetTransitGatewayPrefixListReferencesOutput { + s.TransitGatewayPrefixListReferences = v + return s +} + +type GetTransitGatewayRouteTableAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. 
The possible values are: + // + // * resource-id - The ID of the resource. + // + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. + // + // * transit-gateway-attachment-id - The ID of the attachment. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayRouteTableAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayRouteTableAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTransitGatewayRouteTableAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayRouteTableAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayRouteTableAssociationsInput) SetDryRun(v bool) *GetTransitGatewayRouteTableAssociationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *GetTransitGatewayRouteTableAssociationsInput) SetFilters(v []*Filter) *GetTransitGatewayRouteTableAssociationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayRouteTableAssociationsInput) SetMaxResults(v int64) *GetTransitGatewayRouteTableAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayRouteTableAssociationsInput) SetNextToken(v string) *GetTransitGatewayRouteTableAssociationsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *GetTransitGatewayRouteTableAssociationsInput) SetTransitGatewayRouteTableId(v string) *GetTransitGatewayRouteTableAssociationsInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type GetTransitGatewayRouteTableAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations. + Associations []*TransitGatewayRouteTableAssociation `locationName:"associations" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetTransitGatewayRouteTableAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayRouteTableAssociationsOutput) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *GetTransitGatewayRouteTableAssociationsOutput) SetAssociations(v []*TransitGatewayRouteTableAssociation) *GetTransitGatewayRouteTableAssociationsOutput { + s.Associations = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetTransitGatewayRouteTableAssociationsOutput) SetNextToken(v string) *GetTransitGatewayRouteTableAssociationsOutput { + s.NextToken = &v + return s +} + +type GetTransitGatewayRouteTablePropagationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * resource-id - The ID of the resource. + // + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. + // + // * transit-gateway-attachment-id - The ID of the attachment. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayRouteTablePropagationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayRouteTablePropagationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetTransitGatewayRouteTablePropagationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayRouteTablePropagationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayRouteTablePropagationsInput) SetDryRun(v bool) *GetTransitGatewayRouteTablePropagationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayRouteTablePropagationsInput) SetFilters(v []*Filter) *GetTransitGatewayRouteTablePropagationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayRouteTablePropagationsInput) SetMaxResults(v int64) *GetTransitGatewayRouteTablePropagationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayRouteTablePropagationsInput) SetNextToken(v string) *GetTransitGatewayRouteTablePropagationsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *GetTransitGatewayRouteTablePropagationsInput) SetTransitGatewayRouteTableId(v string) *GetTransitGatewayRouteTablePropagationsInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type GetTransitGatewayRouteTablePropagationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the route table propagations. 
+ TransitGatewayRouteTablePropagations []*TransitGatewayRouteTablePropagation `locationName:"transitGatewayRouteTablePropagations" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetTransitGatewayRouteTablePropagationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayRouteTablePropagationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayRouteTablePropagationsOutput) SetNextToken(v string) *GetTransitGatewayRouteTablePropagationsOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTablePropagations sets the TransitGatewayRouteTablePropagations field's value. +func (s *GetTransitGatewayRouteTablePropagationsOutput) SetTransitGatewayRouteTablePropagations(v []*TransitGatewayRouteTablePropagation) *GetTransitGatewayRouteTablePropagationsOutput { + s.TransitGatewayRouteTablePropagations = v + return s +} + +// Describes the GPU accelerators for the instance type. +type GpuDeviceInfo struct { + _ struct{} `type:"structure"` + + // The number of GPUs for the instance type. + Count *int64 `locationName:"count" type:"integer"` + + // The manufacturer of the GPU accelerator. + Manufacturer *string `locationName:"manufacturer" type:"string"` + + // Describes the memory available to the GPU accelerator. + MemoryInfo *GpuDeviceMemoryInfo `locationName:"memoryInfo" type:"structure"` + + // The name of the GPU accelerator. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s GpuDeviceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GpuDeviceInfo) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. 
+func (s *GpuDeviceInfo) SetCount(v int64) *GpuDeviceInfo { + s.Count = &v + return s +} + +// SetManufacturer sets the Manufacturer field's value. +func (s *GpuDeviceInfo) SetManufacturer(v string) *GpuDeviceInfo { + s.Manufacturer = &v + return s +} + +// SetMemoryInfo sets the MemoryInfo field's value. +func (s *GpuDeviceInfo) SetMemoryInfo(v *GpuDeviceMemoryInfo) *GpuDeviceInfo { + s.MemoryInfo = v + return s +} + +// SetName sets the Name field's value. +func (s *GpuDeviceInfo) SetName(v string) *GpuDeviceInfo { + s.Name = &v + return s +} + +// Describes the memory available to the GPU accelerator. +type GpuDeviceMemoryInfo struct { + _ struct{} `type:"structure"` + + // The size of the memory available to the GPU accelerator, in MiB. + SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` +} + +// String returns the string representation +func (s GpuDeviceMemoryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GpuDeviceMemoryInfo) GoString() string { + return s.String() +} + +// SetSizeInMiB sets the SizeInMiB field's value. +func (s *GpuDeviceMemoryInfo) SetSizeInMiB(v int64) *GpuDeviceMemoryInfo { + s.SizeInMiB = &v + return s +} + +// Describes the GPU accelerators for the instance type. +type GpuInfo struct { + _ struct{} `type:"structure"` + + // Describes the GPU accelerators for the instance type. + Gpus []*GpuDeviceInfo `locationName:"gpus" locationNameList:"item" type:"list"` + + // The total size of the memory for the GPU accelerators for the instance type, + // in MiB. + TotalGpuMemoryInMiB *int64 `locationName:"totalGpuMemoryInMiB" type:"integer"` +} + +// String returns the string representation +func (s GpuInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GpuInfo) GoString() string { + return s.String() +} + +// SetGpus sets the Gpus field's value. 
+func (s *GpuInfo) SetGpus(v []*GpuDeviceInfo) *GpuInfo { + s.Gpus = v + return s +} + +// SetTotalGpuMemoryInMiB sets the TotalGpuMemoryInMiB field's value. +func (s *GpuInfo) SetTotalGpuMemoryInMiB(v int64) *GpuInfo { + s.TotalGpuMemoryInMiB = &v + return s +} + +// Describes a security group. +type GroupIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s GroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupIdentifier) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *GroupIdentifier) SetGroupId(v string) *GroupIdentifier { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { + s.GroupName = &v + return s +} + +// Indicates whether your instance is configured for hibernation. This parameter +// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// in the Amazon Elastic Compute Cloud User Guide. +type HibernationOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, your instance is enabled for hibernation; + // otherwise, it is not enabled for hibernation. 
+ Configured *bool `locationName:"configured" type:"boolean"` +} + +// String returns the string representation +func (s HibernationOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HibernationOptions) GoString() string { + return s.String() +} + +// SetConfigured sets the Configured field's value. +func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { + s.Configured = &v + return s +} + +// Indicates whether your instance is configured for hibernation. This parameter +// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// in the Amazon Elastic Compute Cloud User Guide. +type HibernationOptionsRequest struct { + _ struct{} `type:"structure"` + + // If you set this parameter to true, your instance is enabled for hibernation. + // + // Default: false + Configured *bool `type:"boolean"` +} + +// String returns the string representation +func (s HibernationOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HibernationOptionsRequest) GoString() string { + return s.String() +} + +// SetConfigured sets the Configured field's value. +func (s *HibernationOptionsRequest) SetConfigured(v bool) *HibernationOptionsRequest { + s.Configured = &v + return s +} + +// Describes an event in the history of the Spot Fleet request. +type HistoryRecord struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure"` + + // The event type. + // + // * error - An error with the Spot Fleet request. + // + // * fleetRequestChange - A change in the status or configuration of the + // Spot Fleet request. 
+ // + // * instanceChange - An instance was launched or terminated. + // + // * Information - An informational event. + EventType *string `locationName:"eventType" type:"string" enum:"EventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s HistoryRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecord) GoString() string { + return s.String() +} + +// SetEventInformation sets the EventInformation field's value. +func (s *HistoryRecord) SetEventInformation(v *EventInformation) *HistoryRecord { + s.EventInformation = v + return s +} + +// SetEventType sets the EventType field's value. +func (s *HistoryRecord) SetEventType(v string) *HistoryRecord { + s.EventType = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *HistoryRecord) SetTimestamp(v time.Time) *HistoryRecord { + s.Timestamp = &v + return s +} + +// Describes an event in the history of an EC2 Fleet. +type HistoryRecordEntry struct { + _ struct{} `type:"structure"` + + // Information about the event. + EventInformation *EventInformation `locationName:"eventInformation" type:"structure"` + + // The event type. + EventType *string `locationName:"eventType" type:"string" enum:"FleetEventType"` + + // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s HistoryRecordEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryRecordEntry) GoString() string { + return s.String() +} + +// SetEventInformation sets the EventInformation field's value. 
+func (s *HistoryRecordEntry) SetEventInformation(v *EventInformation) *HistoryRecordEntry { + s.EventInformation = v + return s +} + +// SetEventType sets the EventType field's value. +func (s *HistoryRecordEntry) SetEventType(v string) *HistoryRecordEntry { + s.EventType = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *HistoryRecordEntry) SetTimestamp(v time.Time) *HistoryRecordEntry { + s.Timestamp = &v + return s +} + +// Describes the properties of the Dedicated Host. +type Host struct { + _ struct{} `type:"structure"` + + // The time that the Dedicated Host was allocated. + AllocationTime *time.Time `locationName:"allocationTime" type:"timestamp"` + + // Indicates whether the Dedicated Host supports multiple instance types of + // the same instance family, or a specific instance type only. one indicates + // that the Dedicated Host supports multiple instance types in the instance + // family. off indicates that the Dedicated Host supports a single instance + // type only. + AllowsMultipleInstanceTypes *string `locationName:"allowsMultipleInstanceTypes" type:"string" enum:"AllowsMultipleInstanceTypes"` + + // Whether auto-placement is on or off. + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The Availability Zone of the Dedicated Host. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The ID of the Availability Zone in which the Dedicated Host is allocated. + AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + + // Information about the instances running on the Dedicated Host. + AvailableCapacity *AvailableCapacity `locationName:"availableCapacity" type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // The ID of the Dedicated Host. + HostId *string `locationName:"hostId" type:"string"` + + // The hardware specifications of the Dedicated Host. + HostProperties *HostProperties `locationName:"hostProperties" type:"structure"` + + // Indicates whether host recovery is enabled or disabled for the Dedicated + // Host. + HostRecovery *string `locationName:"hostRecovery" type:"string" enum:"HostRecovery"` + + // The reservation ID of the Dedicated Host. This returns a null response if + // the Dedicated Host doesn't have an associated reservation. + HostReservationId *string `locationName:"hostReservationId" type:"string"` + + // The IDs and instance type that are currently running on the Dedicated Host. + Instances []*HostInstance `locationName:"instances" locationNameList:"item" type:"list"` + + // Indicates whether the Dedicated Host is in a host resource group. If memberOfServiceLinkedResourceGroup + // is true, the host is in a host resource group; otherwise, it is not. + MemberOfServiceLinkedResourceGroup *bool `locationName:"memberOfServiceLinkedResourceGroup" type:"boolean"` + + // The ID of the AWS account that owns the Dedicated Host. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The time that the Dedicated Host was released. + ReleaseTime *time.Time `locationName:"releaseTime" type:"timestamp"` + + // The Dedicated Host's state. + State *string `locationName:"state" type:"string" enum:"AllocationState"` + + // Any tags assigned to the Dedicated Host. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Host) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Host) GoString() string { + return s.String() +} + +// SetAllocationTime sets the AllocationTime field's value. 
+func (s *Host) SetAllocationTime(v time.Time) *Host { + s.AllocationTime = &v + return s +} + +// SetAllowsMultipleInstanceTypes sets the AllowsMultipleInstanceTypes field's value. +func (s *Host) SetAllowsMultipleInstanceTypes(v string) *Host { + s.AllowsMultipleInstanceTypes = &v + return s +} + +// SetAutoPlacement sets the AutoPlacement field's value. +func (s *Host) SetAutoPlacement(v string) *Host { + s.AutoPlacement = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Host) SetAvailabilityZone(v string) *Host { + s.AvailabilityZone = &v + return s +} + +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *Host) SetAvailabilityZoneId(v string) *Host { + s.AvailabilityZoneId = &v + return s +} + +// SetAvailableCapacity sets the AvailableCapacity field's value. +func (s *Host) SetAvailableCapacity(v *AvailableCapacity) *Host { + s.AvailableCapacity = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *Host) SetClientToken(v string) *Host { + s.ClientToken = &v + return s +} + +// SetHostId sets the HostId field's value. +func (s *Host) SetHostId(v string) *Host { + s.HostId = &v + return s +} + +// SetHostProperties sets the HostProperties field's value. +func (s *Host) SetHostProperties(v *HostProperties) *Host { + s.HostProperties = v + return s +} + +// SetHostRecovery sets the HostRecovery field's value. +func (s *Host) SetHostRecovery(v string) *Host { + s.HostRecovery = &v + return s +} + +// SetHostReservationId sets the HostReservationId field's value. +func (s *Host) SetHostReservationId(v string) *Host { + s.HostReservationId = &v + return s +} + +// SetInstances sets the Instances field's value. +func (s *Host) SetInstances(v []*HostInstance) *Host { + s.Instances = v + return s +} + +// SetMemberOfServiceLinkedResourceGroup sets the MemberOfServiceLinkedResourceGroup field's value. 
+func (s *Host) SetMemberOfServiceLinkedResourceGroup(v bool) *Host { + s.MemberOfServiceLinkedResourceGroup = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Host) SetOwnerId(v string) *Host { + s.OwnerId = &v + return s +} + +// SetReleaseTime sets the ReleaseTime field's value. +func (s *Host) SetReleaseTime(v time.Time) *Host { + s.ReleaseTime = &v + return s +} + +// SetState sets the State field's value. +func (s *Host) SetState(v string) *Host { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Host) SetTags(v []*Tag) *Host { + s.Tags = v + return s +} + +// Describes an instance running on a Dedicated Host. +type HostInstance struct { + _ struct{} `type:"structure"` + + // The ID of instance that is running on the Dedicated Host. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance type (for example, m3.medium) of the running instance. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The ID of the AWS account that owns the instance. + OwnerId *string `locationName:"ownerId" type:"string"` +} + +// String returns the string representation +func (s HostInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostInstance) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *HostInstance) SetInstanceId(v string) *HostInstance { + s.InstanceId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *HostInstance) SetInstanceType(v string) *HostInstance { + s.InstanceType = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *HostInstance) SetOwnerId(v string) *HostInstance { + s.OwnerId = &v + return s +} + +// Details about the Dedicated Host Reservation offering. +type HostOffering struct { + _ struct{} `type:"structure"` + + // The currency of the offering. 
+ CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the offering (in seconds). + Duration *int64 `locationName:"duration" type:"integer"` + + // The hourly price of the offering. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance family of the offering. + InstanceFamily *string `locationName:"instanceFamily" type:"string"` + + // The ID of the offering. + OfferingId *string `locationName:"offeringId" type:"string"` + + // The available payment option. + PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"` + + // The upfront price of the offering. Does not apply to No Upfront offerings. + UpfrontPrice *string `locationName:"upfrontPrice" type:"string"` +} + +// String returns the string representation +func (s HostOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostOffering) GoString() string { + return s.String() +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *HostOffering) SetCurrencyCode(v string) *HostOffering { + s.CurrencyCode = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *HostOffering) SetDuration(v int64) *HostOffering { + s.Duration = &v + return s +} + +// SetHourlyPrice sets the HourlyPrice field's value. +func (s *HostOffering) SetHourlyPrice(v string) *HostOffering { + s.HourlyPrice = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *HostOffering) SetInstanceFamily(v string) *HostOffering { + s.InstanceFamily = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *HostOffering) SetOfferingId(v string) *HostOffering { + s.OfferingId = &v + return s +} + +// SetPaymentOption sets the PaymentOption field's value. 
+func (s *HostOffering) SetPaymentOption(v string) *HostOffering { + s.PaymentOption = &v + return s +} + +// SetUpfrontPrice sets the UpfrontPrice field's value. +func (s *HostOffering) SetUpfrontPrice(v string) *HostOffering { + s.UpfrontPrice = &v + return s +} + +// Describes the properties of a Dedicated Host. +type HostProperties struct { + _ struct{} `type:"structure"` + + // The number of cores on the Dedicated Host. + Cores *int64 `locationName:"cores" type:"integer"` + + // The instance family supported by the Dedicated Host. For example, m5. + InstanceFamily *string `locationName:"instanceFamily" type:"string"` + + // The instance type supported by the Dedicated Host. For example, m5.large. + // If the host supports multiple instance types, no instanceType is returned. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The number of sockets on the Dedicated Host. + Sockets *int64 `locationName:"sockets" type:"integer"` + + // The total number of vCPUs on the Dedicated Host. + TotalVCpus *int64 `locationName:"totalVCpus" type:"integer"` +} + +// String returns the string representation +func (s HostProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostProperties) GoString() string { + return s.String() +} + +// SetCores sets the Cores field's value. +func (s *HostProperties) SetCores(v int64) *HostProperties { + s.Cores = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *HostProperties) SetInstanceFamily(v string) *HostProperties { + s.InstanceFamily = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *HostProperties) SetInstanceType(v string) *HostProperties { + s.InstanceType = &v + return s +} + +// SetSockets sets the Sockets field's value. 
+func (s *HostProperties) SetSockets(v int64) *HostProperties { + s.Sockets = &v + return s +} + +// SetTotalVCpus sets the TotalVCpus field's value. +func (s *HostProperties) SetTotalVCpus(v int64) *HostProperties { + s.TotalVCpus = &v + return s +} + +// Details about the Dedicated Host Reservation and associated Dedicated Hosts. +type HostReservation struct { + _ struct{} `type:"structure"` + + // The number of Dedicated Hosts the reservation is associated with. + Count *int64 `locationName:"count" type:"integer"` + + // The currency in which the upfrontPrice and hourlyPrice amounts are specified. + // At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The length of the reservation's term, specified in seconds. Can be 31536000 + // (1 year) | 94608000 (3 years). + Duration *int64 `locationName:"duration" type:"integer"` + + // The date and time that the reservation ends. + End *time.Time `locationName:"end" type:"timestamp"` + + // The IDs of the Dedicated Hosts associated with the reservation. + HostIdSet []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` + + // The ID of the reservation that specifies the associated Dedicated Hosts. + HostReservationId *string `locationName:"hostReservationId" type:"string"` + + // The hourly price of the reservation. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance family of the Dedicated Host Reservation. The instance family + // on the Dedicated Host must be the same in order for it to benefit from the + // reservation. + InstanceFamily *string `locationName:"instanceFamily" type:"string"` + + // The ID of the reservation. This remains the same regardless of which Dedicated + // Hosts are associated with it. + OfferingId *string `locationName:"offeringId" type:"string"` + + // The payment option selected for this reservation. 
+ PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"` + + // The date and time that the reservation started. + Start *time.Time `locationName:"start" type:"timestamp"` + + // The state of the reservation. + State *string `locationName:"state" type:"string" enum:"ReservationState"` + + // Any tags assigned to the Dedicated Host Reservation. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The upfront price of the reservation. + UpfrontPrice *string `locationName:"upfrontPrice" type:"string"` +} + +// String returns the string representation +func (s HostReservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostReservation) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *HostReservation) SetCount(v int64) *HostReservation { + s.Count = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *HostReservation) SetCurrencyCode(v string) *HostReservation { + s.CurrencyCode = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *HostReservation) SetDuration(v int64) *HostReservation { + s.Duration = &v + return s +} + +// SetEnd sets the End field's value. +func (s *HostReservation) SetEnd(v time.Time) *HostReservation { + s.End = &v + return s +} + +// SetHostIdSet sets the HostIdSet field's value. +func (s *HostReservation) SetHostIdSet(v []*string) *HostReservation { + s.HostIdSet = v + return s +} + +// SetHostReservationId sets the HostReservationId field's value. +func (s *HostReservation) SetHostReservationId(v string) *HostReservation { + s.HostReservationId = &v + return s +} + +// SetHourlyPrice sets the HourlyPrice field's value. +func (s *HostReservation) SetHourlyPrice(v string) *HostReservation { + s.HourlyPrice = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. 
+func (s *HostReservation) SetInstanceFamily(v string) *HostReservation { + s.InstanceFamily = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *HostReservation) SetOfferingId(v string) *HostReservation { + s.OfferingId = &v + return s +} + +// SetPaymentOption sets the PaymentOption field's value. +func (s *HostReservation) SetPaymentOption(v string) *HostReservation { + s.PaymentOption = &v + return s +} + +// SetStart sets the Start field's value. +func (s *HostReservation) SetStart(v time.Time) *HostReservation { + s.Start = &v + return s +} + +// SetState sets the State field's value. +func (s *HostReservation) SetState(v string) *HostReservation { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *HostReservation) SetTags(v []*Tag) *HostReservation { + s.Tags = v + return s +} + +// SetUpfrontPrice sets the UpfrontPrice field's value. +func (s *HostReservation) SetUpfrontPrice(v string) *HostReservation { + s.UpfrontPrice = &v + return s +} + +// The internet key exchange (IKE) version permitted for the VPN tunnel. +type IKEVersionsListValue struct { + _ struct{} `type:"structure"` + + // The IKE version. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s IKEVersionsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IKEVersionsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *IKEVersionsListValue) SetValue(v string) *IKEVersionsListValue { + s.Value = &v + return s +} + +// The IKE version that is permitted for the VPN tunnel. +type IKEVersionsRequestListValue struct { + _ struct{} `type:"structure"` + + // The IKE version. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s IKEVersionsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IKEVersionsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *IKEVersionsRequestListValue) SetValue(v string) *IKEVersionsRequestListValue { + s.Value = &v + return s +} + +// Describes an IAM instance profile. +type IamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `locationName:"arn" type:"string"` + + // The ID of the instance profile. + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s IamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfile) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *IamInstanceProfile) SetArn(v string) *IamInstanceProfile { + s.Arn = &v + return s +} + +// SetId sets the Id field's value. +func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile { + s.Id = &v + return s +} + +// Describes an association between an IAM instance profile and an instance. +type IamInstanceProfileAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association. + AssociationId *string `locationName:"associationId" type:"string"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"IamInstanceProfileAssociationState"` + + // The time the IAM instance profile was associated with the instance. 
+ Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s IamInstanceProfileAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfileAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *IamInstanceProfileAssociation) SetAssociationId(v string) *IamInstanceProfileAssociation { + s.AssociationId = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *IamInstanceProfileAssociation) SetIamInstanceProfile(v *IamInstanceProfile) *IamInstanceProfileAssociation { + s.IamInstanceProfile = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *IamInstanceProfileAssociation) SetInstanceId(v string) *IamInstanceProfileAssociation { + s.InstanceId = &v + return s +} + +// SetState sets the State field's value. +func (s *IamInstanceProfileAssociation) SetState(v string) *IamInstanceProfileAssociation { + s.State = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *IamInstanceProfileAssociation) SetTimestamp(v time.Time) *IamInstanceProfileAssociation { + s.Timestamp = &v + return s +} + +// Describes an IAM instance profile. +type IamInstanceProfileSpecification struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `locationName:"arn" type:"string"` + + // The name of the instance profile. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s IamInstanceProfileSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfileSpecification) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. 
+func (s *IamInstanceProfileSpecification) SetArn(v string) *IamInstanceProfileSpecification { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *IamInstanceProfileSpecification) SetName(v string) *IamInstanceProfileSpecification { + s.Name = &v + return s +} + +// Describes the ICMP type and code. +type IcmpTypeCode struct { + _ struct{} `type:"structure"` + + // The ICMP code. A value of -1 means all codes for the specified ICMP type. + Code *int64 `locationName:"code" type:"integer"` + + // The ICMP type. A value of -1 means all types. + Type *int64 `locationName:"type" type:"integer"` +} + +// String returns the string representation +func (s IcmpTypeCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IcmpTypeCode) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *IcmpTypeCode) SetCode(v int64) *IcmpTypeCode { + s.Code = &v + return s +} + +// SetType sets the Type field's value. +func (s *IcmpTypeCode) SetType(v int64) *IcmpTypeCode { + s.Type = &v + return s +} + +// Describes the ID format for a resource. +type IdFormat struct { + _ struct{} `type:"structure"` + + // The date in UTC at which you are permanently switched over to using longer + // IDs. If a deadline is not yet available for this resource type, this field + // is not returned. + Deadline *time.Time `locationName:"deadline" type:"timestamp"` + + // The type of resource. + Resource *string `locationName:"resource" type:"string"` + + // Indicates whether longer IDs (17-character IDs) are enabled for the resource. + UseLongIds *bool `locationName:"useLongIds" type:"boolean"` +} + +// String returns the string representation +func (s IdFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdFormat) GoString() string { + return s.String() +} + +// SetDeadline sets the Deadline field's value. 
+func (s *IdFormat) SetDeadline(v time.Time) *IdFormat { + s.Deadline = &v + return s +} + +// SetResource sets the Resource field's value. +func (s *IdFormat) SetResource(v string) *IdFormat { + s.Resource = &v + return s +} + +// SetUseLongIds sets the UseLongIds field's value. +func (s *IdFormat) SetUseLongIds(v bool) *IdFormat { + s.UseLongIds = &v + return s +} + +// Describes an image. +type Image struct { + _ struct{} `type:"structure"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The date and time the image was created. + CreationDate *string `locationName:"creationDate" type:"string"` + + // The description of the AMI that was provided during image creation. + Description *string `locationName:"description" type:"string"` + + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + + // The hypervisor type of the image. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The location of the AMI. + ImageLocation *string `locationName:"imageLocation" type:"string"` + + // The AWS account alias (for example, amazon, self) or the AWS account ID of + // the AMI owner. + ImageOwnerAlias *string `locationName:"imageOwnerAlias" type:"string"` + + // The type of image. + ImageType *string `locationName:"imageType" type:"string" enum:"ImageTypeValues"` + + // The kernel associated with the image, if any. Only applicable for machine + // images. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the AMI that was provided during image creation. 
+ Name *string `locationName:"name" type:"string"` + + // The AWS account ID of the image owner. + OwnerId *string `locationName:"imageOwnerId" type:"string"` + + // This value is set to windows for Windows AMIs; otherwise, it is blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The platform details associated with the billing code of the AMI. For more + // information, see Obtaining Billing Information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlatformDetails *string `locationName:"platformDetails" type:"string"` + + // Any product codes associated with the AMI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the image has public launch permissions. The value is true + // if this image has public launch permissions or false if it has only implicit + // and explicit launch permissions. + Public *bool `locationName:"isPublic" type:"boolean"` + + // The RAM disk associated with the image, if any. Only applicable for machine + // images. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device volume (for example, /dev/sda1). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The type of root device used by the AMI. The AMI can use an EBS volume or + // an instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the AMI. If the state is available, the image is successfully + // registered and can be used to launch an instance. 
+ State *string `locationName:"imageState" type:"string" enum:"ImageState"` + + // The reason for the state change. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // Any tags assigned to the image. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The operation of the Amazon EC2 instance and the billing code that is associated + // with the AMI. usageOperation corresponds to the lineitem/Operation (https://docs.aws.amazon.com/cur/latest/userguide/Lineitem-columns.html#Lineitem-details-O-Operation) + // column on your AWS Cost and Usage Report and in the AWS Price List API (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/price-changes.html). + // For the list of UsageOperation codes, see Platform Details and Usage Operation + // Billing Codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html#billing-info) + // in the Amazon Elastic Compute Cloud User Guide. + UsageOperation *string `locationName:"usageOperation" type:"string"` + + // The type of virtualization of the AMI. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` +} + +// String returns the string representation +func (s Image) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Image) GoString() string { + return s.String() +} + +// SetArchitecture sets the Architecture field's value. +func (s *Image) SetArchitecture(v string) *Image { + s.Architecture = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *Image) SetBlockDeviceMappings(v []*BlockDeviceMapping) *Image { + s.BlockDeviceMappings = v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Image) SetCreationDate(v string) *Image { + s.CreationDate = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *Image) SetDescription(v string) *Image { + s.Description = &v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *Image) SetEnaSupport(v bool) *Image { + s.EnaSupport = &v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *Image) SetHypervisor(v string) *Image { + s.Hypervisor = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *Image) SetImageId(v string) *Image { + s.ImageId = &v + return s +} + +// SetImageLocation sets the ImageLocation field's value. +func (s *Image) SetImageLocation(v string) *Image { + s.ImageLocation = &v + return s +} + +// SetImageOwnerAlias sets the ImageOwnerAlias field's value. +func (s *Image) SetImageOwnerAlias(v string) *Image { + s.ImageOwnerAlias = &v + return s +} + +// SetImageType sets the ImageType field's value. +func (s *Image) SetImageType(v string) *Image { + s.ImageType = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *Image) SetKernelId(v string) *Image { + s.KernelId = &v + return s +} + +// SetName sets the Name field's value. +func (s *Image) SetName(v string) *Image { + s.Name = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Image) SetOwnerId(v string) *Image { + s.OwnerId = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *Image) SetPlatform(v string) *Image { + s.Platform = &v + return s +} + +// SetPlatformDetails sets the PlatformDetails field's value. +func (s *Image) SetPlatformDetails(v string) *Image { + s.PlatformDetails = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *Image) SetProductCodes(v []*ProductCode) *Image { + s.ProductCodes = v + return s +} + +// SetPublic sets the Public field's value. +func (s *Image) SetPublic(v bool) *Image { + s.Public = &v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. 
+func (s *Image) SetRamdiskId(v string) *Image { + s.RamdiskId = &v + return s +} + +// SetRootDeviceName sets the RootDeviceName field's value. +func (s *Image) SetRootDeviceName(v string) *Image { + s.RootDeviceName = &v + return s +} + +// SetRootDeviceType sets the RootDeviceType field's value. +func (s *Image) SetRootDeviceType(v string) *Image { + s.RootDeviceType = &v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *Image) SetSriovNetSupport(v string) *Image { + s.SriovNetSupport = &v + return s +} + +// SetState sets the State field's value. +func (s *Image) SetState(v string) *Image { + s.State = &v + return s +} + +// SetStateReason sets the StateReason field's value. +func (s *Image) SetStateReason(v *StateReason) *Image { + s.StateReason = v + return s +} + +// SetTags sets the Tags field's value. +func (s *Image) SetTags(v []*Tag) *Image { + s.Tags = v + return s +} + +// SetUsageOperation sets the UsageOperation field's value. +func (s *Image) SetUsageOperation(v string) *Image { + s.UsageOperation = &v + return s +} + +// SetVirtualizationType sets the VirtualizationType field's value. +func (s *Image) SetVirtualizationType(v string) *Image { + s.VirtualizationType = &v + return s +} + +// Describes the disk container object for an import image task. +type ImageDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image. + Description *string `type:"string"` + + // The block device mapping for the disk. + DeviceName *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: OVA | VHD | VHDX |VMDK + Format *string `type:"string"` + + // The ID of the EBS snapshot to be used for importing the snapshot. + SnapshotId *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. The URL can either + // be a https URL (https://..) or an Amazon S3 URL (s3://..) 
+ Url *string `type:"string"` + + // The S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s ImageDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDiskContainer) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImageDiskContainer) SetDescription(v string) *ImageDiskContainer { + s.Description = &v + return s +} + +// SetDeviceName sets the DeviceName field's value. +func (s *ImageDiskContainer) SetDeviceName(v string) *ImageDiskContainer { + s.DeviceName = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *ImageDiskContainer) SetFormat(v string) *ImageDiskContainer { + s.Format = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *ImageDiskContainer) SetSnapshotId(v string) *ImageDiskContainer { + s.SnapshotId = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *ImageDiskContainer) SetUrl(v string) *ImageDiskContainer { + s.Url = &v + return s +} + +// SetUserBucket sets the UserBucket field's value. +func (s *ImageDiskContainer) SetUserBucket(v *UserBucket) *ImageDiskContainer { + s.UserBucket = v + return s +} + +type ImportClientVpnClientCertificateRevocationListInput struct { + _ struct{} `type:"structure"` + + // The client certificate revocation list file. For more information, see Generate + // a Client Certificate Revocation List (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/cvpn-working-certificates.html#cvpn-working-certificates-generate) + // in the AWS Client VPN Administrator Guide. + // + // CertificateRevocationList is a required field + CertificateRevocationList *string `type:"string" required:"true"` + + // The ID of the Client VPN endpoint to which the client certificate revocation + // list applies. 
+ // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ImportClientVpnClientCertificateRevocationListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportClientVpnClientCertificateRevocationListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportClientVpnClientCertificateRevocationListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportClientVpnClientCertificateRevocationListInput"} + if s.CertificateRevocationList == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateRevocationList")) + } + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateRevocationList sets the CertificateRevocationList field's value. +func (s *ImportClientVpnClientCertificateRevocationListInput) SetCertificateRevocationList(v string) *ImportClientVpnClientCertificateRevocationListInput { + s.CertificateRevocationList = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ImportClientVpnClientCertificateRevocationListInput) SetClientVpnEndpointId(v string) *ImportClientVpnClientCertificateRevocationListInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ImportClientVpnClientCertificateRevocationListInput) SetDryRun(v bool) *ImportClientVpnClientCertificateRevocationListInput { + s.DryRun = &v + return s +} + +type ImportClientVpnClientCertificateRevocationListOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ImportClientVpnClientCertificateRevocationListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportClientVpnClientCertificateRevocationListOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ImportClientVpnClientCertificateRevocationListOutput) SetReturn(v bool) *ImportClientVpnClientCertificateRevocationListOutput { + s.Return = &v + return s +} + +type ImportImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 | arm64 + Architecture *string `type:"string"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // The token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // A description string for the import image task. + Description *string `type:"string"` + + // Information about the disk containers. + DiskContainers []*ImageDiskContainer `locationName:"DiskContainer" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specifies whether the destination AMI of the imported image should be encrypted. 
+ // The default CMK for EBS is used unless you specify a non-default AWS Key + // Management Service (AWS KMS) CMK using KmsKeyId. For more information, see + // Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `type:"boolean"` + + // The target hypervisor platform. + // + // Valid values: xen + Hypervisor *string `type:"string"` + + // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) to use when creating the encrypted AMI. This parameter is + // only required if you want to use a non-default CMK; if this parameter is + // not specified, the default CMK for EBS is used. If a KmsKeyId is specified, + // the Encrypted flag must also be set. + // + // The CMK identifier may be provided in any of the following formats: + // + // * Key ID + // + // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias + // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the key + // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // + // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, + // followed by the Region of the CMK, the AWS account ID of the CMK owner, + // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS parses KmsKeyId asynchronously, meaning that the action you call may + // appear to complete even though you provided an invalid identifier. This action + // will eventually report failure. 
+ // + // The specified CMK must exist in the Region that the AMI is being copied to. + // + // Amazon EBS does not support asymmetric CMKs. + KmsKeyId *string `type:"string"` + + // The ARNs of the license configurations. + LicenseSpecifications []*ImportImageLicenseConfigurationRequest `locationNameList:"item" type:"list"` + + // The license type to be used for the Amazon Machine Image (AMI) after importing. + // + // By default, we detect the source-system operating system (OS) and apply the + // appropriate license. Specify AWS to replace the source-system license with + // an AWS license, if appropriate. Specify BYOL to retain the source-system + // license, if appropriate. + // + // To use BYOL, you must have existing licenses with rights to use these licenses + // in a third party cloud, such as AWS. For more information, see Prerequisites + // (https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image) + // in the VM Import/Export User Guide. + LicenseType *string `type:"string"` + + // The operating system of the virtual machine. + // + // Valid values: Windows | Linux + Platform *string `type:"string"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` + + // The tags to apply to the image being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageInput) GoString() string { + return s.String() +} + +// SetArchitecture sets the Architecture field's value. +func (s *ImportImageInput) SetArchitecture(v string) *ImportImageInput { + s.Architecture = &v + return s +} + +// SetClientData sets the ClientData field's value. 
+func (s *ImportImageInput) SetClientData(v *ClientData) *ImportImageInput { + s.ClientData = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ImportImageInput) SetClientToken(v string) *ImportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportImageInput) SetDescription(v string) *ImportImageInput { + s.Description = &v + return s +} + +// SetDiskContainers sets the DiskContainers field's value. +func (s *ImportImageInput) SetDiskContainers(v []*ImageDiskContainer) *ImportImageInput { + s.DiskContainers = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ImportImageInput) SetDryRun(v bool) *ImportImageInput { + s.DryRun = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *ImportImageInput) SetEncrypted(v bool) *ImportImageInput { + s.Encrypted = &v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *ImportImageInput) SetHypervisor(v string) *ImportImageInput { + s.Hypervisor = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ImportImageInput) SetKmsKeyId(v string) *ImportImageInput { + s.KmsKeyId = &v + return s +} + +// SetLicenseSpecifications sets the LicenseSpecifications field's value. +func (s *ImportImageInput) SetLicenseSpecifications(v []*ImportImageLicenseConfigurationRequest) *ImportImageInput { + s.LicenseSpecifications = v + return s +} + +// SetLicenseType sets the LicenseType field's value. +func (s *ImportImageInput) SetLicenseType(v string) *ImportImageInput { + s.LicenseType = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImportImageInput) SetPlatform(v string) *ImportImageInput { + s.Platform = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
+func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput { + s.RoleName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportImageInput) SetTagSpecifications(v []*TagSpecification) *ImportImageInput { + s.TagSpecifications = v + return s +} + +// The request information of license configurations. +type ImportImageLicenseConfigurationRequest struct { + _ struct{} `type:"structure"` + + // The ARN of a license configuration. + LicenseConfigurationArn *string `type:"string"` +} + +// String returns the string representation +func (s ImportImageLicenseConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageLicenseConfigurationRequest) GoString() string { + return s.String() +} + +// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value. +func (s *ImportImageLicenseConfigurationRequest) SetLicenseConfigurationArn(v string) *ImportImageLicenseConfigurationRequest { + s.LicenseConfigurationArn = &v + return s +} + +// The response information for license configurations. +type ImportImageLicenseConfigurationResponse struct { + _ struct{} `type:"structure"` + + // The ARN of a license configuration. + LicenseConfigurationArn *string `locationName:"licenseConfigurationArn" type:"string"` +} + +// String returns the string representation +func (s ImportImageLicenseConfigurationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageLicenseConfigurationResponse) GoString() string { + return s.String() +} + +// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value. 
+func (s *ImportImageLicenseConfigurationResponse) SetLicenseConfigurationArn(v string) *ImportImageLicenseConfigurationResponse { + s.LicenseConfigurationArn = &v + return s +} + +type ImportImageOutput struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the AMI is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The target hypervisor of the import task. + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) created by the import task. + ImageId *string `locationName:"imageId" type:"string"` + + // The task ID of the import image task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // The identifier for the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) that was used to create the encrypted AMI. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The ARNs of the license configurations. + LicenseSpecifications []*ImportImageLicenseConfigurationResponse `locationName:"licenseSpecifications" locationNameList:"item" type:"list"` + + // The license type of the virtual machine. + LicenseType *string `locationName:"licenseType" type:"string"` + + // The operating system of the virtual machine. + Platform *string `locationName:"platform" type:"string"` + + // The progress of the task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the snapshots. + SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"` + + // A brief status of the task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message of the import task. 
+ StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportImageOutput) GoString() string { + return s.String() +} + +// SetArchitecture sets the Architecture field's value. +func (s *ImportImageOutput) SetArchitecture(v string) *ImportImageOutput { + s.Architecture = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportImageOutput) SetDescription(v string) *ImportImageOutput { + s.Description = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *ImportImageOutput) SetEncrypted(v bool) *ImportImageOutput { + s.Encrypted = &v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *ImportImageOutput) SetHypervisor(v string) *ImportImageOutput { + s.Hypervisor = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ImportImageOutput) SetImageId(v string) *ImportImageOutput { + s.ImageId = &v + return s +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *ImportImageOutput) SetImportTaskId(v string) *ImportImageOutput { + s.ImportTaskId = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ImportImageOutput) SetKmsKeyId(v string) *ImportImageOutput { + s.KmsKeyId = &v + return s +} + +// SetLicenseSpecifications sets the LicenseSpecifications field's value. +func (s *ImportImageOutput) SetLicenseSpecifications(v []*ImportImageLicenseConfigurationResponse) *ImportImageOutput { + s.LicenseSpecifications = v + return s +} + +// SetLicenseType sets the LicenseType field's value. 
+func (s *ImportImageOutput) SetLicenseType(v string) *ImportImageOutput { + s.LicenseType = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImportImageOutput) SetPlatform(v string) *ImportImageOutput { + s.Platform = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ImportImageOutput) SetProgress(v string) *ImportImageOutput { + s.Progress = &v + return s +} + +// SetSnapshotDetails sets the SnapshotDetails field's value. +func (s *ImportImageOutput) SetSnapshotDetails(v []*SnapshotDetail) *ImportImageOutput { + s.SnapshotDetails = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImportImageOutput) SetStatus(v string) *ImportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportImageOutput) SetTags(v []*Tag) *ImportImageOutput { + s.Tags = v + return s +} + +// Describes an import image task. +type ImportImageTask struct { + _ struct{} `type:"structure"` + + // The architecture of the virtual machine. + // + // Valid values: i386 | x86_64 | arm64 + Architecture *string `locationName:"architecture" type:"string"` + + // A description of the import task. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the image is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The target hypervisor for the import task. + // + // Valid values: xen + Hypervisor *string `locationName:"hypervisor" type:"string"` + + // The ID of the Amazon Machine Image (AMI) of the imported virtual machine. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the import image task. 
	ImportTaskId *string `locationName:"importTaskId" type:"string"`

	// The identifier for the AWS Key Management Service (AWS KMS) customer master
	// key (CMK) that was used to create the encrypted image.
	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`

	// The ARNs of the license configurations that are associated with the import
	// image task.
	LicenseSpecifications []*ImportImageLicenseConfigurationResponse `locationName:"licenseSpecifications" locationNameList:"item" type:"list"`

	// The license type of the virtual machine.
	LicenseType *string `locationName:"licenseType" type:"string"`

	// The operating system of the virtual machine.
	// (The upstream model text "The description string for the import image
	// task." was a copy-paste error; compare ImportImageOutput.Platform.)
	Platform *string `locationName:"platform" type:"string"`

	// The percentage of progress of the import image task.
	Progress *string `locationName:"progress" type:"string"`

	// Information about the snapshots.
	SnapshotDetails []*SnapshotDetail `locationName:"snapshotDetailSet" locationNameList:"item" type:"list"`

	// A brief status for the import image task.
	Status *string `locationName:"status" type:"string"`

	// A descriptive status message for the import image task.
	StatusMessage *string `locationName:"statusMessage" type:"string"`

	// The tags for the import image task.
	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s ImportImageTask) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ImportImageTask) GoString() string {
	return s.String()
}

// SetArchitecture sets the Architecture field's value.
func (s *ImportImageTask) SetArchitecture(v string) *ImportImageTask {
	s.Architecture = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *ImportImageTask) SetDescription(v string) *ImportImageTask {
	s.Description = &v
	return s
}

// SetEncrypted sets the Encrypted field's value.
+func (s *ImportImageTask) SetEncrypted(v bool) *ImportImageTask { + s.Encrypted = &v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *ImportImageTask) SetHypervisor(v string) *ImportImageTask { + s.Hypervisor = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ImportImageTask) SetImageId(v string) *ImportImageTask { + s.ImageId = &v + return s +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *ImportImageTask) SetImportTaskId(v string) *ImportImageTask { + s.ImportTaskId = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ImportImageTask) SetKmsKeyId(v string) *ImportImageTask { + s.KmsKeyId = &v + return s +} + +// SetLicenseSpecifications sets the LicenseSpecifications field's value. +func (s *ImportImageTask) SetLicenseSpecifications(v []*ImportImageLicenseConfigurationResponse) *ImportImageTask { + s.LicenseSpecifications = v + return s +} + +// SetLicenseType sets the LicenseType field's value. +func (s *ImportImageTask) SetLicenseType(v string) *ImportImageTask { + s.LicenseType = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImportImageTask) SetPlatform(v string) *ImportImageTask { + s.Platform = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ImportImageTask) SetProgress(v string) *ImportImageTask { + s.Progress = &v + return s +} + +// SetSnapshotDetails sets the SnapshotDetails field's value. +func (s *ImportImageTask) SetSnapshotDetails(v []*SnapshotDetail) *ImportImageTask { + s.SnapshotDetails = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImportImageTask) SetStatus(v string) *ImportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ImportImageTask) SetStatusMessage(v string) *ImportImageTask { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *ImportImageTask) SetTags(v []*Tag) *ImportImageTask { + s.Tags = v + return s +} + +type ImportInstanceInput struct { + _ struct{} `type:"structure"` + + // A description for the instance being imported. + Description *string `locationName:"description" type:"string"` + + // The disk image. + DiskImages []*DiskImage `locationName:"diskImage" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The launch specification. + LaunchSpecification *ImportInstanceLaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The instance operating system. + // + // Platform is a required field + Platform *string `locationName:"platform" type:"string" required:"true" enum:"PlatformValues"` +} + +// String returns the string representation +func (s ImportInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportInstanceInput"} + if s.Platform == nil { + invalidParams.Add(request.NewErrParamRequired("Platform")) + } + if s.DiskImages != nil { + for i, v := range s.DiskImages { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DiskImages", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
+func (s *ImportInstanceInput) SetDescription(v string) *ImportInstanceInput { + s.Description = &v + return s +} + +// SetDiskImages sets the DiskImages field's value. +func (s *ImportInstanceInput) SetDiskImages(v []*DiskImage) *ImportInstanceInput { + s.DiskImages = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ImportInstanceInput) SetDryRun(v bool) *ImportInstanceInput { + s.DryRun = &v + return s +} + +// SetLaunchSpecification sets the LaunchSpecification field's value. +func (s *ImportInstanceInput) SetLaunchSpecification(v *ImportInstanceLaunchSpecification) *ImportInstanceInput { + s.LaunchSpecification = v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImportInstanceInput) SetPlatform(v string) *ImportInstanceInput { + s.Platform = &v + return s +} + +// Describes the launch specification for VM import. +type ImportInstanceLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The architecture of the instance. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // The security group IDs. + GroupIds []*string `locationName:"GroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The security group names. + GroupNames []*string `locationName:"GroupName" locationNameList:"SecurityGroup" type:"list"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The instance type. For more information about the instance types that you + // can import, see Instance Types (https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-instance-types) + // in the VM Import/Export User Guide. 
+ InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether monitoring is enabled. + Monitoring *bool `locationName:"monitoring" type:"boolean"` + + // The placement information for the instance. + Placement *Placement `locationName:"placement" type:"structure"` + + // [EC2-VPC] An available IP address from the IP address range of the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // [EC2-VPC] The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded user data to make available to the instance. + UserData *UserData `locationName:"userData" type:"structure" sensitive:"true"` +} + +// String returns the string representation +func (s ImportInstanceLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceLaunchSpecification) GoString() string { + return s.String() +} + +// SetAdditionalInfo sets the AdditionalInfo field's value. +func (s *ImportInstanceLaunchSpecification) SetAdditionalInfo(v string) *ImportInstanceLaunchSpecification { + s.AdditionalInfo = &v + return s +} + +// SetArchitecture sets the Architecture field's value. +func (s *ImportInstanceLaunchSpecification) SetArchitecture(v string) *ImportInstanceLaunchSpecification { + s.Architecture = &v + return s +} + +// SetGroupIds sets the GroupIds field's value. +func (s *ImportInstanceLaunchSpecification) SetGroupIds(v []*string) *ImportInstanceLaunchSpecification { + s.GroupIds = v + return s +} + +// SetGroupNames sets the GroupNames field's value. +func (s *ImportInstanceLaunchSpecification) SetGroupNames(v []*string) *ImportInstanceLaunchSpecification { + s.GroupNames = v + return s +} + +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value. 
+func (s *ImportInstanceLaunchSpecification) SetInstanceInitiatedShutdownBehavior(v string) *ImportInstanceLaunchSpecification { + s.InstanceInitiatedShutdownBehavior = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ImportInstanceLaunchSpecification) SetInstanceType(v string) *ImportInstanceLaunchSpecification { + s.InstanceType = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *ImportInstanceLaunchSpecification) SetMonitoring(v bool) *ImportInstanceLaunchSpecification { + s.Monitoring = &v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *ImportInstanceLaunchSpecification) SetPlacement(v *Placement) *ImportInstanceLaunchSpecification { + s.Placement = v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *ImportInstanceLaunchSpecification) SetPrivateIpAddress(v string) *ImportInstanceLaunchSpecification { + s.PrivateIpAddress = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *ImportInstanceLaunchSpecification) SetSubnetId(v string) *ImportInstanceLaunchSpecification { + s.SubnetId = &v + return s +} + +// SetUserData sets the UserData field's value. +func (s *ImportInstanceLaunchSpecification) SetUserData(v *UserData) *ImportInstanceLaunchSpecification { + s.UserData = v + return s +} + +type ImportInstanceOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceOutput) GoString() string { + return s.String() +} + +// SetConversionTask sets the ConversionTask field's value. 
+func (s *ImportInstanceOutput) SetConversionTask(v *ConversionTask) *ImportInstanceOutput { + s.ConversionTask = v + return s +} + +// Describes an import instance task. +type ImportInstanceTaskDetails struct { + _ struct{} `type:"structure"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The instance operating system. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // The volumes. + Volumes []*ImportInstanceVolumeDetailItem `locationName:"volumes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportInstanceTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceTaskDetails) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImportInstanceTaskDetails) SetDescription(v string) *ImportInstanceTaskDetails { + s.Description = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ImportInstanceTaskDetails) SetInstanceId(v string) *ImportInstanceTaskDetails { + s.InstanceId = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ImportInstanceTaskDetails) SetPlatform(v string) *ImportInstanceTaskDetails { + s.Platform = &v + return s +} + +// SetVolumes sets the Volumes field's value. +func (s *ImportInstanceTaskDetails) SetVolumes(v []*ImportInstanceVolumeDetailItem) *ImportInstanceTaskDetails { + s.Volumes = v + return s +} + +// Describes an import volume task. +type ImportInstanceVolumeDetailItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting instance will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of bytes converted so far. 
+ BytesConverted *int64 `locationName:"bytesConverted" type:"long"` + + // A description of the task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure"` + + // The status of the import of this particular disk image. + Status *string `locationName:"status" type:"string"` + + // The status information or errors related to the disk image. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure"` +} + +// String returns the string representation +func (s ImportInstanceVolumeDetailItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstanceVolumeDetailItem) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ImportInstanceVolumeDetailItem) SetAvailabilityZone(v string) *ImportInstanceVolumeDetailItem { + s.AvailabilityZone = &v + return s +} + +// SetBytesConverted sets the BytesConverted field's value. +func (s *ImportInstanceVolumeDetailItem) SetBytesConverted(v int64) *ImportInstanceVolumeDetailItem { + s.BytesConverted = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportInstanceVolumeDetailItem) SetDescription(v string) *ImportInstanceVolumeDetailItem { + s.Description = &v + return s +} + +// SetImage sets the Image field's value. +func (s *ImportInstanceVolumeDetailItem) SetImage(v *DiskImageDescription) *ImportInstanceVolumeDetailItem { + s.Image = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImportInstanceVolumeDetailItem) SetStatus(v string) *ImportInstanceVolumeDetailItem { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. 
+func (s *ImportInstanceVolumeDetailItem) SetStatusMessage(v string) *ImportInstanceVolumeDetailItem { + s.StatusMessage = &v + return s +} + +// SetVolume sets the Volume field's value. +func (s *ImportInstanceVolumeDetailItem) SetVolume(v *DiskImageVolumeDescription) *ImportInstanceVolumeDetailItem { + s.Volume = v + return s +} + +type ImportKeyPairInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // A unique name for the key pair. + // + // KeyName is a required field + KeyName *string `locationName:"keyName" type:"string" required:"true"` + + // The public key. For API calls, the text must be base64-encoded. For command + // line tools, base64 encoding is performed for you. + // + // PublicKeyMaterial is automatically base64 encoded/decoded by the SDK. + // + // PublicKeyMaterial is a required field + PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` + + // The tags to apply to the imported key pair. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportKeyPairInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ImportKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportKeyPairInput"} + if s.KeyName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyName")) + } + if s.PublicKeyMaterial == nil { + invalidParams.Add(request.NewErrParamRequired("PublicKeyMaterial")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ImportKeyPairInput) SetDryRun(v bool) *ImportKeyPairInput { + s.DryRun = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *ImportKeyPairInput) SetKeyName(v string) *ImportKeyPairInput { + s.KeyName = &v + return s +} + +// SetPublicKeyMaterial sets the PublicKeyMaterial field's value. +func (s *ImportKeyPairInput) SetPublicKeyMaterial(v []byte) *ImportKeyPairInput { + s.PublicKeyMaterial = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportKeyPairInput) SetTagSpecifications(v []*TagSpecification) *ImportKeyPairInput { + s.TagSpecifications = v + return s +} + +type ImportKeyPairOutput struct { + _ struct{} `type:"structure"` + + // The MD5 public key fingerprint as specified in section 4 of RFC 4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The key pair name you provided. + KeyName *string `locationName:"keyName" type:"string"` + + // The ID of the resulting key pair. + KeyPairId *string `locationName:"keyPairId" type:"string"` + + // The tags applied to the imported key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportKeyPairOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyPairOutput) GoString() string { + return s.String() +} + +// SetKeyFingerprint sets the KeyFingerprint field's value. 
+func (s *ImportKeyPairOutput) SetKeyFingerprint(v string) *ImportKeyPairOutput { + s.KeyFingerprint = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput { + s.KeyName = &v + return s +} + +// SetKeyPairId sets the KeyPairId field's value. +func (s *ImportKeyPairOutput) SetKeyPairId(v string) *ImportKeyPairOutput { + s.KeyPairId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportKeyPairOutput) SetTags(v []*Tag) *ImportKeyPairOutput { + s.Tags = v + return s +} + +type ImportSnapshotInput struct { + _ struct{} `type:"structure"` + + // The client-specific data. + ClientData *ClientData `type:"structure"` + + // Token to enable idempotency for VM import requests. + ClientToken *string `type:"string"` + + // The description string for the import snapshot task. + Description *string `type:"string"` + + // Information about the disk container. + DiskContainer *SnapshotDiskContainer `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specifies whether the destination snapshot of the imported image should be + // encrypted. The default CMK for EBS is used unless you specify a non-default + // AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, + // see Amazon EBS Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) + // in the Amazon Elastic Compute Cloud User Guide. + Encrypted *bool `type:"boolean"` + + // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) to use when creating the encrypted snapshot. 
This parameter + // is only required if you want to use a non-default CMK; if this parameter + // is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, + // the Encrypted flag must also be set. + // + // The CMK identifier may be provided in any of the following formats: + // + // * Key ID + // + // * Key alias. The alias ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias + // namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the key + // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // + // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, + // followed by the Region of the CMK, the AWS account ID of the CMK owner, + // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS parses KmsKeyId asynchronously, meaning that the action you call may + // appear to complete even though you provided an invalid identifier. This action + // will eventually report failure. + // + // The specified CMK must exist in the Region that the snapshot is being copied + // to. + // + // Amazon EBS does not support asymmetric CMKs. + KmsKeyId *string `type:"string"` + + // The name of the role to use when not using the default role, 'vmimport'. + RoleName *string `type:"string"` + + // The tags to apply to the snapshot being imported. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportSnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotInput) GoString() string { + return s.String() +} + +// SetClientData sets the ClientData field's value. +func (s *ImportSnapshotInput) SetClientData(v *ClientData) *ImportSnapshotInput { + s.ClientData = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ImportSnapshotInput) SetClientToken(v string) *ImportSnapshotInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportSnapshotInput) SetDescription(v string) *ImportSnapshotInput { + s.Description = &v + return s +} + +// SetDiskContainer sets the DiskContainer field's value. +func (s *ImportSnapshotInput) SetDiskContainer(v *SnapshotDiskContainer) *ImportSnapshotInput { + s.DiskContainer = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ImportSnapshotInput) SetDryRun(v bool) *ImportSnapshotInput { + s.DryRun = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *ImportSnapshotInput) SetEncrypted(v bool) *ImportSnapshotInput { + s.Encrypted = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ImportSnapshotInput) SetKmsKeyId(v string) *ImportSnapshotInput { + s.KmsKeyId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { + s.RoleName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *ImportSnapshotInput) SetTagSpecifications(v []*TagSpecification) *ImportSnapshotInput { + s.TagSpecifications = v + return s +} + +type ImportSnapshotOutput struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. + ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Information about the import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + // Any tags assigned to the snapshot being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImportSnapshotOutput) SetDescription(v string) *ImportSnapshotOutput { + s.Description = &v + return s +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *ImportSnapshotOutput) SetImportTaskId(v string) *ImportSnapshotOutput { + s.ImportTaskId = &v + return s +} + +// SetSnapshotTaskDetail sets the SnapshotTaskDetail field's value. +func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *ImportSnapshotOutput { + s.SnapshotTaskDetail = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportSnapshotOutput) SetTags(v []*Tag) *ImportSnapshotOutput { + s.Tags = v + return s +} + +// Describes an import snapshot task. +type ImportSnapshotTask struct { + _ struct{} `type:"structure"` + + // A description of the import snapshot task. + Description *string `locationName:"description" type:"string"` + + // The ID of the import snapshot task. 
+ ImportTaskId *string `locationName:"importTaskId" type:"string"` + + // Describes an import snapshot task. + SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + // The tags for the import snapshot task. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ImportSnapshotTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportSnapshotTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImportSnapshotTask) SetDescription(v string) *ImportSnapshotTask { + s.Description = &v + return s +} + +// SetImportTaskId sets the ImportTaskId field's value. +func (s *ImportSnapshotTask) SetImportTaskId(v string) *ImportSnapshotTask { + s.ImportTaskId = &v + return s +} + +// SetSnapshotTaskDetail sets the SnapshotTaskDetail field's value. +func (s *ImportSnapshotTask) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *ImportSnapshotTask { + s.SnapshotTaskDetail = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportSnapshotTask) SetTags(v []*Tag) *ImportSnapshotTask { + s.Tags = v + return s +} + +type ImportVolumeInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the resulting EBS volume. + // + // AvailabilityZone is a required field + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the volume. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The disk image. 
+ // + // Image is a required field + Image *DiskImageDetail `locationName:"image" type:"structure" required:"true"` + + // The volume size. + // + // Volume is a required field + Volume *VolumeDetail `locationName:"volume" type:"structure" required:"true"` +} + +// String returns the string representation +func (s ImportVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportVolumeInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.Image == nil { + invalidParams.Add(request.NewErrParamRequired("Image")) + } + if s.Volume == nil { + invalidParams.Add(request.NewErrParamRequired("Volume")) + } + if s.Image != nil { + if err := s.Image.Validate(); err != nil { + invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) + } + } + if s.Volume != nil { + if err := s.Volume.Validate(); err != nil { + invalidParams.AddNested("Volume", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ImportVolumeInput) SetAvailabilityZone(v string) *ImportVolumeInput { + s.AvailabilityZone = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportVolumeInput) SetDescription(v string) *ImportVolumeInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ImportVolumeInput) SetDryRun(v bool) *ImportVolumeInput { + s.DryRun = &v + return s +} + +// SetImage sets the Image field's value. 
+func (s *ImportVolumeInput) SetImage(v *DiskImageDetail) *ImportVolumeInput { + s.Image = v + return s +} + +// SetVolume sets the Volume field's value. +func (s *ImportVolumeInput) SetVolume(v *VolumeDetail) *ImportVolumeInput { + s.Volume = v + return s +} + +type ImportVolumeOutput struct { + _ struct{} `type:"structure"` + + // Information about the conversion task. + ConversionTask *ConversionTask `locationName:"conversionTask" type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeOutput) GoString() string { + return s.String() +} + +// SetConversionTask sets the ConversionTask field's value. +func (s *ImportVolumeOutput) SetConversionTask(v *ConversionTask) *ImportVolumeOutput { + s.ConversionTask = v + return s +} + +// Describes an import volume task. +type ImportVolumeTaskDetails struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the resulting volume will reside. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of bytes converted so far. + BytesConverted *int64 `locationName:"bytesConverted" type:"long"` + + // The description you provided when starting the import volume task. + Description *string `locationName:"description" type:"string"` + + // The image. + Image *DiskImageDescription `locationName:"image" type:"structure"` + + // The volume. + Volume *DiskImageVolumeDescription `locationName:"volume" type:"structure"` +} + +// String returns the string representation +func (s ImportVolumeTaskDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportVolumeTaskDetails) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. 
+func (s *ImportVolumeTaskDetails) SetAvailabilityZone(v string) *ImportVolumeTaskDetails { + s.AvailabilityZone = &v + return s +} + +// SetBytesConverted sets the BytesConverted field's value. +func (s *ImportVolumeTaskDetails) SetBytesConverted(v int64) *ImportVolumeTaskDetails { + s.BytesConverted = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImportVolumeTaskDetails) SetDescription(v string) *ImportVolumeTaskDetails { + s.Description = &v + return s +} + +// SetImage sets the Image field's value. +func (s *ImportVolumeTaskDetails) SetImage(v *DiskImageDescription) *ImportVolumeTaskDetails { + s.Image = v + return s +} + +// SetVolume sets the Volume field's value. +func (s *ImportVolumeTaskDetails) SetVolume(v *DiskImageVolumeDescription) *ImportVolumeTaskDetails { + s.Volume = v + return s +} + +// Describes the Inference accelerators for the instance type. +type InferenceAcceleratorInfo struct { + _ struct{} `type:"structure"` + + // Describes the Inference accelerators for the instance type. + Accelerators []*InferenceDeviceInfo `locationName:"accelerators" type:"list"` +} + +// String returns the string representation +func (s InferenceAcceleratorInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceAcceleratorInfo) GoString() string { + return s.String() +} + +// SetAccelerators sets the Accelerators field's value. +func (s *InferenceAcceleratorInfo) SetAccelerators(v []*InferenceDeviceInfo) *InferenceAcceleratorInfo { + s.Accelerators = v + return s +} + +// Describes the Inference accelerators for the instance type. +type InferenceDeviceInfo struct { + _ struct{} `type:"structure"` + + // The number of Inference accelerators for the instance type. + Count *int64 `locationName:"count" type:"integer"` + + // The manufacturer of the Inference accelerator. 
+ Manufacturer *string `locationName:"manufacturer" type:"string"` + + // The name of the Inference accelerator. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s InferenceDeviceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InferenceDeviceInfo) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *InferenceDeviceInfo) SetCount(v int64) *InferenceDeviceInfo { + s.Count = &v + return s +} + +// SetManufacturer sets the Manufacturer field's value. +func (s *InferenceDeviceInfo) SetManufacturer(v string) *InferenceDeviceInfo { + s.Manufacturer = &v + return s +} + +// SetName sets the Name field's value. +func (s *InferenceDeviceInfo) SetName(v string) *InferenceDeviceInfo { + s.Name = &v + return s +} + +// Describes an instance. +type Instance struct { + _ struct{} `type:"structure"` + + // The AMI launch index, which can be used to find this instance in the launch + // group. + AmiLaunchIndex *int64 `locationName:"amiLaunchIndex" type:"integer"` + + // The architecture of the image. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // Any block device mapping entries for the instance. + BlockDeviceMappings []*InstanceBlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // The ID of the Capacity Reservation. + CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // Information about the Capacity Reservation targeting option. + CapacityReservationSpecification *CapacityReservationSpecificationResponse `locationName:"capacityReservationSpecification" type:"structure"` + + // The idempotency token you provided when you launched the instance, if applicable. + ClientToken *string `locationName:"clientToken" type:"string"` + + // The CPU options for the instance. 
+ CpuOptions *CpuOptions `locationName:"cpuOptions" type:"structure"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The Elastic GPU associated with the instance. + ElasticGpuAssociations []*ElasticGpuAssociation `locationName:"elasticGpuAssociationSet" locationNameList:"item" type:"list"` + + // The elastic inference accelerator associated with the instance. + ElasticInferenceAcceleratorAssociations []*ElasticInferenceAcceleratorAssociation `locationName:"elasticInferenceAcceleratorAssociationSet" locationNameList:"item" type:"list"` + + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + + // Indicates whether the instance is enabled for hibernation. + HibernationOptions *HibernationOptions `locationName:"hibernationOptions" type:"structure"` + + // The hypervisor type of the instance. The value xen is used for both Xen and + // Nitro hypervisors. + Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` + + // The IAM instance profile associated with the instance, if applicable. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI used to launch the instance. + ImageId *string `locationName:"imageId" type:"string"` + + // The ID of the instance. 
+ InstanceId *string `locationName:"instanceId" type:"string"` + + // Indicates whether this is a Spot Instance or a Scheduled Instance. + InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The kernel associated with this instance, if applicable. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair, if this instance was launched with an associated + // key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The time the instance was launched. + LaunchTime *time.Time `locationName:"launchTime" type:"timestamp"` + + // The license configurations. + Licenses []*LicenseConfiguration `locationName:"licenseSet" locationNameList:"item" type:"list"` + + // The metadata options for the instance. + MetadataOptions *InstanceMetadataOptionsResponse `locationName:"metadataOptions" type:"structure"` + + // The monitoring for the instance. + Monitoring *Monitoring `locationName:"monitoring" type:"structure"` + + // [EC2-VPC] The network interfaces for the instance. + NetworkInterfaces []*InstanceNetworkInterface `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The location where the instance launched, if applicable. + Placement *Placement `locationName:"placement" type:"structure"` + + // The value is Windows for Windows instances; otherwise blank. + Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + + // (IPv4 only) The private DNS hostname name assigned to the instance. This + // DNS hostname can only be used inside the Amazon EC2 network. This name is + // not available until the instance enters the running state. 
+ // + // [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private + // DNS hostnames if you've enabled DNS resolution and DNS hostnames in your + // VPC. If you are not using the Amazon-provided DNS server in your VPC, your + // custom domain name servers must resolve the hostname as appropriate. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IPv4 address assigned to the instance. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The product codes attached to this instance, if applicable. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // (IPv4 only) The public DNS name assigned to the instance. This name is not + // available until the instance enters the running state. For EC2-VPC, this + // name is only available if you've enabled DNS hostnames for your VPC. + PublicDnsName *string `locationName:"dnsName" type:"string"` + + // The public IPv4 address, or the Carrier IP address assigned to the instance, + // if applicable. + // + // A Carrier IP address only applies to an instance launched in a subnet associated + // with a Wavelength Zone. + PublicIpAddress *string `locationName:"ipAddress" type:"string"` + + // The RAM disk associated with this instance, if applicable. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device volume (for example, /dev/sda1). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // The root device type used by the AMI. The AMI can use an EBS volume or an + // instance store volume. + RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` + + // The security groups for the instance. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // Specifies whether to enable an instance launched in a VPC to perform NAT. 
+ // This controls whether source/destination checking is enabled on the instance. + // A value of true means that checking is enabled, and false means that checking + // is disabled. The value must be false for the instance to perform NAT. For + // more information, see NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // If the request is a Spot Instance request, the ID of the request. + SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The current state of the instance. + State *InstanceState `locationName:"instanceState" type:"structure"` + + // The reason for the most recent state transition. + StateReason *StateReason `locationName:"stateReason" type:"structure"` + + // The reason for the most recent state transition. This might be an empty string. + StateTransitionReason *string `locationName:"reason" type:"string"` + + // [EC2-VPC] The ID of the subnet in which the instance is running. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the instance. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The virtualization type of the instance. + VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` + + // [EC2-VPC] The ID of the VPC in which the instance is running. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Instance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Instance) GoString() string { + return s.String() +} + +// SetAmiLaunchIndex sets the AmiLaunchIndex field's value. +func (s *Instance) SetAmiLaunchIndex(v int64) *Instance { + s.AmiLaunchIndex = &v + return s +} + +// SetArchitecture sets the Architecture field's value. +func (s *Instance) SetArchitecture(v string) *Instance { + s.Architecture = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *Instance) SetBlockDeviceMappings(v []*InstanceBlockDeviceMapping) *Instance { + s.BlockDeviceMappings = v + return s +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *Instance) SetCapacityReservationId(v string) *Instance { + s.CapacityReservationId = &v + return s +} + +// SetCapacityReservationSpecification sets the CapacityReservationSpecification field's value. +func (s *Instance) SetCapacityReservationSpecification(v *CapacityReservationSpecificationResponse) *Instance { + s.CapacityReservationSpecification = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *Instance) SetClientToken(v string) *Instance { + s.ClientToken = &v + return s +} + +// SetCpuOptions sets the CpuOptions field's value. +func (s *Instance) SetCpuOptions(v *CpuOptions) *Instance { + s.CpuOptions = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *Instance) SetEbsOptimized(v bool) *Instance { + s.EbsOptimized = &v + return s +} + +// SetElasticGpuAssociations sets the ElasticGpuAssociations field's value. 
+func (s *Instance) SetElasticGpuAssociations(v []*ElasticGpuAssociation) *Instance { + s.ElasticGpuAssociations = v + return s +} + +// SetElasticInferenceAcceleratorAssociations sets the ElasticInferenceAcceleratorAssociations field's value. +func (s *Instance) SetElasticInferenceAcceleratorAssociations(v []*ElasticInferenceAcceleratorAssociation) *Instance { + s.ElasticInferenceAcceleratorAssociations = v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *Instance) SetEnaSupport(v bool) *Instance { + s.EnaSupport = &v + return s +} + +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *Instance) SetEnclaveOptions(v *EnclaveOptions) *Instance { + s.EnclaveOptions = v + return s +} + +// SetHibernationOptions sets the HibernationOptions field's value. +func (s *Instance) SetHibernationOptions(v *HibernationOptions) *Instance { + s.HibernationOptions = v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *Instance) SetHypervisor(v string) *Instance { + s.Hypervisor = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *Instance) SetIamInstanceProfile(v *IamInstanceProfile) *Instance { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *Instance) SetImageId(v string) *Instance { + s.ImageId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *Instance) SetInstanceId(v string) *Instance { + s.InstanceId = &v + return s +} + +// SetInstanceLifecycle sets the InstanceLifecycle field's value. +func (s *Instance) SetInstanceLifecycle(v string) *Instance { + s.InstanceLifecycle = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *Instance) SetInstanceType(v string) *Instance { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. 
+func (s *Instance) SetKernelId(v string) *Instance { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *Instance) SetKeyName(v string) *Instance { + s.KeyName = &v + return s +} + +// SetLaunchTime sets the LaunchTime field's value. +func (s *Instance) SetLaunchTime(v time.Time) *Instance { + s.LaunchTime = &v + return s +} + +// SetLicenses sets the Licenses field's value. +func (s *Instance) SetLicenses(v []*LicenseConfiguration) *Instance { + s.Licenses = v + return s +} + +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *Instance) SetMetadataOptions(v *InstanceMetadataOptionsResponse) *Instance { + s.MetadataOptions = v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *Instance) SetMonitoring(v *Monitoring) *Instance { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *Instance) SetNetworkInterfaces(v []*InstanceNetworkInterface) *Instance { + s.NetworkInterfaces = v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *Instance) SetOutpostArn(v string) *Instance { + s.OutpostArn = &v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *Instance) SetPlacement(v *Placement) *Instance { + s.Placement = v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *Instance) SetPlatform(v string) *Instance { + s.Platform = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *Instance) SetPrivateDnsName(v string) *Instance { + s.PrivateDnsName = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *Instance) SetPrivateIpAddress(v string) *Instance { + s.PrivateIpAddress = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. 
+func (s *Instance) SetProductCodes(v []*ProductCode) *Instance { + s.ProductCodes = v + return s +} + +// SetPublicDnsName sets the PublicDnsName field's value. +func (s *Instance) SetPublicDnsName(v string) *Instance { + s.PublicDnsName = &v + return s +} + +// SetPublicIpAddress sets the PublicIpAddress field's value. +func (s *Instance) SetPublicIpAddress(v string) *Instance { + s.PublicIpAddress = &v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *Instance) SetRamdiskId(v string) *Instance { + s.RamdiskId = &v + return s +} + +// SetRootDeviceName sets the RootDeviceName field's value. +func (s *Instance) SetRootDeviceName(v string) *Instance { + s.RootDeviceName = &v + return s +} + +// SetRootDeviceType sets the RootDeviceType field's value. +func (s *Instance) SetRootDeviceType(v string) *Instance { + s.RootDeviceType = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *Instance) SetSecurityGroups(v []*GroupIdentifier) *Instance { + s.SecurityGroups = v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *Instance) SetSourceDestCheck(v bool) *Instance { + s.SourceDestCheck = &v + return s +} + +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value. +func (s *Instance) SetSpotInstanceRequestId(v string) *Instance { + s.SpotInstanceRequestId = &v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *Instance) SetSriovNetSupport(v string) *Instance { + s.SriovNetSupport = &v + return s +} + +// SetState sets the State field's value. +func (s *Instance) SetState(v *InstanceState) *Instance { + s.State = v + return s +} + +// SetStateReason sets the StateReason field's value. +func (s *Instance) SetStateReason(v *StateReason) *Instance { + s.StateReason = v + return s +} + +// SetStateTransitionReason sets the StateTransitionReason field's value. 
+func (s *Instance) SetStateTransitionReason(v string) *Instance {
+	s.StateTransitionReason = &v
+	return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+func (s *Instance) SetSubnetId(v string) *Instance {
+	s.SubnetId = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *Instance) SetTags(v []*Tag) *Instance {
+	s.Tags = v
+	return s
+}
+
+// SetVirtualizationType sets the VirtualizationType field's value.
+func (s *Instance) SetVirtualizationType(v string) *Instance {
+	s.VirtualizationType = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *Instance) SetVpcId(v string) *Instance {
+	s.VpcId = &v
+	return s
+}
+
+// Describes a block device mapping.
+type InstanceBlockDeviceMapping struct {
+	_ struct{} `type:"structure"`
+
+	// The device name (for example, /dev/sdh or xvdh).
+	DeviceName *string `locationName:"deviceName" type:"string"`
+
+	// Parameters used to automatically set up EBS volumes when the instance is
+	// launched.
+	Ebs *EbsInstanceBlockDevice `locationName:"ebs" type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMapping) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMapping) GoString() string {
+	return s.String()
+}
+
+// SetDeviceName sets the DeviceName field's value.
+func (s *InstanceBlockDeviceMapping) SetDeviceName(v string) *InstanceBlockDeviceMapping {
+	s.DeviceName = &v
+	return s
+}
+
+// SetEbs sets the Ebs field's value.
+func (s *InstanceBlockDeviceMapping) SetEbs(v *EbsInstanceBlockDevice) *InstanceBlockDeviceMapping {
+	s.Ebs = v
+	return s
+}
+
+// Describes a block device mapping entry.
+type InstanceBlockDeviceMappingSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The device name (for example, /dev/sdh or xvdh).
+	DeviceName *string `locationName:"deviceName" type:"string"`
+
+	// Parameters used to automatically set up EBS volumes when the instance is
+	// launched.
+	Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"`
+
+	// Suppresses the specified device included in the block device mapping.
+	NoDevice *string `locationName:"noDevice" type:"string"`
+
+	// The virtual device name.
+	VirtualName *string `locationName:"virtualName" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceBlockDeviceMappingSpecification) GoString() string {
+	return s.String()
+}
+
+// SetDeviceName sets the DeviceName field's value.
+func (s *InstanceBlockDeviceMappingSpecification) SetDeviceName(v string) *InstanceBlockDeviceMappingSpecification {
+	s.DeviceName = &v
+	return s
+}
+
+// SetEbs sets the Ebs field's value.
+func (s *InstanceBlockDeviceMappingSpecification) SetEbs(v *EbsInstanceBlockDeviceSpecification) *InstanceBlockDeviceMappingSpecification {
+	s.Ebs = v
+	return s
+}
+
+// SetNoDevice sets the NoDevice field's value.
+func (s *InstanceBlockDeviceMappingSpecification) SetNoDevice(v string) *InstanceBlockDeviceMappingSpecification {
+	s.NoDevice = &v
+	return s
+}
+
+// SetVirtualName sets the VirtualName field's value.
+func (s *InstanceBlockDeviceMappingSpecification) SetVirtualName(v string) *InstanceBlockDeviceMappingSpecification {
+	s.VirtualName = &v
+	return s
+}
+
+// Information about the number of instances that can be launched onto the Dedicated
+// Host.
+type InstanceCapacity struct {
+	_ struct{} `type:"structure"`
+
+	// The number of instances that can be launched onto the Dedicated Host based
+	// on the host's available capacity.
+	AvailableCapacity *int64 `locationName:"availableCapacity" type:"integer"`
+
+	// The instance type supported by the Dedicated Host.
+	InstanceType *string `locationName:"instanceType" type:"string"`
+
+	// The total number of instances that can be launched onto the Dedicated Host
+	// if there are no instances running on it.
+	TotalCapacity *int64 `locationName:"totalCapacity" type:"integer"`
+}
+
+// String returns the string representation
+func (s InstanceCapacity) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCapacity) GoString() string {
+	return s.String()
+}
+
+// SetAvailableCapacity sets the AvailableCapacity field's value.
+func (s *InstanceCapacity) SetAvailableCapacity(v int64) *InstanceCapacity {
+	s.AvailableCapacity = &v
+	return s
+}
+
+// SetInstanceType sets the InstanceType field's value.
+func (s *InstanceCapacity) SetInstanceType(v string) *InstanceCapacity {
+	s.InstanceType = &v
+	return s
+}
+
+// SetTotalCapacity sets the TotalCapacity field's value.
+func (s *InstanceCapacity) SetTotalCapacity(v int64) *InstanceCapacity {
+	s.TotalCapacity = &v
+	return s
+}
+
+// Describes a Reserved Instance listing state.
+type InstanceCount struct {
+	_ struct{} `type:"structure"`
+
+	// The number of listed Reserved Instances in the state specified by State.
+	InstanceCount *int64 `locationName:"instanceCount" type:"integer"`
+
+	// The states of the listed Reserved Instances.
+	State *string `locationName:"state" type:"string" enum:"ListingState"`
+}
+
+// String returns the string representation
+func (s InstanceCount) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCount) GoString() string {
+	return s.String()
+}
+
+// SetInstanceCount sets the InstanceCount field's value.
+func (s *InstanceCount) SetInstanceCount(v int64) *InstanceCount {
+	s.InstanceCount = &v
+	return s
+}
+
+// SetState sets the State field's value.
+// The modified receiver is returned to allow chained Set calls.
+func (s *InstanceCount) SetState(v string) *InstanceCount {
+	s.State = &v
+	return s
+}
+
+// Describes the credit option for CPU usage of a burstable performance instance.
+type InstanceCreditSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The credit option for CPU usage of the instance. Valid values are standard
+	// and unlimited.
+	CpuCredits *string `locationName:"cpuCredits" type:"string"`
+
+	// The ID of the instance.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceCreditSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCreditSpecification) GoString() string {
+	return s.String()
+}
+
+// SetCpuCredits sets the CpuCredits field's value.
+func (s *InstanceCreditSpecification) SetCpuCredits(v string) *InstanceCreditSpecification {
+	s.CpuCredits = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *InstanceCreditSpecification) SetInstanceId(v string) *InstanceCreditSpecification {
+	s.InstanceId = &v
+	return s
+}
+
+// Describes the credit option for CPU usage of a burstable performance instance.
+type InstanceCreditSpecificationRequest struct {
+	_ struct{} `type:"structure"`
+
+	// The credit option for CPU usage of the instance. Valid values are standard
+	// and unlimited.
+	CpuCredits *string `type:"string"`
+
+	// The ID of the instance.
+	InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceCreditSpecificationRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceCreditSpecificationRequest) GoString() string {
+	return s.String()
+}
+
+// SetCpuCredits sets the CpuCredits field's value.
+func (s *InstanceCreditSpecificationRequest) SetCpuCredits(v string) *InstanceCreditSpecificationRequest {
+	s.CpuCredits = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *InstanceCreditSpecificationRequest) SetInstanceId(v string) *InstanceCreditSpecificationRequest {
+	s.InstanceId = &v
+	return s
+}
+
+// Describes an instance to export.
+type InstanceExportDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the resource being exported.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+
+	// The target virtualization environment.
+	TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"`
+}
+
+// String returns the string representation
+func (s InstanceExportDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceExportDetails) GoString() string {
+	return s.String()
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *InstanceExportDetails) SetInstanceId(v string) *InstanceExportDetails {
+	s.InstanceId = &v
+	return s
+}
+
+// SetTargetEnvironment sets the TargetEnvironment field's value.
+func (s *InstanceExportDetails) SetTargetEnvironment(v string) *InstanceExportDetails {
+	s.TargetEnvironment = &v
+	return s
+}
+
+// Describes the default credit option for CPU usage of a burstable performance
+// instance family.
+type InstanceFamilyCreditSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The default credit option for CPU usage of the instance family. Valid values
+	// are standard and unlimited.
+	CpuCredits *string `locationName:"cpuCredits" type:"string"`
+
+	// The instance family.
+	InstanceFamily *string `locationName:"instanceFamily" type:"string" enum:"UnlimitedSupportedInstanceFamily"`
+}
+
+// String returns the string representation
+func (s InstanceFamilyCreditSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceFamilyCreditSpecification) GoString() string {
+	return s.String()
+}
+
+// SetCpuCredits sets the CpuCredits field's value.
+func (s *InstanceFamilyCreditSpecification) SetCpuCredits(v string) *InstanceFamilyCreditSpecification {
+	s.CpuCredits = &v
+	return s
+}
+
+// SetInstanceFamily sets the InstanceFamily field's value.
+func (s *InstanceFamilyCreditSpecification) SetInstanceFamily(v string) *InstanceFamilyCreditSpecification {
+	s.InstanceFamily = &v
+	return s
+}
+
+// Describes an IPv6 address.
+type InstanceIpv6Address struct {
+	_ struct{} `type:"structure"`
+
+	// The IPv6 address.
+	Ipv6Address *string `locationName:"ipv6Address" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIpv6Address) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIpv6Address) GoString() string {
+	return s.String()
+}
+
+// SetIpv6Address sets the Ipv6Address field's value.
+func (s *InstanceIpv6Address) SetIpv6Address(v string) *InstanceIpv6Address {
+	s.Ipv6Address = &v
+	return s
+}
+
+// Describes an IPv6 address.
+type InstanceIpv6AddressRequest struct {
+	_ struct{} `type:"structure"`
+
+	// The IPv6 address.
+	Ipv6Address *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceIpv6AddressRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceIpv6AddressRequest) GoString() string {
+	return s.String()
+}
+
+// SetIpv6Address sets the Ipv6Address field's value.
+func (s *InstanceIpv6AddressRequest) SetIpv6Address(v string) *InstanceIpv6AddressRequest {
+	s.Ipv6Address = &v
+	return s
+}
+
+// Describes the market (purchasing) option for the instances.
+type InstanceMarketOptionsRequest struct {
+	_ struct{} `type:"structure"`
+
+	// The market type.
+	MarketType *string `type:"string" enum:"MarketType"`
+
+	// The options for Spot Instances.
+	SpotOptions *SpotMarketOptions `type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceMarketOptionsRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceMarketOptionsRequest) GoString() string {
+	return s.String()
+}
+
+// SetMarketType sets the MarketType field's value.
+func (s *InstanceMarketOptionsRequest) SetMarketType(v string) *InstanceMarketOptionsRequest {
+	s.MarketType = &v
+	return s
+}
+
+// SetSpotOptions sets the SpotOptions field's value.
+func (s *InstanceMarketOptionsRequest) SetSpotOptions(v *SpotMarketOptions) *InstanceMarketOptionsRequest {
+	s.SpotOptions = v
+	return s
+}
+
+// The metadata options for the instance.
+type InstanceMetadataOptionsRequest struct {
+	_ struct{} `type:"structure"`
+
+	// This parameter enables or disables the HTTP metadata endpoint on your instances.
+	// If the parameter is not specified, the default state is enabled.
+	//
+	// If you specify a value of disabled, you will not be able to access your instance
+	// metadata.
+	HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"`
+
+	// The desired HTTP PUT response hop limit for instance metadata requests. The
+	// larger the number, the further instance metadata requests can travel.
+	//
+	// Default: 1
+	//
+	// Possible values: Integers from 1 to 64
+	HttpPutResponseHopLimit *int64 `type:"integer"`
+
+	// The state of token usage for your instance metadata requests. If the parameter
+	// is not specified in the request, the default state is optional.
+	//
+	// If the state is optional, you can choose to retrieve instance metadata with
+	// or without a signed token header on your request. If you retrieve the IAM
+	// role credentials without a token, the version 1.0 role credentials are returned.
+	// If you retrieve the IAM role credentials using a valid signed token, the
+	// version 2.0 role credentials are returned.
+	//
+	// If the state is required, you must send a signed token header with any instance
+	// metadata retrieval requests. In this state, retrieving the IAM role credentials
+	// always returns the version 2.0 credentials; the version 1.0 credentials are
+	// not available.
+	HttpTokens *string `type:"string" enum:"HttpTokensState"`
+}
+
+// String returns the string representation
+func (s InstanceMetadataOptionsRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceMetadataOptionsRequest) GoString() string {
+	return s.String()
+}
+
+// SetHttpEndpoint sets the HttpEndpoint field's value.
+func (s *InstanceMetadataOptionsRequest) SetHttpEndpoint(v string) *InstanceMetadataOptionsRequest {
+	s.HttpEndpoint = &v
+	return s
+}
+
+// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
+func (s *InstanceMetadataOptionsRequest) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptionsRequest {
+	s.HttpPutResponseHopLimit = &v
+	return s
+}
+
+// SetHttpTokens sets the HttpTokens field's value.
+func (s *InstanceMetadataOptionsRequest) SetHttpTokens(v string) *InstanceMetadataOptionsRequest {
+	s.HttpTokens = &v
+	return s
+}
+
+// The metadata options for the instance.
+type InstanceMetadataOptionsResponse struct {
+	_ struct{} `type:"structure"`
+
+	// This parameter enables or disables the HTTP metadata endpoint on your instances.
+	// If the parameter is not specified, the default state is enabled.
+	//
+	// If you specify a value of disabled, you will not be able to access your instance
+	// metadata.
+	HttpEndpoint *string `locationName:"httpEndpoint" type:"string" enum:"InstanceMetadataEndpointState"`
+
+	// The desired HTTP PUT response hop limit for instance metadata requests. The
+	// larger the number, the further instance metadata requests can travel.
+	//
+	// Default: 1
+	//
+	// Possible values: Integers from 1 to 64
+	HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"`
+
+	// The state of token usage for your instance metadata requests. If the parameter
+	// is not specified in the request, the default state is optional.
+	//
+	// If the state is optional, you can choose to retrieve instance metadata with
+	// or without a signed token header on your request. If you retrieve the IAM
+	// role credentials without a token, the version 1.0 role credentials are returned.
+	// If you retrieve the IAM role credentials using a valid signed token, the
+	// version 2.0 role credentials are returned.
+	//
+	// If the state is required, you must send a signed token header with any instance
+	// metadata retrieval requests. In this state, retrieving the IAM role credentials
+	// always returns the version 2.0 credentials; the version 1.0 credentials are
+	// not available.
+	HttpTokens *string `locationName:"httpTokens" type:"string" enum:"HttpTokensState"`
+
+	// The state of the metadata option changes.
+	//
+	// pending - The metadata options are being updated and the instance is not
+	// ready to process metadata traffic with the new selection.
+	//
+	// applied - The metadata options have been successfully applied on the instance.
+	State *string `locationName:"state" type:"string" enum:"InstanceMetadataOptionsState"`
+}
+
+// String returns the string representation
+func (s InstanceMetadataOptionsResponse) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceMetadataOptionsResponse) GoString() string {
+	return s.String()
+}
+
+// SetHttpEndpoint sets the HttpEndpoint field's value.
+func (s *InstanceMetadataOptionsResponse) SetHttpEndpoint(v string) *InstanceMetadataOptionsResponse {
+	s.HttpEndpoint = &v
+	return s
+}
+
+// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value.
+func (s *InstanceMetadataOptionsResponse) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptionsResponse {
+	s.HttpPutResponseHopLimit = &v
+	return s
+}
+
+// SetHttpTokens sets the HttpTokens field's value.
+func (s *InstanceMetadataOptionsResponse) SetHttpTokens(v string) *InstanceMetadataOptionsResponse {
+	s.HttpTokens = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *InstanceMetadataOptionsResponse) SetState(v string) *InstanceMetadataOptionsResponse {
+	s.State = &v
+	return s
+}
+
+// Describes the monitoring of an instance.
+type InstanceMonitoring struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the instance.
+	InstanceId *string `locationName:"instanceId" type:"string"`
+
+	// The monitoring for the instance.
+	Monitoring *Monitoring `locationName:"monitoring" type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceMonitoring) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceMonitoring) GoString() string {
+	return s.String()
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *InstanceMonitoring) SetInstanceId(v string) *InstanceMonitoring {
+	s.InstanceId = &v
+	return s
+}
+
+// SetMonitoring sets the Monitoring field's value.
+func (s *InstanceMonitoring) SetMonitoring(v *Monitoring) *InstanceMonitoring {
+	s.Monitoring = v
+	return s
+}
+
+// Describes a network interface.
+type InstanceNetworkInterface struct {
+	_ struct{} `type:"structure"`
+
+	// The association information for an Elastic IP address (IPv4) associated with
+	// the network interface.
+	Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"`
+
+	// The network interface attachment.
+	Attachment *InstanceNetworkInterfaceAttachment `locationName:"attachment" type:"structure"`
+
+	// The description.
+	Description *string `locationName:"description" type:"string"`
+
+	// One or more security groups.
+	Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"`
+
+	// Describes the type of network interface.
+	//
+	// Valid values: interface | efa
+	InterfaceType *string `locationName:"interfaceType" type:"string"`
+
+	// One or more IPv6 addresses associated with the network interface.
+	Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"`
+
+	// The MAC address.
+	MacAddress *string `locationName:"macAddress" type:"string"`
+
+	// The ID of the network interface.
+	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+	// The ID of the AWS account that created the network interface.
+	OwnerId *string `locationName:"ownerId" type:"string"`
+
+	// The private DNS name.
+	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
+
+	// The IPv4 address of the network interface within the subnet.
+	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+	// One or more private IPv4 addresses associated with the network interface.
+	PrivateIpAddresses []*InstancePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"`
+
+	// Indicates whether to validate network traffic to or from this network interface.
+	SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"`
+
+	// The status of the network interface.
+	Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"`
+
+	// The ID of the subnet.
+	SubnetId *string `locationName:"subnetId" type:"string"`
+
+	// The ID of the VPC.
+	VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterface) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterface) GoString() string {
+	return s.String()
+}
+
+// SetAssociation sets the Association field's value.
+func (s *InstanceNetworkInterface) SetAssociation(v *InstanceNetworkInterfaceAssociation) *InstanceNetworkInterface {
+	s.Association = v
+	return s
+}
+
+// SetAttachment sets the Attachment field's value.
+func (s *InstanceNetworkInterface) SetAttachment(v *InstanceNetworkInterfaceAttachment) *InstanceNetworkInterface {
+	s.Attachment = v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *InstanceNetworkInterface) SetDescription(v string) *InstanceNetworkInterface {
+	s.Description = &v
+	return s
+}
+
+// SetGroups sets the Groups field's value.
+func (s *InstanceNetworkInterface) SetGroups(v []*GroupIdentifier) *InstanceNetworkInterface {
+	s.Groups = v
+	return s
+}
+
+// SetInterfaceType sets the InterfaceType field's value.
+// The modified receiver is returned to allow chained Set calls.
+func (s *InstanceNetworkInterface) SetInterfaceType(v string) *InstanceNetworkInterface {
+	s.InterfaceType = &v
+	return s
+}
+
+// SetIpv6Addresses sets the Ipv6Addresses field's value.
+func (s *InstanceNetworkInterface) SetIpv6Addresses(v []*InstanceIpv6Address) *InstanceNetworkInterface {
+	s.Ipv6Addresses = v
+	return s
+}
+
+// SetMacAddress sets the MacAddress field's value.
+func (s *InstanceNetworkInterface) SetMacAddress(v string) *InstanceNetworkInterface {
+	s.MacAddress = &v
+	return s
+}
+
+// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
+func (s *InstanceNetworkInterface) SetNetworkInterfaceId(v string) *InstanceNetworkInterface {
+	s.NetworkInterfaceId = &v
+	return s
+}
+
+// SetOwnerId sets the OwnerId field's value.
+func (s *InstanceNetworkInterface) SetOwnerId(v string) *InstanceNetworkInterface {
+	s.OwnerId = &v
+	return s
+}
+
+// SetPrivateDnsName sets the PrivateDnsName field's value.
+func (s *InstanceNetworkInterface) SetPrivateDnsName(v string) *InstanceNetworkInterface {
+	s.PrivateDnsName = &v
+	return s
+}
+
+// SetPrivateIpAddress sets the PrivateIpAddress field's value.
+func (s *InstanceNetworkInterface) SetPrivateIpAddress(v string) *InstanceNetworkInterface {
+	s.PrivateIpAddress = &v
+	return s
+}
+
+// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
+func (s *InstanceNetworkInterface) SetPrivateIpAddresses(v []*InstancePrivateIpAddress) *InstanceNetworkInterface {
+	s.PrivateIpAddresses = v
+	return s
+}
+
+// SetSourceDestCheck sets the SourceDestCheck field's value.
+func (s *InstanceNetworkInterface) SetSourceDestCheck(v bool) *InstanceNetworkInterface {
+	s.SourceDestCheck = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *InstanceNetworkInterface) SetStatus(v string) *InstanceNetworkInterface {
+	s.Status = &v
+	return s
+}
+
+// SetSubnetId sets the SubnetId field's value.
+// The modified receiver is returned to allow chained Set calls.
+func (s *InstanceNetworkInterface) SetSubnetId(v string) *InstanceNetworkInterface {
+	s.SubnetId = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *InstanceNetworkInterface) SetVpcId(v string) *InstanceNetworkInterface {
+	s.VpcId = &v
+	return s
+}
+
+// Describes association information for an Elastic IP address (IPv4).
+type InstanceNetworkInterfaceAssociation struct {
+	_ struct{} `type:"structure"`
+
+	// The carrier IP address associated with the network interface.
+	CarrierIp *string `locationName:"carrierIp" type:"string"`
+
+	// The ID of the owner of the Elastic IP address.
+	IpOwnerId *string `locationName:"ipOwnerId" type:"string"`
+
+	// The public DNS name.
+	PublicDnsName *string `locationName:"publicDnsName" type:"string"`
+
+	// The public IP address or Elastic IP address bound to the network interface.
+	PublicIp *string `locationName:"publicIp" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceAssociation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceAssociation) GoString() string {
+	return s.String()
+}
+
+// SetCarrierIp sets the CarrierIp field's value.
+func (s *InstanceNetworkInterfaceAssociation) SetCarrierIp(v string) *InstanceNetworkInterfaceAssociation {
+	s.CarrierIp = &v
+	return s
+}
+
+// SetIpOwnerId sets the IpOwnerId field's value.
+func (s *InstanceNetworkInterfaceAssociation) SetIpOwnerId(v string) *InstanceNetworkInterfaceAssociation {
+	s.IpOwnerId = &v
+	return s
+}
+
+// SetPublicDnsName sets the PublicDnsName field's value.
+func (s *InstanceNetworkInterfaceAssociation) SetPublicDnsName(v string) *InstanceNetworkInterfaceAssociation {
+	s.PublicDnsName = &v
+	return s
+}
+
+// SetPublicIp sets the PublicIp field's value.
+func (s *InstanceNetworkInterfaceAssociation) SetPublicIp(v string) *InstanceNetworkInterfaceAssociation {
+	s.PublicIp = &v
+	return s
+}
+
+// Describes a network interface attachment.
+type InstanceNetworkInterfaceAttachment struct {
+	_ struct{} `type:"structure"`
+
+	// The time stamp when the attachment was initiated.
+	AttachTime *time.Time `locationName:"attachTime" type:"timestamp"`
+
+	// The ID of the network interface attachment.
+	AttachmentId *string `locationName:"attachmentId" type:"string"`
+
+	// Indicates whether the network interface is deleted when the instance is terminated.
+	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+	// The index of the device on the instance for the network interface attachment.
+	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
+
+	// The index of the network card.
+	NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"`
+
+	// The attachment state.
+	Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceAttachment) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceAttachment) GoString() string {
+	return s.String()
+}
+
+// SetAttachTime sets the AttachTime field's value.
+func (s *InstanceNetworkInterfaceAttachment) SetAttachTime(v time.Time) *InstanceNetworkInterfaceAttachment {
+	s.AttachTime = &v
+	return s
+}
+
+// SetAttachmentId sets the AttachmentId field's value.
+func (s *InstanceNetworkInterfaceAttachment) SetAttachmentId(v string) *InstanceNetworkInterfaceAttachment {
+	s.AttachmentId = &v
+	return s
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+// The modified receiver is returned to allow chained Set calls.
+func (s *InstanceNetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *InstanceNetworkInterfaceAttachment {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetDeviceIndex sets the DeviceIndex field's value.
+func (s *InstanceNetworkInterfaceAttachment) SetDeviceIndex(v int64) *InstanceNetworkInterfaceAttachment {
+	s.DeviceIndex = &v
+	return s
+}
+
+// SetNetworkCardIndex sets the NetworkCardIndex field's value.
+func (s *InstanceNetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceAttachment {
+	s.NetworkCardIndex = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetworkInterfaceAttachment {
+	s.Status = &v
+	return s
+}
+
+// Describes a network interface.
+type InstanceNetworkInterfaceSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to assign a carrier IP address to the network interface.
+	//
+	// You can only assign a carrier IP address to a network interface that is in
+	// a subnet in a Wavelength Zone. For more information about carrier IP addresses,
+	// see Carrier IP addresses in the AWS Wavelength Developer Guide.
+	AssociateCarrierIpAddress *bool `type:"boolean"`
+
+	// Indicates whether to assign a public IPv4 address to an instance you launch
+	// in a VPC. The public IP address can only be assigned to a network interface
+	// for eth0, and can only be assigned to a new network interface, not an existing
+	// one. You cannot specify more than one network interface in the request. If
+	// launching into a default subnet, the default value is true.
+	AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"`
+
+	// If set to true, the interface is deleted when the instance is terminated.
+	// You can specify true only if creating a new network interface when launching
+	// an instance.
+	DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"`
+
+	// The description of the network interface. Applies only if creating a network
+	// interface when launching an instance.
+	Description *string `locationName:"description" type:"string"`
+
+	// The position of the network interface in the attachment order. A primary
+	// network interface has a device index of 0.
+	//
+	// If you specify a network interface when launching an instance, you must specify
+	// the device index.
+	DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"`
+
+	// The IDs of the security groups for the network interface. Applies only if
+	// creating a network interface when launching an instance.
+	Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`
+
+	// The type of network interface.
+	//
+	// To create an Elastic Fabric Adapter (EFA), specify efa. For more information,
+	// see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	//
+	// If you are not creating an EFA, specify interface or omit this parameter.
+	//
+	// Valid values: interface | efa
+	InterfaceType *string `type:"string"`
+
+	// A number of IPv6 addresses to assign to the network interface. Amazon EC2
+	// chooses the IPv6 addresses from the range of the subnet. You cannot specify
+	// this option and the option to assign specific IPv6 addresses in the same
+	// request. You can specify this option if you've specified a minimum number
+	// of instances to launch.
+	Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"`
+
+	// One or more IPv6 addresses to assign to the network interface. You cannot
+	// specify this option and the option to assign a number of IPv6 addresses in
+	// the same request. You cannot specify this option if you've specified a minimum
+	// number of instances to launch.
+	Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" queryName:"Ipv6Addresses" locationNameList:"item" type:"list"`
+
+	// The index of the network card. Some instance types support multiple network
+	// cards. The primary network interface must be assigned to network card index
+	// 0. The default is network card index 0.
+	NetworkCardIndex *int64 `type:"integer"`
+
+	// The ID of the network interface.
+	//
+	// If you are creating a Spot Fleet, omit this parameter because you can’t
+	// specify a network interface ID in a launch specification.
+	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`
+
+	// The private IPv4 address of the network interface. Applies only if creating
+	// a network interface when launching an instance. You cannot specify this option
+	// if you're launching more than one instance in a RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)
+	// request.
+	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+
+	// One or more private IPv4 addresses to assign to the network interface. Only
+	// one private IPv4 address can be designated as primary. You cannot specify
+	// this option if you're launching more than one instance in a RunInstances
+	// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)
+	// request.
+	PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" queryName:"PrivateIpAddresses" locationNameList:"item" type:"list"`
+
+	// The number of secondary private IPv4 addresses. You can't specify this option
+	// and specify more than one private IP address using the private IP addresses
+	// option. You cannot specify this option if you're launching more than one
+	// instance in a RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)
+	// request.
+	SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"`
+
+	// The ID of the subnet associated with the network interface. Applies only
+	// if creating a network interface when launching an instance.
+	SubnetId *string `locationName:"subnetId" type:"string"`
+}
+
+// String returns the string representation
+func (s InstanceNetworkInterfaceSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceNetworkInterfaceSpecification) GoString() string {
+	return s.String()
+}
+
+// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *InstanceNetworkInterfaceSpecification {
+	s.AssociateCarrierIpAddress = &v
+	return s
+}
+
+// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification {
+	s.AssociatePublicIpAddress = &v
+	return s
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetDeleteOnTermination(v bool) *InstanceNetworkInterfaceSpecification {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetDescription(v string) *InstanceNetworkInterfaceSpecification {
+	s.Description = &v
+	return s
+}
+
+// SetDeviceIndex sets the DeviceIndex field's value.
+func (s *InstanceNetworkInterfaceSpecification) SetDeviceIndex(v int64) *InstanceNetworkInterfaceSpecification { + s.DeviceIndex = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *InstanceNetworkInterfaceSpecification) SetGroups(v []*string) *InstanceNetworkInterfaceSpecification { + s.Groups = v + return s +} + +// SetInterfaceType sets the InterfaceType field's value. +func (s *InstanceNetworkInterfaceSpecification) SetInterfaceType(v string) *InstanceNetworkInterfaceSpecification { + s.InterfaceType = &v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *InstanceNetworkInterfaceSpecification) SetIpv6AddressCount(v int64) *InstanceNetworkInterfaceSpecification { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *InstanceNetworkInterfaceSpecification) SetIpv6Addresses(v []*InstanceIpv6Address) *InstanceNetworkInterfaceSpecification { + s.Ipv6Addresses = v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *InstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *InstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *InstanceNetworkInterfaceSpecification { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *InstanceNetworkInterfaceSpecification) SetPrivateIpAddress(v string) *InstanceNetworkInterfaceSpecification { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. 
+func (s *InstanceNetworkInterfaceSpecification) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *InstanceNetworkInterfaceSpecification { + s.PrivateIpAddresses = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. +func (s *InstanceNetworkInterfaceSpecification) SetSecondaryPrivateIpAddressCount(v int64) *InstanceNetworkInterfaceSpecification { + s.SecondaryPrivateIpAddressCount = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *InstanceNetworkInterfaceSpecification) SetSubnetId(v string) *InstanceNetworkInterfaceSpecification { + s.SubnetId = &v + return s +} + +// Describes a private IPv4 address. +type InstancePrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address for the network interface. + Association *InstanceNetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IPv4 address is the primary private IP address of + // the network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private IPv4 DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IPv4 address of the network interface. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s InstancePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstancePrivateIpAddress) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *InstancePrivateIpAddress) SetAssociation(v *InstanceNetworkInterfaceAssociation) *InstancePrivateIpAddress { + s.Association = v + return s +} + +// SetPrimary sets the Primary field's value. 
+func (s *InstancePrivateIpAddress) SetPrimary(v bool) *InstancePrivateIpAddress { + s.Primary = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *InstancePrivateIpAddress) SetPrivateDnsName(v string) *InstancePrivateIpAddress { + s.PrivateDnsName = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivateIpAddress { + s.PrivateIpAddress = &v + return s +} + +// The instance details to specify which volumes should be snapshotted. +type InstanceSpecification struct { + _ struct{} `type:"structure"` + + // Excludes the root volume from being snapshotted. + ExcludeBootVolume *bool `type:"boolean"` + + // The instance to specify which volumes should be snapshotted. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s InstanceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceSpecification) GoString() string { + return s.String() +} + +// SetExcludeBootVolume sets the ExcludeBootVolume field's value. +func (s *InstanceSpecification) SetExcludeBootVolume(v bool) *InstanceSpecification { + s.ExcludeBootVolume = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceSpecification) SetInstanceId(v string) *InstanceSpecification { + s.InstanceId = &v + return s +} + +// Describes the current state of an instance. +type InstanceState struct { + _ struct{} `type:"structure"` + + // The state of the instance as a 16-bit unsigned integer. + // + // The high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal + // values between 256 and 65,535. These numerical values are used for internal + // purposes and should be ignored. + // + // The low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal + // values between 0 and 255. 
+ // + // The valid values for instance-state-code will all be in the range of the + // low byte and they are: + // + // * 0 : pending + // + // * 16 : running + // + // * 32 : shutting-down + // + // * 48 : terminated + // + // * 64 : stopping + // + // * 80 : stopped + // + // You can ignore the high byte value by zeroing out all of the bits above 2^8 + // or 256 in decimal. + Code *int64 `locationName:"code" type:"integer"` + + // The current state of the instance. + Name *string `locationName:"name" type:"string" enum:"InstanceStateName"` +} + +// String returns the string representation +func (s InstanceState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceState) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *InstanceState) SetCode(v int64) *InstanceState { + s.Code = &v + return s +} + +// SetName sets the Name field's value. +func (s *InstanceState) SetName(v string) *InstanceState { + s.Name = &v + return s +} + +// Describes an instance state change. +type InstanceStateChange struct { + _ struct{} `type:"structure"` + + // The current state of the instance. + CurrentState *InstanceState `locationName:"currentState" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The previous state of the instance. + PreviousState *InstanceState `locationName:"previousState" type:"structure"` +} + +// String returns the string representation +func (s InstanceStateChange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStateChange) GoString() string { + return s.String() +} + +// SetCurrentState sets the CurrentState field's value. +func (s *InstanceStateChange) SetCurrentState(v *InstanceState) *InstanceStateChange { + s.CurrentState = v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *InstanceStateChange) SetInstanceId(v string) *InstanceStateChange { + s.InstanceId = &v + return s +} + +// SetPreviousState sets the PreviousState field's value. +func (s *InstanceStateChange) SetPreviousState(v *InstanceState) *InstanceStateChange { + s.PreviousState = v + return s +} + +// Describes the status of an instance. +type InstanceStatus struct { + _ struct{} `type:"structure"` + + // The Availability Zone of the instance. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // Any scheduled events associated with the instance. + Events []*InstanceStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The intended state of the instance. DescribeInstanceStatus requires that + // an instance be in the running state. + InstanceState *InstanceState `locationName:"instanceState" type:"structure"` + + // Reports impaired functionality that stems from issues internal to the instance, + // such as impaired reachability. + InstanceStatus *InstanceStatusSummary `locationName:"instanceStatus" type:"structure"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // Reports impaired functionality that stems from issues related to the systems + // that support an instance, such as hardware failures and network connectivity + // problems. + SystemStatus *InstanceStatusSummary `locationName:"systemStatus" type:"structure"` +} + +// String returns the string representation +func (s InstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatus) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. 
+func (s *InstanceStatus) SetAvailabilityZone(v string) *InstanceStatus { + s.AvailabilityZone = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *InstanceStatus) SetEvents(v []*InstanceStatusEvent) *InstanceStatus { + s.Events = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceStatus) SetInstanceId(v string) *InstanceStatus { + s.InstanceId = &v + return s +} + +// SetInstanceState sets the InstanceState field's value. +func (s *InstanceStatus) SetInstanceState(v *InstanceState) *InstanceStatus { + s.InstanceState = v + return s +} + +// SetInstanceStatus sets the InstanceStatus field's value. +func (s *InstanceStatus) SetInstanceStatus(v *InstanceStatusSummary) *InstanceStatus { + s.InstanceStatus = v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *InstanceStatus) SetOutpostArn(v string) *InstanceStatus { + s.OutpostArn = &v + return s +} + +// SetSystemStatus sets the SystemStatus field's value. +func (s *InstanceStatus) SetSystemStatus(v *InstanceStatusSummary) *InstanceStatus { + s.SystemStatus = v + return s +} + +// Describes the instance status. +type InstanceStatusDetails struct { + _ struct{} `type:"structure"` + + // The time when a status check failed. For an instance that was launched and + // impaired, this is the time when the instance was launched. + ImpairedSince *time.Time `locationName:"impairedSince" type:"timestamp"` + + // The type of instance status. + Name *string `locationName:"name" type:"string" enum:"StatusName"` + + // The status. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s InstanceStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusDetails) GoString() string { + return s.String() +} + +// SetImpairedSince sets the ImpairedSince field's value. 
+func (s *InstanceStatusDetails) SetImpairedSince(v time.Time) *InstanceStatusDetails { + s.ImpairedSince = &v + return s +} + +// SetName sets the Name field's value. +func (s *InstanceStatusDetails) SetName(v string) *InstanceStatusDetails { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstanceStatusDetails) SetStatus(v string) *InstanceStatusDetails { + s.Status = &v + return s +} + +// Describes a scheduled event for an instance. +type InstanceStatusEvent struct { + _ struct{} `type:"structure"` + + // The event code. + Code *string `locationName:"code" type:"string" enum:"EventCode"` + + // A description of the event. + // + // After a scheduled event is completed, it can still be described for up to + // a week. If the event has been completed, this description starts with the + // following text: [Completed]. + Description *string `locationName:"description" type:"string"` + + // The ID of the event. + InstanceEventId *string `locationName:"instanceEventId" type:"string"` + + // The latest scheduled end time for the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` + + // The earliest scheduled start time for the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` + + // The deadline for starting the event. + NotBeforeDeadline *time.Time `locationName:"notBeforeDeadline" type:"timestamp"` +} + +// String returns the string representation +func (s InstanceStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceStatusEvent) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *InstanceStatusEvent) SetCode(v string) *InstanceStatusEvent { + s.Code = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *InstanceStatusEvent) SetDescription(v string) *InstanceStatusEvent {
+	s.Description = &v
+	return s
+}
+
+// SetInstanceEventId sets the InstanceEventId field's value.
+func (s *InstanceStatusEvent) SetInstanceEventId(v string) *InstanceStatusEvent {
+	s.InstanceEventId = &v
+	return s
+}
+
+// SetNotAfter sets the NotAfter field's value.
+func (s *InstanceStatusEvent) SetNotAfter(v time.Time) *InstanceStatusEvent {
+	s.NotAfter = &v
+	return s
+}
+
+// SetNotBefore sets the NotBefore field's value.
+func (s *InstanceStatusEvent) SetNotBefore(v time.Time) *InstanceStatusEvent {
+	s.NotBefore = &v
+	return s
+}
+
+// SetNotBeforeDeadline sets the NotBeforeDeadline field's value.
+func (s *InstanceStatusEvent) SetNotBeforeDeadline(v time.Time) *InstanceStatusEvent {
+	s.NotBeforeDeadline = &v
+	return s
+}
+
+// Describes the status of an instance.
+type InstanceStatusSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The system instance health or application instance health.
+	Details []*InstanceStatusDetails `locationName:"details" locationNameList:"item" type:"list"`
+
+	// The status.
+	Status *string `locationName:"status" type:"string" enum:"SummaryStatus"`
+}
+
+// String returns the string representation
+func (s InstanceStatusSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceStatusSummary) GoString() string {
+	return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *InstanceStatusSummary) SetDetails(v []*InstanceStatusDetails) *InstanceStatusSummary {
+	s.Details = v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary {
+	s.Status = &v
+	return s
+}
+
+// Describes the disks that are available for the instance type.
+type InstanceStorageInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Describes the disks that are available for the instance type.
+	Disks []*DiskInfo `locationName:"disks" locationNameList:"item" type:"list"`
+
+	// Indicates whether non-volatile memory express (NVMe) is supported for instance
+	// store.
+	NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EphemeralNvmeSupport"`
+
+	// The total size of the disks, in GB.
+	TotalSizeInGB *int64 `locationName:"totalSizeInGB" type:"long"`
+}
+
+// String returns the string representation
+func (s InstanceStorageInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceStorageInfo) GoString() string {
+	return s.String()
+}
+
+// SetDisks sets the Disks field's value.
+func (s *InstanceStorageInfo) SetDisks(v []*DiskInfo) *InstanceStorageInfo {
+	s.Disks = v
+	return s
+}
+
+// SetNvmeSupport sets the NvmeSupport field's value.
+func (s *InstanceStorageInfo) SetNvmeSupport(v string) *InstanceStorageInfo {
+	s.NvmeSupport = &v
+	return s
+}
+
+// SetTotalSizeInGB sets the TotalSizeInGB field's value.
+func (s *InstanceStorageInfo) SetTotalSizeInGB(v int64) *InstanceStorageInfo {
+	s.TotalSizeInGB = &v
+	return s
+}
+
+// Describes the registered tag keys for the current Region.
+type InstanceTagNotificationAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether all tag keys in the current Region are registered to appear
+	// in scheduled event notifications. true indicates that all tag keys in the
+	// current Region are registered.
+	IncludeAllTagsOfInstance *bool `locationName:"includeAllTagsOfInstance" type:"boolean"`
+
+	// The registered tag keys.
+	InstanceTagKeys []*string `locationName:"instanceTagKeySet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceTagNotificationAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceTagNotificationAttribute) GoString() string {
+	return s.String()
+}
+
+// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value.
+func (s *InstanceTagNotificationAttribute) SetIncludeAllTagsOfInstance(v bool) *InstanceTagNotificationAttribute {
+	s.IncludeAllTagsOfInstance = &v
+	return s
+}
+
+// SetInstanceTagKeys sets the InstanceTagKeys field's value.
+func (s *InstanceTagNotificationAttribute) SetInstanceTagKeys(v []*string) *InstanceTagNotificationAttribute {
+	s.InstanceTagKeys = v
+	return s
+}
+
+// Describes the instance type.
+type InstanceTypeInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether auto recovery is supported.
+	AutoRecoverySupported *bool `locationName:"autoRecoverySupported" type:"boolean"`
+
+	// Indicates whether the instance is a bare metal instance type.
+	BareMetal *bool `locationName:"bareMetal" type:"boolean"`
+
+	// Indicates whether the instance type is a burstable performance instance type.
+	BurstablePerformanceSupported *bool `locationName:"burstablePerformanceSupported" type:"boolean"`
+
+	// Indicates whether the instance type is current generation.
+	CurrentGeneration *bool `locationName:"currentGeneration" type:"boolean"`
+
+	// Indicates whether Dedicated Hosts are supported on the instance type.
+	DedicatedHostsSupported *bool `locationName:"dedicatedHostsSupported" type:"boolean"`
+
+	// Describes the Amazon EBS settings for the instance type.
+	EbsInfo *EbsInfo `locationName:"ebsInfo" type:"structure"`
+
+	// Describes the FPGA accelerator settings for the instance type.
+	FpgaInfo *FpgaInfo `locationName:"fpgaInfo" type:"structure"`
+
+	// Indicates whether the instance type is eligible for the free tier.
+	FreeTierEligible *bool `locationName:"freeTierEligible" type:"boolean"`
+
+	// Describes the GPU accelerator settings for the instance type.
+	GpuInfo *GpuInfo `locationName:"gpuInfo" type:"structure"`
+
+	// Indicates whether On-Demand hibernation is supported.
+	HibernationSupported *bool `locationName:"hibernationSupported" type:"boolean"`
+
+	// The hypervisor for the instance type.
+	Hypervisor *string `locationName:"hypervisor" type:"string" enum:"InstanceTypeHypervisor"`
+
+	// Describes the Inference accelerator settings for the instance type.
+	InferenceAcceleratorInfo *InferenceAcceleratorInfo `locationName:"inferenceAcceleratorInfo" type:"structure"`
+
+	// Describes the instance storage for the instance type.
+	InstanceStorageInfo *InstanceStorageInfo `locationName:"instanceStorageInfo" type:"structure"`
+
+	// Indicates whether instance storage is supported.
+	InstanceStorageSupported *bool `locationName:"instanceStorageSupported" type:"boolean"`
+
+	// The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+	// in the Amazon Elastic Compute Cloud User Guide.
+	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
+
+	// Describes the memory for the instance type.
+	MemoryInfo *MemoryInfo `locationName:"memoryInfo" type:"structure"`
+
+	// Describes the network settings for the instance type.
+	NetworkInfo *NetworkInfo `locationName:"networkInfo" type:"structure"`
+
+	// Describes the placement group settings for the instance type.
+	PlacementGroupInfo *PlacementGroupInfo `locationName:"placementGroupInfo" type:"structure"`
+
+	// Describes the processor.
+	ProcessorInfo *ProcessorInfo `locationName:"processorInfo" type:"structure"`
+
+	// The supported root device types.
+	SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"`
+
+	// Indicates whether the instance type is offered for spot or On-Demand.
+	SupportedUsageClasses []*string `locationName:"supportedUsageClasses" locationNameList:"item" type:"list"`
+
+	// The supported virtualization types.
+	SupportedVirtualizationTypes []*string `locationName:"supportedVirtualizationTypes" locationNameList:"item" type:"list"`
+
+	// Describes the vCPU configurations for the instance type.
+	VCpuInfo *VCpuInfo `locationName:"vCpuInfo" type:"structure"`
+}
+
+// String returns the string representation
+func (s InstanceTypeInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceTypeInfo) GoString() string {
+	return s.String()
+}
+
+// SetAutoRecoverySupported sets the AutoRecoverySupported field's value.
+func (s *InstanceTypeInfo) SetAutoRecoverySupported(v bool) *InstanceTypeInfo {
+	s.AutoRecoverySupported = &v
+	return s
+}
+
+// SetBareMetal sets the BareMetal field's value.
+func (s *InstanceTypeInfo) SetBareMetal(v bool) *InstanceTypeInfo {
+	s.BareMetal = &v
+	return s
+}
+
+// SetBurstablePerformanceSupported sets the BurstablePerformanceSupported field's value.
+func (s *InstanceTypeInfo) SetBurstablePerformanceSupported(v bool) *InstanceTypeInfo {
+	s.BurstablePerformanceSupported = &v
+	return s
+}
+
+// SetCurrentGeneration sets the CurrentGeneration field's value.
+func (s *InstanceTypeInfo) SetCurrentGeneration(v bool) *InstanceTypeInfo {
+	s.CurrentGeneration = &v
+	return s
+}
+
+// SetDedicatedHostsSupported sets the DedicatedHostsSupported field's value.
+func (s *InstanceTypeInfo) SetDedicatedHostsSupported(v bool) *InstanceTypeInfo {
+	s.DedicatedHostsSupported = &v
+	return s
+}
+
+// SetEbsInfo sets the EbsInfo field's value.
+func (s *InstanceTypeInfo) SetEbsInfo(v *EbsInfo) *InstanceTypeInfo { + s.EbsInfo = v + return s +} + +// SetFpgaInfo sets the FpgaInfo field's value. +func (s *InstanceTypeInfo) SetFpgaInfo(v *FpgaInfo) *InstanceTypeInfo { + s.FpgaInfo = v + return s +} + +// SetFreeTierEligible sets the FreeTierEligible field's value. +func (s *InstanceTypeInfo) SetFreeTierEligible(v bool) *InstanceTypeInfo { + s.FreeTierEligible = &v + return s +} + +// SetGpuInfo sets the GpuInfo field's value. +func (s *InstanceTypeInfo) SetGpuInfo(v *GpuInfo) *InstanceTypeInfo { + s.GpuInfo = v + return s +} + +// SetHibernationSupported sets the HibernationSupported field's value. +func (s *InstanceTypeInfo) SetHibernationSupported(v bool) *InstanceTypeInfo { + s.HibernationSupported = &v + return s +} + +// SetHypervisor sets the Hypervisor field's value. +func (s *InstanceTypeInfo) SetHypervisor(v string) *InstanceTypeInfo { + s.Hypervisor = &v + return s +} + +// SetInferenceAcceleratorInfo sets the InferenceAcceleratorInfo field's value. +func (s *InstanceTypeInfo) SetInferenceAcceleratorInfo(v *InferenceAcceleratorInfo) *InstanceTypeInfo { + s.InferenceAcceleratorInfo = v + return s +} + +// SetInstanceStorageInfo sets the InstanceStorageInfo field's value. +func (s *InstanceTypeInfo) SetInstanceStorageInfo(v *InstanceStorageInfo) *InstanceTypeInfo { + s.InstanceStorageInfo = v + return s +} + +// SetInstanceStorageSupported sets the InstanceStorageSupported field's value. +func (s *InstanceTypeInfo) SetInstanceStorageSupported(v bool) *InstanceTypeInfo { + s.InstanceStorageSupported = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *InstanceTypeInfo) SetInstanceType(v string) *InstanceTypeInfo { + s.InstanceType = &v + return s +} + +// SetMemoryInfo sets the MemoryInfo field's value. +func (s *InstanceTypeInfo) SetMemoryInfo(v *MemoryInfo) *InstanceTypeInfo { + s.MemoryInfo = v + return s +} + +// SetNetworkInfo sets the NetworkInfo field's value. 
+func (s *InstanceTypeInfo) SetNetworkInfo(v *NetworkInfo) *InstanceTypeInfo { + s.NetworkInfo = v + return s +} + +// SetPlacementGroupInfo sets the PlacementGroupInfo field's value. +func (s *InstanceTypeInfo) SetPlacementGroupInfo(v *PlacementGroupInfo) *InstanceTypeInfo { + s.PlacementGroupInfo = v + return s +} + +// SetProcessorInfo sets the ProcessorInfo field's value. +func (s *InstanceTypeInfo) SetProcessorInfo(v *ProcessorInfo) *InstanceTypeInfo { + s.ProcessorInfo = v + return s +} + +// SetSupportedRootDeviceTypes sets the SupportedRootDeviceTypes field's value. +func (s *InstanceTypeInfo) SetSupportedRootDeviceTypes(v []*string) *InstanceTypeInfo { + s.SupportedRootDeviceTypes = v + return s +} + +// SetSupportedUsageClasses sets the SupportedUsageClasses field's value. +func (s *InstanceTypeInfo) SetSupportedUsageClasses(v []*string) *InstanceTypeInfo { + s.SupportedUsageClasses = v + return s +} + +// SetSupportedVirtualizationTypes sets the SupportedVirtualizationTypes field's value. +func (s *InstanceTypeInfo) SetSupportedVirtualizationTypes(v []*string) *InstanceTypeInfo { + s.SupportedVirtualizationTypes = v + return s +} + +// SetVCpuInfo sets the VCpuInfo field's value. +func (s *InstanceTypeInfo) SetVCpuInfo(v *VCpuInfo) *InstanceTypeInfo { + s.VCpuInfo = v + return s +} + +// The instance types offered. +type InstanceTypeOffering struct { + _ struct{} `type:"structure"` + + // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The identifier for the location. This depends on the location type. For example, + // if the location type is region, the location is the Region code (for example, + // us-east-2.) + Location *string `locationName:"location" type:"string"` + + // The location type. 
+ LocationType *string `locationName:"locationType" type:"string" enum:"LocationType"` +} + +// String returns the string representation +func (s InstanceTypeOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceTypeOffering) GoString() string { + return s.String() +} + +// SetInstanceType sets the InstanceType field's value. +func (s *InstanceTypeOffering) SetInstanceType(v string) *InstanceTypeOffering { + s.InstanceType = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *InstanceTypeOffering) SetLocation(v string) *InstanceTypeOffering { + s.Location = &v + return s +} + +// SetLocationType sets the LocationType field's value. +func (s *InstanceTypeOffering) SetLocationType(v string) *InstanceTypeOffering { + s.LocationType = &v + return s +} + +// Information about the Capacity Reservation usage. +type InstanceUsage struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account that is making use of the Capacity Reservation. + AccountId *string `locationName:"accountId" type:"string"` + + // The number of instances the AWS account currently has in the Capacity Reservation. + UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` +} + +// String returns the string representation +func (s InstanceUsage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceUsage) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *InstanceUsage) SetAccountId(v string) *InstanceUsage { + s.AccountId = &v + return s +} + +// SetUsedInstanceCount sets the UsedInstanceCount field's value. +func (s *InstanceUsage) SetUsedInstanceCount(v int64) *InstanceUsage { + s.UsedInstanceCount = &v + return s +} + +// Describes an internet gateway. 
+type InternetGateway struct { + _ struct{} `type:"structure"` + + // Any VPCs attached to the internet gateway. + Attachments []*InternetGatewayAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"` + + // The ID of the internet gateway. + InternetGatewayId *string `locationName:"internetGatewayId" type:"string"` + + // The ID of the AWS account that owns the internet gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the internet gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s InternetGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGateway) GoString() string { + return s.String() +} + +// SetAttachments sets the Attachments field's value. +func (s *InternetGateway) SetAttachments(v []*InternetGatewayAttachment) *InternetGateway { + s.Attachments = v + return s +} + +// SetInternetGatewayId sets the InternetGatewayId field's value. +func (s *InternetGateway) SetInternetGatewayId(v string) *InternetGateway { + s.InternetGatewayId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *InternetGateway) SetOwnerId(v string) *InternetGateway { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *InternetGateway) SetTags(v []*Tag) *InternetGateway { + s.Tags = v + return s +} + +// Describes the attachment of a VPC to an internet gateway or an egress-only +// internet gateway. +type InternetGatewayAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. For an internet gateway, the state is + // available when attached to a VPC; otherwise, this value is not returned. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s InternetGatewayAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternetGatewayAttachment) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *InternetGatewayAttachment) SetState(v string) *InternetGatewayAttachment { + s.State = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachment { + s.VpcId = &v + return s +} + +// Describes a set of permissions for a security group rule. +type IpPermission struct { + _ struct{} `type:"structure"` + + // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 + // type number. A value of -1 indicates all ICMP/ICMPv6 types. If you specify + // all ICMP/ICMPv6 types, you must specify all codes. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (tcp, udp, icmp, icmpv6) or number (see Protocol Numbers + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // + // [VPC only] Use -1 to specify all protocols. When authorizing security group + // rules, specifying -1 or a protocol number other than tcp, udp, icmp, or icmpv6 + // allows traffic on all ports, regardless of any port range you specify. For + // tcp, udp, and icmp, you must specify a port range. For icmpv6, the port range + // is optional; if you omit the port range, traffic for all types and codes + // is allowed. + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The IPv4 ranges. + IpRanges []*IpRange `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // [VPC only] The IPv6 ranges. + Ipv6Ranges []*Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` + + // [VPC only] The prefix list IDs. 
+ PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. + // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 + // types, you must specify all codes. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // The security group and AWS account ID pairs. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s IpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpPermission) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *IpPermission) SetFromPort(v int64) *IpPermission { + s.FromPort = &v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *IpPermission) SetIpProtocol(v string) *IpPermission { + s.IpProtocol = &v + return s +} + +// SetIpRanges sets the IpRanges field's value. +func (s *IpPermission) SetIpRanges(v []*IpRange) *IpPermission { + s.IpRanges = v + return s +} + +// SetIpv6Ranges sets the Ipv6Ranges field's value. +func (s *IpPermission) SetIpv6Ranges(v []*Ipv6Range) *IpPermission { + s.Ipv6Ranges = v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *IpPermission) SetPrefixListIds(v []*PrefixListId) *IpPermission { + s.PrefixListIds = v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *IpPermission) SetToPort(v int64) *IpPermission { + s.ToPort = &v + return s +} + +// SetUserIdGroupPairs sets the UserIdGroupPairs field's value. +func (s *IpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *IpPermission { + s.UserIdGroupPairs = v + return s +} + +// Describes an IPv4 range. +type IpRange struct { + _ struct{} `type:"structure"` + + // The IPv4 CIDR range. 
You can either specify a CIDR range or a source security + // group, not both. To specify a single IPv4 address, use the /32 prefix length. + CidrIp *string `locationName:"cidrIp" type:"string"` + + // A description for the security group rule that references this IPv4 address + // range. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s IpRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRange) GoString() string { + return s.String() +} + +// SetCidrIp sets the CidrIp field's value. +func (s *IpRange) SetCidrIp(v string) *IpRange { + s.CidrIp = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *IpRange) SetDescription(v string) *IpRange { + s.Description = &v + return s +} + +// Describes an IPv6 CIDR block association. +type Ipv6CidrAssociation struct { + _ struct{} `type:"structure"` + + // The resource that's associated with the IPv6 CIDR block. + AssociatedResource *string `locationName:"associatedResource" type:"string"` + + // The IPv6 CIDR block. + Ipv6Cidr *string `locationName:"ipv6Cidr" type:"string"` +} + +// String returns the string representation +func (s Ipv6CidrAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6CidrAssociation) GoString() string { + return s.String() +} + +// SetAssociatedResource sets the AssociatedResource field's value. +func (s *Ipv6CidrAssociation) SetAssociatedResource(v string) *Ipv6CidrAssociation { + s.AssociatedResource = &v + return s +} + +// SetIpv6Cidr sets the Ipv6Cidr field's value. +func (s *Ipv6CidrAssociation) SetIpv6Cidr(v string) *Ipv6CidrAssociation { + s.Ipv6Cidr = &v + return s +} + +// Describes an IPv6 CIDR block. 
+type Ipv6CidrBlock struct { + _ struct{} `type:"structure"` + + // The IPv6 CIDR block. + Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"` +} + +// String returns the string representation +func (s Ipv6CidrBlock) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6CidrBlock) GoString() string { + return s.String() +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *Ipv6CidrBlock) SetIpv6CidrBlock(v string) *Ipv6CidrBlock { + s.Ipv6CidrBlock = &v + return s +} + +// Describes an IPv6 address pool. +type Ipv6Pool struct { + _ struct{} `type:"structure"` + + // The description for the address pool. + Description *string `locationName:"description" type:"string"` + + // The CIDR blocks for the address pool. + PoolCidrBlocks []*PoolCidrBlock `locationName:"poolCidrBlockSet" locationNameList:"item" type:"list"` + + // The ID of the address pool. + PoolId *string `locationName:"poolId" type:"string"` + + // Any tags for the address pool. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Ipv6Pool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6Pool) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *Ipv6Pool) SetDescription(v string) *Ipv6Pool { + s.Description = &v + return s +} + +// SetPoolCidrBlocks sets the PoolCidrBlocks field's value. +func (s *Ipv6Pool) SetPoolCidrBlocks(v []*PoolCidrBlock) *Ipv6Pool { + s.PoolCidrBlocks = v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *Ipv6Pool) SetPoolId(v string) *Ipv6Pool { + s.PoolId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Ipv6Pool) SetTags(v []*Tag) *Ipv6Pool { + s.Tags = v + return s +} + +// [EC2-VPC only] Describes an IPv6 range. 
+type Ipv6Range struct { + _ struct{} `type:"structure"` + + // The IPv6 CIDR range. You can either specify a CIDR range or a source security + // group, not both. To specify a single IPv6 address, use the /128 prefix length. + CidrIpv6 *string `locationName:"cidrIpv6" type:"string"` + + // A description for the security group rule that references this IPv6 address + // range. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s Ipv6Range) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6Range) GoString() string { + return s.String() +} + +// SetCidrIpv6 sets the CidrIpv6 field's value. +func (s *Ipv6Range) SetCidrIpv6(v string) *Ipv6Range { + s.CidrIpv6 = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Ipv6Range) SetDescription(v string) *Ipv6Range { + s.Description = &v + return s +} + +// Describes a key pair. +type KeyPairInfo struct { + _ struct{} `type:"structure"` + + // If you used CreateKeyPair to create the key pair, this is the SHA-1 digest + // of the DER encoded private key. If you used ImportKeyPair to provide AWS + // the public key, this is the MD5 public key fingerprint as specified in section + // 4 of RFC4716. + KeyFingerprint *string `locationName:"keyFingerprint" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // The ID of the key pair. + KeyPairId *string `locationName:"keyPairId" type:"string"` + + // Any tags applied to the key pair. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s KeyPairInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyPairInfo) GoString() string { + return s.String() +} + +// SetKeyFingerprint sets the KeyFingerprint field's value. +func (s *KeyPairInfo) SetKeyFingerprint(v string) *KeyPairInfo { + s.KeyFingerprint = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *KeyPairInfo) SetKeyName(v string) *KeyPairInfo { + s.KeyName = &v + return s +} + +// SetKeyPairId sets the KeyPairId field's value. +func (s *KeyPairInfo) SetKeyPairId(v string) *KeyPairInfo { + s.KeyPairId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *KeyPairInfo) SetTags(v []*Tag) *KeyPairInfo { + s.Tags = v + return s +} + +// The last error that occurred for a VPC endpoint. +type LastError struct { + _ struct{} `type:"structure"` + + // The error code for the VPC endpoint error. + Code *string `locationName:"code" type:"string"` + + // The error message for the VPC endpoint error. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LastError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LastError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *LastError) SetCode(v string) *LastError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *LastError) SetMessage(v string) *LastError { + s.Message = &v + return s +} + +// Describes a launch permission. +type LaunchPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. 
+ UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LaunchPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermission) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LaunchPermission) SetGroup(v string) *LaunchPermission { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LaunchPermission) SetUserId(v string) *LaunchPermission { + s.UserId = &v + return s +} + +// Describes a launch permission modification. +type LaunchPermissionModifications struct { + _ struct{} `type:"structure"` + + // The AWS account ID to add to the list of launch permissions for the AMI. + Add []*LaunchPermission `locationNameList:"item" type:"list"` + + // The AWS account ID to remove from the list of launch permissions for the + // AMI. + Remove []*LaunchPermission `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LaunchPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPermissionModifications) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *LaunchPermissionModifications) SetAdd(v []*LaunchPermission) *LaunchPermissionModifications { + s.Add = v + return s +} + +// SetRemove sets the Remove field's value. +func (s *LaunchPermissionModifications) SetRemove(v []*LaunchPermission) *LaunchPermissionModifications { + s.Remove = v + return s +} + +// Describes the launch specification for an instance. +type LaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. 
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Describes the monitoring of an instance. + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. If you specify a network interface, you must + // specify subnet IDs and security group IDs using the network interface. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. 
+ SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The ID of the subnet in which to launch the instance. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded user data for the instance. + UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s LaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchSpecification) GoString() string { + return s.String() +} + +// SetAddressingType sets the AddressingType field's value. +func (s *LaunchSpecification) SetAddressingType(v string) *LaunchSpecification { + s.AddressingType = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *LaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *LaunchSpecification { + s.BlockDeviceMappings = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *LaunchSpecification) SetEbsOptimized(v bool) *LaunchSpecification { + s.EbsOptimized = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *LaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *LaunchSpecification { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *LaunchSpecification) SetImageId(v string) *LaunchSpecification { + s.ImageId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *LaunchSpecification) SetInstanceType(v string) *LaunchSpecification { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *LaunchSpecification) SetKernelId(v string) *LaunchSpecification { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. 
+func (s *LaunchSpecification) SetKeyName(v string) *LaunchSpecification { + s.KeyName = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *LaunchSpecification) SetMonitoring(v *RunInstancesMonitoringEnabled) *LaunchSpecification { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *LaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *LaunchSpecification { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *LaunchSpecification) SetPlacement(v *SpotPlacement) *LaunchSpecification { + s.Placement = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *LaunchSpecification) SetRamdiskId(v string) *LaunchSpecification { + s.RamdiskId = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *LaunchSpecification) SetSecurityGroups(v []*GroupIdentifier) *LaunchSpecification { + s.SecurityGroups = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *LaunchSpecification) SetSubnetId(v string) *LaunchSpecification { + s.SubnetId = &v + return s +} + +// SetUserData sets the UserData field's value. +func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification { + s.UserData = &v + return s +} + +// Describes a launch template. +type LaunchTemplate struct { + _ struct{} `type:"structure"` + + // The time launch template was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // The principal that created the launch template. + CreatedBy *string `locationName:"createdBy" type:"string"` + + // The version number of the default version of the launch template. + DefaultVersionNumber *int64 `locationName:"defaultVersionNumber" type:"long"` + + // The version number of the latest version of the launch template. 
+ LatestVersionNumber *int64 `locationName:"latestVersionNumber" type:"long"` + + // The ID of the launch template. + LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` + + // The name of the launch template. + LaunchTemplateName *string `locationName:"launchTemplateName" min:"3" type:"string"` + + // The tags for the launch template. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LaunchTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplate) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *LaunchTemplate) SetCreateTime(v time.Time) *LaunchTemplate { + s.CreateTime = &v + return s +} + +// SetCreatedBy sets the CreatedBy field's value. +func (s *LaunchTemplate) SetCreatedBy(v string) *LaunchTemplate { + s.CreatedBy = &v + return s +} + +// SetDefaultVersionNumber sets the DefaultVersionNumber field's value. +func (s *LaunchTemplate) SetDefaultVersionNumber(v int64) *LaunchTemplate { + s.DefaultVersionNumber = &v + return s +} + +// SetLatestVersionNumber sets the LatestVersionNumber field's value. +func (s *LaunchTemplate) SetLatestVersionNumber(v int64) *LaunchTemplate { + s.LatestVersionNumber = &v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *LaunchTemplate) SetLaunchTemplateId(v string) *LaunchTemplate { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *LaunchTemplate) SetLaunchTemplateName(v string) *LaunchTemplate { + s.LaunchTemplateName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LaunchTemplate) SetTags(v []*Tag) *LaunchTemplate { + s.Tags = v + return s +} + +// Describes a launch template and overrides. 
+type LaunchTemplateAndOverridesResponse struct { + _ struct{} `type:"structure"` + + // The launch template. + LaunchTemplateSpecification *FleetLaunchTemplateSpecification `locationName:"launchTemplateSpecification" type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. + Overrides *FleetLaunchTemplateOverrides `locationName:"overrides" type:"structure"` +} + +// String returns the string representation +func (s LaunchTemplateAndOverridesResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateAndOverridesResponse) GoString() string { + return s.String() +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *LaunchTemplateAndOverridesResponse) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecification) *LaunchTemplateAndOverridesResponse { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *LaunchTemplateAndOverridesResponse) SetOverrides(v *FleetLaunchTemplateOverrides) *LaunchTemplateAndOverridesResponse { + s.Overrides = v + return s +} + +// Describes a block device mapping. +type LaunchTemplateBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name. + DeviceName *string `locationName:"deviceName" type:"string"` + + // Information about the block device for an EBS volume. + Ebs *LaunchTemplateEbsBlockDevice `locationName:"ebs" type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `locationName:"noDevice" type:"string"` + + // The virtual device name (ephemeralN). 
+ VirtualName *string `locationName:"virtualName" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateBlockDeviceMapping) GoString() string { + return s.String() +} + +// SetDeviceName sets the DeviceName field's value. +func (s *LaunchTemplateBlockDeviceMapping) SetDeviceName(v string) *LaunchTemplateBlockDeviceMapping { + s.DeviceName = &v + return s +} + +// SetEbs sets the Ebs field's value. +func (s *LaunchTemplateBlockDeviceMapping) SetEbs(v *LaunchTemplateEbsBlockDevice) *LaunchTemplateBlockDeviceMapping { + s.Ebs = v + return s +} + +// SetNoDevice sets the NoDevice field's value. +func (s *LaunchTemplateBlockDeviceMapping) SetNoDevice(v string) *LaunchTemplateBlockDeviceMapping { + s.NoDevice = &v + return s +} + +// SetVirtualName sets the VirtualName field's value. +func (s *LaunchTemplateBlockDeviceMapping) SetVirtualName(v string) *LaunchTemplateBlockDeviceMapping { + s.VirtualName = &v + return s +} + +// Describes a block device mapping. +type LaunchTemplateBlockDeviceMappingRequest struct { + _ struct{} `type:"structure"` + + // The device name (for example, /dev/sdh or xvdh). + DeviceName *string `type:"string"` + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *LaunchTemplateEbsBlockDeviceRequest `type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1. The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. 
+ VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateBlockDeviceMappingRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateBlockDeviceMappingRequest) GoString() string { + return s.String() +} + +// SetDeviceName sets the DeviceName field's value. +func (s *LaunchTemplateBlockDeviceMappingRequest) SetDeviceName(v string) *LaunchTemplateBlockDeviceMappingRequest { + s.DeviceName = &v + return s +} + +// SetEbs sets the Ebs field's value. +func (s *LaunchTemplateBlockDeviceMappingRequest) SetEbs(v *LaunchTemplateEbsBlockDeviceRequest) *LaunchTemplateBlockDeviceMappingRequest { + s.Ebs = v + return s +} + +// SetNoDevice sets the NoDevice field's value. +func (s *LaunchTemplateBlockDeviceMappingRequest) SetNoDevice(v string) *LaunchTemplateBlockDeviceMappingRequest { + s.NoDevice = &v + return s +} + +// SetVirtualName sets the VirtualName field's value. +func (s *LaunchTemplateBlockDeviceMappingRequest) SetVirtualName(v string) *LaunchTemplateBlockDeviceMappingRequest { + s.VirtualName = &v + return s +} + +// Describes an instance's Capacity Reservation targeting option. You can specify +// only one option at a time. Use the CapacityReservationPreference parameter +// to configure the instance to run in On-Demand capacity or to run in any open +// Capacity Reservation that has matching attributes (instance type, platform, +// Availability Zone). Use the CapacityReservationTarget parameter to explicitly +// target a specific Capacity Reservation or a Capacity Reservation group. +type LaunchTemplateCapacityReservationSpecificationRequest struct { + _ struct{} `type:"structure"` + + // Indicates the instance's Capacity Reservation preferences. Possible preferences + // include: + // + // * open - The instance can run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). 
+ // + // * none - The instance avoids running in a Capacity Reservation even if + // one is available. The instance runs in On-Demand capacity. + CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` + + // Information about the target Capacity Reservation or Capacity Reservation + // group. + CapacityReservationTarget *CapacityReservationTarget `type:"structure"` +} + +// String returns the string representation +func (s LaunchTemplateCapacityReservationSpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCapacityReservationSpecificationRequest) GoString() string { + return s.String() +} + +// SetCapacityReservationPreference sets the CapacityReservationPreference field's value. +func (s *LaunchTemplateCapacityReservationSpecificationRequest) SetCapacityReservationPreference(v string) *LaunchTemplateCapacityReservationSpecificationRequest { + s.CapacityReservationPreference = &v + return s +} + +// SetCapacityReservationTarget sets the CapacityReservationTarget field's value. +func (s *LaunchTemplateCapacityReservationSpecificationRequest) SetCapacityReservationTarget(v *CapacityReservationTarget) *LaunchTemplateCapacityReservationSpecificationRequest { + s.CapacityReservationTarget = v + return s +} + +// Information about the Capacity Reservation targeting option. +type LaunchTemplateCapacityReservationSpecificationResponse struct { + _ struct{} `type:"structure"` + + // Indicates the instance's Capacity Reservation preferences. Possible preferences + // include: + // + // * open - The instance can run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). + // + // * none - The instance avoids running in a Capacity Reservation even if + // one is available. The instance runs in On-Demand capacity. 
+ CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"` + + // Information about the target Capacity Reservation or Capacity Reservation + // group. + CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"` +} + +// String returns the string representation +func (s LaunchTemplateCapacityReservationSpecificationResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCapacityReservationSpecificationResponse) GoString() string { + return s.String() +} + +// SetCapacityReservationPreference sets the CapacityReservationPreference field's value. +func (s *LaunchTemplateCapacityReservationSpecificationResponse) SetCapacityReservationPreference(v string) *LaunchTemplateCapacityReservationSpecificationResponse { + s.CapacityReservationPreference = &v + return s +} + +// SetCapacityReservationTarget sets the CapacityReservationTarget field's value. +func (s *LaunchTemplateCapacityReservationSpecificationResponse) SetCapacityReservationTarget(v *CapacityReservationTargetResponse) *LaunchTemplateCapacityReservationSpecificationResponse { + s.CapacityReservationTarget = v + return s +} + +// Describes a launch template and overrides. +type LaunchTemplateConfig struct { + _ struct{} `type:"structure"` + + // The launch template. + LaunchTemplateSpecification *FleetLaunchTemplateSpecification `locationName:"launchTemplateSpecification" type:"structure"` + + // Any parameters that you specify override the same parameters in the launch + // template. 
+ Overrides []*LaunchTemplateOverrides `locationName:"overrides" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LaunchTemplateConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LaunchTemplateConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LaunchTemplateConfig"} + if s.LaunchTemplateSpecification != nil { + if err := s.LaunchTemplateSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplateSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateSpecification sets the LaunchTemplateSpecification field's value. +func (s *LaunchTemplateConfig) SetLaunchTemplateSpecification(v *FleetLaunchTemplateSpecification) *LaunchTemplateConfig { + s.LaunchTemplateSpecification = v + return s +} + +// SetOverrides sets the Overrides field's value. +func (s *LaunchTemplateConfig) SetOverrides(v []*LaunchTemplateOverrides) *LaunchTemplateConfig { + s.Overrides = v + return s +} + +// The CPU options for the instance. +type LaunchTemplateCpuOptions struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `locationName:"coreCount" type:"integer"` + + // The number of threads per CPU core. + ThreadsPerCore *int64 `locationName:"threadsPerCore" type:"integer"` +} + +// String returns the string representation +func (s LaunchTemplateCpuOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCpuOptions) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. 
+func (s *LaunchTemplateCpuOptions) SetCoreCount(v int64) *LaunchTemplateCpuOptions { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *LaunchTemplateCpuOptions) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptions { + s.ThreadsPerCore = &v + return s +} + +// The CPU options for the instance. Both the core count and threads per core +// must be specified in the request. +type LaunchTemplateCpuOptionsRequest struct { + _ struct{} `type:"structure"` + + // The number of CPU cores for the instance. + CoreCount *int64 `type:"integer"` + + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. + ThreadsPerCore *int64 `type:"integer"` +} + +// String returns the string representation +func (s LaunchTemplateCpuOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateCpuOptionsRequest) GoString() string { + return s.String() +} + +// SetCoreCount sets the CoreCount field's value. +func (s *LaunchTemplateCpuOptionsRequest) SetCoreCount(v int64) *LaunchTemplateCpuOptionsRequest { + s.CoreCount = &v + return s +} + +// SetThreadsPerCore sets the ThreadsPerCore field's value. +func (s *LaunchTemplateCpuOptionsRequest) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptionsRequest { + s.ThreadsPerCore = &v + return s +} + +// Describes a block device for an EBS volume. +type LaunchTemplateEbsBlockDevice struct { + _ struct{} `type:"structure"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // Indicates whether the EBS volume is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. 
+ Iops *int64 `locationName:"iops" type:"integer"` + + // The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The throughput that the volume supports, in MiB/s. + Throughput *int64 `locationName:"throughput" type:"integer"` + + // The size of the volume, in GiB. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` + + // The volume type. + VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s LaunchTemplateEbsBlockDevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEbsBlockDevice) GoString() string { + return s.String() +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *LaunchTemplateEbsBlockDevice) SetDeleteOnTermination(v bool) *LaunchTemplateEbsBlockDevice { + s.DeleteOnTermination = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *LaunchTemplateEbsBlockDevice) SetEncrypted(v bool) *LaunchTemplateEbsBlockDevice { + s.Encrypted = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *LaunchTemplateEbsBlockDevice) SetIops(v int64) *LaunchTemplateEbsBlockDevice { + s.Iops = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *LaunchTemplateEbsBlockDevice) SetKmsKeyId(v string) *LaunchTemplateEbsBlockDevice { + s.KmsKeyId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *LaunchTemplateEbsBlockDevice) SetSnapshotId(v string) *LaunchTemplateEbsBlockDevice { + s.SnapshotId = &v + return s +} + +// SetThroughput sets the Throughput field's value. 
+func (s *LaunchTemplateEbsBlockDevice) SetThroughput(v int64) *LaunchTemplateEbsBlockDevice { + s.Throughput = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *LaunchTemplateEbsBlockDevice) SetVolumeSize(v int64) *LaunchTemplateEbsBlockDevice { + s.VolumeSize = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *LaunchTemplateEbsBlockDevice) SetVolumeType(v string) *LaunchTemplateEbsBlockDevice { + s.VolumeType = &v + return s +} + +// The parameters for a block device for an EBS volume. +type LaunchTemplateEbsBlockDeviceRequest struct { + _ struct{} `type:"structure"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the EBS volume is encrypted. Encrypted volumes can only + // be attached to instances that support Amazon EBS encryption. If you are creating + // a volume from a snapshot, you can't specify an encryption value. + Encrypted *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, + // this represents the number of IOPS that are provisioned for the volume. For + // gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + // + // The following are the supported values for each volume type: + // + // * gp3: 3,000-16,000 IOPS + // + // * io1: 100-64,000 IOPS + // + // * io2: 100-64,000 IOPS + // + // For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built + // on the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. + // + // This parameter is required for io1 and io2 volumes. The default for gp3 volumes + // is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard + // volumes. 
+	Iops *int64 `type:"integer"` + + // The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for + // encryption. + KmsKeyId *string `type:"string"` + + // The ID of the snapshot. + SnapshotId *string `type:"string"` + + // The throughput to provision for a gp3 volume, with a maximum of 1,000 MiB/s. + // + // Valid Range: Minimum value of 125. Maximum value of 1000. + Throughput *int64 `type:"integer"` + + // The size of the volume, in GiBs. You must specify either a snapshot ID or + // a volume size. If you specify a snapshot, the default is the snapshot size. + // You can specify a volume size that is equal to or larger than the snapshot + // size. + // + // The following are the supported volume sizes for each volume type: + // + // * gp2 and gp3: 1-16,384 + // + // * io1 and io2: 4-16,384 + // + // * st1 and sc1: 125-16,384 + // + // * standard: 1-1,024 + VolumeSize *int64 `type:"integer"` + + // The volume type. The default is gp2. For more information, see Amazon EBS + // volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s LaunchTemplateEbsBlockDeviceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEbsBlockDeviceRequest) GoString() string { + return s.String() +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetDeleteOnTermination(v bool) *LaunchTemplateEbsBlockDeviceRequest { + s.DeleteOnTermination = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetEncrypted(v bool) *LaunchTemplateEbsBlockDeviceRequest { + s.Encrypted = &v + return s +} + +// SetIops sets the Iops field's value.
+func (s *LaunchTemplateEbsBlockDeviceRequest) SetIops(v int64) *LaunchTemplateEbsBlockDeviceRequest { + s.Iops = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetKmsKeyId(v string) *LaunchTemplateEbsBlockDeviceRequest { + s.KmsKeyId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetSnapshotId(v string) *LaunchTemplateEbsBlockDeviceRequest { + s.SnapshotId = &v + return s +} + +// SetThroughput sets the Throughput field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetThroughput(v int64) *LaunchTemplateEbsBlockDeviceRequest { + s.Throughput = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetVolumeSize(v int64) *LaunchTemplateEbsBlockDeviceRequest { + s.VolumeSize = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *LaunchTemplateEbsBlockDeviceRequest) SetVolumeType(v string) *LaunchTemplateEbsBlockDeviceRequest { + s.VolumeType = &v + return s +} + +// Describes an elastic inference accelerator. +type LaunchTemplateElasticInferenceAccelerator struct { + _ struct{} `type:"structure"` + + // The number of elastic inference accelerators to attach to the instance. + // + // Default: 1 + Count *int64 `min:"1" type:"integer"` + + // The type of elastic inference accelerator. The possible values are eia1.medium, + // eia1.large, and eia1.xlarge. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LaunchTemplateElasticInferenceAccelerator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateElasticInferenceAccelerator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LaunchTemplateElasticInferenceAccelerator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LaunchTemplateElasticInferenceAccelerator"} + if s.Count != nil && *s.Count < 1 { + invalidParams.Add(request.NewErrParamMinValue("Count", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCount sets the Count field's value. +func (s *LaunchTemplateElasticInferenceAccelerator) SetCount(v int64) *LaunchTemplateElasticInferenceAccelerator { + s.Count = &v + return s +} + +// SetType sets the Type field's value. +func (s *LaunchTemplateElasticInferenceAccelerator) SetType(v string) *LaunchTemplateElasticInferenceAccelerator { + s.Type = &v + return s +} + +// Describes an elastic inference accelerator. +type LaunchTemplateElasticInferenceAcceleratorResponse struct { + _ struct{} `type:"structure"` + + // The number of elastic inference accelerators to attach to the instance. + // + // Default: 1 + Count *int64 `locationName:"count" type:"integer"` + + // The type of elastic inference accelerator. The possible values are eia1.medium, + // eia1.large, and eia1.xlarge. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateElasticInferenceAcceleratorResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateElasticInferenceAcceleratorResponse) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *LaunchTemplateElasticInferenceAcceleratorResponse) SetCount(v int64) *LaunchTemplateElasticInferenceAcceleratorResponse { + s.Count = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *LaunchTemplateElasticInferenceAcceleratorResponse) SetType(v string) *LaunchTemplateElasticInferenceAcceleratorResponse { + s.Type = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. +type LaunchTemplateEnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptions) SetEnabled(v bool) *LaunchTemplateEnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type LaunchTemplateEnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptionsRequest) SetEnabled(v bool) *LaunchTemplateEnclaveOptionsRequest { + s.Enabled = &v + return s +} + +// Indicates whether an instance is configured for hibernation. 
+type LaunchTemplateHibernationOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for hibernation; + // otherwise, it is not enabled for hibernation. + Configured *bool `locationName:"configured" type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateHibernationOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateHibernationOptions) GoString() string { + return s.String() +} + +// SetConfigured sets the Configured field's value. +func (s *LaunchTemplateHibernationOptions) SetConfigured(v bool) *LaunchTemplateHibernationOptions { + s.Configured = &v + return s +} + +// Indicates whether the instance is configured for hibernation. This parameter +// is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). +type LaunchTemplateHibernationOptionsRequest struct { + _ struct{} `type:"structure"` + + // If you set this parameter to true, the instance is enabled for hibernation. + // + // Default: false + Configured *bool `type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateHibernationOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateHibernationOptionsRequest) GoString() string { + return s.String() +} + +// SetConfigured sets the Configured field's value. +func (s *LaunchTemplateHibernationOptionsRequest) SetConfigured(v bool) *LaunchTemplateHibernationOptionsRequest { + s.Configured = &v + return s +} + +// Describes an IAM instance profile. +type LaunchTemplateIamInstanceProfileSpecification struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the instance profile. 
+ Arn *string `locationName:"arn" type:"string"` + + // The name of the instance profile. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateIamInstanceProfileSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateIamInstanceProfileSpecification) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LaunchTemplateIamInstanceProfileSpecification) SetArn(v string) *LaunchTemplateIamInstanceProfileSpecification { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *LaunchTemplateIamInstanceProfileSpecification) SetName(v string) *LaunchTemplateIamInstanceProfileSpecification { + s.Name = &v + return s +} + +// An IAM instance profile. +type LaunchTemplateIamInstanceProfileSpecificationRequest struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the instance profile. + Arn *string `type:"string"` + + // The name of the instance profile. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateIamInstanceProfileSpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateIamInstanceProfileSpecificationRequest) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *LaunchTemplateIamInstanceProfileSpecificationRequest) SetArn(v string) *LaunchTemplateIamInstanceProfileSpecificationRequest { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *LaunchTemplateIamInstanceProfileSpecificationRequest) SetName(v string) *LaunchTemplateIamInstanceProfileSpecificationRequest { + s.Name = &v + return s +} + +// The market (purchasing) option for the instances. 
+type LaunchTemplateInstanceMarketOptions struct { + _ struct{} `type:"structure"` + + // The market type. + MarketType *string `locationName:"marketType" type:"string" enum:"MarketType"` + + // The options for Spot Instances. + SpotOptions *LaunchTemplateSpotMarketOptions `locationName:"spotOptions" type:"structure"` +} + +// String returns the string representation +func (s LaunchTemplateInstanceMarketOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateInstanceMarketOptions) GoString() string { + return s.String() +} + +// SetMarketType sets the MarketType field's value. +func (s *LaunchTemplateInstanceMarketOptions) SetMarketType(v string) *LaunchTemplateInstanceMarketOptions { + s.MarketType = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. +func (s *LaunchTemplateInstanceMarketOptions) SetSpotOptions(v *LaunchTemplateSpotMarketOptions) *LaunchTemplateInstanceMarketOptions { + s.SpotOptions = v + return s +} + +// The market (purchasing) option for the instances. +type LaunchTemplateInstanceMarketOptionsRequest struct { + _ struct{} `type:"structure"` + + // The market type. + MarketType *string `type:"string" enum:"MarketType"` + + // The options for Spot Instances. + SpotOptions *LaunchTemplateSpotMarketOptionsRequest `type:"structure"` +} + +// String returns the string representation +func (s LaunchTemplateInstanceMarketOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateInstanceMarketOptionsRequest) GoString() string { + return s.String() +} + +// SetMarketType sets the MarketType field's value. +func (s *LaunchTemplateInstanceMarketOptionsRequest) SetMarketType(v string) *LaunchTemplateInstanceMarketOptionsRequest { + s.MarketType = &v + return s +} + +// SetSpotOptions sets the SpotOptions field's value. 
+func (s *LaunchTemplateInstanceMarketOptionsRequest) SetSpotOptions(v *LaunchTemplateSpotMarketOptionsRequest) *LaunchTemplateInstanceMarketOptionsRequest { + s.SpotOptions = v + return s +} + +// The metadata options for the instance. For more information, see Instance +// Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) +// in the Amazon Elastic Compute Cloud User Guide. +type LaunchTemplateInstanceMetadataOptions struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint *string `locationName:"httpEndpoint" type:"string" enum:"LaunchTemplateInstanceMetadataEndpointState"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. 
In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens *string `locationName:"httpTokens" type:"string" enum:"LaunchTemplateHttpTokensState"` + + // The state of the metadata option changes. + // + // pending - The metadata options are being updated and the instance is not + // ready to process metadata traffic with the new selection. + // + // applied - The metadata options have been successfully applied on the instance. + State *string `locationName:"state" type:"string" enum:"LaunchTemplateInstanceMetadataOptionsState"` +} + +// String returns the string representation +func (s LaunchTemplateInstanceMetadataOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateInstanceMetadataOptions) GoString() string { + return s.String() +} + +// SetHttpEndpoint sets the HttpEndpoint field's value. +func (s *LaunchTemplateInstanceMetadataOptions) SetHttpEndpoint(v string) *LaunchTemplateInstanceMetadataOptions { + s.HttpEndpoint = &v + return s +} + +// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value. +func (s *LaunchTemplateInstanceMetadataOptions) SetHttpPutResponseHopLimit(v int64) *LaunchTemplateInstanceMetadataOptions { + s.HttpPutResponseHopLimit = &v + return s +} + +// SetHttpTokens sets the HttpTokens field's value. +func (s *LaunchTemplateInstanceMetadataOptions) SetHttpTokens(v string) *LaunchTemplateInstanceMetadataOptions { + s.HttpTokens = &v + return s +} + +// SetState sets the State field's value. +func (s *LaunchTemplateInstanceMetadataOptions) SetState(v string) *LaunchTemplateInstanceMetadataOptions { + s.State = &v + return s +} + +// The metadata options for the instance. 
For more information, see Instance +// Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) +// in the Amazon Elastic Compute Cloud User Guide. +type LaunchTemplateInstanceMetadataOptionsRequest struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint *string `type:"string" enum:"LaunchTemplateInstanceMetadataEndpointState"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. 
+ HttpTokens *string `type:"string" enum:"LaunchTemplateHttpTokensState"` +} + +// String returns the string representation +func (s LaunchTemplateInstanceMetadataOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateInstanceMetadataOptionsRequest) GoString() string { + return s.String() +} + +// SetHttpEndpoint sets the HttpEndpoint field's value. +func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpEndpoint(v string) *LaunchTemplateInstanceMetadataOptionsRequest { + s.HttpEndpoint = &v + return s +} + +// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value. +func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpPutResponseHopLimit(v int64) *LaunchTemplateInstanceMetadataOptionsRequest { + s.HttpPutResponseHopLimit = &v + return s +} + +// SetHttpTokens sets the HttpTokens field's value. +func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpTokens(v string) *LaunchTemplateInstanceMetadataOptionsRequest { + s.HttpTokens = &v + return s +} + +// Describes a network interface. +type LaunchTemplateInstanceNetworkInterfaceSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether to associate a Carrier IP address with eth0 for a new network + // interface. + // + // Use this option when you launch an instance in a Wavelength Zone and want + // to associate a Carrier IP address with the network interface. For more information + // about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) + // in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `locationName:"associateCarrierIpAddress" type:"boolean"` + + // Indicates whether to associate a public IPv4 address with eth0 for a new + // network interface. 
+ AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // A description for the network interface. + Description *string `locationName:"description" type:"string"` + + // The device index for the network interface attachment. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"groupSet" locationNameList:"groupId" type:"list"` + + // The type of network interface. + InterfaceType *string `locationName:"interfaceType" type:"string"` + + // The number of IPv6 addresses for the network interface. + Ipv6AddressCount *int64 `locationName:"ipv6AddressCount" type:"integer"` + + // The IPv6 addresses for the network interface. + Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The primary private IPv4 address of the network interface. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // One or more private IPv4 addresses. + PrivateIpAddresses []*PrivateIpAddressSpecification `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The number of secondary private IPv4 addresses for the network interface. + SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` + + // The ID of the subnet for the network interface. 
+ SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateInstanceNetworkInterfaceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateInstanceNetworkInterfaceSpecification) GoString() string { + return s.String() +} + +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.AssociateCarrierIpAddress = &v + return s +} + +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.AssociatePublicIpAddress = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetDeleteOnTermination(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.DeleteOnTermination = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetDescription(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.Description = &v + return s +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetDeviceIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.DeviceIndex = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetGroups(v []*string) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.Groups = v + return s +} + +// SetInterfaceType sets the InterfaceType field's value. 
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetInterfaceType(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.InterfaceType = &v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6AddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6Addresses(v []*InstanceIpv6Address) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.Ipv6Addresses = v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetPrivateIpAddress(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.PrivateIpAddresses = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. 
func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetSecondaryPrivateIpAddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification {
	s.SecondaryPrivateIpAddressCount = &v
	return s
}

// SetSubnetId sets the SubnetId field's value and returns the receiver so
// setter calls can be chained.
func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetSubnetId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification {
	s.SubnetId = &v
	return s
}

// The parameters for a network interface.
//
// All fields are pointer-typed (SDK convention) so that an unset field can be
// distinguished from a zero value when the request is serialized; the struct
// tags define the EC2 query-protocol wire names.
type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct {
	_ struct{} `type:"structure"`

	// Associates a Carrier IP address with eth0 for a new network interface.
	//
	// Use this option when you launch an instance in a Wavelength Zone and want
	// to associate a Carrier IP address with the network interface. For more information
	// about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip)
	// in the AWS Wavelength Developer Guide.
	AssociateCarrierIpAddress *bool `type:"boolean"`

	// Associates a public IPv4 address with eth0 for a new network interface.
	AssociatePublicIpAddress *bool `type:"boolean"`

	// Indicates whether the network interface is deleted when the instance is terminated.
	DeleteOnTermination *bool `type:"boolean"`

	// A description for the network interface.
	Description *string `type:"string"`

	// The device index for the network interface attachment.
	DeviceIndex *int64 `type:"integer"`

	// The IDs of one or more security groups.
	Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"`

	// The type of network interface. To create an Elastic Fabric Adapter (EFA),
	// specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	//
	// If you are not creating an EFA, specify interface or omit this parameter.
	//
	// Valid values: interface | efa
	InterfaceType *string `type:"string"`

	// The number of IPv6 addresses to assign to a network interface. Amazon EC2
	// automatically selects the IPv6 addresses from the subnet range. You can't
	// use this option if specifying specific IPv6 addresses.
	Ipv6AddressCount *int64 `type:"integer"`

	// One or more specific IPv6 addresses from the IPv6 CIDR block range of your
	// subnet. You can't use this option if you're specifying a number of IPv6 addresses.
	Ipv6Addresses []*InstanceIpv6AddressRequest `locationNameList:"InstanceIpv6Address" type:"list"`

	// The index of the network card. Some instance types support multiple network
	// cards. The primary network interface must be assigned to network card index
	// 0. The default is network card index 0.
	NetworkCardIndex *int64 `type:"integer"`

	// The ID of the network interface.
	NetworkInterfaceId *string `type:"string"`

	// The primary private IPv4 address of the network interface.
	PrivateIpAddress *string `type:"string"`

	// One or more private IPv4 addresses.
	PrivateIpAddresses []*PrivateIpAddressSpecification `locationNameList:"item" type:"list"`

	// The number of secondary private IPv4 addresses to assign to a network interface.
	SecondaryPrivateIpAddressCount *int64 `type:"integer"`

	// The ID of the subnet for the network interface.
	SubnetId *string `type:"string"`
}

// String returns the string representation (a pretty-printed dump of all
// set fields, via awsutil.Prettify).
func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) GoString() string {
	return s.String()
}

// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value.
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.AssociateCarrierIpAddress = &v + return s +} + +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.AssociatePublicIpAddress = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetDeleteOnTermination(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.DeleteOnTermination = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetDescription(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.Description = &v + return s +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetDeviceIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.DeviceIndex = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetGroups(v []*string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.Groups = v + return s +} + +// SetInterfaceType sets the InterfaceType field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetInterfaceType(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.InterfaceType = &v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. 
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6AddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6Addresses(v []*InstanceIpv6AddressRequest) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.Ipv6Addresses = v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetPrivateIpAddress(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetPrivateIpAddresses(v []*PrivateIpAddressSpecification) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.PrivateIpAddresses = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetSecondaryPrivateIpAddressCount(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.SecondaryPrivateIpAddressCount = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. 
func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetSubnetId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
	s.SubnetId = &v
	return s
}

// Describes a license configuration.
//
// This is the response shape; LaunchTemplateLicenseConfigurationRequest below
// is the equivalent request shape (differing only in serialization tags).
type LaunchTemplateLicenseConfiguration struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the license configuration.
	LicenseConfigurationArn *string `locationName:"licenseConfigurationArn" type:"string"`
}

// String returns the string representation
func (s LaunchTemplateLicenseConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateLicenseConfiguration) GoString() string {
	return s.String()
}

// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value
// and returns the receiver so setter calls can be chained.
func (s *LaunchTemplateLicenseConfiguration) SetLicenseConfigurationArn(v string) *LaunchTemplateLicenseConfiguration {
	s.LicenseConfigurationArn = &v
	return s
}

// Describes a license configuration.
type LaunchTemplateLicenseConfigurationRequest struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the license configuration.
	LicenseConfigurationArn *string `type:"string"`
}

// String returns the string representation
func (s LaunchTemplateLicenseConfigurationRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateLicenseConfigurationRequest) GoString() string {
	return s.String()
}

// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value
// and returns the receiver so setter calls can be chained.
func (s *LaunchTemplateLicenseConfigurationRequest) SetLicenseConfigurationArn(v string) *LaunchTemplateLicenseConfigurationRequest {
	s.LicenseConfigurationArn = &v
	return s
}

// Describes overrides for a launch template.
type LaunchTemplateOverrides struct {
	_ struct{} `type:"structure"`

	// The Availability Zone in which to launch the instances.
	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`

	// The instance type.
	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`

	// The priority for the launch template override. If OnDemandAllocationStrategy
	// is set to prioritized, Spot Fleet uses priority to determine which launch
	// template override to use first in fulfilling On-Demand capacity. The highest
	// priority is launched first. Valid values are whole numbers starting at 0.
	// The lower the number, the higher the priority. If no number is set, the launch
	// template override has the lowest priority.
	Priority *float64 `locationName:"priority" type:"double"`

	// The maximum price per unit hour that you are willing to pay for a Spot Instance.
	SpotPrice *string `locationName:"spotPrice" type:"string"`

	// The ID of the subnet in which to launch the instances.
	SubnetId *string `locationName:"subnetId" type:"string"`

	// The number of units provided by the specified instance type.
	WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"`
}

// String returns the string representation
func (s LaunchTemplateOverrides) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateOverrides) GoString() string {
	return s.String()
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
func (s *LaunchTemplateOverrides) SetAvailabilityZone(v string) *LaunchTemplateOverrides {
	s.AvailabilityZone = &v
	return s
}

// SetInstanceType sets the InstanceType field's value.
func (s *LaunchTemplateOverrides) SetInstanceType(v string) *LaunchTemplateOverrides {
	s.InstanceType = &v
	return s
}

// SetPriority sets the Priority field's value.
func (s *LaunchTemplateOverrides) SetPriority(v float64) *LaunchTemplateOverrides {
	s.Priority = &v
	return s
}

// SetSpotPrice sets the SpotPrice field's value.
func (s *LaunchTemplateOverrides) SetSpotPrice(v string) *LaunchTemplateOverrides {
	s.SpotPrice = &v
	return s
}

// SetSubnetId sets the SubnetId field's value.
func (s *LaunchTemplateOverrides) SetSubnetId(v string) *LaunchTemplateOverrides {
	s.SubnetId = &v
	return s
}

// SetWeightedCapacity sets the WeightedCapacity field's value.
func (s *LaunchTemplateOverrides) SetWeightedCapacity(v float64) *LaunchTemplateOverrides {
	s.WeightedCapacity = &v
	return s
}

// Describes the placement of an instance.
//
// Response shape; LaunchTemplatePlacementRequest below is the request-side
// equivalent (same fields, no locationName tags).
type LaunchTemplatePlacement struct {
	_ struct{} `type:"structure"`

	// The affinity setting for the instance on the Dedicated Host.
	Affinity *string `locationName:"affinity" type:"string"`

	// The Availability Zone of the instance.
	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`

	// The name of the placement group for the instance.
	GroupName *string `locationName:"groupName" type:"string"`

	// The ID of the Dedicated Host for the instance.
	HostId *string `locationName:"hostId" type:"string"`

	// The ARN of the host resource group in which to launch the instances.
	HostResourceGroupArn *string `locationName:"hostResourceGroupArn" type:"string"`

	// The number of the partition the instance should launch in. Valid only if
	// the placement group strategy is set to partition.
	PartitionNumber *int64 `locationName:"partitionNumber" type:"integer"`

	// Reserved for future use.
	SpreadDomain *string `locationName:"spreadDomain" type:"string"`

	// The tenancy of the instance (if the instance is running in a VPC). An instance
	// with a tenancy of dedicated runs on single-tenant hardware.
	Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"`
}

// String returns the string representation
func (s LaunchTemplatePlacement) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplatePlacement) GoString() string {
	return s.String()
}

// SetAffinity sets the Affinity field's value.
func (s *LaunchTemplatePlacement) SetAffinity(v string) *LaunchTemplatePlacement {
	s.Affinity = &v
	return s
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
func (s *LaunchTemplatePlacement) SetAvailabilityZone(v string) *LaunchTemplatePlacement {
	s.AvailabilityZone = &v
	return s
}

// SetGroupName sets the GroupName field's value.
func (s *LaunchTemplatePlacement) SetGroupName(v string) *LaunchTemplatePlacement {
	s.GroupName = &v
	return s
}

// SetHostId sets the HostId field's value.
func (s *LaunchTemplatePlacement) SetHostId(v string) *LaunchTemplatePlacement {
	s.HostId = &v
	return s
}

// SetHostResourceGroupArn sets the HostResourceGroupArn field's value.
func (s *LaunchTemplatePlacement) SetHostResourceGroupArn(v string) *LaunchTemplatePlacement {
	s.HostResourceGroupArn = &v
	return s
}

// SetPartitionNumber sets the PartitionNumber field's value.
func (s *LaunchTemplatePlacement) SetPartitionNumber(v int64) *LaunchTemplatePlacement {
	s.PartitionNumber = &v
	return s
}

// SetSpreadDomain sets the SpreadDomain field's value.
func (s *LaunchTemplatePlacement) SetSpreadDomain(v string) *LaunchTemplatePlacement {
	s.SpreadDomain = &v
	return s
}

// SetTenancy sets the Tenancy field's value.
func (s *LaunchTemplatePlacement) SetTenancy(v string) *LaunchTemplatePlacement {
	s.Tenancy = &v
	return s
}

// Describes the placement of an instance.
type LaunchTemplatePlacementRequest struct {
	_ struct{} `type:"structure"`

	// The affinity setting for an instance on a Dedicated Host.
	Affinity *string `type:"string"`

	// The Availability Zone for the instance.
	AvailabilityZone *string `type:"string"`

	// The name of the placement group for the instance.
	GroupName *string `type:"string"`

	// The ID of the Dedicated Host for the instance.
	HostId *string `type:"string"`

	// The ARN of the host resource group in which to launch the instances. If you
	// specify a host resource group ARN, omit the Tenancy parameter or set it to
	// host.
	HostResourceGroupArn *string `type:"string"`

	// The number of the partition the instance should launch in. Valid only if
	// the placement group strategy is set to partition.
	PartitionNumber *int64 `type:"integer"`

	// Reserved for future use.
	SpreadDomain *string `type:"string"`

	// The tenancy of the instance (if the instance is running in a VPC). An instance
	// with a tenancy of dedicated runs on single-tenant hardware.
	Tenancy *string `type:"string" enum:"Tenancy"`
}

// String returns the string representation
func (s LaunchTemplatePlacementRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplatePlacementRequest) GoString() string {
	return s.String()
}

// SetAffinity sets the Affinity field's value.
func (s *LaunchTemplatePlacementRequest) SetAffinity(v string) *LaunchTemplatePlacementRequest {
	s.Affinity = &v
	return s
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
func (s *LaunchTemplatePlacementRequest) SetAvailabilityZone(v string) *LaunchTemplatePlacementRequest {
	s.AvailabilityZone = &v
	return s
}

// SetGroupName sets the GroupName field's value.
func (s *LaunchTemplatePlacementRequest) SetGroupName(v string) *LaunchTemplatePlacementRequest {
	s.GroupName = &v
	return s
}

// SetHostId sets the HostId field's value.
func (s *LaunchTemplatePlacementRequest) SetHostId(v string) *LaunchTemplatePlacementRequest {
	s.HostId = &v
	return s
}

// SetHostResourceGroupArn sets the HostResourceGroupArn field's value.
func (s *LaunchTemplatePlacementRequest) SetHostResourceGroupArn(v string) *LaunchTemplatePlacementRequest {
	s.HostResourceGroupArn = &v
	return s
}

// SetPartitionNumber sets the PartitionNumber field's value.
func (s *LaunchTemplatePlacementRequest) SetPartitionNumber(v int64) *LaunchTemplatePlacementRequest {
	s.PartitionNumber = &v
	return s
}

// SetSpreadDomain sets the SpreadDomain field's value.
func (s *LaunchTemplatePlacementRequest) SetSpreadDomain(v string) *LaunchTemplatePlacementRequest {
	s.SpreadDomain = &v
	return s
}

// SetTenancy sets the Tenancy field's value.
func (s *LaunchTemplatePlacementRequest) SetTenancy(v string) *LaunchTemplatePlacementRequest {
	s.Tenancy = &v
	return s
}

// The launch template to use. You must specify either the launch template ID
// or launch template name in the request, but not both.
//
// NOTE(review): the ID/name mutual exclusion is enforced server-side, not by
// this SDK type — no client-side Validate method is generated for it.
type LaunchTemplateSpecification struct {
	_ struct{} `type:"structure"`

	// The ID of the launch template.
	LaunchTemplateId *string `type:"string"`

	// The name of the launch template.
	LaunchTemplateName *string `type:"string"`

	// The version number of the launch template.
	//
	// Default: The default version for the launch template.
	Version *string `type:"string"`
}

// String returns the string representation
func (s LaunchTemplateSpecification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateSpecification) GoString() string {
	return s.String()
}

// SetLaunchTemplateId sets the LaunchTemplateId field's value.
func (s *LaunchTemplateSpecification) SetLaunchTemplateId(v string) *LaunchTemplateSpecification {
	s.LaunchTemplateId = &v
	return s
}

// SetLaunchTemplateName sets the LaunchTemplateName field's value.
func (s *LaunchTemplateSpecification) SetLaunchTemplateName(v string) *LaunchTemplateSpecification {
	s.LaunchTemplateName = &v
	return s
}

// SetVersion sets the Version field's value.
func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecification {
	s.Version = &v
	return s
}

// The options for Spot Instances.
//
// Response shape; LaunchTemplateSpotMarketOptionsRequest below is the
// request-side equivalent.
type LaunchTemplateSpotMarketOptions struct {
	_ struct{} `type:"structure"`

	// The required duration for the Spot Instances (also known as Spot blocks),
	// in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
	// or 360).
	BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"`

	// The behavior when a Spot Instance is interrupted.
	InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"`

	// The maximum hourly price you're willing to pay for the Spot Instances.
	MaxPrice *string `locationName:"maxPrice" type:"string"`

	// The Spot Instance request type.
	SpotInstanceType *string `locationName:"spotInstanceType" type:"string" enum:"SpotInstanceType"`

	// The end date of the request. For a one-time request, the request remains
	// active until all instances launch, the request is canceled, or this date
	// is reached. If the request is persistent, it remains active until it is canceled
	// or this date and time is reached.
	ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"`
}

// String returns the string representation
func (s LaunchTemplateSpotMarketOptions) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateSpotMarketOptions) GoString() string {
	return s.String()
}

// SetBlockDurationMinutes sets the BlockDurationMinutes field's value.
func (s *LaunchTemplateSpotMarketOptions) SetBlockDurationMinutes(v int64) *LaunchTemplateSpotMarketOptions {
	s.BlockDurationMinutes = &v
	return s
}

// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value.
func (s *LaunchTemplateSpotMarketOptions) SetInstanceInterruptionBehavior(v string) *LaunchTemplateSpotMarketOptions {
	s.InstanceInterruptionBehavior = &v
	return s
}

// SetMaxPrice sets the MaxPrice field's value.
func (s *LaunchTemplateSpotMarketOptions) SetMaxPrice(v string) *LaunchTemplateSpotMarketOptions {
	s.MaxPrice = &v
	return s
}

// SetSpotInstanceType sets the SpotInstanceType field's value.
func (s *LaunchTemplateSpotMarketOptions) SetSpotInstanceType(v string) *LaunchTemplateSpotMarketOptions {
	s.SpotInstanceType = &v
	return s
}

// SetValidUntil sets the ValidUntil field's value.
func (s *LaunchTemplateSpotMarketOptions) SetValidUntil(v time.Time) *LaunchTemplateSpotMarketOptions {
	s.ValidUntil = &v
	return s
}

// The options for Spot Instances.
type LaunchTemplateSpotMarketOptionsRequest struct {
	_ struct{} `type:"structure"`

	// The required duration for the Spot Instances (also known as Spot blocks),
	// in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300,
	// or 360).
	BlockDurationMinutes *int64 `type:"integer"`

	// The behavior when a Spot Instance is interrupted. The default is terminate.
	InstanceInterruptionBehavior *string `type:"string" enum:"InstanceInterruptionBehavior"`

	// The maximum hourly price you're willing to pay for the Spot Instances.
	MaxPrice *string `type:"string"`

	// The Spot Instance request type.
	SpotInstanceType *string `type:"string" enum:"SpotInstanceType"`

	// The end date of the request. For a one-time request, the request remains
	// active until all instances launch, the request is canceled, or this date
	// is reached. If the request is persistent, it remains active until it is canceled
	// or this date and time is reached. The default end date is 7 days from the
	// current date.
	ValidUntil *time.Time `type:"timestamp"`
}

// String returns the string representation
func (s LaunchTemplateSpotMarketOptionsRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateSpotMarketOptionsRequest) GoString() string {
	return s.String()
}

// SetBlockDurationMinutes sets the BlockDurationMinutes field's value.
func (s *LaunchTemplateSpotMarketOptionsRequest) SetBlockDurationMinutes(v int64) *LaunchTemplateSpotMarketOptionsRequest {
	s.BlockDurationMinutes = &v
	return s
}

// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value.
func (s *LaunchTemplateSpotMarketOptionsRequest) SetInstanceInterruptionBehavior(v string) *LaunchTemplateSpotMarketOptionsRequest {
	s.InstanceInterruptionBehavior = &v
	return s
}

// SetMaxPrice sets the MaxPrice field's value.
func (s *LaunchTemplateSpotMarketOptionsRequest) SetMaxPrice(v string) *LaunchTemplateSpotMarketOptionsRequest {
	s.MaxPrice = &v
	return s
}

// SetSpotInstanceType sets the SpotInstanceType field's value.
func (s *LaunchTemplateSpotMarketOptionsRequest) SetSpotInstanceType(v string) *LaunchTemplateSpotMarketOptionsRequest {
	s.SpotInstanceType = &v
	return s
}

// SetValidUntil sets the ValidUntil field's value.
func (s *LaunchTemplateSpotMarketOptionsRequest) SetValidUntil(v time.Time) *LaunchTemplateSpotMarketOptionsRequest {
	s.ValidUntil = &v
	return s
}

// The tag specification for the launch template.
type LaunchTemplateTagSpecification struct {
	_ struct{} `type:"structure"`

	// The type of resource.
	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`

	// The tags for the resource.
	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s LaunchTemplateTagSpecification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateTagSpecification) GoString() string {
	return s.String()
}

// SetResourceType sets the ResourceType field's value.
func (s *LaunchTemplateTagSpecification) SetResourceType(v string) *LaunchTemplateTagSpecification {
	s.ResourceType = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *LaunchTemplateTagSpecification) SetTags(v []*Tag) *LaunchTemplateTagSpecification {
	s.Tags = v
	return s
}

// The tags specification for the launch template.
type LaunchTemplateTagSpecificationRequest struct {
	_ struct{} `type:"structure"`

	// The type of resource to tag. Currently, the resource types that support tagging
	// on creation are instance and volume. To tag a resource after it has been
	// created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
	ResourceType *string `type:"string" enum:"ResourceType"`

	// The tags to apply to the resource.
	Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s LaunchTemplateTagSpecificationRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateTagSpecificationRequest) GoString() string {
	return s.String()
}

// SetResourceType sets the ResourceType field's value.
func (s *LaunchTemplateTagSpecificationRequest) SetResourceType(v string) *LaunchTemplateTagSpecificationRequest {
	s.ResourceType = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *LaunchTemplateTagSpecificationRequest) SetTags(v []*Tag) *LaunchTemplateTagSpecificationRequest {
	s.Tags = v
	return s
}

// Describes a launch template version.
type LaunchTemplateVersion struct {
	_ struct{} `type:"structure"`

	// The time the version was created.
	CreateTime *time.Time `locationName:"createTime" type:"timestamp"`

	// The principal that created the version.
	CreatedBy *string `locationName:"createdBy" type:"string"`

	// Indicates whether the version is the default version.
	DefaultVersion *bool `locationName:"defaultVersion" type:"boolean"`

	// Information about the launch template.
	LaunchTemplateData *ResponseLaunchTemplateData `locationName:"launchTemplateData" type:"structure"`

	// The ID of the launch template.
	LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"`

	// The name of the launch template.
	LaunchTemplateName *string `locationName:"launchTemplateName" min:"3" type:"string"`

	// The description for the version.
	VersionDescription *string `locationName:"versionDescription" type:"string"`

	// The version number.
	VersionNumber *int64 `locationName:"versionNumber" type:"long"`
}

// String returns the string representation
func (s LaunchTemplateVersion) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplateVersion) GoString() string {
	return s.String()
}

// SetCreateTime sets the CreateTime field's value.
func (s *LaunchTemplateVersion) SetCreateTime(v time.Time) *LaunchTemplateVersion {
	s.CreateTime = &v
	return s
}

// SetCreatedBy sets the CreatedBy field's value.
func (s *LaunchTemplateVersion) SetCreatedBy(v string) *LaunchTemplateVersion {
	s.CreatedBy = &v
	return s
}

// SetDefaultVersion sets the DefaultVersion field's value.
func (s *LaunchTemplateVersion) SetDefaultVersion(v bool) *LaunchTemplateVersion {
	s.DefaultVersion = &v
	return s
}

// SetLaunchTemplateData sets the LaunchTemplateData field's value.
func (s *LaunchTemplateVersion) SetLaunchTemplateData(v *ResponseLaunchTemplateData) *LaunchTemplateVersion {
	s.LaunchTemplateData = v
	return s
}

// SetLaunchTemplateId sets the LaunchTemplateId field's value.
func (s *LaunchTemplateVersion) SetLaunchTemplateId(v string) *LaunchTemplateVersion {
	s.LaunchTemplateId = &v
	return s
}

// SetLaunchTemplateName sets the LaunchTemplateName field's value.
func (s *LaunchTemplateVersion) SetLaunchTemplateName(v string) *LaunchTemplateVersion {
	s.LaunchTemplateName = &v
	return s
}

// SetVersionDescription sets the VersionDescription field's value.
func (s *LaunchTemplateVersion) SetVersionDescription(v string) *LaunchTemplateVersion {
	s.VersionDescription = &v
	return s
}

// SetVersionNumber sets the VersionNumber field's value.
func (s *LaunchTemplateVersion) SetVersionNumber(v int64) *LaunchTemplateVersion {
	s.VersionNumber = &v
	return s
}

// Describes the monitoring for the instance.
type LaunchTemplatesMonitoring struct {
	_ struct{} `type:"structure"`

	// Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring
	// is enabled.
	Enabled *bool `locationName:"enabled" type:"boolean"`
}

// String returns the string representation
func (s LaunchTemplatesMonitoring) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplatesMonitoring) GoString() string {
	return s.String()
}

// SetEnabled sets the Enabled field's value and returns the receiver so
// setter calls can be chained.
func (s *LaunchTemplatesMonitoring) SetEnabled(v bool) *LaunchTemplatesMonitoring {
	s.Enabled = &v
	return s
}

// Describes the monitoring for the instance.
type LaunchTemplatesMonitoringRequest struct {
	_ struct{} `type:"structure"`

	// Specify true to enable detailed monitoring. Otherwise, basic monitoring is
	// enabled.
	Enabled *bool `type:"boolean"`
}

// String returns the string representation
func (s LaunchTemplatesMonitoringRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LaunchTemplatesMonitoringRequest) GoString() string {
	return s.String()
}

// SetEnabled sets the Enabled field's value and returns the receiver so
// setter calls can be chained.
func (s *LaunchTemplatesMonitoringRequest) SetEnabled(v bool) *LaunchTemplatesMonitoringRequest {
	s.Enabled = &v
	return s
}

// Describes a license configuration.
type LicenseConfiguration struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the license configuration.
	LicenseConfigurationArn *string `locationName:"licenseConfigurationArn" type:"string"`
}

// String returns the string representation
func (s LicenseConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LicenseConfiguration) GoString() string {
	return s.String()
}

// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value.
func (s *LicenseConfiguration) SetLicenseConfigurationArn(v string) *LicenseConfiguration {
	s.LicenseConfigurationArn = &v
	return s
}

// Describes a license configuration.
type LicenseConfigurationRequest struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the license configuration.
	LicenseConfigurationArn *string `type:"string"`
}

// String returns the string representation
func (s LicenseConfigurationRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LicenseConfigurationRequest) GoString() string {
	return s.String()
}

// SetLicenseConfigurationArn sets the LicenseConfigurationArn field's value
// and returns the receiver so setter calls can be chained.
func (s *LicenseConfigurationRequest) SetLicenseConfigurationArn(v string) *LicenseConfigurationRequest {
	s.LicenseConfigurationArn = &v
	return s
}

// Describes the Classic Load Balancers and target groups to attach to a Spot
// Fleet request.
type LoadBalancersConfig struct {
	_ struct{} `type:"structure"`

	// The Classic Load Balancers.
	ClassicLoadBalancersConfig *ClassicLoadBalancersConfig `locationName:"classicLoadBalancersConfig" type:"structure"`

	// The target groups.
	TargetGroupsConfig *TargetGroupsConfig `locationName:"targetGroupsConfig" type:"structure"`
}

// String returns the string representation
func (s LoadBalancersConfig) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LoadBalancersConfig) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LoadBalancersConfig) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LoadBalancersConfig"}
	// Delegate to each nested config's own Validate; nested failures are
	// recorded under this struct's context via AddNested.
	if s.ClassicLoadBalancersConfig != nil {
		if err := s.ClassicLoadBalancersConfig.Validate(); err != nil {
			invalidParams.AddNested("ClassicLoadBalancersConfig", err.(request.ErrInvalidParams))
		}
	}
	if s.TargetGroupsConfig != nil {
		if err := s.TargetGroupsConfig.Validate(); err != nil {
			invalidParams.AddNested("TargetGroupsConfig", err.(request.ErrInvalidParams))
		}
	}

	// Return nil (not an empty ErrInvalidParams) when nothing failed, so
	// callers can use a plain err != nil check.
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClassicLoadBalancersConfig sets the ClassicLoadBalancersConfig field's value.
func (s *LoadBalancersConfig) SetClassicLoadBalancersConfig(v *ClassicLoadBalancersConfig) *LoadBalancersConfig {
	s.ClassicLoadBalancersConfig = v
	return s
}

// SetTargetGroupsConfig sets the TargetGroupsConfig field's value.
func (s *LoadBalancersConfig) SetTargetGroupsConfig(v *TargetGroupsConfig) *LoadBalancersConfig {
	s.TargetGroupsConfig = v
	return s
}

// Describes a load permission.
type LoadPermission struct {
	_ struct{} `type:"structure"`

	// The name of the group.
	Group *string `locationName:"group" type:"string" enum:"PermissionGroup"`

	// The AWS account ID.
	UserId *string `locationName:"userId" type:"string"`
}

// String returns the string representation
func (s LoadPermission) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LoadPermission) GoString() string {
	return s.String()
}

// SetGroup sets the Group field's value.
func (s *LoadPermission) SetGroup(v string) *LoadPermission {
	s.Group = &v
	return s
}

// SetUserId sets the UserId field's value.
func (s *LoadPermission) SetUserId(v string) *LoadPermission {
	s.UserId = &v
	return s
}

// Describes modifications to the load permissions of an Amazon FPGA image (AFI).
+type LoadPermissionModifications struct { + _ struct{} `type:"structure"` + + // The load permissions to add. + Add []*LoadPermissionRequest `locationNameList:"item" type:"list"` + + // The load permissions to remove. + Remove []*LoadPermissionRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LoadPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionModifications) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *LoadPermissionModifications) SetAdd(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Add = v + return s +} + +// SetRemove sets the Remove field's value. +func (s *LoadPermissionModifications) SetRemove(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Remove = v + return s +} + +// Describes a load permission. +type LoadPermissionRequest struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s LoadPermissionRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionRequest) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermissionRequest) SetGroup(v string) *LoadPermissionRequest { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermissionRequest) SetUserId(v string) *LoadPermissionRequest { + s.UserId = &v + return s +} + +// Describes a local gateway. +type LocalGateway struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The Amazon Resource Name (ARN) of the Outpost. 
+ OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The AWS account ID that owns the local gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the local gateway. + State *string `locationName:"state" type:"string"` + + // The tags assigned to the local gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LocalGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGateway) GoString() string { + return s.String() +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGateway) SetLocalGatewayId(v string) *LocalGateway { + s.LocalGatewayId = &v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *LocalGateway) SetOutpostArn(v string) *LocalGateway { + s.OutpostArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGateway) SetOwnerId(v string) *LocalGateway { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *LocalGateway) SetState(v string) *LocalGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LocalGateway) SetTags(v []*Tag) *LocalGateway { + s.Tags = v + return s +} + +// Describes a route for a local gateway route table. +type LocalGatewayRoute struct { + _ struct{} `type:"structure"` + + // The CIDR block used for destination matches. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + + // The ID of the virtual interface group. 
+ LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + + // The AWS account ID that owns the local gateway route. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the route. + State *string `locationName:"state" type:"string" enum:"LocalGatewayRouteState"` + + // The route type. + Type *string `locationName:"type" type:"string" enum:"LocalGatewayRouteType"` +} + +// String returns the string representation +func (s LocalGatewayRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayRoute) GoString() string { + return s.String() +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *LocalGatewayRoute) SetDestinationCidrBlock(v string) *LocalGatewayRoute { + s.DestinationCidrBlock = &v + return s +} + +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRoute) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRoute { + s.LocalGatewayRouteTableArn = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *LocalGatewayRoute) SetLocalGatewayRouteTableId(v string) *LocalGatewayRoute { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceGroupId sets the LocalGatewayVirtualInterfaceGroupId field's value. +func (s *LocalGatewayRoute) SetLocalGatewayVirtualInterfaceGroupId(v string) *LocalGatewayRoute { + s.LocalGatewayVirtualInterfaceGroupId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRoute) SetOwnerId(v string) *LocalGatewayRoute { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *LocalGatewayRoute) SetState(v string) *LocalGatewayRoute { + s.State = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *LocalGatewayRoute) SetType(v string) *LocalGatewayRoute { + s.Type = &v + return s +} + +// Describes a local gateway route table. +type LocalGatewayRouteTable struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The AWS account ID that owns the local gateway route table. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the local gateway route table. + State *string `locationName:"state" type:"string"` + + // The tags assigned to the local gateway route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LocalGatewayRouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayRouteTable) GoString() string { + return s.String() +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGatewayRouteTable) SetLocalGatewayId(v string) *LocalGatewayRouteTable { + s.LocalGatewayId = &v + return s +} + +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTable { + s.LocalGatewayRouteTableArn = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. 
+func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTable { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *LocalGatewayRouteTable) SetOutpostArn(v string) *LocalGatewayRouteTable { + s.OutpostArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTable) SetOwnerId(v string) *LocalGatewayRouteTable { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *LocalGatewayRouteTable) SetState(v string) *LocalGatewayRouteTable { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LocalGatewayRouteTable) SetTags(v []*Tag) *LocalGatewayRouteTable { + s.Tags = v + return s +} + +// Describes an association between a local gateway route table and a virtual +// interface group. +type LocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The Amazon Resource Name (ARN) of the local gateway route table for the virtual + // interface group. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + + // The ID of the association. + LocalGatewayRouteTableVirtualInterfaceGroupAssociationId *string `locationName:"localGatewayRouteTableVirtualInterfaceGroupAssociationId" type:"string"` + + // The ID of the virtual interface group. + LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + + // The AWS account ID that owns the local gateway virtual interface group association. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the association. 
+ State *string `locationName:"state" type:"string"` + + // The tags assigned to the association. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LocalGatewayRouteTableVirtualInterfaceGroupAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayRouteTableVirtualInterfaceGroupAssociation) GoString() string { + return s.String() +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayId = &v + return s +} + +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetLocalGatewayRouteTableVirtualInterfaceGroupAssociationId sets the LocalGatewayRouteTableVirtualInterfaceGroupAssociationId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableVirtualInterfaceGroupAssociationId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayRouteTableVirtualInterfaceGroupAssociationId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceGroupId sets the LocalGatewayVirtualInterfaceGroupId field's value. 
+func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayVirtualInterfaceGroupId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayVirtualInterfaceGroupId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetState(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetTags(v []*Tag) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.Tags = v + return s +} + +// Describes an association between a local gateway route table and a VPC. +type LocalGatewayRouteTableVpcAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The Amazon Resource Name (ARN) of the local gateway route table for the association. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + + // The ID of the local gateway route table. + LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + + // The ID of the association. + LocalGatewayRouteTableVpcAssociationId *string `locationName:"localGatewayRouteTableVpcAssociationId" type:"string"` + + // The AWS account ID that owns the local gateway route table for the association. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the association. + State *string `locationName:"state" type:"string"` + + // The tags assigned to the association. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s LocalGatewayRouteTableVpcAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayRouteTableVpcAssociation) GoString() string { + return s.String() +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayId(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayId = &v + return s +} + +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetLocalGatewayRouteTableVpcAssociationId sets the LocalGatewayRouteTableVpcAssociationId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableVpcAssociationId(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayRouteTableVpcAssociationId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVpcAssociation { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetState(v string) *LocalGatewayRouteTableVpcAssociation { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *LocalGatewayRouteTableVpcAssociation) SetTags(v []*Tag) *LocalGatewayRouteTableVpcAssociation { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetVpcId(v string) *LocalGatewayRouteTableVpcAssociation { + s.VpcId = &v + return s +} + +// Describes a local gateway virtual interface. +type LocalGatewayVirtualInterface struct { + _ struct{} `type:"structure"` + + // The local address. + LocalAddress *string `locationName:"localAddress" type:"string"` + + // The Border Gateway Protocol (BGP) Autonomous System Number (ASN) of the local + // gateway. + LocalBgpAsn *int64 `locationName:"localBgpAsn" type:"integer"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The ID of the virtual interface. + LocalGatewayVirtualInterfaceId *string `locationName:"localGatewayVirtualInterfaceId" type:"string"` + + // The AWS account ID that owns the local gateway virtual interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The peer address. + PeerAddress *string `locationName:"peerAddress" type:"string"` + + // The peer BGP ASN. + PeerBgpAsn *int64 `locationName:"peerBgpAsn" type:"integer"` + + // The tags assigned to the virtual interface. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VLAN. + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s LocalGatewayVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayVirtualInterface) GoString() string { + return s.String() +} + +// SetLocalAddress sets the LocalAddress field's value. +func (s *LocalGatewayVirtualInterface) SetLocalAddress(v string) *LocalGatewayVirtualInterface { + s.LocalAddress = &v + return s +} + +// SetLocalBgpAsn sets the LocalBgpAsn field's value. 
+func (s *LocalGatewayVirtualInterface) SetLocalBgpAsn(v int64) *LocalGatewayVirtualInterface { + s.LocalBgpAsn = &v + return s +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGatewayVirtualInterface) SetLocalGatewayId(v string) *LocalGatewayVirtualInterface { + s.LocalGatewayId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceId sets the LocalGatewayVirtualInterfaceId field's value. +func (s *LocalGatewayVirtualInterface) SetLocalGatewayVirtualInterfaceId(v string) *LocalGatewayVirtualInterface { + s.LocalGatewayVirtualInterfaceId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterface) SetOwnerId(v string) *LocalGatewayVirtualInterface { + s.OwnerId = &v + return s +} + +// SetPeerAddress sets the PeerAddress field's value. +func (s *LocalGatewayVirtualInterface) SetPeerAddress(v string) *LocalGatewayVirtualInterface { + s.PeerAddress = &v + return s +} + +// SetPeerBgpAsn sets the PeerBgpAsn field's value. +func (s *LocalGatewayVirtualInterface) SetPeerBgpAsn(v int64) *LocalGatewayVirtualInterface { + s.PeerBgpAsn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LocalGatewayVirtualInterface) SetTags(v []*Tag) *LocalGatewayVirtualInterface { + s.Tags = v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *LocalGatewayVirtualInterface) SetVlan(v int64) *LocalGatewayVirtualInterface { + s.Vlan = &v + return s +} + +// Describes a local gateway virtual interface group. +type LocalGatewayVirtualInterfaceGroup struct { + _ struct{} `type:"structure"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The ID of the virtual interface group. + LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + + // The IDs of the virtual interfaces. 
+ LocalGatewayVirtualInterfaceIds []*string `locationName:"localGatewayVirtualInterfaceIdSet" locationNameList:"item" type:"list"` + + // The AWS account ID that owns the local gateway virtual interface group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The tags assigned to the virtual interface group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LocalGatewayVirtualInterfaceGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocalGatewayVirtualInterfaceGroup) GoString() string { + return s.String() +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetLocalGatewayId(v string) *LocalGatewayVirtualInterfaceGroup { + s.LocalGatewayId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceGroupId sets the LocalGatewayVirtualInterfaceGroupId field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetLocalGatewayVirtualInterfaceGroupId(v string) *LocalGatewayVirtualInterfaceGroup { + s.LocalGatewayVirtualInterfaceGroupId = &v + return s +} + +// SetLocalGatewayVirtualInterfaceIds sets the LocalGatewayVirtualInterfaceIds field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetLocalGatewayVirtualInterfaceIds(v []*string) *LocalGatewayVirtualInterfaceGroup { + s.LocalGatewayVirtualInterfaceIds = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetOwnerId(v string) *LocalGatewayVirtualInterfaceGroup { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetTags(v []*Tag) *LocalGatewayVirtualInterfaceGroup { + s.Tags = v + return s +} + +// Describes a managed prefix list. +type ManagedPrefixList struct { + _ struct{} `type:"structure"` + + // The IP address version. 
+ AddressFamily *string `locationName:"addressFamily" type:"string"` + + // The maximum number of entries for the prefix list. + MaxEntries *int64 `locationName:"maxEntries" type:"integer"` + + // The ID of the owner of the prefix list. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The Amazon Resource Name (ARN) for the prefix list. + PrefixListArn *string `locationName:"prefixListArn" min:"1" type:"string"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix list. + PrefixListName *string `locationName:"prefixListName" type:"string"` + + // The state of the prefix list. + State *string `locationName:"state" type:"string" enum:"PrefixListState"` + + // The state message. + StateMessage *string `locationName:"stateMessage" type:"string"` + + // The tags for the prefix list. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The version of the prefix list. + Version *int64 `locationName:"version" type:"long"` +} + +// String returns the string representation +func (s ManagedPrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPrefixList) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *ManagedPrefixList) SetAddressFamily(v string) *ManagedPrefixList { + s.AddressFamily = &v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *ManagedPrefixList) SetMaxEntries(v int64) *ManagedPrefixList { + s.MaxEntries = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *ManagedPrefixList) SetOwnerId(v string) *ManagedPrefixList { + s.OwnerId = &v + return s +} + +// SetPrefixListArn sets the PrefixListArn field's value. 
+func (s *ManagedPrefixList) SetPrefixListArn(v string) *ManagedPrefixList { + s.PrefixListArn = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ManagedPrefixList) SetPrefixListId(v string) *ManagedPrefixList { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ManagedPrefixList) SetPrefixListName(v string) *ManagedPrefixList { + s.PrefixListName = &v + return s +} + +// SetState sets the State field's value. +func (s *ManagedPrefixList) SetState(v string) *ManagedPrefixList { + s.State = &v + return s +} + +// SetStateMessage sets the StateMessage field's value. +func (s *ManagedPrefixList) SetStateMessage(v string) *ManagedPrefixList { + s.StateMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ManagedPrefixList) SetTags(v []*Tag) *ManagedPrefixList { + s.Tags = v + return s +} + +// SetVersion sets the Version field's value. +func (s *ManagedPrefixList) SetVersion(v int64) *ManagedPrefixList { + s.Version = &v + return s +} + +// Describes the memory for the instance type. +type MemoryInfo struct { + _ struct{} `type:"structure"` + + // The size of the memory, in MiB. + SizeInMiB *int64 `locationName:"sizeInMiB" type:"long"` +} + +// String returns the string representation +func (s MemoryInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MemoryInfo) GoString() string { + return s.String() +} + +// SetSizeInMiB sets the SizeInMiB field's value. +func (s *MemoryInfo) SetSizeInMiB(v int64) *MemoryInfo { + s.SizeInMiB = &v + return s +} + +type ModifyAvailabilityZoneGroupInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name of the Availability Zone group, Local Zone group, or Wavelength + // Zone group. + // + // GroupName is a required field + GroupName *string `type:"string" required:"true"` + + // Indicates whether you are opted in to the Local Zone group or Wavelength + // Zone group. The only valid value is opted-in. You must contact AWS Support + // (https://console.aws.amazon.com/support/home#/case/create%3FissueType=customer-service%26serviceCode=general-info%26getting-started%26categoryCode=using-aws%26services) + // to opt out of a Local Zone group, or Wavelength Zone group. + // + // OptInStatus is a required field + OptInStatus *string `type:"string" required:"true" enum:"ModifyAvailabilityZoneOptInStatus"` +} + +// String returns the string representation +func (s ModifyAvailabilityZoneGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyAvailabilityZoneGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyAvailabilityZoneGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyAvailabilityZoneGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.OptInStatus == nil { + invalidParams.Add(request.NewErrParamRequired("OptInStatus")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyAvailabilityZoneGroupInput) SetDryRun(v bool) *ModifyAvailabilityZoneGroupInput { + s.DryRun = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *ModifyAvailabilityZoneGroupInput) SetGroupName(v string) *ModifyAvailabilityZoneGroupInput { + s.GroupName = &v + return s +} + +// SetOptInStatus sets the OptInStatus field's value. 
+func (s *ModifyAvailabilityZoneGroupInput) SetOptInStatus(v string) *ModifyAvailabilityZoneGroupInput { + s.OptInStatus = &v + return s +} + +type ModifyAvailabilityZoneGroupOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyAvailabilityZoneGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyAvailabilityZoneGroupOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyAvailabilityZoneGroupOutput) SetReturn(v bool) *ModifyAvailabilityZoneGroupOutput { + s.Return = &v + return s +} + +type ModifyCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The date and time at which the Capacity Reservation expires. When a Capacity + // Reservation expires, the reserved capacity is released and you can no longer + // launch instances into it. The Capacity Reservation's state changes to expired + // when it reaches its end date and time. + // + // The Capacity Reservation is cancelled within an hour from the specified time. + // For example, if you specify 5/31/2019, 13:30:55, the Capacity Reservation + // is guaranteed to end between 13:30:55 and 14:30:55 on 5/31/2019. + // + // You must provide an EndDate value if EndDateType is limited. Omit EndDate + // if EndDateType is unlimited. 
+ EndDate *time.Time `type:"timestamp"` + + // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation + // can have one of the following end types: + // + // * unlimited - The Capacity Reservation remains active until you explicitly + // cancel it. Do not provide an EndDate value if EndDateType is unlimited. + // + // * limited - The Capacity Reservation expires automatically at a specified + // date and time. You must provide an EndDate value if EndDateType is limited. + EndDateType *string `type:"string" enum:"EndDateType"` + + // The number of instances for which to reserve capacity. + InstanceCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ModifyCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyCapacityReservationInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *ModifyCapacityReservationInput) SetCapacityReservationId(v string) *ModifyCapacityReservationInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyCapacityReservationInput) SetDryRun(v bool) *ModifyCapacityReservationInput { + s.DryRun = &v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *ModifyCapacityReservationInput) SetEndDate(v time.Time) *ModifyCapacityReservationInput { + s.EndDate = &v + return s +} + +// SetEndDateType sets the EndDateType field's value. 
+func (s *ModifyCapacityReservationInput) SetEndDateType(v string) *ModifyCapacityReservationInput { + s.EndDateType = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *ModifyCapacityReservationInput) SetInstanceCount(v int64) *ModifyCapacityReservationInput { + s.InstanceCount = &v + return s +} + +type ModifyCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyCapacityReservationOutput) SetReturn(v bool) *ModifyCapacityReservationOutput { + s.Return = &v + return s +} + +type ModifyClientVpnEndpointInput struct { + _ struct{} `type:"structure"` + + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + + // The ID of the Client VPN endpoint to modify. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // Information about the client connection logging options. + // + // If you enable client connection logging, data about client connections is + // sent to a Cloudwatch Logs log stream. The following information is logged: + // + // * Client connection requests + // + // * Client connection results (successful and unsuccessful) + // + // * Reasons for unsuccessful client connection requests + // + // * Client connection termination time + ConnectionLogOptions *ConnectionLogOptions `type:"structure"` + + // A brief description of the Client VPN endpoint. 
+ Description *string `type:"string"` + + // Information about the DNS servers to be used by Client VPN connections. A + // Client VPN endpoint can have up to two DNS servers. + DnsServers *DnsServersOptionsModifyStructure `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more security groups to apply to the target network. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + // Specify whether to enable the self-service portal for the Client VPN endpoint. + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + + // The ARN of the server certificate to be used. The server certificate must + // be provisioned in AWS Certificate Manager (ACM). + ServerCertificateArn *string `type:"string"` + + // Indicates whether the VPN is split-tunnel. + // + // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client + // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) + // in the AWS Client VPN Administrator Guide. + SplitTunnel *bool `type:"boolean"` + + // The ID of the VPC to associate with the Client VPN endpoint. + VpcId *string `type:"string"` + + // The port number to assign to the Client VPN endpoint for TCP and UDP traffic. 
+ // + // Valid Values: 443 | 1194 + // + // Default Value: 443 + VpnPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s ModifyClientVpnEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClientVpnEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyClientVpnEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyClientVpnEndpointInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ModifyClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *ModifyClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *ModifyClientVpnEndpointInput) SetClientVpnEndpointId(v string) *ModifyClientVpnEndpointInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetConnectionLogOptions sets the ConnectionLogOptions field's value. +func (s *ModifyClientVpnEndpointInput) SetConnectionLogOptions(v *ConnectionLogOptions) *ModifyClientVpnEndpointInput { + s.ConnectionLogOptions = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyClientVpnEndpointInput) SetDescription(v string) *ModifyClientVpnEndpointInput { + s.Description = &v + return s +} + +// SetDnsServers sets the DnsServers field's value. +func (s *ModifyClientVpnEndpointInput) SetDnsServers(v *DnsServersOptionsModifyStructure) *ModifyClientVpnEndpointInput { + s.DnsServers = v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ModifyClientVpnEndpointInput) SetDryRun(v bool) *ModifyClientVpnEndpointInput { + s.DryRun = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *ModifyClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *ModifyClientVpnEndpointInput { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *ModifyClientVpnEndpointInput) SetSelfServicePortal(v string) *ModifyClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + +// SetServerCertificateArn sets the ServerCertificateArn field's value. +func (s *ModifyClientVpnEndpointInput) SetServerCertificateArn(v string) *ModifyClientVpnEndpointInput { + s.ServerCertificateArn = &v + return s +} + +// SetSplitTunnel sets the SplitTunnel field's value. +func (s *ModifyClientVpnEndpointInput) SetSplitTunnel(v bool) *ModifyClientVpnEndpointInput { + s.SplitTunnel = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyClientVpnEndpointInput) SetVpcId(v string) *ModifyClientVpnEndpointInput { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *ModifyClientVpnEndpointInput) SetVpnPort(v int64) *ModifyClientVpnEndpointInput { + s.VpnPort = &v + return s +} + +type ModifyClientVpnEndpointOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyClientVpnEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyClientVpnEndpointOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *ModifyClientVpnEndpointOutput) SetReturn(v bool) *ModifyClientVpnEndpointOutput { + s.Return = &v + return s +} + +type ModifyDefaultCreditSpecificationInput struct { + _ struct{} `type:"structure"` + + // The credit option for CPU usage of the instance family. + // + // Valid Values: standard | unlimited + // + // CpuCredits is a required field + CpuCredits *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance family. + // + // InstanceFamily is a required field + InstanceFamily *string `type:"string" required:"true" enum:"UnlimitedSupportedInstanceFamily"` +} + +// String returns the string representation +func (s ModifyDefaultCreditSpecificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDefaultCreditSpecificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDefaultCreditSpecificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDefaultCreditSpecificationInput"} + if s.CpuCredits == nil { + invalidParams.Add(request.NewErrParamRequired("CpuCredits")) + } + if s.InstanceFamily == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceFamily")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCpuCredits sets the CpuCredits field's value. +func (s *ModifyDefaultCreditSpecificationInput) SetCpuCredits(v string) *ModifyDefaultCreditSpecificationInput { + s.CpuCredits = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ModifyDefaultCreditSpecificationInput) SetDryRun(v bool) *ModifyDefaultCreditSpecificationInput { + s.DryRun = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *ModifyDefaultCreditSpecificationInput) SetInstanceFamily(v string) *ModifyDefaultCreditSpecificationInput { + s.InstanceFamily = &v + return s +} + +type ModifyDefaultCreditSpecificationOutput struct { + _ struct{} `type:"structure"` + + // The default credit option for CPU usage of the instance family. + InstanceFamilyCreditSpecification *InstanceFamilyCreditSpecification `locationName:"instanceFamilyCreditSpecification" type:"structure"` +} + +// String returns the string representation +func (s ModifyDefaultCreditSpecificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDefaultCreditSpecificationOutput) GoString() string { + return s.String() +} + +// SetInstanceFamilyCreditSpecification sets the InstanceFamilyCreditSpecification field's value. +func (s *ModifyDefaultCreditSpecificationOutput) SetInstanceFamilyCreditSpecification(v *InstanceFamilyCreditSpecification) *ModifyDefaultCreditSpecificationOutput { + s.InstanceFamilyCreditSpecification = v + return s +} + +type ModifyEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The identifier of the AWS Key Management Service (AWS KMS) customer master + // key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, + // your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted + // state must be true. 
+ // + // You can specify the CMK using any of the following: + // + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Key alias. For example, alias/ExampleAlias. + // + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. + // + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // + // AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, + // alias, or ARN that is not valid, the action can appear to complete, but eventually + // fails. + // + // Amazon EBS does not support asymmetric CMKs. + // + // KmsKeyId is a required field + KmsKeyId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyEbsDefaultKmsKeyIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyEbsDefaultKmsKeyIdInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *ModifyEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ModifyEbsDefaultKmsKeyIdInput) SetKmsKeyId(v string) *ModifyEbsDefaultKmsKeyIdInput { + s.KmsKeyId = &v + return s +} + +type ModifyEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for encryption by default. 
+ KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s ModifyEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ModifyEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ModifyEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + +type ModifyFleetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Indicates whether running instances should be terminated if the total target + // capacity of the EC2 Fleet is decreased below the current size of the EC2 + // Fleet. + ExcessCapacityTerminationPolicy *string `type:"string" enum:"FleetExcessCapacityTerminationPolicy"` + + // The ID of the EC2 Fleet. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // The launch template and overrides. + LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + + // The size of the EC2 Fleet. + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure"` +} + +// String returns the string representation +func (s ModifyFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFleetInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TargetCapacitySpecification != nil { + if err := s.TargetCapacitySpecification.Validate(); err != nil { + invalidParams.AddNested("TargetCapacitySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFleetInput) SetDryRun(v bool) *ModifyFleetInput { + s.DryRun = &v + return s +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *ModifyFleetInput) SetExcessCapacityTerminationPolicy(v string) *ModifyFleetInput { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *ModifyFleetInput) SetFleetId(v string) *ModifyFleetInput { + s.FleetId = &v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifyFleetInput) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfigRequest) *ModifyFleetInput { + s.LaunchTemplateConfigs = v + return s +} + +// SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. +func (s *ModifyFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *ModifyFleetInput { + s.TargetCapacitySpecification = v + return s +} + +type ModifyFleetOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFleetOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyFleetOutput) SetReturn(v bool) *ModifyFleetOutput { + s.Return = &v + return s +} + +type ModifyFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `type:"string" enum:"FpgaImageAttributeName"` + + // A description for the AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` + + // The load permission for the AFI. + LoadPermission *LoadPermissionModifications `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // The product codes. After you add a product code to an AFI, it can't be removed. + // This parameter is valid only when modifying the productCodes attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // The user groups. This parameter is valid only when modifying the loadPermission + // attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // The AWS account IDs. This parameter is valid only when modifying the loadPermission + // attribute. 
+ UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyFpgaImageAttributeInput) SetAttribute(v string) *ModifyFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyFpgaImageAttributeInput) SetDescription(v string) *ModifyFpgaImageAttributeInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFpgaImageAttributeInput) SetDryRun(v bool) *ModifyFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ModifyFpgaImageAttributeInput) SetFpgaImageId(v string) *ModifyFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// SetLoadPermission sets the LoadPermission field's value. +func (s *ModifyFpgaImageAttributeInput) SetLoadPermission(v *LoadPermissionModifications) *ModifyFpgaImageAttributeInput { + s.LoadPermission = v + return s +} + +// SetName sets the Name field's value. +func (s *ModifyFpgaImageAttributeInput) SetName(v string) *ModifyFpgaImageAttributeInput { + s.Name = &v + return s +} + +// SetOperationType sets the OperationType field's value. 
+func (s *ModifyFpgaImageAttributeInput) SetOperationType(v string) *ModifyFpgaImageAttributeInput { + s.OperationType = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *ModifyFpgaImageAttributeInput) SetProductCodes(v []*string) *ModifyFpgaImageAttributeInput { + s.ProductCodes = v + return s +} + +// SetUserGroups sets the UserGroups field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserGroups(v []*string) *ModifyFpgaImageAttributeInput { + s.UserGroups = v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserIds(v []*string) *ModifyFpgaImageAttributeInput { + s.UserIds = v + return s +} + +type ModifyFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. +func (s *ModifyFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *ModifyFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + +type ModifyHostsInput struct { + _ struct{} `type:"structure"` + + // Specify whether to enable or disable auto-placement. + AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` + + // The IDs of the Dedicated Hosts to modify. + // + // HostIds is a required field + HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"` + + // Indicates whether to enable or disable host recovery for the Dedicated Host. 
+ // For more information, see Host Recovery (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-recovery.html) + // in the Amazon Elastic Compute Cloud User Guide. + HostRecovery *string `type:"string" enum:"HostRecovery"` + + // Specifies the instance family to be supported by the Dedicated Host. Specify + // this parameter to modify a Dedicated Host to support multiple instance types + // within its current instance family. + // + // If you want to modify a Dedicated Host to support a specific instance type + // only, omit this parameter and specify InstanceType instead. You cannot specify + // InstanceFamily and InstanceType in the same request. + InstanceFamily *string `type:"string"` + + // Specifies the instance type to be supported by the Dedicated Host. Specify + // this parameter to modify a Dedicated Host to support only a specific instance + // type. + // + // If you want to modify a Dedicated Host to support multiple instance types + // in its current instance family, omit this parameter and specify InstanceFamily + // instead. You cannot specify InstanceType and InstanceFamily in the same request. + InstanceType *string `type:"string"` +} + +// String returns the string representation +func (s ModifyHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyHostsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyHostsInput"} + if s.HostIds == nil { + invalidParams.Add(request.NewErrParamRequired("HostIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoPlacement sets the AutoPlacement field's value. 
+func (s *ModifyHostsInput) SetAutoPlacement(v string) *ModifyHostsInput { + s.AutoPlacement = &v + return s +} + +// SetHostIds sets the HostIds field's value. +func (s *ModifyHostsInput) SetHostIds(v []*string) *ModifyHostsInput { + s.HostIds = v + return s +} + +// SetHostRecovery sets the HostRecovery field's value. +func (s *ModifyHostsInput) SetHostRecovery(v string) *ModifyHostsInput { + s.HostRecovery = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *ModifyHostsInput) SetInstanceFamily(v string) *ModifyHostsInput { + s.InstanceFamily = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ModifyHostsInput) SetInstanceType(v string) *ModifyHostsInput { + s.InstanceType = &v + return s +} + +type ModifyHostsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the Dedicated Hosts that were successfully modified. + Successful []*string `locationName:"successful" locationNameList:"item" type:"list"` + + // The IDs of the Dedicated Hosts that could not be modified. Check whether + // the setting you requested can be used. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ModifyHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyHostsOutput) GoString() string { + return s.String() +} + +// SetSuccessful sets the Successful field's value. +func (s *ModifyHostsOutput) SetSuccessful(v []*string) *ModifyHostsOutput { + s.Successful = v + return s +} + +// SetUnsuccessful sets the Unsuccessful field's value. 
+func (s *ModifyHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ModifyHostsOutput { + s.Unsuccessful = v + return s +} + +type ModifyIdFormatInput struct { + _ struct{} `type:"structure"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | route-table + // | route-table-association | security-group | subnet | subnet-cidr-block-association + // | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection + // | vpn-connection | vpn-gateway. + // + // Alternatively, use the all-current option to include all resource types that + // are currently within their opt-in period for longer IDs. + // + // Resource is a required field + Resource *string `type:"string" required:"true"` + + // Indicate whether the resource should use longer IDs (17-character IDs). + // + // UseLongIds is a required field + UseLongIds *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ModifyIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyIdFormatInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.UseLongIds == nil { + invalidParams.Add(request.NewErrParamRequired("UseLongIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResource sets the Resource field's value. 
+func (s *ModifyIdFormatInput) SetResource(v string) *ModifyIdFormatInput { + s.Resource = &v + return s +} + +// SetUseLongIds sets the UseLongIds field's value. +func (s *ModifyIdFormatInput) SetUseLongIds(v bool) *ModifyIdFormatInput { + s.UseLongIds = &v + return s +} + +type ModifyIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdFormatOutput) GoString() string { + return s.String() +} + +type ModifyIdentityIdFormatInput struct { + _ struct{} `type:"structure"` + + // The ARN of the principal, which can be an IAM user, IAM role, or the root + // user. Specify all to modify the ID format for all IAM users, IAM roles, and + // the root user of the account. + // + // PrincipalArn is a required field + PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` + + // The type of resource: bundle | conversion-task | customer-gateway | dhcp-options + // | elastic-ip-allocation | elastic-ip-association | export-task | flow-log + // | image | import-task | internet-gateway | network-acl | network-acl-association + // | network-interface | network-interface-attachment | prefix-list | route-table + // | route-table-association | security-group | subnet | subnet-cidr-block-association + // | vpc | vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection + // | vpn-connection | vpn-gateway. + // + // Alternatively, use the all-current option to include all resource types that + // are currently within their opt-in period for longer IDs. 
+ // + // Resource is a required field + Resource *string `locationName:"resource" type:"string" required:"true"` + + // Indicates whether the resource should use longer IDs (17-character IDs) + // + // UseLongIds is a required field + UseLongIds *bool `locationName:"useLongIds" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ModifyIdentityIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdentityIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.UseLongIds == nil { + invalidParams.Add(request.NewErrParamRequired("UseLongIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *ModifyIdentityIdFormatInput) SetPrincipalArn(v string) *ModifyIdentityIdFormatInput { + s.PrincipalArn = &v + return s +} + +// SetResource sets the Resource field's value. +func (s *ModifyIdentityIdFormatInput) SetResource(v string) *ModifyIdentityIdFormatInput { + s.Resource = &v + return s +} + +// SetUseLongIds sets the UseLongIds field's value. 
+func (s *ModifyIdentityIdFormatInput) SetUseLongIds(v bool) *ModifyIdentityIdFormatInput { + s.UseLongIds = &v + return s +} + +type ModifyIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdentityIdFormatOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyImageAttribute. +type ModifyImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute to modify. The valid values are description, launchPermission, + // and productCodes. + Attribute *string `type:"string"` + + // A new description for the AMI. + Description *AttributeValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the AMI. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // A new launch permission for the AMI. + LaunchPermission *LaunchPermissionModifications `type:"structure"` + + // The operation type. This parameter can be used only when the Attribute parameter + // is launchPermission. + OperationType *string `type:"string" enum:"OperationType"` + + // The DevPay product codes. After you add a product code to an AMI, it can't + // be removed. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // The user groups. This parameter can be used only when the Attribute parameter + // is launchPermission. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // The AWS account IDs. 
This parameter can be used only when the Attribute parameter + // is launchPermission. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` + + // The value of the attribute being modified. This parameter can be used only + // when the Attribute parameter is description or productCodes. + Value *string `type:"string"` +} + +// String returns the string representation +func (s ModifyImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyImageAttributeInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyImageAttributeInput) SetAttribute(v string) *ModifyImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyImageAttributeInput) SetDescription(v *AttributeValue) *ModifyImageAttributeInput { + s.Description = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyImageAttributeInput) SetDryRun(v bool) *ModifyImageAttributeInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ModifyImageAttributeInput) SetImageId(v string) *ModifyImageAttributeInput { + s.ImageId = &v + return s +} + +// SetLaunchPermission sets the LaunchPermission field's value. +func (s *ModifyImageAttributeInput) SetLaunchPermission(v *LaunchPermissionModifications) *ModifyImageAttributeInput { + s.LaunchPermission = v + return s +} + +// SetOperationType sets the OperationType field's value. 
+func (s *ModifyImageAttributeInput) SetOperationType(v string) *ModifyImageAttributeInput { + s.OperationType = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *ModifyImageAttributeInput) SetProductCodes(v []*string) *ModifyImageAttributeInput { + s.ProductCodes = v + return s +} + +// SetUserGroups sets the UserGroups field's value. +func (s *ModifyImageAttributeInput) SetUserGroups(v []*string) *ModifyImageAttributeInput { + s.UserGroups = v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyImageAttributeInput) SetUserIds(v []*string) *ModifyImageAttributeInput { + s.UserIds = v + return s +} + +// SetValue sets the Value field's value. +func (s *ModifyImageAttributeInput) SetValue(v string) *ModifyImageAttributeInput { + s.Value = &v + return s +} + +type ModifyImageAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyImageAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstanceAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `locationName:"attribute" type:"string" enum:"InstanceAttributeName"` + + // Modifies the DeleteOnTermination attribute for volumes that are currently + // attached. The volume must be owned by the caller. If no value is specified + // for DeleteOnTermination, the default is true and the volume is deleted when + // the instance is terminated. + // + // To add instance store volumes to an Amazon EBS-backed instance, you must + // add them when you launch the instance. 
For more information, see Updating + // the block device mapping when launching an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // in the Amazon Elastic Compute Cloud User Guide. + BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // If the value is true, you can't terminate the instance using the Amazon EC2 + // console, CLI, or API; otherwise, you can. You cannot use this parameter for + // Spot Instances. + DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Specifies whether the instance is optimized for Amazon EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + + // Set to true to enable enhanced networking with ENA for the instance. + // + // This option is supported only for HVM instances. Specifying this option with + // a PV instance can make it unreachable. + EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + + // [EC2-VPC] Changes the security groups of the instance. You must specify at + // least one security group, even if it's just the default security group for + // the VPC. You must specify the security group ID, not the security group name. 
+ Groups []*string `locationName:"GroupId" locationNameList:"groupId" type:"list"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // Specifies whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` + + // Changes the instance type to the specified value. For more information, see + // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. + InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` + + // Changes the instance's kernel to the specified value. We recommend that you + // use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Kernel *AttributeValue `locationName:"kernel" type:"structure"` + + // Changes the instance's RAM disk to the specified value. We recommend that + // you use PV-GRUB instead of kernels and RAM disks. For more information, see + // PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html). + Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` + + // Specifies whether source/destination checking is enabled. A value of true + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. + SourceDestCheck *AttributeBooleanValue `type:"structure"` + + // Set to simple to enable enhanced networking with the Intel 82599 Virtual + // Function interface for the instance. 
+ // + // There is no way to disable enhanced networking with the Intel 82599 Virtual + // Function interface at this time. + // + // This option is supported only for HVM instances. Specifying this option with + // a PV instance can make it unreachable. + SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` + + // Changes the instance's user data to the specified value. If you are using + // an AWS SDK or command line tool, base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide base64-encoded + // text. + UserData *BlobAttributeValue `locationName:"userData" type:"structure"` + + // A new value for the attribute. Use only with the kernel, ramdisk, userData, + // disableApiTermination, or instanceInitiatedShutdownBehavior attribute. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceAttributeInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyInstanceAttributeInput) SetAttribute(v string) *ModifyInstanceAttributeInput { + s.Attribute = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. 
+func (s *ModifyInstanceAttributeInput) SetBlockDeviceMappings(v []*InstanceBlockDeviceMappingSpecification) *ModifyInstanceAttributeInput { + s.BlockDeviceMappings = v + return s +} + +// SetDisableApiTermination sets the DisableApiTermination field's value. +func (s *ModifyInstanceAttributeInput) SetDisableApiTermination(v *AttributeBooleanValue) *ModifyInstanceAttributeInput { + s.DisableApiTermination = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyInstanceAttributeInput) SetDryRun(v bool) *ModifyInstanceAttributeInput { + s.DryRun = &v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *ModifyInstanceAttributeInput) SetEbsOptimized(v *AttributeBooleanValue) *ModifyInstanceAttributeInput { + s.EbsOptimized = v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *ModifyInstanceAttributeInput) SetEnaSupport(v *AttributeBooleanValue) *ModifyInstanceAttributeInput { + s.EnaSupport = v + return s +} + +// SetGroups sets the Groups field's value. +func (s *ModifyInstanceAttributeInput) SetGroups(v []*string) *ModifyInstanceAttributeInput { + s.Groups = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ModifyInstanceAttributeInput) SetInstanceId(v string) *ModifyInstanceAttributeInput { + s.InstanceId = &v + return s +} + +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value. +func (s *ModifyInstanceAttributeInput) SetInstanceInitiatedShutdownBehavior(v *AttributeValue) *ModifyInstanceAttributeInput { + s.InstanceInitiatedShutdownBehavior = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ModifyInstanceAttributeInput) SetInstanceType(v *AttributeValue) *ModifyInstanceAttributeInput { + s.InstanceType = v + return s +} + +// SetKernel sets the Kernel field's value. 
+func (s *ModifyInstanceAttributeInput) SetKernel(v *AttributeValue) *ModifyInstanceAttributeInput { + s.Kernel = v + return s +} + +// SetRamdisk sets the Ramdisk field's value. +func (s *ModifyInstanceAttributeInput) SetRamdisk(v *AttributeValue) *ModifyInstanceAttributeInput { + s.Ramdisk = v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *ModifyInstanceAttributeInput) SetSourceDestCheck(v *AttributeBooleanValue) *ModifyInstanceAttributeInput { + s.SourceDestCheck = v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *ModifyInstanceAttributeInput) SetSriovNetSupport(v *AttributeValue) *ModifyInstanceAttributeInput { + s.SriovNetSupport = v + return s +} + +// SetUserData sets the UserData field's value. +func (s *ModifyInstanceAttributeInput) SetUserData(v *BlobAttributeValue) *ModifyInstanceAttributeInput { + s.UserData = v + return s +} + +// SetValue sets the Value field's value. +func (s *ModifyInstanceAttributeInput) SetValue(v string) *ModifyInstanceAttributeInput { + s.Value = &v + return s +} + +type ModifyInstanceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceAttributeOutput) GoString() string { + return s.String() +} + +type ModifyInstanceCapacityReservationAttributesInput struct { + _ struct{} `type:"structure"` + + // Information about the Capacity Reservation targeting option. + // + // CapacityReservationSpecification is a required field + CapacityReservationSpecification *CapacityReservationSpecification `type:"structure" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance to be modified. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyInstanceCapacityReservationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceCapacityReservationAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceCapacityReservationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceCapacityReservationAttributesInput"} + if s.CapacityReservationSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationSpecification")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationSpecification sets the CapacityReservationSpecification field's value. +func (s *ModifyInstanceCapacityReservationAttributesInput) SetCapacityReservationSpecification(v *CapacityReservationSpecification) *ModifyInstanceCapacityReservationAttributesInput { + s.CapacityReservationSpecification = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyInstanceCapacityReservationAttributesInput) SetDryRun(v bool) *ModifyInstanceCapacityReservationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *ModifyInstanceCapacityReservationAttributesInput) SetInstanceId(v string) *ModifyInstanceCapacityReservationAttributesInput { + s.InstanceId = &v + return s +} + +type ModifyInstanceCapacityReservationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyInstanceCapacityReservationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceCapacityReservationAttributesOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyInstanceCapacityReservationAttributesOutput) SetReturn(v bool) *ModifyInstanceCapacityReservationAttributesOutput { + s.Return = &v + return s +} + +type ModifyInstanceCreditSpecificationInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive token that you provide to ensure idempotency of + // your modification request. For more information, see Ensuring Idempotency + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the credit option for CPU usage. 
+ // + // InstanceCreditSpecifications is a required field + InstanceCreditSpecifications []*InstanceCreditSpecificationRequest `locationName:"InstanceCreditSpecification" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyInstanceCreditSpecificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceCreditSpecificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceCreditSpecificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceCreditSpecificationInput"} + if s.InstanceCreditSpecifications == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCreditSpecifications")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ModifyInstanceCreditSpecificationInput) SetClientToken(v string) *ModifyInstanceCreditSpecificationInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyInstanceCreditSpecificationInput) SetDryRun(v bool) *ModifyInstanceCreditSpecificationInput { + s.DryRun = &v + return s +} + +// SetInstanceCreditSpecifications sets the InstanceCreditSpecifications field's value. +func (s *ModifyInstanceCreditSpecificationInput) SetInstanceCreditSpecifications(v []*InstanceCreditSpecificationRequest) *ModifyInstanceCreditSpecificationInput { + s.InstanceCreditSpecifications = v + return s +} + +type ModifyInstanceCreditSpecificationOutput struct { + _ struct{} `type:"structure"` + + // Information about the instances whose credit option for CPU usage was successfully + // modified. 
+ SuccessfulInstanceCreditSpecifications []*SuccessfulInstanceCreditSpecificationItem `locationName:"successfulInstanceCreditSpecificationSet" locationNameList:"item" type:"list"` + + // Information about the instances whose credit option for CPU usage was not + // modified. + UnsuccessfulInstanceCreditSpecifications []*UnsuccessfulInstanceCreditSpecificationItem `locationName:"unsuccessfulInstanceCreditSpecificationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ModifyInstanceCreditSpecificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceCreditSpecificationOutput) GoString() string { + return s.String() +} + +// SetSuccessfulInstanceCreditSpecifications sets the SuccessfulInstanceCreditSpecifications field's value. +func (s *ModifyInstanceCreditSpecificationOutput) SetSuccessfulInstanceCreditSpecifications(v []*SuccessfulInstanceCreditSpecificationItem) *ModifyInstanceCreditSpecificationOutput { + s.SuccessfulInstanceCreditSpecifications = v + return s +} + +// SetUnsuccessfulInstanceCreditSpecifications sets the UnsuccessfulInstanceCreditSpecifications field's value. +func (s *ModifyInstanceCreditSpecificationOutput) SetUnsuccessfulInstanceCreditSpecifications(v []*UnsuccessfulInstanceCreditSpecificationItem) *ModifyInstanceCreditSpecificationOutput { + s.UnsuccessfulInstanceCreditSpecifications = v + return s +} + +type ModifyInstanceEventStartTimeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the event whose date and time you are modifying. 
+ // + // InstanceEventId is a required field + InstanceEventId *string `type:"string" required:"true"` + + // The ID of the instance with the scheduled event. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The new date and time when the event will take place. + // + // NotBefore is a required field + NotBefore *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s ModifyInstanceEventStartTimeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceEventStartTimeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceEventStartTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceEventStartTimeInput"} + if s.InstanceEventId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceEventId")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.NotBefore == nil { + invalidParams.Add(request.NewErrParamRequired("NotBefore")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyInstanceEventStartTimeInput) SetDryRun(v bool) *ModifyInstanceEventStartTimeInput { + s.DryRun = &v + return s +} + +// SetInstanceEventId sets the InstanceEventId field's value. +func (s *ModifyInstanceEventStartTimeInput) SetInstanceEventId(v string) *ModifyInstanceEventStartTimeInput { + s.InstanceEventId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ModifyInstanceEventStartTimeInput) SetInstanceId(v string) *ModifyInstanceEventStartTimeInput { + s.InstanceId = &v + return s +} + +// SetNotBefore sets the NotBefore field's value. 
+func (s *ModifyInstanceEventStartTimeInput) SetNotBefore(v time.Time) *ModifyInstanceEventStartTimeInput { + s.NotBefore = &v + return s +} + +type ModifyInstanceEventStartTimeOutput struct { + _ struct{} `type:"structure"` + + // Describes a scheduled event for an instance. + Event *InstanceStatusEvent `locationName:"event" type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceEventStartTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceEventStartTimeOutput) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *ModifyInstanceEventStartTimeOutput) SetEvent(v *InstanceStatusEvent) *ModifyInstanceEventStartTimeOutput { + s.Event = v + return s +} + +type ModifyInstanceMetadataOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the existing state is maintained. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. If + // no parameter is specified, the existing state is maintained. + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `type:"integer"` + + // The state of token usage for your instance metadata requests. 
If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credential + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens *string `type:"string" enum:"HttpTokensState"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyInstanceMetadataOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceMetadataOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceMetadataOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceMetadataOptionsInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyInstanceMetadataOptionsInput) SetDryRun(v bool) *ModifyInstanceMetadataOptionsInput { + s.DryRun = &v + return s +} + +// SetHttpEndpoint sets the HttpEndpoint field's value. 
+func (s *ModifyInstanceMetadataOptionsInput) SetHttpEndpoint(v string) *ModifyInstanceMetadataOptionsInput { + s.HttpEndpoint = &v + return s +} + +// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value. +func (s *ModifyInstanceMetadataOptionsInput) SetHttpPutResponseHopLimit(v int64) *ModifyInstanceMetadataOptionsInput { + s.HttpPutResponseHopLimit = &v + return s +} + +// SetHttpTokens sets the HttpTokens field's value. +func (s *ModifyInstanceMetadataOptionsInput) SetHttpTokens(v string) *ModifyInstanceMetadataOptionsInput { + s.HttpTokens = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ModifyInstanceMetadataOptionsInput) SetInstanceId(v string) *ModifyInstanceMetadataOptionsInput { + s.InstanceId = &v + return s +} + +type ModifyInstanceMetadataOptionsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The metadata options for the instance. + InstanceMetadataOptions *InstanceMetadataOptionsResponse `locationName:"instanceMetadataOptions" type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceMetadataOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceMetadataOptionsOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ModifyInstanceMetadataOptionsOutput) SetInstanceId(v string) *ModifyInstanceMetadataOptionsOutput { + s.InstanceId = &v + return s +} + +// SetInstanceMetadataOptions sets the InstanceMetadataOptions field's value. 
+func (s *ModifyInstanceMetadataOptionsOutput) SetInstanceMetadataOptions(v *InstanceMetadataOptionsResponse) *ModifyInstanceMetadataOptionsOutput { + s.InstanceMetadataOptions = v + return s +} + +type ModifyInstancePlacementInput struct { + _ struct{} `type:"structure"` + + // The affinity setting for the instance. + Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"` + + // The name of the placement group in which to place the instance. For spread + // placement groups, the instance must have a tenancy of default. For cluster + // and partition placement groups, the instance must have a tenancy of default + // or dedicated. + // + // To remove an instance from a placement group, specify an empty string (""). + GroupName *string `type:"string"` + + // The ID of the Dedicated Host with which to associate the instance. + HostId *string `locationName:"hostId" type:"string"` + + // The ARN of the host resource group in which to place the instance. + HostResourceGroupArn *string `type:"string"` + + // The ID of the instance that you are modifying. + // + // InstanceId is a required field + InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + + // Reserved for future use. + PartitionNumber *int64 `type:"integer"` + + // The tenancy for the instance. + Tenancy *string `locationName:"tenancy" type:"string" enum:"HostTenancy"` +} + +// String returns the string representation +func (s ModifyInstancePlacementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstancePlacementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyInstancePlacementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstancePlacementInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAffinity sets the Affinity field's value. +func (s *ModifyInstancePlacementInput) SetAffinity(v string) *ModifyInstancePlacementInput { + s.Affinity = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *ModifyInstancePlacementInput) SetGroupName(v string) *ModifyInstancePlacementInput { + s.GroupName = &v + return s +} + +// SetHostId sets the HostId field's value. +func (s *ModifyInstancePlacementInput) SetHostId(v string) *ModifyInstancePlacementInput { + s.HostId = &v + return s +} + +// SetHostResourceGroupArn sets the HostResourceGroupArn field's value. +func (s *ModifyInstancePlacementInput) SetHostResourceGroupArn(v string) *ModifyInstancePlacementInput { + s.HostResourceGroupArn = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ModifyInstancePlacementInput) SetInstanceId(v string) *ModifyInstancePlacementInput { + s.InstanceId = &v + return s +} + +// SetPartitionNumber sets the PartitionNumber field's value. +func (s *ModifyInstancePlacementInput) SetPartitionNumber(v int64) *ModifyInstancePlacementInput { + s.PartitionNumber = &v + return s +} + +// SetTenancy sets the Tenancy field's value. +func (s *ModifyInstancePlacementInput) SetTenancy(v string) *ModifyInstancePlacementInput { + s.Tenancy = &v + return s +} + +type ModifyInstancePlacementOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyInstancePlacementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstancePlacementOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyInstancePlacementOutput) SetReturn(v bool) *ModifyInstancePlacementOutput { + s.Return = &v + return s +} + +type ModifyLaunchTemplateInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraint: Maximum 128 ASCII characters. + ClientToken *string `type:"string"` + + // The version number of the launch template to set as the default version. + DefaultVersion *string `locationName:"SetDefaultVersion" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the launch template. You must specify either the launch template + // ID or launch template name in the request. + LaunchTemplateId *string `type:"string"` + + // The name of the launch template. You must specify either the launch template + // ID or launch template name in the request. 
+ LaunchTemplateName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ModifyLaunchTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLaunchTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyLaunchTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyLaunchTemplateInput"} + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ModifyLaunchTemplateInput) SetClientToken(v string) *ModifyLaunchTemplateInput { + s.ClientToken = &v + return s +} + +// SetDefaultVersion sets the DefaultVersion field's value. +func (s *ModifyLaunchTemplateInput) SetDefaultVersion(v string) *ModifyLaunchTemplateInput { + s.DefaultVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyLaunchTemplateInput) SetDryRun(v bool) *ModifyLaunchTemplateInput { + s.DryRun = &v + return s +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *ModifyLaunchTemplateInput) SetLaunchTemplateId(v string) *ModifyLaunchTemplateInput { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *ModifyLaunchTemplateInput) SetLaunchTemplateName(v string) *ModifyLaunchTemplateInput { + s.LaunchTemplateName = &v + return s +} + +type ModifyLaunchTemplateOutput struct { + _ struct{} `type:"structure"` + + // Information about the launch template. 
+ LaunchTemplate *LaunchTemplate `locationName:"launchTemplate" type:"structure"` +} + +// String returns the string representation +func (s ModifyLaunchTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLaunchTemplateOutput) GoString() string { + return s.String() +} + +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *ModifyLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *ModifyLaunchTemplateOutput { + s.LaunchTemplate = v + return s +} + +type ModifyManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // One or more entries to add to the prefix list. + AddEntries []*AddPrefixListEntry `locationName:"AddEntry" type:"list"` + + // The current version of the prefix list. + CurrentVersion *int64 `type:"long"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // A name for the prefix list. + PrefixListName *string `type:"string"` + + // One or more entries to remove from the prefix list. + RemoveEntries []*RemovePrefixListEntry `locationName:"RemoveEntry" type:"list"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (m *ModifyManagedPrefixListInput) Validate() error {
+	errs := request.ErrInvalidParams{Context: "ModifyManagedPrefixListInput"}
+	// PrefixListId is the only required scalar field on this input.
+	if m.PrefixListId == nil {
+		errs.Add(request.NewErrParamRequired("PrefixListId"))
+	}
+	// Recursively validate each non-nil entry; ranging over a nil slice is a no-op.
+	for i, entry := range m.AddEntries {
+		if entry == nil {
+			continue
+		}
+		if err := entry.Validate(); err != nil {
+			errs.AddNested(fmt.Sprintf("%s[%v]", "AddEntries", i), err.(request.ErrInvalidParams))
+		}
+	}
+	for i, entry := range m.RemoveEntries {
+		if entry == nil {
+			continue
+		}
+		if err := entry.Validate(); err != nil {
+			errs.AddNested(fmt.Sprintf("%s[%v]", "RemoveEntries", i), err.(request.ErrInvalidParams))
+		}
+	}
+
+	if errs.Len() > 0 {
+		return errs
+	}
+	return nil
+}
+
+// SetAddEntries sets the AddEntries field's value.
+func (m *ModifyManagedPrefixListInput) SetAddEntries(v []*AddPrefixListEntry) *ModifyManagedPrefixListInput {
+	m.AddEntries = v
+	return m
+}
+
+// SetCurrentVersion sets the CurrentVersion field's value.
+func (m *ModifyManagedPrefixListInput) SetCurrentVersion(v int64) *ModifyManagedPrefixListInput {
+	m.CurrentVersion = &v
+	return m
+}
+
+// SetDryRun sets the DryRun field's value.
+func (m *ModifyManagedPrefixListInput) SetDryRun(v bool) *ModifyManagedPrefixListInput {
+	m.DryRun = &v
+	return m
+}
+
+// SetPrefixListId sets the PrefixListId field's value.
+func (m *ModifyManagedPrefixListInput) SetPrefixListId(v string) *ModifyManagedPrefixListInput {
+	m.PrefixListId = &v
+	return m
+}
+
+// SetPrefixListName sets the PrefixListName field's value.
+func (m *ModifyManagedPrefixListInput) SetPrefixListName(v string) *ModifyManagedPrefixListInput {
+	m.PrefixListName = &v
+	return m
+}
+
+// SetRemoveEntries sets the RemoveEntries field's value.
+func (s *ModifyManagedPrefixListInput) SetRemoveEntries(v []*RemovePrefixListEntry) *ModifyManagedPrefixListInput { + s.RemoveEntries = v + return s +} + +type ModifyManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *ModifyManagedPrefixListOutput { + s.PrefixList = v + return s +} + +// Contains the parameters for ModifyNetworkInterfaceAttribute. +type ModifyNetworkInterfaceAttributeInput struct { + _ struct{} `type:"structure"` + + // Information about the interface attachment. If modifying the 'delete on termination' + // attribute, you must specify the ID of the interface attachment. + Attachment *NetworkInterfaceAttachmentChanges `locationName:"attachment" type:"structure"` + + // A description for the network interface. + Description *AttributeValue `locationName:"description" type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Changes the security groups for the network interface. The new set of groups + // you specify replaces the current set. You must specify at least one group, + // even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. 
+ Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` + + // Indicates whether source/destination checking is enabled. A value of true + // means checking is enabled, and false means checking is disabled. This value + // must be false for a NAT instance to perform NAT. For more information, see + // NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyNetworkInterfaceAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyNetworkInterfaceAttributeInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachment sets the Attachment field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetAttachment(v *NetworkInterfaceAttachmentChanges) *ModifyNetworkInterfaceAttributeInput { + s.Attachment = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetDescription(v *AttributeValue) *ModifyNetworkInterfaceAttributeInput { + s.Description = v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ModifyNetworkInterfaceAttributeInput) SetDryRun(v bool) *ModifyNetworkInterfaceAttributeInput { + s.DryRun = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetGroups(v []*string) *ModifyNetworkInterfaceAttributeInput { + s.Groups = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *ModifyNetworkInterfaceAttributeInput { + s.NetworkInterfaceId = &v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetSourceDestCheck(v *AttributeBooleanValue) *ModifyNetworkInterfaceAttributeInput { + s.SourceDestCheck = v + return s +} + +type ModifyNetworkInterfaceAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyNetworkInterfaceAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifyReservedInstances. +type ModifyReservedInstancesInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive token you provide to ensure idempotency of your + // modification request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The IDs of the Reserved Instances to modify. + // + // ReservedInstancesIds is a required field + ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` + + // The configuration settings for the Reserved Instances to modify. 
+ // + // TargetConfigurations is a required field + TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s ModifyReservedInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReservedInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReservedInstancesInput"} + if s.ReservedInstancesIds == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesIds")) + } + if s.TargetConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("TargetConfigurations")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ModifyReservedInstancesInput) SetClientToken(v string) *ModifyReservedInstancesInput { + s.ClientToken = &v + return s +} + +// SetReservedInstancesIds sets the ReservedInstancesIds field's value. +func (s *ModifyReservedInstancesInput) SetReservedInstancesIds(v []*string) *ModifyReservedInstancesInput { + s.ReservedInstancesIds = v + return s +} + +// SetTargetConfigurations sets the TargetConfigurations field's value. +func (s *ModifyReservedInstancesInput) SetTargetConfigurations(v []*ReservedInstancesConfiguration) *ModifyReservedInstancesInput { + s.TargetConfigurations = v + return s +} + +// Contains the output of ModifyReservedInstances. +type ModifyReservedInstancesOutput struct { + _ struct{} `type:"structure"` + + // The ID for the modification. 
+ ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` +} + +// String returns the string representation +func (s ModifyReservedInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReservedInstancesOutput) GoString() string { + return s.String() +} + +// SetReservedInstancesModificationId sets the ReservedInstancesModificationId field's value. +func (s *ModifyReservedInstancesOutput) SetReservedInstancesModificationId(v string) *ModifyReservedInstancesOutput { + s.ReservedInstancesModificationId = &v + return s +} + +type ModifySnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The snapshot attribute to modify. Only volume creation permissions can be + // modified. + Attribute *string `type:"string" enum:"SnapshotAttributeName"` + + // A JSON representation of the snapshot attribute modification. + CreateVolumePermission *CreateVolumePermissionModifications `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The group to modify for the snapshot. + GroupNames []*string `locationName:"UserGroup" locationNameList:"GroupName" type:"list"` + + // The type of operation to perform to the attribute. + OperationType *string `type:"string" enum:"OperationType"` + + // The ID of the snapshot. + // + // SnapshotId is a required field + SnapshotId *string `type:"string" required:"true"` + + // The account ID to modify for the snapshot. 
+ UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifySnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifySnapshotAttributeInput"} + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifySnapshotAttributeInput) SetAttribute(v string) *ModifySnapshotAttributeInput { + s.Attribute = &v + return s +} + +// SetCreateVolumePermission sets the CreateVolumePermission field's value. +func (s *ModifySnapshotAttributeInput) SetCreateVolumePermission(v *CreateVolumePermissionModifications) *ModifySnapshotAttributeInput { + s.CreateVolumePermission = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifySnapshotAttributeInput) SetDryRun(v bool) *ModifySnapshotAttributeInput { + s.DryRun = &v + return s +} + +// SetGroupNames sets the GroupNames field's value. +func (s *ModifySnapshotAttributeInput) SetGroupNames(v []*string) *ModifySnapshotAttributeInput { + s.GroupNames = v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ModifySnapshotAttributeInput) SetOperationType(v string) *ModifySnapshotAttributeInput { + s.OperationType = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *ModifySnapshotAttributeInput) SetSnapshotId(v string) *ModifySnapshotAttributeInput { + s.SnapshotId = &v + return s +} + +// SetUserIds sets the UserIds field's value. 
+func (s *ModifySnapshotAttributeInput) SetUserIds(v []*string) *ModifySnapshotAttributeInput { + s.UserIds = v + return s +} + +type ModifySnapshotAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifySnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySnapshotAttributeOutput) GoString() string { + return s.String() +} + +// Contains the parameters for ModifySpotFleetRequest. +type ModifySpotFleetRequestInput struct { + _ struct{} `type:"structure"` + + // Indicates whether running Spot Instances should be terminated if the target + // capacity of the Spot Fleet request is decreased below the current size of + // the Spot Fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The launch template and overrides. You can only use this parameter if you + // specified a launch template (LaunchTemplateConfigs) in your Spot Fleet request. + // If you specified LaunchSpecifications in your Spot Fleet request, then omit + // this parameter. + LaunchTemplateConfigs []*LaunchTemplateConfig `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + + // The number of On-Demand Instances in the fleet. + OnDemandTargetCapacity *int64 `type:"integer"` + + // The ID of the Spot Fleet request. + // + // SpotFleetRequestId is a required field + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string" required:"true"` + + // The size of the fleet. 
+ TargetCapacity *int64 `locationName:"targetCapacity" type:"integer"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifySpotFleetRequestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifySpotFleetRequestInput"} + if s.SpotFleetRequestId == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) + } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value. +func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v string) *ModifySpotFleetRequestInput { + s.ExcessCapacityTerminationPolicy = &v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifySpotFleetRequestInput) SetLaunchTemplateConfigs(v []*LaunchTemplateConfig) *ModifySpotFleetRequestInput { + s.LaunchTemplateConfigs = v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *ModifySpotFleetRequestInput) SetOnDemandTargetCapacity(v int64) *ModifySpotFleetRequestInput { + s.OnDemandTargetCapacity = &v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. 
+func (s *ModifySpotFleetRequestInput) SetSpotFleetRequestId(v string) *ModifySpotFleetRequestInput { + s.SpotFleetRequestId = &v + return s +} + +// SetTargetCapacity sets the TargetCapacity field's value. +func (s *ModifySpotFleetRequestInput) SetTargetCapacity(v int64) *ModifySpotFleetRequestInput { + s.TargetCapacity = &v + return s +} + +// Contains the output of ModifySpotFleetRequest. +type ModifySpotFleetRequestOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifySpotFleetRequestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySpotFleetRequestOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifySpotFleetRequestOutput) SetReturn(v bool) *ModifySpotFleetRequestOutput { + s.Return = &v + return s +} + +type ModifySubnetAttributeInput struct { + _ struct{} `type:"structure"` + + // Specify true to indicate that network interfaces created in the specified + // subnet should be assigned an IPv6 address. This includes a network interface + // that's created when launching an instance into the subnet (the instance therefore + // receives an IPv6 address). + // + // If you enable the IPv6 addressing feature for your subnet, your network interface + // or instance only receives an IPv6 address if it's created using version 2016-11-15 + // or later of the Amazon EC2 API. + AssignIpv6AddressOnCreation *AttributeBooleanValue `type:"structure"` + + // The customer-owned IPv4 address pool associated with the subnet. + // + // You must set this value when you specify true for MapCustomerOwnedIpOnLaunch. 
+ CustomerOwnedIpv4Pool *string `type:"string"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a customer-owned IPv4 address. + // + // When this value is true, you must specify the customer-owned IP pool using + // CustomerOwnedIpv4Pool. + MapCustomerOwnedIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a public IPv4 address. + MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // The ID of the subnet. + // + // SubnetId is a required field + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifySubnetAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifySubnetAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifySubnetAttributeInput"} + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssignIpv6AddressOnCreation sets the AssignIpv6AddressOnCreation field's value. +func (s *ModifySubnetAttributeInput) SetAssignIpv6AddressOnCreation(v *AttributeBooleanValue) *ModifySubnetAttributeInput { + s.AssignIpv6AddressOnCreation = v + return s +} + +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *ModifySubnetAttributeInput) SetCustomerOwnedIpv4Pool(v string) *ModifySubnetAttributeInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value. 
+func (s *ModifySubnetAttributeInput) SetMapCustomerOwnedIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { + s.MapCustomerOwnedIpOnLaunch = v + return s +} + +// SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value. +func (s *ModifySubnetAttributeInput) SetMapPublicIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { + s.MapPublicIpOnLaunch = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *ModifySubnetAttributeInput) SetSubnetId(v string) *ModifySubnetAttributeInput { + s.SubnetId = &v + return s +} + +type ModifySubnetAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifySubnetAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifySubnetAttributeOutput) GoString() string { + return s.String() +} + +type ModifyTrafficMirrorFilterNetworkServicesInput struct { + _ struct{} `type:"structure"` + + // The network service, for example Amazon DNS, that you want to mirror. + AddNetworkServices []*string `locationName:"AddNetworkService" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The network service, for example Amazon DNS, that you no longer want to mirror. + RemoveNetworkServices []*string `locationName:"RemoveNetworkService" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. 
+ // + // TrafficMirrorFilterId is a required field + TrafficMirrorFilterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorFilterNetworkServicesInput"} + if s.TrafficMirrorFilterId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddNetworkServices sets the AddNetworkServices field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetAddNetworkServices(v []*string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.AddNetworkServices = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetDryRun(v bool) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.DryRun = &v + return s +} + +// SetRemoveNetworkServices sets the RemoveNetworkServices field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetRemoveNetworkServices(v []*string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.RemoveNetworkServices = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. 
+func (s *ModifyTrafficMirrorFilterNetworkServicesInput) SetTrafficMirrorFilterId(v string) *ModifyTrafficMirrorFilterNetworkServicesInput { + s.TrafficMirrorFilterId = &v + return s +} + +type ModifyTrafficMirrorFilterNetworkServicesOutput struct { + _ struct{} `type:"structure"` + + // The Traffic Mirror filter that the network service is associated with. + TrafficMirrorFilter *TrafficMirrorFilter `locationName:"trafficMirrorFilter" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterNetworkServicesOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilter sets the TrafficMirrorFilter field's value. +func (s *ModifyTrafficMirrorFilterNetworkServicesOutput) SetTrafficMirrorFilter(v *TrafficMirrorFilter) *ModifyTrafficMirrorFilterNetworkServicesOutput { + s.TrafficMirrorFilter = v + return s +} + +type ModifyTrafficMirrorFilterRuleInput struct { + _ struct{} `type:"structure"` + + // The description to assign to the Traffic Mirror rule. + Description *string `type:"string"` + + // The destination CIDR block to assign to the Traffic Mirror rule. + DestinationCidrBlock *string `type:"string"` + + // The destination ports that are associated with the Traffic Mirror rule. + DestinationPortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The protocol, for example TCP, to assign to the Traffic Mirror rule. + Protocol *int64 `type:"integer"` + + // The properties that you want to remove from the Traffic Mirror filter rule. 
+ // + // When you remove a property from a Traffic Mirror filter rule, the property + // is set to the default. + RemoveFields []*string `locationName:"RemoveField" type:"list"` + + // The action to assign to the rule. + RuleAction *string `type:"string" enum:"TrafficMirrorRuleAction"` + + // The number of the Traffic Mirror rule. This number must be unique for each + // Traffic Mirror rule in a given direction. The rules are processed in ascending + // order by rule number. + RuleNumber *int64 `type:"integer"` + + // The source CIDR block to assign to the Traffic Mirror rule. + SourceCidrBlock *string `type:"string"` + + // The port range to assign to the Traffic Mirror rule. + SourcePortRange *TrafficMirrorPortRangeRequest `type:"structure"` + + // The type of traffic (ingress | egress) to assign to the rule. + TrafficDirection *string `type:"string" enum:"TrafficDirection"` + + // The ID of the Traffic Mirror rule. + // + // TrafficMirrorFilterRuleId is a required field + TrafficMirrorFilterRuleId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTrafficMirrorFilterRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorFilterRuleInput"} + if s.TrafficMirrorFilterRuleId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorFilterRuleId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
+func (m *ModifyTrafficMirrorFilterRuleInput) SetDescription(v string) *ModifyTrafficMirrorFilterRuleInput {
+	m.Description = &v
+	return m
+}
+
+// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetDestinationCidrBlock(v string) *ModifyTrafficMirrorFilterRuleInput {
+	m.DestinationCidrBlock = &v
+	return m
+}
+
+// SetDestinationPortRange sets the DestinationPortRange field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetDestinationPortRange(v *TrafficMirrorPortRangeRequest) *ModifyTrafficMirrorFilterRuleInput {
+	m.DestinationPortRange = v
+	return m
+}
+
+// SetDryRun sets the DryRun field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetDryRun(v bool) *ModifyTrafficMirrorFilterRuleInput {
+	m.DryRun = &v
+	return m
+}
+
+// SetProtocol sets the Protocol field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetProtocol(v int64) *ModifyTrafficMirrorFilterRuleInput {
+	m.Protocol = &v
+	return m
+}
+
+// SetRemoveFields sets the RemoveFields field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetRemoveFields(v []*string) *ModifyTrafficMirrorFilterRuleInput {
+	m.RemoveFields = v
+	return m
+}
+
+// SetRuleAction sets the RuleAction field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetRuleAction(v string) *ModifyTrafficMirrorFilterRuleInput {
+	m.RuleAction = &v
+	return m
+}
+
+// SetRuleNumber sets the RuleNumber field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetRuleNumber(v int64) *ModifyTrafficMirrorFilterRuleInput {
+	m.RuleNumber = &v
+	return m
+}
+
+// SetSourceCidrBlock sets the SourceCidrBlock field's value.
+func (m *ModifyTrafficMirrorFilterRuleInput) SetSourceCidrBlock(v string) *ModifyTrafficMirrorFilterRuleInput {
+	m.SourceCidrBlock = &v
+	return m
+}
+
+// SetSourcePortRange sets the SourcePortRange field's value.
+func (s *ModifyTrafficMirrorFilterRuleInput) SetSourcePortRange(v *TrafficMirrorPortRangeRequest) *ModifyTrafficMirrorFilterRuleInput { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetTrafficDirection(v string) *ModifyTrafficMirrorFilterRuleInput { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *ModifyTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterRuleId(v string) *ModifyTrafficMirrorFilterRuleInput { + s.TrafficMirrorFilterRuleId = &v + return s +} + +type ModifyTrafficMirrorFilterRuleOutput struct { + _ struct{} `type:"structure"` + + // Modifies a Traffic Mirror rule. + TrafficMirrorFilterRule *TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRule" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorFilterRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorFilterRuleOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorFilterRule sets the TrafficMirrorFilterRule field's value. +func (s *ModifyTrafficMirrorFilterRuleOutput) SetTrafficMirrorFilterRule(v *TrafficMirrorFilterRule) *ModifyTrafficMirrorFilterRuleOutput { + s.TrafficMirrorFilterRule = v + return s +} + +type ModifyTrafficMirrorSessionInput struct { + _ struct{} `type:"structure"` + + // The description to assign to the Traffic Mirror session. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of bytes in each packet to mirror. 
These are bytes after the VXLAN + // header. To mirror a subset, set this to the length (in bytes) to mirror. + // For example, if you set this value to 100, then the first 100 bytes that + // meet the filter criteria are copied to the target. Do not specify this parameter + // when you want to mirror the entire packet. + PacketLength *int64 `type:"integer"` + + // The properties that you want to remove from the Traffic Mirror session. + // + // When you remove a property from a Traffic Mirror session, the property is + // set to the default. + RemoveFields []*string `locationName:"RemoveField" type:"list"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + SessionNumber *int64 `type:"integer"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `type:"string"` + + // The ID of the Traffic Mirror session. + // + // TrafficMirrorSessionId is a required field + TrafficMirrorSessionId *string `type:"string" required:"true"` + + // The Traffic Mirror target. The target must be in the same VPC as the source, + // or have a VPC peering connection with the source. + TrafficMirrorTargetId *string `type:"string"` + + // The virtual network ID of the Traffic Mirror session. + VirtualNetworkId *int64 `type:"integer"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyTrafficMirrorSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTrafficMirrorSessionInput"} + if s.TrafficMirrorSessionId == nil { + invalidParams.Add(request.NewErrParamRequired("TrafficMirrorSessionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTrafficMirrorSessionInput) SetDescription(v string) *ModifyTrafficMirrorSessionInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTrafficMirrorSessionInput) SetDryRun(v bool) *ModifyTrafficMirrorSessionInput { + s.DryRun = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *ModifyTrafficMirrorSessionInput) SetPacketLength(v int64) *ModifyTrafficMirrorSessionInput { + s.PacketLength = &v + return s +} + +// SetRemoveFields sets the RemoveFields field's value. +func (s *ModifyTrafficMirrorSessionInput) SetRemoveFields(v []*string) *ModifyTrafficMirrorSessionInput { + s.RemoveFields = v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *ModifyTrafficMirrorSessionInput) SetSessionNumber(v int64) *ModifyTrafficMirrorSessionInput { + s.SessionNumber = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorFilterId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorSessionId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorSessionId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. 
+func (s *ModifyTrafficMirrorSessionInput) SetTrafficMirrorTargetId(v string) *ModifyTrafficMirrorSessionInput { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *ModifyTrafficMirrorSessionInput) SetVirtualNetworkId(v int64) *ModifyTrafficMirrorSessionInput { + s.VirtualNetworkId = &v + return s +} + +type ModifyTrafficMirrorSessionOutput struct { + _ struct{} `type:"structure"` + + // Information about the Traffic Mirror session. + TrafficMirrorSession *TrafficMirrorSession `locationName:"trafficMirrorSession" type:"structure"` +} + +// String returns the string representation +func (s ModifyTrafficMirrorSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTrafficMirrorSessionOutput) GoString() string { + return s.String() +} + +// SetTrafficMirrorSession sets the TrafficMirrorSession field's value. +func (s *ModifyTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMirrorSession) *ModifyTrafficMirrorSessionOutput { + s.TrafficMirrorSession = v + return s +} + +type ModifyTransitGatewayInput struct { + _ struct{} `type:"structure"` + + // The description for the transit gateway. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The options to modify. + Options *ModifyTransitGatewayOptions `type:"structure"` + + // The ID of the transit gateway. 
+ // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayInput"} + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTransitGatewayInput) SetDescription(v string) *ModifyTransitGatewayInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayInput) SetDryRun(v bool) *ModifyTransitGatewayInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *ModifyTransitGatewayInput) SetOptions(v *ModifyTransitGatewayOptions) *ModifyTransitGatewayInput { + s.Options = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *ModifyTransitGatewayInput) SetTransitGatewayId(v string) *ModifyTransitGatewayInput { + s.TransitGatewayId = &v + return s +} + +// The transit gateway options. +type ModifyTransitGatewayOptions struct { + _ struct{} `type:"structure"` + + // The ID of the default association route table. + AssociationDefaultRouteTableId *string `type:"string"` + + // Enable or disable automatic acceptance of attachment requests. + AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` + + // Enable or disable automatic association with the default association route + // table. 
+ DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` + + // Enable or disable automatic propagation of routes to the default propagation + // route table. + DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` + + // Enable or disable DNS support. + DnsSupport *string `type:"string" enum:"DnsSupportValue"` + + // The ID of the default propagation route table. + PropagationDefaultRouteTableId *string `type:"string"` + + // Enable or disable Equal Cost Multipath Protocol support. + VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOptions) GoString() string { + return s.String() +} + +// SetAssociationDefaultRouteTableId sets the AssociationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetAssociationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.AssociationDefaultRouteTableId = &v + return s +} + +// SetAutoAcceptSharedAttachments sets the AutoAcceptSharedAttachments field's value. +func (s *ModifyTransitGatewayOptions) SetAutoAcceptSharedAttachments(v string) *ModifyTransitGatewayOptions { + s.AutoAcceptSharedAttachments = &v + return s +} + +// SetDefaultRouteTableAssociation sets the DefaultRouteTableAssociation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTableAssociation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTableAssociation = &v + return s +} + +// SetDefaultRouteTablePropagation sets the DefaultRouteTablePropagation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTablePropagation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTablePropagation = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. 
+func (s *ModifyTransitGatewayOptions) SetDnsSupport(v string) *ModifyTransitGatewayOptions { + s.DnsSupport = &v + return s +} + +// SetPropagationDefaultRouteTableId sets the PropagationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetPropagationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.PropagationDefaultRouteTableId = &v + return s +} + +// SetVpnEcmpSupport sets the VpnEcmpSupport field's value. +func (s *ModifyTransitGatewayOptions) SetVpnEcmpSupport(v string) *ModifyTransitGatewayOptions { + s.VpnEcmpSupport = &v + return s +} + +type ModifyTransitGatewayOutput struct { + _ struct{} `type:"structure"` + + // Describes a transit gateway. + TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOutput) GoString() string { + return s.String() +} + +// SetTransitGateway sets the TransitGateway field's value. +func (s *ModifyTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *ModifyTransitGatewayOutput { + s.TransitGateway = v + return s +} + +type ModifyTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to drop traffic that matches this route. + Blackhole *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the attachment to which traffic is routed. 
+ TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlackhole sets the Blackhole field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.Blackhole = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. 
+func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type ModifyTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list reference. + TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *ModifyTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *ModifyTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + +type ModifyTransitGatewayVpcAttachmentInput struct { + _ struct{} `type:"structure"` + + // The IDs of one or more subnets to add. You can specify at most one subnet + // per Availability Zone. + AddSubnetIds []*string `locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The new VPC attachment options. + // + // You cannot modify the IPv6 options. + Options *ModifyTransitGatewayVpcAttachmentRequestOptions `type:"structure"` + + // The IDs of one or more subnets to remove. + RemoveSubnetIds []*string `locationNameList:"item" type:"list"` + + // The ID of the attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayVpcAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayVpcAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayVpcAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayVpcAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddSubnetIds sets the AddSubnetIds field's value. +func (s *ModifyTransitGatewayVpcAttachmentInput) SetAddSubnetIds(v []*string) *ModifyTransitGatewayVpcAttachmentInput { + s.AddSubnetIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayVpcAttachmentInput) SetDryRun(v bool) *ModifyTransitGatewayVpcAttachmentInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *ModifyTransitGatewayVpcAttachmentInput) SetOptions(v *ModifyTransitGatewayVpcAttachmentRequestOptions) *ModifyTransitGatewayVpcAttachmentInput { + s.Options = v + return s +} + +// SetRemoveSubnetIds sets the RemoveSubnetIds field's value. 
+func (s *ModifyTransitGatewayVpcAttachmentInput) SetRemoveSubnetIds(v []*string) *ModifyTransitGatewayVpcAttachmentInput { + s.RemoveSubnetIds = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *ModifyTransitGatewayVpcAttachmentInput) SetTransitGatewayAttachmentId(v string) *ModifyTransitGatewayVpcAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type ModifyTransitGatewayVpcAttachmentOutput struct { + _ struct{} `type:"structure"` + + // Information about the modified attachment. + TransitGatewayVpcAttachment *TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachment" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayVpcAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayVpcAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayVpcAttachment sets the TransitGatewayVpcAttachment field's value. +func (s *ModifyTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment(v *TransitGatewayVpcAttachment) *ModifyTransitGatewayVpcAttachmentOutput { + s.TransitGatewayVpcAttachment = v + return s +} + +// Describes the options for a VPC attachment. +type ModifyTransitGatewayVpcAttachmentRequestOptions struct { + _ struct{} `type:"structure"` + + // Enable or disable support for appliance mode. If enabled, a traffic flow + // between a source and destination uses the same Availability Zone for the + // VPC attachment for the lifetime of that flow. The default is disable. + ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"` + + // Enable or disable DNS support. The default is enable. + DnsSupport *string `type:"string" enum:"DnsSupportValue"` + + // Enable or disable IPv6 support. The default is enable. 
+ Ipv6Support *string `type:"string" enum:"Ipv6SupportValue"` +} + +// String returns the string representation +func (s ModifyTransitGatewayVpcAttachmentRequestOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayVpcAttachmentRequestOptions) GoString() string { + return s.String() +} + +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { + s.ApplianceModeSupport = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { + s.DnsSupport = &v + return s +} + +// SetIpv6Support sets the Ipv6Support field's value. +func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetIpv6Support(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { + s.Ipv6Support = &v + return s +} + +type ModifyVolumeAttributeInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume should be auto-enabled for I/O operations. + AutoEnableIO *AttributeBooleanValue `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the volume. 
+ // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVolumeAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVolumeAttributeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoEnableIO sets the AutoEnableIO field's value. +func (s *ModifyVolumeAttributeInput) SetAutoEnableIO(v *AttributeBooleanValue) *ModifyVolumeAttributeInput { + s.AutoEnableIO = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVolumeAttributeInput) SetDryRun(v bool) *ModifyVolumeAttributeInput { + s.DryRun = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *ModifyVolumeAttributeInput) SetVolumeId(v string) *ModifyVolumeAttributeInput { + s.VolumeId = &v + return s +} + +type ModifyVolumeAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The target IOPS rate of the volume. This parameter is valid only for gp3, + // io1, and io2 volumes. + // + // The following are the supported values for each volume type: + // + // * gp3: 3,000-16,000 IOPS + // + // * io1: 100-64,000 IOPS + // + // * io2: 100-64,000 IOPS + // + // Default: If no IOPS value is specified, the existing value is retained. + Iops *int64 `type:"integer"` + + // The target size of the volume, in GiB. The target volume size must be greater + // than or equal to the existing size of the volume. + // + // The following are the supported volumes sizes for each volume type: + // + // * gp2 and gp3: 1-16,384 + // + // * io1 and io2: 4-16,384 + // + // * st1 and sc1: 125-16,384 + // + // * standard: 1-1,024 + // + // Default: If no size is specified, the existing size is retained. + Size *int64 `type:"integer"` + + // The target throughput of the volume, in MiB/s. This parameter is valid only + // for gp3 volumes. The maximum value is 1,000. + // + // Default: If no throughput value is specified, the existing value is retained. + // + // Valid Range: Minimum value of 125. Maximum value of 1000. + Throughput *int64 `type:"integer"` + + // The ID of the volume. + // + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` + + // The target EBS volume type of the volume. For more information, see Amazon + // EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: If no type is specified, the existing type is retained. 
+ VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s ModifyVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVolumeInput) SetDryRun(v bool) *ModifyVolumeInput { + s.DryRun = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *ModifyVolumeInput) SetIops(v int64) *ModifyVolumeInput { + s.Iops = &v + return s +} + +// SetSize sets the Size field's value. +func (s *ModifyVolumeInput) SetSize(v int64) *ModifyVolumeInput { + s.Size = &v + return s +} + +// SetThroughput sets the Throughput field's value. +func (s *ModifyVolumeInput) SetThroughput(v int64) *ModifyVolumeInput { + s.Throughput = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *ModifyVolumeInput) SetVolumeId(v string) *ModifyVolumeInput { + s.VolumeId = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *ModifyVolumeInput) SetVolumeType(v string) *ModifyVolumeInput { + s.VolumeType = &v + return s +} + +type ModifyVolumeOutput struct { + _ struct{} `type:"structure"` + + // Information about the volume modification. 
+ VolumeModification *VolumeModification `locationName:"volumeModification" type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeOutput) GoString() string { + return s.String() +} + +// SetVolumeModification sets the VolumeModification field's value. +func (s *ModifyVolumeOutput) SetVolumeModification(v *VolumeModification) *ModifyVolumeOutput { + s.VolumeModification = v + return s +} + +type ModifyVpcAttributeInput struct { + _ struct{} `type:"structure"` + + // Indicates whether the instances launched in the VPC get DNS hostnames. If + // enabled, instances in the VPC get DNS hostnames; otherwise, they do not. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. You can only enable + // DNS hostnames if you've enabled DNS support. + EnableDnsHostnames *AttributeBooleanValue `type:"structure"` + + // Indicates whether the DNS resolution is supported for the VPC. If enabled, + // queries to the Amazon provided DNS server at the 169.254.169.253 IP address, + // or the reserved IP address at the base of the VPC network range "plus two" + // succeed. If disabled, the Amazon provided DNS service in the VPC that resolves + // public DNS hostnames to IP addresses is not enabled. + // + // You cannot modify the DNS resolution and DNS hostnames attributes in the + // same request. Use separate requests for each attribute. + EnableDnsSupport *AttributeBooleanValue `type:"structure"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `locationName:"vpcId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcAttributeInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnableDnsHostnames sets the EnableDnsHostnames field's value. +func (s *ModifyVpcAttributeInput) SetEnableDnsHostnames(v *AttributeBooleanValue) *ModifyVpcAttributeInput { + s.EnableDnsHostnames = v + return s +} + +// SetEnableDnsSupport sets the EnableDnsSupport field's value. +func (s *ModifyVpcAttributeInput) SetEnableDnsSupport(v *AttributeBooleanValue) *ModifyVpcAttributeInput { + s.EnableDnsSupport = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyVpcAttributeInput) SetVpcId(v string) *ModifyVpcAttributeInput { + s.VpcId = &v + return s +} + +type ModifyVpcAttributeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcAttributeOutput) GoString() string { + return s.String() +} + +type ModifyVpcEndpointConnectionNotificationInput struct { + _ struct{} `type:"structure"` + + // One or more events for the endpoint. Valid values are Accept, Connect, Delete, + // and Reject. + ConnectionEvents []*string `locationNameList:"item" type:"list"` + + // The ARN for the SNS topic for the notification. 
+ ConnectionNotificationArn *string `type:"string"` + + // The ID of the notification. + // + // ConnectionNotificationId is a required field + ConnectionNotificationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointConnectionNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointConnectionNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcEndpointConnectionNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointConnectionNotificationInput"} + if s.ConnectionNotificationId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionNotificationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionEvents sets the ConnectionEvents field's value. +func (s *ModifyVpcEndpointConnectionNotificationInput) SetConnectionEvents(v []*string) *ModifyVpcEndpointConnectionNotificationInput { + s.ConnectionEvents = v + return s +} + +// SetConnectionNotificationArn sets the ConnectionNotificationArn field's value. +func (s *ModifyVpcEndpointConnectionNotificationInput) SetConnectionNotificationArn(v string) *ModifyVpcEndpointConnectionNotificationInput { + s.ConnectionNotificationArn = &v + return s +} + +// SetConnectionNotificationId sets the ConnectionNotificationId field's value. 
+func (s *ModifyVpcEndpointConnectionNotificationInput) SetConnectionNotificationId(v string) *ModifyVpcEndpointConnectionNotificationInput { + s.ConnectionNotificationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcEndpointConnectionNotificationInput) SetDryRun(v bool) *ModifyVpcEndpointConnectionNotificationInput { + s.DryRun = &v + return s +} + +type ModifyVpcEndpointConnectionNotificationOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointConnectionNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointConnectionNotificationOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. +func (s *ModifyVpcEndpointConnectionNotificationOutput) SetReturnValue(v bool) *ModifyVpcEndpointConnectionNotificationOutput { + s.ReturnValue = &v + return s +} + +// Contains the parameters for ModifyVpcEndpoint. +type ModifyVpcEndpointInput struct { + _ struct{} `type:"structure"` + + // (Gateway endpoint) One or more route tables IDs to associate with the endpoint. + AddRouteTableIds []*string `locationName:"AddRouteTableId" locationNameList:"item" type:"list"` + + // (Interface endpoint) One or more security group IDs to associate with the + // network interface. + AddSecurityGroupIds []*string `locationName:"AddSecurityGroupId" locationNameList:"item" type:"list"` + + // (Interface and Gateway Load Balancer endpoints) One or more subnet IDs in + // which to serve the endpoint. For a Gateway Load Balancer endpoint, you can + // specify only one subnet. 
+ AddSubnetIds []*string `locationName:"AddSubnetId" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // (Interface and gateway endpoints) A policy to attach to the endpoint that + // controls access to the service. The policy must be in valid JSON format. + PolicyDocument *string `type:"string"` + + // (Interface endpoint) Indicates whether a private hosted zone is associated + // with the VPC. + PrivateDnsEnabled *bool `type:"boolean"` + + // (Gateway endpoint) One or more route table IDs to disassociate from the endpoint. + RemoveRouteTableIds []*string `locationName:"RemoveRouteTableId" locationNameList:"item" type:"list"` + + // (Interface endpoint) One or more security group IDs to disassociate from + // the network interface. + RemoveSecurityGroupIds []*string `locationName:"RemoveSecurityGroupId" locationNameList:"item" type:"list"` + + // (Interface endpoint) One or more subnets IDs in which to remove the endpoint. + RemoveSubnetIds []*string `locationName:"RemoveSubnetId" locationNameList:"item" type:"list"` + + // (Gateway endpoint) Specify true to reset the policy document to the default + // policy. The default policy allows full access to the service. + ResetPolicy *bool `type:"boolean"` + + // The ID of the endpoint. + // + // VpcEndpointId is a required field + VpcEndpointId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcEndpointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyVpcEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointInput"} + if s.VpcEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddRouteTableIds sets the AddRouteTableIds field's value. +func (s *ModifyVpcEndpointInput) SetAddRouteTableIds(v []*string) *ModifyVpcEndpointInput { + s.AddRouteTableIds = v + return s +} + +// SetAddSecurityGroupIds sets the AddSecurityGroupIds field's value. +func (s *ModifyVpcEndpointInput) SetAddSecurityGroupIds(v []*string) *ModifyVpcEndpointInput { + s.AddSecurityGroupIds = v + return s +} + +// SetAddSubnetIds sets the AddSubnetIds field's value. +func (s *ModifyVpcEndpointInput) SetAddSubnetIds(v []*string) *ModifyVpcEndpointInput { + s.AddSubnetIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcEndpointInput) SetDryRun(v bool) *ModifyVpcEndpointInput { + s.DryRun = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *ModifyVpcEndpointInput) SetPolicyDocument(v string) *ModifyVpcEndpointInput { + s.PolicyDocument = &v + return s +} + +// SetPrivateDnsEnabled sets the PrivateDnsEnabled field's value. +func (s *ModifyVpcEndpointInput) SetPrivateDnsEnabled(v bool) *ModifyVpcEndpointInput { + s.PrivateDnsEnabled = &v + return s +} + +// SetRemoveRouteTableIds sets the RemoveRouteTableIds field's value. +func (s *ModifyVpcEndpointInput) SetRemoveRouteTableIds(v []*string) *ModifyVpcEndpointInput { + s.RemoveRouteTableIds = v + return s +} + +// SetRemoveSecurityGroupIds sets the RemoveSecurityGroupIds field's value. +func (s *ModifyVpcEndpointInput) SetRemoveSecurityGroupIds(v []*string) *ModifyVpcEndpointInput { + s.RemoveSecurityGroupIds = v + return s +} + +// SetRemoveSubnetIds sets the RemoveSubnetIds field's value. 
+func (s *ModifyVpcEndpointInput) SetRemoveSubnetIds(v []*string) *ModifyVpcEndpointInput { + s.RemoveSubnetIds = v + return s +} + +// SetResetPolicy sets the ResetPolicy field's value. +func (s *ModifyVpcEndpointInput) SetResetPolicy(v bool) *ModifyVpcEndpointInput { + s.ResetPolicy = &v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *ModifyVpcEndpointInput) SetVpcEndpointId(v string) *ModifyVpcEndpointInput { + s.VpcEndpointId = &v + return s +} + +type ModifyVpcEndpointOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyVpcEndpointOutput) SetReturn(v bool) *ModifyVpcEndpointOutput { + s.Return = &v + return s +} + +type ModifyVpcEndpointServiceConfigurationInput struct { + _ struct{} `type:"structure"` + + // Indicates whether requests to create an endpoint to your service must be + // accepted. + AcceptanceRequired *bool `type:"boolean"` + + // The Amazon Resource Names (ARNs) of Gateway Load Balancers to add to your + // service configuration. + AddGatewayLoadBalancerArns []*string `locationName:"AddGatewayLoadBalancerArn" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of Network Load Balancers to add to your + // service configuration. + AddNetworkLoadBalancerArns []*string `locationName:"AddNetworkLoadBalancerArn" locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // (Interface endpoint configuration) The private DNS name to assign to the + // endpoint service. + PrivateDnsName *string `type:"string"` + + // The Amazon Resource Names (ARNs) of Gateway Load Balancers to remove from + // your service configuration. + RemoveGatewayLoadBalancerArns []*string `locationName:"RemoveGatewayLoadBalancerArn" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of Network Load Balancers to remove from + // your service configuration. + RemoveNetworkLoadBalancerArns []*string `locationName:"RemoveNetworkLoadBalancerArn" locationNameList:"item" type:"list"` + + // (Interface endpoint configuration) Removes the private DNS name of the endpoint + // service. + RemovePrivateDnsName *bool `type:"boolean"` + + // The ID of the service. + // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcEndpointServiceConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointServiceConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcEndpointServiceConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointServiceConfigurationInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. 
+func (s *ModifyVpcEndpointServiceConfigurationInput) SetAcceptanceRequired(v bool) *ModifyVpcEndpointServiceConfigurationInput { + s.AcceptanceRequired = &v + return s +} + +// SetAddGatewayLoadBalancerArns sets the AddGatewayLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetAddGatewayLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.AddGatewayLoadBalancerArns = v + return s +} + +// SetAddNetworkLoadBalancerArns sets the AddNetworkLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetAddNetworkLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.AddNetworkLoadBalancerArns = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetDryRun(v bool) *ModifyVpcEndpointServiceConfigurationInput { + s.DryRun = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetPrivateDnsName(v string) *ModifyVpcEndpointServiceConfigurationInput { + s.PrivateDnsName = &v + return s +} + +// SetRemoveGatewayLoadBalancerArns sets the RemoveGatewayLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetRemoveGatewayLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.RemoveGatewayLoadBalancerArns = v + return s +} + +// SetRemoveNetworkLoadBalancerArns sets the RemoveNetworkLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetRemoveNetworkLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.RemoveNetworkLoadBalancerArns = v + return s +} + +// SetRemovePrivateDnsName sets the RemovePrivateDnsName field's value. 
+func (s *ModifyVpcEndpointServiceConfigurationInput) SetRemovePrivateDnsName(v bool) *ModifyVpcEndpointServiceConfigurationInput { + s.RemovePrivateDnsName = &v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetServiceId(v string) *ModifyVpcEndpointServiceConfigurationInput { + s.ServiceId = &v + return s +} + +type ModifyVpcEndpointServiceConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointServiceConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointServiceConfigurationOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyVpcEndpointServiceConfigurationOutput) SetReturn(v bool) *ModifyVpcEndpointServiceConfigurationOutput { + s.Return = &v + return s +} + +type ModifyVpcEndpointServicePermissionsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARN) of one or more principals. Permissions are + // granted to the principals in this list. To grant permissions to all principals, + // specify an asterisk (*). + AddAllowedPrincipals []*string `locationNameList:"item" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The Amazon Resource Names (ARN) of one or more principals. Permissions are + // revoked for principals in this list. + RemoveAllowedPrincipals []*string `locationNameList:"item" type:"list"` + + // The ID of the service. 
+ // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcEndpointServicePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointServicePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcEndpointServicePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcEndpointServicePermissionsInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddAllowedPrincipals sets the AddAllowedPrincipals field's value. +func (s *ModifyVpcEndpointServicePermissionsInput) SetAddAllowedPrincipals(v []*string) *ModifyVpcEndpointServicePermissionsInput { + s.AddAllowedPrincipals = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcEndpointServicePermissionsInput) SetDryRun(v bool) *ModifyVpcEndpointServicePermissionsInput { + s.DryRun = &v + return s +} + +// SetRemoveAllowedPrincipals sets the RemoveAllowedPrincipals field's value. +func (s *ModifyVpcEndpointServicePermissionsInput) SetRemoveAllowedPrincipals(v []*string) *ModifyVpcEndpointServicePermissionsInput { + s.RemoveAllowedPrincipals = v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *ModifyVpcEndpointServicePermissionsInput) SetServiceId(v string) *ModifyVpcEndpointServicePermissionsInput { + s.ServiceId = &v + return s +} + +type ModifyVpcEndpointServicePermissionsOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcEndpointServicePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcEndpointServicePermissionsOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. +func (s *ModifyVpcEndpointServicePermissionsOutput) SetReturnValue(v bool) *ModifyVpcEndpointServicePermissionsOutput { + s.ReturnValue = &v + return s +} + +type ModifyVpcPeeringConnectionOptionsInput struct { + _ struct{} `type:"structure"` + + // The VPC peering connection options for the accepter VPC. + AccepterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The VPC peering connection options for the requester VPC. + RequesterPeeringConnectionOptions *PeeringConnectionOptionsRequest `type:"structure"` + + // The ID of the VPC peering connection. + // + // VpcPeeringConnectionId is a required field + VpcPeeringConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcPeeringConnectionOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcPeeringConnectionOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyVpcPeeringConnectionOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcPeeringConnectionOptionsInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccepterPeeringConnectionOptions sets the AccepterPeeringConnectionOptions field's value. +func (s *ModifyVpcPeeringConnectionOptionsInput) SetAccepterPeeringConnectionOptions(v *PeeringConnectionOptionsRequest) *ModifyVpcPeeringConnectionOptionsInput { + s.AccepterPeeringConnectionOptions = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcPeeringConnectionOptionsInput) SetDryRun(v bool) *ModifyVpcPeeringConnectionOptionsInput { + s.DryRun = &v + return s +} + +// SetRequesterPeeringConnectionOptions sets the RequesterPeeringConnectionOptions field's value. +func (s *ModifyVpcPeeringConnectionOptionsInput) SetRequesterPeeringConnectionOptions(v *PeeringConnectionOptionsRequest) *ModifyVpcPeeringConnectionOptionsInput { + s.RequesterPeeringConnectionOptions = v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *ModifyVpcPeeringConnectionOptionsInput) SetVpcPeeringConnectionId(v string) *ModifyVpcPeeringConnectionOptionsInput { + s.VpcPeeringConnectionId = &v + return s +} + +type ModifyVpcPeeringConnectionOptionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC peering connection options for the accepter VPC. + AccepterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"accepterPeeringConnectionOptions" type:"structure"` + + // Information about the VPC peering connection options for the requester VPC. 
+ RequesterPeeringConnectionOptions *PeeringConnectionOptions `locationName:"requesterPeeringConnectionOptions" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpcPeeringConnectionOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcPeeringConnectionOptionsOutput) GoString() string { + return s.String() +} + +// SetAccepterPeeringConnectionOptions sets the AccepterPeeringConnectionOptions field's value. +func (s *ModifyVpcPeeringConnectionOptionsOutput) SetAccepterPeeringConnectionOptions(v *PeeringConnectionOptions) *ModifyVpcPeeringConnectionOptionsOutput { + s.AccepterPeeringConnectionOptions = v + return s +} + +// SetRequesterPeeringConnectionOptions sets the RequesterPeeringConnectionOptions field's value. +func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionOptions(v *PeeringConnectionOptions) *ModifyVpcPeeringConnectionOptionsOutput { + s.RequesterPeeringConnectionOptions = v + return s +} + +type ModifyVpcTenancyInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance tenancy attribute for the VPC. + // + // InstanceTenancy is a required field + InstanceTenancy *string `type:"string" required:"true" enum:"VpcTenancy"` + + // The ID of the VPC. 
+ // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcTenancyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcTenancyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcTenancyInput"} + if s.InstanceTenancy == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTenancy")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcTenancyInput) SetDryRun(v bool) *ModifyVpcTenancyInput { + s.DryRun = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ModifyVpcTenancyInput) SetInstanceTenancy(v string) *ModifyVpcTenancyInput { + s.InstanceTenancy = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyVpcTenancyInput) SetVpcId(v string) *ModifyVpcTenancyInput { + s.VpcId = &v + return s +} + +type ModifyVpcTenancyOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcTenancyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. 
+func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput { + s.ReturnValue = &v + return s +} + +type ModifyVpnConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway. + TransitGatewayId *string `type:"string"` + + // The ID of the VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The ID of the virtual private gateway at the AWS side of the VPN connection. + VpnGatewayId *string `type:"string"` +} + +// String returns the string representation +func (s ModifyVpnConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnConnectionInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetCustomerGatewayId(v string) *ModifyVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *ModifyVpnConnectionInput) SetDryRun(v bool) *ModifyVpnConnectionInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetTransitGatewayId(v string) *ModifyVpnConnectionInput { + s.TransitGatewayId = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnConnectionInput) SetVpnConnectionId(v string) *ModifyVpnConnectionInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnGatewayId sets the VpnGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetVpnGatewayId(v string) *ModifyVpnConnectionInput { + s.VpnGatewayId = &v + return s +} + +type ModifyVpnConnectionOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: 0.0.0.0/0 + LocalIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: ::/0 + LocalIpv6NetworkCidr *string `type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // + // Default: 0.0.0.0/0 + RemoteIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // + // Default: ::/0 + RemoteIpv6NetworkCidr *string `type:"string"` + + // The ID of the Site-to-Site VPN connection. 
+ // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnConnectionOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnConnectionOptionsInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnConnectionOptionsInput) SetDryRun(v bool) *ModifyVpnConnectionOptionsInput { + s.DryRun = &v + return s +} + +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv6NetworkCidr = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. 
+func (s *ModifyVpnConnectionOptionsInput) SetVpnConnectionId(v string) *ModifyVpnConnectionOptionsInput { + s.VpnConnectionId = &v + return s +} + +type ModifyVpnConnectionOptionsOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnConnectionOptionsOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnConnectionOptionsOutput { + s.VpnConnection = v + return s +} + +type ModifyVpnConnectionOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnConnectionOutput { + s.VpnConnection = v + return s +} + +type ModifyVpnTunnelCertificateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AWS Site-to-Site VPN connection. 
+ // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnTunnelCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelCertificateInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelCertificateInput) SetDryRun(v bool) *ModifyVpnTunnelCertificateInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnConnectionId(v string) *ModifyVpnTunnelCertificateInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelCertificateInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelCertificateOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. 
+ VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelCertificateOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelCertificateOutput { + s.VpnConnection = v + return s +} + +type ModifyVpnTunnelOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tunnel options to modify. + // + // TunnelOptions is a required field + TunnelOptions *ModifyVpnTunnelOptionsSpecification `type:"structure" required:"true"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyVpnTunnelOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelOptionsInput"} + if s.TunnelOptions == nil { + invalidParams.Add(request.NewErrParamRequired("TunnelOptions")) + } + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelOptionsInput) SetDryRun(v bool) *ModifyVpnTunnelOptionsInput { + s.DryRun = &v + return s +} + +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *ModifyVpnTunnelOptionsInput) SetTunnelOptions(v *ModifyVpnTunnelOptionsSpecification) *ModifyVpnTunnelOptionsInput { + s.TunnelOptions = v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelOptionsInput) SetVpnConnectionId(v string) *ModifyVpnTunnelOptionsInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelOptionsInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelOptionsInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelOptionsOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. 
+func (s *ModifyVpnTunnelOptionsOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelOptionsOutput { + s.VpnConnection = v + return s +} + +// The AWS Site-to-Site VPN tunnel options to modify. +type ModifyVpnTunnelOptionsSpecification struct { + _ struct{} `type:"structure"` + + // The action to take after DPD timeout occurs. Specify restart to restart the + // IKE initiation. Specify clear to end the IKE session. + // + // Valid Values: clear | none | restart + // + // Default: clear + DPDTimeoutAction *string `type:"string"` + + // The number of seconds after which a DPD timeout occurs. + // + // Constraints: A value between 0 and 30. + // + // Default: 30 + DPDTimeoutSeconds *int64 `type:"integer"` + + // The IKE versions that are permitted for the VPN tunnel. + // + // Valid values: ikev1 | ikev2 + IKEVersions []*IKEVersionsRequestListValue `locationName:"IKEVersion" locationNameList:"item" type:"list"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 1 IKE negotiations. + // + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 + Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. + // + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 + Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 1 IKE negotiations. + // + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 + Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 1 of the IKE negotiation, in seconds. 
+ // + // Constraints: A value between 900 and 28,800. + // + // Default: 28800 + Phase1LifetimeSeconds *int64 `type:"integer"` + + // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel + // for phase 2 IKE negotiations. + // + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 + Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` + + // One or more encryption algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 + Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` + + // One or more integrity algorithms that are permitted for the VPN tunnel for + // phase 2 IKE negotiations. + // + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 + Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` + + // The lifetime for phase 2 of the IKE negotiation, in seconds. + // + // Constraints: A value between 900 and 3,600. The value must be less than the + // value for Phase1LifetimeSeconds. + // + // Default: 3600 + Phase2LifetimeSeconds *int64 `type:"integer"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and the customer gateway. + // + // Constraints: Allowed characters are alphanumeric characters, periods (.), + // and underscores (_). Must be between 8 and 64 characters in length and cannot + // start with zero (0). + PreSharedKey *string `type:"string"` + + // The percentage of the rekey window (determined by RekeyMarginTimeSeconds) + // during which the rekey time is randomly selected. + // + // Constraints: A value between 0 and 100. 
+ // + // Default: 100 + RekeyFuzzPercentage *int64 `type:"integer"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during + // which the AWS side of the VPN connection performs an IKE rekey. The exact + // time of the rekey is randomly selected based on the value for RekeyFuzzPercentage. + // + // Constraints: A value between 60 and half of Phase2LifetimeSeconds. + // + // Default: 540 + RekeyMarginTimeSeconds *int64 `type:"integer"` + + // The number of packets in an IKE replay window. + // + // Constraints: A value between 64 and 2048. + // + // Default: 1024 + ReplayWindowSize *int64 `type:"integer"` + + // The action to take when the establishing the tunnel for the VPN connection. + // By default, your customer gateway device must initiate the IKE negotiation + // and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. + // + // Valid Values: add | start + // + // Default: add + StartupAction *string `type:"string"` + + // The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same virtual private + // gateway. + // + // Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following + // CIDR blocks are reserved and cannot be used: + // + // * 169.254.0.0/30 + // + // * 169.254.1.0/30 + // + // * 169.254.2.0/30 + // + // * 169.254.3.0/30 + // + // * 169.254.4.0/30 + // + // * 169.254.5.0/30 + // + // * 169.254.169.252/30 + TunnelInsideCidr *string `type:"string"` + + // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same transit gateway. + // + // Constraints: A size /126 CIDR block from the local fd00::/8 range. 
+ TunnelInsideIpv6Cidr *string `type:"string"` +} + +// String returns the string representation +func (s ModifyVpnTunnelOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelOptionsSpecification) GoString() string { + return s.String() +} + +// SetDPDTimeoutAction sets the DPDTimeoutAction field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.DPDTimeoutAction = &v + return s +} + +// SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.DPDTimeoutSeconds = &v + return s +} + +// SetIKEVersions sets the IKEVersions field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.IKEVersions = v + return s +} + +// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1DHGroupNumbers = v + return s +} + +// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1EncryptionAlgorithms = v + return s +} + +// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase1IntegrityAlgorithms = v + return s +} + +// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value. 
+func (s *ModifyVpnTunnelOptionsSpecification) SetPhase1LifetimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.Phase1LifetimeSeconds = &v + return s +} + +// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2DHGroupNumbers = v + return s +} + +// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2EncryptionAlgorithms = v + return s +} + +// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsRequestListValue) *ModifyVpnTunnelOptionsSpecification { + s.Phase2IntegrityAlgorithms = v + return s +} + +// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPhase2LifetimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.Phase2LifetimeSeconds = &v + return s +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetPreSharedKey(v string) *ModifyVpnTunnelOptionsSpecification { + s.PreSharedKey = &v + return s +} + +// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetRekeyFuzzPercentage(v int64) *ModifyVpnTunnelOptionsSpecification { + s.RekeyFuzzPercentage = &v + return s +} + +// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value. 
+func (s *ModifyVpnTunnelOptionsSpecification) SetRekeyMarginTimeSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { + s.RekeyMarginTimeSeconds = &v + return s +} + +// SetReplayWindowSize sets the ReplayWindowSize field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *ModifyVpnTunnelOptionsSpecification { + s.ReplayWindowSize = &v + return s +} + +// SetStartupAction sets the StartupAction field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetStartupAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.StartupAction = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *ModifyVpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *ModifyVpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + +type MonitorInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the instances. + // + // InstanceIds is a required field + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s MonitorInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MonitorInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MonitorInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *MonitorInstancesInput) SetDryRun(v bool) *MonitorInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *MonitorInstancesInput) SetInstanceIds(v []*string) *MonitorInstancesInput { + s.InstanceIds = v + return s +} + +type MonitorInstancesOutput struct { + _ struct{} `type:"structure"` + + // The monitoring information. + InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s MonitorInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MonitorInstancesOutput) GoString() string { + return s.String() +} + +// SetInstanceMonitorings sets the InstanceMonitorings field's value. +func (s *MonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitoring) *MonitorInstancesOutput { + s.InstanceMonitorings = v + return s +} + +// Describes the monitoring of an instance. +type Monitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring + // is enabled. + State *string `locationName:"state" type:"string" enum:"MonitoringState"` +} + +// String returns the string representation +func (s Monitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Monitoring) GoString() string { + return s.String() +} + +// SetState sets the State field's value. 
+func (s *Monitoring) SetState(v string) *Monitoring { + s.State = &v + return s +} + +type MoveAddressToVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The Elastic IP address. + // + // PublicIp is a required field + PublicIp *string `locationName:"publicIp" type:"string" required:"true"` +} + +// String returns the string representation +func (s MoveAddressToVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MoveAddressToVpcInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MoveAddressToVpcInput"} + if s.PublicIp == nil { + invalidParams.Add(request.NewErrParamRequired("PublicIp")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *MoveAddressToVpcInput) SetDryRun(v bool) *MoveAddressToVpcInput { + s.DryRun = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *MoveAddressToVpcInput) SetPublicIp(v string) *MoveAddressToVpcInput { + s.PublicIp = &v + return s +} + +type MoveAddressToVpcOutput struct { + _ struct{} `type:"structure"` + + // The allocation ID for the Elastic IP address. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The status of the move of the IP address. 
+ Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s MoveAddressToVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MoveAddressToVpcOutput) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *MoveAddressToVpcOutput) SetAllocationId(v string) *MoveAddressToVpcOutput { + s.AllocationId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MoveAddressToVpcOutput) SetStatus(v string) *MoveAddressToVpcOutput { + s.Status = &v + return s +} + +// Describes the status of a moving Elastic IP address. +type MovingAddressStatus struct { + _ struct{} `type:"structure"` + + // The status of the Elastic IP address that's being moved to the EC2-VPC platform, + // or restored to the EC2-Classic platform. + MoveStatus *string `locationName:"moveStatus" type:"string" enum:"MoveStatus"` + + // The Elastic IP address. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s MovingAddressStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MovingAddressStatus) GoString() string { + return s.String() +} + +// SetMoveStatus sets the MoveStatus field's value. +func (s *MovingAddressStatus) SetMoveStatus(v string) *MovingAddressStatus { + s.MoveStatus = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *MovingAddressStatus) SetPublicIp(v string) *MovingAddressStatus { + s.PublicIp = &v + return s +} + +// Describes a NAT gateway. +type NatGateway struct { + _ struct{} `type:"structure"` + + // The date and time the NAT gateway was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // The date and time the NAT gateway was deleted, if applicable. 
+ DeleteTime *time.Time `locationName:"deleteTime" type:"timestamp"` + + // If the NAT gateway could not be created, specifies the error code for the + // failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound + // | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound) + FailureCode *string `locationName:"failureCode" type:"string"` + + // If the NAT gateway could not be created, specifies the error message for + // the failure, that corresponds to the error code. + // + // * For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free + // addresses to create this NAT gateway" + // + // * For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway + // attached" + // + // * For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx + // could not be associated with this NAT gateway" + // + // * For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx + // is already associated" + // + // * For InternalError: "Network interface eni-xxxxxxxx, created and used + // internally by this NAT gateway is in an invalid state. Please try again." + // + // * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx + // does not exist or could not be found." + FailureMessage *string `locationName:"failureMessage" type:"string"` + + // Information about the IP addresses and network interface associated with + // the NAT gateway. + NatGatewayAddresses []*NatGatewayAddress `locationName:"natGatewayAddressSet" locationNameList:"item" type:"list"` + + // The ID of the NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). 
+ ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"` + + // The state of the NAT gateway. + // + // * pending: The NAT gateway is being created and is not ready to process + // traffic. + // + // * failed: The NAT gateway could not be created. Check the failureCode + // and failureMessage fields for the reason. + // + // * available: The NAT gateway is able to process traffic. This status remains + // until you delete the NAT gateway, and does not indicate the health of + // the NAT gateway. + // + // * deleting: The NAT gateway is in the process of being terminated and + // may still be processing traffic. + // + // * deleted: The NAT gateway has been terminated and is no longer processing + // traffic. + State *string `locationName:"state" type:"string" enum:"NatGatewayState"` + + // The ID of the subnet in which the NAT gateway is located. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The tags for the NAT gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC in which the NAT gateway is located. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NatGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGateway) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *NatGateway) SetCreateTime(v time.Time) *NatGateway { + s.CreateTime = &v + return s +} + +// SetDeleteTime sets the DeleteTime field's value. +func (s *NatGateway) SetDeleteTime(v time.Time) *NatGateway { + s.DeleteTime = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *NatGateway) SetFailureCode(v string) *NatGateway { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. 
+func (s *NatGateway) SetFailureMessage(v string) *NatGateway { + s.FailureMessage = &v + return s +} + +// SetNatGatewayAddresses sets the NatGatewayAddresses field's value. +func (s *NatGateway) SetNatGatewayAddresses(v []*NatGatewayAddress) *NatGateway { + s.NatGatewayAddresses = v + return s +} + +// SetNatGatewayId sets the NatGatewayId field's value. +func (s *NatGateway) SetNatGatewayId(v string) *NatGateway { + s.NatGatewayId = &v + return s +} + +// SetProvisionedBandwidth sets the ProvisionedBandwidth field's value. +func (s *NatGateway) SetProvisionedBandwidth(v *ProvisionedBandwidth) *NatGateway { + s.ProvisionedBandwidth = v + return s +} + +// SetState sets the State field's value. +func (s *NatGateway) SetState(v string) *NatGateway { + s.State = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *NatGateway) SetSubnetId(v string) *NatGateway { + s.SubnetId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *NatGateway) SetTags(v []*Tag) *NatGateway { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *NatGateway) SetVpcId(v string) *NatGateway { + s.VpcId = &v + return s +} + +// Describes the IP addresses and network interface associated with a NAT gateway. +type NatGatewayAddress struct { + _ struct{} `type:"structure"` + + // The allocation ID of the Elastic IP address that's associated with the NAT + // gateway. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The ID of the network interface associated with the NAT gateway. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The private IP address associated with the Elastic IP address. + PrivateIp *string `locationName:"privateIp" type:"string"` + + // The Elastic IP address associated with the NAT gateway. 
+ PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NatGatewayAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NatGatewayAddress) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *NatGatewayAddress) SetAllocationId(v string) *NatGatewayAddress { + s.AllocationId = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NatGatewayAddress) SetNetworkInterfaceId(v string) *NatGatewayAddress { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIp sets the PrivateIp field's value. +func (s *NatGatewayAddress) SetPrivateIp(v string) *NatGatewayAddress { + s.PrivateIp = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *NatGatewayAddress) SetPublicIp(v string) *NatGatewayAddress { + s.PublicIp = &v + return s +} + +// Describes a network ACL. +type NetworkAcl struct { + _ struct{} `type:"structure"` + + // Any associations between the network ACL and one or more subnets + Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // One or more entries (rules) in the network ACL. + Entries []*NetworkAclEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // Indicates whether this is the default network ACL for the VPC. + IsDefault *bool `locationName:"default" type:"boolean"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the AWS account that owns the network ACL. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the network ACL. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC for the network ACL. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkAcl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAcl) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *NetworkAcl) SetAssociations(v []*NetworkAclAssociation) *NetworkAcl { + s.Associations = v + return s +} + +// SetEntries sets the Entries field's value. +func (s *NetworkAcl) SetEntries(v []*NetworkAclEntry) *NetworkAcl { + s.Entries = v + return s +} + +// SetIsDefault sets the IsDefault field's value. +func (s *NetworkAcl) SetIsDefault(v bool) *NetworkAcl { + s.IsDefault = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *NetworkAcl) SetNetworkAclId(v string) *NetworkAcl { + s.NetworkAclId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *NetworkAcl) SetOwnerId(v string) *NetworkAcl { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *NetworkAcl) SetTags(v []*Tag) *NetworkAcl { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *NetworkAcl) SetVpcId(v string) *NetworkAcl { + s.VpcId = &v + return s +} + +// Describes an association between a network ACL and a subnet. +type NetworkAclAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association between a network ACL and a subnet. + NetworkAclAssociationId *string `locationName:"networkAclAssociationId" type:"string"` + + // The ID of the network ACL. + NetworkAclId *string `locationName:"networkAclId" type:"string"` + + // The ID of the subnet. 
+ SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s NetworkAclAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclAssociation) GoString() string { + return s.String() +} + +// SetNetworkAclAssociationId sets the NetworkAclAssociationId field's value. +func (s *NetworkAclAssociation) SetNetworkAclAssociationId(v string) *NetworkAclAssociation { + s.NetworkAclAssociationId = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *NetworkAclAssociation) SetNetworkAclId(v string) *NetworkAclAssociation { + s.NetworkAclId = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *NetworkAclAssociation) SetSubnetId(v string) *NetworkAclAssociation { + s.SubnetId = &v + return s +} + +// Describes an entry in a network ACL. +type NetworkAclEntry struct { + _ struct{} `type:"structure"` + + // The IPv4 network range to allow or deny, in CIDR notation. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Indicates whether the rule is an egress rule (applied to traffic leaving + // the subnet). + Egress *bool `locationName:"egress" type:"boolean"` + + // ICMP protocol: The ICMP type and code. + IcmpTypeCode *IcmpTypeCode `locationName:"icmpTypeCode" type:"structure"` + + // The IPv6 network range to allow or deny, in CIDR notation. + Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"` + + // TCP or UDP protocols: The range of ports the rule applies to. + PortRange *PortRange `locationName:"portRange" type:"structure"` + + // The protocol number. A value of "-1" means all protocols. + Protocol *string `locationName:"protocol" type:"string"` + + // Indicates whether to allow or deny the traffic that matches the rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"RuleAction"` + + // The rule number for the entry. 
ACL entries are processed in ascending order + // by rule number. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` +} + +// String returns the string representation +func (s NetworkAclEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkAclEntry) GoString() string { + return s.String() +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *NetworkAclEntry) SetCidrBlock(v string) *NetworkAclEntry { + s.CidrBlock = &v + return s +} + +// SetEgress sets the Egress field's value. +func (s *NetworkAclEntry) SetEgress(v bool) *NetworkAclEntry { + s.Egress = &v + return s +} + +// SetIcmpTypeCode sets the IcmpTypeCode field's value. +func (s *NetworkAclEntry) SetIcmpTypeCode(v *IcmpTypeCode) *NetworkAclEntry { + s.IcmpTypeCode = v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *NetworkAclEntry) SetIpv6CidrBlock(v string) *NetworkAclEntry { + s.Ipv6CidrBlock = &v + return s +} + +// SetPortRange sets the PortRange field's value. +func (s *NetworkAclEntry) SetPortRange(v *PortRange) *NetworkAclEntry { + s.PortRange = v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *NetworkAclEntry) SetProtocol(v string) *NetworkAclEntry { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *NetworkAclEntry) SetRuleAction(v string) *NetworkAclEntry { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { + s.RuleNumber = &v + return s +} + +// Describes the network card support of the instance type. +type NetworkCardInfo struct { + _ struct{} `type:"structure"` + + // The maximum number of network interfaces for the network card. + MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` + + // The index of the network card. 
+ NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + + // The network performance of the network card. + NetworkPerformance *string `locationName:"networkPerformance" type:"string"` +} + +// String returns the string representation +func (s NetworkCardInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkCardInfo) GoString() string { + return s.String() +} + +// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. +func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo { + s.MaximumNetworkInterfaces = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkCardInfo) SetNetworkCardIndex(v int64) *NetworkCardInfo { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkPerformance sets the NetworkPerformance field's value. +func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo { + s.NetworkPerformance = &v + return s +} + +// Describes the networking features of the instance type. +type NetworkInfo struct { + _ struct{} `type:"structure"` + + // The index of the default network card, starting at 0. + DefaultNetworkCardIndex *int64 `locationName:"defaultNetworkCardIndex" type:"integer"` + + // Indicates whether Elastic Fabric Adapter (EFA) is supported. + EfaSupported *bool `locationName:"efaSupported" type:"boolean"` + + // Indicates whether Elastic Network Adapter (ENA) is supported. + EnaSupport *string `locationName:"enaSupport" type:"string" enum:"EnaSupport"` + + // The maximum number of IPv4 addresses per network interface. + Ipv4AddressesPerInterface *int64 `locationName:"ipv4AddressesPerInterface" type:"integer"` + + // The maximum number of IPv6 addresses per network interface. + Ipv6AddressesPerInterface *int64 `locationName:"ipv6AddressesPerInterface" type:"integer"` + + // Indicates whether IPv6 is supported. 
+ Ipv6Supported *bool `locationName:"ipv6Supported" type:"boolean"` + + // The maximum number of physical network cards that can be allocated to the + // instance. + MaximumNetworkCards *int64 `locationName:"maximumNetworkCards" type:"integer"` + + // The maximum number of network interfaces for the instance type. + MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` + + // Describes the network cards for the instance type. + NetworkCards []*NetworkCardInfo `locationName:"networkCards" locationNameList:"item" type:"list"` + + // The network performance. + NetworkPerformance *string `locationName:"networkPerformance" type:"string"` +} + +// String returns the string representation +func (s NetworkInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInfo) GoString() string { + return s.String() +} + +// SetDefaultNetworkCardIndex sets the DefaultNetworkCardIndex field's value. +func (s *NetworkInfo) SetDefaultNetworkCardIndex(v int64) *NetworkInfo { + s.DefaultNetworkCardIndex = &v + return s +} + +// SetEfaSupported sets the EfaSupported field's value. +func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { + s.EfaSupported = &v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *NetworkInfo) SetEnaSupport(v string) *NetworkInfo { + s.EnaSupport = &v + return s +} + +// SetIpv4AddressesPerInterface sets the Ipv4AddressesPerInterface field's value. +func (s *NetworkInfo) SetIpv4AddressesPerInterface(v int64) *NetworkInfo { + s.Ipv4AddressesPerInterface = &v + return s +} + +// SetIpv6AddressesPerInterface sets the Ipv6AddressesPerInterface field's value. +func (s *NetworkInfo) SetIpv6AddressesPerInterface(v int64) *NetworkInfo { + s.Ipv6AddressesPerInterface = &v + return s +} + +// SetIpv6Supported sets the Ipv6Supported field's value. 
+func (s *NetworkInfo) SetIpv6Supported(v bool) *NetworkInfo { + s.Ipv6Supported = &v + return s +} + +// SetMaximumNetworkCards sets the MaximumNetworkCards field's value. +func (s *NetworkInfo) SetMaximumNetworkCards(v int64) *NetworkInfo { + s.MaximumNetworkCards = &v + return s +} + +// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. +func (s *NetworkInfo) SetMaximumNetworkInterfaces(v int64) *NetworkInfo { + s.MaximumNetworkInterfaces = &v + return s +} + +// SetNetworkCards sets the NetworkCards field's value. +func (s *NetworkInfo) SetNetworkCards(v []*NetworkCardInfo) *NetworkInfo { + s.NetworkCards = v + return s +} + +// SetNetworkPerformance sets the NetworkPerformance field's value. +func (s *NetworkInfo) SetNetworkPerformance(v string) *NetworkInfo { + s.NetworkPerformance = &v + return s +} + +// Describes a network interface. +type NetworkInterface struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address (IPv4) associated with + // the network interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // The network interface attachment. + Attachment *NetworkInterfaceAttachment `locationName:"attachment" type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A description. + Description *string `locationName:"description" type:"string"` + + // Any security groups for the network interface. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The type of network interface. + InterfaceType *string `locationName:"interfaceType" type:"string" enum:"NetworkInterfaceType"` + + // The IPv6 addresses associated with the network interface. + Ipv6Addresses []*NetworkInterfaceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"` + + // The MAC address. 
+ MacAddress *string `locationName:"macAddress" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The AWS account ID of the owner of the network interface. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The private DNS name. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The IPv4 address of the network interface within the subnet. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The private IPv4 addresses associated with the network interface. + PrivateIpAddresses []*NetworkInterfacePrivateIpAddress `locationName:"privateIpAddressesSet" locationNameList:"item" type:"list"` + + // The ID of the entity that launched the instance on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // Indicates whether the network interface is being managed by AWS. + RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // Indicates whether traffic to or from the instance is validated. + SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` + + // The status of the network interface. + Status *string `locationName:"status" type:"string" enum:"NetworkInterfaceStatus"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the network interface. + TagSet []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s NetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterface) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *NetworkInterface) SetAssociation(v *NetworkInterfaceAssociation) *NetworkInterface { + s.Association = v + return s +} + +// SetAttachment sets the Attachment field's value. +func (s *NetworkInterface) SetAttachment(v *NetworkInterfaceAttachment) *NetworkInterface { + s.Attachment = v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *NetworkInterface) SetAvailabilityZone(v string) *NetworkInterface { + s.AvailabilityZone = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *NetworkInterface) SetDescription(v string) *NetworkInterface { + s.Description = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *NetworkInterface) SetGroups(v []*GroupIdentifier) *NetworkInterface { + s.Groups = v + return s +} + +// SetInterfaceType sets the InterfaceType field's value. +func (s *NetworkInterface) SetInterfaceType(v string) *NetworkInterface { + s.InterfaceType = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *NetworkInterface) SetIpv6Addresses(v []*NetworkInterfaceIpv6Address) *NetworkInterface { + s.Ipv6Addresses = v + return s +} + +// SetMacAddress sets the MacAddress field's value. +func (s *NetworkInterface) SetMacAddress(v string) *NetworkInterface { + s.MacAddress = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NetworkInterface) SetNetworkInterfaceId(v string) *NetworkInterface { + s.NetworkInterfaceId = &v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. 
+func (s *NetworkInterface) SetOutpostArn(v string) *NetworkInterface { + s.OutpostArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *NetworkInterface) SetOwnerId(v string) *NetworkInterface { + s.OwnerId = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *NetworkInterface) SetPrivateDnsName(v string) *NetworkInterface { + s.PrivateDnsName = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *NetworkInterface) SetPrivateIpAddress(v string) *NetworkInterface { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddresses sets the PrivateIpAddresses field's value. +func (s *NetworkInterface) SetPrivateIpAddresses(v []*NetworkInterfacePrivateIpAddress) *NetworkInterface { + s.PrivateIpAddresses = v + return s +} + +// SetRequesterId sets the RequesterId field's value. +func (s *NetworkInterface) SetRequesterId(v string) *NetworkInterface { + s.RequesterId = &v + return s +} + +// SetRequesterManaged sets the RequesterManaged field's value. +func (s *NetworkInterface) SetRequesterManaged(v bool) *NetworkInterface { + s.RequesterManaged = &v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *NetworkInterface) SetSourceDestCheck(v bool) *NetworkInterface { + s.SourceDestCheck = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *NetworkInterface) SetStatus(v string) *NetworkInterface { + s.Status = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *NetworkInterface) SetSubnetId(v string) *NetworkInterface { + s.SubnetId = &v + return s +} + +// SetTagSet sets the TagSet field's value. +func (s *NetworkInterface) SetTagSet(v []*Tag) *NetworkInterface { + s.TagSet = v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { + s.VpcId = &v + return s +} + +// Describes association information for an Elastic IP address (IPv4 only), +// or a Carrier IP address (for a network interface which resides in a subnet +// in a Wavelength Zone). +type NetworkInterfaceAssociation struct { + _ struct{} `type:"structure"` + + // The allocation ID. + AllocationId *string `locationName:"allocationId" type:"string"` + + // The association ID. + AssociationId *string `locationName:"associationId" type:"string"` + + // The carrier IP address associated with the network interface. + // + // This option is only available when the network interface is in a subnet which + // is associated with a Wavelength Zone. + CarrierIp *string `locationName:"carrierIp" type:"string"` + + // The customer-owned IP address associated with the network interface. + CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` + + // The ID of the Elastic IP address owner. + IpOwnerId *string `locationName:"ipOwnerId" type:"string"` + + // The public DNS name. + PublicDnsName *string `locationName:"publicDnsName" type:"string"` + + // The address of the Elastic IP address bound to the network interface. + PublicIp *string `locationName:"publicIp" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAssociation) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *NetworkInterfaceAssociation) SetAllocationId(v string) *NetworkInterfaceAssociation { + s.AllocationId = &v + return s +} + +// SetAssociationId sets the AssociationId field's value. 
+func (s *NetworkInterfaceAssociation) SetAssociationId(v string) *NetworkInterfaceAssociation { + s.AssociationId = &v + return s +} + +// SetCarrierIp sets the CarrierIp field's value. +func (s *NetworkInterfaceAssociation) SetCarrierIp(v string) *NetworkInterfaceAssociation { + s.CarrierIp = &v + return s +} + +// SetCustomerOwnedIp sets the CustomerOwnedIp field's value. +func (s *NetworkInterfaceAssociation) SetCustomerOwnedIp(v string) *NetworkInterfaceAssociation { + s.CustomerOwnedIp = &v + return s +} + +// SetIpOwnerId sets the IpOwnerId field's value. +func (s *NetworkInterfaceAssociation) SetIpOwnerId(v string) *NetworkInterfaceAssociation { + s.IpOwnerId = &v + return s +} + +// SetPublicDnsName sets the PublicDnsName field's value. +func (s *NetworkInterfaceAssociation) SetPublicDnsName(v string) *NetworkInterfaceAssociation { + s.PublicDnsName = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *NetworkInterfaceAssociation) SetPublicIp(v string) *NetworkInterfaceAssociation { + s.PublicIp = &v + return s +} + +// Describes a network interface attachment. +type NetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The timestamp indicating when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. 
+ InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + + // The attachment state. + Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// SetAttachTime sets the AttachTime field's value. +func (s *NetworkInterfaceAttachment) SetAttachTime(v time.Time) *NetworkInterfaceAttachment { + s.AttachTime = &v + return s +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *NetworkInterfaceAttachment) SetAttachmentId(v string) *NetworkInterfaceAttachment { + s.AttachmentId = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *NetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *NetworkInterfaceAttachment { + s.DeleteOnTermination = &v + return s +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *NetworkInterfaceAttachment) SetDeviceIndex(v int64) *NetworkInterfaceAttachment { + s.DeviceIndex = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *NetworkInterfaceAttachment) SetInstanceId(v string) *NetworkInterfaceAttachment { + s.InstanceId = &v + return s +} + +// SetInstanceOwnerId sets the InstanceOwnerId field's value. +func (s *NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *NetworkInterfaceAttachment { + s.InstanceOwnerId = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *NetworkInterfaceAttachment { + s.NetworkCardIndex = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *NetworkInterfaceAttachment) SetStatus(v string) *NetworkInterfaceAttachment { + s.Status = &v + return s +} + +// Describes an attachment change. +type NetworkInterfaceAttachmentChanges struct { + _ struct{} `type:"structure"` + + // The ID of the network interface attachment. + AttachmentId *string `locationName:"attachmentId" type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` +} + +// String returns the string representation +func (s NetworkInterfaceAttachmentChanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceAttachmentChanges) GoString() string { + return s.String() +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *NetworkInterfaceAttachmentChanges) SetAttachmentId(v string) *NetworkInterfaceAttachmentChanges { + s.AttachmentId = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *NetworkInterfaceAttachmentChanges) SetDeleteOnTermination(v bool) *NetworkInterfaceAttachmentChanges { + s.DeleteOnTermination = &v + return s +} + +// Describes an IPv6 address associated with a network interface. +type NetworkInterfaceIpv6Address struct { + _ struct{} `type:"structure"` + + // The IPv6 address. + Ipv6Address *string `locationName:"ipv6Address" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfaceIpv6Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfaceIpv6Address) GoString() string { + return s.String() +} + +// SetIpv6Address sets the Ipv6Address field's value. +func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterfaceIpv6Address { + s.Ipv6Address = &v + return s +} + +// Describes a permission for a network interface. 
+type NetworkInterfacePermission struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `locationName:"awsAccountId" type:"string"` + + // The AWS service. + AwsService *string `locationName:"awsService" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the network interface permission. + NetworkInterfacePermissionId *string `locationName:"networkInterfacePermissionId" type:"string"` + + // The type of permission. + Permission *string `locationName:"permission" type:"string" enum:"InterfacePermissionType"` + + // Information about the state of the permission. + PermissionState *NetworkInterfacePermissionState `locationName:"permissionState" type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfacePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermission) GoString() string { + return s.String() +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *NetworkInterfacePermission) SetAwsAccountId(v string) *NetworkInterfacePermission { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *NetworkInterfacePermission) SetAwsService(v string) *NetworkInterfacePermission { + s.AwsService = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfaceId(v string) *NetworkInterfacePermission { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfacePermissionId(v string) *NetworkInterfacePermission { + s.NetworkInterfacePermissionId = &v + return s +} + +// SetPermission sets the Permission field's value. 
+func (s *NetworkInterfacePermission) SetPermission(v string) *NetworkInterfacePermission { + s.Permission = &v + return s +} + +// SetPermissionState sets the PermissionState field's value. +func (s *NetworkInterfacePermission) SetPermissionState(v *NetworkInterfacePermissionState) *NetworkInterfacePermission { + s.PermissionState = v + return s +} + +// Describes the state of a network interface permission. +type NetworkInterfacePermissionState struct { + _ struct{} `type:"structure"` + + // The state of the permission. + State *string `locationName:"state" type:"string" enum:"NetworkInterfacePermissionStateCode"` + + // A status message, if applicable. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePermissionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermissionState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *NetworkInterfacePermissionState) SetState(v string) *NetworkInterfacePermissionState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *NetworkInterfacePermissionState) SetStatusMessage(v string) *NetworkInterfacePermissionState { + s.StatusMessage = &v + return s +} + +// Describes the private IPv4 address of a network interface. +type NetworkInterfacePrivateIpAddress struct { + _ struct{} `type:"structure"` + + // The association information for an Elastic IP address (IPv4) associated with + // the network interface. + Association *NetworkInterfaceAssociation `locationName:"association" type:"structure"` + + // Indicates whether this IPv4 address is the primary private IPv4 address of + // the network interface. + Primary *bool `locationName:"primary" type:"boolean"` + + // The private DNS name. 
+ PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The private IPv4 address. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePrivateIpAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePrivateIpAddress) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *NetworkInterfacePrivateIpAddress) SetAssociation(v *NetworkInterfaceAssociation) *NetworkInterfacePrivateIpAddress { + s.Association = v + return s +} + +// SetPrimary sets the Primary field's value. +func (s *NetworkInterfacePrivateIpAddress) SetPrimary(v bool) *NetworkInterfacePrivateIpAddress { + s.Primary = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *NetworkInterfacePrivateIpAddress) SetPrivateDnsName(v string) *NetworkInterfacePrivateIpAddress { + s.PrivateDnsName = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *NetworkInterfacePrivateIpAddress) SetPrivateIpAddress(v string) *NetworkInterfacePrivateIpAddress { + s.PrivateIpAddress = &v + return s +} + +type NewDhcpConfiguration struct { + _ struct{} `type:"structure"` + + Key *string `locationName:"key" type:"string"` + + Values []*string `locationName:"Value" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s NewDhcpConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewDhcpConfiguration) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NewDhcpConfiguration) SetKey(v string) *NewDhcpConfiguration { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. 
+func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration { + s.Values = v + return s +} + +// Describes the configuration of On-Demand Instances in an EC2 Fleet. +type OnDemandOptions struct { + _ struct{} `type:"structure"` + + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowest-price, EC2 Fleet uses price to determine + // the order, launching the lowest price first. If you specify prioritized, + // EC2 Fleet uses the priority that you assigned to each launch template override, + // launching the highest priority first. If you do not specify a value, EC2 + // Fleet defaults to lowest-price. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"FleetOnDemandAllocationStrategy"` + + // The strategy for using unused Capacity Reservations for fulfilling On-Demand + // capacity. Supported only for fleets of type instant. + CapacityReservationOptions *CapacityReservationOptions `locationName:"capacityReservationOptions" type:"structure"` + + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. + MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` + + // The minimum target capacity for On-Demand Instances in the fleet. If the + // minimum target capacity is not reached, the fleet launches no instances. + MinTargetCapacity *int64 `locationName:"minTargetCapacity" type:"integer"` + + // Indicates that the fleet launches all On-Demand Instances into a single Availability + // Zone. Supported only for fleets of type instant. + SingleAvailabilityZone *bool `locationName:"singleAvailabilityZone" type:"boolean"` + + // Indicates that the fleet uses a single instance type to launch all On-Demand + // Instances in the fleet. Supported only for fleets of type instant. 
+ SingleInstanceType *bool `locationName:"singleInstanceType" type:"boolean"` +} + +// String returns the string representation +func (s OnDemandOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnDemandOptions) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *OnDemandOptions) SetAllocationStrategy(v string) *OnDemandOptions { + s.AllocationStrategy = &v + return s +} + +// SetCapacityReservationOptions sets the CapacityReservationOptions field's value. +func (s *OnDemandOptions) SetCapacityReservationOptions(v *CapacityReservationOptions) *OnDemandOptions { + s.CapacityReservationOptions = v + return s +} + +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *OnDemandOptions) SetMaxTotalPrice(v string) *OnDemandOptions { + s.MaxTotalPrice = &v + return s +} + +// SetMinTargetCapacity sets the MinTargetCapacity field's value. +func (s *OnDemandOptions) SetMinTargetCapacity(v int64) *OnDemandOptions { + s.MinTargetCapacity = &v + return s +} + +// SetSingleAvailabilityZone sets the SingleAvailabilityZone field's value. +func (s *OnDemandOptions) SetSingleAvailabilityZone(v bool) *OnDemandOptions { + s.SingleAvailabilityZone = &v + return s +} + +// SetSingleInstanceType sets the SingleInstanceType field's value. +func (s *OnDemandOptions) SetSingleInstanceType(v bool) *OnDemandOptions { + s.SingleInstanceType = &v + return s +} + +// Describes the configuration of On-Demand Instances in an EC2 Fleet. +type OnDemandOptionsRequest struct { + _ struct{} `type:"structure"` + + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowest-price, EC2 Fleet uses price to determine + // the order, launching the lowest price first. 
If you specify prioritized, + // EC2 Fleet uses the priority that you assigned to each launch template override, + // launching the highest priority first. If you do not specify a value, EC2 + // Fleet defaults to lowest-price. + AllocationStrategy *string `type:"string" enum:"FleetOnDemandAllocationStrategy"` + + // The strategy for using unused Capacity Reservations for fulfilling On-Demand + // capacity. Supported only for fleets of type instant. + CapacityReservationOptions *CapacityReservationOptionsRequest `type:"structure"` + + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. + MaxTotalPrice *string `type:"string"` + + // The minimum target capacity for On-Demand Instances in the fleet. If the + // minimum target capacity is not reached, the fleet launches no instances. + MinTargetCapacity *int64 `type:"integer"` + + // Indicates that the fleet launches all On-Demand Instances into a single Availability + // Zone. Supported only for fleets of type instant. + SingleAvailabilityZone *bool `type:"boolean"` + + // Indicates that the fleet uses a single instance type to launch all On-Demand + // Instances in the fleet. Supported only for fleets of type instant. + SingleInstanceType *bool `type:"boolean"` +} + +// String returns the string representation +func (s OnDemandOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OnDemandOptionsRequest) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *OnDemandOptionsRequest) SetAllocationStrategy(v string) *OnDemandOptionsRequest { + s.AllocationStrategy = &v + return s +} + +// SetCapacityReservationOptions sets the CapacityReservationOptions field's value. 
+func (s *OnDemandOptionsRequest) SetCapacityReservationOptions(v *CapacityReservationOptionsRequest) *OnDemandOptionsRequest { + s.CapacityReservationOptions = v + return s +} + +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *OnDemandOptionsRequest) SetMaxTotalPrice(v string) *OnDemandOptionsRequest { + s.MaxTotalPrice = &v + return s +} + +// SetMinTargetCapacity sets the MinTargetCapacity field's value. +func (s *OnDemandOptionsRequest) SetMinTargetCapacity(v int64) *OnDemandOptionsRequest { + s.MinTargetCapacity = &v + return s +} + +// SetSingleAvailabilityZone sets the SingleAvailabilityZone field's value. +func (s *OnDemandOptionsRequest) SetSingleAvailabilityZone(v bool) *OnDemandOptionsRequest { + s.SingleAvailabilityZone = &v + return s +} + +// SetSingleInstanceType sets the SingleInstanceType field's value. +func (s *OnDemandOptionsRequest) SetSingleInstanceType(v bool) *OnDemandOptionsRequest { + s.SingleInstanceType = &v + return s +} + +// Describes the data that identifies an Amazon FPGA image (AFI) on the PCI +// bus. +type PciId struct { + _ struct{} `type:"structure"` + + // The ID of the device. + DeviceId *string `type:"string"` + + // The ID of the subsystem. + SubsystemId *string `type:"string"` + + // The ID of the vendor for the subsystem. + SubsystemVendorId *string `type:"string"` + + // The ID of the vendor. + VendorId *string `type:"string"` +} + +// String returns the string representation +func (s PciId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PciId) GoString() string { + return s.String() +} + +// SetDeviceId sets the DeviceId field's value. +func (s *PciId) SetDeviceId(v string) *PciId { + s.DeviceId = &v + return s +} + +// SetSubsystemId sets the SubsystemId field's value. +func (s *PciId) SetSubsystemId(v string) *PciId { + s.SubsystemId = &v + return s +} + +// SetSubsystemVendorId sets the SubsystemVendorId field's value. 
+func (s *PciId) SetSubsystemVendorId(v string) *PciId { + s.SubsystemVendorId = &v + return s +} + +// SetVendorId sets the VendorId field's value. +func (s *PciId) SetVendorId(v string) *PciId { + s.VendorId = &v + return s +} + +// The status of the transit gateway peering attachment. +type PeeringAttachmentStatus struct { + _ struct{} `type:"structure"` + + // The status code. + Code *string `locationName:"code" type:"string"` + + // The status message, if applicable. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PeeringAttachmentStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringAttachmentStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *PeeringAttachmentStatus) SetCode(v string) *PeeringAttachmentStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *PeeringAttachmentStatus) SetMessage(v string) *PeeringAttachmentStatus { + s.Message = &v + return s +} + +// Describes the VPC peering connection options. +type PeeringConnectionOptions struct { + _ struct{} `type:"structure"` + + // If true, the public DNS hostnames of instances in the specified VPC resolve + // to private IP addresses when queried from instances in the peer VPC. + AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"` + + // If true, enables outbound communication from an EC2-Classic instance that's + // linked to a local VPC using ClassicLink to instances in a peer VPC. + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"` + + // If true, enables outbound communication from instances in a local VPC to + // an EC2-Classic instance that's linked to a peer VPC using ClassicLink. 
+ AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"` +} + +// String returns the string representation +func (s PeeringConnectionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringConnectionOptions) GoString() string { + return s.String() +} + +// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value. +func (s *PeeringConnectionOptions) SetAllowDnsResolutionFromRemoteVpc(v bool) *PeeringConnectionOptions { + s.AllowDnsResolutionFromRemoteVpc = &v + return s +} + +// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value. +func (s *PeeringConnectionOptions) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *PeeringConnectionOptions { + s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v + return s +} + +// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value. +func (s *PeeringConnectionOptions) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *PeeringConnectionOptions { + s.AllowEgressFromLocalVpcToRemoteClassicLink = &v + return s +} + +// The VPC peering connection options. +type PeeringConnectionOptionsRequest struct { + _ struct{} `type:"structure"` + + // If true, enables a local VPC to resolve public DNS hostnames to private IP + // addresses when queried from instances in the peer VPC. + AllowDnsResolutionFromRemoteVpc *bool `type:"boolean"` + + // If true, enables outbound communication from an EC2-Classic instance that's + // linked to a local VPC using ClassicLink to instances in a peer VPC. + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean"` + + // If true, enables outbound communication from instances in a local VPC to + // an EC2-Classic instance that's linked to a peer VPC using ClassicLink. 
+ AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean"` +} + +// String returns the string representation +func (s PeeringConnectionOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringConnectionOptionsRequest) GoString() string { + return s.String() +} + +// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value. +func (s *PeeringConnectionOptionsRequest) SetAllowDnsResolutionFromRemoteVpc(v bool) *PeeringConnectionOptionsRequest { + s.AllowDnsResolutionFromRemoteVpc = &v + return s +} + +// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value. +func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *PeeringConnectionOptionsRequest { + s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v + return s +} + +// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value. +func (s *PeeringConnectionOptionsRequest) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *PeeringConnectionOptionsRequest { + s.AllowEgressFromLocalVpcToRemoteClassicLink = &v + return s +} + +// Information about the transit gateway in the peering attachment. +type PeeringTgwInfo struct { + _ struct{} `type:"structure"` + + // The AWS account ID of the owner of the transit gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The Region of the transit gateway. + Region *string `locationName:"region" type:"string"` + + // The ID of the transit gateway. 
+ TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` +} + +// String returns the string representation +func (s PeeringTgwInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PeeringTgwInfo) GoString() string { + return s.String() +} + +// SetOwnerId sets the OwnerId field's value. +func (s *PeeringTgwInfo) SetOwnerId(v string) *PeeringTgwInfo { + s.OwnerId = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *PeeringTgwInfo) SetRegion(v string) *PeeringTgwInfo { + s.Region = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *PeeringTgwInfo) SetTransitGatewayId(v string) *PeeringTgwInfo { + s.TransitGatewayId = &v + return s +} + +// The Diffie-Hellmann group number for phase 1 IKE negotiations. +type Phase1DHGroupNumbersListValue struct { + _ struct{} `type:"structure"` + + // The Diffie-Hellmann group number. + Value *int64 `locationName:"value" type:"integer"` +} + +// String returns the string representation +func (s Phase1DHGroupNumbersListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1DHGroupNumbersListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase1DHGroupNumbersListValue) SetValue(v int64) *Phase1DHGroupNumbersListValue { + s.Value = &v + return s +} + +// Specifies a Diffie-Hellman group number for the VPN tunnel for phase 1 IKE +// negotiations. +type Phase1DHGroupNumbersRequestListValue struct { + _ struct{} `type:"structure"` + + // The Diffie-Hellmann group number. 
+ Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s Phase1DHGroupNumbersRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1DHGroupNumbersRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase1DHGroupNumbersRequestListValue) SetValue(v int64) *Phase1DHGroupNumbersRequestListValue { + s.Value = &v + return s +} + +// The encryption algorithm for phase 1 IKE negotiations. +type Phase1EncryptionAlgorithmsListValue struct { + _ struct{} `type:"structure"` + + // The value for the encryption algorithm. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Phase1EncryptionAlgorithmsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1EncryptionAlgorithmsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase1EncryptionAlgorithmsListValue) SetValue(v string) *Phase1EncryptionAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the encryption algorithm for the VPN tunnel for phase 1 IKE negotiations. +type Phase1EncryptionAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The value for the encryption algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase1EncryptionAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1EncryptionAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. 
+func (s *Phase1EncryptionAlgorithmsRequestListValue) SetValue(v string) *Phase1EncryptionAlgorithmsRequestListValue { + s.Value = &v + return s +} + +// The integrity algorithm for phase 1 IKE negotiations. +type Phase1IntegrityAlgorithmsListValue struct { + _ struct{} `type:"structure"` + + // The value for the integrity algorithm. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Phase1IntegrityAlgorithmsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1IntegrityAlgorithmsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase1IntegrityAlgorithmsListValue) SetValue(v string) *Phase1IntegrityAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the integrity algorithm for the VPN tunnel for phase 1 IKE negotiations. +type Phase1IntegrityAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The value for the integrity algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase1IntegrityAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase1IntegrityAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase1IntegrityAlgorithmsRequestListValue) SetValue(v string) *Phase1IntegrityAlgorithmsRequestListValue { + s.Value = &v + return s +} + +// The Diffie-Hellmann group number for phase 2 IKE negotiations. +type Phase2DHGroupNumbersListValue struct { + _ struct{} `type:"structure"` + + // The Diffie-Hellmann group number. 
+ Value *int64 `locationName:"value" type:"integer"` +} + +// String returns the string representation +func (s Phase2DHGroupNumbersListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2DHGroupNumbersListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2DHGroupNumbersListValue) SetValue(v int64) *Phase2DHGroupNumbersListValue { + s.Value = &v + return s +} + +// Specifies a Diffie-Hellman group number for the VPN tunnel for phase 2 IKE +// negotiations. +type Phase2DHGroupNumbersRequestListValue struct { + _ struct{} `type:"structure"` + + // The Diffie-Hellmann group number. + Value *int64 `type:"integer"` +} + +// String returns the string representation +func (s Phase2DHGroupNumbersRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2DHGroupNumbersRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2DHGroupNumbersRequestListValue) SetValue(v int64) *Phase2DHGroupNumbersRequestListValue { + s.Value = &v + return s +} + +// The encryption algorithm for phase 2 IKE negotiations. +type Phase2EncryptionAlgorithmsListValue struct { + _ struct{} `type:"structure"` + + // The encryption algorithm. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Phase2EncryptionAlgorithmsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2EncryptionAlgorithmsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2EncryptionAlgorithmsListValue) SetValue(v string) *Phase2EncryptionAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the encryption algorithm for the VPN tunnel for phase 2 IKE negotiations. 
+type Phase2EncryptionAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The encryption algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase2EncryptionAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2EncryptionAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2EncryptionAlgorithmsRequestListValue) SetValue(v string) *Phase2EncryptionAlgorithmsRequestListValue { + s.Value = &v + return s +} + +// The integrity algorithm for phase 2 IKE negotiations. +type Phase2IntegrityAlgorithmsListValue struct { + _ struct{} `type:"structure"` + + // The integrity algorithm. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Phase2IntegrityAlgorithmsListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2IntegrityAlgorithmsListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. +func (s *Phase2IntegrityAlgorithmsListValue) SetValue(v string) *Phase2IntegrityAlgorithmsListValue { + s.Value = &v + return s +} + +// Specifies the integrity algorithm for the VPN tunnel for phase 2 IKE negotiations. +type Phase2IntegrityAlgorithmsRequestListValue struct { + _ struct{} `type:"structure"` + + // The integrity algorithm. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Phase2IntegrityAlgorithmsRequestListValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Phase2IntegrityAlgorithmsRequestListValue) GoString() string { + return s.String() +} + +// SetValue sets the Value field's value. 
+func (s *Phase2IntegrityAlgorithmsRequestListValue) SetValue(v string) *Phase2IntegrityAlgorithmsRequestListValue { + s.Value = &v + return s +} + +// Describes the placement of an instance. +type Placement struct { + _ struct{} `type:"structure"` + + // The affinity setting for the instance on the Dedicated Host. This parameter + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + Affinity *string `locationName:"affinity" type:"string"` + + // The Availability Zone of the instance. + // + // If not specified, an Availability Zone will be automatically chosen for you + // based on the load balancing criteria for the Region. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group the instance is in. + GroupName *string `locationName:"groupName" type:"string"` + + // The ID of the Dedicated Host on which the instance resides. This parameter + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + HostId *string `locationName:"hostId" type:"string"` + + // The ARN of the host resource group in which to launch the instances. If you + // specify a host resource group ARN, omit the Tenancy parameter or set it to + // host. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). 
+ HostResourceGroupArn *string `locationName:"hostResourceGroupArn" type:"string"` + + // The number of the partition the instance is in. Valid only if the placement + // group strategy is set to partition. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + PartitionNumber *int64 `locationName:"partitionNumber" type:"integer"` + + // Reserved for future use. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + SpreadDomain *string `locationName:"spreadDomain" type:"string"` + + // The tenancy of the instance (if the instance is running in a VPC). An instance + // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s Placement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Placement) GoString() string { + return s.String() +} + +// SetAffinity sets the Affinity field's value. +func (s *Placement) SetAffinity(v string) *Placement { + s.Affinity = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Placement) SetAvailabilityZone(v string) *Placement { + s.AvailabilityZone = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *Placement) SetGroupName(v string) *Placement { + s.GroupName = &v + return s +} + +// SetHostId sets the HostId field's value. 
+func (s *Placement) SetHostId(v string) *Placement { + s.HostId = &v + return s +} + +// SetHostResourceGroupArn sets the HostResourceGroupArn field's value. +func (s *Placement) SetHostResourceGroupArn(v string) *Placement { + s.HostResourceGroupArn = &v + return s +} + +// SetPartitionNumber sets the PartitionNumber field's value. +func (s *Placement) SetPartitionNumber(v int64) *Placement { + s.PartitionNumber = &v + return s +} + +// SetSpreadDomain sets the SpreadDomain field's value. +func (s *Placement) SetSpreadDomain(v string) *Placement { + s.SpreadDomain = &v + return s +} + +// SetTenancy sets the Tenancy field's value. +func (s *Placement) SetTenancy(v string) *Placement { + s.Tenancy = &v + return s +} + +// Describes a placement group. +type PlacementGroup struct { + _ struct{} `type:"structure"` + + // The ID of the placement group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string"` + + // The number of partitions. Valid only if strategy is set to partition. + PartitionCount *int64 `locationName:"partitionCount" type:"integer"` + + // The state of the placement group. + State *string `locationName:"state" type:"string" enum:"PlacementGroupState"` + + // The placement strategy. + Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` + + // Any tags applied to the placement group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PlacementGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementGroup) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *PlacementGroup) SetGroupId(v string) *PlacementGroup { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. 
+func (s *PlacementGroup) SetGroupName(v string) *PlacementGroup { + s.GroupName = &v + return s +} + +// SetPartitionCount sets the PartitionCount field's value. +func (s *PlacementGroup) SetPartitionCount(v int64) *PlacementGroup { + s.PartitionCount = &v + return s +} + +// SetState sets the State field's value. +func (s *PlacementGroup) SetState(v string) *PlacementGroup { + s.State = &v + return s +} + +// SetStrategy sets the Strategy field's value. +func (s *PlacementGroup) SetStrategy(v string) *PlacementGroup { + s.Strategy = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PlacementGroup) SetTags(v []*Tag) *PlacementGroup { + s.Tags = v + return s +} + +// Describes the placement group support of the instance type. +type PlacementGroupInfo struct { + _ struct{} `type:"structure"` + + // The supported placement group types. + SupportedStrategies []*string `locationName:"supportedStrategies" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PlacementGroupInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementGroupInfo) GoString() string { + return s.String() +} + +// SetSupportedStrategies sets the SupportedStrategies field's value. +func (s *PlacementGroupInfo) SetSupportedStrategies(v []*string) *PlacementGroupInfo { + s.SupportedStrategies = v + return s +} + +// Describes the placement of an instance. +type PlacementResponse struct { + _ struct{} `type:"structure"` + + // The name of the placement group that the instance is in. + GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s PlacementResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlacementResponse) GoString() string { + return s.String() +} + +// SetGroupName sets the GroupName field's value. 
+func (s *PlacementResponse) SetGroupName(v string) *PlacementResponse { + s.GroupName = &v + return s +} + +// Describes a CIDR block for an address pool. +type PoolCidrBlock struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"poolCidrBlock" type:"string"` +} + +// String returns the string representation +func (s PoolCidrBlock) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PoolCidrBlock) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PoolCidrBlock) SetCidr(v string) *PoolCidrBlock { + s.Cidr = &v + return s +} + +// Describes a range of ports. +type PortRange struct { + _ struct{} `type:"structure"` + + // The first port in the range. + From *int64 `locationName:"from" type:"integer"` + + // The last port in the range. + To *int64 `locationName:"to" type:"integer"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// SetFrom sets the From field's value. +func (s *PortRange) SetFrom(v int64) *PortRange { + s.From = &v + return s +} + +// SetTo sets the To field's value. +func (s *PortRange) SetTo(v int64) *PortRange { + s.To = &v + return s +} + +// Describes prefixes for AWS services. +type PrefixList struct { + _ struct{} `type:"structure"` + + // The IP address range of the AWS service. + Cidrs []*string `locationName:"cidrSet" locationNameList:"item" type:"list"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix. 
+ PrefixListName *string `locationName:"prefixListName" type:"string"` +} + +// String returns the string representation +func (s PrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixList) GoString() string { + return s.String() +} + +// SetCidrs sets the Cidrs field's value. +func (s *PrefixList) SetCidrs(v []*string) *PrefixList { + s.Cidrs = v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *PrefixList) SetPrefixListId(v string) *PrefixList { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *PrefixList) SetPrefixListName(v string) *PrefixList { + s.PrefixListName = &v + return s +} + +// Describes the resource with which a prefix list is associated. +type PrefixListAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The owner of the resource. + ResourceOwner *string `locationName:"resourceOwner" type:"string"` +} + +// String returns the string representation +func (s PrefixListAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *PrefixListAssociation) SetResourceId(v string) *PrefixListAssociation { + s.ResourceId = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *PrefixListAssociation) SetResourceOwner(v string) *PrefixListAssociation { + s.ResourceOwner = &v + return s +} + +// Describes a prefix list entry. +type PrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"cidr" type:"string"` + + // The description. 
+ Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s PrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListEntry) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PrefixListEntry) SetCidr(v string) *PrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *PrefixListEntry) SetDescription(v string) *PrefixListEntry { + s.Description = &v + return s +} + +// Describes a prefix list ID. +type PrefixListId struct { + _ struct{} `type:"structure"` + + // A description for the security group rule that references this prefix list + // ID. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"description" type:"string"` + + // The ID of the prefix. + PrefixListId *string `locationName:"prefixListId" type:"string"` +} + +// String returns the string representation +func (s PrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListId) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *PrefixListId) SetDescription(v string) *PrefixListId { + s.Description = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *PrefixListId) SetPrefixListId(v string) *PrefixListId { + s.PrefixListId = &v + return s +} + +// Describes the price for a Reserved Instance. +type PriceSchedule struct { + _ struct{} `type:"structure"` + + // The current price schedule, as determined by the term remaining for the Reserved + // Instance in the listing. + // + // A specific price schedule is always in effect, but only one price schedule + // can be active at any time. 
Take, for example, a Reserved Instance listing + // that has five months remaining in its term. When you specify price schedules + // for five months and two months, this means that schedule 1, covering the + // first three months of the remaining term, will be active during months 5, + // 4, and 3. Then schedule 2, covering the last two months of the term, will + // be active for months 2 and 1. + Active *bool `locationName:"active" type:"boolean"` + + // The currency for transacting the Reserved Instance resale. At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceSchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceSchedule) GoString() string { + return s.String() +} + +// SetActive sets the Active field's value. +func (s *PriceSchedule) SetActive(v bool) *PriceSchedule { + s.Active = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *PriceSchedule) SetCurrencyCode(v string) *PriceSchedule { + s.CurrencyCode = &v + return s +} + +// SetPrice sets the Price field's value. +func (s *PriceSchedule) SetPrice(v float64) *PriceSchedule { + s.Price = &v + return s +} + +// SetTerm sets the Term field's value. +func (s *PriceSchedule) SetTerm(v int64) *PriceSchedule { + s.Term = &v + return s +} + +// Describes the price for a Reserved Instance. +type PriceScheduleSpecification struct { + _ struct{} `type:"structure"` + + // The currency for transacting the Reserved Instance resale. 
At this time, + // the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The fixed price for the term. + Price *float64 `locationName:"price" type:"double"` + + // The number of months remaining in the reservation. For example, 2 is the + // second to the last month before the capacity reservation expires. + Term *int64 `locationName:"term" type:"long"` +} + +// String returns the string representation +func (s PriceScheduleSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PriceScheduleSpecification) GoString() string { + return s.String() +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *PriceScheduleSpecification) SetCurrencyCode(v string) *PriceScheduleSpecification { + s.CurrencyCode = &v + return s +} + +// SetPrice sets the Price field's value. +func (s *PriceScheduleSpecification) SetPrice(v float64) *PriceScheduleSpecification { + s.Price = &v + return s +} + +// SetTerm sets the Term field's value. +func (s *PriceScheduleSpecification) SetTerm(v int64) *PriceScheduleSpecification { + s.Term = &v + return s +} + +// Describes a Reserved Instance offering. +type PricingDetail struct { + _ struct{} `type:"structure"` + + // The number of reservations available for the price. + Count *int64 `locationName:"count" type:"integer"` + + // The price per instance. + Price *float64 `locationName:"price" type:"double"` +} + +// String returns the string representation +func (s PricingDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PricingDetail) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *PricingDetail) SetCount(v int64) *PricingDetail { + s.Count = &v + return s +} + +// SetPrice sets the Price field's value. 
+func (s *PricingDetail) SetPrice(v float64) *PricingDetail {
+	s.Price = &v
+	return s
+}
+
+// PrincipalIdFormat describes the ID format settings associated with a principal.
+type PrincipalIdFormat struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the principal.
+	Arn *string `locationName:"arn" type:"string"`
+
+	// The ID format settings reported for the principal.
+	Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s PrincipalIdFormat) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PrincipalIdFormat) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *PrincipalIdFormat) SetArn(v string) *PrincipalIdFormat {
+	s.Arn = &v
+	return s
+}
+
+// SetStatuses sets the Statuses field's value.
+func (s *PrincipalIdFormat) SetStatuses(v []*IdFormat) *PrincipalIdFormat {
+	s.Statuses = v
+	return s
+}
+
+// Information about the Private DNS name for interface endpoints.
+type PrivateDnsDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The private DNS name assigned to the VPC endpoint service.
+	PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
+}
+
+// String returns the string representation
+func (s PrivateDnsDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PrivateDnsDetails) GoString() string {
+	return s.String()
+}
+
+// SetPrivateDnsName sets the PrivateDnsName field's value.
+func (s *PrivateDnsDetails) SetPrivateDnsName(v string) *PrivateDnsDetails {
+	s.PrivateDnsName = &v
+	return s
+}
+
+// Information about the private DNS name for the service endpoint. For more
+// information about these parameters, see VPC Endpoint Service Private DNS
+// Name Verification (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html)
+// in the Amazon Virtual Private Cloud User Guide.
+type PrivateDnsNameConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the record subdomain the service provider needs to create. The
+	// service provider adds the value text to the name.
+	Name *string `locationName:"name" type:"string"`
+
+	// The verification state of the VPC endpoint service.
+	//
+	// Consumers of the endpoint service can use the private name only when the
+	// state is verified.
+	State *string `locationName:"state" type:"string" enum:"DnsNameState"`
+
+	// The endpoint service verification type, for example TXT.
+	Type *string `locationName:"type" type:"string"`
+
+	// The value the service provider adds to the private DNS name domain record
+	// before verification.
+	Value *string `locationName:"value" type:"string"`
+}
+
+// String returns the string representation
+func (s PrivateDnsNameConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PrivateDnsNameConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *PrivateDnsNameConfiguration) SetName(v string) *PrivateDnsNameConfiguration {
+	s.Name = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *PrivateDnsNameConfiguration) SetState(v string) *PrivateDnsNameConfiguration {
+	s.State = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *PrivateDnsNameConfiguration) SetType(v string) *PrivateDnsNameConfiguration {
+	s.Type = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *PrivateDnsNameConfiguration) SetValue(v string) *PrivateDnsNameConfiguration {
+	s.Value = &v
+	return s
+}
+
+// Describes a secondary private IPv4 address for a network interface.
+type PrivateIpAddressSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether the private IPv4 address is the primary private IPv4 address.
+	// Only one IPv4 address can be designated as primary.
+	Primary *bool `locationName:"primary" type:"boolean"`
+
+	// The private IPv4 address.
+	PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
+}
+
+// String returns the string representation
+func (s PrivateIpAddressSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PrivateIpAddressSpecification) GoString() string {
+	return s.String()
+}
+
+// SetPrimary sets the Primary field's value.
+func (s *PrivateIpAddressSpecification) SetPrimary(v bool) *PrivateIpAddressSpecification {
+	s.Primary = &v
+	return s
+}
+
+// SetPrivateIpAddress sets the PrivateIpAddress field's value.
+func (s *PrivateIpAddressSpecification) SetPrivateIpAddress(v string) *PrivateIpAddressSpecification {
+	s.PrivateIpAddress = &v
+	return s
+}
+
+// Describes the processor used by the instance type.
+type ProcessorInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The architectures supported by the instance type.
+	SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list"`
+
+	// The speed of the processor, in GHz.
+	SustainedClockSpeedInGhz *float64 `locationName:"sustainedClockSpeedInGhz" type:"double"`
+}
+
+// String returns the string representation
+func (s ProcessorInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProcessorInfo) GoString() string {
+	return s.String()
+}
+
+// SetSupportedArchitectures sets the SupportedArchitectures field's value.
+func (s *ProcessorInfo) SetSupportedArchitectures(v []*string) *ProcessorInfo {
+	s.SupportedArchitectures = v
+	return s
+}
+
+// SetSustainedClockSpeedInGhz sets the SustainedClockSpeedInGhz field's value.
+func (s *ProcessorInfo) SetSustainedClockSpeedInGhz(v float64) *ProcessorInfo {
+	s.SustainedClockSpeedInGhz = &v
+	return s
+}
+
+// Describes a product code.
+type ProductCode struct { + _ struct{} `type:"structure"` + + // The product code. + ProductCodeId *string `locationName:"productCode" type:"string"` + + // The type of product code. + ProductCodeType *string `locationName:"type" type:"string" enum:"ProductCodeValues"` +} + +// String returns the string representation +func (s ProductCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProductCode) GoString() string { + return s.String() +} + +// SetProductCodeId sets the ProductCodeId field's value. +func (s *ProductCode) SetProductCodeId(v string) *ProductCode { + s.ProductCodeId = &v + return s +} + +// SetProductCodeType sets the ProductCodeType field's value. +func (s *ProductCode) SetProductCodeType(v string) *ProductCode { + s.ProductCodeType = &v + return s +} + +// Describes a virtual private gateway propagating route. +type PropagatingVgw struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` +} + +// String returns the string representation +func (s PropagatingVgw) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PropagatingVgw) GoString() string { + return s.String() +} + +// SetGatewayId sets the GatewayId field's value. +func (s *PropagatingVgw) SetGatewayId(v string) *PropagatingVgw { + s.GatewayId = &v + return s +} + +type ProvisionByoipCidrInput struct { + _ struct{} `type:"structure"` + + // The public IPv4 or IPv6 address range, in CIDR notation. The most specific + // IPv4 prefix that you can specify is /24. The most specific IPv6 prefix you + // can specify is /56. The address range cannot overlap with another address + // range that you've brought to this or another Region. 
+ // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // A signed document that proves that you are authorized to bring the specified + // IP address range to Amazon using BYOIP. + CidrAuthorizationContext *CidrAuthorizationContext `type:"structure"` + + // A description for the address range and the address pool. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to apply to the address pool. + PoolTagSpecifications []*TagSpecification `locationName:"PoolTagSpecification" locationNameList:"item" type:"list"` + + // (IPv6 only) Indicate whether the address range will be publicly advertised + // to the internet. + // + // Default: true + PubliclyAdvertisable *bool `type:"boolean"` +} + +// String returns the string representation +func (s ProvisionByoipCidrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionByoipCidrInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionByoipCidrInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionByoipCidrInput"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + if s.CidrAuthorizationContext != nil { + if err := s.CidrAuthorizationContext.Validate(); err != nil { + invalidParams.AddNested("CidrAuthorizationContext", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. 
+func (s *ProvisionByoipCidrInput) SetCidr(v string) *ProvisionByoipCidrInput { + s.Cidr = &v + return s +} + +// SetCidrAuthorizationContext sets the CidrAuthorizationContext field's value. +func (s *ProvisionByoipCidrInput) SetCidrAuthorizationContext(v *CidrAuthorizationContext) *ProvisionByoipCidrInput { + s.CidrAuthorizationContext = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ProvisionByoipCidrInput) SetDescription(v string) *ProvisionByoipCidrInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ProvisionByoipCidrInput) SetDryRun(v bool) *ProvisionByoipCidrInput { + s.DryRun = &v + return s +} + +// SetPoolTagSpecifications sets the PoolTagSpecifications field's value. +func (s *ProvisionByoipCidrInput) SetPoolTagSpecifications(v []*TagSpecification) *ProvisionByoipCidrInput { + s.PoolTagSpecifications = v + return s +} + +// SetPubliclyAdvertisable sets the PubliclyAdvertisable field's value. +func (s *ProvisionByoipCidrInput) SetPubliclyAdvertisable(v bool) *ProvisionByoipCidrInput { + s.PubliclyAdvertisable = &v + return s +} + +type ProvisionByoipCidrOutput struct { + _ struct{} `type:"structure"` + + // Information about the address range. + ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` +} + +// String returns the string representation +func (s ProvisionByoipCidrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionByoipCidrOutput) GoString() string { + return s.String() +} + +// SetByoipCidr sets the ByoipCidr field's value. +func (s *ProvisionByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *ProvisionByoipCidrOutput { + s.ByoipCidr = v + return s +} + +// Reserved. 
If you need to sustain traffic greater than the documented limits +// (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), +// contact us through the Support Center (https://console.aws.amazon.com/support/home?). +type ProvisionedBandwidth struct { + _ struct{} `type:"structure"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Provisioned *string `locationName:"provisioned" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + RequestTime *time.Time `locationName:"requestTime" type:"timestamp"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Requested *string `locationName:"requested" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). 
+ Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s ProvisionedBandwidth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedBandwidth) GoString() string { + return s.String() +} + +// SetProvisionTime sets the ProvisionTime field's value. +func (s *ProvisionedBandwidth) SetProvisionTime(v time.Time) *ProvisionedBandwidth { + s.ProvisionTime = &v + return s +} + +// SetProvisioned sets the Provisioned field's value. +func (s *ProvisionedBandwidth) SetProvisioned(v string) *ProvisionedBandwidth { + s.Provisioned = &v + return s +} + +// SetRequestTime sets the RequestTime field's value. +func (s *ProvisionedBandwidth) SetRequestTime(v time.Time) *ProvisionedBandwidth { + s.RequestTime = &v + return s +} + +// SetRequested sets the Requested field's value. +func (s *ProvisionedBandwidth) SetRequested(v string) *ProvisionedBandwidth { + s.Requested = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ProvisionedBandwidth) SetStatus(v string) *ProvisionedBandwidth { + s.Status = &v + return s +} + +// Describes an IPv4 address pool. +type PublicIpv4Pool struct { + _ struct{} `type:"structure"` + + // A description of the address pool. + Description *string `locationName:"description" type:"string"` + + // The name of the location from which the address pool is advertised. A network + // border group is a unique set of Availability Zones or Local Zones from where + // AWS advertises public IP addresses. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + + // The address ranges. + PoolAddressRanges []*PublicIpv4PoolRange `locationName:"poolAddressRangeSet" locationNameList:"item" type:"list"` + + // The ID of the address pool. + PoolId *string `locationName:"poolId" type:"string"` + + // Any tags for the address pool. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The total number of addresses. + TotalAddressCount *int64 `locationName:"totalAddressCount" type:"integer"` + + // The total number of available addresses. + TotalAvailableAddressCount *int64 `locationName:"totalAvailableAddressCount" type:"integer"` +} + +// String returns the string representation +func (s PublicIpv4Pool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicIpv4Pool) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *PublicIpv4Pool) SetDescription(v string) *PublicIpv4Pool { + s.Description = &v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *PublicIpv4Pool) SetNetworkBorderGroup(v string) *PublicIpv4Pool { + s.NetworkBorderGroup = &v + return s +} + +// SetPoolAddressRanges sets the PoolAddressRanges field's value. +func (s *PublicIpv4Pool) SetPoolAddressRanges(v []*PublicIpv4PoolRange) *PublicIpv4Pool { + s.PoolAddressRanges = v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *PublicIpv4Pool) SetPoolId(v string) *PublicIpv4Pool { + s.PoolId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PublicIpv4Pool) SetTags(v []*Tag) *PublicIpv4Pool { + s.Tags = v + return s +} + +// SetTotalAddressCount sets the TotalAddressCount field's value. +func (s *PublicIpv4Pool) SetTotalAddressCount(v int64) *PublicIpv4Pool { + s.TotalAddressCount = &v + return s +} + +// SetTotalAvailableAddressCount sets the TotalAvailableAddressCount field's value. +func (s *PublicIpv4Pool) SetTotalAvailableAddressCount(v int64) *PublicIpv4Pool { + s.TotalAvailableAddressCount = &v + return s +} + +// Describes an address range of an IPv4 address pool. +type PublicIpv4PoolRange struct { + _ struct{} `type:"structure"` + + // The number of addresses in the range. 
+ AddressCount *int64 `locationName:"addressCount" type:"integer"` + + // The number of available addresses in the range. + AvailableAddressCount *int64 `locationName:"availableAddressCount" type:"integer"` + + // The first IP address in the range. + FirstAddress *string `locationName:"firstAddress" type:"string"` + + // The last IP address in the range. + LastAddress *string `locationName:"lastAddress" type:"string"` +} + +// String returns the string representation +func (s PublicIpv4PoolRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicIpv4PoolRange) GoString() string { + return s.String() +} + +// SetAddressCount sets the AddressCount field's value. +func (s *PublicIpv4PoolRange) SetAddressCount(v int64) *PublicIpv4PoolRange { + s.AddressCount = &v + return s +} + +// SetAvailableAddressCount sets the AvailableAddressCount field's value. +func (s *PublicIpv4PoolRange) SetAvailableAddressCount(v int64) *PublicIpv4PoolRange { + s.AvailableAddressCount = &v + return s +} + +// SetFirstAddress sets the FirstAddress field's value. +func (s *PublicIpv4PoolRange) SetFirstAddress(v string) *PublicIpv4PoolRange { + s.FirstAddress = &v + return s +} + +// SetLastAddress sets the LastAddress field's value. +func (s *PublicIpv4PoolRange) SetLastAddress(v string) *PublicIpv4PoolRange { + s.LastAddress = &v + return s +} + +// Describes the result of the purchase. +type Purchase struct { + _ struct{} `type:"structure"` + + // The currency in which the UpfrontPrice and HourlyPrice amounts are specified. + // At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the reservation's term in seconds. + Duration *int64 `locationName:"duration" type:"integer"` + + // The IDs of the Dedicated Hosts associated with the reservation. 
+ HostIdSet []*string `locationName:"hostIdSet" locationNameList:"item" type:"list"` + + // The ID of the reservation. + HostReservationId *string `locationName:"hostReservationId" type:"string"` + + // The hourly price of the reservation per hour. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance family on the Dedicated Host that the reservation can be associated + // with. + InstanceFamily *string `locationName:"instanceFamily" type:"string"` + + // The payment option for the reservation. + PaymentOption *string `locationName:"paymentOption" type:"string" enum:"PaymentOption"` + + // The upfront price of the reservation. + UpfrontPrice *string `locationName:"upfrontPrice" type:"string"` +} + +// String returns the string representation +func (s Purchase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Purchase) GoString() string { + return s.String() +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *Purchase) SetCurrencyCode(v string) *Purchase { + s.CurrencyCode = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *Purchase) SetDuration(v int64) *Purchase { + s.Duration = &v + return s +} + +// SetHostIdSet sets the HostIdSet field's value. +func (s *Purchase) SetHostIdSet(v []*string) *Purchase { + s.HostIdSet = v + return s +} + +// SetHostReservationId sets the HostReservationId field's value. +func (s *Purchase) SetHostReservationId(v string) *Purchase { + s.HostReservationId = &v + return s +} + +// SetHourlyPrice sets the HourlyPrice field's value. +func (s *Purchase) SetHourlyPrice(v string) *Purchase { + s.HourlyPrice = &v + return s +} + +// SetInstanceFamily sets the InstanceFamily field's value. +func (s *Purchase) SetInstanceFamily(v string) *Purchase { + s.InstanceFamily = &v + return s +} + +// SetPaymentOption sets the PaymentOption field's value. 
+func (s *Purchase) SetPaymentOption(v string) *Purchase { + s.PaymentOption = &v + return s +} + +// SetUpfrontPrice sets the UpfrontPrice field's value. +func (s *Purchase) SetUpfrontPrice(v string) *Purchase { + s.UpfrontPrice = &v + return s +} + +type PurchaseHostReservationInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The currency in which the totalUpfrontPrice, LimitPrice, and totalHourlyPrice + // amounts are specified. At this time, the only supported currency is USD. + CurrencyCode *string `type:"string" enum:"CurrencyCodeValues"` + + // The IDs of the Dedicated Hosts with which the reservation will be associated. + // + // HostIdSet is a required field + HostIdSet []*string `locationNameList:"item" type:"list" required:"true"` + + // The specified limit is checked against the total upfront cost of the reservation + // (calculated as the offering's upfront cost multiplied by the host count). + // If the total upfront cost is greater than the specified price limit, the + // request fails. This is used to ensure that the purchase does not exceed the + // expected upfront cost of the purchase. At this time, the only supported currency + // is USD. For example, to indicate a limit price of USD 100, specify 100.00. + LimitPrice *string `type:"string"` + + // The ID of the offering. + // + // OfferingId is a required field + OfferingId *string `type:"string" required:"true"` + + // The tags to apply to the Dedicated Host Reservation during purchase. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseHostReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseHostReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseHostReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseHostReservationInput"} + if s.HostIdSet == nil { + invalidParams.Add(request.NewErrParamRequired("HostIdSet")) + } + if s.OfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("OfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *PurchaseHostReservationInput) SetClientToken(v string) *PurchaseHostReservationInput { + s.ClientToken = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *PurchaseHostReservationInput) SetCurrencyCode(v string) *PurchaseHostReservationInput { + s.CurrencyCode = &v + return s +} + +// SetHostIdSet sets the HostIdSet field's value. +func (s *PurchaseHostReservationInput) SetHostIdSet(v []*string) *PurchaseHostReservationInput { + s.HostIdSet = v + return s +} + +// SetLimitPrice sets the LimitPrice field's value. +func (s *PurchaseHostReservationInput) SetLimitPrice(v string) *PurchaseHostReservationInput { + s.LimitPrice = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostReservationInput { + s.OfferingId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *PurchaseHostReservationInput) SetTagSpecifications(v []*TagSpecification) *PurchaseHostReservationInput { + s.TagSpecifications = v + return s +} + +type PurchaseHostReservationOutput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The currency in which the totalUpfrontPrice and totalHourlyPrice amounts + // are specified. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // Describes the details of the purchase. + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` + + // The total hourly price of the reservation calculated per hour. + TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` + + // The total amount charged to your account when you purchase the reservation. + TotalUpfrontPrice *string `locationName:"totalUpfrontPrice" type:"string"` +} + +// String returns the string representation +func (s PurchaseHostReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseHostReservationOutput) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *PurchaseHostReservationOutput) SetClientToken(v string) *PurchaseHostReservationOutput { + s.ClientToken = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *PurchaseHostReservationOutput) SetCurrencyCode(v string) *PurchaseHostReservationOutput { + s.CurrencyCode = &v + return s +} + +// SetPurchase sets the Purchase field's value. 
+func (s *PurchaseHostReservationOutput) SetPurchase(v []*Purchase) *PurchaseHostReservationOutput { + s.Purchase = v + return s +} + +// SetTotalHourlyPrice sets the TotalHourlyPrice field's value. +func (s *PurchaseHostReservationOutput) SetTotalHourlyPrice(v string) *PurchaseHostReservationOutput { + s.TotalHourlyPrice = &v + return s +} + +// SetTotalUpfrontPrice sets the TotalUpfrontPrice field's value. +func (s *PurchaseHostReservationOutput) SetTotalUpfrontPrice(v string) *PurchaseHostReservationOutput { + s.TotalUpfrontPrice = &v + return s +} + +// Describes a request to purchase Scheduled Instances. +type PurchaseRequest struct { + _ struct{} `type:"structure"` + + // The number of instances. + // + // InstanceCount is a required field + InstanceCount *int64 `type:"integer" required:"true"` + + // The purchase token. + // + // PurchaseToken is a required field + PurchaseToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PurchaseRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseRequest"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.PurchaseToken == nil { + invalidParams.Add(request.NewErrParamRequired("PurchaseToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *PurchaseRequest) SetInstanceCount(v int64) *PurchaseRequest { + s.InstanceCount = &v + return s +} + +// SetPurchaseToken sets the PurchaseToken field's value. 
+func (s *PurchaseRequest) SetPurchaseToken(v string) *PurchaseRequest { + s.PurchaseToken = &v + return s +} + +// Contains the parameters for PurchaseReservedInstancesOffering. +type PurchaseReservedInstancesOfferingInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The number of Reserved Instances to purchase. + // + // InstanceCount is a required field + InstanceCount *int64 `type:"integer" required:"true"` + + // Specified for Reserved Instance Marketplace offerings to limit the total + // order and ensure that the Reserved Instances are not purchased at unexpected + // prices. + LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` + + // The time at which to purchase the Reserved Instance, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + PurchaseTime *time.Time `type:"timestamp"` + + // The ID of the Reserved Instance offering to purchase. + // + // ReservedInstancesOfferingId is a required field + ReservedInstancesOfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PurchaseReservedInstancesOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedInstancesOfferingInput"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.ReservedInstancesOfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("ReservedInstancesOfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetDryRun(v bool) *PurchaseReservedInstancesOfferingInput { + s.DryRun = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetInstanceCount(v int64) *PurchaseReservedInstancesOfferingInput { + s.InstanceCount = &v + return s +} + +// SetLimitPrice sets the LimitPrice field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetLimitPrice(v *ReservedInstanceLimitPrice) *PurchaseReservedInstancesOfferingInput { + s.LimitPrice = v + return s +} + +// SetPurchaseTime sets the PurchaseTime field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetPurchaseTime(v time.Time) *PurchaseReservedInstancesOfferingInput { + s.PurchaseTime = &v + return s +} + +// SetReservedInstancesOfferingId sets the ReservedInstancesOfferingId field's value. +func (s *PurchaseReservedInstancesOfferingInput) SetReservedInstancesOfferingId(v string) *PurchaseReservedInstancesOfferingInput { + s.ReservedInstancesOfferingId = &v + return s +} + +// Contains the output of PurchaseReservedInstancesOffering. +type PurchaseReservedInstancesOfferingOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the purchased Reserved Instances. 
+ ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseReservedInstancesOfferingOutput) GoString() string { + return s.String() +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *PurchaseReservedInstancesOfferingOutput) SetReservedInstancesId(v string) *PurchaseReservedInstancesOfferingOutput { + s.ReservedInstancesId = &v + return s +} + +// Contains the parameters for PurchaseScheduledInstances. +type PurchaseScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The purchase requests. + // + // PurchaseRequests is a required field + PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PurchaseScheduledInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseScheduledInstancesInput"} + if s.PurchaseRequests == nil { + invalidParams.Add(request.NewErrParamRequired("PurchaseRequests")) + } + if s.PurchaseRequests != nil && len(s.PurchaseRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PurchaseRequests", 1)) + } + if s.PurchaseRequests != nil { + for i, v := range s.PurchaseRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PurchaseRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *PurchaseScheduledInstancesInput) SetClientToken(v string) *PurchaseScheduledInstancesInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *PurchaseScheduledInstancesInput) SetDryRun(v bool) *PurchaseScheduledInstancesInput { + s.DryRun = &v + return s +} + +// SetPurchaseRequests sets the PurchaseRequests field's value. +func (s *PurchaseScheduledInstancesInput) SetPurchaseRequests(v []*PurchaseRequest) *PurchaseScheduledInstancesInput { + s.PurchaseRequests = v + return s +} + +// Contains the output of PurchaseScheduledInstances. +type PurchaseScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Scheduled Instances. + ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesOutput) GoString() string { + return s.String() +} + +// SetScheduledInstanceSet sets the ScheduledInstanceSet field's value. 
+func (s *PurchaseScheduledInstancesOutput) SetScheduledInstanceSet(v []*ScheduledInstance) *PurchaseScheduledInstancesOutput { + s.ScheduledInstanceSet = v + return s +} + +type RebootInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The instance IDs. + // + // InstanceIds is a required field + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RebootInstancesInput) SetDryRun(v bool) *RebootInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. 
+func (s *RebootInstancesInput) SetInstanceIds(v []*string) *RebootInstancesInput { + s.InstanceIds = v + return s +} + +type RebootInstancesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RebootInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootInstancesOutput) GoString() string { + return s.String() +} + +// Describes a recurring charge. +type RecurringCharge struct { + _ struct{} `type:"structure"` + + // The amount of the recurring charge. + Amount *float64 `locationName:"amount" type:"double"` + + // The frequency of the recurring charge. + Frequency *string `locationName:"frequency" type:"string" enum:"RecurringChargeFrequency"` +} + +// String returns the string representation +func (s RecurringCharge) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecurringCharge) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *RecurringCharge) SetAmount(v float64) *RecurringCharge { + s.Amount = &v + return s +} + +// SetFrequency sets the Frequency field's value. +func (s *RecurringCharge) SetFrequency(v string) *RecurringCharge { + s.Frequency = &v + return s +} + +// Describes a Region. +type Region struct { + _ struct{} `type:"structure"` + + // The Region service endpoint. + Endpoint *string `locationName:"regionEndpoint" type:"string"` + + // The Region opt-in status. The possible values are opt-in-not-required, opted-in, + // and not-opted-in. + OptInStatus *string `locationName:"optInStatus" type:"string"` + + // The name of the Region. 
+ RegionName *string `locationName:"regionName" type:"string"` +} + +// String returns the string representation +func (s Region) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Region) GoString() string { + return s.String() +} + +// SetEndpoint sets the Endpoint field's value. +func (s *Region) SetEndpoint(v string) *Region { + s.Endpoint = &v + return s +} + +// SetOptInStatus sets the OptInStatus field's value. +func (s *Region) SetOptInStatus(v string) *Region { + s.OptInStatus = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *Region) SetRegionName(v string) *Region { + s.RegionName = &v + return s +} + +// Contains the parameters for RegisterImage. +type RegisterImageInput struct { + _ struct{} `type:"structure"` + + // The architecture of the AMI. + // + // Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, + // the architecture specified in the manifest file. + Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + + // The billing product codes. Your account must be authorized to specify billing + // product codes. Otherwise, you can use the AWS Marketplace to bill for the + // use of an AMI. + BillingProducts []*string `locationName:"BillingProduct" locationNameList:"item" type:"list"` + + // The block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // A description for your AMI. + Description *string `locationName:"description" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Set to true to enable enhanced networking with ENA for the AMI and any instances + // that you launch from the AMI. + // + // This option is supported only for HVM AMIs. Specifying this option with a + // PV AMI can make instances launched from the AMI unreachable. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + + // The full path to your AMI manifest in Amazon S3 storage. The specified bucket + // must have the aws-exec-read canned access control list (ACL) to ensure that + // it can be accessed by Amazon EC2. For more information, see Canned ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + // in the Amazon S3 Service Developer Guide. + ImageLocation *string `type:"string"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // A name for your AMI. + // + // Constraints: 3-128 alphanumeric characters, parentheses (()), square brackets + // ([]), spaces ( ), periods (.), slashes (/), dashes (-), single quotes ('), + // at-signs (@), or underscores(_) + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // The device name of the root device volume (for example, /dev/sda1). + RootDeviceName *string `locationName:"rootDeviceName" type:"string"` + + // Set to simple to enable enhanced networking with the Intel 82599 Virtual + // Function interface for the AMI and any instances that you launch from the + // AMI. + // + // There is no way to disable sriovNetSupport at this time. + // + // This option is supported only for HVM AMIs. Specifying this option with a + // PV AMI can make instances launched from the AMI unreachable. + SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` + + // The type of virtualization (hvm | paravirtual). 
+ // + // Default: paravirtual + VirtualizationType *string `locationName:"virtualizationType" type:"string"` +} + +// String returns the string representation +func (s RegisterImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterImageInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArchitecture sets the Architecture field's value. +func (s *RegisterImageInput) SetArchitecture(v string) *RegisterImageInput { + s.Architecture = &v + return s +} + +// SetBillingProducts sets the BillingProducts field's value. +func (s *RegisterImageInput) SetBillingProducts(v []*string) *RegisterImageInput { + s.BillingProducts = v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *RegisterImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RegisterImageInput { + s.BlockDeviceMappings = v + return s +} + +// SetDescription sets the Description field's value. +func (s *RegisterImageInput) SetDescription(v string) *RegisterImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RegisterImageInput) SetDryRun(v bool) *RegisterImageInput { + s.DryRun = &v + return s +} + +// SetEnaSupport sets the EnaSupport field's value. +func (s *RegisterImageInput) SetEnaSupport(v bool) *RegisterImageInput { + s.EnaSupport = &v + return s +} + +// SetImageLocation sets the ImageLocation field's value. 
+func (s *RegisterImageInput) SetImageLocation(v string) *RegisterImageInput { + s.ImageLocation = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *RegisterImageInput) SetKernelId(v string) *RegisterImageInput { + s.KernelId = &v + return s +} + +// SetName sets the Name field's value. +func (s *RegisterImageInput) SetName(v string) *RegisterImageInput { + s.Name = &v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *RegisterImageInput) SetRamdiskId(v string) *RegisterImageInput { + s.RamdiskId = &v + return s +} + +// SetRootDeviceName sets the RootDeviceName field's value. +func (s *RegisterImageInput) SetRootDeviceName(v string) *RegisterImageInput { + s.RootDeviceName = &v + return s +} + +// SetSriovNetSupport sets the SriovNetSupport field's value. +func (s *RegisterImageInput) SetSriovNetSupport(v string) *RegisterImageInput { + s.SriovNetSupport = &v + return s +} + +// SetVirtualizationType sets the VirtualizationType field's value. +func (s *RegisterImageInput) SetVirtualizationType(v string) *RegisterImageInput { + s.VirtualizationType = &v + return s +} + +// Contains the output of RegisterImage. +type RegisterImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the newly registered AMI. + ImageId *string `locationName:"imageId" type:"string"` +} + +// String returns the string representation +func (s RegisterImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterImageOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *RegisterImageOutput) SetImageId(v string) *RegisterImageOutput { + s.ImageId = &v + return s +} + +type RegisterInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the tag keys to register. + InstanceTagAttribute *RegisterInstanceTagAttributeRequest `type:"structure"` +} + +// String returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *RegisterInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *RegisterInstanceTagAttributeRequest) *RegisterInstanceEventNotificationAttributesInput { + s.InstanceTagAttribute = v + return s +} + +type RegisterInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // The resulting set of tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s RegisterInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. 
+func (s *RegisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *RegisterInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + +// Information about the tag keys to register for the current Region. You can +// either specify individual tag keys or register all tag keys in the current +// Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys +// in the request +type RegisterInstanceTagAttributeRequest struct { + _ struct{} `type:"structure"` + + // Indicates whether to register all tag keys in the current Region. Specify + // true to register all tag keys. + IncludeAllTagsOfInstance *bool `type:"boolean"` + + // The tag keys to register. + InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RegisterInstanceTagAttributeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceTagAttributeRequest) GoString() string { + return s.String() +} + +// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value. +func (s *RegisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *RegisterInstanceTagAttributeRequest { + s.IncludeAllTagsOfInstance = &v + return s +} + +// SetInstanceTagKeys sets the InstanceTagKeys field's value. +func (s *RegisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *RegisterInstanceTagAttributeRequest { + s.InstanceTagKeys = v + return s +} + +type RegisterTransitGatewayMulticastGroupMembersInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
+ DryRun *bool `type:"boolean"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `type:"string"` + + // The group members' network interface IDs to register with the transit gateway + // multicast group. + NetworkInterfaceIds []*string `locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterTransitGatewayMulticastGroupMembersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTransitGatewayMulticastGroupMembersInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *RegisterTransitGatewayMulticastGroupMembersInput) SetDryRun(v bool) *RegisterTransitGatewayMulticastGroupMembersInput { + s.DryRun = &v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *RegisterTransitGatewayMulticastGroupMembersInput) SetGroupIpAddress(v string) *RegisterTransitGatewayMulticastGroupMembersInput { + s.GroupIpAddress = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *RegisterTransitGatewayMulticastGroupMembersInput) SetNetworkInterfaceIds(v []*string) *RegisterTransitGatewayMulticastGroupMembersInput { + s.NetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *RegisterTransitGatewayMulticastGroupMembersInput) SetTransitGatewayMulticastDomainId(v string) *RegisterTransitGatewayMulticastGroupMembersInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type RegisterTransitGatewayMulticastGroupMembersOutput struct { + _ struct{} `type:"structure"` + + // Information about the registered transit gateway multicast group members. 
+ RegisteredMulticastGroupMembers *TransitGatewayMulticastRegisteredGroupMembers `locationName:"registeredMulticastGroupMembers" type:"structure"` +} + +// String returns the string representation +func (s RegisterTransitGatewayMulticastGroupMembersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTransitGatewayMulticastGroupMembersOutput) GoString() string { + return s.String() +} + +// SetRegisteredMulticastGroupMembers sets the RegisteredMulticastGroupMembers field's value. +func (s *RegisterTransitGatewayMulticastGroupMembersOutput) SetRegisteredMulticastGroupMembers(v *TransitGatewayMulticastRegisteredGroupMembers) *RegisterTransitGatewayMulticastGroupMembersOutput { + s.RegisteredMulticastGroupMembers = v + return s +} + +type RegisterTransitGatewayMulticastGroupSourcesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `type:"string"` + + // The group sources' network interface IDs to register with the transit gateway + // multicast group. + NetworkInterfaceIds []*string `locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s RegisterTransitGatewayMulticastGroupSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTransitGatewayMulticastGroupSourcesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. 
+func (s *RegisterTransitGatewayMulticastGroupSourcesInput) SetDryRun(v bool) *RegisterTransitGatewayMulticastGroupSourcesInput { + s.DryRun = &v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *RegisterTransitGatewayMulticastGroupSourcesInput) SetGroupIpAddress(v string) *RegisterTransitGatewayMulticastGroupSourcesInput { + s.GroupIpAddress = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *RegisterTransitGatewayMulticastGroupSourcesInput) SetNetworkInterfaceIds(v []*string) *RegisterTransitGatewayMulticastGroupSourcesInput { + s.NetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *RegisterTransitGatewayMulticastGroupSourcesInput) SetTransitGatewayMulticastDomainId(v string) *RegisterTransitGatewayMulticastGroupSourcesInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type RegisterTransitGatewayMulticastGroupSourcesOutput struct { + _ struct{} `type:"structure"` + + // Information about the transit gateway multicast group sources. + RegisteredMulticastGroupSources *TransitGatewayMulticastRegisteredGroupSources `locationName:"registeredMulticastGroupSources" type:"structure"` +} + +// String returns the string representation +func (s RegisterTransitGatewayMulticastGroupSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTransitGatewayMulticastGroupSourcesOutput) GoString() string { + return s.String() +} + +// SetRegisteredMulticastGroupSources sets the RegisteredMulticastGroupSources field's value. 
+func (s *RegisterTransitGatewayMulticastGroupSourcesOutput) SetRegisteredMulticastGroupSources(v *TransitGatewayMulticastRegisteredGroupSources) *RegisterTransitGatewayMulticastGroupSourcesOutput { + s.RegisteredMulticastGroupSources = v + return s +} + +type RejectTransitGatewayPeeringAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the transit gateway peering attachment. + // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectTransitGatewayPeeringAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectTransitGatewayPeeringAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectTransitGatewayPeeringAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectTransitGatewayPeeringAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RejectTransitGatewayPeeringAttachmentInput) SetDryRun(v bool) *RejectTransitGatewayPeeringAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. 
+func (s *RejectTransitGatewayPeeringAttachmentInput) SetTransitGatewayAttachmentId(v string) *RejectTransitGatewayPeeringAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type RejectTransitGatewayPeeringAttachmentOutput struct { + _ struct{} `type:"structure"` + + // The transit gateway peering attachment. + TransitGatewayPeeringAttachment *TransitGatewayPeeringAttachment `locationName:"transitGatewayPeeringAttachment" type:"structure"` +} + +// String returns the string representation +func (s RejectTransitGatewayPeeringAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectTransitGatewayPeeringAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPeeringAttachment sets the TransitGatewayPeeringAttachment field's value. +func (s *RejectTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAttachment(v *TransitGatewayPeeringAttachment) *RejectTransitGatewayPeeringAttachmentOutput { + s.TransitGatewayPeeringAttachment = v + return s +} + +type RejectTransitGatewayVpcAttachmentInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the attachment. 
+ // + // TransitGatewayAttachmentId is a required field + TransitGatewayAttachmentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectTransitGatewayVpcAttachmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectTransitGatewayVpcAttachmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectTransitGatewayVpcAttachmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectTransitGatewayVpcAttachmentInput"} + if s.TransitGatewayAttachmentId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayAttachmentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RejectTransitGatewayVpcAttachmentInput) SetDryRun(v bool) *RejectTransitGatewayVpcAttachmentInput { + s.DryRun = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *RejectTransitGatewayVpcAttachmentInput) SetTransitGatewayAttachmentId(v string) *RejectTransitGatewayVpcAttachmentInput { + s.TransitGatewayAttachmentId = &v + return s +} + +type RejectTransitGatewayVpcAttachmentOutput struct { + _ struct{} `type:"structure"` + + // Information about the attachment. + TransitGatewayVpcAttachment *TransitGatewayVpcAttachment `locationName:"transitGatewayVpcAttachment" type:"structure"` +} + +// String returns the string representation +func (s RejectTransitGatewayVpcAttachmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectTransitGatewayVpcAttachmentOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayVpcAttachment sets the TransitGatewayVpcAttachment field's value. 
+func (s *RejectTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment(v *TransitGatewayVpcAttachment) *RejectTransitGatewayVpcAttachmentOutput { + s.TransitGatewayVpcAttachment = v + return s +} + +type RejectVpcEndpointConnectionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the service. + // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` + + // The IDs of one or more VPC endpoints. + // + // VpcEndpointIds is a required field + VpcEndpointIds []*string `locationName:"VpcEndpointId" locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s RejectVpcEndpointConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcEndpointConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectVpcEndpointConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectVpcEndpointConnectionsInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + if s.VpcEndpointIds == nil { + invalidParams.Add(request.NewErrParamRequired("VpcEndpointIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RejectVpcEndpointConnectionsInput) SetDryRun(v bool) *RejectVpcEndpointConnectionsInput { + s.DryRun = &v + return s +} + +// SetServiceId sets the ServiceId field's value. 
+func (s *RejectVpcEndpointConnectionsInput) SetServiceId(v string) *RejectVpcEndpointConnectionsInput { + s.ServiceId = &v + return s +} + +// SetVpcEndpointIds sets the VpcEndpointIds field's value. +func (s *RejectVpcEndpointConnectionsInput) SetVpcEndpointIds(v []*string) *RejectVpcEndpointConnectionsInput { + s.VpcEndpointIds = v + return s +} + +type RejectVpcEndpointConnectionsOutput struct { + _ struct{} `type:"structure"` + + // Information about the endpoints that were not rejected, if applicable. + Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RejectVpcEndpointConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcEndpointConnectionsOutput) GoString() string { + return s.String() +} + +// SetUnsuccessful sets the Unsuccessful field's value. +func (s *RejectVpcEndpointConnectionsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *RejectVpcEndpointConnectionsOutput { + s.Unsuccessful = v + return s +} + +type RejectVpcPeeringConnectionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The ID of the VPC peering connection. 
+ // + // VpcPeeringConnectionId is a required field + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectVpcPeeringConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectVpcPeeringConnectionInput"} + if s.VpcPeeringConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RejectVpcPeeringConnectionInput) SetDryRun(v bool) *RejectVpcPeeringConnectionInput { + s.DryRun = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *RejectVpcPeeringConnectionInput) SetVpcPeeringConnectionId(v string) *RejectVpcPeeringConnectionInput { + s.VpcPeeringConnectionId = &v + return s +} + +type RejectVpcPeeringConnectionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s RejectVpcPeeringConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectVpcPeeringConnectionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *RejectVpcPeeringConnectionOutput) SetReturn(v bool) *RejectVpcPeeringConnectionOutput { + s.Return = &v + return s +} + +type ReleaseAddressInput struct { + _ struct{} `type:"structure"` + + // [EC2-VPC] The allocation ID. Required for EC2-VPC. + AllocationId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The set of Availability Zones, Local Zones, or Wavelength Zones from which + // AWS advertises IP addresses. + // + // If you provide an incorrect network border group, you will receive an InvalidAddress.NotFound + // error. For more information, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). + // + // You cannot use a network border group with EC2 Classic. If you attempt this + // operation on EC2 classic, you will receive an InvalidParameterCombination + // error. For more information, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). + NetworkBorderGroup *string `type:"string"` + + // [EC2-Classic] The Elastic IP address. Required for EC2-Classic. + PublicIp *string `type:"string"` +} + +// String returns the string representation +func (s ReleaseAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseAddressInput) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *ReleaseAddressInput) SetAllocationId(v string) *ReleaseAddressInput { + s.AllocationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
func (s *ReleaseAddressInput) SetDryRun(v bool) *ReleaseAddressInput {
	s.DryRun = &v
	return s
}

// SetNetworkBorderGroup sets the NetworkBorderGroup field's value.
func (s *ReleaseAddressInput) SetNetworkBorderGroup(v string) *ReleaseAddressInput {
	s.NetworkBorderGroup = &v
	return s
}

// SetPublicIp sets the PublicIp field's value.
func (s *ReleaseAddressInput) SetPublicIp(v string) *ReleaseAddressInput {
	s.PublicIp = &v
	return s
}

// Contains the output of ReleaseAddress.
type ReleaseAddressOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ReleaseAddressOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReleaseAddressOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ReleaseHosts.
type ReleaseHostsInput struct {
	_ struct{} `type:"structure"`

	// The IDs of the Dedicated Hosts to release.
	//
	// HostIds is a required field
	HostIds []*string `locationName:"hostId" locationNameList:"item" type:"list" required:"true"`
}

// String returns the string representation
func (s ReleaseHostsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReleaseHostsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming any unset required field,
// or nil when the input is complete.
func (s *ReleaseHostsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReleaseHostsInput"}
	if s.HostIds == nil {
		invalidParams.Add(request.NewErrParamRequired("HostIds"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetHostIds sets the HostIds field's value.
func (s *ReleaseHostsInput) SetHostIds(v []*string) *ReleaseHostsInput {
	s.HostIds = v
	return s
}

// Contains the output of ReleaseHosts.
type ReleaseHostsOutput struct {
	_ struct{} `type:"structure"`

	// The IDs of the Dedicated Hosts that were successfully released.
	Successful []*string `locationName:"successful" locationNameList:"item" type:"list"`

	// The IDs of the Dedicated Hosts that could not be released, including an error
	// message.
	Unsuccessful []*UnsuccessfulItem `locationName:"unsuccessful" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s ReleaseHostsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReleaseHostsOutput) GoString() string {
	return s.String()
}

// SetSuccessful sets the Successful field's value.
func (s *ReleaseHostsOutput) SetSuccessful(v []*string) *ReleaseHostsOutput {
	s.Successful = v
	return s
}

// SetUnsuccessful sets the Unsuccessful field's value.
func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHostsOutput {
	s.Unsuccessful = v
	return s
}

// An entry for a prefix list.
type RemovePrefixListEntry struct {
	_ struct{} `type:"structure"`

	// The CIDR block.
	//
	// Cidr is a required field
	Cidr *string `type:"string" required:"true"`
}

// String returns the string representation
func (s RemovePrefixListEntry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RemovePrefixListEntry) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming any unset required field,
// or nil when the entry is complete.
func (s *RemovePrefixListEntry) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RemovePrefixListEntry"}
	if s.Cidr == nil {
		invalidParams.Add(request.NewErrParamRequired("Cidr"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCidr sets the Cidr field's value.
func (s *RemovePrefixListEntry) SetCidr(v string) *RemovePrefixListEntry {
	s.Cidr = &v
	return s
}

// Contains the parameters for ReplaceIamInstanceProfileAssociation.
type ReplaceIamInstanceProfileAssociationInput struct {
	_ struct{} `type:"structure"`

	// The ID of the existing IAM instance profile association.
	//
	// AssociationId is a required field
	AssociationId *string `type:"string" required:"true"`

	// The IAM instance profile.
	//
	// IamInstanceProfile is a required field
	IamInstanceProfile *IamInstanceProfileSpecification `type:"structure" required:"true"`
}

// String returns the string representation
func (s ReplaceIamInstanceProfileAssociationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceIamInstanceProfileAssociationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming every unset required field,
// or nil when the input is complete.
func (s *ReplaceIamInstanceProfileAssociationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReplaceIamInstanceProfileAssociationInput"}
	if s.AssociationId == nil {
		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
	}
	if s.IamInstanceProfile == nil {
		invalidParams.Add(request.NewErrParamRequired("IamInstanceProfile"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAssociationId sets the AssociationId field's value.
func (s *ReplaceIamInstanceProfileAssociationInput) SetAssociationId(v string) *ReplaceIamInstanceProfileAssociationInput {
	s.AssociationId = &v
	return s
}

// SetIamInstanceProfile sets the IamInstanceProfile field's value.
func (s *ReplaceIamInstanceProfileAssociationInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *ReplaceIamInstanceProfileAssociationInput {
	s.IamInstanceProfile = v
	return s
}

// Contains the output of ReplaceIamInstanceProfileAssociation.
type ReplaceIamInstanceProfileAssociationOutput struct {
	_ struct{} `type:"structure"`

	// Information about the IAM instance profile association.
	IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"`
}

// String returns the string representation
func (s ReplaceIamInstanceProfileAssociationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceIamInstanceProfileAssociationOutput) GoString() string {
	return s.String()
}

// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value.
func (s *ReplaceIamInstanceProfileAssociationOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *ReplaceIamInstanceProfileAssociationOutput {
	s.IamInstanceProfileAssociation = v
	return s
}

// Contains the parameters for ReplaceNetworkAclAssociation.
type ReplaceNetworkAclAssociationInput struct {
	_ struct{} `type:"structure"`

	// The ID of the current association between the original network ACL and the
	// subnet.
	//
	// AssociationId is a required field
	AssociationId *string `locationName:"associationId" type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the new network ACL to associate with the subnet.
	//
	// NetworkAclId is a required field
	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`
}

// String returns the string representation
func (s ReplaceNetworkAclAssociationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceNetworkAclAssociationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming every unset required field,
// or nil when the input is complete.
func (s *ReplaceNetworkAclAssociationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclAssociationInput"}
	if s.AssociationId == nil {
		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
	}
	if s.NetworkAclId == nil {
		invalidParams.Add(request.NewErrParamRequired("NetworkAclId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAssociationId sets the AssociationId field's value.
func (s *ReplaceNetworkAclAssociationInput) SetAssociationId(v string) *ReplaceNetworkAclAssociationInput {
	s.AssociationId = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ReplaceNetworkAclAssociationInput) SetDryRun(v bool) *ReplaceNetworkAclAssociationInput {
	s.DryRun = &v
	return s
}

// SetNetworkAclId sets the NetworkAclId field's value.
func (s *ReplaceNetworkAclAssociationInput) SetNetworkAclId(v string) *ReplaceNetworkAclAssociationInput {
	s.NetworkAclId = &v
	return s
}

// Contains the output of ReplaceNetworkAclAssociation.
type ReplaceNetworkAclAssociationOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the new association.
	NewAssociationId *string `locationName:"newAssociationId" type:"string"`
}

// String returns the string representation
func (s ReplaceNetworkAclAssociationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceNetworkAclAssociationOutput) GoString() string {
	return s.String()
}

// SetNewAssociationId sets the NewAssociationId field's value.
func (s *ReplaceNetworkAclAssociationOutput) SetNewAssociationId(v string) *ReplaceNetworkAclAssociationOutput {
	s.NewAssociationId = &v
	return s
}

// Contains the parameters for ReplaceNetworkAclEntry.
type ReplaceNetworkAclEntryInput struct {
	_ struct{} `type:"structure"`

	// The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).
	CidrBlock *string `locationName:"cidrBlock" type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// Indicates whether to replace the egress rule.
	//
	// Default: If no value is specified, we replace the ingress rule.
	//
	// Egress is a required field
	Egress *bool `locationName:"egress" type:"boolean" required:"true"`

	// ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol
	// 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block.
	IcmpTypeCode *IcmpTypeCode `locationName:"Icmp" type:"structure"`

	// The IPv6 network range to allow or deny, in CIDR notation (for example 2001:bd8:1234:1a00::/64).
	Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"`

	// The ID of the ACL.
	//
	// NetworkAclId is a required field
	NetworkAclId *string `locationName:"networkAclId" type:"string" required:"true"`

	// TCP or UDP protocols: The range of ports the rule applies to. Required if
	// specifying protocol 6 (TCP) or 17 (UDP).
	PortRange *PortRange `locationName:"portRange" type:"structure"`

	// The protocol number. A value of "-1" means all protocols. If you specify
	// "-1" or a protocol number other than "6" (TCP), "17" (UDP), or "1" (ICMP),
	// traffic on all ports is allowed, regardless of any ports or ICMP types or
	// codes that you specify. If you specify protocol "58" (ICMPv6) and specify
	// an IPv4 CIDR block, traffic for all ICMP types and codes allowed, regardless
	// of any that you specify. If you specify protocol "58" (ICMPv6) and specify
	// an IPv6 CIDR block, you must specify an ICMP type and code.
	//
	// Protocol is a required field
	Protocol *string `locationName:"protocol" type:"string" required:"true"`

	// Indicates whether to allow or deny the traffic that matches the rule.
	//
	// RuleAction is a required field
	RuleAction *string `locationName:"ruleAction" type:"string" required:"true" enum:"RuleAction"`

	// The rule number of the entry to replace.
	//
	// RuleNumber is a required field
	RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"`
}

// String returns the string representation
func (s ReplaceNetworkAclEntryInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceNetworkAclEntryInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceNetworkAclEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceNetworkAclEntryInput"} + if s.Egress == nil { + invalidParams.Add(request.NewErrParamRequired("Egress")) + } + if s.NetworkAclId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkAclId")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.RuleAction == nil { + invalidParams.Add(request.NewErrParamRequired("RuleAction")) + } + if s.RuleNumber == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *ReplaceNetworkAclEntryInput) SetCidrBlock(v string) *ReplaceNetworkAclEntryInput { + s.CidrBlock = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ReplaceNetworkAclEntryInput) SetDryRun(v bool) *ReplaceNetworkAclEntryInput { + s.DryRun = &v + return s +} + +// SetEgress sets the Egress field's value. +func (s *ReplaceNetworkAclEntryInput) SetEgress(v bool) *ReplaceNetworkAclEntryInput { + s.Egress = &v + return s +} + +// SetIcmpTypeCode sets the IcmpTypeCode field's value. +func (s *ReplaceNetworkAclEntryInput) SetIcmpTypeCode(v *IcmpTypeCode) *ReplaceNetworkAclEntryInput { + s.IcmpTypeCode = v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *ReplaceNetworkAclEntryInput) SetIpv6CidrBlock(v string) *ReplaceNetworkAclEntryInput { + s.Ipv6CidrBlock = &v + return s +} + +// SetNetworkAclId sets the NetworkAclId field's value. +func (s *ReplaceNetworkAclEntryInput) SetNetworkAclId(v string) *ReplaceNetworkAclEntryInput { + s.NetworkAclId = &v + return s +} + +// SetPortRange sets the PortRange field's value. 
func (s *ReplaceNetworkAclEntryInput) SetPortRange(v *PortRange) *ReplaceNetworkAclEntryInput {
	s.PortRange = v
	return s
}

// SetProtocol sets the Protocol field's value.
func (s *ReplaceNetworkAclEntryInput) SetProtocol(v string) *ReplaceNetworkAclEntryInput {
	s.Protocol = &v
	return s
}

// SetRuleAction sets the RuleAction field's value.
func (s *ReplaceNetworkAclEntryInput) SetRuleAction(v string) *ReplaceNetworkAclEntryInput {
	s.RuleAction = &v
	return s
}

// SetRuleNumber sets the RuleNumber field's value.
func (s *ReplaceNetworkAclEntryInput) SetRuleNumber(v int64) *ReplaceNetworkAclEntryInput {
	s.RuleNumber = &v
	return s
}

// Contains the output of ReplaceNetworkAclEntry.
type ReplaceNetworkAclEntryOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ReplaceNetworkAclEntryOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceNetworkAclEntryOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ReplaceRoute.
type ReplaceRouteInput struct {
	_ struct{} `type:"structure"`

	// [IPv4 traffic only] The ID of a carrier gateway.
	CarrierGatewayId *string `type:"string"`

	// The IPv4 CIDR address block used for the destination match. The value that
	// you provide must match the CIDR of an existing route in the table.
	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`

	// The IPv6 CIDR address block used for the destination match. The value that
	// you provide must match the CIDR of an existing route in the table.
	DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"`

	// The ID of the prefix list for the route.
	DestinationPrefixListId *string `type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// [IPv6 traffic only] The ID of an egress-only internet gateway.
	EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"`

	// The ID of an internet gateway or virtual private gateway.
	GatewayId *string `locationName:"gatewayId" type:"string"`

	// The ID of a NAT instance in your VPC.
	InstanceId *string `locationName:"instanceId" type:"string"`

	// The ID of the local gateway.
	LocalGatewayId *string `type:"string"`

	// Specifies whether to reset the local route to its default target (local).
	LocalTarget *bool `type:"boolean"`

	// [IPv4 traffic only] The ID of a NAT gateway.
	NatGatewayId *string `locationName:"natGatewayId" type:"string"`

	// The ID of a network interface.
	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"`

	// The ID of the route table.
	//
	// RouteTableId is a required field
	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`

	// The ID of a transit gateway.
	TransitGatewayId *string `type:"string"`

	// The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.
	VpcEndpointId *string `type:"string"`

	// The ID of a VPC peering connection.
	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
}

// String returns the string representation
func (s ReplaceRouteInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceRouteInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplaceRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteInput"} + if s.RouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("RouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *ReplaceRouteInput) SetCarrierGatewayId(v string) *ReplaceRouteInput { + s.CarrierGatewayId = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *ReplaceRouteInput) SetDestinationCidrBlock(v string) *ReplaceRouteInput { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationIpv6CidrBlock sets the DestinationIpv6CidrBlock field's value. +func (s *ReplaceRouteInput) SetDestinationIpv6CidrBlock(v string) *ReplaceRouteInput { + s.DestinationIpv6CidrBlock = &v + return s +} + +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *ReplaceRouteInput) SetDestinationPrefixListId(v string) *ReplaceRouteInput { + s.DestinationPrefixListId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ReplaceRouteInput) SetDryRun(v bool) *ReplaceRouteInput { + s.DryRun = &v + return s +} + +// SetEgressOnlyInternetGatewayId sets the EgressOnlyInternetGatewayId field's value. +func (s *ReplaceRouteInput) SetEgressOnlyInternetGatewayId(v string) *ReplaceRouteInput { + s.EgressOnlyInternetGatewayId = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *ReplaceRouteInput) SetGatewayId(v string) *ReplaceRouteInput { + s.GatewayId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ReplaceRouteInput) SetInstanceId(v string) *ReplaceRouteInput { + s.InstanceId = &v + return s +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. 
func (s *ReplaceRouteInput) SetLocalGatewayId(v string) *ReplaceRouteInput {
	s.LocalGatewayId = &v
	return s
}

// SetLocalTarget sets the LocalTarget field's value.
func (s *ReplaceRouteInput) SetLocalTarget(v bool) *ReplaceRouteInput {
	s.LocalTarget = &v
	return s
}

// SetNatGatewayId sets the NatGatewayId field's value.
func (s *ReplaceRouteInput) SetNatGatewayId(v string) *ReplaceRouteInput {
	s.NatGatewayId = &v
	return s
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *ReplaceRouteInput) SetNetworkInterfaceId(v string) *ReplaceRouteInput {
	s.NetworkInterfaceId = &v
	return s
}

// SetRouteTableId sets the RouteTableId field's value.
func (s *ReplaceRouteInput) SetRouteTableId(v string) *ReplaceRouteInput {
	s.RouteTableId = &v
	return s
}

// SetTransitGatewayId sets the TransitGatewayId field's value.
func (s *ReplaceRouteInput) SetTransitGatewayId(v string) *ReplaceRouteInput {
	s.TransitGatewayId = &v
	return s
}

// SetVpcEndpointId sets the VpcEndpointId field's value.
func (s *ReplaceRouteInput) SetVpcEndpointId(v string) *ReplaceRouteInput {
	s.VpcEndpointId = &v
	return s
}

// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
func (s *ReplaceRouteInput) SetVpcPeeringConnectionId(v string) *ReplaceRouteInput {
	s.VpcPeeringConnectionId = &v
	return s
}

// Contains the output of ReplaceRoute.
type ReplaceRouteOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ReplaceRouteOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceRouteOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ReplaceRouteTableAssociation.
type ReplaceRouteTableAssociationInput struct {
	_ struct{} `type:"structure"`

	// The association ID.
	//
	// AssociationId is a required field
	AssociationId *string `locationName:"associationId" type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the new route table to associate with the subnet.
	//
	// RouteTableId is a required field
	RouteTableId *string `locationName:"routeTableId" type:"string" required:"true"`
}

// String returns the string representation
func (s ReplaceRouteTableAssociationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceRouteTableAssociationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming every unset required field,
// or nil when the input is complete.
func (s *ReplaceRouteTableAssociationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReplaceRouteTableAssociationInput"}
	if s.AssociationId == nil {
		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
	}
	if s.RouteTableId == nil {
		invalidParams.Add(request.NewErrParamRequired("RouteTableId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAssociationId sets the AssociationId field's value.
func (s *ReplaceRouteTableAssociationInput) SetAssociationId(v string) *ReplaceRouteTableAssociationInput {
	s.AssociationId = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ReplaceRouteTableAssociationInput) SetDryRun(v bool) *ReplaceRouteTableAssociationInput {
	s.DryRun = &v
	return s
}

// SetRouteTableId sets the RouteTableId field's value.
func (s *ReplaceRouteTableAssociationInput) SetRouteTableId(v string) *ReplaceRouteTableAssociationInput {
	s.RouteTableId = &v
	return s
}

// Contains the output of ReplaceRouteTableAssociation.
type ReplaceRouteTableAssociationOutput struct {
	_ struct{} `type:"structure"`

	// The state of the association.
	AssociationState *RouteTableAssociationState `locationName:"associationState" type:"structure"`

	// The ID of the new association.
	NewAssociationId *string `locationName:"newAssociationId" type:"string"`
}

// String returns the string representation
func (s ReplaceRouteTableAssociationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceRouteTableAssociationOutput) GoString() string {
	return s.String()
}

// SetAssociationState sets the AssociationState field's value.
func (s *ReplaceRouteTableAssociationOutput) SetAssociationState(v *RouteTableAssociationState) *ReplaceRouteTableAssociationOutput {
	s.AssociationState = v
	return s
}

// SetNewAssociationId sets the NewAssociationId field's value.
func (s *ReplaceRouteTableAssociationOutput) SetNewAssociationId(v string) *ReplaceRouteTableAssociationOutput {
	s.NewAssociationId = &v
	return s
}

// Contains the parameters for ReplaceTransitGatewayRoute.
type ReplaceTransitGatewayRouteInput struct {
	_ struct{} `type:"structure"`

	// Indicates whether traffic matching this route is to be dropped.
	Blackhole *bool `type:"boolean"`

	// The CIDR range used for the destination match. Routing decisions are based
	// on the most specific match.
	//
	// DestinationCidrBlock is a required field
	DestinationCidrBlock *string `type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the attachment.
	TransitGatewayAttachmentId *string `type:"string"`

	// The ID of the route table.
	//
	// TransitGatewayRouteTableId is a required field
	TransitGatewayRouteTableId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s ReplaceTransitGatewayRouteInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceTransitGatewayRouteInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming every unset required field,
// or nil when the input is complete.
func (s *ReplaceTransitGatewayRouteInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReplaceTransitGatewayRouteInput"}
	if s.DestinationCidrBlock == nil {
		invalidParams.Add(request.NewErrParamRequired("DestinationCidrBlock"))
	}
	if s.TransitGatewayRouteTableId == nil {
		invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBlackhole sets the Blackhole field's value.
func (s *ReplaceTransitGatewayRouteInput) SetBlackhole(v bool) *ReplaceTransitGatewayRouteInput {
	s.Blackhole = &v
	return s
}

// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
func (s *ReplaceTransitGatewayRouteInput) SetDestinationCidrBlock(v string) *ReplaceTransitGatewayRouteInput {
	s.DestinationCidrBlock = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ReplaceTransitGatewayRouteInput) SetDryRun(v bool) *ReplaceTransitGatewayRouteInput {
	s.DryRun = &v
	return s
}

// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value.
func (s *ReplaceTransitGatewayRouteInput) SetTransitGatewayAttachmentId(v string) *ReplaceTransitGatewayRouteInput {
	s.TransitGatewayAttachmentId = &v
	return s
}

// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value.
func (s *ReplaceTransitGatewayRouteInput) SetTransitGatewayRouteTableId(v string) *ReplaceTransitGatewayRouteInput {
	s.TransitGatewayRouteTableId = &v
	return s
}

// Contains the output of ReplaceTransitGatewayRoute.
type ReplaceTransitGatewayRouteOutput struct {
	_ struct{} `type:"structure"`

	// Information about the modified route.
	Route *TransitGatewayRoute `locationName:"route" type:"structure"`
}

// String returns the string representation
func (s ReplaceTransitGatewayRouteOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReplaceTransitGatewayRouteOutput) GoString() string {
	return s.String()
}

// SetRoute sets the Route field's value.
func (s *ReplaceTransitGatewayRouteOutput) SetRoute(v *TransitGatewayRoute) *ReplaceTransitGatewayRouteOutput {
	s.Route = v
	return s
}

// Contains the parameters for ReportInstanceStatus.
type ReportInstanceStatusInput struct {
	_ struct{} `type:"structure"`

	// Descriptive text about the health state of your instance.
	Description *string `locationName:"description" type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The time at which the reported instance health state ended.
	EndTime *time.Time `locationName:"endTime" type:"timestamp"`

	// The instances.
	//
	// Instances is a required field
	Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"`

	// The reason codes that describe the health state of your instance.
	//
	//    * instance-stuck-in-state: My instance is stuck in a state.
	//
	//    * unresponsive: My instance is unresponsive.
	//
	//    * not-accepting-credentials: My instance is not accepting my credentials.
	//
	//    * password-not-available: A password is not available for my instance.
	//
	//    * performance-network: My instance is experiencing performance problems
	//    that I believe are network related.
	//
	//    * performance-instance-store: My instance is experiencing performance
	//    problems that I believe are related to the instance stores.
	//
	//    * performance-ebs-volume: My instance is experiencing performance problems
	//    that I believe are related to an EBS volume.
	//
	//    * performance-other: My instance is experiencing performance problems.
	//
	//    * other: [explain using the description parameter]
	//
	// ReasonCodes is a required field
	ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"`

	// The time at which the reported instance health state began.
	StartTime *time.Time `locationName:"startTime" type:"timestamp"`

	// The status of all instances listed.
	//
	// Status is a required field
	Status *string `locationName:"status" type:"string" required:"true" enum:"ReportStatusType"`
}

// String returns the string representation
func (s ReportInstanceStatusInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReportInstanceStatusInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams naming every unset required field,
// or nil when the input is complete.
func (s *ReportInstanceStatusInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReportInstanceStatusInput"}
	if s.Instances == nil {
		invalidParams.Add(request.NewErrParamRequired("Instances"))
	}
	if s.ReasonCodes == nil {
		invalidParams.Add(request.NewErrParamRequired("ReasonCodes"))
	}
	if s.Status == nil {
		invalidParams.Add(request.NewErrParamRequired("Status"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDescription sets the Description field's value.
+func (s *ReportInstanceStatusInput) SetDescription(v string) *ReportInstanceStatusInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ReportInstanceStatusInput) SetDryRun(v bool) *ReportInstanceStatusInput { + s.DryRun = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ReportInstanceStatusInput) SetEndTime(v time.Time) *ReportInstanceStatusInput { + s.EndTime = &v + return s +} + +// SetInstances sets the Instances field's value. +func (s *ReportInstanceStatusInput) SetInstances(v []*string) *ReportInstanceStatusInput { + s.Instances = v + return s +} + +// SetReasonCodes sets the ReasonCodes field's value. +func (s *ReportInstanceStatusInput) SetReasonCodes(v []*string) *ReportInstanceStatusInput { + s.ReasonCodes = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ReportInstanceStatusInput) SetStartTime(v time.Time) *ReportInstanceStatusInput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReportInstanceStatusInput) SetStatus(v string) *ReportInstanceStatusInput { + s.Status = &v + return s +} + +type ReportInstanceStatusOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReportInstanceStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportInstanceStatusOutput) GoString() string { + return s.String() +} + +// The information to include in the launch template. +type RequestLaunchTemplateData struct { + _ struct{} `type:"structure"` + + // The block device mapping. + BlockDeviceMappings []*LaunchTemplateBlockDeviceMappingRequest `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // The Capacity Reservation targeting option. 
If you do not specify this parameter, + // the instance's Capacity Reservation preference defaults to open, which enables + // it to run in any open Capacity Reservation that has matching attributes (instance + // type, platform, Availability Zone). + CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationRequest `type:"structure"` + + // The CPU options for the instance. For more information, see Optimizing CPU + // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon Elastic Compute Cloud User Guide. + CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` + + // The credit option for CPU usage of the instance. Valid for T2, T3, or T3a + // instances only. + CreditSpecification *CreditSpecificationRequest `type:"structure"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute + // after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + // Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, + // you can terminate the instance by running the shutdown command from the instance. + DisableApiTermination *bool `type:"boolean"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal Amazon EBS I/O performance. This optimization isn't + // available with all instance types. Additional usage charges apply when using + // an EBS-optimized instance. + EbsOptimized *bool `type:"boolean"` + + // An elastic GPU to associate with the instance. 
+ ElasticGpuSpecifications []*ElasticGpuSpecification `locationName:"ElasticGpuSpecification" locationNameList:"ElasticGpuSpecification" type:"list"` + + // The elastic inference accelerator for the instance. + ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *LaunchTemplateEnclaveOptionsRequest `type:"structure"` + + // Indicates whether an instance is enabled for hibernation. This parameter + // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). + // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // in the Amazon Elastic Compute Cloud User Guide. + HibernationOptions *LaunchTemplateHibernationOptionsRequest `type:"structure"` + + // The IAM instance profile. + IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"` + + // The ID of the AMI. + ImageId *string `type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `type:"string" enum:"ShutdownBehavior"` + + // The market (purchasing) option for the instances. + InstanceMarketOptions *LaunchTemplateInstanceMarketOptionsRequest `type:"structure"` + + // The instance type. 
For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + InstanceType *string `type:"string" enum:"InstanceType"` + + // The ID of the kernel. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + // information, see User Provided Kernels (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) + // or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The license configurations. + LicenseSpecifications []*LaunchTemplateLicenseConfigurationRequest `locationName:"LicenseSpecification" locationNameList:"item" type:"list"` + + // The metadata options for the instance. For more information, see Instance + // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // in the Amazon Elastic Compute Cloud User Guide. + MetadataOptions *LaunchTemplateInstanceMetadataOptionsRequest `type:"structure"` + + // The monitoring for the instance. + Monitoring *LaunchTemplatesMonitoringRequest `type:"structure"` + + // One or more network interfaces. If you specify a network interface, you must + // specify any security groups and subnets as part of the network interface. 
+ NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest `locationName:"NetworkInterface" locationNameList:"InstanceNetworkInterfaceSpecification" type:"list"` + + // The placement for the instance. + Placement *LaunchTemplatePlacementRequest `type:"structure"` + + // The ID of the RAM disk. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + // information, see User Provided Kernels (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + RamDiskId *string `type:"string"` + + // One or more security group IDs. You can create a security group using CreateSecurityGroup + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). + // You cannot specify both a security group ID and security name in the same + // request. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // [EC2-Classic, default VPC] One or more security group names. For a nondefault + // VPC, you must use security group IDs instead. You cannot specify both a security + // group ID and security name in the same request. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` + + // The tags to apply to the resources during launch. You can only tag instances + // and volumes on launch. The specified tags are applied to all instances or + // volumes that are created during launch. To tag a resource after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"` + + // The Base64-encoded user data to make available to the instance. 
For more + // information, see Running Commands on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). + UserData *string `type:"string"` +} + +// String returns the string representation +func (s RequestLaunchTemplateData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestLaunchTemplateData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestLaunchTemplateData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestLaunchTemplateData"} + if s.CreditSpecification != nil { + if err := s.CreditSpecification.Validate(); err != nil { + invalidParams.AddNested("CreditSpecification", err.(request.ErrInvalidParams)) + } + } + if s.ElasticGpuSpecifications != nil { + for i, v := range s.ElasticGpuSpecifications { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ElasticGpuSpecifications", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ElasticInferenceAccelerators != nil { + for i, v := range s.ElasticInferenceAccelerators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ElasticInferenceAccelerators", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. 
+func (s *RequestLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateBlockDeviceMappingRequest) *RequestLaunchTemplateData { + s.BlockDeviceMappings = v + return s +} + +// SetCapacityReservationSpecification sets the CapacityReservationSpecification field's value. +func (s *RequestLaunchTemplateData) SetCapacityReservationSpecification(v *LaunchTemplateCapacityReservationSpecificationRequest) *RequestLaunchTemplateData { + s.CapacityReservationSpecification = v + return s +} + +// SetCpuOptions sets the CpuOptions field's value. +func (s *RequestLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptionsRequest) *RequestLaunchTemplateData { + s.CpuOptions = v + return s +} + +// SetCreditSpecification sets the CreditSpecification field's value. +func (s *RequestLaunchTemplateData) SetCreditSpecification(v *CreditSpecificationRequest) *RequestLaunchTemplateData { + s.CreditSpecification = v + return s +} + +// SetDisableApiTermination sets the DisableApiTermination field's value. +func (s *RequestLaunchTemplateData) SetDisableApiTermination(v bool) *RequestLaunchTemplateData { + s.DisableApiTermination = &v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *RequestLaunchTemplateData) SetEbsOptimized(v bool) *RequestLaunchTemplateData { + s.EbsOptimized = &v + return s +} + +// SetElasticGpuSpecifications sets the ElasticGpuSpecifications field's value. +func (s *RequestLaunchTemplateData) SetElasticGpuSpecifications(v []*ElasticGpuSpecification) *RequestLaunchTemplateData { + s.ElasticGpuSpecifications = v + return s +} + +// SetElasticInferenceAccelerators sets the ElasticInferenceAccelerators field's value. +func (s *RequestLaunchTemplateData) SetElasticInferenceAccelerators(v []*LaunchTemplateElasticInferenceAccelerator) *RequestLaunchTemplateData { + s.ElasticInferenceAccelerators = v + return s +} + +// SetEnclaveOptions sets the EnclaveOptions field's value. 
+func (s *RequestLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptionsRequest) *RequestLaunchTemplateData { + s.EnclaveOptions = v + return s +} + +// SetHibernationOptions sets the HibernationOptions field's value. +func (s *RequestLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptionsRequest) *RequestLaunchTemplateData { + s.HibernationOptions = v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *RequestLaunchTemplateData) SetIamInstanceProfile(v *LaunchTemplateIamInstanceProfileSpecificationRequest) *RequestLaunchTemplateData { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *RequestLaunchTemplateData) SetImageId(v string) *RequestLaunchTemplateData { + s.ImageId = &v + return s +} + +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value. +func (s *RequestLaunchTemplateData) SetInstanceInitiatedShutdownBehavior(v string) *RequestLaunchTemplateData { + s.InstanceInitiatedShutdownBehavior = &v + return s +} + +// SetInstanceMarketOptions sets the InstanceMarketOptions field's value. +func (s *RequestLaunchTemplateData) SetInstanceMarketOptions(v *LaunchTemplateInstanceMarketOptionsRequest) *RequestLaunchTemplateData { + s.InstanceMarketOptions = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *RequestLaunchTemplateData) SetInstanceType(v string) *RequestLaunchTemplateData { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *RequestLaunchTemplateData) SetKernelId(v string) *RequestLaunchTemplateData { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *RequestLaunchTemplateData) SetKeyName(v string) *RequestLaunchTemplateData { + s.KeyName = &v + return s +} + +// SetLicenseSpecifications sets the LicenseSpecifications field's value. 
+func (s *RequestLaunchTemplateData) SetLicenseSpecifications(v []*LaunchTemplateLicenseConfigurationRequest) *RequestLaunchTemplateData { + s.LicenseSpecifications = v + return s +} + +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *RequestLaunchTemplateData) SetMetadataOptions(v *LaunchTemplateInstanceMetadataOptionsRequest) *RequestLaunchTemplateData { + s.MetadataOptions = v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *RequestLaunchTemplateData) SetMonitoring(v *LaunchTemplatesMonitoringRequest) *RequestLaunchTemplateData { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *RequestLaunchTemplateData) SetNetworkInterfaces(v []*LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) *RequestLaunchTemplateData { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *RequestLaunchTemplateData) SetPlacement(v *LaunchTemplatePlacementRequest) *RequestLaunchTemplateData { + s.Placement = v + return s +} + +// SetRamDiskId sets the RamDiskId field's value. +func (s *RequestLaunchTemplateData) SetRamDiskId(v string) *RequestLaunchTemplateData { + s.RamDiskId = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *RequestLaunchTemplateData) SetSecurityGroupIds(v []*string) *RequestLaunchTemplateData { + s.SecurityGroupIds = v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *RequestLaunchTemplateData) SetSecurityGroups(v []*string) *RequestLaunchTemplateData { + s.SecurityGroups = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *RequestLaunchTemplateData) SetTagSpecifications(v []*LaunchTemplateTagSpecificationRequest) *RequestLaunchTemplateData { + s.TagSpecifications = v + return s +} + +// SetUserData sets the UserData field's value. 
+func (s *RequestLaunchTemplateData) SetUserData(v string) *RequestLaunchTemplateData { + s.UserData = &v + return s +} + +// Contains the parameters for RequestSpotFleet. +type RequestSpotFleetInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The configuration for the Spot Fleet request. + // + // SpotFleetRequestConfig is a required field + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure" required:"true"` +} + +// String returns the string representation +func (s RequestSpotFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestSpotFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestSpotFleetInput"} + if s.SpotFleetRequestConfig == nil { + invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestConfig")) + } + if s.SpotFleetRequestConfig != nil { + if err := s.SpotFleetRequestConfig.Validate(); err != nil { + invalidParams.AddNested("SpotFleetRequestConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *RequestSpotFleetInput) SetDryRun(v bool) *RequestSpotFleetInput { + s.DryRun = &v + return s +} + +// SetSpotFleetRequestConfig sets the SpotFleetRequestConfig field's value. 
+func (s *RequestSpotFleetInput) SetSpotFleetRequestConfig(v *SpotFleetRequestConfigData) *RequestSpotFleetInput { + s.SpotFleetRequestConfig = v + return s +} + +// Contains the output of RequestSpotFleet. +type RequestSpotFleetOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Spot Fleet request. + SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` +} + +// String returns the string representation +func (s RequestSpotFleetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotFleetOutput) GoString() string { + return s.String() +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *RequestSpotFleetOutput) SetSpotFleetRequestId(v string) *RequestSpotFleetOutput { + s.SpotFleetRequestId = &v + return s +} + +// Contains the parameters for RequestSpotInstances. +type RequestSpotInstancesInput struct { + _ struct{} `type:"structure"` + + // The user-specified name for a logical grouping of requests. + // + // When you specify an Availability Zone group in a Spot Instance request, all + // Spot Instances in the request are launched in the same Availability Zone. + // Instance proximity is maintained with this parameter, but the choice of Availability + // Zone is not. The group applies only to requests for Spot Instances of the + // same instance type. Any additional Spot Instance requests that are specified + // with the same Availability Zone group name are launched in that same Availability + // Zone, as long as at least one instance from the group is still active. 
+ // + // If there is no active instance running in the Availability Zone group that + // you specify for a new Spot Instance request (all instances are terminated, + // the request is expired, or the maximum price you specified falls below current + // Spot price), then Amazon EC2 launches the instance in any Availability Zone + // where the constraint can be met. Consequently, the subsequent set of Spot + // Instances could be placed in a different zone from the original request, + // even if you specified the same Availability Zone group. + // + // Default: Instances are launched in any available Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The required duration for the Spot Instances (also known as Spot blocks), + // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, + // or 360). + // + // The duration period starts as soon as your Spot Instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance + // for termination and provides a Spot Instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // You can't specify an Availability Zone group or a launch group if you specify + // a duration. + // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon EC2 User Guide for Linux Instances. 
+ ClientToken *string `locationName:"clientToken" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The maximum number of Spot Instances to launch. + // + // Default: 1 + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `type:"string" enum:"InstanceInterruptionBehavior"` + + // The instance launch group. Launch groups are Spot Instances that launch together + // and terminate together. + // + // Default: Instances are launched and terminated individually + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // The launch specification. + LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` + + // The maximum price per hour that you are willing to pay for a Spot Instance. + // The default is the On-Demand price. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The key-value pair for tagging the Spot Instance request on creation. The + // value for ResourceType must be spot-instances-request, otherwise the Spot + // Instance request fails. To tag the Spot Instance request after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The Spot Instance request type. + // + // Default: one-time + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request. 
If this is a one-time request, the request + // becomes active at this date and time and remains active until all instances + // launch, the request expires, or the request is canceled. If the request is + // persistent, the request becomes active at this date and time and remains + // active until it expires or is canceled. + // + // The specified start date and time cannot be equal to the current date and + // time. You must specify a start date and time that occurs after the current + // date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the ValidUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` +} + +// String returns the string representation +func (s RequestSpotInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestSpotInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestSpotInstancesInput"} + if s.LaunchSpecification != nil { + if err := s.LaunchSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value. 
+func (s *RequestSpotInstancesInput) SetAvailabilityZoneGroup(v string) *RequestSpotInstancesInput { + s.AvailabilityZoneGroup = &v + return s +} + +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value. +func (s *RequestSpotInstancesInput) SetBlockDurationMinutes(v int64) *RequestSpotInstancesInput { + s.BlockDurationMinutes = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *RequestSpotInstancesInput) SetClientToken(v string) *RequestSpotInstancesInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RequestSpotInstancesInput) SetDryRun(v bool) *RequestSpotInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *RequestSpotInstancesInput) SetInstanceCount(v int64) *RequestSpotInstancesInput { + s.InstanceCount = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *RequestSpotInstancesInput) SetInstanceInterruptionBehavior(v string) *RequestSpotInstancesInput { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetLaunchGroup sets the LaunchGroup field's value. +func (s *RequestSpotInstancesInput) SetLaunchGroup(v string) *RequestSpotInstancesInput { + s.LaunchGroup = &v + return s +} + +// SetLaunchSpecification sets the LaunchSpecification field's value. +func (s *RequestSpotInstancesInput) SetLaunchSpecification(v *RequestSpotLaunchSpecification) *RequestSpotInstancesInput { + s.LaunchSpecification = v + return s +} + +// SetSpotPrice sets the SpotPrice field's value. +func (s *RequestSpotInstancesInput) SetSpotPrice(v string) *RequestSpotInstancesInput { + s.SpotPrice = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *RequestSpotInstancesInput) SetTagSpecifications(v []*TagSpecification) *RequestSpotInstancesInput { + s.TagSpecifications = v + return s +} + +// SetType sets the Type field's value. +func (s *RequestSpotInstancesInput) SetType(v string) *RequestSpotInstancesInput { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *RequestSpotInstancesInput) SetValidFrom(v time.Time) *RequestSpotInstancesInput { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *RequestSpotInstancesInput) SetValidUntil(v time.Time) *RequestSpotInstancesInput { + s.ValidUntil = &v + return s +} + +// Contains the output of RequestSpotInstances. +type RequestSpotInstancesOutput struct { + _ struct{} `type:"structure"` + + // One or more Spot Instance requests. + SpotInstanceRequests []*SpotInstanceRequest `locationName:"spotInstanceRequestSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RequestSpotInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotInstancesOutput) GoString() string { + return s.String() +} + +// SetSpotInstanceRequests sets the SpotInstanceRequests field's value. +func (s *RequestSpotInstancesOutput) SetSpotInstanceRequests(v []*SpotInstanceRequest) *RequestSpotInstancesOutput { + s.SpotInstanceRequests = v + return s +} + +// Describes the launch specification for an instance. +type RequestSpotLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block device mapping entries. You can't specify both a snapshot + // ID and an encryption value. This is because only blank volumes can be encrypted + // on creation. If a snapshot is the basis for a volume, it is not blank and + // its encryption status is used for the volume encryption status. 
+ BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instance is optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Indicates whether basic or detailed monitoring is enabled for the instance. + // + // Default: Disabled + Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. If you specify a network interface, you must + // specify subnet IDs and security group IDs using the network interface. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"NetworkInterface" locationNameList:"item" type:"list"` + + // The placement information for the instance. + Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security group IDs. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + // One or more security groups. 
When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` + + // The IDs of the subnets in which to launch the instance. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". + SubnetId *string `locationName:"subnetId" type:"string"` + + // The Base64-encoded user data for the instance. User data is limited to 16 + // KB. + UserData *string `locationName:"userData" type:"string"` +} + +// String returns the string representation +func (s RequestSpotLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestSpotLaunchSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestSpotLaunchSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestSpotLaunchSpecification"} + if s.Monitoring != nil { + if err := s.Monitoring.Validate(); err != nil { + invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressingType sets the AddressingType field's value. +func (s *RequestSpotLaunchSpecification) SetAddressingType(v string) *RequestSpotLaunchSpecification { + s.AddressingType = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *RequestSpotLaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RequestSpotLaunchSpecification { + s.BlockDeviceMappings = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. 
+func (s *RequestSpotLaunchSpecification) SetEbsOptimized(v bool) *RequestSpotLaunchSpecification { + s.EbsOptimized = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *RequestSpotLaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *RequestSpotLaunchSpecification { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *RequestSpotLaunchSpecification) SetImageId(v string) *RequestSpotLaunchSpecification { + s.ImageId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *RequestSpotLaunchSpecification) SetInstanceType(v string) *RequestSpotLaunchSpecification { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *RequestSpotLaunchSpecification) SetKernelId(v string) *RequestSpotLaunchSpecification { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *RequestSpotLaunchSpecification) SetKeyName(v string) *RequestSpotLaunchSpecification { + s.KeyName = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *RequestSpotLaunchSpecification) SetMonitoring(v *RunInstancesMonitoringEnabled) *RequestSpotLaunchSpecification { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *RequestSpotLaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *RequestSpotLaunchSpecification { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *RequestSpotLaunchSpecification) SetPlacement(v *SpotPlacement) *RequestSpotLaunchSpecification { + s.Placement = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. 
+func (s *RequestSpotLaunchSpecification) SetRamdiskId(v string) *RequestSpotLaunchSpecification { + s.RamdiskId = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *RequestSpotLaunchSpecification) SetSecurityGroupIds(v []*string) *RequestSpotLaunchSpecification { + s.SecurityGroupIds = v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *RequestSpotLaunchSpecification) SetSecurityGroups(v []*string) *RequestSpotLaunchSpecification { + s.SecurityGroups = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *RequestSpotLaunchSpecification) SetSubnetId(v string) *RequestSpotLaunchSpecification { + s.SubnetId = &v + return s +} + +// SetUserData sets the UserData field's value. +func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunchSpecification { + s.UserData = &v + return s +} + +// Describes a launch request for one or more instances, and includes owner, +// requester, and security group information that applies to all instances in +// the launch request. +type Reservation struct { + _ struct{} `type:"structure"` + + // [EC2-Classic only] The security groups. + Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The instances. + Instances []*Instance `locationName:"instancesSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the reservation. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the requester that launched the instances on your behalf (for example, + // AWS Management Console or Auto Scaling). + RequesterId *string `locationName:"requesterId" type:"string"` + + // The ID of the reservation. 
+ ReservationId *string `locationName:"reservationId" type:"string"` +} + +// String returns the string representation +func (s Reservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Reservation) GoString() string { + return s.String() +} + +// SetGroups sets the Groups field's value. +func (s *Reservation) SetGroups(v []*GroupIdentifier) *Reservation { + s.Groups = v + return s +} + +// SetInstances sets the Instances field's value. +func (s *Reservation) SetInstances(v []*Instance) *Reservation { + s.Instances = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Reservation) SetOwnerId(v string) *Reservation { + s.OwnerId = &v + return s +} + +// SetRequesterId sets the RequesterId field's value. +func (s *Reservation) SetRequesterId(v string) *Reservation { + s.RequesterId = &v + return s +} + +// SetReservationId sets the ReservationId field's value. +func (s *Reservation) SetReservationId(v string) *Reservation { + s.ReservationId = &v + return s +} + +// The cost associated with the Reserved Instance. +type ReservationValue struct { + _ struct{} `type:"structure"` + + // The hourly rate of the reservation. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The balance of the total value (the sum of remainingUpfrontValue + hourlyPrice + // * number of hours remaining). + RemainingTotalValue *string `locationName:"remainingTotalValue" type:"string"` + + // The remaining upfront cost of the reservation. + RemainingUpfrontValue *string `locationName:"remainingUpfrontValue" type:"string"` +} + +// String returns the string representation +func (s ReservationValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservationValue) GoString() string { + return s.String() +} + +// SetHourlyPrice sets the HourlyPrice field's value. 
+func (s *ReservationValue) SetHourlyPrice(v string) *ReservationValue { + s.HourlyPrice = &v + return s +} + +// SetRemainingTotalValue sets the RemainingTotalValue field's value. +func (s *ReservationValue) SetRemainingTotalValue(v string) *ReservationValue { + s.RemainingTotalValue = &v + return s +} + +// SetRemainingUpfrontValue sets the RemainingUpfrontValue field's value. +func (s *ReservationValue) SetRemainingUpfrontValue(v string) *ReservationValue { + s.RemainingUpfrontValue = &v + return s +} + +// Describes the limit price of a Reserved Instance offering. +type ReservedInstanceLimitPrice struct { + _ struct{} `type:"structure"` + + // Used for Reserved Instance Marketplace offerings. Specifies the limit price + // on the total order (instanceCount * price). + Amount *float64 `locationName:"amount" type:"double"` + + // The currency in which the limitPrice amount is specified. At this time, the + // only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` +} + +// String returns the string representation +func (s ReservedInstanceLimitPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceLimitPrice) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *ReservedInstanceLimitPrice) SetAmount(v float64) *ReservedInstanceLimitPrice { + s.Amount = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *ReservedInstanceLimitPrice) SetCurrencyCode(v string) *ReservedInstanceLimitPrice { + s.CurrencyCode = &v + return s +} + +// The total value of the Convertible Reserved Instance. +type ReservedInstanceReservationValue struct { + _ struct{} `type:"structure"` + + // The total value of the Convertible Reserved Instance that you are exchanging. 
+ ReservationValue *ReservationValue `locationName:"reservationValue" type:"structure"` + + // The ID of the Convertible Reserved Instance that you are exchanging. + ReservedInstanceId *string `locationName:"reservedInstanceId" type:"string"` +} + +// String returns the string representation +func (s ReservedInstanceReservationValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstanceReservationValue) GoString() string { + return s.String() +} + +// SetReservationValue sets the ReservationValue field's value. +func (s *ReservedInstanceReservationValue) SetReservationValue(v *ReservationValue) *ReservedInstanceReservationValue { + s.ReservationValue = v + return s +} + +// SetReservedInstanceId sets the ReservedInstanceId field's value. +func (s *ReservedInstanceReservationValue) SetReservedInstanceId(v string) *ReservedInstanceReservationValue { + s.ReservedInstanceId = &v + return s +} + +// Describes a Reserved Instance. +type ReservedInstances struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance. It's specified using ISO 4217 standard + // currency codes. At this time, the only supported currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The time when the Reserved Instance expires. + End *time.Time `locationName:"end" type:"timestamp"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The number of reservations purchased. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The tenancy of the instance. 
+ InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The offering class of the Reserved Instance. + OfferingClass *string `locationName:"offeringClass" type:"string" enum:"OfferingClassType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. + RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The scope of the Reserved Instance. + Scope *string `locationName:"scope" type:"string" enum:"Scope"` + + // The date and time the Reserved Instance started. + Start *time.Time `locationName:"start" type:"timestamp"` + + // The state of the Reserved Instance purchase. + State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstances) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. 
+func (s *ReservedInstances) SetAvailabilityZone(v string) *ReservedInstances { + s.AvailabilityZone = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *ReservedInstances) SetCurrencyCode(v string) *ReservedInstances { + s.CurrencyCode = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *ReservedInstances) SetDuration(v int64) *ReservedInstances { + s.Duration = &v + return s +} + +// SetEnd sets the End field's value. +func (s *ReservedInstances) SetEnd(v time.Time) *ReservedInstances { + s.End = &v + return s +} + +// SetFixedPrice sets the FixedPrice field's value. +func (s *ReservedInstances) SetFixedPrice(v float64) *ReservedInstances { + s.FixedPrice = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *ReservedInstances) SetInstanceCount(v int64) *ReservedInstances { + s.InstanceCount = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ReservedInstances) SetInstanceTenancy(v string) *ReservedInstances { + s.InstanceTenancy = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ReservedInstances) SetInstanceType(v string) *ReservedInstances { + s.InstanceType = &v + return s +} + +// SetOfferingClass sets the OfferingClass field's value. +func (s *ReservedInstances) SetOfferingClass(v string) *ReservedInstances { + s.OfferingClass = &v + return s +} + +// SetOfferingType sets the OfferingType field's value. +func (s *ReservedInstances) SetOfferingType(v string) *ReservedInstances { + s.OfferingType = &v + return s +} + +// SetProductDescription sets the ProductDescription field's value. +func (s *ReservedInstances) SetProductDescription(v string) *ReservedInstances { + s.ProductDescription = &v + return s +} + +// SetRecurringCharges sets the RecurringCharges field's value. 
+func (s *ReservedInstances) SetRecurringCharges(v []*RecurringCharge) *ReservedInstances { + s.RecurringCharges = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *ReservedInstances) SetReservedInstancesId(v string) *ReservedInstances { + s.ReservedInstancesId = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *ReservedInstances) SetScope(v string) *ReservedInstances { + s.Scope = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ReservedInstances) SetStart(v time.Time) *ReservedInstances { + s.Start = &v + return s +} + +// SetState sets the State field's value. +func (s *ReservedInstances) SetState(v string) *ReservedInstances { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReservedInstances) SetTags(v []*Tag) *ReservedInstances { + s.Tags = v + return s +} + +// SetUsagePrice sets the UsagePrice field's value. +func (s *ReservedInstances) SetUsagePrice(v float64) *ReservedInstances { + s.UsagePrice = &v + return s +} + +// Describes the configuration settings for the modified Reserved Instances. +type ReservedInstancesConfiguration struct { + _ struct{} `type:"structure"` + + // The Availability Zone for the modified Reserved Instances. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of modified Reserved Instances. + // + // This is a required field for a request. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type for the modified Reserved Instances. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The network platform of the modified Reserved Instances, which is either + // EC2-Classic or EC2-VPC. + Platform *string `locationName:"platform" type:"string"` + + // Whether the Reserved Instance is applied to instances in a Region or instances + // in a specific Availability Zone. 
+ Scope *string `locationName:"scope" type:"string" enum:"Scope"` +} + +// String returns the string representation +func (s ReservedInstancesConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesConfiguration) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ReservedInstancesConfiguration) SetAvailabilityZone(v string) *ReservedInstancesConfiguration { + s.AvailabilityZone = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *ReservedInstancesConfiguration) SetInstanceCount(v int64) *ReservedInstancesConfiguration { + s.InstanceCount = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ReservedInstancesConfiguration) SetInstanceType(v string) *ReservedInstancesConfiguration { + s.InstanceType = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ReservedInstancesConfiguration) SetPlatform(v string) *ReservedInstancesConfiguration { + s.Platform = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *ReservedInstancesConfiguration) SetScope(v string) *ReservedInstancesConfiguration { + s.Scope = &v + return s +} + +// Describes the ID of a Reserved Instance. +type ReservedInstancesId struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s ReservedInstancesId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesId) GoString() string { + return s.String() +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. 
+func (s *ReservedInstancesId) SetReservedInstancesId(v string) *ReservedInstancesId { + s.ReservedInstancesId = &v + return s +} + +// Describes a Reserved Instance listing. +type ReservedInstancesListing struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time the listing was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` + + // The number of instances in this state. + InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` + + // The price of the Reserved Instance listing. + PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The ID of the Reserved Instance listing. + ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` + + // The status of the Reserved Instance listing. + Status *string `locationName:"status" type:"string" enum:"ListingStatus"` + + // The reason for the current status of the Reserved Instance listing. The response + // can be blank. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The last modified timestamp of the listing. 
+ UpdateDate *time.Time `locationName:"updateDate" type:"timestamp"` +} + +// String returns the string representation +func (s ReservedInstancesListing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesListing) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *ReservedInstancesListing) SetClientToken(v string) *ReservedInstancesListing { + s.ClientToken = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *ReservedInstancesListing) SetCreateDate(v time.Time) *ReservedInstancesListing { + s.CreateDate = &v + return s +} + +// SetInstanceCounts sets the InstanceCounts field's value. +func (s *ReservedInstancesListing) SetInstanceCounts(v []*InstanceCount) *ReservedInstancesListing { + s.InstanceCounts = v + return s +} + +// SetPriceSchedules sets the PriceSchedules field's value. +func (s *ReservedInstancesListing) SetPriceSchedules(v []*PriceSchedule) *ReservedInstancesListing { + s.PriceSchedules = v + return s +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *ReservedInstancesListing) SetReservedInstancesId(v string) *ReservedInstancesListing { + s.ReservedInstancesId = &v + return s +} + +// SetReservedInstancesListingId sets the ReservedInstancesListingId field's value. +func (s *ReservedInstancesListing) SetReservedInstancesListingId(v string) *ReservedInstancesListing { + s.ReservedInstancesListingId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReservedInstancesListing) SetStatus(v string) *ReservedInstancesListing { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ReservedInstancesListing) SetStatusMessage(v string) *ReservedInstancesListing { + s.StatusMessage = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *ReservedInstancesListing) SetTags(v []*Tag) *ReservedInstancesListing { + s.Tags = v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. +func (s *ReservedInstancesListing) SetUpdateDate(v time.Time) *ReservedInstancesListing { + s.UpdateDate = &v + return s +} + +// Describes a Reserved Instance modification. +type ReservedInstancesModification struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive key supplied by the client to ensure that the request + // is idempotent. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // The time when the modification request was created. + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` + + // The time for the modification to become effective. + EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp"` + + // Contains target configurations along with their corresponding new Reserved + // Instance IDs. + ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"` + + // The IDs of one or more Reserved Instances. + ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` + + // A unique ID for the Reserved Instance modification. + ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` + + // The status of the Reserved Instances modification request. + Status *string `locationName:"status" type:"string"` + + // The reason for the status. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The time when the modification request was last updated. 
+ UpdateDate *time.Time `locationName:"updateDate" type:"timestamp"` +} + +// String returns the string representation +func (s ReservedInstancesModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModification) GoString() string { + return s.String() +} + +// SetClientToken sets the ClientToken field's value. +func (s *ReservedInstancesModification) SetClientToken(v string) *ReservedInstancesModification { + s.ClientToken = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *ReservedInstancesModification) SetCreateDate(v time.Time) *ReservedInstancesModification { + s.CreateDate = &v + return s +} + +// SetEffectiveDate sets the EffectiveDate field's value. +func (s *ReservedInstancesModification) SetEffectiveDate(v time.Time) *ReservedInstancesModification { + s.EffectiveDate = &v + return s +} + +// SetModificationResults sets the ModificationResults field's value. +func (s *ReservedInstancesModification) SetModificationResults(v []*ReservedInstancesModificationResult) *ReservedInstancesModification { + s.ModificationResults = v + return s +} + +// SetReservedInstancesIds sets the ReservedInstancesIds field's value. +func (s *ReservedInstancesModification) SetReservedInstancesIds(v []*ReservedInstancesId) *ReservedInstancesModification { + s.ReservedInstancesIds = v + return s +} + +// SetReservedInstancesModificationId sets the ReservedInstancesModificationId field's value. +func (s *ReservedInstancesModification) SetReservedInstancesModificationId(v string) *ReservedInstancesModification { + s.ReservedInstancesModificationId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReservedInstancesModification) SetStatus(v string) *ReservedInstancesModification { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. 
+func (s *ReservedInstancesModification) SetStatusMessage(v string) *ReservedInstancesModification { + s.StatusMessage = &v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. +func (s *ReservedInstancesModification) SetUpdateDate(v time.Time) *ReservedInstancesModification { + s.UpdateDate = &v + return s +} + +// Describes the modification request/s. +type ReservedInstancesModificationResult struct { + _ struct{} `type:"structure"` + + // The ID for the Reserved Instances that were created as part of the modification + // request. This field is only available when the modification is fulfilled. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` + + // The target Reserved Instances configurations supplied as part of the modification + // request. + TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` +} + +// String returns the string representation +func (s ReservedInstancesModificationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesModificationResult) GoString() string { + return s.String() +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *ReservedInstancesModificationResult) SetReservedInstancesId(v string) *ReservedInstancesModificationResult { + s.ReservedInstancesId = &v + return s +} + +// SetTargetConfiguration sets the TargetConfiguration field's value. +func (s *ReservedInstancesModificationResult) SetTargetConfiguration(v *ReservedInstancesConfiguration) *ReservedInstancesModificationResult { + s.TargetConfiguration = v + return s +} + +// Describes a Reserved Instance offering. +type ReservedInstancesOffering struct { + _ struct{} `type:"structure"` + + // The Availability Zone in which the Reserved Instance can be used. 
+ AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The currency of the Reserved Instance offering you are purchasing. It's specified + // using ISO 4217 standard currency codes. At this time, the only supported + // currency is USD. + CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` + + // The duration of the Reserved Instance, in seconds. + Duration *int64 `locationName:"duration" type:"long"` + + // The purchase price of the Reserved Instance. + FixedPrice *float64 `locationName:"fixedPrice" type:"float"` + + // The tenancy of the instance. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // The instance type on which the Reserved Instance can be used. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // Indicates whether the offering is available through the Reserved Instance + // Marketplace (resale) or AWS. If it's a Reserved Instance Marketplace offering, + // this is true. + Marketplace *bool `locationName:"marketplace" type:"boolean"` + + // If convertible it can be exchanged for Reserved Instances of the same or + // higher monetary value, with different configurations. If standard, it is + // not possible to perform an exchange. + OfferingClass *string `locationName:"offeringClass" type:"string" enum:"OfferingClassType"` + + // The Reserved Instance offering type. + OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` + + // The pricing details of the Reserved Instance offering. + PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` + + // The Reserved Instance product platform description. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The recurring charge tag assigned to the resource. 
+ RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` + + // The ID of the Reserved Instance offering. This is the offering ID used in + // GetReservedInstancesExchangeQuote to confirm that an exchange can be made. + ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` + + // Whether the Reserved Instance is applied to instances in a Region or an Availability + // Zone. + Scope *string `locationName:"scope" type:"string" enum:"Scope"` + + // The usage price of the Reserved Instance, per hour. + UsagePrice *float64 `locationName:"usagePrice" type:"float"` +} + +// String returns the string representation +func (s ReservedInstancesOffering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReservedInstancesOffering) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ReservedInstancesOffering) SetAvailabilityZone(v string) *ReservedInstancesOffering { + s.AvailabilityZone = &v + return s +} + +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *ReservedInstancesOffering) SetCurrencyCode(v string) *ReservedInstancesOffering { + s.CurrencyCode = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *ReservedInstancesOffering) SetDuration(v int64) *ReservedInstancesOffering { + s.Duration = &v + return s +} + +// SetFixedPrice sets the FixedPrice field's value. +func (s *ReservedInstancesOffering) SetFixedPrice(v float64) *ReservedInstancesOffering { + s.FixedPrice = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ReservedInstancesOffering) SetInstanceTenancy(v string) *ReservedInstancesOffering { + s.InstanceTenancy = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. 
+func (s *ReservedInstancesOffering) SetInstanceType(v string) *ReservedInstancesOffering { + s.InstanceType = &v + return s +} + +// SetMarketplace sets the Marketplace field's value. +func (s *ReservedInstancesOffering) SetMarketplace(v bool) *ReservedInstancesOffering { + s.Marketplace = &v + return s +} + +// SetOfferingClass sets the OfferingClass field's value. +func (s *ReservedInstancesOffering) SetOfferingClass(v string) *ReservedInstancesOffering { + s.OfferingClass = &v + return s +} + +// SetOfferingType sets the OfferingType field's value. +func (s *ReservedInstancesOffering) SetOfferingType(v string) *ReservedInstancesOffering { + s.OfferingType = &v + return s +} + +// SetPricingDetails sets the PricingDetails field's value. +func (s *ReservedInstancesOffering) SetPricingDetails(v []*PricingDetail) *ReservedInstancesOffering { + s.PricingDetails = v + return s +} + +// SetProductDescription sets the ProductDescription field's value. +func (s *ReservedInstancesOffering) SetProductDescription(v string) *ReservedInstancesOffering { + s.ProductDescription = &v + return s +} + +// SetRecurringCharges sets the RecurringCharges field's value. +func (s *ReservedInstancesOffering) SetRecurringCharges(v []*RecurringCharge) *ReservedInstancesOffering { + s.RecurringCharges = v + return s +} + +// SetReservedInstancesOfferingId sets the ReservedInstancesOfferingId field's value. +func (s *ReservedInstancesOffering) SetReservedInstancesOfferingId(v string) *ReservedInstancesOffering { + s.ReservedInstancesOfferingId = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *ReservedInstancesOffering) SetScope(v string) *ReservedInstancesOffering { + s.Scope = &v + return s +} + +// SetUsagePrice sets the UsagePrice field's value. 
+func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesOffering { + s.UsagePrice = &v + return s +} + +type ResetEbsDefaultKmsKeyIdInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResetEbsDefaultKmsKeyIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetEbsDefaultKmsKeyIdInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *ResetEbsDefaultKmsKeyIdInput) SetDryRun(v bool) *ResetEbsDefaultKmsKeyIdInput { + s.DryRun = &v + return s +} + +type ResetEbsDefaultKmsKeyIdOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the default CMK for EBS encryption by default. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` +} + +// String returns the string representation +func (s ResetEbsDefaultKmsKeyIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetEbsDefaultKmsKeyIdOutput) GoString() string { + return s.String() +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *ResetEbsDefaultKmsKeyIdOutput) SetKmsKeyId(v string) *ResetEbsDefaultKmsKeyIdOutput { + s.KmsKeyId = &v + return s +} + +type ResetFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute. + Attribute *string `type:"string" enum:"ResetFpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ResetFpgaImageAttributeInput) SetAttribute(v string) *ResetFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ResetFpgaImageAttributeInput) SetDryRun(v bool) *ResetFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ResetFpgaImageAttributeInput) SetFpgaImageId(v string) *ResetFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +type ResetFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
func (s *ResetFpgaImageAttributeOutput) SetReturn(v bool) *ResetFpgaImageAttributeOutput {
	s.Return = &v
	return s
}

// Contains the parameters for ResetImageAttribute.
type ResetImageAttributeInput struct {
	_ struct{} `type:"structure"`

	// The attribute to reset (currently you can only reset the launch permission
	// attribute).
	//
	// Attribute is a required field
	Attribute *string `type:"string" required:"true" enum:"ResetImageAttributeName"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the AMI.
	//
	// ImageId is a required field
	ImageId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s ResetImageAttributeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetImageAttributeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams listing every missing required
// field, or nil when the input is valid.
func (s *ResetImageAttributeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ResetImageAttributeInput"}
	if s.Attribute == nil {
		invalidParams.Add(request.NewErrParamRequired("Attribute"))
	}
	if s.ImageId == nil {
		invalidParams.Add(request.NewErrParamRequired("ImageId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttribute sets the Attribute field's value.
// Setters store the address of the argument and return the receiver so
// calls can be chained fluently.
func (s *ResetImageAttributeInput) SetAttribute(v string) *ResetImageAttributeInput {
	s.Attribute = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ResetImageAttributeInput) SetDryRun(v bool) *ResetImageAttributeInput {
	s.DryRun = &v
	return s
}

// SetImageId sets the ImageId field's value.
func (s *ResetImageAttributeInput) SetImageId(v string) *ResetImageAttributeInput {
	s.ImageId = &v
	return s
}

// ResetImageAttributeOutput is an empty response structure; the call
// reports only success or an error.
type ResetImageAttributeOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ResetImageAttributeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetImageAttributeOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ResetInstanceAttribute.
type ResetInstanceAttributeInput struct {
	_ struct{} `type:"structure"`

	// The attribute to reset.
	//
	// You can only reset the following attributes: kernel | ramdisk | sourceDestCheck.
	// To change an instance attribute, use ModifyInstanceAttribute.
	//
	// Attribute is a required field
	Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the instance.
	//
	// InstanceId is a required field
	InstanceId *string `locationName:"instanceId" type:"string" required:"true"`
}

// String returns the string representation
func (s ResetInstanceAttributeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetInstanceAttributeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ResetInstanceAttributeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ResetInstanceAttributeInput"}
	if s.Attribute == nil {
		invalidParams.Add(request.NewErrParamRequired("Attribute"))
	}
	if s.InstanceId == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttribute sets the Attribute field's value.
func (s *ResetInstanceAttributeInput) SetAttribute(v string) *ResetInstanceAttributeInput {
	s.Attribute = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ResetInstanceAttributeInput) SetDryRun(v bool) *ResetInstanceAttributeInput {
	s.DryRun = &v
	return s
}

// SetInstanceId sets the InstanceId field's value.
func (s *ResetInstanceAttributeInput) SetInstanceId(v string) *ResetInstanceAttributeInput {
	s.InstanceId = &v
	return s
}

// ResetInstanceAttributeOutput is an empty response structure; the call
// reports only success or an error.
type ResetInstanceAttributeOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ResetInstanceAttributeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetInstanceAttributeOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ResetNetworkInterfaceAttribute.
type ResetNetworkInterfaceAttributeInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the network interface.
	//
	// NetworkInterfaceId is a required field
	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`

	// The source/destination checking attribute. Resets the value to true.
	// NOTE(review): modeled as *string rather than *bool in the EC2 API
	// shape — confirm against the service model before changing.
	SourceDestCheck *string `locationName:"sourceDestCheck" type:"string"`
}

// String returns the string representation
func (s ResetNetworkInterfaceAttributeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetNetworkInterfaceAttributeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ResetNetworkInterfaceAttributeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ResetNetworkInterfaceAttributeInput"}
	if s.NetworkInterfaceId == nil {
		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *ResetNetworkInterfaceAttributeInput) SetDryRun(v bool) *ResetNetworkInterfaceAttributeInput {
	s.DryRun = &v
	return s
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *ResetNetworkInterfaceAttributeInput) SetNetworkInterfaceId(v string) *ResetNetworkInterfaceAttributeInput {
	s.NetworkInterfaceId = &v
	return s
}

// SetSourceDestCheck sets the SourceDestCheck field's value.
func (s *ResetNetworkInterfaceAttributeInput) SetSourceDestCheck(v string) *ResetNetworkInterfaceAttributeInput {
	s.SourceDestCheck = &v
	return s
}

// ResetNetworkInterfaceAttributeOutput is an empty response structure; the
// call reports only success or an error.
type ResetNetworkInterfaceAttributeOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ResetNetworkInterfaceAttributeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetNetworkInterfaceAttributeOutput) GoString() string {
	return s.String()
}

// Contains the parameters for ResetSnapshotAttribute.
type ResetSnapshotAttributeInput struct {
	_ struct{} `type:"structure"`

	// The attribute to reset. Currently, only the attribute for permission to create
	// volumes can be reset.
	//
	// Attribute is a required field
	Attribute *string `type:"string" required:"true" enum:"SnapshotAttributeName"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The ID of the snapshot.
	//
	// SnapshotId is a required field
	SnapshotId *string `type:"string" required:"true"`
}

// String returns the string representation
func (s ResetSnapshotAttributeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetSnapshotAttributeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ResetSnapshotAttributeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ResetSnapshotAttributeInput"}
	if s.Attribute == nil {
		invalidParams.Add(request.NewErrParamRequired("Attribute"))
	}
	if s.SnapshotId == nil {
		invalidParams.Add(request.NewErrParamRequired("SnapshotId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttribute sets the Attribute field's value.
func (s *ResetSnapshotAttributeInput) SetAttribute(v string) *ResetSnapshotAttributeInput {
	s.Attribute = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *ResetSnapshotAttributeInput) SetDryRun(v bool) *ResetSnapshotAttributeInput {
	s.DryRun = &v
	return s
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *ResetSnapshotAttributeInput) SetSnapshotId(v string) *ResetSnapshotAttributeInput {
	s.SnapshotId = &v
	return s
}

// ResetSnapshotAttributeOutput is an empty response structure; the call
// reports only success or an error.
type ResetSnapshotAttributeOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ResetSnapshotAttributeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResetSnapshotAttributeOutput) GoString() string {
	return s.String()
}

// Describes the error that's returned when you cannot delete a launch template
// version.
type ResponseError struct {
	_ struct{} `type:"structure"`

	// The error code.
	Code *string `locationName:"code" type:"string" enum:"LaunchTemplateErrorCode"`

	// The error message, if applicable.
	Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ResponseError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResponseError) GoString() string {
	return s.String()
}

// SetCode sets the Code field's value.
func (s *ResponseError) SetCode(v string) *ResponseError {
	s.Code = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *ResponseError) SetMessage(v string) *ResponseError {
	s.Message = &v
	return s
}

// The information for a launch template.
type ResponseLaunchTemplateData struct {
	_ struct{} `type:"structure"`

	// The block device mappings.
	BlockDeviceMappings []*LaunchTemplateBlockDeviceMapping `locationName:"blockDeviceMappingSet" locationNameList:"item" type:"list"`

	// Information about the Capacity Reservation targeting option.
	CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationResponse `locationName:"capacityReservationSpecification" type:"structure"`

	// The CPU options for the instance. For more information, see Optimizing CPU
	// Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	CpuOptions *LaunchTemplateCpuOptions `locationName:"cpuOptions" type:"structure"`

	// The credit option for CPU usage of the instance.
	CreditSpecification *CreditSpecification `locationName:"creditSpecification" type:"structure"`

	// If set to true, indicates that the instance cannot be terminated using the
	// Amazon EC2 console, command line tool, or API.
	DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"`

	// Indicates whether the instance is optimized for Amazon EBS I/O.
	EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"`

	// The elastic GPU specification.
	ElasticGpuSpecifications []*ElasticGpuSpecificationResponse `locationName:"elasticGpuSpecificationSet" locationNameList:"item" type:"list"`

	// The elastic inference accelerator for the instance.
	ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAcceleratorResponse `locationName:"elasticInferenceAcceleratorSet" locationNameList:"item" type:"list"`

	// Indicates whether the instance is enabled for AWS Nitro Enclaves.
	EnclaveOptions *LaunchTemplateEnclaveOptions `locationName:"enclaveOptions" type:"structure"`

	// Indicates whether an instance is configured for hibernation. For more information,
	// see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	HibernationOptions *LaunchTemplateHibernationOptions `locationName:"hibernationOptions" type:"structure"`

	// The IAM instance profile.
	IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"`

	// The ID of the AMI that was used to launch the instance.
	ImageId *string `locationName:"imageId" type:"string"`

	// Indicates whether an instance stops or terminates when you initiate shutdown
	// from the instance (using the operating system command for system shutdown).
	InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"`

	// The market (purchasing) option for the instances.
	InstanceMarketOptions *LaunchTemplateInstanceMarketOptions `locationName:"instanceMarketOptions" type:"structure"`

	// The instance type.
	InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`

	// The ID of the kernel, if applicable.
	KernelId *string `locationName:"kernelId" type:"string"`

	// The name of the key pair.
	KeyName *string `locationName:"keyName" type:"string"`

	// The license configurations.
	LicenseSpecifications []*LaunchTemplateLicenseConfiguration `locationName:"licenseSet" locationNameList:"item" type:"list"`

	// The metadata options for the instance. For more information, see Instance
	// Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	MetadataOptions *LaunchTemplateInstanceMetadataOptions `locationName:"metadataOptions" type:"structure"`

	// The monitoring for the instance.
	Monitoring *LaunchTemplatesMonitoring `locationName:"monitoring" type:"structure"`

	// The network interfaces.
	NetworkInterfaces []*LaunchTemplateInstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"`

	// The placement of the instance.
	Placement *LaunchTemplatePlacement `locationName:"placement" type:"structure"`

	// The ID of the RAM disk, if applicable.
	RamDiskId *string `locationName:"ramDiskId" type:"string"`

	// The security group IDs.
	SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"`

	// The security group names.
	SecurityGroups []*string `locationName:"securityGroupSet" locationNameList:"item" type:"list"`

	// The tags.
	TagSpecifications []*LaunchTemplateTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"`

	// The user data for the instance.
	UserData *string `locationName:"userData" type:"string"`
}

// String returns the string representation
func (s ResponseLaunchTemplateData) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResponseLaunchTemplateData) GoString() string {
	return s.String()
}

// SetBlockDeviceMappings sets the BlockDeviceMappings field's value.
func (s *ResponseLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateBlockDeviceMapping) *ResponseLaunchTemplateData {
	s.BlockDeviceMappings = v
	return s
}

// SetCapacityReservationSpecification sets the CapacityReservationSpecification field's value.
func (s *ResponseLaunchTemplateData) SetCapacityReservationSpecification(v *LaunchTemplateCapacityReservationSpecificationResponse) *ResponseLaunchTemplateData {
	s.CapacityReservationSpecification = v
	return s
}

// SetCpuOptions sets the CpuOptions field's value.
func (s *ResponseLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptions) *ResponseLaunchTemplateData {
	s.CpuOptions = v
	return s
}

// SetCreditSpecification sets the CreditSpecification field's value.
func (s *ResponseLaunchTemplateData) SetCreditSpecification(v *CreditSpecification) *ResponseLaunchTemplateData {
	s.CreditSpecification = v
	return s
}

// SetDisableApiTermination sets the DisableApiTermination field's value.
func (s *ResponseLaunchTemplateData) SetDisableApiTermination(v bool) *ResponseLaunchTemplateData {
	s.DisableApiTermination = &v
	return s
}

// SetEbsOptimized sets the EbsOptimized field's value.
func (s *ResponseLaunchTemplateData) SetEbsOptimized(v bool) *ResponseLaunchTemplateData {
	s.EbsOptimized = &v
	return s
}

// SetElasticGpuSpecifications sets the ElasticGpuSpecifications field's value.
func (s *ResponseLaunchTemplateData) SetElasticGpuSpecifications(v []*ElasticGpuSpecificationResponse) *ResponseLaunchTemplateData {
	s.ElasticGpuSpecifications = v
	return s
}

// SetElasticInferenceAccelerators sets the ElasticInferenceAccelerators field's value.
func (s *ResponseLaunchTemplateData) SetElasticInferenceAccelerators(v []*LaunchTemplateElasticInferenceAcceleratorResponse) *ResponseLaunchTemplateData {
	s.ElasticInferenceAccelerators = v
	return s
}

// SetEnclaveOptions sets the EnclaveOptions field's value.
func (s *ResponseLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptions) *ResponseLaunchTemplateData {
	s.EnclaveOptions = v
	return s
}

// SetHibernationOptions sets the HibernationOptions field's value.
func (s *ResponseLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptions) *ResponseLaunchTemplateData {
	s.HibernationOptions = v
	return s
}

// SetIamInstanceProfile sets the IamInstanceProfile field's value.
func (s *ResponseLaunchTemplateData) SetIamInstanceProfile(v *LaunchTemplateIamInstanceProfileSpecification) *ResponseLaunchTemplateData {
	s.IamInstanceProfile = v
	return s
}

// SetImageId sets the ImageId field's value.
func (s *ResponseLaunchTemplateData) SetImageId(v string) *ResponseLaunchTemplateData {
	s.ImageId = &v
	return s
}

// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value.
func (s *ResponseLaunchTemplateData) SetInstanceInitiatedShutdownBehavior(v string) *ResponseLaunchTemplateData {
	s.InstanceInitiatedShutdownBehavior = &v
	return s
}

// SetInstanceMarketOptions sets the InstanceMarketOptions field's value.
func (s *ResponseLaunchTemplateData) SetInstanceMarketOptions(v *LaunchTemplateInstanceMarketOptions) *ResponseLaunchTemplateData {
	s.InstanceMarketOptions = v
	return s
}

// SetInstanceType sets the InstanceType field's value.
func (s *ResponseLaunchTemplateData) SetInstanceType(v string) *ResponseLaunchTemplateData {
	s.InstanceType = &v
	return s
}

// SetKernelId sets the KernelId field's value.
func (s *ResponseLaunchTemplateData) SetKernelId(v string) *ResponseLaunchTemplateData {
	s.KernelId = &v
	return s
}

// SetKeyName sets the KeyName field's value.
func (s *ResponseLaunchTemplateData) SetKeyName(v string) *ResponseLaunchTemplateData {
	s.KeyName = &v
	return s
}

// SetLicenseSpecifications sets the LicenseSpecifications field's value.
func (s *ResponseLaunchTemplateData) SetLicenseSpecifications(v []*LaunchTemplateLicenseConfiguration) *ResponseLaunchTemplateData {
	s.LicenseSpecifications = v
	return s
}

// SetMetadataOptions sets the MetadataOptions field's value.
func (s *ResponseLaunchTemplateData) SetMetadataOptions(v *LaunchTemplateInstanceMetadataOptions) *ResponseLaunchTemplateData {
	s.MetadataOptions = v
	return s
}

// SetMonitoring sets the Monitoring field's value.
func (s *ResponseLaunchTemplateData) SetMonitoring(v *LaunchTemplatesMonitoring) *ResponseLaunchTemplateData {
	s.Monitoring = v
	return s
}

// SetNetworkInterfaces sets the NetworkInterfaces field's value.
func (s *ResponseLaunchTemplateData) SetNetworkInterfaces(v []*LaunchTemplateInstanceNetworkInterfaceSpecification) *ResponseLaunchTemplateData {
	s.NetworkInterfaces = v
	return s
}

// SetPlacement sets the Placement field's value.
func (s *ResponseLaunchTemplateData) SetPlacement(v *LaunchTemplatePlacement) *ResponseLaunchTemplateData {
	s.Placement = v
	return s
}

// SetRamDiskId sets the RamDiskId field's value.
func (s *ResponseLaunchTemplateData) SetRamDiskId(v string) *ResponseLaunchTemplateData {
	s.RamDiskId = &v
	return s
}

// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *ResponseLaunchTemplateData) SetSecurityGroupIds(v []*string) *ResponseLaunchTemplateData {
	s.SecurityGroupIds = v
	return s
}

// SetSecurityGroups sets the SecurityGroups field's value.
func (s *ResponseLaunchTemplateData) SetSecurityGroups(v []*string) *ResponseLaunchTemplateData {
	s.SecurityGroups = v
	return s
}

// SetTagSpecifications sets the TagSpecifications field's value.
func (s *ResponseLaunchTemplateData) SetTagSpecifications(v []*LaunchTemplateTagSpecification) *ResponseLaunchTemplateData {
	s.TagSpecifications = v
	return s
}

// SetUserData sets the UserData field's value.
func (s *ResponseLaunchTemplateData) SetUserData(v string) *ResponseLaunchTemplateData {
	s.UserData = &v
	return s
}

// Contains the parameters for RestoreAddressToClassic.
type RestoreAddressToClassicInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The Elastic IP address.
	//
	// PublicIp is a required field
	PublicIp *string `locationName:"publicIp" type:"string" required:"true"`
}

// String returns the string representation
func (s RestoreAddressToClassicInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RestoreAddressToClassicInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RestoreAddressToClassicInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RestoreAddressToClassicInput"}
	if s.PublicIp == nil {
		invalidParams.Add(request.NewErrParamRequired("PublicIp"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *RestoreAddressToClassicInput) SetDryRun(v bool) *RestoreAddressToClassicInput {
	s.DryRun = &v
	return s
}

// SetPublicIp sets the PublicIp field's value.
func (s *RestoreAddressToClassicInput) SetPublicIp(v string) *RestoreAddressToClassicInput {
	s.PublicIp = &v
	return s
}

// Contains the output of RestoreAddressToClassic.
type RestoreAddressToClassicOutput struct {
	_ struct{} `type:"structure"`

	// The Elastic IP address.
	PublicIp *string `locationName:"publicIp" type:"string"`

	// The move status for the IP address.
	Status *string `locationName:"status" type:"string" enum:"Status"`
}

// String returns the string representation
func (s RestoreAddressToClassicOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RestoreAddressToClassicOutput) GoString() string {
	return s.String()
}

// SetPublicIp sets the PublicIp field's value.
func (s *RestoreAddressToClassicOutput) SetPublicIp(v string) *RestoreAddressToClassicOutput {
	s.PublicIp = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToClassicOutput {
	s.Status = &v
	return s
}

// Contains the parameters for RestoreManagedPrefixListVersion.
type RestoreManagedPrefixListVersionInput struct {
	_ struct{} `type:"structure"`

	// The current version number for the prefix list.
	//
	// CurrentVersion is a required field
	CurrentVersion *int64 `type:"long" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the prefix list.
	//
	// PrefixListId is a required field
	PrefixListId *string `type:"string" required:"true"`

	// The version to restore.
	//
	// PreviousVersion is a required field
	PreviousVersion *int64 `type:"long" required:"true"`
}

// String returns the string representation
func (s RestoreManagedPrefixListVersionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RestoreManagedPrefixListVersionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// All three of CurrentVersion, PrefixListId, and PreviousVersion are required.
func (s *RestoreManagedPrefixListVersionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RestoreManagedPrefixListVersionInput"}
	if s.CurrentVersion == nil {
		invalidParams.Add(request.NewErrParamRequired("CurrentVersion"))
	}
	if s.PrefixListId == nil {
		invalidParams.Add(request.NewErrParamRequired("PrefixListId"))
	}
	if s.PreviousVersion == nil {
		invalidParams.Add(request.NewErrParamRequired("PreviousVersion"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCurrentVersion sets the CurrentVersion field's value.
func (s *RestoreManagedPrefixListVersionInput) SetCurrentVersion(v int64) *RestoreManagedPrefixListVersionInput {
	s.CurrentVersion = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *RestoreManagedPrefixListVersionInput) SetDryRun(v bool) *RestoreManagedPrefixListVersionInput {
	s.DryRun = &v
	return s
}

// SetPrefixListId sets the PrefixListId field's value.
func (s *RestoreManagedPrefixListVersionInput) SetPrefixListId(v string) *RestoreManagedPrefixListVersionInput {
	s.PrefixListId = &v
	return s
}

// SetPreviousVersion sets the PreviousVersion field's value.
func (s *RestoreManagedPrefixListVersionInput) SetPreviousVersion(v int64) *RestoreManagedPrefixListVersionInput {
	s.PreviousVersion = &v
	return s
}

// Contains the output of RestoreManagedPrefixListVersion.
type RestoreManagedPrefixListVersionOutput struct {
	_ struct{} `type:"structure"`

	// Information about the prefix list.
	PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"`
}

// String returns the string representation
func (s RestoreManagedPrefixListVersionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RestoreManagedPrefixListVersionOutput) GoString() string {
	return s.String()
}

// SetPrefixList sets the PrefixList field's value.
func (s *RestoreManagedPrefixListVersionOutput) SetPrefixList(v *ManagedPrefixList) *RestoreManagedPrefixListVersionOutput {
	s.PrefixList = v
	return s
}

// Contains the parameters for RevokeClientVpnIngress.
type RevokeClientVpnIngressInput struct {
	_ struct{} `type:"structure"`

	// The ID of the Active Directory group for which to revoke access.
	AccessGroupId *string `type:"string"`

	// The ID of the Client VPN endpoint with which the authorization rule is associated.
	//
	// ClientVpnEndpointId is a required field
	ClientVpnEndpointId *string `type:"string" required:"true"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// Indicates whether access should be revoked for all clients.
	RevokeAllGroups *bool `type:"boolean"`

	// The IPv4 address range, in CIDR notation, of the network for which access
	// is being removed.
	//
	// TargetNetworkCidr is a required field
	TargetNetworkCidr *string `type:"string" required:"true"`
}

// String returns the string representation
func (s RevokeClientVpnIngressInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RevokeClientVpnIngressInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RevokeClientVpnIngressInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RevokeClientVpnIngressInput"}
	if s.ClientVpnEndpointId == nil {
		invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId"))
	}
	if s.TargetNetworkCidr == nil {
		invalidParams.Add(request.NewErrParamRequired("TargetNetworkCidr"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAccessGroupId sets the AccessGroupId field's value.
func (s *RevokeClientVpnIngressInput) SetAccessGroupId(v string) *RevokeClientVpnIngressInput {
	s.AccessGroupId = &v
	return s
}

// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value.
func (s *RevokeClientVpnIngressInput) SetClientVpnEndpointId(v string) *RevokeClientVpnIngressInput {
	s.ClientVpnEndpointId = &v
	return s
}

// SetDryRun sets the DryRun field's value.
func (s *RevokeClientVpnIngressInput) SetDryRun(v bool) *RevokeClientVpnIngressInput {
	s.DryRun = &v
	return s
}

// SetRevokeAllGroups sets the RevokeAllGroups field's value.
func (s *RevokeClientVpnIngressInput) SetRevokeAllGroups(v bool) *RevokeClientVpnIngressInput {
	s.RevokeAllGroups = &v
	return s
}

// SetTargetNetworkCidr sets the TargetNetworkCidr field's value.
func (s *RevokeClientVpnIngressInput) SetTargetNetworkCidr(v string) *RevokeClientVpnIngressInput {
	s.TargetNetworkCidr = &v
	return s
}

// Contains the output of RevokeClientVpnIngress.
type RevokeClientVpnIngressOutput struct {
	_ struct{} `type:"structure"`

	// The current state of the authorization rule.
	Status *ClientVpnAuthorizationRuleStatus `locationName:"status" type:"structure"`
}

// String returns the string representation
func (s RevokeClientVpnIngressOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RevokeClientVpnIngressOutput) GoString() string {
	return s.String()
}

// SetStatus sets the Status field's value.
func (s *RevokeClientVpnIngressOutput) SetStatus(v *ClientVpnAuthorizationRuleStatus) *RevokeClientVpnIngressOutput {
	s.Status = v
	return s
}

// Contains the parameters for RevokeSecurityGroupEgress.
type RevokeSecurityGroupEgressInput struct {
	_ struct{} `type:"structure"`

	// Not supported. Use a set of IP permissions to specify the CIDR.
	CidrIp *string `locationName:"cidrIp" type:"string"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// Not supported. Use a set of IP permissions to specify the port.
	FromPort *int64 `locationName:"fromPort" type:"integer"`

	// The ID of the security group.
	//
	// GroupId is a required field
	GroupId *string `locationName:"groupId" type:"string" required:"true"`

	// The sets of IP permissions. You can't specify a destination security group
	// and a CIDR IP address range in the same set of permissions.
	IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"`

	// Not supported. Use a set of IP permissions to specify the protocol name or
	// number.
	IpProtocol *string `locationName:"ipProtocol" type:"string"`

	// Not supported. Use a set of IP permissions to specify a destination security
	// group.
	SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"`

	// Not supported. Use a set of IP permissions to specify a destination security
	// group.
	SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"`

	// Not supported. Use a set of IP permissions to specify the port.
	ToPort *int64 `locationName:"toPort" type:"integer"`
}

// String returns the string representation
func (s RevokeSecurityGroupEgressInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RevokeSecurityGroupEgressInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only GroupId is required for this operation.
func (s *RevokeSecurityGroupEgressInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RevokeSecurityGroupEgressInput"}
	if s.GroupId == nil {
		invalidParams.Add(request.NewErrParamRequired("GroupId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCidrIp sets the CidrIp field's value.
func (s *RevokeSecurityGroupEgressInput) SetCidrIp(v string) *RevokeSecurityGroupEgressInput {
	s.CidrIp = &v
	return s
}

// SetDryRun sets the DryRun field's value.
+func (s *RevokeSecurityGroupEgressInput) SetDryRun(v bool) *RevokeSecurityGroupEgressInput { + s.DryRun = &v + return s +} + +// SetFromPort sets the FromPort field's value. +func (s *RevokeSecurityGroupEgressInput) SetFromPort(v int64) *RevokeSecurityGroupEgressInput { + s.FromPort = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *RevokeSecurityGroupEgressInput) SetGroupId(v string) *RevokeSecurityGroupEgressInput { + s.GroupId = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *RevokeSecurityGroupEgressInput) SetIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressInput { + s.IpPermissions = v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *RevokeSecurityGroupEgressInput) SetIpProtocol(v string) *RevokeSecurityGroupEgressInput { + s.IpProtocol = &v + return s +} + +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value. +func (s *RevokeSecurityGroupEgressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupEgressInput { + s.SourceSecurityGroupName = &v + return s +} + +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value. +func (s *RevokeSecurityGroupEgressInput) SetSourceSecurityGroupOwnerId(v string) *RevokeSecurityGroupEgressInput { + s.SourceSecurityGroupOwnerId = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroupEgressInput { + s.ToPort = &v + return s +} + +type RevokeSecurityGroupEgressOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. 
+ UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RevokeSecurityGroupEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupEgressOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupEgressOutput) SetReturn(v bool) *RevokeSecurityGroupEgressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupEgressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressOutput { + s.UnknownIpPermissions = v + return s +} + +type RevokeSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The CIDR IP address range. You can't specify this parameter when specifying + // a source security group. + CidrIp *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The start of port range for the TCP and UDP protocols, or an ICMP type number. + // For the ICMP type number, use -1 to specify all ICMP types. + FromPort *int64 `type:"integer"` + + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You must specify + // either the security group ID or the security group name in the request. 
+ GroupName *string `type:"string"` + + // The sets of IP permissions. You can't specify a source security group and + // a CIDR IP address range in the same set of permissions. + IpPermissions []*IpPermission `locationNameList:"item" type:"list"` + + // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). + // Use -1 to specify all. + IpProtocol *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the source security group. You can't + // specify this parameter in combination with the following parameters: the + // CIDR IP address range, the start of the port range, the IP protocol, and + // the end of the port range. For EC2-VPC, the source security group must be + // in the same VPC. To revoke a specific rule for an IP protocol and port range, + // use a set of IP permissions instead. + SourceSecurityGroupName *string `type:"string"` + + // [EC2-Classic] The AWS account ID of the source security group, if the source + // security group is in a different account. You can't specify this parameter + // in combination with the following parameters: the CIDR IP address range, + // the IP protocol, the start of the port range, and the end of the port range. + // To revoke a specific rule for an IP protocol and port range, use a set of + // IP permissions instead. + SourceSecurityGroupOwnerId *string `type:"string"` + + // The end of port range for the TCP and UDP protocols, or an ICMP code number. + // For the ICMP code number, use -1 to specify all ICMP codes for the ICMP type. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// SetCidrIp sets the CidrIp field's value. 
+func (s *RevokeSecurityGroupIngressInput) SetCidrIp(v string) *RevokeSecurityGroupIngressInput { + s.CidrIp = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RevokeSecurityGroupIngressInput) SetDryRun(v bool) *RevokeSecurityGroupIngressInput { + s.DryRun = &v + return s +} + +// SetFromPort sets the FromPort field's value. +func (s *RevokeSecurityGroupIngressInput) SetFromPort(v int64) *RevokeSecurityGroupIngressInput { + s.FromPort = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *RevokeSecurityGroupIngressInput) SetGroupId(v string) *RevokeSecurityGroupIngressInput { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *RevokeSecurityGroupIngressInput) SetGroupName(v string) *RevokeSecurityGroupIngressInput { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *RevokeSecurityGroupIngressInput) SetIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressInput { + s.IpPermissions = v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *RevokeSecurityGroupIngressInput) SetIpProtocol(v string) *RevokeSecurityGroupIngressInput { + s.IpProtocol = &v + return s +} + +// SetSourceSecurityGroupName sets the SourceSecurityGroupName field's value. +func (s *RevokeSecurityGroupIngressInput) SetSourceSecurityGroupName(v string) *RevokeSecurityGroupIngressInput { + s.SourceSecurityGroupName = &v + return s +} + +// SetSourceSecurityGroupOwnerId sets the SourceSecurityGroupOwnerId field's value. +func (s *RevokeSecurityGroupIngressInput) SetSourceSecurityGroupOwnerId(v string) *RevokeSecurityGroupIngressInput { + s.SourceSecurityGroupOwnerId = &v + return s +} + +// SetToPort sets the ToPort field's value. 
+func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGroupIngressInput { + s.ToPort = &v + return s +} + +type RevokeSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RevokeSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RevokeSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupIngressOutput) SetReturn(v bool) *RevokeSecurityGroupIngressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupIngressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressOutput { + s.UnknownIpPermissions = v + return s +} + +// Describes a route in a route table. +type Route struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + + // The IPv4 CIDR block used for the destination match. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The IPv6 CIDR block used for the destination match. + DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + + // The prefix of the AWS service. + DestinationPrefixListId *string `locationName:"destinationPrefixListId" type:"string"` + + // The ID of the egress-only internet gateway. 
+ EgressOnlyInternetGatewayId *string `locationName:"egressOnlyInternetGatewayId" type:"string"` + + // The ID of a gateway attached to your VPC. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // The ID of a NAT instance in your VPC. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + + // The ID of the local gateway. + LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + + // The ID of a NAT gateway. + NatGatewayId *string `locationName:"natGatewayId" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // Describes how the route was created. + // + // * CreateRouteTable - The route was automatically created when the route + // table was created. + // + // * CreateRoute - The route was manually added to the route table. + // + // * EnableVgwRoutePropagation - The route was propagated by route propagation. + Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"` + + // The state of the route. The blackhole state indicates that the route's target + // isn't available (for example, the specified gateway isn't attached to the + // VPC, or the specified NAT instance has been terminated). + State *string `locationName:"state" type:"string" enum:"RouteState"` + + // The ID of a transit gateway. + TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` + + // The ID of a VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s Route) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Route) GoString() string { + return s.String() +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. 
+func (s *Route) SetCarrierGatewayId(v string) *Route { + s.CarrierGatewayId = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *Route) SetDestinationCidrBlock(v string) *Route { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationIpv6CidrBlock sets the DestinationIpv6CidrBlock field's value. +func (s *Route) SetDestinationIpv6CidrBlock(v string) *Route { + s.DestinationIpv6CidrBlock = &v + return s +} + +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *Route) SetDestinationPrefixListId(v string) *Route { + s.DestinationPrefixListId = &v + return s +} + +// SetEgressOnlyInternetGatewayId sets the EgressOnlyInternetGatewayId field's value. +func (s *Route) SetEgressOnlyInternetGatewayId(v string) *Route { + s.EgressOnlyInternetGatewayId = &v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *Route) SetGatewayId(v string) *Route { + s.GatewayId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *Route) SetInstanceId(v string) *Route { + s.InstanceId = &v + return s +} + +// SetInstanceOwnerId sets the InstanceOwnerId field's value. +func (s *Route) SetInstanceOwnerId(v string) *Route { + s.InstanceOwnerId = &v + return s +} + +// SetLocalGatewayId sets the LocalGatewayId field's value. +func (s *Route) SetLocalGatewayId(v string) *Route { + s.LocalGatewayId = &v + return s +} + +// SetNatGatewayId sets the NatGatewayId field's value. +func (s *Route) SetNatGatewayId(v string) *Route { + s.NatGatewayId = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *Route) SetNetworkInterfaceId(v string) *Route { + s.NetworkInterfaceId = &v + return s +} + +// SetOrigin sets the Origin field's value. +func (s *Route) SetOrigin(v string) *Route { + s.Origin = &v + return s +} + +// SetState sets the State field's value. 
+func (s *Route) SetState(v string) *Route { + s.State = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *Route) SetTransitGatewayId(v string) *Route { + s.TransitGatewayId = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. +func (s *Route) SetVpcPeeringConnectionId(v string) *Route { + s.VpcPeeringConnectionId = &v + return s +} + +// Describes a route table. +type RouteTable struct { + _ struct{} `type:"structure"` + + // The associations between the route table and one or more subnets or a gateway. + Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the route table. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any virtual private gateway (VGW) propagating routes. + PropagatingVgws []*PropagatingVgw `locationName:"propagatingVgwSet" locationNameList:"item" type:"list"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The routes in the route table. + Routes []*Route `locationName:"routeSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s RouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTable) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *RouteTable) SetAssociations(v []*RouteTableAssociation) *RouteTable { + s.Associations = v + return s +} + +// SetOwnerId sets the OwnerId field's value. 
+func (s *RouteTable) SetOwnerId(v string) *RouteTable { + s.OwnerId = &v + return s +} + +// SetPropagatingVgws sets the PropagatingVgws field's value. +func (s *RouteTable) SetPropagatingVgws(v []*PropagatingVgw) *RouteTable { + s.PropagatingVgws = v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *RouteTable) SetRouteTableId(v string) *RouteTable { + s.RouteTableId = &v + return s +} + +// SetRoutes sets the Routes field's value. +func (s *RouteTable) SetRoutes(v []*Route) *RouteTable { + s.Routes = v + return s +} + +// SetTags sets the Tags field's value. +func (s *RouteTable) SetTags(v []*Tag) *RouteTable { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *RouteTable) SetVpcId(v string) *RouteTable { + s.VpcId = &v + return s +} + +// Describes an association between a route table and a subnet or gateway. +type RouteTableAssociation struct { + _ struct{} `type:"structure"` + + // The state of the association. + AssociationState *RouteTableAssociationState `locationName:"associationState" type:"structure"` + + // The ID of the internet gateway or virtual private gateway. + GatewayId *string `locationName:"gatewayId" type:"string"` + + // Indicates whether this is the main route table. + Main *bool `locationName:"main" type:"boolean"` + + // The ID of the association. + RouteTableAssociationId *string `locationName:"routeTableAssociationId" type:"string"` + + // The ID of the route table. + RouteTableId *string `locationName:"routeTableId" type:"string"` + + // The ID of the subnet. A subnet ID is not returned for an implicit association. 
+ SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s RouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociation) GoString() string { + return s.String() +} + +// SetAssociationState sets the AssociationState field's value. +func (s *RouteTableAssociation) SetAssociationState(v *RouteTableAssociationState) *RouteTableAssociation { + s.AssociationState = v + return s +} + +// SetGatewayId sets the GatewayId field's value. +func (s *RouteTableAssociation) SetGatewayId(v string) *RouteTableAssociation { + s.GatewayId = &v + return s +} + +// SetMain sets the Main field's value. +func (s *RouteTableAssociation) SetMain(v bool) *RouteTableAssociation { + s.Main = &v + return s +} + +// SetRouteTableAssociationId sets the RouteTableAssociationId field's value. +func (s *RouteTableAssociation) SetRouteTableAssociationId(v string) *RouteTableAssociation { + s.RouteTableAssociationId = &v + return s +} + +// SetRouteTableId sets the RouteTableId field's value. +func (s *RouteTableAssociation) SetRouteTableId(v string) *RouteTableAssociation { + s.RouteTableId = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *RouteTableAssociation) SetSubnetId(v string) *RouteTableAssociation { + s.SubnetId = &v + return s +} + +// Describes the state of an association between a route table and a subnet +// or gateway. +type RouteTableAssociationState struct { + _ struct{} `type:"structure"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"RouteTableAssociationStateCode"` + + // The status message, if applicable. 
+ StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s RouteTableAssociationState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteTableAssociationState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *RouteTableAssociationState) SetState(v string) *RouteTableAssociationState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *RouteTableAssociationState) SetStatusMessage(v string) *RouteTableAssociationState { + s.StatusMessage = &v + return s +} + +type RunInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // The block device mapping entries. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Information about the Capacity Reservation targeting option. If you do not + // specify this parameter, the instance's Capacity Reservation preference defaults + // to open, which enables it to run in any open Capacity Reservation that has + // matching attributes (instance type, platform, Availability Zone). + CapacityReservationSpecification *CapacityReservationSpecification `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. If you do not specify a client token, a randomly generated token + // is used for the request to ensure idempotency. + // + // For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Maximum 64 ASCII characters + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // The CPU options for the instance. 
For more information, see Optimizing CPU + // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon Elastic Compute Cloud User Guide. + CpuOptions *CpuOptionsRequest `type:"structure"` + + // The credit option for CPU usage of the burstable performance instance. Valid + // values are standard and unlimited. To change this attribute after launch, + // use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). + // For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: standard (T2 instances) or unlimited (T3/T3a instances) + CreditSpecification *CreditSpecificationRequest `type:"structure"` + + // If you set this parameter to true, you can't terminate the instance using + // the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute + // after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + // Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, + // you can terminate the instance by running the shutdown command from the instance. + // + // Default: false + DisableApiTermination *bool `locationName:"disableApiTermination" type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. 
This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal Amazon EBS I/O performance. This optimization isn't + // available with all instance types. Additional usage charges apply when using + // an EBS-optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource + // that you can attach to your Windows instance to accelerate the graphics performance + // of your applications. For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) + // in the Amazon Elastic Compute Cloud User Guide. + ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"` + + // An elastic inference accelerator to associate with the instance. Elastic + // inference accelerators are a resource you can attach to your Amazon EC2 instances + // to accelerate your Deep Learning (DL) inference workloads. + // + // You cannot specify accelerators from different generations in the same request. + ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *EnclaveOptionsRequest `type:"structure"` + + // Indicates whether an instance is enabled for hibernation. For more information, + // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // in the Amazon Elastic Compute Cloud User Guide. 
+ // + // You can't enable hibernation and AWS Nitro Enclaves on the same instance. + HibernationOptions *HibernationOptionsRequest `type:"structure"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. An AMI ID is required to launch an instance and must be + // specified here or in a launch template. + ImageId *string `type:"string"` + + // Indicates whether an instance stops or terminates when you initiate shutdown + // from the instance (using the operating system command for system shutdown). + // + // Default: stop + InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"` + + // The market (purchasing) option for the instances. + // + // For RunInstances, persistent Spot Instance requests are only supported when + // InstanceInterruptionBehavior is set to either hibernate or stop. + InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` + + // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: m1.small + InstanceType *string `type:"string" enum:"InstanceType"` + + // [EC2-VPC] The number of IPv6 addresses to associate with the primary network + // interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. + // You cannot specify this option and the option to assign specific IPv6 addresses + // in the same request. You can specify this option if you've specified a minimum + // number of instances to launch. + // + // You cannot specify this option and the network interfaces option in the same + // request. + Ipv6AddressCount *int64 `type:"integer"` + + // [EC2-VPC] The IPv6 addresses from the range of the subnet to associate with + // the primary network interface. 
You cannot specify this option and the option + // to assign a number of IPv6 addresses in the same request. You cannot specify + // this option if you've specified a minimum number of instances to launch. + // + // You cannot specify this option and the network interfaces option in the same + // request. + Ipv6Addresses []*InstanceIpv6Address `locationName:"Ipv6Address" locationNameList:"item" type:"list"` + + // The ID of the kernel. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + KernelId *string `type:"string"` + + // The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) + // or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). + // + // If you do not specify a key pair, you can't connect to the instance unless + // you choose an AMI that is configured to allow users another way to log in. + KeyName *string `type:"string"` + + // The launch template to use to launch the instances. Any parameters that you + // specify in RunInstances override the same parameters in the launch template. + // You can specify either the name or ID of a launch template, but not both. + LaunchTemplate *LaunchTemplateSpecification `type:"structure"` + + // The license configurations. + LicenseSpecifications []*LicenseConfigurationRequest `locationName:"LicenseSpecification" locationNameList:"item" type:"list"` + + // The maximum number of instances to launch. If you specify more instances + // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + // the largest possible number of instances above MinCount. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. 
For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 FAQ. + // + // MaxCount is a required field + MaxCount *int64 `type:"integer" required:"true"` + + // The metadata options for the instance. For more information, see Instance + // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` + + // The minimum number of instances to launch. If you specify a minimum that + // is more instances than Amazon EC2 can launch in the target Availability Zone, + // Amazon EC2 launches no instances. + // + // Constraints: Between 1 and the maximum number you're allowed for the specified + // instance type. For more information about the default limits, and how to + // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + // in the Amazon EC2 General FAQ. + // + // MinCount is a required field + MinCount *int64 `type:"integer" required:"true"` + + // Specifies whether detailed monitoring is enabled for the instance. + Monitoring *RunInstancesMonitoringEnabled `type:"structure"` + + // The network interfaces to associate with the instance. If you specify a network + // interface, you must specify any security groups and subnets as part of the + // network interface. + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterface" locationNameList:"item" type:"list"` + + // The placement for the instance. + Placement *Placement `type:"structure"` + + // [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 + // address range of the subnet. + // + // Only one private IP address can be designated as primary. 
You can't specify + // this option if you've specified the option to designate a private IP address + // as the primary IP address in a network interface specification. You cannot + // specify this option if you're launching more than one instance in the request. + // + // You cannot specify this option and the network interfaces option in the same + // request. + PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` + + // The ID of the RAM disk to select. Some kernels require additional drivers + // at launch. Check the kernel requirements for information about whether you + // need to specify a RAM disk. To find kernel requirements, go to the AWS Resource + // Center and search for the kernel ID. + // + // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + // information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + // in the Amazon Elastic Compute Cloud User Guide. + RamdiskId *string `type:"string"` + + // The IDs of the security groups. You can create a security group using CreateSecurityGroup + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). + // + // If you specify a network interface, you must specify any security groups + // as part of the network interface. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // [EC2-Classic, default VPC] The names of the security groups. For a nondefault + // VPC, you must use security group IDs instead. + // + // If you specify a network interface, you must specify any security groups + // as part of the network interface. + // + // Default: Amazon EC2 uses the default security group. + SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` + + // [EC2-VPC] The ID of the subnet to launch the instance into. 
+ // + // If you specify a network interface, you must specify any subnets as part + // of the network interface. + SubnetId *string `type:"string"` + + // The tags to apply to the resources during launch. You can only tag instances + // and volumes on launch. The specified tags are applied to all instances or + // volumes that are created during launch. To tag a resource after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The user data to make available to the instance. For more information, see + // Running commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). If you are using a command line tool, base64-encoding is performed + // for you, and you can load the text from a file. Otherwise, you must provide + // base64-encoded text. User data is limited to 16 KB. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s RunInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RunInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunInstancesInput"} + if s.MaxCount == nil { + invalidParams.Add(request.NewErrParamRequired("MaxCount")) + } + if s.MinCount == nil { + invalidParams.Add(request.NewErrParamRequired("MinCount")) + } + if s.CreditSpecification != nil { + if err := s.CreditSpecification.Validate(); err != nil { + invalidParams.AddNested("CreditSpecification", err.(request.ErrInvalidParams)) + } + } + if s.ElasticGpuSpecification != nil { + for i, v := range s.ElasticGpuSpecification { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ElasticGpuSpecification", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ElasticInferenceAccelerators != nil { + for i, v := range s.ElasticInferenceAccelerators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ElasticInferenceAccelerators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Monitoring != nil { + if err := s.Monitoring.Validate(); err != nil { + invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalInfo sets the AdditionalInfo field's value. +func (s *RunInstancesInput) SetAdditionalInfo(v string) *RunInstancesInput { + s.AdditionalInfo = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *RunInstancesInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RunInstancesInput { + s.BlockDeviceMappings = v + return s +} + +// SetCapacityReservationSpecification sets the CapacityReservationSpecification field's value. 
+func (s *RunInstancesInput) SetCapacityReservationSpecification(v *CapacityReservationSpecification) *RunInstancesInput { + s.CapacityReservationSpecification = v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *RunInstancesInput) SetClientToken(v string) *RunInstancesInput { + s.ClientToken = &v + return s +} + +// SetCpuOptions sets the CpuOptions field's value. +func (s *RunInstancesInput) SetCpuOptions(v *CpuOptionsRequest) *RunInstancesInput { + s.CpuOptions = v + return s +} + +// SetCreditSpecification sets the CreditSpecification field's value. +func (s *RunInstancesInput) SetCreditSpecification(v *CreditSpecificationRequest) *RunInstancesInput { + s.CreditSpecification = v + return s +} + +// SetDisableApiTermination sets the DisableApiTermination field's value. +func (s *RunInstancesInput) SetDisableApiTermination(v bool) *RunInstancesInput { + s.DisableApiTermination = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RunInstancesInput) SetDryRun(v bool) *RunInstancesInput { + s.DryRun = &v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *RunInstancesInput) SetEbsOptimized(v bool) *RunInstancesInput { + s.EbsOptimized = &v + return s +} + +// SetElasticGpuSpecification sets the ElasticGpuSpecification field's value. +func (s *RunInstancesInput) SetElasticGpuSpecification(v []*ElasticGpuSpecification) *RunInstancesInput { + s.ElasticGpuSpecification = v + return s +} + +// SetElasticInferenceAccelerators sets the ElasticInferenceAccelerators field's value. +func (s *RunInstancesInput) SetElasticInferenceAccelerators(v []*ElasticInferenceAccelerator) *RunInstancesInput { + s.ElasticInferenceAccelerators = v + return s +} + +// SetEnclaveOptions sets the EnclaveOptions field's value. 
+func (s *RunInstancesInput) SetEnclaveOptions(v *EnclaveOptionsRequest) *RunInstancesInput { + s.EnclaveOptions = v + return s +} + +// SetHibernationOptions sets the HibernationOptions field's value. +func (s *RunInstancesInput) SetHibernationOptions(v *HibernationOptionsRequest) *RunInstancesInput { + s.HibernationOptions = v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *RunInstancesInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *RunInstancesInput { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *RunInstancesInput) SetImageId(v string) *RunInstancesInput { + s.ImageId = &v + return s +} + +// SetInstanceInitiatedShutdownBehavior sets the InstanceInitiatedShutdownBehavior field's value. +func (s *RunInstancesInput) SetInstanceInitiatedShutdownBehavior(v string) *RunInstancesInput { + s.InstanceInitiatedShutdownBehavior = &v + return s +} + +// SetInstanceMarketOptions sets the InstanceMarketOptions field's value. +func (s *RunInstancesInput) SetInstanceMarketOptions(v *InstanceMarketOptionsRequest) *RunInstancesInput { + s.InstanceMarketOptions = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *RunInstancesInput) SetInstanceType(v string) *RunInstancesInput { + s.InstanceType = &v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *RunInstancesInput) SetIpv6AddressCount(v int64) *RunInstancesInput { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *RunInstancesInput) SetIpv6Addresses(v []*InstanceIpv6Address) *RunInstancesInput { + s.Ipv6Addresses = v + return s +} + +// SetKernelId sets the KernelId field's value. +func (s *RunInstancesInput) SetKernelId(v string) *RunInstancesInput { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. 
+func (s *RunInstancesInput) SetKeyName(v string) *RunInstancesInput { + s.KeyName = &v + return s +} + +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *RunInstancesInput) SetLaunchTemplate(v *LaunchTemplateSpecification) *RunInstancesInput { + s.LaunchTemplate = v + return s +} + +// SetLicenseSpecifications sets the LicenseSpecifications field's value. +func (s *RunInstancesInput) SetLicenseSpecifications(v []*LicenseConfigurationRequest) *RunInstancesInput { + s.LicenseSpecifications = v + return s +} + +// SetMaxCount sets the MaxCount field's value. +func (s *RunInstancesInput) SetMaxCount(v int64) *RunInstancesInput { + s.MaxCount = &v + return s +} + +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *RunInstancesInput) SetMetadataOptions(v *InstanceMetadataOptionsRequest) *RunInstancesInput { + s.MetadataOptions = v + return s +} + +// SetMinCount sets the MinCount field's value. +func (s *RunInstancesInput) SetMinCount(v int64) *RunInstancesInput { + s.MinCount = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *RunInstancesInput) SetMonitoring(v *RunInstancesMonitoringEnabled) *RunInstancesInput { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *RunInstancesInput) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *RunInstancesInput { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *RunInstancesInput) SetPlacement(v *Placement) *RunInstancesInput { + s.Placement = v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *RunInstancesInput) SetPrivateIpAddress(v string) *RunInstancesInput { + s.PrivateIpAddress = &v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. 
+func (s *RunInstancesInput) SetRamdiskId(v string) *RunInstancesInput { + s.RamdiskId = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *RunInstancesInput) SetSecurityGroupIds(v []*string) *RunInstancesInput { + s.SecurityGroupIds = v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *RunInstancesInput) SetSecurityGroups(v []*string) *RunInstancesInput { + s.SecurityGroups = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *RunInstancesInput) SetSubnetId(v string) *RunInstancesInput { + s.SubnetId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *RunInstancesInput) SetTagSpecifications(v []*TagSpecification) *RunInstancesInput { + s.TagSpecifications = v + return s +} + +// SetUserData sets the UserData field's value. +func (s *RunInstancesInput) SetUserData(v string) *RunInstancesInput { + s.UserData = &v + return s +} + +// Describes the monitoring of an instance. +type RunInstancesMonitoringEnabled struct { + _ struct{} `type:"structure"` + + // Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring + // is enabled. + // + // Enabled is a required field + Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RunInstancesMonitoringEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunInstancesMonitoringEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RunInstancesMonitoringEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunInstancesMonitoringEnabled"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *RunInstancesMonitoringEnabled) SetEnabled(v bool) *RunInstancesMonitoringEnabled { + s.Enabled = &v + return s +} + +// Contains the parameters for RunScheduledInstances. +type RunScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of instances. + // + // Default: 1 + InstanceCount *int64 `type:"integer"` + + // The launch specification. You must match the instance type, Availability + // Zone, network, and platform of the schedule that you purchased. + // + // LaunchSpecification is a required field + LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"` + + // The Scheduled Instance ID. 
+ // + // ScheduledInstanceId is a required field + ScheduledInstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RunScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RunScheduledInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunScheduledInstancesInput"} + if s.LaunchSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchSpecification")) + } + if s.ScheduledInstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("ScheduledInstanceId")) + } + if s.LaunchSpecification != nil { + if err := s.LaunchSpecification.Validate(); err != nil { + invalidParams.AddNested("LaunchSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *RunScheduledInstancesInput) SetClientToken(v string) *RunScheduledInstancesInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RunScheduledInstancesInput) SetDryRun(v bool) *RunScheduledInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *RunScheduledInstancesInput) SetInstanceCount(v int64) *RunScheduledInstancesInput { + s.InstanceCount = &v + return s +} + +// SetLaunchSpecification sets the LaunchSpecification field's value. +func (s *RunScheduledInstancesInput) SetLaunchSpecification(v *ScheduledInstancesLaunchSpecification) *RunScheduledInstancesInput { + s.LaunchSpecification = v + return s +} + +// SetScheduledInstanceId sets the ScheduledInstanceId field's value. 
+func (s *RunScheduledInstancesInput) SetScheduledInstanceId(v string) *RunScheduledInstancesInput { + s.ScheduledInstanceId = &v + return s +} + +// Contains the output of RunScheduledInstances. +type RunScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the newly launched instances. + InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RunScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesOutput) GoString() string { + return s.String() +} + +// SetInstanceIdSet sets the InstanceIdSet field's value. +func (s *RunScheduledInstancesOutput) SetInstanceIdSet(v []*string) *RunScheduledInstancesOutput { + s.InstanceIdSet = v + return s +} + +// Describes the storage parameters for S3 and S3 buckets for an instance store-backed +// AMI. +type S3Storage struct { + _ struct{} `type:"structure"` + + // The access key ID of the owner of the bucket. Before you specify a value + // for your access key ID, review and follow the guidance in Best Practices + // for Managing AWS Access Keys (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). + AWSAccessKeyId *string `type:"string"` + + // The bucket in which to store the AMI. You can specify a bucket that you already + // own or a new bucket that Amazon EC2 creates on your behalf. If you specify + // a bucket that belongs to someone else, Amazon EC2 returns an error. + Bucket *string `locationName:"bucket" type:"string"` + + // The beginning of the file name of the AMI. + Prefix *string `locationName:"prefix" type:"string"` + + // An Amazon S3 upload policy that gives Amazon EC2 permission to upload items + // into Amazon S3 on your behalf. + // + // UploadPolicy is automatically base64 encoded/decoded by the SDK. 
+ UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"` + + // The signature of the JSON document. + UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"` +} + +// String returns the string representation +func (s S3Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Storage) GoString() string { + return s.String() +} + +// SetAWSAccessKeyId sets the AWSAccessKeyId field's value. +func (s *S3Storage) SetAWSAccessKeyId(v string) *S3Storage { + s.AWSAccessKeyId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *S3Storage) SetBucket(v string) *S3Storage { + s.Bucket = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *S3Storage) SetPrefix(v string) *S3Storage { + s.Prefix = &v + return s +} + +// SetUploadPolicy sets the UploadPolicy field's value. +func (s *S3Storage) SetUploadPolicy(v []byte) *S3Storage { + s.UploadPolicy = v + return s +} + +// SetUploadPolicySignature sets the UploadPolicySignature field's value. +func (s *S3Storage) SetUploadPolicySignature(v string) *S3Storage { + s.UploadPolicySignature = &v + return s +} + +// Describes a Scheduled Instance. +type ScheduledInstance struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The date when the Scheduled Instance was purchased. + CreateDate *time.Time `locationName:"createDate" type:"timestamp"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The number of instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The network platform (EC2-Classic or EC2-VPC). 
+ NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The time for the next schedule to start. + NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The time that the previous schedule ended or will end. + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The end date for the Scheduled Instance. + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp"` + + // The start date for the Scheduled Instance. + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstance) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ScheduledInstance) SetAvailabilityZone(v string) *ScheduledInstance { + s.AvailabilityZone = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *ScheduledInstance) SetCreateDate(v time.Time) *ScheduledInstance { + s.CreateDate = &v + return s +} + +// SetHourlyPrice sets the HourlyPrice field's value. 
+func (s *ScheduledInstance) SetHourlyPrice(v string) *ScheduledInstance { + s.HourlyPrice = &v + return s +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *ScheduledInstance) SetInstanceCount(v int64) *ScheduledInstance { + s.InstanceCount = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ScheduledInstance) SetInstanceType(v string) *ScheduledInstance { + s.InstanceType = &v + return s +} + +// SetNetworkPlatform sets the NetworkPlatform field's value. +func (s *ScheduledInstance) SetNetworkPlatform(v string) *ScheduledInstance { + s.NetworkPlatform = &v + return s +} + +// SetNextSlotStartTime sets the NextSlotStartTime field's value. +func (s *ScheduledInstance) SetNextSlotStartTime(v time.Time) *ScheduledInstance { + s.NextSlotStartTime = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ScheduledInstance) SetPlatform(v string) *ScheduledInstance { + s.Platform = &v + return s +} + +// SetPreviousSlotEndTime sets the PreviousSlotEndTime field's value. +func (s *ScheduledInstance) SetPreviousSlotEndTime(v time.Time) *ScheduledInstance { + s.PreviousSlotEndTime = &v + return s +} + +// SetRecurrence sets the Recurrence field's value. +func (s *ScheduledInstance) SetRecurrence(v *ScheduledInstanceRecurrence) *ScheduledInstance { + s.Recurrence = v + return s +} + +// SetScheduledInstanceId sets the ScheduledInstanceId field's value. +func (s *ScheduledInstance) SetScheduledInstanceId(v string) *ScheduledInstance { + s.ScheduledInstanceId = &v + return s +} + +// SetSlotDurationInHours sets the SlotDurationInHours field's value. +func (s *ScheduledInstance) SetSlotDurationInHours(v int64) *ScheduledInstance { + s.SlotDurationInHours = &v + return s +} + +// SetTermEndDate sets the TermEndDate field's value. 
+func (s *ScheduledInstance) SetTermEndDate(v time.Time) *ScheduledInstance { + s.TermEndDate = &v + return s +} + +// SetTermStartDate sets the TermStartDate field's value. +func (s *ScheduledInstance) SetTermStartDate(v time.Time) *ScheduledInstance { + s.TermStartDate = &v + return s +} + +// SetTotalScheduledInstanceHours sets the TotalScheduledInstanceHours field's value. +func (s *ScheduledInstance) SetTotalScheduledInstanceHours(v int64) *ScheduledInstance { + s.TotalScheduledInstanceHours = &v + return s +} + +// Describes a schedule that is available for your Scheduled Instances. +type ScheduledInstanceAvailability struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of available instances. + AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The time period for the first schedule to start. + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance type. You can specify one of the C3, C4, M4, or R3 instance + // types. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The maximum term. The only possible value is 365 days. + MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"` + + // The minimum term. The only possible value is 365 days. + MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The purchase token. This token expires in two hours. 
+ PurchaseToken *string `locationName:"purchaseToken" type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstanceAvailability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceAvailability) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ScheduledInstanceAvailability) SetAvailabilityZone(v string) *ScheduledInstanceAvailability { + s.AvailabilityZone = &v + return s +} + +// SetAvailableInstanceCount sets the AvailableInstanceCount field's value. +func (s *ScheduledInstanceAvailability) SetAvailableInstanceCount(v int64) *ScheduledInstanceAvailability { + s.AvailableInstanceCount = &v + return s +} + +// SetFirstSlotStartTime sets the FirstSlotStartTime field's value. +func (s *ScheduledInstanceAvailability) SetFirstSlotStartTime(v time.Time) *ScheduledInstanceAvailability { + s.FirstSlotStartTime = &v + return s +} + +// SetHourlyPrice sets the HourlyPrice field's value. +func (s *ScheduledInstanceAvailability) SetHourlyPrice(v string) *ScheduledInstanceAvailability { + s.HourlyPrice = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ScheduledInstanceAvailability) SetInstanceType(v string) *ScheduledInstanceAvailability { + s.InstanceType = &v + return s +} + +// SetMaxTermDurationInDays sets the MaxTermDurationInDays field's value. 
+func (s *ScheduledInstanceAvailability) SetMaxTermDurationInDays(v int64) *ScheduledInstanceAvailability { + s.MaxTermDurationInDays = &v + return s +} + +// SetMinTermDurationInDays sets the MinTermDurationInDays field's value. +func (s *ScheduledInstanceAvailability) SetMinTermDurationInDays(v int64) *ScheduledInstanceAvailability { + s.MinTermDurationInDays = &v + return s +} + +// SetNetworkPlatform sets the NetworkPlatform field's value. +func (s *ScheduledInstanceAvailability) SetNetworkPlatform(v string) *ScheduledInstanceAvailability { + s.NetworkPlatform = &v + return s +} + +// SetPlatform sets the Platform field's value. +func (s *ScheduledInstanceAvailability) SetPlatform(v string) *ScheduledInstanceAvailability { + s.Platform = &v + return s +} + +// SetPurchaseToken sets the PurchaseToken field's value. +func (s *ScheduledInstanceAvailability) SetPurchaseToken(v string) *ScheduledInstanceAvailability { + s.PurchaseToken = &v + return s +} + +// SetRecurrence sets the Recurrence field's value. +func (s *ScheduledInstanceAvailability) SetRecurrence(v *ScheduledInstanceRecurrence) *ScheduledInstanceAvailability { + s.Recurrence = v + return s +} + +// SetSlotDurationInHours sets the SlotDurationInHours field's value. +func (s *ScheduledInstanceAvailability) SetSlotDurationInHours(v int64) *ScheduledInstanceAvailability { + s.SlotDurationInHours = &v + return s +} + +// SetTotalScheduledInstanceHours sets the TotalScheduledInstanceHours field's value. +func (s *ScheduledInstanceAvailability) SetTotalScheduledInstanceHours(v int64) *ScheduledInstanceAvailability { + s.TotalScheduledInstanceHours = &v + return s +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrence struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `locationName:"frequency" type:"string"` + + // The interval quantity. The interval unit depends on the value of frequency. 
+ // For example, every 2 weeks or every 2 months. + Interval *int64 `locationName:"interval" type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). + OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. + OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"` + + // The unit for occurrenceDaySet (DayOfWeek or DayOfMonth). + OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrence) GoString() string { + return s.String() +} + +// SetFrequency sets the Frequency field's value. +func (s *ScheduledInstanceRecurrence) SetFrequency(v string) *ScheduledInstanceRecurrence { + s.Frequency = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *ScheduledInstanceRecurrence) SetInterval(v int64) *ScheduledInstanceRecurrence { + s.Interval = &v + return s +} + +// SetOccurrenceDaySet sets the OccurrenceDaySet field's value. +func (s *ScheduledInstanceRecurrence) SetOccurrenceDaySet(v []*int64) *ScheduledInstanceRecurrence { + s.OccurrenceDaySet = v + return s +} + +// SetOccurrenceRelativeToEnd sets the OccurrenceRelativeToEnd field's value. +func (s *ScheduledInstanceRecurrence) SetOccurrenceRelativeToEnd(v bool) *ScheduledInstanceRecurrence { + s.OccurrenceRelativeToEnd = &v + return s +} + +// SetOccurrenceUnit sets the OccurrenceUnit field's value. 
+func (s *ScheduledInstanceRecurrence) SetOccurrenceUnit(v string) *ScheduledInstanceRecurrence { + s.OccurrenceUnit = &v + return s +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrenceRequest struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `type:"string"` + + // The interval quantity. The interval unit depends on the value of Frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). You can't specify this value with a daily schedule. If the occurrence + // is relative to the end of the month, you can specify only a single day. + OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. You can't specify this value with a daily schedule. + OccurrenceRelativeToEnd *bool `type:"boolean"` + + // The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required + // for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. + // You can't specify this value with a daily schedule. + OccurrenceUnit *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrenceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrenceRequest) GoString() string { + return s.String() +} + +// SetFrequency sets the Frequency field's value. +func (s *ScheduledInstanceRecurrenceRequest) SetFrequency(v string) *ScheduledInstanceRecurrenceRequest { + s.Frequency = &v + return s +} + +// SetInterval sets the Interval field's value. 
+func (s *ScheduledInstanceRecurrenceRequest) SetInterval(v int64) *ScheduledInstanceRecurrenceRequest { + s.Interval = &v + return s +} + +// SetOccurrenceDays sets the OccurrenceDays field's value. +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceDays(v []*int64) *ScheduledInstanceRecurrenceRequest { + s.OccurrenceDays = v + return s +} + +// SetOccurrenceRelativeToEnd sets the OccurrenceRelativeToEnd field's value. +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceRelativeToEnd(v bool) *ScheduledInstanceRecurrenceRequest { + s.OccurrenceRelativeToEnd = &v + return s +} + +// SetOccurrenceUnit sets the OccurrenceUnit field's value. +func (s *ScheduledInstanceRecurrenceRequest) SetOccurrenceUnit(v string) *ScheduledInstanceRecurrenceRequest { + s.OccurrenceUnit = &v + return s +} + +// Describes a block device mapping for a Scheduled Instance. +type ScheduledInstancesBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name (for example, /dev/sdh or xvdh). + DeviceName *string `type:"string"` + + // Parameters used to set up EBS volumes automatically when the instance is + // launched. + Ebs *ScheduledInstancesEbs `type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with two available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1. The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. 
+ VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) GoString() string { + return s.String() +} + +// SetDeviceName sets the DeviceName field's value. +func (s *ScheduledInstancesBlockDeviceMapping) SetDeviceName(v string) *ScheduledInstancesBlockDeviceMapping { + s.DeviceName = &v + return s +} + +// SetEbs sets the Ebs field's value. +func (s *ScheduledInstancesBlockDeviceMapping) SetEbs(v *ScheduledInstancesEbs) *ScheduledInstancesBlockDeviceMapping { + s.Ebs = v + return s +} + +// SetNoDevice sets the NoDevice field's value. +func (s *ScheduledInstancesBlockDeviceMapping) SetNoDevice(v string) *ScheduledInstancesBlockDeviceMapping { + s.NoDevice = &v + return s +} + +// SetVirtualName sets the VirtualName field's value. +func (s *ScheduledInstancesBlockDeviceMapping) SetVirtualName(v string) *ScheduledInstancesBlockDeviceMapping { + s.VirtualName = &v + return s +} + +// Describes an EBS volume for a Scheduled Instance. +type ScheduledInstancesEbs struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume is encrypted. You can attached encrypted volumes + // only to instances that support them. + Encrypted *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. 
For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. + Iops *int64 `type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `type:"string"` + + // The size of the volume, in GiB. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `type:"integer"` + + // The volume type. gp2 for General Purpose SSD, io1 or io2 for Provisioned + // IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard + // for Magnetic. + // + // Default: gp2 + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesEbs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesEbs) GoString() string { + return s.String() +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *ScheduledInstancesEbs) SetDeleteOnTermination(v bool) *ScheduledInstancesEbs { + s.DeleteOnTermination = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *ScheduledInstancesEbs) SetEncrypted(v bool) *ScheduledInstancesEbs { + s.Encrypted = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *ScheduledInstancesEbs) SetIops(v int64) *ScheduledInstancesEbs { + s.Iops = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *ScheduledInstancesEbs) SetSnapshotId(v string) *ScheduledInstancesEbs { + s.SnapshotId = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *ScheduledInstancesEbs) SetVolumeSize(v int64) *ScheduledInstancesEbs { + s.VolumeSize = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. 
+func (s *ScheduledInstancesEbs) SetVolumeType(v string) *ScheduledInstancesEbs { + s.VolumeType = &v + return s +} + +// Describes an IAM instance profile for a Scheduled Instance. +type ScheduledInstancesIamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). + Arn *string `type:"string"` + + // The name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIamInstanceProfile) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ScheduledInstancesIamInstanceProfile) SetArn(v string) *ScheduledInstancesIamInstanceProfile { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *ScheduledInstancesIamInstanceProfile) SetName(v string) *ScheduledInstancesIamInstanceProfile { + s.Name = &v + return s +} + +// Describes an IPv6 address. +type ScheduledInstancesIpv6Address struct { + _ struct{} `type:"structure"` + + // The IPv6 address. + Ipv6Address *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIpv6Address) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIpv6Address) GoString() string { + return s.String() +} + +// SetIpv6Address sets the Ipv6Address field's value. +func (s *ScheduledInstancesIpv6Address) SetIpv6Address(v string) *ScheduledInstancesIpv6Address { + s.Ipv6Address = &v + return s +} + +// Describes the launch specification for a Scheduled Instance. +// +// If you are launching the Scheduled Instance in EC2-VPC, you must specify +// the ID of the subnet. You can specify the subnet using either SubnetId or +// NetworkInterface. 
+type ScheduledInstancesLaunchSpecification struct { + _ struct{} `type:"structure"` + + // The block device mapping entries. + BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"` + + // The ID of the Amazon Machine Image (AMI). + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The instance type. + InstanceType *string `type:"string"` + + // The ID of the kernel. + KernelId *string `type:"string"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *ScheduledInstancesMonitoring `type:"structure"` + + // The network interfaces. + NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"` + + // The placement information. + Placement *ScheduledInstancesPlacement `type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `type:"string"` + + // The IDs of the security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The base64-encoded MIME user data. 
+ UserData *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesLaunchSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScheduledInstancesLaunchSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScheduledInstancesLaunchSpecification"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *ScheduledInstancesLaunchSpecification) SetBlockDeviceMappings(v []*ScheduledInstancesBlockDeviceMapping) *ScheduledInstancesLaunchSpecification { + s.BlockDeviceMappings = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *ScheduledInstancesLaunchSpecification) SetEbsOptimized(v bool) *ScheduledInstancesLaunchSpecification { + s.EbsOptimized = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *ScheduledInstancesLaunchSpecification) SetIamInstanceProfile(v *ScheduledInstancesIamInstanceProfile) *ScheduledInstancesLaunchSpecification { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ScheduledInstancesLaunchSpecification) SetImageId(v string) *ScheduledInstancesLaunchSpecification { + s.ImageId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *ScheduledInstancesLaunchSpecification) SetInstanceType(v string) *ScheduledInstancesLaunchSpecification { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. 
+func (s *ScheduledInstancesLaunchSpecification) SetKernelId(v string) *ScheduledInstancesLaunchSpecification { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *ScheduledInstancesLaunchSpecification) SetKeyName(v string) *ScheduledInstancesLaunchSpecification { + s.KeyName = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *ScheduledInstancesLaunchSpecification) SetMonitoring(v *ScheduledInstancesMonitoring) *ScheduledInstancesLaunchSpecification { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *ScheduledInstancesLaunchSpecification) SetNetworkInterfaces(v []*ScheduledInstancesNetworkInterface) *ScheduledInstancesLaunchSpecification { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *ScheduledInstancesLaunchSpecification) SetPlacement(v *ScheduledInstancesPlacement) *ScheduledInstancesLaunchSpecification { + s.Placement = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *ScheduledInstancesLaunchSpecification) SetRamdiskId(v string) *ScheduledInstancesLaunchSpecification { + s.RamdiskId = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *ScheduledInstancesLaunchSpecification) SetSecurityGroupIds(v []*string) *ScheduledInstancesLaunchSpecification { + s.SecurityGroupIds = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *ScheduledInstancesLaunchSpecification) SetSubnetId(v string) *ScheduledInstancesLaunchSpecification { + s.SubnetId = &v + return s +} + +// SetUserData sets the UserData field's value. +func (s *ScheduledInstancesLaunchSpecification) SetUserData(v string) *ScheduledInstancesLaunchSpecification { + s.UserData = &v + return s +} + +// Describes whether monitoring is enabled for a Scheduled Instance. 
+type ScheduledInstancesMonitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ScheduledInstancesMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesMonitoring) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ScheduledInstancesMonitoring) SetEnabled(v bool) *ScheduledInstancesMonitoring { + s.Enabled = &v + return s +} + +// Describes a network interface for a Scheduled Instance. +type ScheduledInstancesNetworkInterface struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IPv4 address to instances launched in + // a VPC. The public IPv4 address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `type:"boolean"` + + // Indicates whether to delete the interface when the instance is terminated. + DeleteOnTermination *bool `type:"boolean"` + + // The description. + Description *string `type:"string"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `type:"integer"` + + // The IDs of the security groups. + Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"` + + // The number of IPv6 addresses to assign to the network interface. The IPv6 + // addresses are automatically selected from the subnet range. + Ipv6AddressCount *int64 `type:"integer"` + + // The specific IPv6 addresses from the subnet range. 
+ Ipv6Addresses []*ScheduledInstancesIpv6Address `locationName:"Ipv6Address" locationNameList:"Ipv6Address" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `type:"string"` + + // The IPv4 address of the network interface within the subnet. + PrivateIpAddress *string `type:"string"` + + // The private IPv4 addresses. + PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"` + + // The number of secondary private IPv4 addresses. + SecondaryPrivateIpAddressCount *int64 `type:"integer"` + + // The ID of the subnet. + SubnetId *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesNetworkInterface) GoString() string { + return s.String() +} + +// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. +func (s *ScheduledInstancesNetworkInterface) SetAssociatePublicIpAddress(v bool) *ScheduledInstancesNetworkInterface { + s.AssociatePublicIpAddress = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *ScheduledInstancesNetworkInterface) SetDeleteOnTermination(v bool) *ScheduledInstancesNetworkInterface { + s.DeleteOnTermination = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ScheduledInstancesNetworkInterface) SetDescription(v string) *ScheduledInstancesNetworkInterface { + s.Description = &v + return s +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *ScheduledInstancesNetworkInterface) SetDeviceIndex(v int64) *ScheduledInstancesNetworkInterface { + s.DeviceIndex = &v + return s +} + +// SetGroups sets the Groups field's value. 
+func (s *ScheduledInstancesNetworkInterface) SetGroups(v []*string) *ScheduledInstancesNetworkInterface { + s.Groups = v + return s +} + +// SetIpv6AddressCount sets the Ipv6AddressCount field's value. +func (s *ScheduledInstancesNetworkInterface) SetIpv6AddressCount(v int64) *ScheduledInstancesNetworkInterface { + s.Ipv6AddressCount = &v + return s +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *ScheduledInstancesNetworkInterface) SetIpv6Addresses(v []*ScheduledInstancesIpv6Address) *ScheduledInstancesNetworkInterface { + s.Ipv6Addresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *ScheduledInstancesNetworkInterface) SetNetworkInterfaceId(v string) *ScheduledInstancesNetworkInterface { + s.NetworkInterfaceId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *ScheduledInstancesNetworkInterface) SetPrivateIpAddress(v string) *ScheduledInstancesNetworkInterface { + s.PrivateIpAddress = &v + return s +} + +// SetPrivateIpAddressConfigs sets the PrivateIpAddressConfigs field's value. +func (s *ScheduledInstancesNetworkInterface) SetPrivateIpAddressConfigs(v []*ScheduledInstancesPrivateIpAddressConfig) *ScheduledInstancesNetworkInterface { + s.PrivateIpAddressConfigs = v + return s +} + +// SetSecondaryPrivateIpAddressCount sets the SecondaryPrivateIpAddressCount field's value. +func (s *ScheduledInstancesNetworkInterface) SetSecondaryPrivateIpAddressCount(v int64) *ScheduledInstancesNetworkInterface { + s.SecondaryPrivateIpAddressCount = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *ScheduledInstancesNetworkInterface) SetSubnetId(v string) *ScheduledInstancesNetworkInterface { + s.SubnetId = &v + return s +} + +// Describes the placement for a Scheduled Instance. +type ScheduledInstancesPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. 
+ AvailabilityZone *string `type:"string"` + + // The name of the placement group. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPlacement) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ScheduledInstancesPlacement) SetAvailabilityZone(v string) *ScheduledInstancesPlacement { + s.AvailabilityZone = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *ScheduledInstancesPlacement) SetGroupName(v string) *ScheduledInstancesPlacement { + s.GroupName = &v + return s +} + +// Describes a private IPv4 address for a Scheduled Instance. +type ScheduledInstancesPrivateIpAddressConfig struct { + _ struct{} `type:"structure"` + + // Indicates whether this is a primary IPv4 address. Otherwise, this is a secondary + // IPv4 address. + Primary *bool `type:"boolean"` + + // The IPv4 address. + PrivateIpAddress *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string { + return s.String() +} + +// SetPrimary sets the Primary field's value. +func (s *ScheduledInstancesPrivateIpAddressConfig) SetPrimary(v bool) *ScheduledInstancesPrivateIpAddressConfig { + s.Primary = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. 
+func (s *ScheduledInstancesPrivateIpAddressConfig) SetPrivateIpAddress(v string) *ScheduledInstancesPrivateIpAddressConfig { + s.PrivateIpAddress = &v + return s +} + +type SearchLocalGatewayRoutesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // Filters is a required field + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` + + // The ID of the local gateway route table. + // + // LocalGatewayRouteTableId is a required field + LocalGatewayRouteTableId *string `type:"string" required:"true"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s SearchLocalGatewayRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchLocalGatewayRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SearchLocalGatewayRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchLocalGatewayRoutesInput"} + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) + } + if s.LocalGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("LocalGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *SearchLocalGatewayRoutesInput) SetDryRun(v bool) *SearchLocalGatewayRoutesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *SearchLocalGatewayRoutesInput) SetFilters(v []*Filter) *SearchLocalGatewayRoutesInput { + s.Filters = v + return s +} + +// SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. +func (s *SearchLocalGatewayRoutesInput) SetLocalGatewayRouteTableId(v string) *SearchLocalGatewayRoutesInput { + s.LocalGatewayRouteTableId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *SearchLocalGatewayRoutesInput) SetMaxResults(v int64) *SearchLocalGatewayRoutesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *SearchLocalGatewayRoutesInput) SetNextToken(v string) *SearchLocalGatewayRoutesInput { + s.NextToken = &v + return s +} + +type SearchLocalGatewayRoutesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the routes. + Routes []*LocalGatewayRoute `locationName:"routeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SearchLocalGatewayRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchLocalGatewayRoutesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *SearchLocalGatewayRoutesOutput) SetNextToken(v string) *SearchLocalGatewayRoutesOutput { + s.NextToken = &v + return s +} + +// SetRoutes sets the Routes field's value. 
+func (s *SearchLocalGatewayRoutesOutput) SetRoutes(v []*LocalGatewayRoute) *SearchLocalGatewayRoutesOutput { + s.Routes = v + return s +} + +type SearchTransitGatewayMulticastGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * group-ip-address - The IP address of the transit gateway multicast group. + // + // * is-group-member - The resource is a group member. Valid values are true + // | false. + // + // * is-group-source - The resource is a group source. Valid values are true + // | false. + // + // * member-type - The member type. Valid values are igmp | static. + // + // * resource-id - The ID of the resource. + // + // * resource-type - The type of resource. Valid values are vpc | vpn | direct-connect-gateway + // | tgw-peering. + // + // * source-type - The source type. Valid values are igmp | static. + // + // * state - The state of the subnet association. Valid values are associated + // | associated | disassociated | disassociating. + // + // * subnet-id - The ID of the subnet. + // + // * transit-gateway-attachment-id - The id of the transit gateway attachment. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway multicast domain. 
+ TransitGatewayMulticastDomainId *string `type:"string"` +} + +// String returns the string representation +func (s SearchTransitGatewayMulticastGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchTransitGatewayMulticastGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SearchTransitGatewayMulticastGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchTransitGatewayMulticastGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SearchTransitGatewayMulticastGroupsInput) SetDryRun(v bool) *SearchTransitGatewayMulticastGroupsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *SearchTransitGatewayMulticastGroupsInput) SetFilters(v []*Filter) *SearchTransitGatewayMulticastGroupsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *SearchTransitGatewayMulticastGroupsInput) SetMaxResults(v int64) *SearchTransitGatewayMulticastGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *SearchTransitGatewayMulticastGroupsInput) SetNextToken(v string) *SearchTransitGatewayMulticastGroupsInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. 
+func (s *SearchTransitGatewayMulticastGroupsInput) SetTransitGatewayMulticastDomainId(v string) *SearchTransitGatewayMulticastGroupsInput { + s.TransitGatewayMulticastDomainId = &v + return s +} + +type SearchTransitGatewayMulticastGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about the transit gateway multicast group. + MulticastGroups []*TransitGatewayMulticastGroup `locationName:"multicastGroups" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s SearchTransitGatewayMulticastGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchTransitGatewayMulticastGroupsOutput) GoString() string { + return s.String() +} + +// SetMulticastGroups sets the MulticastGroups field's value. +func (s *SearchTransitGatewayMulticastGroupsOutput) SetMulticastGroups(v []*TransitGatewayMulticastGroup) *SearchTransitGatewayMulticastGroupsOutput { + s.MulticastGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *SearchTransitGatewayMulticastGroupsOutput) SetNextToken(v string) *SearchTransitGatewayMulticastGroupsOutput { + s.NextToken = &v + return s +} + +type SearchTransitGatewayRoutesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * attachment.transit-gateway-attachment-id- The id of the transit gateway + // attachment. 
+ // + // * attachment.resource-id - The resource id of the transit gateway attachment. + // + // * attachment.resource-type - The attachment resource type. Valid values + // are vpc | vpn | direct-connect-gateway | peering. + // + // * prefix-list-id - The ID of the prefix list. + // + // * route-search.exact-match - The exact match of the specified filter. + // + // * route-search.longest-prefix-match - The longest prefix that matches + // the route. + // + // * route-search.subnet-of-match - The routes with a subnet that match the + // specified CIDR filter. + // + // * route-search.supernet-of-match - The routes with a CIDR that encompass + // the CIDR filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 + // routes in your route table and you specify supernet-of-match as 10.0.1.0/30, + // then the result returns 10.0.1.0/29. + // + // * state - The state of the route (active | blackhole). + // + // * type - The type of route (propagated | static). + // + // Filters is a required field + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` + + // The maximum number of routes to return. + MaxResults *int64 `min:"5" type:"integer"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SearchTransitGatewayRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchTransitGatewayRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SearchTransitGatewayRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchTransitGatewayRoutesInput"} + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) + } + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SearchTransitGatewayRoutesInput) SetDryRun(v bool) *SearchTransitGatewayRoutesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *SearchTransitGatewayRoutesInput) SetFilters(v []*Filter) *SearchTransitGatewayRoutesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *SearchTransitGatewayRoutesInput) SetMaxResults(v int64) *SearchTransitGatewayRoutesInput { + s.MaxResults = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *SearchTransitGatewayRoutesInput) SetTransitGatewayRouteTableId(v string) *SearchTransitGatewayRoutesInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type SearchTransitGatewayRoutesOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether there are additional routes available. + AdditionalRoutesAvailable *bool `locationName:"additionalRoutesAvailable" type:"boolean"` + + // Information about the routes. 
+ Routes []*TransitGatewayRoute `locationName:"routeSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SearchTransitGatewayRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchTransitGatewayRoutesOutput) GoString() string { + return s.String() +} + +// SetAdditionalRoutesAvailable sets the AdditionalRoutesAvailable field's value. +func (s *SearchTransitGatewayRoutesOutput) SetAdditionalRoutesAvailable(v bool) *SearchTransitGatewayRoutesOutput { + s.AdditionalRoutesAvailable = &v + return s +} + +// SetRoutes sets the Routes field's value. +func (s *SearchTransitGatewayRoutesOutput) SetRoutes(v []*TransitGatewayRoute) *SearchTransitGatewayRoutesOutput { + s.Routes = v + return s +} + +// Describes a security group +type SecurityGroup struct { + _ struct{} `type:"structure"` + + // A description of the security group. + Description *string `locationName:"groupDescription" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // The inbound rules associated with the security group. + IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` + + // [VPC only] The outbound rules associated with the security group. + IpPermissionsEgress []*IpPermission `locationName:"ipPermissionsEgress" locationNameList:"item" type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Any tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // [VPC only] The ID of the VPC for the security group. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroup) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SecurityGroup) SetDescription(v string) *SecurityGroup { + s.Description = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *SecurityGroup) SetGroupId(v string) *SecurityGroup { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *SecurityGroup) SetGroupName(v string) *SecurityGroup { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *SecurityGroup) SetIpPermissions(v []*IpPermission) *SecurityGroup { + s.IpPermissions = v + return s +} + +// SetIpPermissionsEgress sets the IpPermissionsEgress field's value. +func (s *SecurityGroup) SetIpPermissionsEgress(v []*IpPermission) *SecurityGroup { + s.IpPermissionsEgress = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *SecurityGroup) SetOwnerId(v string) *SecurityGroup { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SecurityGroup) SetTags(v []*Tag) *SecurityGroup { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *SecurityGroup) SetVpcId(v string) *SecurityGroup { + s.VpcId = &v + return s +} + +// Describes a security group. +type SecurityGroupIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. 
+ GroupName *string `locationName:"groupName" type:"string"` +} + +// String returns the string representation +func (s SecurityGroupIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupIdentifier) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *SecurityGroupIdentifier) SetGroupId(v string) *SecurityGroupIdentifier { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *SecurityGroupIdentifier) SetGroupName(v string) *SecurityGroupIdentifier { + s.GroupName = &v + return s +} + +// Describes a VPC with a security group that references your security group. +type SecurityGroupReference struct { + _ struct{} `type:"structure"` + + // The ID of your security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The ID of the VPC with the referencing security group. + ReferencingVpcId *string `locationName:"referencingVpcId" type:"string"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroupReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupReference) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *SecurityGroupReference) SetGroupId(v string) *SecurityGroupReference { + s.GroupId = &v + return s +} + +// SetReferencingVpcId sets the ReferencingVpcId field's value. +func (s *SecurityGroupReference) SetReferencingVpcId(v string) *SecurityGroupReference { + s.ReferencingVpcId = &v + return s +} + +// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. 
+func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGroupReference { + s.VpcPeeringConnectionId = &v + return s +} + +type SendDiagnosticInterruptInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendDiagnosticInterruptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendDiagnosticInterruptInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SendDiagnosticInterruptInput) SetDryRun(v bool) *SendDiagnosticInterruptInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *SendDiagnosticInterruptInput) SetInstanceId(v string) *SendDiagnosticInterruptInput { + s.InstanceId = &v + return s +} + +type SendDiagnosticInterruptOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptOutput) GoString() string { + return s.String() +} + +// Describes a service configuration for a VPC endpoint service. +type ServiceConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether requests from other AWS accounts to create an endpoint + // to the service must first be accepted. + AcceptanceRequired *bool `locationName:"acceptanceRequired" type:"boolean"` + + // The Availability Zones in which the service is available. + AvailabilityZones []*string `locationName:"availabilityZoneSet" locationNameList:"item" type:"list"` + + // The DNS names for the service. + BaseEndpointDnsNames []*string `locationName:"baseEndpointDnsNameSet" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service. + GatewayLoadBalancerArns []*string `locationName:"gatewayLoadBalancerArnSet" locationNameList:"item" type:"list"` + + // Indicates whether the service manages its VPC endpoints. Management of the + // service VPC endpoints using the VPC endpoint API is restricted. + ManagesVpcEndpoints *bool `locationName:"managesVpcEndpoints" type:"boolean"` + + // The Amazon Resource Names (ARNs) of the Network Load Balancers for the service. + NetworkLoadBalancerArns []*string `locationName:"networkLoadBalancerArnSet" locationNameList:"item" type:"list"` + + // The private DNS name for the service. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // Information about the endpoint service private DNS name configuration. 
+ PrivateDnsNameConfiguration *PrivateDnsNameConfiguration `locationName:"privateDnsNameConfiguration" type:"structure"` + + // The ID of the service. + ServiceId *string `locationName:"serviceId" type:"string"` + + // The name of the service. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The service state. + ServiceState *string `locationName:"serviceState" type:"string" enum:"ServiceState"` + + // The type of service. + ServiceType []*ServiceTypeDetail `locationName:"serviceType" locationNameList:"item" type:"list"` + + // Any tags assigned to the service. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ServiceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceConfiguration) GoString() string { + return s.String() +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. +func (s *ServiceConfiguration) SetAcceptanceRequired(v bool) *ServiceConfiguration { + s.AcceptanceRequired = &v + return s +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *ServiceConfiguration) SetAvailabilityZones(v []*string) *ServiceConfiguration { + s.AvailabilityZones = v + return s +} + +// SetBaseEndpointDnsNames sets the BaseEndpointDnsNames field's value. +func (s *ServiceConfiguration) SetBaseEndpointDnsNames(v []*string) *ServiceConfiguration { + s.BaseEndpointDnsNames = v + return s +} + +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *ServiceConfiguration) SetGatewayLoadBalancerArns(v []*string) *ServiceConfiguration { + s.GatewayLoadBalancerArns = v + return s +} + +// SetManagesVpcEndpoints sets the ManagesVpcEndpoints field's value. 
+func (s *ServiceConfiguration) SetManagesVpcEndpoints(v bool) *ServiceConfiguration { + s.ManagesVpcEndpoints = &v + return s +} + +// SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. +func (s *ServiceConfiguration) SetNetworkLoadBalancerArns(v []*string) *ServiceConfiguration { + s.NetworkLoadBalancerArns = v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *ServiceConfiguration) SetPrivateDnsName(v string) *ServiceConfiguration { + s.PrivateDnsName = &v + return s +} + +// SetPrivateDnsNameConfiguration sets the PrivateDnsNameConfiguration field's value. +func (s *ServiceConfiguration) SetPrivateDnsNameConfiguration(v *PrivateDnsNameConfiguration) *ServiceConfiguration { + s.PrivateDnsNameConfiguration = v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *ServiceConfiguration) SetServiceId(v string) *ServiceConfiguration { + s.ServiceId = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceConfiguration) SetServiceName(v string) *ServiceConfiguration { + s.ServiceName = &v + return s +} + +// SetServiceState sets the ServiceState field's value. +func (s *ServiceConfiguration) SetServiceState(v string) *ServiceConfiguration { + s.ServiceState = &v + return s +} + +// SetServiceType sets the ServiceType field's value. +func (s *ServiceConfiguration) SetServiceType(v []*ServiceTypeDetail) *ServiceConfiguration { + s.ServiceType = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ServiceConfiguration) SetTags(v []*Tag) *ServiceConfiguration { + s.Tags = v + return s +} + +// Describes a VPC endpoint service. +type ServiceDetail struct { + _ struct{} `type:"structure"` + + // Indicates whether VPC endpoint connection requests to the service must be + // accepted by the service owner. 
+ AcceptanceRequired *bool `locationName:"acceptanceRequired" type:"boolean"` + + // The Availability Zones in which the service is available. + AvailabilityZones []*string `locationName:"availabilityZoneSet" locationNameList:"item" type:"list"` + + // The DNS names for the service. + BaseEndpointDnsNames []*string `locationName:"baseEndpointDnsNameSet" locationNameList:"item" type:"list"` + + // Indicates whether the service manages its VPC endpoints. Management of the + // service VPC endpoints using the VPC endpoint API is restricted. + ManagesVpcEndpoints *bool `locationName:"managesVpcEndpoints" type:"boolean"` + + // The AWS account ID of the service owner. + Owner *string `locationName:"owner" type:"string"` + + // The private DNS name for the service. + PrivateDnsName *string `locationName:"privateDnsName" type:"string"` + + // The verification state of the VPC endpoint service. + // + // Consumers of the endpoint service cannot use the private name when the state + // is not verified. + PrivateDnsNameVerificationState *string `locationName:"privateDnsNameVerificationState" type:"string" enum:"DnsNameState"` + + // The private DNS names assigned to the VPC endpoint service. + PrivateDnsNames []*PrivateDnsDetails `locationName:"privateDnsNameSet" locationNameList:"item" type:"list"` + + // The ID of the endpoint service. + ServiceId *string `locationName:"serviceId" type:"string"` + + // The Amazon Resource Name (ARN) of the service. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The type of service. + ServiceType []*ServiceTypeDetail `locationName:"serviceType" locationNameList:"item" type:"list"` + + // Any tags assigned to the service. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Indicates whether the service supports endpoint policies. 
+ VpcEndpointPolicySupported *bool `locationName:"vpcEndpointPolicySupported" type:"boolean"` +} + +// String returns the string representation +func (s ServiceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceDetail) GoString() string { + return s.String() +} + +// SetAcceptanceRequired sets the AcceptanceRequired field's value. +func (s *ServiceDetail) SetAcceptanceRequired(v bool) *ServiceDetail { + s.AcceptanceRequired = &v + return s +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *ServiceDetail) SetAvailabilityZones(v []*string) *ServiceDetail { + s.AvailabilityZones = v + return s +} + +// SetBaseEndpointDnsNames sets the BaseEndpointDnsNames field's value. +func (s *ServiceDetail) SetBaseEndpointDnsNames(v []*string) *ServiceDetail { + s.BaseEndpointDnsNames = v + return s +} + +// SetManagesVpcEndpoints sets the ManagesVpcEndpoints field's value. +func (s *ServiceDetail) SetManagesVpcEndpoints(v bool) *ServiceDetail { + s.ManagesVpcEndpoints = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ServiceDetail) SetOwner(v string) *ServiceDetail { + s.Owner = &v + return s +} + +// SetPrivateDnsName sets the PrivateDnsName field's value. +func (s *ServiceDetail) SetPrivateDnsName(v string) *ServiceDetail { + s.PrivateDnsName = &v + return s +} + +// SetPrivateDnsNameVerificationState sets the PrivateDnsNameVerificationState field's value. +func (s *ServiceDetail) SetPrivateDnsNameVerificationState(v string) *ServiceDetail { + s.PrivateDnsNameVerificationState = &v + return s +} + +// SetPrivateDnsNames sets the PrivateDnsNames field's value. +func (s *ServiceDetail) SetPrivateDnsNames(v []*PrivateDnsDetails) *ServiceDetail { + s.PrivateDnsNames = v + return s +} + +// SetServiceId sets the ServiceId field's value. 
+func (s *ServiceDetail) SetServiceId(v string) *ServiceDetail { + s.ServiceId = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceDetail) SetServiceName(v string) *ServiceDetail { + s.ServiceName = &v + return s +} + +// SetServiceType sets the ServiceType field's value. +func (s *ServiceDetail) SetServiceType(v []*ServiceTypeDetail) *ServiceDetail { + s.ServiceType = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ServiceDetail) SetTags(v []*Tag) *ServiceDetail { + s.Tags = v + return s +} + +// SetVpcEndpointPolicySupported sets the VpcEndpointPolicySupported field's value. +func (s *ServiceDetail) SetVpcEndpointPolicySupported(v bool) *ServiceDetail { + s.VpcEndpointPolicySupported = &v + return s +} + +// Describes the type of service for a VPC endpoint. +type ServiceTypeDetail struct { + _ struct{} `type:"structure"` + + // The type of service. + ServiceType *string `locationName:"serviceType" type:"string" enum:"ServiceType"` +} + +// String returns the string representation +func (s ServiceTypeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceTypeDetail) GoString() string { + return s.String() +} + +// SetServiceType sets the ServiceType field's value. +func (s *ServiceTypeDetail) SetServiceType(v string) *ServiceTypeDetail { + s.ServiceType = &v + return s +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +// The time period must span less than one day. +type SlotDateTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + // + // EarliestTime is a required field + EarliestTime *time.Time `type:"timestamp" required:"true"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. 
This + // value must be later than or equal to the earliest date and at most three + // months in the future. + // + // LatestTime is a required field + LatestTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s SlotDateTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotDateTimeRangeRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SlotDateTimeRangeRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SlotDateTimeRangeRequest"} + if s.EarliestTime == nil { + invalidParams.Add(request.NewErrParamRequired("EarliestTime")) + } + if s.LatestTime == nil { + invalidParams.Add(request.NewErrParamRequired("LatestTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEarliestTime sets the EarliestTime field's value. +func (s *SlotDateTimeRangeRequest) SetEarliestTime(v time.Time) *SlotDateTimeRangeRequest { + s.EarliestTime = &v + return s +} + +// SetLatestTime sets the LatestTime field's value. +func (s *SlotDateTimeRangeRequest) SetLatestTime(v time.Time) *SlotDateTimeRangeRequest { + s.LatestTime = &v + return s +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +type SlotStartTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. 
+ LatestTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s SlotStartTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotStartTimeRangeRequest) GoString() string { + return s.String() +} + +// SetEarliestTime sets the EarliestTime field's value. +func (s *SlotStartTimeRangeRequest) SetEarliestTime(v time.Time) *SlotStartTimeRangeRequest { + s.EarliestTime = &v + return s +} + +// SetLatestTime sets the LatestTime field's value. +func (s *SlotStartTimeRangeRequest) SetLatestTime(v time.Time) *SlotStartTimeRangeRequest { + s.LatestTime = &v + return s +} + +// Describes a snapshot. +type Snapshot struct { + _ struct{} `type:"structure"` + + // The data encryption key identifier for the snapshot. This value is a unique + // identifier that corresponds to the data encryption key that was used to encrypt + // the original volume or snapshot copy. Because data encryption keys are inherited + // by volumes created from snapshots, and vice versa, if snapshots share the + // same data encryption key identifier, then they belong to the same volume/snapshot + // lineage. This parameter is only returned by DescribeSnapshots. + DataEncryptionKeyId *string `locationName:"dataEncryptionKeyId" type:"string"` + + // The description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) + // customer master key (CMK) that was used to protect the volume encryption + // key for the parent volume. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The AWS owner alias, from an Amazon-maintained list (amazon). This is not + // the user-configured AWS account alias set using the IAM console. 
+ OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the EBS snapshot owner. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The progress of the snapshot, as a percentage. + Progress *string `locationName:"progress" type:"string"` + + // The ID of the snapshot. Each snapshot receives a unique identifier when it + // is created. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The time stamp when the snapshot was initiated. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // The snapshot state. + State *string `locationName:"status" type:"string" enum:"SnapshotState"` + + // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy + // operation fails (for example, if the proper AWS Key Management Service (AWS + // KMS) permissions are not obtained) this field displays error state details + // to help you diagnose why the error occurred. This parameter is only returned + // by DescribeSnapshots. + StateMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the volume that was used to create the snapshot. Snapshots created + // by the CopySnapshot action have an arbitrary volume ID that should not be + // used for any purpose. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The size of the volume, in GiB. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s Snapshot) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Snapshot) GoString() string { + return s.String() +} + +// SetDataEncryptionKeyId sets the DataEncryptionKeyId field's value. 
+func (s *Snapshot) SetDataEncryptionKeyId(v string) *Snapshot { + s.DataEncryptionKeyId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Snapshot) SetDescription(v string) *Snapshot { + s.Description = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *Snapshot) SetEncrypted(v bool) *Snapshot { + s.Encrypted = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *Snapshot) SetKmsKeyId(v string) *Snapshot { + s.KmsKeyId = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *Snapshot) SetOwnerAlias(v string) *Snapshot { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Snapshot) SetOwnerId(v string) *Snapshot { + s.OwnerId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *Snapshot) SetProgress(v string) *Snapshot { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *Snapshot) SetSnapshotId(v string) *Snapshot { + s.SnapshotId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Snapshot) SetStartTime(v time.Time) *Snapshot { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *Snapshot) SetState(v string) *Snapshot { + s.State = &v + return s +} + +// SetStateMessage sets the StateMessage field's value. +func (s *Snapshot) SetStateMessage(v string) *Snapshot { + s.StateMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Snapshot) SetTags(v []*Tag) *Snapshot { + s.Tags = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *Snapshot) SetVolumeId(v string) *Snapshot { + s.VolumeId = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *Snapshot) SetVolumeSize(v int64) *Snapshot { + s.VolumeSize = &v + return s +} + +// Describes the snapshot created from the imported disk. 
+type SnapshotDetail struct { + _ struct{} `type:"structure"` + + // A description for the snapshot. + Description *string `locationName:"description" type:"string"` + + // The block device mapping for the snapshot. + DeviceName *string `locationName:"deviceName" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The percentage of progress for the task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status of the snapshot creation. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the snapshot creation. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL used to access the disk image. + Url *string `locationName:"url" type:"string"` + + // The Amazon S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDetail) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SnapshotDetail) SetDescription(v string) *SnapshotDetail { + s.Description = &v + return s +} + +// SetDeviceName sets the DeviceName field's value. +func (s *SnapshotDetail) SetDeviceName(v string) *SnapshotDetail { + s.DeviceName = &v + return s +} + +// SetDiskImageSize sets the DiskImageSize field's value. +func (s *SnapshotDetail) SetDiskImageSize(v float64) *SnapshotDetail { + s.DiskImageSize = &v + return s +} + +// SetFormat sets the Format field's value. 
+func (s *SnapshotDetail) SetFormat(v string) *SnapshotDetail { + s.Format = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *SnapshotDetail) SetProgress(v string) *SnapshotDetail { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *SnapshotDetail) SetSnapshotId(v string) *SnapshotDetail { + s.SnapshotId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SnapshotDetail) SetStatus(v string) *SnapshotDetail { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *SnapshotDetail) SetStatusMessage(v string) *SnapshotDetail { + s.StatusMessage = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *SnapshotDetail) SetUrl(v string) *SnapshotDetail { + s.Url = &v + return s +} + +// SetUserBucket sets the UserBucket field's value. +func (s *SnapshotDetail) SetUserBucket(v *UserBucketDetails) *SnapshotDetail { + s.UserBucket = v + return s +} + +// The disk container object for the import snapshot request. +type SnapshotDiskContainer struct { + _ struct{} `type:"structure"` + + // The description of the disk image being imported. + Description *string `type:"string"` + + // The format of the disk image being imported. + // + // Valid values: VHD | VMDK + Format *string `type:"string"` + + // The URL to the Amazon S3-based disk image being imported. It can either be + // a https URL (https://..) or an Amazon S3 URL (s3://..). + Url *string `type:"string"` + + // The Amazon S3 bucket for the disk image. + UserBucket *UserBucket `type:"structure"` +} + +// String returns the string representation +func (s SnapshotDiskContainer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotDiskContainer) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. 
+func (s *SnapshotDiskContainer) SetDescription(v string) *SnapshotDiskContainer { + s.Description = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *SnapshotDiskContainer) SetFormat(v string) *SnapshotDiskContainer { + s.Format = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *SnapshotDiskContainer) SetUrl(v string) *SnapshotDiskContainer { + s.Url = &v + return s +} + +// SetUserBucket sets the UserBucket field's value. +func (s *SnapshotDiskContainer) SetUserBucket(v *UserBucket) *SnapshotDiskContainer { + s.UserBucket = v + return s +} + +// Information about a snapshot. +type SnapshotInfo struct { + _ struct{} `type:"structure"` + + // Description specified by the CreateSnapshotRequest that has been applied + // to all snapshots. + Description *string `locationName:"description" type:"string"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Account id used when creating this snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Progress this snapshot has made towards completing. + Progress *string `locationName:"progress" type:"string"` + + // Snapshot id that can be used to describe this snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // Time this snapshot was started. This is the same for all snapshots initiated + // by the same request. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // Current state of the snapshot. + State *string `locationName:"state" type:"string" enum:"SnapshotState"` + + // Tags associated with this snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Source volume from which this snapshot was created. + VolumeId *string `locationName:"volumeId" type:"string"` + + // Size of the volume from which this snapshot was created. 
+ VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s SnapshotInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotInfo) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SnapshotInfo) SetDescription(v string) *SnapshotInfo { + s.Description = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *SnapshotInfo) SetEncrypted(v bool) *SnapshotInfo { + s.Encrypted = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *SnapshotInfo) SetOwnerId(v string) *SnapshotInfo { + s.OwnerId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *SnapshotInfo) SetProgress(v string) *SnapshotInfo { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *SnapshotInfo) SetSnapshotId(v string) *SnapshotInfo { + s.SnapshotId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *SnapshotInfo) SetStartTime(v time.Time) *SnapshotInfo { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *SnapshotInfo) SetState(v string) *SnapshotInfo { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SnapshotInfo) SetTags(v []*Tag) *SnapshotInfo { + s.Tags = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *SnapshotInfo) SetVolumeId(v string) *SnapshotInfo { + s.VolumeId = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *SnapshotInfo) SetVolumeSize(v int64) *SnapshotInfo { + s.VolumeSize = &v + return s +} + +// Details about the import snapshot task. +type SnapshotTaskDetail struct { + _ struct{} `type:"structure"` + + // The description of the snapshot. 
+ Description *string `locationName:"description" type:"string"` + + // The size of the disk in the snapshot, in GiB. + DiskImageSize *float64 `locationName:"diskImageSize" type:"double"` + + // Indicates whether the snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // The format of the disk image from which the snapshot is created. + Format *string `locationName:"format" type:"string"` + + // The identifier for the AWS Key Management Service (AWS KMS) customer master + // key (CMK) that was used to create the encrypted snapshot. + KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + + // The percentage of completion for the import snapshot task. + Progress *string `locationName:"progress" type:"string"` + + // The snapshot ID of the disk being imported. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // A brief status for the import snapshot task. + Status *string `locationName:"status" type:"string"` + + // A detailed status message for the import snapshot task. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The URL of the disk image from which the snapshot is created. + Url *string `locationName:"url" type:"string"` + + // The Amazon S3 bucket for the disk image. + UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` +} + +// String returns the string representation +func (s SnapshotTaskDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotTaskDetail) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SnapshotTaskDetail) SetDescription(v string) *SnapshotTaskDetail { + s.Description = &v + return s +} + +// SetDiskImageSize sets the DiskImageSize field's value. 
+func (s *SnapshotTaskDetail) SetDiskImageSize(v float64) *SnapshotTaskDetail { + s.DiskImageSize = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *SnapshotTaskDetail) SetEncrypted(v bool) *SnapshotTaskDetail { + s.Encrypted = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *SnapshotTaskDetail) SetFormat(v string) *SnapshotTaskDetail { + s.Format = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *SnapshotTaskDetail) SetKmsKeyId(v string) *SnapshotTaskDetail { + s.KmsKeyId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *SnapshotTaskDetail) SetProgress(v string) *SnapshotTaskDetail { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *SnapshotTaskDetail) SetSnapshotId(v string) *SnapshotTaskDetail { + s.SnapshotId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SnapshotTaskDetail) SetStatus(v string) *SnapshotTaskDetail { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *SnapshotTaskDetail) SetStatusMessage(v string) *SnapshotTaskDetail { + s.StatusMessage = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *SnapshotTaskDetail) SetUrl(v string) *SnapshotTaskDetail { + s.Url = &v + return s +} + +// SetUserBucket sets the UserBucket field's value. +func (s *SnapshotTaskDetail) SetUserBucket(v *UserBucketDetails) *SnapshotTaskDetail { + s.UserBucket = v + return s +} + +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#spot-fleet-capacity-rebalance) +// in the Amazon EC2 User Guide for Linux Instances. 
+type SpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // You must specify a value, otherwise you get an error. + // + // To allow Spot Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for a Spot Instance in the fleet, specify + // launch. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for all instances while they are running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"ReplacementStrategy"` +} + +// String returns the string representation +func (s SpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *SpotCapacityRebalance) SetReplacementStrategy(v string) *SpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + +// Describes the data feed for a Spot Instance. +type SpotDatafeedSubscription struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket where the Spot Instance data feed is located. + Bucket *string `locationName:"bucket" type:"string"` + + // The fault codes for the Spot Instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The AWS account ID of the account. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The prefix for the data feed files. + Prefix *string `locationName:"prefix" type:"string"` + + // The state of the Spot Instance data feed subscription. 
+ State *string `locationName:"state" type:"string" enum:"DatafeedSubscriptionState"` +} + +// String returns the string representation +func (s SpotDatafeedSubscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotDatafeedSubscription) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *SpotDatafeedSubscription) SetBucket(v string) *SpotDatafeedSubscription { + s.Bucket = &v + return s +} + +// SetFault sets the Fault field's value. +func (s *SpotDatafeedSubscription) SetFault(v *SpotInstanceStateFault) *SpotDatafeedSubscription { + s.Fault = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *SpotDatafeedSubscription) SetOwnerId(v string) *SpotDatafeedSubscription { + s.OwnerId = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *SpotDatafeedSubscription) SetPrefix(v string) *SpotDatafeedSubscription { + s.Prefix = &v + return s +} + +// SetState sets the State field's value. +func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription { + s.State = &v + return s +} + +// Describes the launch specification for one or more Spot Instances. If you +// include On-Demand capacity in your fleet request or want to specify an EFA +// network device, you can't use SpotFleetLaunchSpecification; you must use +// LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). +type SpotFleetLaunchSpecification struct { + _ struct{} `type:"structure"` + + // Deprecated. + AddressingType *string `locationName:"addressingType" type:"string"` + + // One or more block devices that are mapped to the Spot Instances. You can't + // specify both a snapshot ID and an encryption value. This is because only + // blank volumes can be encrypted on creation. 
If a snapshot is the basis for + // a volume, it is not blank and its encryption status is used for the volume + // encryption status. + BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS + // Optimized instance. + // + // Default: false + EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the AMI. + ImageId *string `locationName:"imageId" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // The ID of the kernel. + KernelId *string `locationName:"kernelId" type:"string"` + + // The name of the key pair. + KeyName *string `locationName:"keyName" type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *SpotFleetMonitoring `locationName:"monitoring" type:"structure"` + + // One or more network interfaces. If you specify a network interface, you must + // specify subnet IDs and security group IDs using the network interface. + // + // SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter + // (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). + NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` + + // The placement information. 
+ Placement *SpotPlacement `locationName:"placement" type:"structure"` + + // The ID of the RAM disk. Some kernels require additional drivers at launch. + // Check the kernel requirements for information about whether you need to specify + // a RAM disk. To find kernel requirements, refer to the AWS Resource Center + // and search for the kernel ID. + RamdiskId *string `locationName:"ramdiskId" type:"string"` + + // One or more security groups. When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. + SecurityGroups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The maximum price per unit hour that you are willing to pay for a Spot Instance. + // If this value is not specified, the default is the Spot price specified for + // the fleet. To determine the Spot price per unit hour, divide the Spot price + // by the value of WeightedCapacity. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The IDs of the subnets in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". + SubnetId *string `locationName:"subnetId" type:"string"` + + // The tags to apply during creation. + TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"` + + // The Base64-encoded user data that instances use when starting up. + UserData *string `locationName:"userData" type:"string"` + + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms of instances, + // or a performance characteristic such as vCPUs, memory, or I/O. 
+ // + // If the target capacity divided by this value is not a whole number, Amazon + // EC2 rounds the number of instances to the next whole number. If this value + // is not specified, the default is 1. + WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` +} + +// String returns the string representation +func (s SpotFleetLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetLaunchSpecification) GoString() string { + return s.String() +} + +// SetAddressingType sets the AddressingType field's value. +func (s *SpotFleetLaunchSpecification) SetAddressingType(v string) *SpotFleetLaunchSpecification { + s.AddressingType = &v + return s +} + +// SetBlockDeviceMappings sets the BlockDeviceMappings field's value. +func (s *SpotFleetLaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *SpotFleetLaunchSpecification { + s.BlockDeviceMappings = v + return s +} + +// SetEbsOptimized sets the EbsOptimized field's value. +func (s *SpotFleetLaunchSpecification) SetEbsOptimized(v bool) *SpotFleetLaunchSpecification { + s.EbsOptimized = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *SpotFleetLaunchSpecification) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *SpotFleetLaunchSpecification { + s.IamInstanceProfile = v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *SpotFleetLaunchSpecification) SetImageId(v string) *SpotFleetLaunchSpecification { + s.ImageId = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *SpotFleetLaunchSpecification) SetInstanceType(v string) *SpotFleetLaunchSpecification { + s.InstanceType = &v + return s +} + +// SetKernelId sets the KernelId field's value. 
+func (s *SpotFleetLaunchSpecification) SetKernelId(v string) *SpotFleetLaunchSpecification { + s.KernelId = &v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *SpotFleetLaunchSpecification) SetKeyName(v string) *SpotFleetLaunchSpecification { + s.KeyName = &v + return s +} + +// SetMonitoring sets the Monitoring field's value. +func (s *SpotFleetLaunchSpecification) SetMonitoring(v *SpotFleetMonitoring) *SpotFleetLaunchSpecification { + s.Monitoring = v + return s +} + +// SetNetworkInterfaces sets the NetworkInterfaces field's value. +func (s *SpotFleetLaunchSpecification) SetNetworkInterfaces(v []*InstanceNetworkInterfaceSpecification) *SpotFleetLaunchSpecification { + s.NetworkInterfaces = v + return s +} + +// SetPlacement sets the Placement field's value. +func (s *SpotFleetLaunchSpecification) SetPlacement(v *SpotPlacement) *SpotFleetLaunchSpecification { + s.Placement = v + return s +} + +// SetRamdiskId sets the RamdiskId field's value. +func (s *SpotFleetLaunchSpecification) SetRamdiskId(v string) *SpotFleetLaunchSpecification { + s.RamdiskId = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *SpotFleetLaunchSpecification) SetSecurityGroups(v []*GroupIdentifier) *SpotFleetLaunchSpecification { + s.SecurityGroups = v + return s +} + +// SetSpotPrice sets the SpotPrice field's value. +func (s *SpotFleetLaunchSpecification) SetSpotPrice(v string) *SpotFleetLaunchSpecification { + s.SpotPrice = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *SpotFleetLaunchSpecification) SetSubnetId(v string) *SpotFleetLaunchSpecification { + s.SubnetId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *SpotFleetLaunchSpecification) SetTagSpecifications(v []*SpotFleetTagSpecification) *SpotFleetLaunchSpecification { + s.TagSpecifications = v + return s +} + +// SetUserData sets the UserData field's value. 
+func (s *SpotFleetLaunchSpecification) SetUserData(v string) *SpotFleetLaunchSpecification { + s.UserData = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. +func (s *SpotFleetLaunchSpecification) SetWeightedCapacity(v float64) *SpotFleetLaunchSpecification { + s.WeightedCapacity = &v + return s +} + +// Describes whether monitoring is enabled. +type SpotFleetMonitoring struct { + _ struct{} `type:"structure"` + + // Enables monitoring for the instance. + // + // Default: false + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s SpotFleetMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetMonitoring) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *SpotFleetMonitoring) SetEnabled(v bool) *SpotFleetMonitoring { + s.Enabled = &v + return s +} + +// Describes a Spot Fleet request. +type SpotFleetRequestConfig struct { + _ struct{} `type:"structure"` + + // The progress of the Spot Fleet request. If there is an error, the status + // is error. After all requests are placed, the status is pending_fulfillment. + // If the size of the fleet is equal to or greater than its target capacity, + // the status is fulfilled. If the size of the fleet is decreased, the status + // is pending_termination while Spot Instances are terminating. + ActivityStatus *string `locationName:"activityStatus" type:"string" enum:"ActivityStatus"` + + // The creation date and time of the request. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // The configuration of the Spot Fleet request. + SpotFleetRequestConfig *SpotFleetRequestConfigData `locationName:"spotFleetRequestConfig" type:"structure"` + + // The ID of the Spot Fleet request. 
+ SpotFleetRequestId *string `locationName:"spotFleetRequestId" type:"string"` + + // The state of the Spot Fleet request. + SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" enum:"BatchState"` + + // The tags for a Spot Fleet resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SpotFleetRequestConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfig) GoString() string { + return s.String() +} + +// SetActivityStatus sets the ActivityStatus field's value. +func (s *SpotFleetRequestConfig) SetActivityStatus(v string) *SpotFleetRequestConfig { + s.ActivityStatus = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *SpotFleetRequestConfig) SetCreateTime(v time.Time) *SpotFleetRequestConfig { + s.CreateTime = &v + return s +} + +// SetSpotFleetRequestConfig sets the SpotFleetRequestConfig field's value. +func (s *SpotFleetRequestConfig) SetSpotFleetRequestConfig(v *SpotFleetRequestConfigData) *SpotFleetRequestConfig { + s.SpotFleetRequestConfig = v + return s +} + +// SetSpotFleetRequestId sets the SpotFleetRequestId field's value. +func (s *SpotFleetRequestConfig) SetSpotFleetRequestId(v string) *SpotFleetRequestConfig { + s.SpotFleetRequestId = &v + return s +} + +// SetSpotFleetRequestState sets the SpotFleetRequestState field's value. +func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRequestConfig { + s.SpotFleetRequestState = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SpotFleetRequestConfig) SetTags(v []*Tag) *SpotFleetRequestConfig { + s.Tags = v + return s +} + +// Describes the configuration of a Spot Fleet request. 
+type SpotFleetRequestConfigData struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the Spot Fleet request. + // + // If the allocation strategy is lowestPrice, Spot Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, Spot Fleet launches instances + // from all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, Spot Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` + + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of your listings. This helps to avoid duplicate listings. For more information, + // see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `locationName:"clientToken" type:"string"` + + // Indicates whether running Spot Instances should be terminated if you decrease + // the target capacity of the Spot Fleet request below the current size of the + // Spot Fleet. + ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + + // The number of units fulfilled by this request compared to the set target + // capacity. You cannot set this value. + FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` + + // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) + // role that grants the Spot Fleet the permission to request, launch, terminate, + // and tag instances on your behalf. 
For more information, see Spot Fleet prerequisites + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) + // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate + // Spot Instances on your behalf when you cancel its Spot Fleet request using + // CancelSpotFleetRequests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests) + // or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration. + // + // IamFleetRole is a required field + IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when Spot AllocationStrategy is set to lowest-price. Spot Fleet + // selects the cheapest Spot pools and evenly allocates your target Spot capacity + // across the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` + + // The launch specifications for the Spot Fleet request. If you specify LaunchSpecifications, + // you can't specify LaunchTemplateConfigs. If you include On-Demand capacity + // in your request, you must use LaunchTemplateConfigs. + LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" type:"list"` + + // The launch template and overrides. If you specify LaunchTemplateConfigs, + // you can't specify LaunchSpecifications. If you include On-Demand capacity + // in your request, you must use LaunchTemplateConfigs. 
+ LaunchTemplateConfigs []*LaunchTemplateConfig `locationName:"launchTemplateConfigs" locationNameList:"item" type:"list"` + + // One or more Classic Load Balancers and target groups to attach to the Spot + // Fleet request. Spot Fleet registers the running Spot Instances with the specified + // Classic Load Balancers and target groups. + // + // With Network Load Balancers, Spot Fleet cannot register instances that have + // the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, + // HS1, M1, M2, M3, and T1. + LoadBalancersConfig *LoadBalancersConfig `locationName:"loadBalancersConfig" type:"structure"` + + // The order of the launch template overrides to use in fulfilling On-Demand + // capacity. If you specify lowestPrice, Spot Fleet uses price to determine + // the order, launching the lowest price first. If you specify prioritized, + // Spot Fleet uses the priority that you assign to each Spot Fleet launch template + // override, launching the highest priority first. If you do not specify a value, + // Spot Fleet defaults to lowestPrice. + OnDemandAllocationStrategy *string `locationName:"onDemandAllocationStrategy" type:"string" enum:"OnDemandAllocationStrategy"` + + // The number of On-Demand units fulfilled by this request compared to the set + // target On-Demand capacity. + OnDemandFulfilledCapacity *float64 `locationName:"onDemandFulfilledCapacity" type:"double"` + + // The maximum amount per hour for On-Demand Instances that you're willing to + // pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice + // parameter, or both parameters to ensure that your fleet cost does not exceed + // your budget. If you set a maximum price per hour for the On-Demand Instances + // and Spot Instances in your request, Spot Fleet will launch instances until + // it reaches the maximum amount you're willing to pay. 
 When the maximum amount
+	// you're willing to pay is reached, the fleet stops launching instances even
+	// if it hasn’t met the target capacity.
+	OnDemandMaxTotalPrice *string `locationName:"onDemandMaxTotalPrice" type:"string"`
+
+	// The number of On-Demand units to request. You can choose to set the target
+	// capacity in terms of instances or a performance characteristic that is important
+	// to your application workload, such as vCPUs, memory, or I/O. If the request
+	// type is maintain, you can specify a target capacity of 0 and add capacity
+	// later.
+	OnDemandTargetCapacity *int64 `locationName:"onDemandTargetCapacity" type:"integer"`
+
+	// Indicates whether Spot Fleet should replace unhealthy instances.
+	ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"`
+
+	// The strategies for managing your Spot Instances that are at an elevated risk
+	// of being interrupted.
+	SpotMaintenanceStrategies *SpotMaintenanceStrategies `locationName:"spotMaintenanceStrategies" type:"structure"`
+
+	// The maximum amount per hour for Spot Instances that you're willing to pay.
+	// You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter,
+	// or both parameters to ensure that your fleet cost does not exceed your budget.
+	// If you set a maximum price per hour for the On-Demand Instances and Spot
+	// Instances in your request, Spot Fleet will launch instances until it reaches
+	// the maximum amount you're willing to pay. When the maximum amount you're
+	// willing to pay is reached, the fleet stops launching instances even if it
+	// hasn’t met the target capacity.
+	SpotMaxTotalPrice *string `locationName:"spotMaxTotalPrice" type:"string"`
+
+	// The maximum price per unit hour that you are willing to pay for a Spot Instance.
+	// The default is the On-Demand price.
+	SpotPrice *string `locationName:"spotPrice" type:"string"`
+
+	// The key-value pair for tagging the Spot Fleet request on creation.
The value + // for ResourceType must be spot-fleet-request, otherwise the Spot Fleet request + // fails. To tag instances at launch, specify the tags in the launch template + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template) + // (valid only if you use LaunchTemplateConfigs) or in the SpotFleetTagSpecification + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetTagSpecification.html) + // (valid only if you use LaunchSpecifications). For information about tagging + // after launch, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The number of units to request for the Spot Fleet. You can choose to set + // the target capacity in terms of instances or a performance characteristic + // that is important to your application workload, such as vCPUs, memory, or + // I/O. If the request type is maintain, you can specify a target capacity of + // 0 and add capacity later. + // + // TargetCapacity is a required field + TargetCapacity *int64 `locationName:"targetCapacity" type:"integer" required:"true"` + + // Indicates whether running Spot Instances are terminated when the Spot Fleet + // request expires. + TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + + // The type of request. Indicates whether the Spot Fleet only requests the target + // capacity or also attempts to maintain it. When this value is request, the + // Spot Fleet only places the required requests. It does not attempt to replenish + // Spot Instances if capacity is diminished, nor does it submit requests in + // alternative Spot pools if capacity is not available. When this value is maintain, + // the Spot Fleet maintains the target capacity. 
The Spot Fleet places the required + // requests to meet capacity and automatically replenishes any interrupted instances. + // Default: maintain. instant is listed but is not used by Spot Fleet. + Type *string `locationName:"type" type:"string" enum:"FleetType"` + + // The start date and time of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // By default, Amazon EC2 starts fulfilling the request immediately. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` + + // The end date and time of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // After the end date and time, no new Spot Instance requests are placed or + // able to fulfill the request. If no value is specified, the Spot Fleet request + // remains until you cancel it. + ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` +} + +// String returns the string representation +func (s SpotFleetRequestConfigData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetRequestConfigData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SpotFleetRequestConfigData) Validate() error {
+	// Accumulate every violation into a single ErrInvalidParams so the caller
+	// sees all problems at once instead of failing on the first one.
+	invalidParams := request.ErrInvalidParams{Context: "SpotFleetRequestConfigData"}
+	// IamFleetRole and TargetCapacity carry `required:"true"` in their struct
+	// tags; a nil pointer here means the caller never set the field.
+	if s.IamFleetRole == nil {
+		invalidParams.Add(request.NewErrParamRequired("IamFleetRole"))
+	}
+	if s.TargetCapacity == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetCapacity"))
+	}
+	if s.LaunchTemplateConfigs != nil {
+		// Recursively validate each launch template config. Nil entries are
+		// tolerated and skipped; nested failures are reported under an indexed
+		// prefix such as "LaunchTemplateConfigs[2]".
+		for i, v := range s.LaunchTemplateConfigs {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.LoadBalancersConfig != nil {
+		// The nested structure has its own Validate; surface its errors under
+		// the "LoadBalancersConfig" prefix.
+		if err := s.LoadBalancersConfig.Validate(); err != nil {
+			invalidParams.AddNested("LoadBalancersConfig", err.(request.ErrInvalidParams))
+		}
+	}
+
+	// A non-zero length means at least one violation was recorded above.
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAllocationStrategy sets the AllocationStrategy field's value.
+func (s *SpotFleetRequestConfigData) SetAllocationStrategy(v string) *SpotFleetRequestConfigData {
+	s.AllocationStrategy = &v
+	return s
+}
+
+// SetClientToken sets the ClientToken field's value.
+func (s *SpotFleetRequestConfigData) SetClientToken(v string) *SpotFleetRequestConfigData {
+	s.ClientToken = &v
+	return s
+}
+
+// SetExcessCapacityTerminationPolicy sets the ExcessCapacityTerminationPolicy field's value.
+func (s *SpotFleetRequestConfigData) SetExcessCapacityTerminationPolicy(v string) *SpotFleetRequestConfigData {
+	s.ExcessCapacityTerminationPolicy = &v
+	return s
+}
+
+// SetFulfilledCapacity sets the FulfilledCapacity field's value.
+func (s *SpotFleetRequestConfigData) SetFulfilledCapacity(v float64) *SpotFleetRequestConfigData {
+	s.FulfilledCapacity = &v
+	return s
+}
+
+// SetIamFleetRole sets the IamFleetRole field's value. 
+func (s *SpotFleetRequestConfigData) SetIamFleetRole(v string) *SpotFleetRequestConfigData { + s.IamFleetRole = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotFleetRequestConfigData) SetInstanceInterruptionBehavior(v string) *SpotFleetRequestConfigData { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotFleetRequestConfigData) SetInstancePoolsToUseCount(v int64) *SpotFleetRequestConfigData { + s.InstancePoolsToUseCount = &v + return s +} + +// SetLaunchSpecifications sets the LaunchSpecifications field's value. +func (s *SpotFleetRequestConfigData) SetLaunchSpecifications(v []*SpotFleetLaunchSpecification) *SpotFleetRequestConfigData { + s.LaunchSpecifications = v + return s +} + +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *SpotFleetRequestConfigData) SetLaunchTemplateConfigs(v []*LaunchTemplateConfig) *SpotFleetRequestConfigData { + s.LaunchTemplateConfigs = v + return s +} + +// SetLoadBalancersConfig sets the LoadBalancersConfig field's value. +func (s *SpotFleetRequestConfigData) SetLoadBalancersConfig(v *LoadBalancersConfig) *SpotFleetRequestConfigData { + s.LoadBalancersConfig = v + return s +} + +// SetOnDemandAllocationStrategy sets the OnDemandAllocationStrategy field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandAllocationStrategy(v string) *SpotFleetRequestConfigData { + s.OnDemandAllocationStrategy = &v + return s +} + +// SetOnDemandFulfilledCapacity sets the OnDemandFulfilledCapacity field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandFulfilledCapacity(v float64) *SpotFleetRequestConfigData { + s.OnDemandFulfilledCapacity = &v + return s +} + +// SetOnDemandMaxTotalPrice sets the OnDemandMaxTotalPrice field's value. 
+func (s *SpotFleetRequestConfigData) SetOnDemandMaxTotalPrice(v string) *SpotFleetRequestConfigData { + s.OnDemandMaxTotalPrice = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *SpotFleetRequestConfigData) SetOnDemandTargetCapacity(v int64) *SpotFleetRequestConfigData { + s.OnDemandTargetCapacity = &v + return s +} + +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotFleetRequestConfigData { + s.ReplaceUnhealthyInstances = &v + return s +} + +// SetSpotMaintenanceStrategies sets the SpotMaintenanceStrategies field's value. +func (s *SpotFleetRequestConfigData) SetSpotMaintenanceStrategies(v *SpotMaintenanceStrategies) *SpotFleetRequestConfigData { + s.SpotMaintenanceStrategies = v + return s +} + +// SetSpotMaxTotalPrice sets the SpotMaxTotalPrice field's value. +func (s *SpotFleetRequestConfigData) SetSpotMaxTotalPrice(v string) *SpotFleetRequestConfigData { + s.SpotMaxTotalPrice = &v + return s +} + +// SetSpotPrice sets the SpotPrice field's value. +func (s *SpotFleetRequestConfigData) SetSpotPrice(v string) *SpotFleetRequestConfigData { + s.SpotPrice = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *SpotFleetRequestConfigData) SetTagSpecifications(v []*TagSpecification) *SpotFleetRequestConfigData { + s.TagSpecifications = v + return s +} + +// SetTargetCapacity sets the TargetCapacity field's value. +func (s *SpotFleetRequestConfigData) SetTargetCapacity(v int64) *SpotFleetRequestConfigData { + s.TargetCapacity = &v + return s +} + +// SetTerminateInstancesWithExpiration sets the TerminateInstancesWithExpiration field's value. +func (s *SpotFleetRequestConfigData) SetTerminateInstancesWithExpiration(v bool) *SpotFleetRequestConfigData { + s.TerminateInstancesWithExpiration = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *SpotFleetRequestConfigData) SetType(v string) *SpotFleetRequestConfigData { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *SpotFleetRequestConfigData) SetValidFrom(v time.Time) *SpotFleetRequestConfigData { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *SpotFleetRequestConfigData) SetValidUntil(v time.Time) *SpotFleetRequestConfigData { + s.ValidUntil = &v + return s +} + +// The tags for a Spot Fleet resource. +type SpotFleetTagSpecification struct { + _ struct{} `type:"structure"` + + // The type of resource. Currently, the only resource type that is supported + // is instance. To tag the Spot Fleet request on creation, use the TagSpecifications + // parameter in SpotFleetRequestConfigData (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetRequestConfigData.html). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tags. + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SpotFleetTagSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetTagSpecification) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *SpotFleetTagSpecification) SetResourceType(v string) *SpotFleetTagSpecification { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification { + s.Tags = v + return s +} + +// Describes a Spot Instance request. +type SpotInstanceRequest struct { + _ struct{} `type:"structure"` + + // If you specified a duration and your Spot Instance request was fulfilled, + // this is the fixed hourly price in effect for the Spot Instance while it runs. 
+ ActualBlockHourlyPrice *string `locationName:"actualBlockHourlyPrice" type:"string"` + + // The Availability Zone group. If you specify the same Availability Zone group + // for all Spot Instance requests, all Spot Instances are launched in the same + // Availability Zone. + AvailabilityZoneGroup *string `locationName:"availabilityZoneGroup" type:"string"` + + // The duration for the Spot Instance, in minutes. + BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` + + // The date and time when the Spot Instance request was created, in UTC format + // (for example, YYYY-MM-DDTHH:MM:SSZ). + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // The fault codes for the Spot Instance request, if any. + Fault *SpotInstanceStateFault `locationName:"fault" type:"structure"` + + // The instance ID, if an instance has been launched to fulfill the Spot Instance + // request. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The behavior when a Spot Instance is interrupted. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + + // The instance launch group. Launch groups are Spot Instances that launch together + // and terminate together. + LaunchGroup *string `locationName:"launchGroup" type:"string"` + + // Additional information for launching instances. + LaunchSpecification *LaunchSpecification `locationName:"launchSpecification" type:"structure"` + + // The Availability Zone in which the request is launched. + LaunchedAvailabilityZone *string `locationName:"launchedAvailabilityZone" type:"string"` + + // The product description associated with the Spot Instance. + ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The ID of the Spot Instance request. 
+ SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` + + // The maximum price per hour that you are willing to pay for a Spot Instance. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The state of the Spot Instance request. Spot status information helps track + // your Spot Instance requests. For more information, see Spot status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // in the Amazon EC2 User Guide for Linux Instances. + State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` + + // The status code and status message describing the Spot Instance request. + Status *SpotInstanceStatus `locationName:"status" type:"structure"` + + // Any tags assigned to the resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Spot Instance request type. + Type *string `locationName:"type" type:"string" enum:"SpotInstanceType"` + + // The start date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + // The request becomes active at this date and time. + ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // + // * For a persistent request, the request remains active until the validUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the validUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. 
+ ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` +} + +// String returns the string representation +func (s SpotInstanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceRequest) GoString() string { + return s.String() +} + +// SetActualBlockHourlyPrice sets the ActualBlockHourlyPrice field's value. +func (s *SpotInstanceRequest) SetActualBlockHourlyPrice(v string) *SpotInstanceRequest { + s.ActualBlockHourlyPrice = &v + return s +} + +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value. +func (s *SpotInstanceRequest) SetAvailabilityZoneGroup(v string) *SpotInstanceRequest { + s.AvailabilityZoneGroup = &v + return s +} + +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value. +func (s *SpotInstanceRequest) SetBlockDurationMinutes(v int64) *SpotInstanceRequest { + s.BlockDurationMinutes = &v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *SpotInstanceRequest) SetCreateTime(v time.Time) *SpotInstanceRequest { + s.CreateTime = &v + return s +} + +// SetFault sets the Fault field's value. +func (s *SpotInstanceRequest) SetFault(v *SpotInstanceStateFault) *SpotInstanceRequest { + s.Fault = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *SpotInstanceRequest) SetInstanceId(v string) *SpotInstanceRequest { + s.InstanceId = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotInstanceRequest) SetInstanceInterruptionBehavior(v string) *SpotInstanceRequest { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetLaunchGroup sets the LaunchGroup field's value. +func (s *SpotInstanceRequest) SetLaunchGroup(v string) *SpotInstanceRequest { + s.LaunchGroup = &v + return s +} + +// SetLaunchSpecification sets the LaunchSpecification field's value. 
+func (s *SpotInstanceRequest) SetLaunchSpecification(v *LaunchSpecification) *SpotInstanceRequest { + s.LaunchSpecification = v + return s +} + +// SetLaunchedAvailabilityZone sets the LaunchedAvailabilityZone field's value. +func (s *SpotInstanceRequest) SetLaunchedAvailabilityZone(v string) *SpotInstanceRequest { + s.LaunchedAvailabilityZone = &v + return s +} + +// SetProductDescription sets the ProductDescription field's value. +func (s *SpotInstanceRequest) SetProductDescription(v string) *SpotInstanceRequest { + s.ProductDescription = &v + return s +} + +// SetSpotInstanceRequestId sets the SpotInstanceRequestId field's value. +func (s *SpotInstanceRequest) SetSpotInstanceRequestId(v string) *SpotInstanceRequest { + s.SpotInstanceRequestId = &v + return s +} + +// SetSpotPrice sets the SpotPrice field's value. +func (s *SpotInstanceRequest) SetSpotPrice(v string) *SpotInstanceRequest { + s.SpotPrice = &v + return s +} + +// SetState sets the State field's value. +func (s *SpotInstanceRequest) SetState(v string) *SpotInstanceRequest { + s.State = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SpotInstanceRequest) SetStatus(v *SpotInstanceStatus) *SpotInstanceRequest { + s.Status = v + return s +} + +// SetTags sets the Tags field's value. +func (s *SpotInstanceRequest) SetTags(v []*Tag) *SpotInstanceRequest { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *SpotInstanceRequest) SetType(v string) *SpotInstanceRequest { + s.Type = &v + return s +} + +// SetValidFrom sets the ValidFrom field's value. +func (s *SpotInstanceRequest) SetValidFrom(v time.Time) *SpotInstanceRequest { + s.ValidFrom = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *SpotInstanceRequest) SetValidUntil(v time.Time) *SpotInstanceRequest { + s.ValidUntil = &v + return s +} + +// Describes a Spot Instance state change. 
+type SpotInstanceStateFault struct { + _ struct{} `type:"structure"` + + // The reason code for the Spot Instance state change. + Code *string `locationName:"code" type:"string"` + + // The message for the Spot Instance state change. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s SpotInstanceStateFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStateFault) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *SpotInstanceStateFault) SetCode(v string) *SpotInstanceStateFault { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault { + s.Message = &v + return s +} + +// Describes the status of a Spot Instance request. +type SpotInstanceStatus struct { + _ struct{} `type:"structure"` + + // The status code. For a list of status codes, see Spot status codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // in the Amazon EC2 User Guide for Linux Instances. + Code *string `locationName:"code" type:"string"` + + // The description for the status code. + Message *string `locationName:"message" type:"string"` + + // The date and time of the most recent status update, in UTC format (for example, + // YYYY-MM-DDTHH:MM:SSZ). + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp"` +} + +// String returns the string representation +func (s SpotInstanceStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotInstanceStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *SpotInstanceStatus) SetCode(v string) *SpotInstanceStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *SpotInstanceStatus) SetMessage(v string) *SpotInstanceStatus { + s.Message = &v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *SpotInstanceStatus) SetUpdateTime(v time.Time) *SpotInstanceStatus { + s.UpdateTime = &v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type SpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *SpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s SpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *SpotMaintenanceStrategies) SetCapacityRebalance(v *SpotCapacityRebalance) *SpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + +// The options for Spot Instances. +type SpotMarketOptions struct { + _ struct{} `type:"structure"` + + // The required duration for the Spot Instances (also known as Spot blocks), + // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, + // or 360). + // + // The duration period starts as soon as your Spot Instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance + // for termination and provides a Spot Instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // You can't specify an Availability Zone group or a launch group if you specify + // a duration. 
+ // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). + BlockDurationMinutes *int64 `type:"integer"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `type:"string" enum:"InstanceInterruptionBehavior"` + + // The maximum hourly price you're willing to pay for the Spot Instances. The + // default is the On-Demand price. + MaxPrice *string `type:"string"` + + // The Spot Instance request type. For RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances), + // persistent Spot Instance requests are only supported when InstanceInterruptionBehavior + // is set to either hibernate or stop. + SpotInstanceType *string `type:"string" enum:"SpotInstanceType"` + + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported + // only for persistent requests. + // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, ValidUntil is not supported. The request remains + // active until all instances launch or you cancel the request. + ValidUntil *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s SpotMarketOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotMarketOptions) GoString() string { + return s.String() +} + +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value. +func (s *SpotMarketOptions) SetBlockDurationMinutes(v int64) *SpotMarketOptions { + s.BlockDurationMinutes = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. 
+func (s *SpotMarketOptions) SetInstanceInterruptionBehavior(v string) *SpotMarketOptions { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetMaxPrice sets the MaxPrice field's value. +func (s *SpotMarketOptions) SetMaxPrice(v string) *SpotMarketOptions { + s.MaxPrice = &v + return s +} + +// SetSpotInstanceType sets the SpotInstanceType field's value. +func (s *SpotMarketOptions) SetSpotInstanceType(v string) *SpotMarketOptions { + s.SpotInstanceType = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { + s.ValidUntil = &v + return s +} + +// Describes the configuration of Spot Instances in an EC2 Fleet. +type SpotOptions struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowest-price, EC2 Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all of the Spot Instance pools that you specify. + // + // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"SpotInstanceInterruptionBehavior"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when AllocationStrategy is set to lowest-price. 
EC2 Fleet selects + // the cheapest Spot pools and evenly allocates your target Spot capacity across + // the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` + + // The strategies for managing your workloads on your Spot Instances that will + // be interrupted. Currently only the capacity rebalance strategy is available. + MaintenanceStrategies *FleetSpotMaintenanceStrategies `locationName:"maintenanceStrategies" type:"structure"` + + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` + + // The minimum target capacity for Spot Instances in the fleet. If the minimum + // target capacity is not reached, the fleet launches no instances. + MinTargetCapacity *int64 `locationName:"minTargetCapacity" type:"integer"` + + // Indicates that the fleet launches all Spot Instances into a single Availability + // Zone. Supported only for fleets of type instant. + SingleAvailabilityZone *bool `locationName:"singleAvailabilityZone" type:"boolean"` + + // Indicates that the fleet uses a single instance type to launch all Spot Instances + // in the fleet. Supported only for fleets of type instant. + SingleInstanceType *bool `locationName:"singleInstanceType" type:"boolean"` +} + +// String returns the string representation +func (s SpotOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotOptions) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *SpotOptions) SetAllocationStrategy(v string) *SpotOptions { + s.AllocationStrategy = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. 
+func (s *SpotOptions) SetInstanceInterruptionBehavior(v string) *SpotOptions { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotOptions) SetInstancePoolsToUseCount(v int64) *SpotOptions { + s.InstancePoolsToUseCount = &v + return s +} + +// SetMaintenanceStrategies sets the MaintenanceStrategies field's value. +func (s *SpotOptions) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategies) *SpotOptions { + s.MaintenanceStrategies = v + return s +} + +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *SpotOptions) SetMaxTotalPrice(v string) *SpotOptions { + s.MaxTotalPrice = &v + return s +} + +// SetMinTargetCapacity sets the MinTargetCapacity field's value. +func (s *SpotOptions) SetMinTargetCapacity(v int64) *SpotOptions { + s.MinTargetCapacity = &v + return s +} + +// SetSingleAvailabilityZone sets the SingleAvailabilityZone field's value. +func (s *SpotOptions) SetSingleAvailabilityZone(v bool) *SpotOptions { + s.SingleAvailabilityZone = &v + return s +} + +// SetSingleInstanceType sets the SingleInstanceType field's value. +func (s *SpotOptions) SetSingleInstanceType(v bool) *SpotOptions { + s.SingleInstanceType = &v + return s +} + +// Describes the configuration of Spot Instances in an EC2 Fleet request. +type SpotOptionsRequest struct { + _ struct{} `type:"structure"` + + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowest-price, EC2 Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all of the Spot Instance pools that you specify. 
+ // + // If the allocation strategy is capacity-optimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. + AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` + + // The behavior when a Spot Instance is interrupted. The default is terminate. + InstanceInterruptionBehavior *string `type:"string" enum:"SpotInstanceInterruptionBehavior"` + + // The number of Spot pools across which to allocate your target Spot capacity. + // Valid only when Spot AllocationStrategy is set to lowest-price. EC2 Fleet + // selects the cheapest Spot pools and evenly allocates your target Spot capacity + // across the number of Spot pools that you specify. + InstancePoolsToUseCount *int64 `type:"integer"` + + // The strategies for managing your Spot Instances that are at an elevated risk + // of being interrupted. + MaintenanceStrategies *FleetSpotMaintenanceStrategiesRequest `type:"structure"` + + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice *string `type:"string"` + + // The minimum target capacity for Spot Instances in the fleet. If the minimum + // target capacity is not reached, the fleet launches no instances. + MinTargetCapacity *int64 `type:"integer"` + + // Indicates that the fleet launches all Spot Instances into a single Availability + // Zone. Supported only for fleets of type instant. + SingleAvailabilityZone *bool `type:"boolean"` + + // Indicates that the fleet uses a single instance type to launch all Spot Instances + // in the fleet. Supported only for fleets of type instant. 
+ SingleInstanceType *bool `type:"boolean"` +} + +// String returns the string representation +func (s SpotOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotOptionsRequest) GoString() string { + return s.String() +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *SpotOptionsRequest) SetAllocationStrategy(v string) *SpotOptionsRequest { + s.AllocationStrategy = &v + return s +} + +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotOptionsRequest) SetInstanceInterruptionBehavior(v string) *SpotOptionsRequest { + s.InstanceInterruptionBehavior = &v + return s +} + +// SetInstancePoolsToUseCount sets the InstancePoolsToUseCount field's value. +func (s *SpotOptionsRequest) SetInstancePoolsToUseCount(v int64) *SpotOptionsRequest { + s.InstancePoolsToUseCount = &v + return s +} + +// SetMaintenanceStrategies sets the MaintenanceStrategies field's value. +func (s *SpotOptionsRequest) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategiesRequest) *SpotOptionsRequest { + s.MaintenanceStrategies = v + return s +} + +// SetMaxTotalPrice sets the MaxTotalPrice field's value. +func (s *SpotOptionsRequest) SetMaxTotalPrice(v string) *SpotOptionsRequest { + s.MaxTotalPrice = &v + return s +} + +// SetMinTargetCapacity sets the MinTargetCapacity field's value. +func (s *SpotOptionsRequest) SetMinTargetCapacity(v int64) *SpotOptionsRequest { + s.MinTargetCapacity = &v + return s +} + +// SetSingleAvailabilityZone sets the SingleAvailabilityZone field's value. +func (s *SpotOptionsRequest) SetSingleAvailabilityZone(v bool) *SpotOptionsRequest { + s.SingleAvailabilityZone = &v + return s +} + +// SetSingleInstanceType sets the SingleInstanceType field's value. +func (s *SpotOptionsRequest) SetSingleInstanceType(v bool) *SpotOptionsRequest { + s.SingleInstanceType = &v + return s +} + +// Describes Spot Instance placement. 
+type SpotPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + // + // [Spot Fleet only] To specify multiple Availability Zones, separate them using + // commas; for example, "us-west-2a, us-west-2b". + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The name of the placement group. + GroupName *string `locationName:"groupName" type:"string"` + + // The tenancy of the instance (if the instance is running in a VPC). An instance + // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy + // is not supported for Spot Instances. + Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` +} + +// String returns the string representation +func (s SpotPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPlacement) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *SpotPlacement) SetAvailabilityZone(v string) *SpotPlacement { + s.AvailabilityZone = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *SpotPlacement) SetGroupName(v string) *SpotPlacement { + s.GroupName = &v + return s +} + +// SetTenancy sets the Tenancy field's value. +func (s *SpotPlacement) SetTenancy(v string) *SpotPlacement { + s.Tenancy = &v + return s +} + +// Describes the maximum price per hour that you are willing to pay for a Spot +// Instance. +type SpotPrice struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` + + // A general description of the AMI. 
+ ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` + + // The maximum price per hour that you are willing to pay for a Spot Instance. + SpotPrice *string `locationName:"spotPrice" type:"string"` + + // The date and time the request was created, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). + Timestamp *time.Time `locationName:"timestamp" type:"timestamp"` +} + +// String returns the string representation +func (s SpotPrice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotPrice) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *SpotPrice) SetAvailabilityZone(v string) *SpotPrice { + s.AvailabilityZone = &v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *SpotPrice) SetInstanceType(v string) *SpotPrice { + s.InstanceType = &v + return s +} + +// SetProductDescription sets the ProductDescription field's value. +func (s *SpotPrice) SetProductDescription(v string) *SpotPrice { + s.ProductDescription = &v + return s +} + +// SetSpotPrice sets the SpotPrice field's value. +func (s *SpotPrice) SetSpotPrice(v string) *SpotPrice { + s.SpotPrice = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *SpotPrice) SetTimestamp(v time.Time) *SpotPrice { + s.Timestamp = &v + return s +} + +// Describes a stale rule in a security group. +type StaleIpPermission struct { + _ struct{} `type:"structure"` + + // The start of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers) + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). 
+ IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // The IP ranges. Not applicable for stale security group rules. + IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // The prefix list IDs. Not applicable for stale security group rules. + PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // The security group pairs. Returns the ID of the referenced security group + // and VPC, and the ID and status of the VPC peering connection. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StaleIpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleIpPermission) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *StaleIpPermission) SetFromPort(v int64) *StaleIpPermission { + s.FromPort = &v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *StaleIpPermission) SetIpProtocol(v string) *StaleIpPermission { + s.IpProtocol = &v + return s +} + +// SetIpRanges sets the IpRanges field's value. +func (s *StaleIpPermission) SetIpRanges(v []*string) *StaleIpPermission { + s.IpRanges = v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *StaleIpPermission) SetPrefixListIds(v []*string) *StaleIpPermission { + s.PrefixListIds = v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *StaleIpPermission) SetToPort(v int64) *StaleIpPermission { + s.ToPort = &v + return s +} + +// SetUserIdGroupPairs sets the UserIdGroupPairs field's value. 
+func (s *StaleIpPermission) SetUserIdGroupPairs(v []*UserIdGroupPair) *StaleIpPermission { + s.UserIdGroupPairs = v + return s +} + +// Describes a stale security group (a security group that contains stale rules). +type StaleSecurityGroup struct { + _ struct{} `type:"structure"` + + // The description of the security group. + Description *string `locationName:"description" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // Information about the stale inbound rules in the security group. + StaleIpPermissions []*StaleIpPermission `locationName:"staleIpPermissions" locationNameList:"item" type:"list"` + + // Information about the stale outbound rules in the security group. + StaleIpPermissionsEgress []*StaleIpPermission `locationName:"staleIpPermissionsEgress" locationNameList:"item" type:"list"` + + // The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s StaleSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleSecurityGroup) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *StaleSecurityGroup) SetDescription(v string) *StaleSecurityGroup { + s.Description = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *StaleSecurityGroup) SetGroupId(v string) *StaleSecurityGroup { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *StaleSecurityGroup) SetGroupName(v string) *StaleSecurityGroup { + s.GroupName = &v + return s +} + +// SetStaleIpPermissions sets the StaleIpPermissions field's value. 
+func (s *StaleSecurityGroup) SetStaleIpPermissions(v []*StaleIpPermission) *StaleSecurityGroup { + s.StaleIpPermissions = v + return s +} + +// SetStaleIpPermissionsEgress sets the StaleIpPermissionsEgress field's value. +func (s *StaleSecurityGroup) SetStaleIpPermissionsEgress(v []*StaleIpPermission) *StaleSecurityGroup { + s.StaleIpPermissionsEgress = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *StaleSecurityGroup) SetVpcId(v string) *StaleSecurityGroup { + s.VpcId = &v + return s +} + +type StartInstancesInput struct { + _ struct{} `type:"structure"` + + // Reserved. + AdditionalInfo *string `locationName:"additionalInfo" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the instances. + // + // InstanceIds is a required field + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalInfo sets the AdditionalInfo field's value. 
+func (s *StartInstancesInput) SetAdditionalInfo(v string) *StartInstancesInput { + s.AdditionalInfo = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *StartInstancesInput) SetDryRun(v bool) *StartInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *StartInstancesInput) SetInstanceIds(v []*string) *StartInstancesInput { + s.InstanceIds = v + return s +} + +type StartInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the started instances. + StartingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StartInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstancesOutput) GoString() string { + return s.String() +} + +// SetStartingInstances sets the StartingInstances field's value. +func (s *StartInstancesOutput) SetStartingInstances(v []*InstanceStateChange) *StartInstancesOutput { + s.StartingInstances = v + return s +} + +type StartVpcEndpointServicePrivateDnsVerificationInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the endpoint service. 
+ // + // ServiceId is a required field + ServiceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartVpcEndpointServicePrivateDnsVerificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartVpcEndpointServicePrivateDnsVerificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartVpcEndpointServicePrivateDnsVerificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartVpcEndpointServicePrivateDnsVerificationInput"} + if s.ServiceId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *StartVpcEndpointServicePrivateDnsVerificationInput) SetDryRun(v bool) *StartVpcEndpointServicePrivateDnsVerificationInput { + s.DryRun = &v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *StartVpcEndpointServicePrivateDnsVerificationInput) SetServiceId(v string) *StartVpcEndpointServicePrivateDnsVerificationInput { + s.ServiceId = &v + return s +} + +type StartVpcEndpointServicePrivateDnsVerificationOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s StartVpcEndpointServicePrivateDnsVerificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartVpcEndpointServicePrivateDnsVerificationOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. 
+func (s *StartVpcEndpointServicePrivateDnsVerificationOutput) SetReturnValue(v bool) *StartVpcEndpointServicePrivateDnsVerificationOutput { + s.ReturnValue = &v + return s +} + +// Describes a state change. +type StateReason struct { + _ struct{} `type:"structure"` + + // The reason code for the state change. + Code *string `locationName:"code" type:"string"` + + // The message for the state change. + // + // * Server.InsufficientInstanceCapacity: There was insufficient capacity + // available to satisfy the launch request. + // + // * Server.InternalError: An internal error caused the instance to terminate + // during launch. + // + // * Server.ScheduledStop: The instance was stopped due to a scheduled retirement. + // + // * Server.SpotInstanceShutdown: The instance was stopped because the number + // of Spot requests with a maximum price equal to or higher than the Spot + // price exceeded available capacity or because of an increase in the Spot + // price. + // + // * Server.SpotInstanceTermination: The instance was terminated because + // the number of Spot requests with a maximum price equal to or higher than + // the Spot price exceeded available capacity or because of an increase in + // the Spot price. + // + // * Client.InstanceInitiatedShutdown: The instance was shut down using the + // shutdown -h command from the instance. + // + // * Client.InstanceTerminated: The instance was terminated or rebooted during + // AMI creation. + // + // * Client.InternalError: A client error caused the instance to terminate + // during launch. + // + // * Client.InvalidSnapshot.NotFound: The specified snapshot was not found. + // + // * Client.UserInitiatedHibernate: Hibernation was initiated on the instance. + // + // * Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // EC2 API. + // + // * Client.VolumeLimitExceeded: The limit on the number of EBS volumes or + // total storage was exceeded. 
Decrease usage or request an increase in your + // account limits. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StateReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StateReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *StateReason) SetCode(v string) *StateReason { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *StateReason) SetMessage(v string) *StateReason { + s.Message = &v + return s +} + +type StopInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // Forces the instances to stop. The instances do not have an opportunity to + // flush file system caches or file system metadata. If you use this option, + // you must perform file system check and repair procedures. This option is + // not recommended for Windows instances. + // + // Default: false + Force *bool `locationName:"force" type:"boolean"` + + // Hibernates the instance if the instance was enabled for hibernation at launch. + // If the instance cannot hibernate successfully, a normal shutdown occurs. + // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Default: false + Hibernate *bool `type:"boolean"` + + // The IDs of the instances. 
+ // + // InstanceIds is a required field + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *StopInstancesInput) SetDryRun(v bool) *StopInstancesInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *StopInstancesInput) SetForce(v bool) *StopInstancesInput { + s.Force = &v + return s +} + +// SetHibernate sets the Hibernate field's value. +func (s *StopInstancesInput) SetHibernate(v bool) *StopInstancesInput { + s.Hibernate = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *StopInstancesInput) SetInstanceIds(v []*string) *StopInstancesInput { + s.InstanceIds = v + return s +} + +type StopInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the stopped instances. + StoppingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StopInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopInstancesOutput) GoString() string { + return s.String() +} + +// SetStoppingInstances sets the StoppingInstances field's value. 
+func (s *StopInstancesOutput) SetStoppingInstances(v []*InstanceStateChange) *StopInstancesOutput { + s.StoppingInstances = v + return s +} + +// Describes the storage location for an instance store-backed AMI. +type Storage struct { + _ struct{} `type:"structure"` + + // An Amazon S3 storage location. + S3 *S3Storage `type:"structure"` +} + +// String returns the string representation +func (s Storage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Storage) GoString() string { + return s.String() +} + +// SetS3 sets the S3 field's value. +func (s *Storage) SetS3(v *S3Storage) *Storage { + s.S3 = v + return s +} + +// Describes a storage location in Amazon S3. +type StorageLocation struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket. + Bucket *string `type:"string"` + + // The key. + Key *string `type:"string"` +} + +// String returns the string representation +func (s StorageLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageLocation) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *StorageLocation) SetBucket(v string) *StorageLocation { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *StorageLocation) SetKey(v string) *StorageLocation { + s.Key = &v + return s +} + +// Describes a subnet. +type Subnet struct { + _ struct{} `type:"structure"` + + // Indicates whether a network interface created in this subnet (including a + // network interface created by RunInstances) receives an IPv6 address. + AssignIpv6AddressOnCreation *bool `locationName:"assignIpv6AddressOnCreation" type:"boolean"` + + // The Availability Zone of the subnet. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The AZ ID of the subnet. 
+ AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + + // The number of unused private IPv4 addresses in the subnet. The IPv4 addresses + // for any stopped instances are considered unavailable. + AvailableIpAddressCount *int64 `locationName:"availableIpAddressCount" type:"integer"` + + // The IPv4 CIDR block assigned to the subnet. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // The customer-owned IPv4 address pool associated with the subnet. + CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` + + // Indicates whether this is the default subnet for the Availability Zone. + DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` + + // Information about the IPv6 CIDR blocks associated with the subnet. + Ipv6CidrBlockAssociationSet []*SubnetIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociationSet" locationNameList:"item" type:"list"` + + // Indicates whether a network interface created in this subnet (including a + // network interface created by RunInstances) receives a customer-owned IPv4 + // address. + MapCustomerOwnedIpOnLaunch *bool `locationName:"mapCustomerOwnedIpOnLaunch" type:"boolean"` + + // Indicates whether instances launched in this subnet receive a public IPv4 + // address. + MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The ID of the AWS account that owns the subnet. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The current state of the subnet. + State *string `locationName:"state" type:"string" enum:"SubnetState"` + + // The Amazon Resource Name (ARN) of the subnet. + SubnetArn *string `locationName:"subnetArn" type:"string"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // Any tags assigned to the subnet. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC the subnet is in. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Subnet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subnet) GoString() string { + return s.String() +} + +// SetAssignIpv6AddressOnCreation sets the AssignIpv6AddressOnCreation field's value. +func (s *Subnet) SetAssignIpv6AddressOnCreation(v bool) *Subnet { + s.AssignIpv6AddressOnCreation = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Subnet) SetAvailabilityZone(v string) *Subnet { + s.AvailabilityZone = &v + return s +} + +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *Subnet) SetAvailabilityZoneId(v string) *Subnet { + s.AvailabilityZoneId = &v + return s +} + +// SetAvailableIpAddressCount sets the AvailableIpAddressCount field's value. +func (s *Subnet) SetAvailableIpAddressCount(v int64) *Subnet { + s.AvailableIpAddressCount = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *Subnet) SetCidrBlock(v string) *Subnet { + s.CidrBlock = &v + return s +} + +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *Subnet) SetCustomerOwnedIpv4Pool(v string) *Subnet { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetDefaultForAz sets the DefaultForAz field's value. +func (s *Subnet) SetDefaultForAz(v bool) *Subnet { + s.DefaultForAz = &v + return s +} + +// SetIpv6CidrBlockAssociationSet sets the Ipv6CidrBlockAssociationSet field's value. +func (s *Subnet) SetIpv6CidrBlockAssociationSet(v []*SubnetIpv6CidrBlockAssociation) *Subnet { + s.Ipv6CidrBlockAssociationSet = v + return s +} + +// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value. 
+func (s *Subnet) SetMapCustomerOwnedIpOnLaunch(v bool) *Subnet { + s.MapCustomerOwnedIpOnLaunch = &v + return s +} + +// SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value. +func (s *Subnet) SetMapPublicIpOnLaunch(v bool) *Subnet { + s.MapPublicIpOnLaunch = &v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *Subnet) SetOutpostArn(v string) *Subnet { + s.OutpostArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *Subnet) SetOwnerId(v string) *Subnet { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *Subnet) SetState(v string) *Subnet { + s.State = &v + return s +} + +// SetSubnetArn sets the SubnetArn field's value. +func (s *Subnet) SetSubnetArn(v string) *Subnet { + s.SubnetArn = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *Subnet) SetSubnetId(v string) *Subnet { + s.SubnetId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Subnet) SetTags(v []*Tag) *Subnet { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *Subnet) SetVpcId(v string) *Subnet { + s.VpcId = &v + return s +} + +// Describes the subnet association with the transit gateway multicast domain. +type SubnetAssociation struct { + _ struct{} `type:"structure"` + + // The state of the subnet association. + State *string `locationName:"state" type:"string" enum:"TransitGatewayMulitcastDomainAssociationState"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` +} + +// String returns the string representation +func (s SubnetAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetAssociation) GoString() string { + return s.String() +} + +// SetState sets the State field's value. 
+func (s *SubnetAssociation) SetState(v string) *SubnetAssociation { + s.State = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *SubnetAssociation) SetSubnetId(v string) *SubnetAssociation { + s.SubnetId = &v + return s +} + +// Describes the state of a CIDR block. +type SubnetCidrBlockState struct { + _ struct{} `type:"structure"` + + // The state of a CIDR block. + State *string `locationName:"state" type:"string" enum:"SubnetCidrBlockStateCode"` + + // A message about the status of the CIDR block, if applicable. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s SubnetCidrBlockState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetCidrBlockState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *SubnetCidrBlockState) SetState(v string) *SubnetCidrBlockState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *SubnetCidrBlockState) SetStatusMessage(v string) *SubnetCidrBlockState { + s.StatusMessage = &v + return s +} + +// Describes an IPv6 CIDR block associated with a subnet. +type SubnetIpv6CidrBlockAssociation struct { + _ struct{} `type:"structure"` + + // The association ID for the CIDR block. + AssociationId *string `locationName:"associationId" type:"string"` + + // The IPv6 CIDR block. + Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"` + + // Information about the state of the CIDR block. 
+ Ipv6CidrBlockState *SubnetCidrBlockState `locationName:"ipv6CidrBlockState" type:"structure"` +} + +// String returns the string representation +func (s SubnetIpv6CidrBlockAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetIpv6CidrBlockAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *SubnetIpv6CidrBlockAssociation) SetAssociationId(v string) *SubnetIpv6CidrBlockAssociation { + s.AssociationId = &v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *SubnetIpv6CidrBlockAssociation) SetIpv6CidrBlock(v string) *SubnetIpv6CidrBlockAssociation { + s.Ipv6CidrBlock = &v + return s +} + +// SetIpv6CidrBlockState sets the Ipv6CidrBlockState field's value. +func (s *SubnetIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *SubnetCidrBlockState) *SubnetIpv6CidrBlockAssociation { + s.Ipv6CidrBlockState = v + return s +} + +// Describes the burstable performance instance whose credit option for CPU +// usage was successfully modified. +type SuccessfulInstanceCreditSpecificationItem struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s SuccessfulInstanceCreditSpecificationItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuccessfulInstanceCreditSpecificationItem) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *SuccessfulInstanceCreditSpecificationItem) SetInstanceId(v string) *SuccessfulInstanceCreditSpecificationItem { + s.InstanceId = &v + return s +} + +// Describes a Reserved Instance whose queued purchase was successfully deleted. 
+type SuccessfulQueuedPurchaseDeletion struct { + _ struct{} `type:"structure"` + + // The ID of the Reserved Instance. + ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` +} + +// String returns the string representation +func (s SuccessfulQueuedPurchaseDeletion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuccessfulQueuedPurchaseDeletion) GoString() string { + return s.String() +} + +// SetReservedInstancesId sets the ReservedInstancesId field's value. +func (s *SuccessfulQueuedPurchaseDeletion) SetReservedInstancesId(v string) *SuccessfulQueuedPurchaseDeletion { + s.ReservedInstancesId = &v + return s +} + +// Describes a tag. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode + // characters. May not begin with aws:. + Key *string `locationName:"key" type:"string"` + + // The value of the tag. + // + // Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode + // characters. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Describes a tag. +type TagDescription struct { + _ struct{} `type:"structure"` + + // The tag key. + Key *string `locationName:"key" type:"string"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tag value. 
+ Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *TagDescription) SetKey(v string) *TagDescription { + s.Key = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *TagDescription) SetResourceId(v string) *TagDescription { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TagDescription) SetResourceType(v string) *TagDescription { + s.ResourceType = &v + return s +} + +// SetValue sets the Value field's value. +func (s *TagDescription) SetValue(v string) *TagDescription { + s.Value = &v + return s +} + +// The tags to apply to a resource when the resource is being created. +type TagSpecification struct { + _ struct{} `type:"structure"` + + // The type of resource to tag. Currently, the resource types that support tagging + // on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint + // | customer-gateway | dedicated-host | dhcp-options | export-image-task | + // export-instance-task | fleet | fpga-image | host-reservation | import-image-task + // | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 + // | key-pair | launch-template | placement-group | prefix-list | natgateway + // | network-acl | route-table | security-group | spot-fleet-request | spot-instances-request + // | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target + // | transit-gateway | transit-gateway-attachment | transit-gateway-route-table + // | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and + // gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log + // | vpn-connection | vpn-gateway. 
+ // + // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tags to apply to the resource. + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TagSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagSpecification) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *TagSpecification) SetResourceType(v string) *TagSpecification { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification { + s.Tags = v + return s +} + +// The number of units to request. You can choose to set the target capacity +// in terms of instances or a performance characteristic that is important to +// your application workload, such as vCPUs, memory, or I/O. If the request +// type is maintain, you can specify a target capacity of 0 and add capacity +// later. +// +// You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance +// MaxTotalPrice, or both to ensure that your fleet cost does not exceed your +// budget. If you set a maximum price per hour for the On-Demand Instances and +// Spot Instances in your request, EC2 Fleet will launch instances until it +// reaches the maximum amount that you're willing to pay. When the maximum amount +// you're willing to pay is reached, the fleet stops launching instances even +// if it hasn’t met the target capacity. 
The MaxTotalPrice parameters are +// located in OnDemandOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptions.html) +// and SpotOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptions) +type TargetCapacitySpecification struct { + _ struct{} `type:"structure"` + + // The default TotalTargetCapacity, which is either Spot or On-Demand. + DefaultTargetCapacityType *string `locationName:"defaultTargetCapacityType" type:"string" enum:"DefaultTargetCapacityType"` + + // The number of On-Demand units to request. If you specify a target capacity + // for Spot units, you cannot specify a target capacity for On-Demand units. + OnDemandTargetCapacity *int64 `locationName:"onDemandTargetCapacity" type:"integer"` + + // The maximum number of Spot units to launch. If you specify a target capacity + // for On-Demand units, you cannot specify a target capacity for Spot units. + SpotTargetCapacity *int64 `locationName:"spotTargetCapacity" type:"integer"` + + // The number of units to request, filled using DefaultTargetCapacityType. + TotalTargetCapacity *int64 `locationName:"totalTargetCapacity" type:"integer"` +} + +// String returns the string representation +func (s TargetCapacitySpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetCapacitySpecification) GoString() string { + return s.String() +} + +// SetDefaultTargetCapacityType sets the DefaultTargetCapacityType field's value. +func (s *TargetCapacitySpecification) SetDefaultTargetCapacityType(v string) *TargetCapacitySpecification { + s.DefaultTargetCapacityType = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *TargetCapacitySpecification) SetOnDemandTargetCapacity(v int64) *TargetCapacitySpecification { + s.OnDemandTargetCapacity = &v + return s +} + +// SetSpotTargetCapacity sets the SpotTargetCapacity field's value. 
+func (s *TargetCapacitySpecification) SetSpotTargetCapacity(v int64) *TargetCapacitySpecification { + s.SpotTargetCapacity = &v + return s +} + +// SetTotalTargetCapacity sets the TotalTargetCapacity field's value. +func (s *TargetCapacitySpecification) SetTotalTargetCapacity(v int64) *TargetCapacitySpecification { + s.TotalTargetCapacity = &v + return s +} + +// The number of units to request. You can choose to set the target capacity +// as the number of instances. Or you can set the target capacity to a performance +// characteristic that is important to your application workload, such as vCPUs, +// memory, or I/O. If the request type is maintain, you can specify a target +// capacity of 0 and add capacity later. +// +// You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance +// MaxTotalPrice parameter, or both parameters to ensure that your fleet cost +// does not exceed your budget. If you set a maximum price per hour for the +// On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch +// instances until it reaches the maximum amount that you're willing to pay. +// When the maximum amount you're willing to pay is reached, the fleet stops +// launching instances even if it hasn’t met the target capacity. The MaxTotalPrice +// parameters are located in OnDemandOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptionsRequest) +// and SpotOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptionsRequest). +type TargetCapacitySpecificationRequest struct { + _ struct{} `type:"structure"` + + // The default TotalTargetCapacity, which is either Spot or On-Demand. + DefaultTargetCapacityType *string `type:"string" enum:"DefaultTargetCapacityType"` + + // The number of On-Demand units to request. + OnDemandTargetCapacity *int64 `type:"integer"` + + // The number of Spot units to request. 
+ SpotTargetCapacity *int64 `type:"integer"` + + // The number of units to request, filled using DefaultTargetCapacityType. + // + // TotalTargetCapacity is a required field + TotalTargetCapacity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s TargetCapacitySpecificationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetCapacitySpecificationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetCapacitySpecificationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetCapacitySpecificationRequest"} + if s.TotalTargetCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("TotalTargetCapacity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultTargetCapacityType sets the DefaultTargetCapacityType field's value. +func (s *TargetCapacitySpecificationRequest) SetDefaultTargetCapacityType(v string) *TargetCapacitySpecificationRequest { + s.DefaultTargetCapacityType = &v + return s +} + +// SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetOnDemandTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.OnDemandTargetCapacity = &v + return s +} + +// SetSpotTargetCapacity sets the SpotTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetSpotTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.SpotTargetCapacity = &v + return s +} + +// SetTotalTargetCapacity sets the TotalTargetCapacity field's value. +func (s *TargetCapacitySpecificationRequest) SetTotalTargetCapacity(v int64) *TargetCapacitySpecificationRequest { + s.TotalTargetCapacity = &v + return s +} + +// Information about the Convertible Reserved Instance offering. 
+type TargetConfiguration struct { + _ struct{} `type:"structure"` + + // The number of instances the Convertible Reserved Instance offering can be + // applied to. This parameter is reserved and cannot be specified in a request + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The ID of the Convertible Reserved Instance offering. + OfferingId *string `locationName:"offeringId" type:"string"` +} + +// String returns the string representation +func (s TargetConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetConfiguration) GoString() string { + return s.String() +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *TargetConfiguration) SetInstanceCount(v int64) *TargetConfiguration { + s.InstanceCount = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *TargetConfiguration) SetOfferingId(v string) *TargetConfiguration { + s.OfferingId = &v + return s +} + +// Details about the target configuration. +type TargetConfigurationRequest struct { + _ struct{} `type:"structure"` + + // The number of instances the Convertible Reserved Instance offering can be + // applied to. This parameter is reserved and cannot be specified in a request + InstanceCount *int64 `type:"integer"` + + // The Convertible Reserved Instance offering ID. + // + // OfferingId is a required field + OfferingId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TargetConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TargetConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetConfigurationRequest"} + if s.OfferingId == nil { + invalidParams.Add(request.NewErrParamRequired("OfferingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceCount sets the InstanceCount field's value. +func (s *TargetConfigurationRequest) SetInstanceCount(v int64) *TargetConfigurationRequest { + s.InstanceCount = &v + return s +} + +// SetOfferingId sets the OfferingId field's value. +func (s *TargetConfigurationRequest) SetOfferingId(v string) *TargetConfigurationRequest { + s.OfferingId = &v + return s +} + +// Describes a load balancer target group. +type TargetGroup struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + Arn *string `locationName:"arn" type:"string"` +} + +// String returns the string representation +func (s TargetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGroup) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *TargetGroup) SetArn(v string) *TargetGroup { + s.Arn = &v + return s +} + +// Describes the target groups to attach to a Spot Fleet. Spot Fleet registers +// the running Spot Instances with these target groups. +type TargetGroupsConfig struct { + _ struct{} `type:"structure"` + + // One or more target groups. + TargetGroups []*TargetGroup `locationName:"targetGroups" locationNameList:"item" min:"1" type:"list"` +} + +// String returns the string representation +func (s TargetGroupsConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGroupsConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TargetGroupsConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGroupsConfig"} + if s.TargetGroups != nil && len(s.TargetGroups) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetGroups", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroups sets the TargetGroups field's value. +func (s *TargetGroupsConfig) SetTargetGroups(v []*TargetGroup) *TargetGroupsConfig { + s.TargetGroups = v + return s +} + +// Describes a target network associated with a Client VPN endpoint. +type TargetNetwork struct { + _ struct{} `type:"structure"` + + // The ID of the association. + AssociationId *string `locationName:"associationId" type:"string"` + + // The ID of the Client VPN endpoint with which the target network is associated. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // The IDs of the security groups applied to the target network association. + SecurityGroups []*string `locationName:"securityGroups" locationNameList:"item" type:"list"` + + // The current state of the target network association. + Status *AssociationStatus `locationName:"status" type:"structure"` + + // The ID of the subnet specified as the target network. + TargetNetworkId *string `locationName:"targetNetworkId" type:"string"` + + // The ID of the VPC in which the target network (subnet) is located. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s TargetNetwork) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetNetwork) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *TargetNetwork) SetAssociationId(v string) *TargetNetwork { + s.AssociationId = &v + return s +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. 
+func (s *TargetNetwork) SetClientVpnEndpointId(v string) *TargetNetwork { + s.ClientVpnEndpointId = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *TargetNetwork) SetSecurityGroups(v []*string) *TargetNetwork { + s.SecurityGroups = v + return s +} + +// SetStatus sets the Status field's value. +func (s *TargetNetwork) SetStatus(v *AssociationStatus) *TargetNetwork { + s.Status = v + return s +} + +// SetTargetNetworkId sets the TargetNetworkId field's value. +func (s *TargetNetwork) SetTargetNetworkId(v string) *TargetNetwork { + s.TargetNetworkId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *TargetNetwork) SetVpcId(v string) *TargetNetwork { + s.VpcId = &v + return s +} + +// The total value of the new Convertible Reserved Instances. +type TargetReservationValue struct { + _ struct{} `type:"structure"` + + // The total value of the Convertible Reserved Instances that make up the exchange. + // This is the sum of the list value, remaining upfront price, and additional + // upfront cost of the exchange. + ReservationValue *ReservationValue `locationName:"reservationValue" type:"structure"` + + // The configuration of the Convertible Reserved Instances that make up the + // exchange. + TargetConfiguration *TargetConfiguration `locationName:"targetConfiguration" type:"structure"` +} + +// String returns the string representation +func (s TargetReservationValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetReservationValue) GoString() string { + return s.String() +} + +// SetReservationValue sets the ReservationValue field's value. +func (s *TargetReservationValue) SetReservationValue(v *ReservationValue) *TargetReservationValue { + s.ReservationValue = v + return s +} + +// SetTargetConfiguration sets the TargetConfiguration field's value. 
+func (s *TargetReservationValue) SetTargetConfiguration(v *TargetConfiguration) *TargetReservationValue { + s.TargetConfiguration = v + return s +} + +type TerminateClientVpnConnectionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint to which the client is connected. + // + // ClientVpnEndpointId is a required field + ClientVpnEndpointId *string `type:"string" required:"true"` + + // The ID of the client connection to be terminated. + ConnectionId *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name of the user who initiated the connection. Use this option to terminate + // all active connections for the specified user. This option can only be used + // if the user has established up to five connections. + Username *string `type:"string"` +} + +// String returns the string representation +func (s TerminateClientVpnConnectionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateClientVpnConnectionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateClientVpnConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateClientVpnConnectionsInput"} + if s.ClientVpnEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientVpnEndpointId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. 
+func (s *TerminateClientVpnConnectionsInput) SetClientVpnEndpointId(v string) *TerminateClientVpnConnectionsInput { + s.ClientVpnEndpointId = &v + return s +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *TerminateClientVpnConnectionsInput) SetConnectionId(v string) *TerminateClientVpnConnectionsInput { + s.ConnectionId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *TerminateClientVpnConnectionsInput) SetDryRun(v bool) *TerminateClientVpnConnectionsInput { + s.DryRun = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *TerminateClientVpnConnectionsInput) SetUsername(v string) *TerminateClientVpnConnectionsInput { + s.Username = &v + return s +} + +type TerminateClientVpnConnectionsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the Client VPN endpoint. + ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` + + // The current state of the client connections. + ConnectionStatuses []*TerminateConnectionStatus `locationName:"connectionStatuses" locationNameList:"item" type:"list"` + + // The user who established the terminated client connections. + Username *string `locationName:"username" type:"string"` +} + +// String returns the string representation +func (s TerminateClientVpnConnectionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateClientVpnConnectionsOutput) GoString() string { + return s.String() +} + +// SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. +func (s *TerminateClientVpnConnectionsOutput) SetClientVpnEndpointId(v string) *TerminateClientVpnConnectionsOutput { + s.ClientVpnEndpointId = &v + return s +} + +// SetConnectionStatuses sets the ConnectionStatuses field's value. 
+func (s *TerminateClientVpnConnectionsOutput) SetConnectionStatuses(v []*TerminateConnectionStatus) *TerminateClientVpnConnectionsOutput { + s.ConnectionStatuses = v + return s +} + +// SetUsername sets the Username field's value. +func (s *TerminateClientVpnConnectionsOutput) SetUsername(v string) *TerminateClientVpnConnectionsOutput { + s.Username = &v + return s +} + +// Information about a terminated Client VPN endpoint client connection. +type TerminateConnectionStatus struct { + _ struct{} `type:"structure"` + + // The ID of the client connection. + ConnectionId *string `locationName:"connectionId" type:"string"` + + // A message about the status of the client connection, if applicable. + CurrentStatus *ClientVpnConnectionStatus `locationName:"currentStatus" type:"structure"` + + // The state of the client connection. + PreviousStatus *ClientVpnConnectionStatus `locationName:"previousStatus" type:"structure"` +} + +// String returns the string representation +func (s TerminateConnectionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateConnectionStatus) GoString() string { + return s.String() +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *TerminateConnectionStatus) SetConnectionId(v string) *TerminateConnectionStatus { + s.ConnectionId = &v + return s +} + +// SetCurrentStatus sets the CurrentStatus field's value. +func (s *TerminateConnectionStatus) SetCurrentStatus(v *ClientVpnConnectionStatus) *TerminateConnectionStatus { + s.CurrentStatus = v + return s +} + +// SetPreviousStatus sets the PreviousStatus field's value. 
+func (s *TerminateConnectionStatus) SetPreviousStatus(v *ClientVpnConnectionStatus) *TerminateConnectionStatus { + s.PreviousStatus = v + return s +} + +type TerminateInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The IDs of the instances. + // + // Constraints: Up to 1000 instance IDs. We recommend breaking up this request + // into smaller batches. + // + // InstanceIds is a required field + InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateInstancesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *TerminateInstancesInput) SetDryRun(v bool) *TerminateInstancesInput { + s.DryRun = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *TerminateInstancesInput) SetInstanceIds(v []*string) *TerminateInstancesInput { + s.InstanceIds = v + return s +} + +type TerminateInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the terminated instances. 
+ TerminatingInstances []*InstanceStateChange `locationName:"instancesSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TerminateInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateInstancesOutput) GoString() string { + return s.String() +} + +// SetTerminatingInstances sets the TerminatingInstances field's value. +func (s *TerminateInstancesOutput) SetTerminatingInstances(v []*InstanceStateChange) *TerminateInstancesOutput { + s.TerminatingInstances = v + return s +} + +// Describes the Traffic Mirror filter. +type TrafficMirrorFilter struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror filter. + Description *string `locationName:"description" type:"string"` + + // Information about the egress rules that are associated with the Traffic Mirror + // filter. + EgressFilterRules []*TrafficMirrorFilterRule `locationName:"egressFilterRuleSet" locationNameList:"item" type:"list"` + + // Information about the ingress rules that are associated with the Traffic + // Mirror filter. + IngressFilterRules []*TrafficMirrorFilterRule `locationName:"ingressFilterRuleSet" locationNameList:"item" type:"list"` + + // The network service traffic that is associated with the Traffic Mirror filter. + NetworkServices []*string `locationName:"networkServiceSet" locationNameList:"item" type:"list"` + + // The tags assigned to the Traffic Mirror filter. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. 
+ TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` +} + +// String returns the string representation +func (s TrafficMirrorFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorFilter) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorFilter) SetDescription(v string) *TrafficMirrorFilter { + s.Description = &v + return s +} + +// SetEgressFilterRules sets the EgressFilterRules field's value. +func (s *TrafficMirrorFilter) SetEgressFilterRules(v []*TrafficMirrorFilterRule) *TrafficMirrorFilter { + s.EgressFilterRules = v + return s +} + +// SetIngressFilterRules sets the IngressFilterRules field's value. +func (s *TrafficMirrorFilter) SetIngressFilterRules(v []*TrafficMirrorFilterRule) *TrafficMirrorFilter { + s.IngressFilterRules = v + return s +} + +// SetNetworkServices sets the NetworkServices field's value. +func (s *TrafficMirrorFilter) SetNetworkServices(v []*string) *TrafficMirrorFilter { + s.NetworkServices = v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorFilter) SetTags(v []*Tag) *TrafficMirrorFilter { + s.Tags = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorFilter) SetTrafficMirrorFilterId(v string) *TrafficMirrorFilter { + s.TrafficMirrorFilterId = &v + return s +} + +// Describes the Traffic Mirror rule. +type TrafficMirrorFilterRule struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror rule. + Description *string `locationName:"description" type:"string"` + + // The destination CIDR block assigned to the Traffic Mirror rule. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The destination port range assigned to the Traffic Mirror rule. 
+ DestinationPortRange *TrafficMirrorPortRange `locationName:"destinationPortRange" type:"structure"` + + // The protocol assigned to the Traffic Mirror rule. + Protocol *int64 `locationName:"protocol" type:"integer"` + + // The action assigned to the Traffic Mirror rule. + RuleAction *string `locationName:"ruleAction" type:"string" enum:"TrafficMirrorRuleAction"` + + // The rule number of the Traffic Mirror rule. + RuleNumber *int64 `locationName:"ruleNumber" type:"integer"` + + // The source CIDR block assigned to the Traffic Mirror rule. + SourceCidrBlock *string `locationName:"sourceCidrBlock" type:"string"` + + // The source port range assigned to the Traffic Mirror rule. + SourcePortRange *TrafficMirrorPortRange `locationName:"sourcePortRange" type:"structure"` + + // The traffic direction assigned to the Traffic Mirror rule. + TrafficDirection *string `locationName:"trafficDirection" type:"string" enum:"TrafficDirection"` + + // The ID of the Traffic Mirror filter that the rule is associated with. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` + + // The ID of the Traffic Mirror rule. + TrafficMirrorFilterRuleId *string `locationName:"trafficMirrorFilterRuleId" type:"string"` +} + +// String returns the string representation +func (s TrafficMirrorFilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorFilterRule) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorFilterRule) SetDescription(v string) *TrafficMirrorFilterRule { + s.Description = &v + return s +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *TrafficMirrorFilterRule) SetDestinationCidrBlock(v string) *TrafficMirrorFilterRule { + s.DestinationCidrBlock = &v + return s +} + +// SetDestinationPortRange sets the DestinationPortRange field's value. 
+func (s *TrafficMirrorFilterRule) SetDestinationPortRange(v *TrafficMirrorPortRange) *TrafficMirrorFilterRule { + s.DestinationPortRange = v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *TrafficMirrorFilterRule) SetProtocol(v int64) *TrafficMirrorFilterRule { + s.Protocol = &v + return s +} + +// SetRuleAction sets the RuleAction field's value. +func (s *TrafficMirrorFilterRule) SetRuleAction(v string) *TrafficMirrorFilterRule { + s.RuleAction = &v + return s +} + +// SetRuleNumber sets the RuleNumber field's value. +func (s *TrafficMirrorFilterRule) SetRuleNumber(v int64) *TrafficMirrorFilterRule { + s.RuleNumber = &v + return s +} + +// SetSourceCidrBlock sets the SourceCidrBlock field's value. +func (s *TrafficMirrorFilterRule) SetSourceCidrBlock(v string) *TrafficMirrorFilterRule { + s.SourceCidrBlock = &v + return s +} + +// SetSourcePortRange sets the SourcePortRange field's value. +func (s *TrafficMirrorFilterRule) SetSourcePortRange(v *TrafficMirrorPortRange) *TrafficMirrorFilterRule { + s.SourcePortRange = v + return s +} + +// SetTrafficDirection sets the TrafficDirection field's value. +func (s *TrafficMirrorFilterRule) SetTrafficDirection(v string) *TrafficMirrorFilterRule { + s.TrafficDirection = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorFilterRule) SetTrafficMirrorFilterId(v string) *TrafficMirrorFilterRule { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorFilterRuleId sets the TrafficMirrorFilterRuleId field's value. +func (s *TrafficMirrorFilterRule) SetTrafficMirrorFilterRuleId(v string) *TrafficMirrorFilterRule { + s.TrafficMirrorFilterRuleId = &v + return s +} + +// Describes the Traffic Mirror port range. +type TrafficMirrorPortRange struct { + _ struct{} `type:"structure"` + + // The start of the Traffic Mirror port range. This applies to the TCP and UDP + // protocols. 
+ FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The end of the Traffic Mirror port range. This applies to the TCP and UDP + // protocols. + ToPort *int64 `locationName:"toPort" type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorPortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorPortRange) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *TrafficMirrorPortRange) SetFromPort(v int64) *TrafficMirrorPortRange { + s.FromPort = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *TrafficMirrorPortRange) SetToPort(v int64) *TrafficMirrorPortRange { + s.ToPort = &v + return s +} + +// Information about the Traffic Mirror filter rule port range. +type TrafficMirrorPortRangeRequest struct { + _ struct{} `type:"structure"` + + // The first port in the Traffic Mirror port range. This applies to the TCP + // and UDP protocols. + FromPort *int64 `type:"integer"` + + // The last port in the Traffic Mirror port range. This applies to the TCP and + // UDP protocols. + ToPort *int64 `type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorPortRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorPortRangeRequest) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *TrafficMirrorPortRangeRequest) SetFromPort(v int64) *TrafficMirrorPortRangeRequest { + s.FromPort = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *TrafficMirrorPortRangeRequest) SetToPort(v int64) *TrafficMirrorPortRangeRequest { + s.ToPort = &v + return s +} + +// Describes a Traffic Mirror session. +type TrafficMirrorSession struct { + _ struct{} `type:"structure"` + + // The description of the Traffic Mirror session. 
+ Description *string `locationName:"description" type:"string"` + + // The ID of the Traffic Mirror session's network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the account that owns the Traffic Mirror session. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The number of bytes in each packet to mirror. These are the bytes after the + // VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. + // For example, if you set this value to 100, then the first 100 bytes that + // meet the filter criteria are copied to the target. Do not specify this parameter + // when you want to mirror the entire packet + PacketLength *int64 `locationName:"packetLength" type:"integer"` + + // The session number determines the order in which sessions are evaluated when + // an interface is used by multiple sessions. The first session with a matching + // filter is the one that mirrors the packets. + // + // Valid values are 1-32766. + SessionNumber *int64 `locationName:"sessionNumber" type:"integer"` + + // The tags assigned to the Traffic Mirror session. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror filter. + TrafficMirrorFilterId *string `locationName:"trafficMirrorFilterId" type:"string"` + + // The ID for the Traffic Mirror session. + TrafficMirrorSessionId *string `locationName:"trafficMirrorSessionId" type:"string"` + + // The ID of the Traffic Mirror target. + TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` + + // The virtual network ID associated with the Traffic Mirror session. 
+ VirtualNetworkId *int64 `locationName:"virtualNetworkId" type:"integer"` +} + +// String returns the string representation +func (s TrafficMirrorSession) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorSession) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorSession) SetDescription(v string) *TrafficMirrorSession { + s.Description = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *TrafficMirrorSession) SetNetworkInterfaceId(v string) *TrafficMirrorSession { + s.NetworkInterfaceId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *TrafficMirrorSession) SetOwnerId(v string) *TrafficMirrorSession { + s.OwnerId = &v + return s +} + +// SetPacketLength sets the PacketLength field's value. +func (s *TrafficMirrorSession) SetPacketLength(v int64) *TrafficMirrorSession { + s.PacketLength = &v + return s +} + +// SetSessionNumber sets the SessionNumber field's value. +func (s *TrafficMirrorSession) SetSessionNumber(v int64) *TrafficMirrorSession { + s.SessionNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorSession) SetTags(v []*Tag) *TrafficMirrorSession { + s.Tags = v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *TrafficMirrorSession) SetTrafficMirrorFilterId(v string) *TrafficMirrorSession { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorSessionId sets the TrafficMirrorSessionId field's value. +func (s *TrafficMirrorSession) SetTrafficMirrorSessionId(v string) *TrafficMirrorSession { + s.TrafficMirrorSessionId = &v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. 
+func (s *TrafficMirrorSession) SetTrafficMirrorTargetId(v string) *TrafficMirrorSession { + s.TrafficMirrorTargetId = &v + return s +} + +// SetVirtualNetworkId sets the VirtualNetworkId field's value. +func (s *TrafficMirrorSession) SetVirtualNetworkId(v int64) *TrafficMirrorSession { + s.VirtualNetworkId = &v + return s +} + +// Describes a Traffic Mirror target. +type TrafficMirrorTarget struct { + _ struct{} `type:"structure"` + + // Information about the Traffic Mirror target. + Description *string `locationName:"description" type:"string"` + + // The network interface ID that is attached to the target. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The Amazon Resource Name (ARN) of the Network Load Balancer. + NetworkLoadBalancerArn *string `locationName:"networkLoadBalancerArn" type:"string"` + + // The ID of the account that owns the Traffic Mirror target. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The tags assigned to the Traffic Mirror target. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the Traffic Mirror target. + TrafficMirrorTargetId *string `locationName:"trafficMirrorTargetId" type:"string"` + + // The type of Traffic Mirror target. + Type *string `locationName:"type" type:"string" enum:"TrafficMirrorTargetType"` +} + +// String returns the string representation +func (s TrafficMirrorTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrafficMirrorTarget) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TrafficMirrorTarget) SetDescription(v string) *TrafficMirrorTarget { + s.Description = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. 
+func (s *TrafficMirrorTarget) SetNetworkInterfaceId(v string) *TrafficMirrorTarget { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkLoadBalancerArn sets the NetworkLoadBalancerArn field's value. +func (s *TrafficMirrorTarget) SetNetworkLoadBalancerArn(v string) *TrafficMirrorTarget { + s.NetworkLoadBalancerArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *TrafficMirrorTarget) SetOwnerId(v string) *TrafficMirrorTarget { + s.OwnerId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TrafficMirrorTarget) SetTags(v []*Tag) *TrafficMirrorTarget { + s.Tags = v + return s +} + +// SetTrafficMirrorTargetId sets the TrafficMirrorTargetId field's value. +func (s *TrafficMirrorTarget) SetTrafficMirrorTargetId(v string) *TrafficMirrorTarget { + s.TrafficMirrorTargetId = &v + return s +} + +// SetType sets the Type field's value. +func (s *TrafficMirrorTarget) SetType(v string) *TrafficMirrorTarget { + s.Type = &v + return s +} + +// Describes a transit gateway. +type TransitGateway struct { + _ struct{} `type:"structure"` + + // The creation time. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The description of the transit gateway. + Description *string `locationName:"description" type:"string"` + + // The transit gateway options. + Options *TransitGatewayOptions `locationName:"options" type:"structure"` + + // The ID of the AWS account ID that owns the transit gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the transit gateway. + State *string `locationName:"state" type:"string" enum:"TransitGatewayState"` + + // The tags for the transit gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The Amazon Resource Name (ARN) of the transit gateway. + TransitGatewayArn *string `locationName:"transitGatewayArn" type:"string"` + + // The ID of the transit gateway. 
+ TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` +} + +// String returns the string representation +func (s TransitGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGateway) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransitGateway) SetCreationTime(v time.Time) *TransitGateway { + s.CreationTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *TransitGateway) SetDescription(v string) *TransitGateway { + s.Description = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *TransitGateway) SetOptions(v *TransitGatewayOptions) *TransitGateway { + s.Options = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *TransitGateway) SetOwnerId(v string) *TransitGateway { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGateway) SetState(v string) *TransitGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransitGateway) SetTags(v []*Tag) *TransitGateway { + s.Tags = v + return s +} + +// SetTransitGatewayArn sets the TransitGatewayArn field's value. +func (s *TransitGateway) SetTransitGatewayArn(v string) *TransitGateway { + s.TransitGatewayArn = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *TransitGateway) SetTransitGatewayId(v string) *TransitGateway { + s.TransitGatewayId = &v + return s +} + +// Describes an association between a resource attachment and a transit gateway +// route table. +type TransitGatewayAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. 
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"TransitGatewayAssociationState"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayAssociation) SetResourceId(v string) *TransitGatewayAssociation { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayAssociation) SetResourceType(v string) *TransitGatewayAssociation { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayAssociation) SetState(v string) *TransitGatewayAssociation { + s.State = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayAssociation) SetTransitGatewayAttachmentId(v string) *TransitGatewayAssociation { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayAssociation) SetTransitGatewayRouteTableId(v string) *TransitGatewayAssociation { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes an attachment between a resource and a transit gateway. +type TransitGatewayAttachment struct { + _ struct{} `type:"structure"` + + // The association. 
+ Association *TransitGatewayAttachmentAssociation `locationName:"association" type:"structure"` + + // The creation time. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The ID of the AWS account that owns the resource. + ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The attachment state. Note that the initiating state has been deprecated. + State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` + + // The tags for the attachment. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` + + // The ID of the transit gateway. + TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` + + // The ID of the AWS account that owns the transit gateway. + TransitGatewayOwnerId *string `locationName:"transitGatewayOwnerId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayAttachment) GoString() string { + return s.String() +} + +// SetAssociation sets the Association field's value. +func (s *TransitGatewayAttachment) SetAssociation(v *TransitGatewayAttachmentAssociation) *TransitGatewayAttachment { + s.Association = v + return s +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *TransitGatewayAttachment) SetCreationTime(v time.Time) *TransitGatewayAttachment { + s.CreationTime = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayAttachment) SetResourceId(v string) *TransitGatewayAttachment { + s.ResourceId = &v + return s +} + +// SetResourceOwnerId sets the ResourceOwnerId field's value. +func (s *TransitGatewayAttachment) SetResourceOwnerId(v string) *TransitGatewayAttachment { + s.ResourceOwnerId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayAttachment) SetResourceType(v string) *TransitGatewayAttachment { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayAttachment) SetState(v string) *TransitGatewayAttachment { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransitGatewayAttachment) SetTags(v []*Tag) *TransitGatewayAttachment { + s.Tags = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *TransitGatewayAttachment) SetTransitGatewayId(v string) *TransitGatewayAttachment { + s.TransitGatewayId = &v + return s +} + +// SetTransitGatewayOwnerId sets the TransitGatewayOwnerId field's value. +func (s *TransitGatewayAttachment) SetTransitGatewayOwnerId(v string) *TransitGatewayAttachment { + s.TransitGatewayOwnerId = &v + return s +} + +// Describes an association. +type TransitGatewayAttachmentAssociation struct { + _ struct{} `type:"structure"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"TransitGatewayAssociationState"` + + // The ID of the route table for the transit gateway. 
+ TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayAttachmentAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayAttachmentAssociation) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *TransitGatewayAttachmentAssociation) SetState(v string) *TransitGatewayAttachmentAssociation { + s.State = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayAttachmentAssociation) SetTransitGatewayRouteTableId(v string) *TransitGatewayAttachmentAssociation { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes a propagation route table. +type TransitGatewayAttachmentPropagation struct { + _ struct{} `type:"structure"` + + // The state of the propagation route table. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPropagationState"` + + // The ID of the propagation route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayAttachmentPropagation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayAttachmentPropagation) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *TransitGatewayAttachmentPropagation) SetState(v string) *TransitGatewayAttachmentPropagation { + s.State = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. 
+func (s *TransitGatewayAttachmentPropagation) SetTransitGatewayRouteTableId(v string) *TransitGatewayAttachmentPropagation { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes the deregistered transit gateway multicast group members. +type TransitGatewayMulticastDeregisteredGroupMembers struct { + _ struct{} `type:"structure"` + + // The network interface IDs of the deregistered members. + DeregisteredNetworkInterfaceIds []*string `locationName:"deregisteredNetworkInterfaceIds" locationNameList:"item" type:"list"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `locationName:"groupIpAddress" type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastDeregisteredGroupMembers) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastDeregisteredGroupMembers) GoString() string { + return s.String() +} + +// SetDeregisteredNetworkInterfaceIds sets the DeregisteredNetworkInterfaceIds field's value. +func (s *TransitGatewayMulticastDeregisteredGroupMembers) SetDeregisteredNetworkInterfaceIds(v []*string) *TransitGatewayMulticastDeregisteredGroupMembers { + s.DeregisteredNetworkInterfaceIds = v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *TransitGatewayMulticastDeregisteredGroupMembers) SetGroupIpAddress(v string) *TransitGatewayMulticastDeregisteredGroupMembers { + s.GroupIpAddress = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. 
+func (s *TransitGatewayMulticastDeregisteredGroupMembers) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastDeregisteredGroupMembers { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the deregistered transit gateway multicast group sources. +type TransitGatewayMulticastDeregisteredGroupSources struct { + _ struct{} `type:"structure"` + + // The network interface IDs of the non-registered members. + DeregisteredNetworkInterfaceIds []*string `locationName:"deregisteredNetworkInterfaceIds" locationNameList:"item" type:"list"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `locationName:"groupIpAddress" type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastDeregisteredGroupSources) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastDeregisteredGroupSources) GoString() string { + return s.String() +} + +// SetDeregisteredNetworkInterfaceIds sets the DeregisteredNetworkInterfaceIds field's value. +func (s *TransitGatewayMulticastDeregisteredGroupSources) SetDeregisteredNetworkInterfaceIds(v []*string) *TransitGatewayMulticastDeregisteredGroupSources { + s.DeregisteredNetworkInterfaceIds = v + return s +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *TransitGatewayMulticastDeregisteredGroupSources) SetGroupIpAddress(v string) *TransitGatewayMulticastDeregisteredGroupSources { + s.GroupIpAddress = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. 
+func (s *TransitGatewayMulticastDeregisteredGroupSources) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastDeregisteredGroupSources { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the transit gateway multicast domain. +type TransitGatewayMulticastDomain struct { + _ struct{} `type:"structure"` + + // The time the transit gateway multicast domain was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The state of the transit gateway multicast domain. + State *string `locationName:"state" type:"string" enum:"TransitGatewayMulticastDomainState"` + + // The tags for the transit gateway multicast domain. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the transit gateway. + TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastDomain) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastDomain) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransitGatewayMulticastDomain) SetCreationTime(v time.Time) *TransitGatewayMulticastDomain { + s.CreationTime = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayMulticastDomain) SetState(v string) *TransitGatewayMulticastDomain { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransitGatewayMulticastDomain) SetTags(v []*Tag) *TransitGatewayMulticastDomain { + s.Tags = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. 
+func (s *TransitGatewayMulticastDomain) SetTransitGatewayId(v string) *TransitGatewayMulticastDomain { + s.TransitGatewayId = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *TransitGatewayMulticastDomain) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastDomain { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the resources associated with the transit gateway multicast domain. +type TransitGatewayMulticastDomainAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of resource, for example a VPC attachment. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The subnet associated with the transit gateway multicast domain. + Subnet *SubnetAssociation `locationName:"subnet" type:"structure"` + + // The ID of the transit gateway attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastDomainAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastDomainAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayMulticastDomainAssociation) SetResourceId(v string) *TransitGatewayMulticastDomainAssociation { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayMulticastDomainAssociation) SetResourceType(v string) *TransitGatewayMulticastDomainAssociation { + s.ResourceType = &v + return s +} + +// SetSubnet sets the Subnet field's value. 
+func (s *TransitGatewayMulticastDomainAssociation) SetSubnet(v *SubnetAssociation) *TransitGatewayMulticastDomainAssociation { + s.Subnet = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayMulticastDomainAssociation) SetTransitGatewayAttachmentId(v string) *TransitGatewayMulticastDomainAssociation { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes the multicast domain associations. +type TransitGatewayMulticastDomainAssociations struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of resource, for example a VPC attachment. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The subnets associated with the multicast domain. + Subnets []*SubnetAssociation `locationName:"subnets" locationNameList:"item" type:"list"` + + // The ID of the transit gateway attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastDomainAssociations) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastDomainAssociations) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayMulticastDomainAssociations) SetResourceId(v string) *TransitGatewayMulticastDomainAssociations { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *TransitGatewayMulticastDomainAssociations) SetResourceType(v string) *TransitGatewayMulticastDomainAssociations { + s.ResourceType = &v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *TransitGatewayMulticastDomainAssociations) SetSubnets(v []*SubnetAssociation) *TransitGatewayMulticastDomainAssociations { + s.Subnets = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayMulticastDomainAssociations) SetTransitGatewayAttachmentId(v string) *TransitGatewayMulticastDomainAssociations { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *TransitGatewayMulticastDomainAssociations) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastDomainAssociations { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the transit gateway multicast group resources. +type TransitGatewayMulticastGroup struct { + _ struct{} `type:"structure"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `locationName:"groupIpAddress" type:"string"` + + // Indicates that the resource is a transit gateway multicast group member. + GroupMember *bool `locationName:"groupMember" type:"boolean"` + + // Indicates that the resource is a transit gateway multicast group member. + GroupSource *bool `locationName:"groupSource" type:"boolean"` + + // The member type (for example, static). + MemberType *string `locationName:"memberType" type:"string" enum:"MembershipType"` + + // The ID of the transit gateway attachment. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of resource, for example a VPC attachment. 
+ ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The source type. + SourceType *string `locationName:"sourceType" type:"string" enum:"MembershipType"` + + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" type:"string"` + + // The ID of the transit gateway attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastGroup) GoString() string { + return s.String() +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. +func (s *TransitGatewayMulticastGroup) SetGroupIpAddress(v string) *TransitGatewayMulticastGroup { + s.GroupIpAddress = &v + return s +} + +// SetGroupMember sets the GroupMember field's value. +func (s *TransitGatewayMulticastGroup) SetGroupMember(v bool) *TransitGatewayMulticastGroup { + s.GroupMember = &v + return s +} + +// SetGroupSource sets the GroupSource field's value. +func (s *TransitGatewayMulticastGroup) SetGroupSource(v bool) *TransitGatewayMulticastGroup { + s.GroupSource = &v + return s +} + +// SetMemberType sets the MemberType field's value. +func (s *TransitGatewayMulticastGroup) SetMemberType(v string) *TransitGatewayMulticastGroup { + s.MemberType = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *TransitGatewayMulticastGroup) SetNetworkInterfaceId(v string) *TransitGatewayMulticastGroup { + s.NetworkInterfaceId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayMulticastGroup) SetResourceId(v string) *TransitGatewayMulticastGroup { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *TransitGatewayMulticastGroup) SetResourceType(v string) *TransitGatewayMulticastGroup { + s.ResourceType = &v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *TransitGatewayMulticastGroup) SetSourceType(v string) *TransitGatewayMulticastGroup { + s.SourceType = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *TransitGatewayMulticastGroup) SetSubnetId(v string) *TransitGatewayMulticastGroup { + s.SubnetId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayMulticastGroup) SetTransitGatewayAttachmentId(v string) *TransitGatewayMulticastGroup { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes the registered transit gateway multicast group members. +type TransitGatewayMulticastRegisteredGroupMembers struct { + _ struct{} `type:"structure"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `locationName:"groupIpAddress" type:"string"` + + // The ID of the registered network interfaces. + RegisteredNetworkInterfaceIds []*string `locationName:"registeredNetworkInterfaceIds" locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastRegisteredGroupMembers) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastRegisteredGroupMembers) GoString() string { + return s.String() +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. 
+func (s *TransitGatewayMulticastRegisteredGroupMembers) SetGroupIpAddress(v string) *TransitGatewayMulticastRegisteredGroupMembers { + s.GroupIpAddress = &v + return s +} + +// SetRegisteredNetworkInterfaceIds sets the RegisteredNetworkInterfaceIds field's value. +func (s *TransitGatewayMulticastRegisteredGroupMembers) SetRegisteredNetworkInterfaceIds(v []*string) *TransitGatewayMulticastRegisteredGroupMembers { + s.RegisteredNetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *TransitGatewayMulticastRegisteredGroupMembers) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastRegisteredGroupMembers { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the members registered with the transit gateway multicast group. +type TransitGatewayMulticastRegisteredGroupSources struct { + _ struct{} `type:"structure"` + + // The IP address assigned to the transit gateway multicast group. + GroupIpAddress *string `locationName:"groupIpAddress" type:"string"` + + // The IDs of the network interfaces members registered with the transit gateway + // multicast group. + RegisteredNetworkInterfaceIds []*string `locationName:"registeredNetworkInterfaceIds" locationNameList:"item" type:"list"` + + // The ID of the transit gateway multicast domain. + TransitGatewayMulticastDomainId *string `locationName:"transitGatewayMulticastDomainId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayMulticastRegisteredGroupSources) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayMulticastRegisteredGroupSources) GoString() string { + return s.String() +} + +// SetGroupIpAddress sets the GroupIpAddress field's value. 
+func (s *TransitGatewayMulticastRegisteredGroupSources) SetGroupIpAddress(v string) *TransitGatewayMulticastRegisteredGroupSources { + s.GroupIpAddress = &v + return s +} + +// SetRegisteredNetworkInterfaceIds sets the RegisteredNetworkInterfaceIds field's value. +func (s *TransitGatewayMulticastRegisteredGroupSources) SetRegisteredNetworkInterfaceIds(v []*string) *TransitGatewayMulticastRegisteredGroupSources { + s.RegisteredNetworkInterfaceIds = v + return s +} + +// SetTransitGatewayMulticastDomainId sets the TransitGatewayMulticastDomainId field's value. +func (s *TransitGatewayMulticastRegisteredGroupSources) SetTransitGatewayMulticastDomainId(v string) *TransitGatewayMulticastRegisteredGroupSources { + s.TransitGatewayMulticastDomainId = &v + return s +} + +// Describes the options for a transit gateway. +type TransitGatewayOptions struct { + _ struct{} `type:"structure"` + + // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + // The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 + // for 32-bit ASNs. + AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"` + + // The ID of the default association route table. + AssociationDefaultRouteTableId *string `locationName:"associationDefaultRouteTableId" type:"string"` + + // Indicates whether attachment requests are automatically accepted. + AutoAcceptSharedAttachments *string `locationName:"autoAcceptSharedAttachments" type:"string" enum:"AutoAcceptSharedAttachmentsValue"` + + // Indicates whether resource attachments are automatically associated with + // the default association route table. + DefaultRouteTableAssociation *string `locationName:"defaultRouteTableAssociation" type:"string" enum:"DefaultRouteTableAssociationValue"` + + // Indicates whether resource attachments automatically propagate routes to + // the default propagation route table. 
+ DefaultRouteTablePropagation *string `locationName:"defaultRouteTablePropagation" type:"string" enum:"DefaultRouteTablePropagationValue"` + + // Indicates whether DNS support is enabled. + DnsSupport *string `locationName:"dnsSupport" type:"string" enum:"DnsSupportValue"` + + // Indicates whether multicast is enabled on the transit gateway + MulticastSupport *string `locationName:"multicastSupport" type:"string" enum:"MulticastSupportValue"` + + // The ID of the default propagation route table. + PropagationDefaultRouteTableId *string `locationName:"propagationDefaultRouteTableId" type:"string"` + + // Indicates whether Equal Cost Multipath Protocol support is enabled. + VpnEcmpSupport *string `locationName:"vpnEcmpSupport" type:"string" enum:"VpnEcmpSupportValue"` +} + +// String returns the string representation +func (s TransitGatewayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayOptions) GoString() string { + return s.String() +} + +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *TransitGatewayOptions) SetAmazonSideAsn(v int64) *TransitGatewayOptions { + s.AmazonSideAsn = &v + return s +} + +// SetAssociationDefaultRouteTableId sets the AssociationDefaultRouteTableId field's value. +func (s *TransitGatewayOptions) SetAssociationDefaultRouteTableId(v string) *TransitGatewayOptions { + s.AssociationDefaultRouteTableId = &v + return s +} + +// SetAutoAcceptSharedAttachments sets the AutoAcceptSharedAttachments field's value. +func (s *TransitGatewayOptions) SetAutoAcceptSharedAttachments(v string) *TransitGatewayOptions { + s.AutoAcceptSharedAttachments = &v + return s +} + +// SetDefaultRouteTableAssociation sets the DefaultRouteTableAssociation field's value. 
+func (s *TransitGatewayOptions) SetDefaultRouteTableAssociation(v string) *TransitGatewayOptions { + s.DefaultRouteTableAssociation = &v + return s +} + +// SetDefaultRouteTablePropagation sets the DefaultRouteTablePropagation field's value. +func (s *TransitGatewayOptions) SetDefaultRouteTablePropagation(v string) *TransitGatewayOptions { + s.DefaultRouteTablePropagation = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *TransitGatewayOptions) SetDnsSupport(v string) *TransitGatewayOptions { + s.DnsSupport = &v + return s +} + +// SetMulticastSupport sets the MulticastSupport field's value. +func (s *TransitGatewayOptions) SetMulticastSupport(v string) *TransitGatewayOptions { + s.MulticastSupport = &v + return s +} + +// SetPropagationDefaultRouteTableId sets the PropagationDefaultRouteTableId field's value. +func (s *TransitGatewayOptions) SetPropagationDefaultRouteTableId(v string) *TransitGatewayOptions { + s.PropagationDefaultRouteTableId = &v + return s +} + +// SetVpnEcmpSupport sets the VpnEcmpSupport field's value. +func (s *TransitGatewayOptions) SetVpnEcmpSupport(v string) *TransitGatewayOptions { + s.VpnEcmpSupport = &v + return s +} + +// Describes the transit gateway peering attachment. +type TransitGatewayPeeringAttachment struct { + _ struct{} `type:"structure"` + + // Information about the accepter transit gateway. + AccepterTgwInfo *PeeringTgwInfo `locationName:"accepterTgwInfo" type:"structure"` + + // The time the transit gateway peering attachment was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // Information about the requester transit gateway. + RequesterTgwInfo *PeeringTgwInfo `locationName:"requesterTgwInfo" type:"structure"` + + // The state of the transit gateway peering attachment. Note that the initiating + // state has been deprecated. 
+ State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` + + // The status of the transit gateway peering attachment. + Status *PeeringAttachmentStatus `locationName:"status" type:"structure"` + + // The tags for the transit gateway peering attachment. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the transit gateway peering attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPeeringAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPeeringAttachment) GoString() string { + return s.String() +} + +// SetAccepterTgwInfo sets the AccepterTgwInfo field's value. +func (s *TransitGatewayPeeringAttachment) SetAccepterTgwInfo(v *PeeringTgwInfo) *TransitGatewayPeeringAttachment { + s.AccepterTgwInfo = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransitGatewayPeeringAttachment) SetCreationTime(v time.Time) *TransitGatewayPeeringAttachment { + s.CreationTime = &v + return s +} + +// SetRequesterTgwInfo sets the RequesterTgwInfo field's value. +func (s *TransitGatewayPeeringAttachment) SetRequesterTgwInfo(v *PeeringTgwInfo) *TransitGatewayPeeringAttachment { + s.RequesterTgwInfo = v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayPeeringAttachment) SetState(v string) *TransitGatewayPeeringAttachment { + s.State = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *TransitGatewayPeeringAttachment) SetStatus(v *PeeringAttachmentStatus) *TransitGatewayPeeringAttachment { + s.Status = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *TransitGatewayPeeringAttachment) SetTags(v []*Tag) *TransitGatewayPeeringAttachment { + s.Tags = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayPeeringAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayPeeringAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a transit gateway prefix list attachment. +type TransitGatewayPrefixListAttachment struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListAttachment) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceId(v string) *TransitGatewayPrefixListAttachment { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceType(v string) *TransitGatewayPrefixListAttachment { + s.ResourceType = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayPrefixListAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayPrefixListAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a prefix list reference. 
+type TransitGatewayPrefixListReference struct { + _ struct{} `type:"structure"` + + // Indicates whether traffic that matches this route is dropped. + Blackhole *bool `locationName:"blackhole" type:"boolean"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The ID of the prefix list owner. + PrefixListOwnerId *string `locationName:"prefixListOwnerId" type:"string"` + + // The state of the prefix list reference. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPrefixListReferenceState"` + + // Information about the transit gateway attachment. + TransitGatewayAttachment *TransitGatewayPrefixListAttachment `locationName:"transitGatewayAttachment" type:"structure"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListReference) GoString() string { + return s.String() +} + +// SetBlackhole sets the Blackhole field's value. +func (s *TransitGatewayPrefixListReference) SetBlackhole(v bool) *TransitGatewayPrefixListReference { + s.Blackhole = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListId(v string) *TransitGatewayPrefixListReference { + s.PrefixListId = &v + return s +} + +// SetPrefixListOwnerId sets the PrefixListOwnerId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListOwnerId(v string) *TransitGatewayPrefixListReference { + s.PrefixListOwnerId = &v + return s +} + +// SetState sets the State field's value. 
+func (s *TransitGatewayPrefixListReference) SetState(v string) *TransitGatewayPrefixListReference { + s.State = &v + return s +} + +// SetTransitGatewayAttachment sets the TransitGatewayAttachment field's value. +func (s *TransitGatewayPrefixListReference) SetTransitGatewayAttachment(v *TransitGatewayPrefixListAttachment) *TransitGatewayPrefixListReference { + s.TransitGatewayAttachment = v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayPrefixListReference) SetTransitGatewayRouteTableId(v string) *TransitGatewayPrefixListReference { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes route propagation. +type TransitGatewayPropagation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The state. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPropagationState"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPropagation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPropagation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayPropagation) SetResourceId(v string) *TransitGatewayPropagation { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *TransitGatewayPropagation) SetResourceType(v string) *TransitGatewayPropagation { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayPropagation) SetState(v string) *TransitGatewayPropagation { + s.State = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayPropagation) SetTransitGatewayAttachmentId(v string) *TransitGatewayPropagation { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayPropagation) SetTransitGatewayRouteTableId(v string) *TransitGatewayPropagation { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes the options for a transit gateway. +type TransitGatewayRequestOptions struct { + _ struct{} `type:"structure"` + + // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + // The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 + // for 32-bit ASNs. The default is 64512. + AmazonSideAsn *int64 `type:"long"` + + // Enable or disable automatic acceptance of attachment requests. Disabled by + // default. + AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` + + // Enable or disable automatic association with the default association route + // table. Enabled by default. + DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` + + // Enable or disable automatic propagation of routes to the default propagation + // route table. Enabled by default. + DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` + + // Enable or disable DNS support. Enabled by default. 
+ DnsSupport *string `type:"string" enum:"DnsSupportValue"` + + // Indicates whether multicast is enabled on the transit gateway + MulticastSupport *string `type:"string" enum:"MulticastSupportValue"` + + // Enable or disable Equal Cost Multipath Protocol support. Enabled by default. + VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` +} + +// String returns the string representation +func (s TransitGatewayRequestOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRequestOptions) GoString() string { + return s.String() +} + +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *TransitGatewayRequestOptions) SetAmazonSideAsn(v int64) *TransitGatewayRequestOptions { + s.AmazonSideAsn = &v + return s +} + +// SetAutoAcceptSharedAttachments sets the AutoAcceptSharedAttachments field's value. +func (s *TransitGatewayRequestOptions) SetAutoAcceptSharedAttachments(v string) *TransitGatewayRequestOptions { + s.AutoAcceptSharedAttachments = &v + return s +} + +// SetDefaultRouteTableAssociation sets the DefaultRouteTableAssociation field's value. +func (s *TransitGatewayRequestOptions) SetDefaultRouteTableAssociation(v string) *TransitGatewayRequestOptions { + s.DefaultRouteTableAssociation = &v + return s +} + +// SetDefaultRouteTablePropagation sets the DefaultRouteTablePropagation field's value. +func (s *TransitGatewayRequestOptions) SetDefaultRouteTablePropagation(v string) *TransitGatewayRequestOptions { + s.DefaultRouteTablePropagation = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *TransitGatewayRequestOptions) SetDnsSupport(v string) *TransitGatewayRequestOptions { + s.DnsSupport = &v + return s +} + +// SetMulticastSupport sets the MulticastSupport field's value. 
+func (s *TransitGatewayRequestOptions) SetMulticastSupport(v string) *TransitGatewayRequestOptions { + s.MulticastSupport = &v + return s +} + +// SetVpnEcmpSupport sets the VpnEcmpSupport field's value. +func (s *TransitGatewayRequestOptions) SetVpnEcmpSupport(v string) *TransitGatewayRequestOptions { + s.VpnEcmpSupport = &v + return s +} + +// Describes a route for a transit gateway route table. +type TransitGatewayRoute struct { + _ struct{} `type:"structure"` + + // The CIDR block used for destination matches. + DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + + // The ID of the prefix list used for destination matches. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The state of the route. + State *string `locationName:"state" type:"string" enum:"TransitGatewayRouteState"` + + // The attachments. + TransitGatewayAttachments []*TransitGatewayRouteAttachment `locationName:"transitGatewayAttachments" locationNameList:"item" type:"list"` + + // The route type. + Type *string `locationName:"type" type:"string" enum:"TransitGatewayRouteType"` +} + +// String returns the string representation +func (s TransitGatewayRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRoute) GoString() string { + return s.String() +} + +// SetDestinationCidrBlock sets the DestinationCidrBlock field's value. +func (s *TransitGatewayRoute) SetDestinationCidrBlock(v string) *TransitGatewayRoute { + s.DestinationCidrBlock = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayRoute) SetPrefixListId(v string) *TransitGatewayRoute { + s.PrefixListId = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayRoute) SetState(v string) *TransitGatewayRoute { + s.State = &v + return s +} + +// SetTransitGatewayAttachments sets the TransitGatewayAttachments field's value. 
+func (s *TransitGatewayRoute) SetTransitGatewayAttachments(v []*TransitGatewayRouteAttachment) *TransitGatewayRoute { + s.TransitGatewayAttachments = v + return s +} + +// SetType sets the Type field's value. +func (s *TransitGatewayRoute) SetType(v string) *TransitGatewayRoute { + s.Type = &v + return s +} + +// Describes a route attachment. +type TransitGatewayRouteAttachment struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayRouteAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRouteAttachment) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayRouteAttachment) SetResourceId(v string) *TransitGatewayRouteAttachment { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayRouteAttachment) SetResourceType(v string) *TransitGatewayRouteAttachment { + s.ResourceType = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayRouteAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayRouteAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a transit gateway route table. +type TransitGatewayRouteTable struct { + _ struct{} `type:"structure"` + + // The creation time. 
+ CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // Indicates whether this is the default association route table for the transit + // gateway. + DefaultAssociationRouteTable *bool `locationName:"defaultAssociationRouteTable" type:"boolean"` + + // Indicates whether this is the default propagation route table for the transit + // gateway. + DefaultPropagationRouteTable *bool `locationName:"defaultPropagationRouteTable" type:"boolean"` + + // The state of the transit gateway route table. + State *string `locationName:"state" type:"string" enum:"TransitGatewayRouteTableState"` + + // Any tags assigned to the route table. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the transit gateway. + TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayRouteTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRouteTable) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransitGatewayRouteTable) SetCreationTime(v time.Time) *TransitGatewayRouteTable { + s.CreationTime = &v + return s +} + +// SetDefaultAssociationRouteTable sets the DefaultAssociationRouteTable field's value. +func (s *TransitGatewayRouteTable) SetDefaultAssociationRouteTable(v bool) *TransitGatewayRouteTable { + s.DefaultAssociationRouteTable = &v + return s +} + +// SetDefaultPropagationRouteTable sets the DefaultPropagationRouteTable field's value. +func (s *TransitGatewayRouteTable) SetDefaultPropagationRouteTable(v bool) *TransitGatewayRouteTable { + s.DefaultPropagationRouteTable = &v + return s +} + +// SetState sets the State field's value. 
+func (s *TransitGatewayRouteTable) SetState(v string) *TransitGatewayRouteTable { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransitGatewayRouteTable) SetTags(v []*Tag) *TransitGatewayRouteTable { + s.Tags = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *TransitGatewayRouteTable) SetTransitGatewayId(v string) *TransitGatewayRouteTable { + s.TransitGatewayId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayRouteTable) SetTransitGatewayRouteTableId(v string) *TransitGatewayRouteTable { + s.TransitGatewayRouteTableId = &v + return s +} + +// Describes an association between a route table and a resource attachment. +type TransitGatewayRouteTableAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"TransitGatewayAssociationState"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayRouteTableAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRouteTableAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayRouteTableAssociation) SetResourceId(v string) *TransitGatewayRouteTableAssociation { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *TransitGatewayRouteTableAssociation) SetResourceType(v string) *TransitGatewayRouteTableAssociation { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayRouteTableAssociation) SetState(v string) *TransitGatewayRouteTableAssociation { + s.State = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayRouteTableAssociation) SetTransitGatewayAttachmentId(v string) *TransitGatewayRouteTableAssociation { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a route table propagation. +type TransitGatewayRouteTablePropagation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The type of resource. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The state of the resource. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPropagationState"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayRouteTablePropagation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayRouteTablePropagation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayRouteTablePropagation) SetResourceId(v string) *TransitGatewayRouteTablePropagation { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayRouteTablePropagation) SetResourceType(v string) *TransitGatewayRouteTablePropagation { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. 
+func (s *TransitGatewayRouteTablePropagation) SetState(v string) *TransitGatewayRouteTablePropagation { + s.State = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayRouteTablePropagation) SetTransitGatewayAttachmentId(v string) *TransitGatewayRouteTablePropagation { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a VPC attachment. +type TransitGatewayVpcAttachment struct { + _ struct{} `type:"structure"` + + // The creation time. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + + // The VPC attachment options. + Options *TransitGatewayVpcAttachmentOptions `locationName:"options" type:"structure"` + + // The state of the VPC attachment. Note that the initiating state has been + // deprecated. + State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` + + // The IDs of the subnets. + SubnetIds []*string `locationName:"subnetIds" locationNameList:"item" type:"list"` + + // The tags for the VPC attachment. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` + + // The ID of the transit gateway. + TransitGatewayId *string `locationName:"transitGatewayId" type:"string"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + // The ID of the AWS account that owns the VPC. + VpcOwnerId *string `locationName:"vpcOwnerId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayVpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayVpcAttachment) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. 
+func (s *TransitGatewayVpcAttachment) SetCreationTime(v time.Time) *TransitGatewayVpcAttachment { + s.CreationTime = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *TransitGatewayVpcAttachment) SetOptions(v *TransitGatewayVpcAttachmentOptions) *TransitGatewayVpcAttachment { + s.Options = v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayVpcAttachment) SetState(v string) *TransitGatewayVpcAttachment { + s.State = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *TransitGatewayVpcAttachment) SetSubnetIds(v []*string) *TransitGatewayVpcAttachment { + s.SubnetIds = v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransitGatewayVpcAttachment) SetTags(v []*Tag) *TransitGatewayVpcAttachment { + s.Tags = v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayVpcAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayVpcAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *TransitGatewayVpcAttachment) SetTransitGatewayId(v string) *TransitGatewayVpcAttachment { + s.TransitGatewayId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *TransitGatewayVpcAttachment) SetVpcId(v string) *TransitGatewayVpcAttachment { + s.VpcId = &v + return s +} + +// SetVpcOwnerId sets the VpcOwnerId field's value. +func (s *TransitGatewayVpcAttachment) SetVpcOwnerId(v string) *TransitGatewayVpcAttachment { + s.VpcOwnerId = &v + return s +} + +// Describes the VPC attachment options. +type TransitGatewayVpcAttachmentOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether appliance mode support is enabled. + ApplianceModeSupport *string `locationName:"applianceModeSupport" type:"string" enum:"ApplianceModeSupportValue"` + + // Indicates whether DNS support is enabled. 
+ DnsSupport *string `locationName:"dnsSupport" type:"string" enum:"DnsSupportValue"` + + // Indicates whether IPv6 support is disabled. + Ipv6Support *string `locationName:"ipv6Support" type:"string" enum:"Ipv6SupportValue"` +} + +// String returns the string representation +func (s TransitGatewayVpcAttachmentOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayVpcAttachmentOptions) GoString() string { + return s.String() +} + +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *TransitGatewayVpcAttachmentOptions) SetApplianceModeSupport(v string) *TransitGatewayVpcAttachmentOptions { + s.ApplianceModeSupport = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *TransitGatewayVpcAttachmentOptions) SetDnsSupport(v string) *TransitGatewayVpcAttachmentOptions { + s.DnsSupport = &v + return s +} + +// SetIpv6Support sets the Ipv6Support field's value. +func (s *TransitGatewayVpcAttachmentOptions) SetIpv6Support(v string) *TransitGatewayVpcAttachmentOptions { + s.Ipv6Support = &v + return s +} + +// The VPN tunnel options. +type TunnelOption struct { + _ struct{} `type:"structure"` + + // The action to take after a DPD timeout occurs. + DpdTimeoutAction *string `locationName:"dpdTimeoutAction" type:"string"` + + // The number of seconds after which a DPD timeout occurs. + DpdTimeoutSeconds *int64 `locationName:"dpdTimeoutSeconds" type:"integer"` + + // The IKE versions that are permitted for the VPN tunnel. + IkeVersions []*IKEVersionsListValue `locationName:"ikeVersionSet" locationNameList:"item" type:"list"` + + // The external IP address of the VPN tunnel. + OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"` + + // The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 1 + // IKE negotiations. 
+ Phase1DHGroupNumbers []*Phase1DHGroupNumbersListValue `locationName:"phase1DHGroupNumberSet" locationNameList:"item" type:"list"` + + // The permitted encryption algorithms for the VPN tunnel for phase 1 IKE negotiations. + Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsListValue `locationName:"phase1EncryptionAlgorithmSet" locationNameList:"item" type:"list"` + + // The permitted integrity algorithms for the VPN tunnel for phase 1 IKE negotiations. + Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsListValue `locationName:"phase1IntegrityAlgorithmSet" locationNameList:"item" type:"list"` + + // The lifetime for phase 1 of the IKE negotiation, in seconds. + Phase1LifetimeSeconds *int64 `locationName:"phase1LifetimeSeconds" type:"integer"` + + // The permitted Diffie-Hellman group numbers for the VPN tunnel for phase 2 + // IKE negotiations. + Phase2DHGroupNumbers []*Phase2DHGroupNumbersListValue `locationName:"phase2DHGroupNumberSet" locationNameList:"item" type:"list"` + + // The permitted encryption algorithms for the VPN tunnel for phase 2 IKE negotiations. + Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsListValue `locationName:"phase2EncryptionAlgorithmSet" locationNameList:"item" type:"list"` + + // The permitted integrity algorithms for the VPN tunnel for phase 2 IKE negotiations. + Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsListValue `locationName:"phase2IntegrityAlgorithmSet" locationNameList:"item" type:"list"` + + // The lifetime for phase 2 of the IKE negotiation, in seconds. + Phase2LifetimeSeconds *int64 `locationName:"phase2LifetimeSeconds" type:"integer"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and the customer gateway. + PreSharedKey *string `locationName:"preSharedKey" type:"string"` + + // The percentage of the rekey window determined by RekeyMarginTimeSeconds during + // which the rekey time is randomly selected. 
+	RekeyFuzzPercentage *int64 `locationName:"rekeyFuzzPercentage" type:"integer"` + + // The margin time, in seconds, before the phase 2 lifetime expires, during + // which the AWS side of the VPN connection performs an IKE rekey. + RekeyMarginTimeSeconds *int64 `locationName:"rekeyMarginTimeSeconds" type:"integer"` + + // The number of packets in an IKE replay window. + ReplayWindowSize *int64 `locationName:"replayWindowSize" type:"integer"` + + // The action to take when establishing the VPN tunnels for a VPN connection. + StartupAction *string `locationName:"startupAction" type:"string"` + + // The range of inside IPv4 addresses for the tunnel. + TunnelInsideCidr *string `locationName:"tunnelInsideCidr" type:"string"` + + // The range of inside IPv6 addresses for the tunnel. + TunnelInsideIpv6Cidr *string `locationName:"tunnelInsideIpv6Cidr" type:"string"` +} + +// String returns the string representation +func (s TunnelOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TunnelOption) GoString() string { + return s.String() +} + +// SetDpdTimeoutAction sets the DpdTimeoutAction field's value. +func (s *TunnelOption) SetDpdTimeoutAction(v string) *TunnelOption { + s.DpdTimeoutAction = &v + return s +} + +// SetDpdTimeoutSeconds sets the DpdTimeoutSeconds field's value. +func (s *TunnelOption) SetDpdTimeoutSeconds(v int64) *TunnelOption { + s.DpdTimeoutSeconds = &v + return s +} + +// SetIkeVersions sets the IkeVersions field's value. +func (s *TunnelOption) SetIkeVersions(v []*IKEVersionsListValue) *TunnelOption { + s.IkeVersions = v + return s +} + +// SetOutsideIpAddress sets the OutsideIpAddress field's value. +func (s *TunnelOption) SetOutsideIpAddress(v string) *TunnelOption { + s.OutsideIpAddress = &v + return s +} + +// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value. 
+func (s *TunnelOption) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersListValue) *TunnelOption { + s.Phase1DHGroupNumbers = v + return s +} + +// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value. +func (s *TunnelOption) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsListValue) *TunnelOption { + s.Phase1EncryptionAlgorithms = v + return s +} + +// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value. +func (s *TunnelOption) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsListValue) *TunnelOption { + s.Phase1IntegrityAlgorithms = v + return s +} + +// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value. +func (s *TunnelOption) SetPhase1LifetimeSeconds(v int64) *TunnelOption { + s.Phase1LifetimeSeconds = &v + return s +} + +// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value. +func (s *TunnelOption) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersListValue) *TunnelOption { + s.Phase2DHGroupNumbers = v + return s +} + +// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value. +func (s *TunnelOption) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsListValue) *TunnelOption { + s.Phase2EncryptionAlgorithms = v + return s +} + +// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value. +func (s *TunnelOption) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsListValue) *TunnelOption { + s.Phase2IntegrityAlgorithms = v + return s +} + +// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value. +func (s *TunnelOption) SetPhase2LifetimeSeconds(v int64) *TunnelOption { + s.Phase2LifetimeSeconds = &v + return s +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *TunnelOption) SetPreSharedKey(v string) *TunnelOption { + s.PreSharedKey = &v + return s +} + +// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value. 
+func (s *TunnelOption) SetRekeyFuzzPercentage(v int64) *TunnelOption { + s.RekeyFuzzPercentage = &v + return s +} + +// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value. +func (s *TunnelOption) SetRekeyMarginTimeSeconds(v int64) *TunnelOption { + s.RekeyMarginTimeSeconds = &v + return s +} + +// SetReplayWindowSize sets the ReplayWindowSize field's value. +func (s *TunnelOption) SetReplayWindowSize(v int64) *TunnelOption { + s.ReplayWindowSize = &v + return s +} + +// SetStartupAction sets the StartupAction field's value. +func (s *TunnelOption) SetStartupAction(v string) *TunnelOption { + s.StartupAction = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. +func (s *TunnelOption) SetTunnelInsideCidr(v string) *TunnelOption { + s.TunnelInsideCidr = &v + return s +} + +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *TunnelOption) SetTunnelInsideIpv6Cidr(v string) *TunnelOption { + s.TunnelInsideIpv6Cidr = &v + return s +} + +type UnassignIpv6AddressesInput struct { + _ struct{} `type:"structure"` + + // The IPv6 addresses to unassign from the network interface. + // + // Ipv6Addresses is a required field + Ipv6Addresses []*string `locationName:"ipv6Addresses" locationNameList:"item" type:"list" required:"true"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UnassignIpv6AddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignIpv6AddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UnassignIpv6AddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UnassignIpv6AddressesInput"} + if s.Ipv6Addresses == nil { + invalidParams.Add(request.NewErrParamRequired("Ipv6Addresses")) + } + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpv6Addresses sets the Ipv6Addresses field's value. +func (s *UnassignIpv6AddressesInput) SetIpv6Addresses(v []*string) *UnassignIpv6AddressesInput { + s.Ipv6Addresses = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *UnassignIpv6AddressesInput) SetNetworkInterfaceId(v string) *UnassignIpv6AddressesInput { + s.NetworkInterfaceId = &v + return s +} + +type UnassignIpv6AddressesOutput struct { + _ struct{} `type:"structure"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The IPv6 addresses that have been unassigned from the network interface. + UnassignedIpv6Addresses []*string `locationName:"unassignedIpv6Addresses" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s UnassignIpv6AddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnassignIpv6AddressesOutput) GoString() string { + return s.String() +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *UnassignIpv6AddressesOutput) SetNetworkInterfaceId(v string) *UnassignIpv6AddressesOutput { + s.NetworkInterfaceId = &v + return s +} + +// SetUnassignedIpv6Addresses sets the UnassignedIpv6Addresses field's value. 
func (s *UnassignIpv6AddressesOutput) SetUnassignedIpv6Addresses(v []*string) *UnassignIpv6AddressesOutput {
	s.UnassignedIpv6Addresses = v
	return s
}

// Contains the parameters for UnassignPrivateIpAddresses.
type UnassignPrivateIpAddressesInput struct {
	_ struct{} `type:"structure"`

	// The ID of the network interface.
	//
	// NetworkInterfaceId is a required field
	NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string" required:"true"`

	// The secondary private IP addresses to unassign from the network interface.
	// You can specify this option multiple times to unassign more than one IP address.
	//
	// PrivateIpAddresses is a required field
	PrivateIpAddresses []*string `locationName:"privateIpAddress" locationNameList:"PrivateIpAddress" type:"list" required:"true"`
}

// String returns the string representation
func (s UnassignPrivateIpAddressesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnassignPrivateIpAddressesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UnassignPrivateIpAddressesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UnassignPrivateIpAddressesInput"}
	if s.NetworkInterfaceId == nil {
		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
	}
	if s.PrivateIpAddresses == nil {
		invalidParams.Add(request.NewErrParamRequired("PrivateIpAddresses"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *UnassignPrivateIpAddressesInput) SetNetworkInterfaceId(v string) *UnassignPrivateIpAddressesInput {
	s.NetworkInterfaceId = &v
	return s
}

// SetPrivateIpAddresses sets the PrivateIpAddresses field's value.
func (s *UnassignPrivateIpAddressesInput) SetPrivateIpAddresses(v []*string) *UnassignPrivateIpAddressesInput {
	s.PrivateIpAddresses = v
	return s
}

type UnassignPrivateIpAddressesOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UnassignPrivateIpAddressesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnassignPrivateIpAddressesOutput) GoString() string {
	return s.String()
}

type UnmonitorInstancesInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `locationName:"dryRun" type:"boolean"`

	// The IDs of the instances.
	//
	// InstanceIds is a required field
	InstanceIds []*string `locationName:"InstanceId" locationNameList:"InstanceId" type:"list" required:"true"`
}

// String returns the string representation
func (s UnmonitorInstancesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnmonitorInstancesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UnmonitorInstancesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UnmonitorInstancesInput"}
	if s.InstanceIds == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceIds"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *UnmonitorInstancesInput) SetDryRun(v bool) *UnmonitorInstancesInput {
	s.DryRun = &v
	return s
}

// SetInstanceIds sets the InstanceIds field's value.
func (s *UnmonitorInstancesInput) SetInstanceIds(v []*string) *UnmonitorInstancesInput {
	s.InstanceIds = v
	return s
}

type UnmonitorInstancesOutput struct {
	_ struct{} `type:"structure"`

	// The monitoring information.
	InstanceMonitorings []*InstanceMonitoring `locationName:"instancesSet" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s UnmonitorInstancesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnmonitorInstancesOutput) GoString() string {
	return s.String()
}

// SetInstanceMonitorings sets the InstanceMonitorings field's value.
func (s *UnmonitorInstancesOutput) SetInstanceMonitorings(v []*InstanceMonitoring) *UnmonitorInstancesOutput {
	s.InstanceMonitorings = v
	return s
}

// Describes the burstable performance instance whose credit option for CPU
// usage was not modified.
type UnsuccessfulInstanceCreditSpecificationItem struct {
	_ struct{} `type:"structure"`

	// The applicable error for the burstable performance instance whose credit
	// option for CPU usage was not modified.
	Error *UnsuccessfulInstanceCreditSpecificationItemError `locationName:"error" type:"structure"`

	// The ID of the instance.
	InstanceId *string `locationName:"instanceId" type:"string"`
}

// String returns the string representation
func (s UnsuccessfulInstanceCreditSpecificationItem) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnsuccessfulInstanceCreditSpecificationItem) GoString() string {
	return s.String()
}

// SetError sets the Error field's value.
func (s *UnsuccessfulInstanceCreditSpecificationItem) SetError(v *UnsuccessfulInstanceCreditSpecificationItemError) *UnsuccessfulInstanceCreditSpecificationItem {
	s.Error = v
	return s
}

// SetInstanceId sets the InstanceId field's value.
func (s *UnsuccessfulInstanceCreditSpecificationItem) SetInstanceId(v string) *UnsuccessfulInstanceCreditSpecificationItem {
	s.InstanceId = &v
	return s
}

// Information about the error for the burstable performance instance whose
// credit option for CPU usage was not modified.
type UnsuccessfulInstanceCreditSpecificationItemError struct {
	_ struct{} `type:"structure"`

	// The error code.
	Code *string `locationName:"code" type:"string" enum:"UnsuccessfulInstanceCreditSpecificationErrorCode"`

	// The applicable error message.
	Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s UnsuccessfulInstanceCreditSpecificationItemError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnsuccessfulInstanceCreditSpecificationItemError) GoString() string {
	return s.String()
}

// SetCode sets the Code field's value.
func (s *UnsuccessfulInstanceCreditSpecificationItemError) SetCode(v string) *UnsuccessfulInstanceCreditSpecificationItemError {
	s.Code = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *UnsuccessfulInstanceCreditSpecificationItemError) SetMessage(v string) *UnsuccessfulInstanceCreditSpecificationItemError {
	s.Message = &v
	return s
}

// Information about items that were not successfully processed in a batch call.
type UnsuccessfulItem struct {
	_ struct{} `type:"structure"`

	// Information about the error.
	Error *UnsuccessfulItemError `locationName:"error" type:"structure"`

	// The ID of the resource.
	ResourceId *string `locationName:"resourceId" type:"string"`
}

// String returns the string representation
func (s UnsuccessfulItem) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnsuccessfulItem) GoString() string {
	return s.String()
}

// SetError sets the Error field's value.
func (s *UnsuccessfulItem) SetError(v *UnsuccessfulItemError) *UnsuccessfulItem {
	s.Error = v
	return s
}

// SetResourceId sets the ResourceId field's value.
func (s *UnsuccessfulItem) SetResourceId(v string) *UnsuccessfulItem {
	s.ResourceId = &v
	return s
}

// Information about the error that occurred. For more information about errors,
// see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
type UnsuccessfulItemError struct {
	_ struct{} `type:"structure"`

	// The error code.
	Code *string `locationName:"code" type:"string"`

	// The error message accompanying the error code.
	Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s UnsuccessfulItemError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnsuccessfulItemError) GoString() string {
	return s.String()
}

// SetCode sets the Code field's value.
func (s *UnsuccessfulItemError) SetCode(v string) *UnsuccessfulItemError {
	s.Code = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *UnsuccessfulItemError) SetMessage(v string) *UnsuccessfulItemError {
	s.Message = &v
	return s
}

type UpdateSecurityGroupRuleDescriptionsEgressInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the security group. You must specify either the security group
	// ID or the security group name in the request. For security groups in a nondefault
	// VPC, you must specify the security group ID.
	GroupId *string `type:"string"`

	// [Default VPC] The name of the security group. You must specify either the
	// security group ID or the security group name in the request.
	GroupName *string `type:"string"`

	// The IP permissions for the security group rule.
	//
	// IpPermissions is a required field
	IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"`
}

// String returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsEgressInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsEgressInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsEgressInput"}
	if s.IpPermissions == nil {
		invalidParams.Add(request.NewErrParamRequired("IpPermissions"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsEgressInput {
	s.DryRun = &v
	return s
}

// SetGroupId sets the GroupId field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetGroupId(v string) *UpdateSecurityGroupRuleDescriptionsEgressInput {
	s.GroupId = &v
	return s
}

// SetGroupName sets the GroupName field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetGroupName(v string) *UpdateSecurityGroupRuleDescriptionsEgressInput {
	s.GroupName = &v
	return s
}

// SetIpPermissions sets the IpPermissions field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetIpPermissions(v []*IpPermission) *UpdateSecurityGroupRuleDescriptionsEgressInput {
	s.IpPermissions = v
	return s
}

type UpdateSecurityGroupRuleDescriptionsEgressOutput struct {
	_ struct{} `type:"structure"`

	// Returns true if the request succeeds; otherwise, returns an error.
	Return *bool `locationName:"return" type:"boolean"`
}

// String returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsEgressOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsEgressOutput) GoString() string {
	return s.String()
}

// SetReturn sets the Return field's value.
func (s *UpdateSecurityGroupRuleDescriptionsEgressOutput) SetReturn(v bool) *UpdateSecurityGroupRuleDescriptionsEgressOutput {
	s.Return = &v
	return s
}

type UpdateSecurityGroupRuleDescriptionsIngressInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The ID of the security group. You must specify either the security group
	// ID or the security group name in the request. For security groups in a nondefault
	// VPC, you must specify the security group ID.
	GroupId *string `type:"string"`

	// [EC2-Classic, default VPC] The name of the security group. You must specify
	// either the security group ID or the security group name in the request.
	GroupName *string `type:"string"`

	// The IP permissions for the security group rule.
	//
	// IpPermissions is a required field
	IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"`
}

// String returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsIngressInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsIngressInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsIngressInput"}
	if s.IpPermissions == nil {
		invalidParams.Add(request.NewErrParamRequired("IpPermissions"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDryRun sets the DryRun field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsIngressInput {
	s.DryRun = &v
	return s
}

// SetGroupId sets the GroupId field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetGroupId(v string) *UpdateSecurityGroupRuleDescriptionsIngressInput {
	s.GroupId = &v
	return s
}

// SetGroupName sets the GroupName field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetGroupName(v string) *UpdateSecurityGroupRuleDescriptionsIngressInput {
	s.GroupName = &v
	return s
}

// SetIpPermissions sets the IpPermissions field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetIpPermissions(v []*IpPermission) *UpdateSecurityGroupRuleDescriptionsIngressInput {
	s.IpPermissions = v
	return s
}

type UpdateSecurityGroupRuleDescriptionsIngressOutput struct {
	_ struct{} `type:"structure"`

	// Returns true if the request succeeds; otherwise, returns an error.
	Return *bool `locationName:"return" type:"boolean"`
}

// String returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsIngressOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateSecurityGroupRuleDescriptionsIngressOutput) GoString() string {
	return s.String()
}

// SetReturn sets the Return field's value.
func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *UpdateSecurityGroupRuleDescriptionsIngressOutput {
	s.Return = &v
	return s
}

// Describes the Amazon S3 bucket for the disk image.
type UserBucket struct {
	_ struct{} `type:"structure"`

	// The name of the Amazon S3 bucket where the disk image is located.
	S3Bucket *string `type:"string"`

	// The file name of the disk image.
	S3Key *string `type:"string"`
}

// String returns the string representation
func (s UserBucket) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UserBucket) GoString() string {
	return s.String()
}

// SetS3Bucket sets the S3Bucket field's value.
func (s *UserBucket) SetS3Bucket(v string) *UserBucket {
	s.S3Bucket = &v
	return s
}

// SetS3Key sets the S3Key field's value.
func (s *UserBucket) SetS3Key(v string) *UserBucket {
	s.S3Key = &v
	return s
}

// Describes the Amazon S3 bucket for the disk image.
type UserBucketDetails struct {
	_ struct{} `type:"structure"`

	// The Amazon S3 bucket from which the disk image was created.
	S3Bucket *string `locationName:"s3Bucket" type:"string"`

	// The file name of the disk image.
	S3Key *string `locationName:"s3Key" type:"string"`
}

// String returns the string representation
func (s UserBucketDetails) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UserBucketDetails) GoString() string {
	return s.String()
}

// SetS3Bucket sets the S3Bucket field's value.
func (s *UserBucketDetails) SetS3Bucket(v string) *UserBucketDetails {
	s.S3Bucket = &v
	return s
}

// SetS3Key sets the S3Key field's value.
func (s *UserBucketDetails) SetS3Key(v string) *UserBucketDetails {
	s.S3Key = &v
	return s
}

// Describes the user data for an instance.
type UserData struct {
	_ struct{} `type:"structure" sensitive:"true"`

	// The user data. If you are using an AWS SDK or command line tool, Base64-encoding
	// is performed for you, and you can load the text from a file. Otherwise, you
	// must provide Base64-encoded text.
	Data *string `locationName:"data" type:"string"`
}

// String returns the string representation
func (s UserData) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UserData) GoString() string {
	return s.String()
}

// SetData sets the Data field's value.
func (s *UserData) SetData(v string) *UserData {
	s.Data = &v
	return s
}

// Describes a security group and AWS account ID pair.
type UserIdGroupPair struct {
	_ struct{} `type:"structure"`

	// A description for the security group rule that references this user ID group
	// pair.
	//
	// Constraints: Up to 255 characters in length. Allowed characters are a-z,
	// A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*
	Description *string `locationName:"description" type:"string"`

	// The ID of the security group.
	GroupId *string `locationName:"groupId" type:"string"`

	// The name of the security group. In a request, use this parameter for a security
	// group in EC2-Classic or a default VPC only. For a security group in a nondefault
	// VPC, use the security group ID.
	//
	// For a referenced security group in another VPC, this value is not returned
	// if the referenced security group is deleted.
	GroupName *string `locationName:"groupName" type:"string"`

	// The status of a VPC peering connection, if applicable.
	PeeringStatus *string `locationName:"peeringStatus" type:"string"`

	// The ID of an AWS account.
	//
	// For a referenced security group in another VPC, the account ID of the referenced
	// security group is returned in the response. If the referenced security group
	// is deleted, this value is not returned.
	//
	// [EC2-Classic] Required when adding or removing rules that reference a security
	// group in another AWS account.
	UserId *string `locationName:"userId" type:"string"`

	// The ID of the VPC for the referenced security group, if applicable.
	VpcId *string `locationName:"vpcId" type:"string"`

	// The ID of the VPC peering connection, if applicable.
	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
}

// String returns the string representation
func (s UserIdGroupPair) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UserIdGroupPair) GoString() string {
	return s.String()
}

// SetDescription sets the Description field's value.
func (s *UserIdGroupPair) SetDescription(v string) *UserIdGroupPair {
	s.Description = &v
	return s
}

// SetGroupId sets the GroupId field's value.
func (s *UserIdGroupPair) SetGroupId(v string) *UserIdGroupPair {
	s.GroupId = &v
	return s
}

// SetGroupName sets the GroupName field's value.
func (s *UserIdGroupPair) SetGroupName(v string) *UserIdGroupPair {
	s.GroupName = &v
	return s
}

// SetPeeringStatus sets the PeeringStatus field's value.
func (s *UserIdGroupPair) SetPeeringStatus(v string) *UserIdGroupPair {
	s.PeeringStatus = &v
	return s
}

// SetUserId sets the UserId field's value.
func (s *UserIdGroupPair) SetUserId(v string) *UserIdGroupPair {
	s.UserId = &v
	return s
}

// SetVpcId sets the VpcId field's value.
func (s *UserIdGroupPair) SetVpcId(v string) *UserIdGroupPair {
	s.VpcId = &v
	return s
}

// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
func (s *UserIdGroupPair) SetVpcPeeringConnectionId(v string) *UserIdGroupPair {
	s.VpcPeeringConnectionId = &v
	return s
}

// Describes the vCPU configurations for the instance type.
type VCpuInfo struct {
	_ struct{} `type:"structure"`

	// The default number of cores for the instance type.
	DefaultCores *int64 `locationName:"defaultCores" type:"integer"`

	// The default number of threads per core for the instance type.
	DefaultThreadsPerCore *int64 `locationName:"defaultThreadsPerCore" type:"integer"`

	// The default number of vCPUs for the instance type.
	DefaultVCpus *int64 `locationName:"defaultVCpus" type:"integer"`

	// The valid number of cores that can be configured for the instance type.
	ValidCores []*int64 `locationName:"validCores" locationNameList:"item" type:"list"`

	// The valid number of threads per core that can be configured for the instance
	// type.
	ValidThreadsPerCore []*int64 `locationName:"validThreadsPerCore" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s VCpuInfo) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VCpuInfo) GoString() string {
	return s.String()
}

// SetDefaultCores sets the DefaultCores field's value.
func (s *VCpuInfo) SetDefaultCores(v int64) *VCpuInfo {
	s.DefaultCores = &v
	return s
}

// SetDefaultThreadsPerCore sets the DefaultThreadsPerCore field's value.
func (s *VCpuInfo) SetDefaultThreadsPerCore(v int64) *VCpuInfo {
	s.DefaultThreadsPerCore = &v
	return s
}

// SetDefaultVCpus sets the DefaultVCpus field's value.
func (s *VCpuInfo) SetDefaultVCpus(v int64) *VCpuInfo {
	s.DefaultVCpus = &v
	return s
}

// SetValidCores sets the ValidCores field's value.
func (s *VCpuInfo) SetValidCores(v []*int64) *VCpuInfo {
	s.ValidCores = v
	return s
}

// SetValidThreadsPerCore sets the ValidThreadsPerCore field's value.
func (s *VCpuInfo) SetValidThreadsPerCore(v []*int64) *VCpuInfo {
	s.ValidThreadsPerCore = v
	return s
}

// The error code and error message that is returned for a parameter or parameter
// combination that is not valid when a new launch template or new version of
// a launch template is created.
type ValidationError struct {
	_ struct{} `type:"structure"`

	// The error code that indicates why the parameter or parameter combination
	// is not valid. For more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
	Code *string `locationName:"code" type:"string"`

	// The error message that describes why the parameter or parameter combination
	// is not valid. For more information about error messages, see Error Codes
	// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
	Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ValidationError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ValidationError) GoString() string {
	return s.String()
}

// SetCode sets the Code field's value.
func (s *ValidationError) SetCode(v string) *ValidationError {
	s.Code = &v
	return s
}

// SetMessage sets the Message field's value.
func (s *ValidationError) SetMessage(v string) *ValidationError {
	s.Message = &v
	return s
}

// The error codes and error messages that are returned for the parameters or
// parameter combinations that are not valid when a new launch template or new
// version of a launch template is created.
type ValidationWarning struct {
	_ struct{} `type:"structure"`

	// The error codes and error messages.
	Errors []*ValidationError `locationName:"errorSet" locationNameList:"item" type:"list"`
}

// String returns the string representation
func (s ValidationWarning) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ValidationWarning) GoString() string {
	return s.String()
}

// SetErrors sets the Errors field's value.
func (s *ValidationWarning) SetErrors(v []*ValidationError) *ValidationWarning {
	s.Errors = v
	return s
}

// Describes telemetry for a VPN tunnel.
type VgwTelemetry struct {
	_ struct{} `type:"structure"`

	// The number of accepted routes.
	AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"`

	// The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// The date and time of the last change in status.
	LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"`

	// The Internet-routable IP address of the virtual private gateway's outside
	// interface.
	OutsideIpAddress *string `locationName:"outsideIpAddress" type:"string"`

	// The status of the VPN tunnel.
	Status *string `locationName:"status" type:"string" enum:"TelemetryStatus"`

	// If an error occurs, a description of the error.
	StatusMessage *string `locationName:"statusMessage" type:"string"`
}

// String returns the string representation
func (s VgwTelemetry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VgwTelemetry) GoString() string {
	return s.String()
}

// SetAcceptedRouteCount sets the AcceptedRouteCount field's value.
func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry {
	s.AcceptedRouteCount = &v
	return s
}

// SetCertificateArn sets the CertificateArn field's value.
func (s *VgwTelemetry) SetCertificateArn(v string) *VgwTelemetry {
	s.CertificateArn = &v
	return s
}

// SetLastStatusChange sets the LastStatusChange field's value.
func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry {
	s.LastStatusChange = &v
	return s
}

// SetOutsideIpAddress sets the OutsideIpAddress field's value.
func (s *VgwTelemetry) SetOutsideIpAddress(v string) *VgwTelemetry {
	s.OutsideIpAddress = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *VgwTelemetry) SetStatus(v string) *VgwTelemetry {
	s.Status = &v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *VgwTelemetry) SetStatusMessage(v string) *VgwTelemetry {
	s.StatusMessage = &v
	return s
}

// Describes a volume.
type Volume struct {
	_ struct{} `type:"structure"`

	// Information about the volume attachments.
	Attachments []*VolumeAttachment `locationName:"attachmentSet" locationNameList:"item" type:"list"`

	// The Availability Zone for the volume.
	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`

	// The time stamp when volume creation was initiated.
	CreateTime *time.Time `locationName:"createTime" type:"timestamp"`

	// Indicates whether the volume is encrypted.
	Encrypted *bool `locationName:"encrypted" type:"boolean"`

	// Indicates whether the volume was created using fast snapshot restore.
	FastRestored *bool `locationName:"fastRestored" type:"boolean"`

	// The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes,
	// this represents the number of IOPS that are provisioned for the volume. For
	// gp2 volumes, this represents the baseline performance of the volume and the
	// rate at which the volume accumulates I/O credits for bursting.
	Iops *int64 `locationName:"iops" type:"integer"`

	// The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS)
	// customer master key (CMK) that was used to protect the volume encryption
	// key for the volume.
	KmsKeyId *string `locationName:"kmsKeyId" type:"string"`

	// Indicates whether Amazon EBS Multi-Attach is enabled.
	MultiAttachEnabled *bool `locationName:"multiAttachEnabled" type:"boolean"`

	// The Amazon Resource Name (ARN) of the Outpost.
	OutpostArn *string `locationName:"outpostArn" type:"string"`

	// The size of the volume, in GiBs.
	Size *int64 `locationName:"size" type:"integer"`

	// The snapshot from which the volume was created, if applicable.
	SnapshotId *string `locationName:"snapshotId" type:"string"`

	// The volume state.
	State *string `locationName:"status" type:"string" enum:"VolumeState"`

	// Any tags assigned to the volume.
	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`

	// The throughput that the volume supports, in MiB/s.
	Throughput *int64 `locationName:"throughput" type:"integer"`

	// The ID of the volume.
	VolumeId *string `locationName:"volumeId" type:"string"`

	// The volume type.
	VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"`
}

// String returns the string representation
func (s Volume) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Volume) GoString() string {
	return s.String()
}

// SetAttachments sets the Attachments field's value.
func (s *Volume) SetAttachments(v []*VolumeAttachment) *Volume {
	s.Attachments = v
	return s
}

// SetAvailabilityZone sets the AvailabilityZone field's value.
func (s *Volume) SetAvailabilityZone(v string) *Volume {
	s.AvailabilityZone = &v
	return s
}

// SetCreateTime sets the CreateTime field's value.
func (s *Volume) SetCreateTime(v time.Time) *Volume {
	s.CreateTime = &v
	return s
}

// SetEncrypted sets the Encrypted field's value.
func (s *Volume) SetEncrypted(v bool) *Volume {
	s.Encrypted = &v
	return s
}

// SetFastRestored sets the FastRestored field's value.
func (s *Volume) SetFastRestored(v bool) *Volume {
	s.FastRestored = &v
	return s
}

// SetIops sets the Iops field's value.
func (s *Volume) SetIops(v int64) *Volume {
	s.Iops = &v
	return s
}

// SetKmsKeyId sets the KmsKeyId field's value.
func (s *Volume) SetKmsKeyId(v string) *Volume {
	s.KmsKeyId = &v
	return s
}

// SetMultiAttachEnabled sets the MultiAttachEnabled field's value.
func (s *Volume) SetMultiAttachEnabled(v bool) *Volume {
	s.MultiAttachEnabled = &v
	return s
}

// SetOutpostArn sets the OutpostArn field's value.
func (s *Volume) SetOutpostArn(v string) *Volume {
	s.OutpostArn = &v
	return s
}

// SetSize sets the Size field's value.
func (s *Volume) SetSize(v int64) *Volume {
	s.Size = &v
	return s
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *Volume) SetSnapshotId(v string) *Volume {
	s.SnapshotId = &v
	return s
}

// SetState sets the State field's value.
func (s *Volume) SetState(v string) *Volume {
	s.State = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *Volume) SetTags(v []*Tag) *Volume {
	s.Tags = v
	return s
}

// SetThroughput sets the Throughput field's value.
func (s *Volume) SetThroughput(v int64) *Volume {
	s.Throughput = &v
	return s
}

// SetVolumeId sets the VolumeId field's value.
+func (s *Volume) SetVolumeId(v string) *Volume { + s.VolumeId = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *Volume) SetVolumeType(v string) *Volume { + s.VolumeType = &v + return s +} + +// Describes volume attachment details. +type VolumeAttachment struct { + _ struct{} `type:"structure"` + + // The time stamp when the attachment initiated. + AttachTime *time.Time `locationName:"attachTime" type:"timestamp"` + + // Indicates whether the EBS volume is deleted on instance termination. + DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` + + // The device name. + Device *string `locationName:"device" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The attachment state of the volume. + State *string `locationName:"status" type:"string" enum:"VolumeAttachmentState"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeAttachment) GoString() string { + return s.String() +} + +// SetAttachTime sets the AttachTime field's value. +func (s *VolumeAttachment) SetAttachTime(v time.Time) *VolumeAttachment { + s.AttachTime = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *VolumeAttachment) SetDeleteOnTermination(v bool) *VolumeAttachment { + s.DeleteOnTermination = &v + return s +} + +// SetDevice sets the Device field's value. +func (s *VolumeAttachment) SetDevice(v string) *VolumeAttachment { + s.Device = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *VolumeAttachment) SetInstanceId(v string) *VolumeAttachment { + s.InstanceId = &v + return s +} + +// SetState sets the State field's value. 
+func (s *VolumeAttachment) SetState(v string) *VolumeAttachment { + s.State = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *VolumeAttachment) SetVolumeId(v string) *VolumeAttachment { + s.VolumeId = &v + return s +} + +// Describes an EBS volume. +type VolumeDetail struct { + _ struct{} `type:"structure"` + + // The size of the volume, in GiB. + // + // Size is a required field + Size *int64 `locationName:"size" type:"long" required:"true"` +} + +// String returns the string representation +func (s VolumeDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeDetail) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VolumeDetail) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VolumeDetail"} + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSize sets the Size field's value. +func (s *VolumeDetail) SetSize(v int64) *VolumeDetail { + s.Size = &v + return s +} + +// Describes the modification status of an EBS volume. +// +// If the volume has never been modified, some element values will be null. +type VolumeModification struct { + _ struct{} `type:"structure"` + + // The modification completion or failure time. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The current modification state. The modification state is null for unmodified + // volumes. + ModificationState *string `locationName:"modificationState" type:"string" enum:"VolumeModificationState"` + + // The original IOPS rate of the volume. + OriginalIops *int64 `locationName:"originalIops" type:"integer"` + + // The original size of the volume, in GiB. 
+ OriginalSize *int64 `locationName:"originalSize" type:"integer"` + + // The original throughput of the volume, in MiB/s. + OriginalThroughput *int64 `locationName:"originalThroughput" type:"integer"` + + // The original EBS volume type of the volume. + OriginalVolumeType *string `locationName:"originalVolumeType" type:"string" enum:"VolumeType"` + + // The modification progress, from 0 to 100 percent complete. + Progress *int64 `locationName:"progress" type:"long"` + + // The modification start time. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // A status message about the modification progress or failure. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // The target IOPS rate of the volume. + TargetIops *int64 `locationName:"targetIops" type:"integer"` + + // The target size of the volume, in GiB. + TargetSize *int64 `locationName:"targetSize" type:"integer"` + + // The target throughput of the volume, in MiB/s. + TargetThroughput *int64 `locationName:"targetThroughput" type:"integer"` + + // The target EBS volume type of the volume. + TargetVolumeType *string `locationName:"targetVolumeType" type:"string" enum:"VolumeType"` + + // The ID of the volume. + VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeModification) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *VolumeModification) SetEndTime(v time.Time) *VolumeModification { + s.EndTime = &v + return s +} + +// SetModificationState sets the ModificationState field's value. +func (s *VolumeModification) SetModificationState(v string) *VolumeModification { + s.ModificationState = &v + return s +} + +// SetOriginalIops sets the OriginalIops field's value. 
+func (s *VolumeModification) SetOriginalIops(v int64) *VolumeModification { + s.OriginalIops = &v + return s +} + +// SetOriginalSize sets the OriginalSize field's value. +func (s *VolumeModification) SetOriginalSize(v int64) *VolumeModification { + s.OriginalSize = &v + return s +} + +// SetOriginalThroughput sets the OriginalThroughput field's value. +func (s *VolumeModification) SetOriginalThroughput(v int64) *VolumeModification { + s.OriginalThroughput = &v + return s +} + +// SetOriginalVolumeType sets the OriginalVolumeType field's value. +func (s *VolumeModification) SetOriginalVolumeType(v string) *VolumeModification { + s.OriginalVolumeType = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *VolumeModification) SetProgress(v int64) *VolumeModification { + s.Progress = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *VolumeModification) SetStartTime(v time.Time) *VolumeModification { + s.StartTime = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *VolumeModification) SetStatusMessage(v string) *VolumeModification { + s.StatusMessage = &v + return s +} + +// SetTargetIops sets the TargetIops field's value. +func (s *VolumeModification) SetTargetIops(v int64) *VolumeModification { + s.TargetIops = &v + return s +} + +// SetTargetSize sets the TargetSize field's value. +func (s *VolumeModification) SetTargetSize(v int64) *VolumeModification { + s.TargetSize = &v + return s +} + +// SetTargetThroughput sets the TargetThroughput field's value. +func (s *VolumeModification) SetTargetThroughput(v int64) *VolumeModification { + s.TargetThroughput = &v + return s +} + +// SetTargetVolumeType sets the TargetVolumeType field's value. +func (s *VolumeModification) SetTargetVolumeType(v string) *VolumeModification { + s.TargetVolumeType = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. 
+func (s *VolumeModification) SetVolumeId(v string) *VolumeModification { + s.VolumeId = &v + return s +} + +// Describes a volume status operation code. +type VolumeStatusAction struct { + _ struct{} `type:"structure"` + + // The code identifying the operation, for example, enable-volume-io. + Code *string `locationName:"code" type:"string"` + + // A description of the operation. + Description *string `locationName:"description" type:"string"` + + // The ID of the event associated with this operation. + EventId *string `locationName:"eventId" type:"string"` + + // The event type associated with this operation. + EventType *string `locationName:"eventType" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAction) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *VolumeStatusAction) SetCode(v string) *VolumeStatusAction { + s.Code = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *VolumeStatusAction) SetDescription(v string) *VolumeStatusAction { + s.Description = &v + return s +} + +// SetEventId sets the EventId field's value. +func (s *VolumeStatusAction) SetEventId(v string) *VolumeStatusAction { + s.EventId = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *VolumeStatusAction) SetEventType(v string) *VolumeStatusAction { + s.EventType = &v + return s +} + +// Information about the instances to which the volume is attached. +type VolumeStatusAttachmentStatus struct { + _ struct{} `type:"structure"` + + // The ID of the attached instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The maximum IOPS supported by the attached instance. 
+ IoPerformance *string `locationName:"ioPerformance" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAttachmentStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAttachmentStatus) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *VolumeStatusAttachmentStatus) SetInstanceId(v string) *VolumeStatusAttachmentStatus { + s.InstanceId = &v + return s +} + +// SetIoPerformance sets the IoPerformance field's value. +func (s *VolumeStatusAttachmentStatus) SetIoPerformance(v string) *VolumeStatusAttachmentStatus { + s.IoPerformance = &v + return s +} + +// Describes a volume status. +type VolumeStatusDetails struct { + _ struct{} `type:"structure"` + + // The name of the volume status. + Name *string `locationName:"name" type:"string" enum:"VolumeStatusName"` + + // The intended status of the volume status. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusDetails) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *VolumeStatusDetails) SetName(v string) *VolumeStatusDetails { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VolumeStatusDetails) SetStatus(v string) *VolumeStatusDetails { + s.Status = &v + return s +} + +// Describes a volume status event. +type VolumeStatusEvent struct { + _ struct{} `type:"structure"` + + // A description of the event. + Description *string `locationName:"description" type:"string"` + + // The ID of this event. + EventId *string `locationName:"eventId" type:"string"` + + // The type of this event. 
+ EventType *string `locationName:"eventType" type:"string"` + + // The ID of the instance associated with the event. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The latest end time of the event. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` + + // The earliest start time of the event. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` +} + +// String returns the string representation +func (s VolumeStatusEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusEvent) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *VolumeStatusEvent) SetDescription(v string) *VolumeStatusEvent { + s.Description = &v + return s +} + +// SetEventId sets the EventId field's value. +func (s *VolumeStatusEvent) SetEventId(v string) *VolumeStatusEvent { + s.EventId = &v + return s +} + +// SetEventType sets the EventType field's value. +func (s *VolumeStatusEvent) SetEventType(v string) *VolumeStatusEvent { + s.EventType = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *VolumeStatusEvent) SetInstanceId(v string) *VolumeStatusEvent { + s.InstanceId = &v + return s +} + +// SetNotAfter sets the NotAfter field's value. +func (s *VolumeStatusEvent) SetNotAfter(v time.Time) *VolumeStatusEvent { + s.NotAfter = &v + return s +} + +// SetNotBefore sets the NotBefore field's value. +func (s *VolumeStatusEvent) SetNotBefore(v time.Time) *VolumeStatusEvent { + s.NotBefore = &v + return s +} + +// Describes the status of a volume. +type VolumeStatusInfo struct { + _ struct{} `type:"structure"` + + // The details of the volume status. + Details []*VolumeStatusDetails `locationName:"details" locationNameList:"item" type:"list"` + + // The status of the volume. 
+ Status *string `locationName:"status" type:"string" enum:"VolumeStatusInfoStatus"` +} + +// String returns the string representation +func (s VolumeStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusInfo) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *VolumeStatusInfo) SetDetails(v []*VolumeStatusDetails) *VolumeStatusInfo { + s.Details = v + return s +} + +// SetStatus sets the Status field's value. +func (s *VolumeStatusInfo) SetStatus(v string) *VolumeStatusInfo { + s.Status = &v + return s +} + +// Describes the volume status. +type VolumeStatusItem struct { + _ struct{} `type:"structure"` + + // The details of the operation. + Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + + // Information about the instances to which the volume is attached. + AttachmentStatuses []*VolumeStatusAttachmentStatus `locationName:"attachmentStatuses" locationNameList:"item" type:"list"` + + // The Availability Zone of the volume. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // A list of events associated with the volume. + Events []*VolumeStatusEvent `locationName:"eventsSet" locationNameList:"item" type:"list"` + + // The Amazon Resource Name (ARN) of the Outpost. + OutpostArn *string `locationName:"outpostArn" type:"string"` + + // The volume ID. + VolumeId *string `locationName:"volumeId" type:"string"` + + // The volume status. + VolumeStatus *VolumeStatusInfo `locationName:"volumeStatus" type:"structure"` +} + +// String returns the string representation +func (s VolumeStatusItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusItem) GoString() string { + return s.String() +} + +// SetActions sets the Actions field's value. 
+func (s *VolumeStatusItem) SetActions(v []*VolumeStatusAction) *VolumeStatusItem { + s.Actions = v + return s +} + +// SetAttachmentStatuses sets the AttachmentStatuses field's value. +func (s *VolumeStatusItem) SetAttachmentStatuses(v []*VolumeStatusAttachmentStatus) *VolumeStatusItem { + s.AttachmentStatuses = v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *VolumeStatusItem) SetAvailabilityZone(v string) *VolumeStatusItem { + s.AvailabilityZone = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *VolumeStatusItem) SetEvents(v []*VolumeStatusEvent) *VolumeStatusItem { + s.Events = v + return s +} + +// SetOutpostArn sets the OutpostArn field's value. +func (s *VolumeStatusItem) SetOutpostArn(v string) *VolumeStatusItem { + s.OutpostArn = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *VolumeStatusItem) SetVolumeId(v string) *VolumeStatusItem { + s.VolumeId = &v + return s +} + +// SetVolumeStatus sets the VolumeStatus field's value. +func (s *VolumeStatusItem) SetVolumeStatus(v *VolumeStatusInfo) *VolumeStatusItem { + s.VolumeStatus = v + return s +} + +// Describes a VPC. +type Vpc struct { + _ struct{} `type:"structure"` + + // The primary IPv4 CIDR block for the VPC. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Information about the IPv4 CIDR blocks associated with the VPC. + CidrBlockAssociationSet []*VpcCidrBlockAssociation `locationName:"cidrBlockAssociationSet" locationNameList:"item" type:"list"` + + // The ID of the set of DHCP options you've associated with the VPC. + DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` + + // The allowed tenancy of instances launched into the VPC. + InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + + // Information about the IPv6 CIDR blocks associated with the VPC. 
+ Ipv6CidrBlockAssociationSet []*VpcIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociationSet" locationNameList:"item" type:"list"` + + // Indicates whether the VPC is the default VPC. + IsDefault *bool `locationName:"isDefault" type:"boolean"` + + // The ID of the AWS account that owns the VPC. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The current state of the VPC. + State *string `locationName:"state" type:"string" enum:"VpcState"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s Vpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vpc) GoString() string { + return s.String() +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *Vpc) SetCidrBlock(v string) *Vpc { + s.CidrBlock = &v + return s +} + +// SetCidrBlockAssociationSet sets the CidrBlockAssociationSet field's value. +func (s *Vpc) SetCidrBlockAssociationSet(v []*VpcCidrBlockAssociation) *Vpc { + s.CidrBlockAssociationSet = v + return s +} + +// SetDhcpOptionsId sets the DhcpOptionsId field's value. +func (s *Vpc) SetDhcpOptionsId(v string) *Vpc { + s.DhcpOptionsId = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *Vpc) SetInstanceTenancy(v string) *Vpc { + s.InstanceTenancy = &v + return s +} + +// SetIpv6CidrBlockAssociationSet sets the Ipv6CidrBlockAssociationSet field's value. +func (s *Vpc) SetIpv6CidrBlockAssociationSet(v []*VpcIpv6CidrBlockAssociation) *Vpc { + s.Ipv6CidrBlockAssociationSet = v + return s +} + +// SetIsDefault sets the IsDefault field's value. +func (s *Vpc) SetIsDefault(v bool) *Vpc { + s.IsDefault = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. 
+func (s *Vpc) SetOwnerId(v string) *Vpc { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *Vpc) SetState(v string) *Vpc { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Vpc) SetTags(v []*Tag) *Vpc { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *Vpc) SetVpcId(v string) *Vpc { + s.VpcId = &v + return s +} + +// Describes an attachment between a virtual private gateway and a VPC. +type VpcAttachment struct { + _ struct{} `type:"structure"` + + // The current state of the attachment. + State *string `locationName:"state" type:"string" enum:"AttachmentStatus"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcAttachment) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *VpcAttachment) SetState(v string) *VpcAttachment { + s.State = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *VpcAttachment) SetVpcId(v string) *VpcAttachment { + s.VpcId = &v + return s +} + +// Describes an IPv4 CIDR block associated with a VPC. +type VpcCidrBlockAssociation struct { + _ struct{} `type:"structure"` + + // The association ID for the IPv4 CIDR block. + AssociationId *string `locationName:"associationId" type:"string"` + + // The IPv4 CIDR block. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Information about the state of the CIDR block. 
+ CidrBlockState *VpcCidrBlockState `locationName:"cidrBlockState" type:"structure"` +} + +// String returns the string representation +func (s VpcCidrBlockAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcCidrBlockAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *VpcCidrBlockAssociation) SetAssociationId(v string) *VpcCidrBlockAssociation { + s.AssociationId = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *VpcCidrBlockAssociation) SetCidrBlock(v string) *VpcCidrBlockAssociation { + s.CidrBlock = &v + return s +} + +// SetCidrBlockState sets the CidrBlockState field's value. +func (s *VpcCidrBlockAssociation) SetCidrBlockState(v *VpcCidrBlockState) *VpcCidrBlockAssociation { + s.CidrBlockState = v + return s +} + +// Describes the state of a CIDR block. +type VpcCidrBlockState struct { + _ struct{} `type:"structure"` + + // The state of the CIDR block. + State *string `locationName:"state" type:"string" enum:"VpcCidrBlockStateCode"` + + // A message about the status of the CIDR block, if applicable. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s VpcCidrBlockState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcCidrBlockState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *VpcCidrBlockState) SetState(v string) *VpcCidrBlockState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *VpcCidrBlockState) SetStatusMessage(v string) *VpcCidrBlockState { + s.StatusMessage = &v + return s +} + +// Describes whether a VPC is enabled for ClassicLink. 
+type VpcClassicLink struct { + _ struct{} `type:"structure"` + + // Indicates whether the VPC is enabled for ClassicLink. + ClassicLinkEnabled *bool `locationName:"classicLinkEnabled" type:"boolean"` + + // Any tags assigned to the VPC. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcClassicLink) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcClassicLink) GoString() string { + return s.String() +} + +// SetClassicLinkEnabled sets the ClassicLinkEnabled field's value. +func (s *VpcClassicLink) SetClassicLinkEnabled(v bool) *VpcClassicLink { + s.ClassicLinkEnabled = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *VpcClassicLink) SetTags(v []*Tag) *VpcClassicLink { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *VpcClassicLink) SetVpcId(v string) *VpcClassicLink { + s.VpcId = &v + return s +} + +// Describes a VPC endpoint. +type VpcEndpoint struct { + _ struct{} `type:"structure"` + + // The date and time that the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp"` + + // (Interface endpoint) The DNS entries for the endpoint. + DnsEntries []*DnsEntry `locationName:"dnsEntrySet" locationNameList:"item" type:"list"` + + // (Interface endpoint) Information about the security groups that are associated + // with the network interface. + Groups []*SecurityGroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` + + // The last error that occurred for VPC endpoint. + LastError *LastError `locationName:"lastError" type:"structure"` + + // (Interface endpoint) One or more network interfaces for the endpoint. 
+ NetworkInterfaceIds []*string `locationName:"networkInterfaceIdSet" locationNameList:"item" type:"list"` + + // The ID of the AWS account that owns the VPC endpoint. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The policy document associated with the endpoint, if applicable. + PolicyDocument *string `locationName:"policyDocument" type:"string"` + + // (Interface endpoint) Indicates whether the VPC is associated with a private + // hosted zone. + PrivateDnsEnabled *bool `locationName:"privateDnsEnabled" type:"boolean"` + + // Indicates whether the VPC endpoint is being managed by its service. + RequesterManaged *bool `locationName:"requesterManaged" type:"boolean"` + + // (Gateway endpoint) One or more route tables associated with the endpoint. + RouteTableIds []*string `locationName:"routeTableIdSet" locationNameList:"item" type:"list"` + + // The name of the service to which the endpoint is associated. + ServiceName *string `locationName:"serviceName" type:"string"` + + // The state of the VPC endpoint. + State *string `locationName:"state" type:"string" enum:"State"` + + // (Interface endpoint) One or more subnets in which the endpoint is located. + SubnetIds []*string `locationName:"subnetIdSet" locationNameList:"item" type:"list"` + + // Any tags assigned to the VPC endpoint. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC endpoint. + VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The type of endpoint. + VpcEndpointType *string `locationName:"vpcEndpointType" type:"string" enum:"VpcEndpointType"` + + // The ID of the VPC to which the endpoint is associated. 
+ VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s VpcEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpoint) GoString() string { + return s.String() +} + +// SetCreationTimestamp sets the CreationTimestamp field's value. +func (s *VpcEndpoint) SetCreationTimestamp(v time.Time) *VpcEndpoint { + s.CreationTimestamp = &v + return s +} + +// SetDnsEntries sets the DnsEntries field's value. +func (s *VpcEndpoint) SetDnsEntries(v []*DnsEntry) *VpcEndpoint { + s.DnsEntries = v + return s +} + +// SetGroups sets the Groups field's value. +func (s *VpcEndpoint) SetGroups(v []*SecurityGroupIdentifier) *VpcEndpoint { + s.Groups = v + return s +} + +// SetLastError sets the LastError field's value. +func (s *VpcEndpoint) SetLastError(v *LastError) *VpcEndpoint { + s.LastError = v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *VpcEndpoint) SetNetworkInterfaceIds(v []*string) *VpcEndpoint { + s.NetworkInterfaceIds = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *VpcEndpoint) SetOwnerId(v string) *VpcEndpoint { + s.OwnerId = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *VpcEndpoint) SetPolicyDocument(v string) *VpcEndpoint { + s.PolicyDocument = &v + return s +} + +// SetPrivateDnsEnabled sets the PrivateDnsEnabled field's value. +func (s *VpcEndpoint) SetPrivateDnsEnabled(v bool) *VpcEndpoint { + s.PrivateDnsEnabled = &v + return s +} + +// SetRequesterManaged sets the RequesterManaged field's value. +func (s *VpcEndpoint) SetRequesterManaged(v bool) *VpcEndpoint { + s.RequesterManaged = &v + return s +} + +// SetRouteTableIds sets the RouteTableIds field's value. +func (s *VpcEndpoint) SetRouteTableIds(v []*string) *VpcEndpoint { + s.RouteTableIds = v + return s +} + +// SetServiceName sets the ServiceName field's value. 
+func (s *VpcEndpoint) SetServiceName(v string) *VpcEndpoint { + s.ServiceName = &v + return s +} + +// SetState sets the State field's value. +func (s *VpcEndpoint) SetState(v string) *VpcEndpoint { + s.State = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *VpcEndpoint) SetSubnetIds(v []*string) *VpcEndpoint { + s.SubnetIds = v + return s +} + +// SetTags sets the Tags field's value. +func (s *VpcEndpoint) SetTags(v []*Tag) *VpcEndpoint { + s.Tags = v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *VpcEndpoint) SetVpcEndpointId(v string) *VpcEndpoint { + s.VpcEndpointId = &v + return s +} + +// SetVpcEndpointType sets the VpcEndpointType field's value. +func (s *VpcEndpoint) SetVpcEndpointType(v string) *VpcEndpoint { + s.VpcEndpointType = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *VpcEndpoint) SetVpcId(v string) *VpcEndpoint { + s.VpcId = &v + return s +} + +// Describes a VPC endpoint connection to a service. +type VpcEndpointConnection struct { + _ struct{} `type:"structure"` + + // The date and time that the VPC endpoint was created. + CreationTimestamp *time.Time `locationName:"creationTimestamp" type:"timestamp"` + + // The DNS entries for the VPC endpoint. + DnsEntries []*DnsEntry `locationName:"dnsEntrySet" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service. + GatewayLoadBalancerArns []*string `locationName:"gatewayLoadBalancerArnSet" locationNameList:"item" type:"list"` + + // The Amazon Resource Names (ARNs) of the network load balancers for the service. + NetworkLoadBalancerArns []*string `locationName:"networkLoadBalancerArnSet" locationNameList:"item" type:"list"` + + // The ID of the service to which the endpoint is connected. + ServiceId *string `locationName:"serviceId" type:"string"` + + // The ID of the VPC endpoint. 
+ VpcEndpointId *string `locationName:"vpcEndpointId" type:"string"` + + // The AWS account ID of the owner of the VPC endpoint. + VpcEndpointOwner *string `locationName:"vpcEndpointOwner" type:"string"` + + // The state of the VPC endpoint. + VpcEndpointState *string `locationName:"vpcEndpointState" type:"string" enum:"State"` +} + +// String returns the string representation +func (s VpcEndpointConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcEndpointConnection) GoString() string { + return s.String() +} + +// SetCreationTimestamp sets the CreationTimestamp field's value. +func (s *VpcEndpointConnection) SetCreationTimestamp(v time.Time) *VpcEndpointConnection { + s.CreationTimestamp = &v + return s +} + +// SetDnsEntries sets the DnsEntries field's value. +func (s *VpcEndpointConnection) SetDnsEntries(v []*DnsEntry) *VpcEndpointConnection { + s.DnsEntries = v + return s +} + +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *VpcEndpointConnection) SetGatewayLoadBalancerArns(v []*string) *VpcEndpointConnection { + s.GatewayLoadBalancerArns = v + return s +} + +// SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. +func (s *VpcEndpointConnection) SetNetworkLoadBalancerArns(v []*string) *VpcEndpointConnection { + s.NetworkLoadBalancerArns = v + return s +} + +// SetServiceId sets the ServiceId field's value. +func (s *VpcEndpointConnection) SetServiceId(v string) *VpcEndpointConnection { + s.ServiceId = &v + return s +} + +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *VpcEndpointConnection) SetVpcEndpointId(v string) *VpcEndpointConnection { + s.VpcEndpointId = &v + return s +} + +// SetVpcEndpointOwner sets the VpcEndpointOwner field's value. 
+func (s *VpcEndpointConnection) SetVpcEndpointOwner(v string) *VpcEndpointConnection {
+	s.VpcEndpointOwner = &v
+	return s
+}
+
+// SetVpcEndpointState sets the VpcEndpointState field's value.
+func (s *VpcEndpointConnection) SetVpcEndpointState(v string) *VpcEndpointConnection {
+	s.VpcEndpointState = &v
+	return s
+}
+
+// Describes an IPv6 CIDR block associated with a VPC.
+type VpcIpv6CidrBlockAssociation struct {
+	_ struct{} `type:"structure"`
+
+	// The association ID for the IPv6 CIDR block.
+	AssociationId *string `locationName:"associationId" type:"string"`
+
+	// The IPv6 CIDR block.
+	Ipv6CidrBlock *string `locationName:"ipv6CidrBlock" type:"string"`
+
+	// Information about the state of the CIDR block.
+	Ipv6CidrBlockState *VpcCidrBlockState `locationName:"ipv6CidrBlockState" type:"structure"`
+
+	// The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated.
+	Ipv6Pool *string `locationName:"ipv6Pool" type:"string"`
+
+	// The name of the unique set of Availability Zones, Local Zones, or Wavelength
+	// Zones from which AWS advertises IP addresses, for example, us-east-1-wl1-bos-wlz-1.
+	NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpcIpv6CidrBlockAssociation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpcIpv6CidrBlockAssociation) GoString() string {
+	return s.String()
+}
+
+// SetAssociationId sets the AssociationId field's value.
+func (s *VpcIpv6CidrBlockAssociation) SetAssociationId(v string) *VpcIpv6CidrBlockAssociation {
+	s.AssociationId = &v
+	return s
+}
+
+// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value.
+func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlock(v string) *VpcIpv6CidrBlockAssociation {
+	s.Ipv6CidrBlock = &v
+	return s
+}
+
+// SetIpv6CidrBlockState sets the Ipv6CidrBlockState field's value.
+func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *VpcCidrBlockState) *VpcIpv6CidrBlockAssociation {
+	s.Ipv6CidrBlockState = v
+	return s
+}
+
+// SetIpv6Pool sets the Ipv6Pool field's value.
+func (s *VpcIpv6CidrBlockAssociation) SetIpv6Pool(v string) *VpcIpv6CidrBlockAssociation {
+	s.Ipv6Pool = &v
+	return s
+}
+
+// SetNetworkBorderGroup sets the NetworkBorderGroup field's value.
+func (s *VpcIpv6CidrBlockAssociation) SetNetworkBorderGroup(v string) *VpcIpv6CidrBlockAssociation {
+	s.NetworkBorderGroup = &v
+	return s
+}
+
+// Describes a VPC peering connection.
+type VpcPeeringConnection struct {
+	_ struct{} `type:"structure"`
+
+	// Information about the accepter VPC. CIDR block information is only returned
+	// when describing an active VPC peering connection.
+	AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"`
+
+	// The time that an unaccepted VPC peering connection will expire.
+	ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp"`
+
+	// Information about the requester VPC. CIDR block information is only returned
+	// when describing an active VPC peering connection.
+	RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"`
+
+	// The status of the VPC peering connection.
+	Status *VpcPeeringConnectionStateReason `locationName:"status" type:"structure"`
+
+	// Any tags assigned to the resource.
+	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+	// The ID of the VPC peering connection.
+	VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpcPeeringConnection) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpcPeeringConnection) GoString() string {
+	return s.String()
+}
+
+// SetAccepterVpcInfo sets the AccepterVpcInfo field's value.
+func (s *VpcPeeringConnection) SetAccepterVpcInfo(v *VpcPeeringConnectionVpcInfo) *VpcPeeringConnection {
+	s.AccepterVpcInfo = v
+	return s
+}
+
+// SetExpirationTime sets the ExpirationTime field's value.
+func (s *VpcPeeringConnection) SetExpirationTime(v time.Time) *VpcPeeringConnection {
+	s.ExpirationTime = &v
+	return s
+}
+
+// SetRequesterVpcInfo sets the RequesterVpcInfo field's value.
+func (s *VpcPeeringConnection) SetRequesterVpcInfo(v *VpcPeeringConnectionVpcInfo) *VpcPeeringConnection {
+	s.RequesterVpcInfo = v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VpcPeeringConnection) SetStatus(v *VpcPeeringConnectionStateReason) *VpcPeeringConnection {
+	s.Status = v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VpcPeeringConnection) SetTags(v []*Tag) *VpcPeeringConnection {
+	s.Tags = v
+	return s
+}
+
+// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
+func (s *VpcPeeringConnection) SetVpcPeeringConnectionId(v string) *VpcPeeringConnection {
+	s.VpcPeeringConnectionId = &v
+	return s
+}
+
+// Describes the VPC peering connection options.
+type VpcPeeringConnectionOptionsDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether a local VPC can resolve public DNS hostnames to private
+	// IP addresses when queried from instances in a peer VPC.
+	AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"`
+
+	// Indicates whether a local ClassicLink connection can communicate with the
+	// peer VPC over the VPC peering connection.
+	AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"`
+
+	// Indicates whether a local VPC can communicate with a ClassicLink connection
+	// in the peer VPC over the VPC peering connection.
+	AllowEgressFromLocalVpcToRemoteClassicLink *bool `locationName:"allowEgressFromLocalVpcToRemoteClassicLink" type:"boolean"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpcPeeringConnectionOptionsDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpcPeeringConnectionOptionsDescription) GoString() string {
+	return s.String()
+}
+
+// SetAllowDnsResolutionFromRemoteVpc sets the AllowDnsResolutionFromRemoteVpc field's value.
+func (s *VpcPeeringConnectionOptionsDescription) SetAllowDnsResolutionFromRemoteVpc(v bool) *VpcPeeringConnectionOptionsDescription {
+	s.AllowDnsResolutionFromRemoteVpc = &v
+	return s
+}
+
+// SetAllowEgressFromLocalClassicLinkToRemoteVpc sets the AllowEgressFromLocalClassicLinkToRemoteVpc field's value.
+func (s *VpcPeeringConnectionOptionsDescription) SetAllowEgressFromLocalClassicLinkToRemoteVpc(v bool) *VpcPeeringConnectionOptionsDescription {
+	s.AllowEgressFromLocalClassicLinkToRemoteVpc = &v
+	return s
+}
+
+// SetAllowEgressFromLocalVpcToRemoteClassicLink sets the AllowEgressFromLocalVpcToRemoteClassicLink field's value.
+func (s *VpcPeeringConnectionOptionsDescription) SetAllowEgressFromLocalVpcToRemoteClassicLink(v bool) *VpcPeeringConnectionOptionsDescription {
+	s.AllowEgressFromLocalVpcToRemoteClassicLink = &v
+	return s
+}
+
+// Describes the status of a VPC peering connection.
+type VpcPeeringConnectionStateReason struct {
+	_ struct{} `type:"structure"`
+
+	// The status of the VPC peering connection.
+	Code *string `locationName:"code" type:"string" enum:"VpcPeeringConnectionStateReasonCode"`
+
+	// A message that provides more information about the status, if applicable.
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpcPeeringConnectionStateReason) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpcPeeringConnectionStateReason) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *VpcPeeringConnectionStateReason) SetCode(v string) *VpcPeeringConnectionStateReason {
+	s.Code = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *VpcPeeringConnectionStateReason) SetMessage(v string) *VpcPeeringConnectionStateReason {
+	s.Message = &v
+	return s
+}
+
+// Describes a VPC in a VPC peering connection.
+type VpcPeeringConnectionVpcInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The IPv4 CIDR block for the VPC.
+	CidrBlock *string `locationName:"cidrBlock" type:"string"`
+
+	// Information about the IPv4 CIDR blocks for the VPC.
+	CidrBlockSet []*CidrBlock `locationName:"cidrBlockSet" locationNameList:"item" type:"list"`
+
+	// The IPv6 CIDR block for the VPC.
+	Ipv6CidrBlockSet []*Ipv6CidrBlock `locationName:"ipv6CidrBlockSet" locationNameList:"item" type:"list"`
+
+	// The AWS account ID of the VPC owner.
+	OwnerId *string `locationName:"ownerId" type:"string"`
+
+	// Information about the VPC peering connection options for the accepter or
+	// requester VPC.
+	PeeringOptions *VpcPeeringConnectionOptionsDescription `locationName:"peeringOptions" type:"structure"`
+
+	// The Region in which the VPC is located.
+	Region *string `locationName:"region" type:"string"`
+
+	// The ID of the VPC.
+	VpcId *string `locationName:"vpcId" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpcPeeringConnectionVpcInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpcPeeringConnectionVpcInfo) GoString() string {
+	return s.String()
+}
+
+// SetCidrBlock sets the CidrBlock field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetCidrBlock(v string) *VpcPeeringConnectionVpcInfo {
+	s.CidrBlock = &v
+	return s
+}
+
+// SetCidrBlockSet sets the CidrBlockSet field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetCidrBlockSet(v []*CidrBlock) *VpcPeeringConnectionVpcInfo {
+	s.CidrBlockSet = v
+	return s
+}
+
+// SetIpv6CidrBlockSet sets the Ipv6CidrBlockSet field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetIpv6CidrBlockSet(v []*Ipv6CidrBlock) *VpcPeeringConnectionVpcInfo {
+	s.Ipv6CidrBlockSet = v
+	return s
+}
+
+// SetOwnerId sets the OwnerId field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetOwnerId(v string) *VpcPeeringConnectionVpcInfo {
+	s.OwnerId = &v
+	return s
+}
+
+// SetPeeringOptions sets the PeeringOptions field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetPeeringOptions(v *VpcPeeringConnectionOptionsDescription) *VpcPeeringConnectionVpcInfo {
+	s.PeeringOptions = v
+	return s
+}
+
+// SetRegion sets the Region field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetRegion(v string) *VpcPeeringConnectionVpcInfo {
+	s.Region = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVpcInfo {
+	s.VpcId = &v
+	return s
+}
+
+// Describes a VPN connection.
+type VpnConnection struct {
+	_ struct{} `type:"structure"`
+
+	// The category of the VPN connection. A value of VPN indicates an AWS VPN connection.
+	// A value of VPN-Classic indicates an AWS Classic VPN connection.
+	Category *string `locationName:"category" type:"string"`
+
+	// The configuration information for the VPN connection's customer gateway (in
+	// the native XML format). This element is always present in the CreateVpnConnection
+	// response; however, it's present in the DescribeVpnConnections response only
+	// if the VPN connection is in the pending or available state.
+	CustomerGatewayConfiguration *string `locationName:"customerGatewayConfiguration" type:"string"`
+
+	// The ID of the customer gateway at your end of the VPN connection.
+	CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"`
+
+	// The VPN connection options.
+	Options *VpnConnectionOptions `locationName:"options" type:"structure"`
+
+	// The static routes associated with the VPN connection.
+	Routes []*VpnStaticRoute `locationName:"routes" locationNameList:"item" type:"list"`
+
+	// The current state of the VPN connection.
+	State *string `locationName:"state" type:"string" enum:"VpnState"`
+
+	// Any tags assigned to the VPN connection.
+	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+	// The ID of the transit gateway associated with the VPN connection.
+	TransitGatewayId *string `locationName:"transitGatewayId" type:"string"`
+
+	// The type of VPN connection.
+	Type *string `locationName:"type" type:"string" enum:"GatewayType"`
+
+	// Information about the VPN tunnel.
+	VgwTelemetry []*VgwTelemetry `locationName:"vgwTelemetry" locationNameList:"item" type:"list"`
+
+	// The ID of the VPN connection.
+	VpnConnectionId *string `locationName:"vpnConnectionId" type:"string"`
+
+	// The ID of the virtual private gateway at the AWS side of the VPN connection.
+	VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnConnection) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnConnection) GoString() string {
+	return s.String()
+}
+
+// SetCategory sets the Category field's value.
+func (s *VpnConnection) SetCategory(v string) *VpnConnection {
+	s.Category = &v
+	return s
+}
+
+// SetCustomerGatewayConfiguration sets the CustomerGatewayConfiguration field's value.
+func (s *VpnConnection) SetCustomerGatewayConfiguration(v string) *VpnConnection {
+	s.CustomerGatewayConfiguration = &v
+	return s
+}
+
+// SetCustomerGatewayId sets the CustomerGatewayId field's value.
+func (s *VpnConnection) SetCustomerGatewayId(v string) *VpnConnection {
+	s.CustomerGatewayId = &v
+	return s
+}
+
+// SetOptions sets the Options field's value.
+func (s *VpnConnection) SetOptions(v *VpnConnectionOptions) *VpnConnection {
+	s.Options = v
+	return s
+}
+
+// SetRoutes sets the Routes field's value.
+func (s *VpnConnection) SetRoutes(v []*VpnStaticRoute) *VpnConnection {
+	s.Routes = v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *VpnConnection) SetState(v string) *VpnConnection {
+	s.State = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VpnConnection) SetTags(v []*Tag) *VpnConnection {
+	s.Tags = v
+	return s
+}
+
+// SetTransitGatewayId sets the TransitGatewayId field's value.
+func (s *VpnConnection) SetTransitGatewayId(v string) *VpnConnection {
+	s.TransitGatewayId = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *VpnConnection) SetType(v string) *VpnConnection {
+	s.Type = &v
+	return s
+}
+
+// SetVgwTelemetry sets the VgwTelemetry field's value.
+func (s *VpnConnection) SetVgwTelemetry(v []*VgwTelemetry) *VpnConnection {
+	s.VgwTelemetry = v
+	return s
+}
+
+// SetVpnConnectionId sets the VpnConnectionId field's value.
+func (s *VpnConnection) SetVpnConnectionId(v string) *VpnConnection {
+	s.VpnConnectionId = &v
+	return s
+}
+
+// SetVpnGatewayId sets the VpnGatewayId field's value.
+func (s *VpnConnection) SetVpnGatewayId(v string) *VpnConnection {
+	s.VpnGatewayId = &v
+	return s
+}
+
+// Describes VPN connection options.
+type VpnConnectionOptions struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether acceleration is enabled for the VPN connection.
+	EnableAcceleration *bool `locationName:"enableAcceleration" type:"boolean"`
+
+	// The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection.
+	LocalIpv4NetworkCidr *string `locationName:"localIpv4NetworkCidr" type:"string"`
+
+	// The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection.
+	LocalIpv6NetworkCidr *string `locationName:"localIpv6NetworkCidr" type:"string"`
+
+	// The IPv4 CIDR on the AWS side of the VPN connection.
+	RemoteIpv4NetworkCidr *string `locationName:"remoteIpv4NetworkCidr" type:"string"`
+
+	// The IPv6 CIDR on the AWS side of the VPN connection.
+	RemoteIpv6NetworkCidr *string `locationName:"remoteIpv6NetworkCidr" type:"string"`
+
+	// Indicates whether the VPN connection uses static routes only. Static routes
+	// must be used for devices that don't support BGP.
+	StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`
+
+	// Indicates whether the VPN tunnels process IPv4 or IPv6 traffic.
+	TunnelInsideIpVersion *string `locationName:"tunnelInsideIpVersion" type:"string" enum:"TunnelInsideIpVersion"`
+
+	// Indicates the VPN tunnel options.
+	TunnelOptions []*TunnelOption `locationName:"tunnelOptionSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnConnectionOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnConnectionOptions) GoString() string {
+	return s.String()
+}
+
+// SetEnableAcceleration sets the EnableAcceleration field's value.
+func (s *VpnConnectionOptions) SetEnableAcceleration(v bool) *VpnConnectionOptions {
+	s.EnableAcceleration = &v
+	return s
+}
+
+// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptions) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptions {
+	s.LocalIpv4NetworkCidr = &v
+	return s
+}
+
+// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptions) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptions {
+	s.LocalIpv6NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptions) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptions {
+	s.RemoteIpv4NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptions) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptions {
+	s.RemoteIpv6NetworkCidr = &v
+	return s
+}
+
+// SetStaticRoutesOnly sets the StaticRoutesOnly field's value.
+func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions {
+	s.StaticRoutesOnly = &v
+	return s
+}
+
+// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value.
+func (s *VpnConnectionOptions) SetTunnelInsideIpVersion(v string) *VpnConnectionOptions {
+	s.TunnelInsideIpVersion = &v
+	return s
+}
+
+// SetTunnelOptions sets the TunnelOptions field's value.
+func (s *VpnConnectionOptions) SetTunnelOptions(v []*TunnelOption) *VpnConnectionOptions {
+	s.TunnelOptions = v
+	return s
+}
+
+// Describes VPN connection options.
+type VpnConnectionOptionsSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Indicate whether to enable acceleration for the VPN connection.
+	//
+	// Default: false
+	EnableAcceleration *bool `type:"boolean"`
+
+	// The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection.
+	//
+	// Default: 0.0.0.0/0
+	LocalIpv4NetworkCidr *string `type:"string"`
+
+	// The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection.
+	//
+	// Default: ::/0
+	LocalIpv6NetworkCidr *string `type:"string"`
+
+	// The IPv4 CIDR on the AWS side of the VPN connection.
+	//
+	// Default: 0.0.0.0/0
+	RemoteIpv4NetworkCidr *string `type:"string"`
+
+	// The IPv6 CIDR on the AWS side of the VPN connection.
+	//
+	// Default: ::/0
+	RemoteIpv6NetworkCidr *string `type:"string"`
+
+	// Indicate whether the VPN connection uses static routes only. If you are creating
+	// a VPN connection for a device that does not support BGP, you must specify
+	// true. Use CreateVpnConnectionRoute to create a static route.
+	//
+	// Default: false
+	StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`
+
+	// Indicate whether the VPN tunnels process IPv4 or IPv6 traffic.
+	//
+	// Default: ipv4
+	TunnelInsideIpVersion *string `type:"string" enum:"TunnelInsideIpVersion"`
+
+	// The tunnel options for the VPN connection.
+	TunnelOptions []*VpnTunnelOptionsSpecification `type:"list"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnConnectionOptionsSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnConnectionOptionsSpecification) GoString() string {
+	return s.String()
+}
+
+// SetEnableAcceleration sets the EnableAcceleration field's value.
+func (s *VpnConnectionOptionsSpecification) SetEnableAcceleration(v bool) *VpnConnectionOptionsSpecification {
+	s.EnableAcceleration = &v
+	return s
+}
+
+// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.LocalIpv4NetworkCidr = &v
+	return s
+}
+
+// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.LocalIpv6NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.RemoteIpv4NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.RemoteIpv6NetworkCidr = &v
+	return s
+}
+
+// SetStaticRoutesOnly sets the StaticRoutesOnly field's value.
+func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConnectionOptionsSpecification {
+	s.StaticRoutesOnly = &v
+	return s
+}
+
+// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value.
+func (s *VpnConnectionOptionsSpecification) SetTunnelInsideIpVersion(v string) *VpnConnectionOptionsSpecification {
+	s.TunnelInsideIpVersion = &v
+	return s
+}
+
+// SetTunnelOptions sets the TunnelOptions field's value.
+func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification {
+	s.TunnelOptions = v
+	return s
+}
+
+// Describes a virtual private gateway.
+type VpnGateway struct {
+	_ struct{} `type:"structure"`
+
+	// The private Autonomous System Number (ASN) for the Amazon side of a BGP session.
+	AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"`
+
+	// The Availability Zone where the virtual private gateway was created, if applicable.
+	// This field may be empty or not returned.
+	AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
+
+	// The current state of the virtual private gateway.
+	State *string `locationName:"state" type:"string" enum:"VpnState"`
+
+	// Any tags assigned to the virtual private gateway.
+	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
+
+	// The type of VPN connection the virtual private gateway supports.
+	Type *string `locationName:"type" type:"string" enum:"GatewayType"`
+
+	// Any VPCs attached to the virtual private gateway.
+	VpcAttachments []*VpcAttachment `locationName:"attachments" locationNameList:"item" type:"list"`
+
+	// The ID of the virtual private gateway.
+	VpnGatewayId *string `locationName:"vpnGatewayId" type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnGateway) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnGateway) GoString() string {
+	return s.String()
+}
+
+// SetAmazonSideAsn sets the AmazonSideAsn field's value.
+func (s *VpnGateway) SetAmazonSideAsn(v int64) *VpnGateway {
+	s.AmazonSideAsn = &v
+	return s
+}
+
+// SetAvailabilityZone sets the AvailabilityZone field's value.
+func (s *VpnGateway) SetAvailabilityZone(v string) *VpnGateway {
+	s.AvailabilityZone = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *VpnGateway) SetState(v string) *VpnGateway {
+	s.State = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *VpnGateway) SetTags(v []*Tag) *VpnGateway {
+	s.Tags = v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *VpnGateway) SetType(v string) *VpnGateway {
+	s.Type = &v
+	return s
+}
+
+// SetVpcAttachments sets the VpcAttachments field's value.
+func (s *VpnGateway) SetVpcAttachments(v []*VpcAttachment) *VpnGateway {
+	s.VpcAttachments = v
+	return s
+}
+
+// SetVpnGatewayId sets the VpnGatewayId field's value.
+func (s *VpnGateway) SetVpnGatewayId(v string) *VpnGateway {
+	s.VpnGatewayId = &v
+	return s
+}
+
+// Describes a static route for a VPN connection.
+type VpnStaticRoute struct {
+	_ struct{} `type:"structure"`
+
+	// The CIDR block associated with the local subnet of the customer data center.
+	DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"`
+
+	// Indicates how the routes were provided.
+	Source *string `locationName:"source" type:"string" enum:"VpnStaticRouteSource"`
+
+	// The current state of the static route.
+	State *string `locationName:"state" type:"string" enum:"VpnState"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnStaticRoute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnStaticRoute) GoString() string {
+	return s.String()
+}
+
+// SetDestinationCidrBlock sets the DestinationCidrBlock field's value.
+func (s *VpnStaticRoute) SetDestinationCidrBlock(v string) *VpnStaticRoute {
+	s.DestinationCidrBlock = &v
+	return s
+}
+
+// SetSource sets the Source field's value.
+func (s *VpnStaticRoute) SetSource(v string) *VpnStaticRoute {
+	s.Source = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute {
+	s.State = &v
+	return s
+}
+
+// The tunnel options for a single VPN tunnel.
+type VpnTunnelOptionsSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// The action to take after DPD timeout occurs. Specify restart to restart the
+	// IKE initiation. Specify clear to end the IKE session.
+	//
+	// Valid Values: clear | none | restart
+	//
+	// Default: clear
+	DPDTimeoutAction *string `type:"string"`
+
+	// The number of seconds after which a DPD timeout occurs.
+	//
+	// Constraints: A value between 0 and 30.
+	//
+	// Default: 30
+	DPDTimeoutSeconds *int64 `type:"integer"`
+
+	// The IKE versions that are permitted for the VPN tunnel.
+	//
+	// Valid values: ikev1 | ikev2
+	IKEVersions []*IKEVersionsRequestListValue `locationName:"IKEVersion" locationNameList:"item" type:"list"`
+
+	// One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel
+	// for phase 1 IKE negotiations.
+	//
+	// Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24
+	Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"`
+
+	// One or more encryption algorithms that are permitted for the VPN tunnel for
+	// phase 1 IKE negotiations.
+	//
+	// Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16
+	Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"`
+
+	// One or more integrity algorithms that are permitted for the VPN tunnel for
+	// phase 1 IKE negotiations.
+	//
+	// Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512
+	Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"`
+
+	// The lifetime for phase 1 of the IKE negotiation, in seconds.
+	//
+	// Constraints: A value between 900 and 28,800.
+	//
+	// Default: 28800
+	Phase1LifetimeSeconds *int64 `type:"integer"`
+
+	// One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel
+	// for phase 2 IKE negotiations.
+	//
+	// Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24
+	Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"`
+
+	// One or more encryption algorithms that are permitted for the VPN tunnel for
+	// phase 2 IKE negotiations.
+	//
+	// Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16
+	Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"`
+
+	// One or more integrity algorithms that are permitted for the VPN tunnel for
+	// phase 2 IKE negotiations.
+	//
+	// Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512
+	Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"`
+
+	// The lifetime for phase 2 of the IKE negotiation, in seconds.
+	//
+	// Constraints: A value between 900 and 3,600. The value must be less than the
+	// value for Phase1LifetimeSeconds.
+	//
+	// Default: 3600
+	Phase2LifetimeSeconds *int64 `type:"integer"`
+
+	// The pre-shared key (PSK) to establish initial authentication between the
+	// virtual private gateway and customer gateway.
+	//
+	// Constraints: Allowed characters are alphanumeric characters, periods (.),
+	// and underscores (_). Must be between 8 and 64 characters in length and cannot
+	// start with zero (0).
+	PreSharedKey *string `type:"string"`
+
+	// The percentage of the rekey window (determined by RekeyMarginTimeSeconds)
+	// during which the rekey time is randomly selected.
+	//
+	// Constraints: A value between 0 and 100.
+	//
+	// Default: 100
+	RekeyFuzzPercentage *int64 `type:"integer"`
+
+	// The margin time, in seconds, before the phase 2 lifetime expires, during
+	// which the AWS side of the VPN connection performs an IKE rekey. The exact
+	// time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.
+	//
+	// Constraints: A value between 60 and half of Phase2LifetimeSeconds.
+	//
+	// Default: 540
+	RekeyMarginTimeSeconds *int64 `type:"integer"`
+
+	// The number of packets in an IKE replay window.
+	//
+	// Constraints: A value between 64 and 2048.
+	//
+	// Default: 1024
+	ReplayWindowSize *int64 `type:"integer"`
+
+	// The action to take when establishing the tunnel for the VPN connection.
+	// By default, your customer gateway device must initiate the IKE negotiation
+	// and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation.
+	//
+	// Valid Values: add | start
+	//
+	// Default: add
+	StartupAction *string `type:"string"`
+
+	// The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks
+	// must be unique across all VPN connections that use the same virtual private
+	// gateway.
+	//
+	// Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following
+	// CIDR blocks are reserved and cannot be used:
+	//
+	//    * 169.254.0.0/30
+	//
+	//    * 169.254.1.0/30
+	//
+	//    * 169.254.2.0/30
+	//
+	//    * 169.254.3.0/30
+	//
+	//    * 169.254.4.0/30
+	//
+	//    * 169.254.5.0/30
+	//
+	//    * 169.254.169.252/30
+	TunnelInsideCidr *string `type:"string"`
+
+	// The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks
+	// must be unique across all VPN connections that use the same transit gateway.
+	//
+	// Constraints: A size /126 CIDR block from the local fd00::/8 range.
+	TunnelInsideIpv6Cidr *string `type:"string"`
+}
+
+// String returns the string representation of s, rendered with awsutil.Prettify.
+func (s VpnTunnelOptionsSpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the same representation as String; it satisfies fmt.GoStringer.
+func (s VpnTunnelOptionsSpecification) GoString() string {
+	return s.String()
+}
+
+// SetDPDTimeoutAction sets the DPDTimeoutAction field's value.
+func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *VpnTunnelOptionsSpecification {
+	s.DPDTimeoutAction = &v
+	return s
+}
+
+// SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value.
+func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *VpnTunnelOptionsSpecification {
+	s.DPDTimeoutSeconds = &v
+	return s
+}
+
+// SetIKEVersions sets the IKEVersions field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetIKEVersions(v []*IKEVersionsRequestListValue) *VpnTunnelOptionsSpecification {
+	s.IKEVersions = v
+	return s
+}
+
+// SetPhase1DHGroupNumbers sets the Phase1DHGroupNumbers field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase1DHGroupNumbers(v []*Phase1DHGroupNumbersRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase1DHGroupNumbers = v
+	return s
+}
+
+// SetPhase1EncryptionAlgorithms sets the Phase1EncryptionAlgorithms field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase1EncryptionAlgorithms(v []*Phase1EncryptionAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase1EncryptionAlgorithms = v
+	return s
+}
+
+// SetPhase1IntegrityAlgorithms sets the Phase1IntegrityAlgorithms field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase1IntegrityAlgorithms(v []*Phase1IntegrityAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase1IntegrityAlgorithms = v
+	return s
+}
+
+// SetPhase1LifetimeSeconds sets the Phase1LifetimeSeconds field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase1LifetimeSeconds(v int64) *VpnTunnelOptionsSpecification {
+	s.Phase1LifetimeSeconds = &v
+	return s
+}
+
+// SetPhase2DHGroupNumbers sets the Phase2DHGroupNumbers field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase2DHGroupNumbers(v []*Phase2DHGroupNumbersRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase2DHGroupNumbers = v
+	return s
+}
+
+// SetPhase2EncryptionAlgorithms sets the Phase2EncryptionAlgorithms field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase2EncryptionAlgorithms(v []*Phase2EncryptionAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase2EncryptionAlgorithms = v
+	return s
+}
+
+// SetPhase2IntegrityAlgorithms sets the Phase2IntegrityAlgorithms field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase2IntegrityAlgorithms(v []*Phase2IntegrityAlgorithmsRequestListValue) *VpnTunnelOptionsSpecification {
+	s.Phase2IntegrityAlgorithms = v
+	return s
+}
+
+// SetPhase2LifetimeSeconds sets the Phase2LifetimeSeconds field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPhase2LifetimeSeconds(v int64) *VpnTunnelOptionsSpecification {
+	s.Phase2LifetimeSeconds = &v
+	return s
+}
+
+// SetPreSharedKey sets the PreSharedKey field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetPreSharedKey(v string) *VpnTunnelOptionsSpecification {
+	s.PreSharedKey = &v
+	return s
+}
+
+// SetRekeyFuzzPercentage sets the RekeyFuzzPercentage field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetRekeyFuzzPercentage(v int64) *VpnTunnelOptionsSpecification {
+	s.RekeyFuzzPercentage = &v
+	return s
+}
+
+// SetRekeyMarginTimeSeconds sets the RekeyMarginTimeSeconds field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetRekeyMarginTimeSeconds(v int64) *VpnTunnelOptionsSpecification {
+	s.RekeyMarginTimeSeconds = &v
+	return s
+}
+
+// SetReplayWindowSize sets the ReplayWindowSize field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *VpnTunnelOptionsSpecification {
+	s.ReplayWindowSize = &v
+	return s
+}
+
+// SetStartupAction sets the StartupAction field's value and returns s for chaining.
+func (s *VpnTunnelOptionsSpecification) SetStartupAction(v string) *VpnTunnelOptionsSpecification {
+	s.StartupAction = &v
+	return s
+}
+
+// SetTunnelInsideCidr sets the TunnelInsideCidr field's value.
+func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *VpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + +type WithdrawByoipCidrInput struct { + _ struct{} `type:"structure"` + + // The address range, in CIDR notation. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s WithdrawByoipCidrInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WithdrawByoipCidrInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WithdrawByoipCidrInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WithdrawByoipCidrInput"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *WithdrawByoipCidrInput) SetCidr(v string) *WithdrawByoipCidrInput { + s.Cidr = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *WithdrawByoipCidrInput) SetDryRun(v bool) *WithdrawByoipCidrInput { + s.DryRun = &v + return s +} + +type WithdrawByoipCidrOutput struct { + _ struct{} `type:"structure"` + + // Information about the address pool. 
+ ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` +} + +// String returns the string representation +func (s WithdrawByoipCidrOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WithdrawByoipCidrOutput) GoString() string { + return s.String() +} + +// SetByoipCidr sets the ByoipCidr field's value. +func (s *WithdrawByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *WithdrawByoipCidrOutput { + s.ByoipCidr = v + return s +} + +const ( + // AccountAttributeNameSupportedPlatforms is a AccountAttributeName enum value + AccountAttributeNameSupportedPlatforms = "supported-platforms" + + // AccountAttributeNameDefaultVpc is a AccountAttributeName enum value + AccountAttributeNameDefaultVpc = "default-vpc" +) + +// AccountAttributeName_Values returns all elements of the AccountAttributeName enum +func AccountAttributeName_Values() []string { + return []string{ + AccountAttributeNameSupportedPlatforms, + AccountAttributeNameDefaultVpc, + } +} + +const ( + // ActivityStatusError is a ActivityStatus enum value + ActivityStatusError = "error" + + // ActivityStatusPendingFulfillment is a ActivityStatus enum value + ActivityStatusPendingFulfillment = "pending_fulfillment" + + // ActivityStatusPendingTermination is a ActivityStatus enum value + ActivityStatusPendingTermination = "pending_termination" + + // ActivityStatusFulfilled is a ActivityStatus enum value + ActivityStatusFulfilled = "fulfilled" +) + +// ActivityStatus_Values returns all elements of the ActivityStatus enum +func ActivityStatus_Values() []string { + return []string{ + ActivityStatusError, + ActivityStatusPendingFulfillment, + ActivityStatusPendingTermination, + ActivityStatusFulfilled, + } +} + +const ( + // AffinityDefault is a Affinity enum value + AffinityDefault = "default" + + // AffinityHost is a Affinity enum value + AffinityHost = "host" +) + +// Affinity_Values returns all elements of the Affinity enum +func Affinity_Values() 
[]string { + return []string{ + AffinityDefault, + AffinityHost, + } +} + +const ( + // AllocationStateAvailable is a AllocationState enum value + AllocationStateAvailable = "available" + + // AllocationStateUnderAssessment is a AllocationState enum value + AllocationStateUnderAssessment = "under-assessment" + + // AllocationStatePermanentFailure is a AllocationState enum value + AllocationStatePermanentFailure = "permanent-failure" + + // AllocationStateReleased is a AllocationState enum value + AllocationStateReleased = "released" + + // AllocationStateReleasedPermanentFailure is a AllocationState enum value + AllocationStateReleasedPermanentFailure = "released-permanent-failure" + + // AllocationStatePending is a AllocationState enum value + AllocationStatePending = "pending" +) + +// AllocationState_Values returns all elements of the AllocationState enum +func AllocationState_Values() []string { + return []string{ + AllocationStateAvailable, + AllocationStateUnderAssessment, + AllocationStatePermanentFailure, + AllocationStateReleased, + AllocationStateReleasedPermanentFailure, + AllocationStatePending, + } +} + +const ( + // AllocationStrategyLowestPrice is a AllocationStrategy enum value + AllocationStrategyLowestPrice = "lowestPrice" + + // AllocationStrategyDiversified is a AllocationStrategy enum value + AllocationStrategyDiversified = "diversified" + + // AllocationStrategyCapacityOptimized is a AllocationStrategy enum value + AllocationStrategyCapacityOptimized = "capacityOptimized" +) + +// AllocationStrategy_Values returns all elements of the AllocationStrategy enum +func AllocationStrategy_Values() []string { + return []string{ + AllocationStrategyLowestPrice, + AllocationStrategyDiversified, + AllocationStrategyCapacityOptimized, + } +} + +const ( + // AllowsMultipleInstanceTypesOn is a AllowsMultipleInstanceTypes enum value + AllowsMultipleInstanceTypesOn = "on" + + // AllowsMultipleInstanceTypesOff is a AllowsMultipleInstanceTypes enum value + 
AllowsMultipleInstanceTypesOff = "off" +) + +// AllowsMultipleInstanceTypes_Values returns all elements of the AllowsMultipleInstanceTypes enum +func AllowsMultipleInstanceTypes_Values() []string { + return []string{ + AllowsMultipleInstanceTypesOn, + AllowsMultipleInstanceTypesOff, + } +} + +const ( + // ApplianceModeSupportValueEnable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueEnable = "enable" + + // ApplianceModeSupportValueDisable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueDisable = "disable" +) + +// ApplianceModeSupportValue_Values returns all elements of the ApplianceModeSupportValue enum +func ApplianceModeSupportValue_Values() []string { + return []string{ + ApplianceModeSupportValueEnable, + ApplianceModeSupportValueDisable, + } +} + +const ( + // ArchitectureTypeI386 is a ArchitectureType enum value + ArchitectureTypeI386 = "i386" + + // ArchitectureTypeX8664 is a ArchitectureType enum value + ArchitectureTypeX8664 = "x86_64" + + // ArchitectureTypeArm64 is a ArchitectureType enum value + ArchitectureTypeArm64 = "arm64" +) + +// ArchitectureType_Values returns all elements of the ArchitectureType enum +func ArchitectureType_Values() []string { + return []string{ + ArchitectureTypeI386, + ArchitectureTypeX8664, + ArchitectureTypeArm64, + } +} + +const ( + // ArchitectureValuesI386 is a ArchitectureValues enum value + ArchitectureValuesI386 = "i386" + + // ArchitectureValuesX8664 is a ArchitectureValues enum value + ArchitectureValuesX8664 = "x86_64" + + // ArchitectureValuesArm64 is a ArchitectureValues enum value + ArchitectureValuesArm64 = "arm64" +) + +// ArchitectureValues_Values returns all elements of the ArchitectureValues enum +func ArchitectureValues_Values() []string { + return []string{ + ArchitectureValuesI386, + ArchitectureValuesX8664, + ArchitectureValuesArm64, + } +} + +const ( + // AssociatedNetworkTypeVpc is a AssociatedNetworkType enum value + AssociatedNetworkTypeVpc = "vpc" +) + +// 
AssociatedNetworkType_Values returns all elements of the AssociatedNetworkType enum +func AssociatedNetworkType_Values() []string { + return []string{ + AssociatedNetworkTypeVpc, + } +} + +const ( + // AssociationStatusCodeAssociating is a AssociationStatusCode enum value + AssociationStatusCodeAssociating = "associating" + + // AssociationStatusCodeAssociated is a AssociationStatusCode enum value + AssociationStatusCodeAssociated = "associated" + + // AssociationStatusCodeAssociationFailed is a AssociationStatusCode enum value + AssociationStatusCodeAssociationFailed = "association-failed" + + // AssociationStatusCodeDisassociating is a AssociationStatusCode enum value + AssociationStatusCodeDisassociating = "disassociating" + + // AssociationStatusCodeDisassociated is a AssociationStatusCode enum value + AssociationStatusCodeDisassociated = "disassociated" +) + +// AssociationStatusCode_Values returns all elements of the AssociationStatusCode enum +func AssociationStatusCode_Values() []string { + return []string{ + AssociationStatusCodeAssociating, + AssociationStatusCodeAssociated, + AssociationStatusCodeAssociationFailed, + AssociationStatusCodeDisassociating, + AssociationStatusCodeDisassociated, + } +} + +const ( + // AttachmentStatusAttaching is a AttachmentStatus enum value + AttachmentStatusAttaching = "attaching" + + // AttachmentStatusAttached is a AttachmentStatus enum value + AttachmentStatusAttached = "attached" + + // AttachmentStatusDetaching is a AttachmentStatus enum value + AttachmentStatusDetaching = "detaching" + + // AttachmentStatusDetached is a AttachmentStatus enum value + AttachmentStatusDetached = "detached" +) + +// AttachmentStatus_Values returns all elements of the AttachmentStatus enum +func AttachmentStatus_Values() []string { + return []string{ + AttachmentStatusAttaching, + AttachmentStatusAttached, + AttachmentStatusDetaching, + AttachmentStatusDetached, + } +} + +const ( + // AutoAcceptSharedAttachmentsValueEnable is a 
AutoAcceptSharedAttachmentsValue enum value + AutoAcceptSharedAttachmentsValueEnable = "enable" + + // AutoAcceptSharedAttachmentsValueDisable is a AutoAcceptSharedAttachmentsValue enum value + AutoAcceptSharedAttachmentsValueDisable = "disable" +) + +// AutoAcceptSharedAttachmentsValue_Values returns all elements of the AutoAcceptSharedAttachmentsValue enum +func AutoAcceptSharedAttachmentsValue_Values() []string { + return []string{ + AutoAcceptSharedAttachmentsValueEnable, + AutoAcceptSharedAttachmentsValueDisable, + } +} + +const ( + // AutoPlacementOn is a AutoPlacement enum value + AutoPlacementOn = "on" + + // AutoPlacementOff is a AutoPlacement enum value + AutoPlacementOff = "off" +) + +// AutoPlacement_Values returns all elements of the AutoPlacement enum +func AutoPlacement_Values() []string { + return []string{ + AutoPlacementOn, + AutoPlacementOff, + } +} + +const ( + // AvailabilityZoneOptInStatusOptInNotRequired is a AvailabilityZoneOptInStatus enum value + AvailabilityZoneOptInStatusOptInNotRequired = "opt-in-not-required" + + // AvailabilityZoneOptInStatusOptedIn is a AvailabilityZoneOptInStatus enum value + AvailabilityZoneOptInStatusOptedIn = "opted-in" + + // AvailabilityZoneOptInStatusNotOptedIn is a AvailabilityZoneOptInStatus enum value + AvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" +) + +// AvailabilityZoneOptInStatus_Values returns all elements of the AvailabilityZoneOptInStatus enum +func AvailabilityZoneOptInStatus_Values() []string { + return []string{ + AvailabilityZoneOptInStatusOptInNotRequired, + AvailabilityZoneOptInStatusOptedIn, + AvailabilityZoneOptInStatusNotOptedIn, + } +} + +const ( + // AvailabilityZoneStateAvailable is a AvailabilityZoneState enum value + AvailabilityZoneStateAvailable = "available" + + // AvailabilityZoneStateInformation is a AvailabilityZoneState enum value + AvailabilityZoneStateInformation = "information" + + // AvailabilityZoneStateImpaired is a AvailabilityZoneState enum value + 
AvailabilityZoneStateImpaired = "impaired" + + // AvailabilityZoneStateUnavailable is a AvailabilityZoneState enum value + AvailabilityZoneStateUnavailable = "unavailable" +) + +// AvailabilityZoneState_Values returns all elements of the AvailabilityZoneState enum +func AvailabilityZoneState_Values() []string { + return []string{ + AvailabilityZoneStateAvailable, + AvailabilityZoneStateInformation, + AvailabilityZoneStateImpaired, + AvailabilityZoneStateUnavailable, + } +} + +const ( + // BatchStateSubmitted is a BatchState enum value + BatchStateSubmitted = "submitted" + + // BatchStateActive is a BatchState enum value + BatchStateActive = "active" + + // BatchStateCancelled is a BatchState enum value + BatchStateCancelled = "cancelled" + + // BatchStateFailed is a BatchState enum value + BatchStateFailed = "failed" + + // BatchStateCancelledRunning is a BatchState enum value + BatchStateCancelledRunning = "cancelled_running" + + // BatchStateCancelledTerminating is a BatchState enum value + BatchStateCancelledTerminating = "cancelled_terminating" + + // BatchStateModifying is a BatchState enum value + BatchStateModifying = "modifying" +) + +// BatchState_Values returns all elements of the BatchState enum +func BatchState_Values() []string { + return []string{ + BatchStateSubmitted, + BatchStateActive, + BatchStateCancelled, + BatchStateFailed, + BatchStateCancelledRunning, + BatchStateCancelledTerminating, + BatchStateModifying, + } +} + +const ( + // BundleTaskStatePending is a BundleTaskState enum value + BundleTaskStatePending = "pending" + + // BundleTaskStateWaitingForShutdown is a BundleTaskState enum value + BundleTaskStateWaitingForShutdown = "waiting-for-shutdown" + + // BundleTaskStateBundling is a BundleTaskState enum value + BundleTaskStateBundling = "bundling" + + // BundleTaskStateStoring is a BundleTaskState enum value + BundleTaskStateStoring = "storing" + + // BundleTaskStateCancelling is a BundleTaskState enum value + BundleTaskStateCancelling = 
"cancelling" + + // BundleTaskStateComplete is a BundleTaskState enum value + BundleTaskStateComplete = "complete" + + // BundleTaskStateFailed is a BundleTaskState enum value + BundleTaskStateFailed = "failed" +) + +// BundleTaskState_Values returns all elements of the BundleTaskState enum +func BundleTaskState_Values() []string { + return []string{ + BundleTaskStatePending, + BundleTaskStateWaitingForShutdown, + BundleTaskStateBundling, + BundleTaskStateStoring, + BundleTaskStateCancelling, + BundleTaskStateComplete, + BundleTaskStateFailed, + } +} + +const ( + // ByoipCidrStateAdvertised is a ByoipCidrState enum value + ByoipCidrStateAdvertised = "advertised" + + // ByoipCidrStateDeprovisioned is a ByoipCidrState enum value + ByoipCidrStateDeprovisioned = "deprovisioned" + + // ByoipCidrStateFailedDeprovision is a ByoipCidrState enum value + ByoipCidrStateFailedDeprovision = "failed-deprovision" + + // ByoipCidrStateFailedProvision is a ByoipCidrState enum value + ByoipCidrStateFailedProvision = "failed-provision" + + // ByoipCidrStatePendingDeprovision is a ByoipCidrState enum value + ByoipCidrStatePendingDeprovision = "pending-deprovision" + + // ByoipCidrStatePendingProvision is a ByoipCidrState enum value + ByoipCidrStatePendingProvision = "pending-provision" + + // ByoipCidrStateProvisioned is a ByoipCidrState enum value + ByoipCidrStateProvisioned = "provisioned" + + // ByoipCidrStateProvisionedNotPubliclyAdvertisable is a ByoipCidrState enum value + ByoipCidrStateProvisionedNotPubliclyAdvertisable = "provisioned-not-publicly-advertisable" +) + +// ByoipCidrState_Values returns all elements of the ByoipCidrState enum +func ByoipCidrState_Values() []string { + return []string{ + ByoipCidrStateAdvertised, + ByoipCidrStateDeprovisioned, + ByoipCidrStateFailedDeprovision, + ByoipCidrStateFailedProvision, + ByoipCidrStatePendingDeprovision, + ByoipCidrStatePendingProvision, + ByoipCidrStateProvisioned, + ByoipCidrStateProvisionedNotPubliclyAdvertisable, + } +} 
+ +const ( + // CancelBatchErrorCodeFleetRequestIdDoesNotExist is a CancelBatchErrorCode enum value + CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" + + // CancelBatchErrorCodeFleetRequestIdMalformed is a CancelBatchErrorCode enum value + CancelBatchErrorCodeFleetRequestIdMalformed = "fleetRequestIdMalformed" + + // CancelBatchErrorCodeFleetRequestNotInCancellableState is a CancelBatchErrorCode enum value + CancelBatchErrorCodeFleetRequestNotInCancellableState = "fleetRequestNotInCancellableState" + + // CancelBatchErrorCodeUnexpectedError is a CancelBatchErrorCode enum value + CancelBatchErrorCodeUnexpectedError = "unexpectedError" +) + +// CancelBatchErrorCode_Values returns all elements of the CancelBatchErrorCode enum +func CancelBatchErrorCode_Values() []string { + return []string{ + CancelBatchErrorCodeFleetRequestIdDoesNotExist, + CancelBatchErrorCodeFleetRequestIdMalformed, + CancelBatchErrorCodeFleetRequestNotInCancellableState, + CancelBatchErrorCodeUnexpectedError, + } +} + +const ( + // CancelSpotInstanceRequestStateActive is a CancelSpotInstanceRequestState enum value + CancelSpotInstanceRequestStateActive = "active" + + // CancelSpotInstanceRequestStateOpen is a CancelSpotInstanceRequestState enum value + CancelSpotInstanceRequestStateOpen = "open" + + // CancelSpotInstanceRequestStateClosed is a CancelSpotInstanceRequestState enum value + CancelSpotInstanceRequestStateClosed = "closed" + + // CancelSpotInstanceRequestStateCancelled is a CancelSpotInstanceRequestState enum value + CancelSpotInstanceRequestStateCancelled = "cancelled" + + // CancelSpotInstanceRequestStateCompleted is a CancelSpotInstanceRequestState enum value + CancelSpotInstanceRequestStateCompleted = "completed" +) + +// CancelSpotInstanceRequestState_Values returns all elements of the CancelSpotInstanceRequestState enum +func CancelSpotInstanceRequestState_Values() []string { + return []string{ + CancelSpotInstanceRequestStateActive, + 
CancelSpotInstanceRequestStateOpen, + CancelSpotInstanceRequestStateClosed, + CancelSpotInstanceRequestStateCancelled, + CancelSpotInstanceRequestStateCompleted, + } +} + +const ( + // CapacityReservationInstancePlatformLinuxUnix is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformLinuxUnix = "Linux/UNIX" + + // CapacityReservationInstancePlatformRedHatEnterpriseLinux is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformRedHatEnterpriseLinux = "Red Hat Enterprise Linux" + + // CapacityReservationInstancePlatformSuselinux is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformSuselinux = "SUSE Linux" + + // CapacityReservationInstancePlatformWindows is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformWindows = "Windows" + + // CapacityReservationInstancePlatformWindowswithSqlserver is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformWindowswithSqlserver = "Windows with SQL Server" + + // CapacityReservationInstancePlatformWindowswithSqlserverEnterprise is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformWindowswithSqlserverEnterprise = "Windows with SQL Server Enterprise" + + // CapacityReservationInstancePlatformWindowswithSqlserverStandard is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformWindowswithSqlserverStandard = "Windows with SQL Server Standard" + + // CapacityReservationInstancePlatformWindowswithSqlserverWeb is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformWindowswithSqlserverWeb = "Windows with SQL Server Web" + + // CapacityReservationInstancePlatformLinuxwithSqlserverStandard is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformLinuxwithSqlserverStandard = "Linux with SQL Server Standard" + + // 
CapacityReservationInstancePlatformLinuxwithSqlserverWeb is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformLinuxwithSqlserverWeb = "Linux with SQL Server Web" + + // CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise is a CapacityReservationInstancePlatform enum value + CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise = "Linux with SQL Server Enterprise" +) + +// CapacityReservationInstancePlatform_Values returns all elements of the CapacityReservationInstancePlatform enum +func CapacityReservationInstancePlatform_Values() []string { + return []string{ + CapacityReservationInstancePlatformLinuxUnix, + CapacityReservationInstancePlatformRedHatEnterpriseLinux, + CapacityReservationInstancePlatformSuselinux, + CapacityReservationInstancePlatformWindows, + CapacityReservationInstancePlatformWindowswithSqlserver, + CapacityReservationInstancePlatformWindowswithSqlserverEnterprise, + CapacityReservationInstancePlatformWindowswithSqlserverStandard, + CapacityReservationInstancePlatformWindowswithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverStandard, + CapacityReservationInstancePlatformLinuxwithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise, + } +} + +const ( + // CapacityReservationPreferenceOpen is a CapacityReservationPreference enum value + CapacityReservationPreferenceOpen = "open" + + // CapacityReservationPreferenceNone is a CapacityReservationPreference enum value + CapacityReservationPreferenceNone = "none" +) + +// CapacityReservationPreference_Values returns all elements of the CapacityReservationPreference enum +func CapacityReservationPreference_Values() []string { + return []string{ + CapacityReservationPreferenceOpen, + CapacityReservationPreferenceNone, + } +} + +const ( + // CapacityReservationStateActive is a CapacityReservationState enum value + CapacityReservationStateActive = "active" + + // CapacityReservationStateExpired is a 
CapacityReservationState enum value + CapacityReservationStateExpired = "expired" + + // CapacityReservationStateCancelled is a CapacityReservationState enum value + CapacityReservationStateCancelled = "cancelled" + + // CapacityReservationStatePending is a CapacityReservationState enum value + CapacityReservationStatePending = "pending" + + // CapacityReservationStateFailed is a CapacityReservationState enum value + CapacityReservationStateFailed = "failed" +) + +// CapacityReservationState_Values returns all elements of the CapacityReservationState enum +func CapacityReservationState_Values() []string { + return []string{ + CapacityReservationStateActive, + CapacityReservationStateExpired, + CapacityReservationStateCancelled, + CapacityReservationStatePending, + CapacityReservationStateFailed, + } +} + +const ( + // CapacityReservationTenancyDefault is a CapacityReservationTenancy enum value + CapacityReservationTenancyDefault = "default" + + // CapacityReservationTenancyDedicated is a CapacityReservationTenancy enum value + CapacityReservationTenancyDedicated = "dedicated" +) + +// CapacityReservationTenancy_Values returns all elements of the CapacityReservationTenancy enum +func CapacityReservationTenancy_Values() []string { + return []string{ + CapacityReservationTenancyDefault, + CapacityReservationTenancyDedicated, + } +} + +const ( + // CarrierGatewayStatePending is a CarrierGatewayState enum value + CarrierGatewayStatePending = "pending" + + // CarrierGatewayStateAvailable is a CarrierGatewayState enum value + CarrierGatewayStateAvailable = "available" + + // CarrierGatewayStateDeleting is a CarrierGatewayState enum value + CarrierGatewayStateDeleting = "deleting" + + // CarrierGatewayStateDeleted is a CarrierGatewayState enum value + CarrierGatewayStateDeleted = "deleted" +) + +// CarrierGatewayState_Values returns all elements of the CarrierGatewayState enum +func CarrierGatewayState_Values() []string { + return []string{ + CarrierGatewayStatePending, + 
CarrierGatewayStateAvailable, + CarrierGatewayStateDeleting, + CarrierGatewayStateDeleted, + } +} + +const ( + // ClientCertificateRevocationListStatusCodePending is a ClientCertificateRevocationListStatusCode enum value + ClientCertificateRevocationListStatusCodePending = "pending" + + // ClientCertificateRevocationListStatusCodeActive is a ClientCertificateRevocationListStatusCode enum value + ClientCertificateRevocationListStatusCodeActive = "active" +) + +// ClientCertificateRevocationListStatusCode_Values returns all elements of the ClientCertificateRevocationListStatusCode enum +func ClientCertificateRevocationListStatusCode_Values() []string { + return []string{ + ClientCertificateRevocationListStatusCodePending, + ClientCertificateRevocationListStatusCodeActive, + } +} + +const ( + // ClientVpnAuthenticationTypeCertificateAuthentication is a ClientVpnAuthenticationType enum value + ClientVpnAuthenticationTypeCertificateAuthentication = "certificate-authentication" + + // ClientVpnAuthenticationTypeDirectoryServiceAuthentication is a ClientVpnAuthenticationType enum value + ClientVpnAuthenticationTypeDirectoryServiceAuthentication = "directory-service-authentication" + + // ClientVpnAuthenticationTypeFederatedAuthentication is a ClientVpnAuthenticationType enum value + ClientVpnAuthenticationTypeFederatedAuthentication = "federated-authentication" +) + +// ClientVpnAuthenticationType_Values returns all elements of the ClientVpnAuthenticationType enum +func ClientVpnAuthenticationType_Values() []string { + return []string{ + ClientVpnAuthenticationTypeCertificateAuthentication, + ClientVpnAuthenticationTypeDirectoryServiceAuthentication, + ClientVpnAuthenticationTypeFederatedAuthentication, + } +} + +const ( + // ClientVpnAuthorizationRuleStatusCodeAuthorizing is a ClientVpnAuthorizationRuleStatusCode enum value + ClientVpnAuthorizationRuleStatusCodeAuthorizing = "authorizing" + + // ClientVpnAuthorizationRuleStatusCodeActive is a 
ClientVpnAuthorizationRuleStatusCode enum value + ClientVpnAuthorizationRuleStatusCodeActive = "active" + + // ClientVpnAuthorizationRuleStatusCodeFailed is a ClientVpnAuthorizationRuleStatusCode enum value + ClientVpnAuthorizationRuleStatusCodeFailed = "failed" + + // ClientVpnAuthorizationRuleStatusCodeRevoking is a ClientVpnAuthorizationRuleStatusCode enum value + ClientVpnAuthorizationRuleStatusCodeRevoking = "revoking" +) + +// ClientVpnAuthorizationRuleStatusCode_Values returns all elements of the ClientVpnAuthorizationRuleStatusCode enum +func ClientVpnAuthorizationRuleStatusCode_Values() []string { + return []string{ + ClientVpnAuthorizationRuleStatusCodeAuthorizing, + ClientVpnAuthorizationRuleStatusCodeActive, + ClientVpnAuthorizationRuleStatusCodeFailed, + ClientVpnAuthorizationRuleStatusCodeRevoking, + } +} + +const ( + // ClientVpnConnectionStatusCodeActive is a ClientVpnConnectionStatusCode enum value + ClientVpnConnectionStatusCodeActive = "active" + + // ClientVpnConnectionStatusCodeFailedToTerminate is a ClientVpnConnectionStatusCode enum value + ClientVpnConnectionStatusCodeFailedToTerminate = "failed-to-terminate" + + // ClientVpnConnectionStatusCodeTerminating is a ClientVpnConnectionStatusCode enum value + ClientVpnConnectionStatusCodeTerminating = "terminating" + + // ClientVpnConnectionStatusCodeTerminated is a ClientVpnConnectionStatusCode enum value + ClientVpnConnectionStatusCodeTerminated = "terminated" +) + +// ClientVpnConnectionStatusCode_Values returns all elements of the ClientVpnConnectionStatusCode enum +func ClientVpnConnectionStatusCode_Values() []string { + return []string{ + ClientVpnConnectionStatusCodeActive, + ClientVpnConnectionStatusCodeFailedToTerminate, + ClientVpnConnectionStatusCodeTerminating, + ClientVpnConnectionStatusCodeTerminated, + } +} + +const ( + // ClientVpnEndpointAttributeStatusCodeApplying is a ClientVpnEndpointAttributeStatusCode enum value + ClientVpnEndpointAttributeStatusCodeApplying = "applying" + + 
// ClientVpnEndpointAttributeStatusCodeApplied is a ClientVpnEndpointAttributeStatusCode enum value + ClientVpnEndpointAttributeStatusCodeApplied = "applied" +) + +// ClientVpnEndpointAttributeStatusCode_Values returns all elements of the ClientVpnEndpointAttributeStatusCode enum +func ClientVpnEndpointAttributeStatusCode_Values() []string { + return []string{ + ClientVpnEndpointAttributeStatusCodeApplying, + ClientVpnEndpointAttributeStatusCodeApplied, + } +} + +const ( + // ClientVpnEndpointStatusCodePendingAssociate is a ClientVpnEndpointStatusCode enum value + ClientVpnEndpointStatusCodePendingAssociate = "pending-associate" + + // ClientVpnEndpointStatusCodeAvailable is a ClientVpnEndpointStatusCode enum value + ClientVpnEndpointStatusCodeAvailable = "available" + + // ClientVpnEndpointStatusCodeDeleting is a ClientVpnEndpointStatusCode enum value + ClientVpnEndpointStatusCodeDeleting = "deleting" + + // ClientVpnEndpointStatusCodeDeleted is a ClientVpnEndpointStatusCode enum value + ClientVpnEndpointStatusCodeDeleted = "deleted" +) + +// ClientVpnEndpointStatusCode_Values returns all elements of the ClientVpnEndpointStatusCode enum +func ClientVpnEndpointStatusCode_Values() []string { + return []string{ + ClientVpnEndpointStatusCodePendingAssociate, + ClientVpnEndpointStatusCodeAvailable, + ClientVpnEndpointStatusCodeDeleting, + ClientVpnEndpointStatusCodeDeleted, + } +} + +const ( + // ClientVpnRouteStatusCodeCreating is a ClientVpnRouteStatusCode enum value + ClientVpnRouteStatusCodeCreating = "creating" + + // ClientVpnRouteStatusCodeActive is a ClientVpnRouteStatusCode enum value + ClientVpnRouteStatusCodeActive = "active" + + // ClientVpnRouteStatusCodeFailed is a ClientVpnRouteStatusCode enum value + ClientVpnRouteStatusCodeFailed = "failed" + + // ClientVpnRouteStatusCodeDeleting is a ClientVpnRouteStatusCode enum value + ClientVpnRouteStatusCodeDeleting = "deleting" +) + +// ClientVpnRouteStatusCode_Values returns all elements of the 
ClientVpnRouteStatusCode enum +func ClientVpnRouteStatusCode_Values() []string { + return []string{ + ClientVpnRouteStatusCodeCreating, + ClientVpnRouteStatusCodeActive, + ClientVpnRouteStatusCodeFailed, + ClientVpnRouteStatusCodeDeleting, + } +} + +const ( + // ConnectionNotificationStateEnabled is a ConnectionNotificationState enum value + ConnectionNotificationStateEnabled = "Enabled" + + // ConnectionNotificationStateDisabled is a ConnectionNotificationState enum value + ConnectionNotificationStateDisabled = "Disabled" +) + +// ConnectionNotificationState_Values returns all elements of the ConnectionNotificationState enum +func ConnectionNotificationState_Values() []string { + return []string{ + ConnectionNotificationStateEnabled, + ConnectionNotificationStateDisabled, + } +} + +const ( + // ConnectionNotificationTypeTopic is a ConnectionNotificationType enum value + ConnectionNotificationTypeTopic = "Topic" +) + +// ConnectionNotificationType_Values returns all elements of the ConnectionNotificationType enum +func ConnectionNotificationType_Values() []string { + return []string{ + ConnectionNotificationTypeTopic, + } +} + +const ( + // ContainerFormatOva is a ContainerFormat enum value + ContainerFormatOva = "ova" +) + +// ContainerFormat_Values returns all elements of the ContainerFormat enum +func ContainerFormat_Values() []string { + return []string{ + ContainerFormatOva, + } +} + +const ( + // ConversionTaskStateActive is a ConversionTaskState enum value + ConversionTaskStateActive = "active" + + // ConversionTaskStateCancelling is a ConversionTaskState enum value + ConversionTaskStateCancelling = "cancelling" + + // ConversionTaskStateCancelled is a ConversionTaskState enum value + ConversionTaskStateCancelled = "cancelled" + + // ConversionTaskStateCompleted is a ConversionTaskState enum value + ConversionTaskStateCompleted = "completed" +) + +// ConversionTaskState_Values returns all elements of the ConversionTaskState enum +func 
ConversionTaskState_Values() []string { + return []string{ + ConversionTaskStateActive, + ConversionTaskStateCancelling, + ConversionTaskStateCancelled, + ConversionTaskStateCompleted, + } +} + +const ( + // CopyTagsFromSourceVolume is a CopyTagsFromSource enum value + CopyTagsFromSourceVolume = "volume" +) + +// CopyTagsFromSource_Values returns all elements of the CopyTagsFromSource enum +func CopyTagsFromSource_Values() []string { + return []string{ + CopyTagsFromSourceVolume, + } +} + +const ( + // CurrencyCodeValuesUsd is a CurrencyCodeValues enum value + CurrencyCodeValuesUsd = "USD" +) + +// CurrencyCodeValues_Values returns all elements of the CurrencyCodeValues enum +func CurrencyCodeValues_Values() []string { + return []string{ + CurrencyCodeValuesUsd, + } +} + +const ( + // DatafeedSubscriptionStateActive is a DatafeedSubscriptionState enum value + DatafeedSubscriptionStateActive = "Active" + + // DatafeedSubscriptionStateInactive is a DatafeedSubscriptionState enum value + DatafeedSubscriptionStateInactive = "Inactive" +) + +// DatafeedSubscriptionState_Values returns all elements of the DatafeedSubscriptionState enum +func DatafeedSubscriptionState_Values() []string { + return []string{ + DatafeedSubscriptionStateActive, + DatafeedSubscriptionStateInactive, + } +} + +const ( + // DefaultRouteTableAssociationValueEnable is a DefaultRouteTableAssociationValue enum value + DefaultRouteTableAssociationValueEnable = "enable" + + // DefaultRouteTableAssociationValueDisable is a DefaultRouteTableAssociationValue enum value + DefaultRouteTableAssociationValueDisable = "disable" +) + +// DefaultRouteTableAssociationValue_Values returns all elements of the DefaultRouteTableAssociationValue enum +func DefaultRouteTableAssociationValue_Values() []string { + return []string{ + DefaultRouteTableAssociationValueEnable, + DefaultRouteTableAssociationValueDisable, + } +} + +const ( + // DefaultRouteTablePropagationValueEnable is a DefaultRouteTablePropagationValue enum 
value + DefaultRouteTablePropagationValueEnable = "enable" + + // DefaultRouteTablePropagationValueDisable is a DefaultRouteTablePropagationValue enum value + DefaultRouteTablePropagationValueDisable = "disable" +) + +// DefaultRouteTablePropagationValue_Values returns all elements of the DefaultRouteTablePropagationValue enum +func DefaultRouteTablePropagationValue_Values() []string { + return []string{ + DefaultRouteTablePropagationValueEnable, + DefaultRouteTablePropagationValueDisable, + } +} + +const ( + // DefaultTargetCapacityTypeSpot is a DefaultTargetCapacityType enum value + DefaultTargetCapacityTypeSpot = "spot" + + // DefaultTargetCapacityTypeOnDemand is a DefaultTargetCapacityType enum value + DefaultTargetCapacityTypeOnDemand = "on-demand" +) + +// DefaultTargetCapacityType_Values returns all elements of the DefaultTargetCapacityType enum +func DefaultTargetCapacityType_Values() []string { + return []string{ + DefaultTargetCapacityTypeSpot, + DefaultTargetCapacityTypeOnDemand, + } +} + +const ( + // DeleteFleetErrorCodeFleetIdDoesNotExist is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetIdDoesNotExist = "fleetIdDoesNotExist" + + // DeleteFleetErrorCodeFleetIdMalformed is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetIdMalformed = "fleetIdMalformed" + + // DeleteFleetErrorCodeFleetNotInDeletableState is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeFleetNotInDeletableState = "fleetNotInDeletableState" + + // DeleteFleetErrorCodeUnexpectedError is a DeleteFleetErrorCode enum value + DeleteFleetErrorCodeUnexpectedError = "unexpectedError" +) + +// DeleteFleetErrorCode_Values returns all elements of the DeleteFleetErrorCode enum +func DeleteFleetErrorCode_Values() []string { + return []string{ + DeleteFleetErrorCodeFleetIdDoesNotExist, + DeleteFleetErrorCodeFleetIdMalformed, + DeleteFleetErrorCodeFleetNotInDeletableState, + DeleteFleetErrorCodeUnexpectedError, + } +} + +const ( + // 
DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid = "reserved-instances-id-invalid" + + // DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState = "reserved-instances-not-in-queued-state" + + // DeleteQueuedReservedInstancesErrorCodeUnexpectedError is a DeleteQueuedReservedInstancesErrorCode enum value + DeleteQueuedReservedInstancesErrorCodeUnexpectedError = "unexpected-error" +) + +// DeleteQueuedReservedInstancesErrorCode_Values returns all elements of the DeleteQueuedReservedInstancesErrorCode enum +func DeleteQueuedReservedInstancesErrorCode_Values() []string { + return []string{ + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid, + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState, + DeleteQueuedReservedInstancesErrorCodeUnexpectedError, + } +} + +const ( + // DeviceTypeEbs is a DeviceType enum value + DeviceTypeEbs = "ebs" + + // DeviceTypeInstanceStore is a DeviceType enum value + DeviceTypeInstanceStore = "instance-store" +) + +// DeviceType_Values returns all elements of the DeviceType enum +func DeviceType_Values() []string { + return []string{ + DeviceTypeEbs, + DeviceTypeInstanceStore, + } +} + +const ( + // DiskImageFormatVmdk is a DiskImageFormat enum value + DiskImageFormatVmdk = "VMDK" + + // DiskImageFormatRaw is a DiskImageFormat enum value + DiskImageFormatRaw = "RAW" + + // DiskImageFormatVhd is a DiskImageFormat enum value + DiskImageFormatVhd = "VHD" +) + +// DiskImageFormat_Values returns all elements of the DiskImageFormat enum +func DiskImageFormat_Values() []string { + return []string{ + DiskImageFormatVmdk, + DiskImageFormatRaw, + DiskImageFormatVhd, + } +} + +const ( + // DiskTypeHdd is a DiskType enum value + 
DiskTypeHdd = "hdd" + + // DiskTypeSsd is a DiskType enum value + DiskTypeSsd = "ssd" +) + +// DiskType_Values returns all elements of the DiskType enum +func DiskType_Values() []string { + return []string{ + DiskTypeHdd, + DiskTypeSsd, + } +} + +const ( + // DnsNameStatePendingVerification is a DnsNameState enum value + DnsNameStatePendingVerification = "pendingVerification" + + // DnsNameStateVerified is a DnsNameState enum value + DnsNameStateVerified = "verified" + + // DnsNameStateFailed is a DnsNameState enum value + DnsNameStateFailed = "failed" +) + +// DnsNameState_Values returns all elements of the DnsNameState enum +func DnsNameState_Values() []string { + return []string{ + DnsNameStatePendingVerification, + DnsNameStateVerified, + DnsNameStateFailed, + } +} + +const ( + // DnsSupportValueEnable is a DnsSupportValue enum value + DnsSupportValueEnable = "enable" + + // DnsSupportValueDisable is a DnsSupportValue enum value + DnsSupportValueDisable = "disable" +) + +// DnsSupportValue_Values returns all elements of the DnsSupportValue enum +func DnsSupportValue_Values() []string { + return []string{ + DnsSupportValueEnable, + DnsSupportValueDisable, + } +} + +const ( + // DomainTypeVpc is a DomainType enum value + DomainTypeVpc = "vpc" + + // DomainTypeStandard is a DomainType enum value + DomainTypeStandard = "standard" +) + +// DomainType_Values returns all elements of the DomainType enum +func DomainType_Values() []string { + return []string{ + DomainTypeVpc, + DomainTypeStandard, + } +} + +const ( + // EbsEncryptionSupportUnsupported is a EbsEncryptionSupport enum value + EbsEncryptionSupportUnsupported = "unsupported" + + // EbsEncryptionSupportSupported is a EbsEncryptionSupport enum value + EbsEncryptionSupportSupported = "supported" +) + +// EbsEncryptionSupport_Values returns all elements of the EbsEncryptionSupport enum +func EbsEncryptionSupport_Values() []string { + return []string{ + EbsEncryptionSupportUnsupported, + 
EbsEncryptionSupportSupported, + } +} + +const ( + // EbsNvmeSupportUnsupported is a EbsNvmeSupport enum value + EbsNvmeSupportUnsupported = "unsupported" + + // EbsNvmeSupportSupported is a EbsNvmeSupport enum value + EbsNvmeSupportSupported = "supported" + + // EbsNvmeSupportRequired is a EbsNvmeSupport enum value + EbsNvmeSupportRequired = "required" +) + +// EbsNvmeSupport_Values returns all elements of the EbsNvmeSupport enum +func EbsNvmeSupport_Values() []string { + return []string{ + EbsNvmeSupportUnsupported, + EbsNvmeSupportSupported, + EbsNvmeSupportRequired, + } +} + +const ( + // EbsOptimizedSupportUnsupported is a EbsOptimizedSupport enum value + EbsOptimizedSupportUnsupported = "unsupported" + + // EbsOptimizedSupportSupported is a EbsOptimizedSupport enum value + EbsOptimizedSupportSupported = "supported" + + // EbsOptimizedSupportDefault is a EbsOptimizedSupport enum value + EbsOptimizedSupportDefault = "default" +) + +// EbsOptimizedSupport_Values returns all elements of the EbsOptimizedSupport enum +func EbsOptimizedSupport_Values() []string { + return []string{ + EbsOptimizedSupportUnsupported, + EbsOptimizedSupportSupported, + EbsOptimizedSupportDefault, + } +} + +const ( + // ElasticGpuStateAttached is a ElasticGpuState enum value + ElasticGpuStateAttached = "ATTACHED" +) + +// ElasticGpuState_Values returns all elements of the ElasticGpuState enum +func ElasticGpuState_Values() []string { + return []string{ + ElasticGpuStateAttached, + } +} + +const ( + // ElasticGpuStatusOk is a ElasticGpuStatus enum value + ElasticGpuStatusOk = "OK" + + // ElasticGpuStatusImpaired is a ElasticGpuStatus enum value + ElasticGpuStatusImpaired = "IMPAIRED" +) + +// ElasticGpuStatus_Values returns all elements of the ElasticGpuStatus enum +func ElasticGpuStatus_Values() []string { + return []string{ + ElasticGpuStatusOk, + ElasticGpuStatusImpaired, + } +} + +const ( + // EnaSupportUnsupported is a EnaSupport enum value + EnaSupportUnsupported = "unsupported" + + 
// EnaSupportSupported is a EnaSupport enum value + EnaSupportSupported = "supported" + + // EnaSupportRequired is a EnaSupport enum value + EnaSupportRequired = "required" +) + +// EnaSupport_Values returns all elements of the EnaSupport enum +func EnaSupport_Values() []string { + return []string{ + EnaSupportUnsupported, + EnaSupportSupported, + EnaSupportRequired, + } +} + +const ( + // EndDateTypeUnlimited is a EndDateType enum value + EndDateTypeUnlimited = "unlimited" + + // EndDateTypeLimited is a EndDateType enum value + EndDateTypeLimited = "limited" +) + +// EndDateType_Values returns all elements of the EndDateType enum +func EndDateType_Values() []string { + return []string{ + EndDateTypeUnlimited, + EndDateTypeLimited, + } +} + +const ( + // EphemeralNvmeSupportUnsupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportUnsupported = "unsupported" + + // EphemeralNvmeSupportSupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportSupported = "supported" + + // EphemeralNvmeSupportRequired is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportRequired = "required" +) + +// EphemeralNvmeSupport_Values returns all elements of the EphemeralNvmeSupport enum +func EphemeralNvmeSupport_Values() []string { + return []string{ + EphemeralNvmeSupportUnsupported, + EphemeralNvmeSupportSupported, + EphemeralNvmeSupportRequired, + } +} + +const ( + // EventCodeInstanceReboot is a EventCode enum value + EventCodeInstanceReboot = "instance-reboot" + + // EventCodeSystemReboot is a EventCode enum value + EventCodeSystemReboot = "system-reboot" + + // EventCodeSystemMaintenance is a EventCode enum value + EventCodeSystemMaintenance = "system-maintenance" + + // EventCodeInstanceRetirement is a EventCode enum value + EventCodeInstanceRetirement = "instance-retirement" + + // EventCodeInstanceStop is a EventCode enum value + EventCodeInstanceStop = "instance-stop" +) + +// EventCode_Values returns all elements of the EventCode enum +func 
EventCode_Values() []string { + return []string{ + EventCodeInstanceReboot, + EventCodeSystemReboot, + EventCodeSystemMaintenance, + EventCodeInstanceRetirement, + EventCodeInstanceStop, + } +} + +const ( + // EventTypeInstanceChange is a EventType enum value + EventTypeInstanceChange = "instanceChange" + + // EventTypeFleetRequestChange is a EventType enum value + EventTypeFleetRequestChange = "fleetRequestChange" + + // EventTypeError is a EventType enum value + EventTypeError = "error" + + // EventTypeInformation is a EventType enum value + EventTypeInformation = "information" +) + +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeInstanceChange, + EventTypeFleetRequestChange, + EventTypeError, + EventTypeInformation, + } +} + +const ( + // ExcessCapacityTerminationPolicyNoTermination is a ExcessCapacityTerminationPolicy enum value + ExcessCapacityTerminationPolicyNoTermination = "noTermination" + + // ExcessCapacityTerminationPolicyDefault is a ExcessCapacityTerminationPolicy enum value + ExcessCapacityTerminationPolicyDefault = "default" +) + +// ExcessCapacityTerminationPolicy_Values returns all elements of the ExcessCapacityTerminationPolicy enum +func ExcessCapacityTerminationPolicy_Values() []string { + return []string{ + ExcessCapacityTerminationPolicyNoTermination, + ExcessCapacityTerminationPolicyDefault, + } +} + +const ( + // ExportEnvironmentCitrix is a ExportEnvironment enum value + ExportEnvironmentCitrix = "citrix" + + // ExportEnvironmentVmware is a ExportEnvironment enum value + ExportEnvironmentVmware = "vmware" + + // ExportEnvironmentMicrosoft is a ExportEnvironment enum value + ExportEnvironmentMicrosoft = "microsoft" +) + +// ExportEnvironment_Values returns all elements of the ExportEnvironment enum +func ExportEnvironment_Values() []string { + return []string{ + ExportEnvironmentCitrix, + ExportEnvironmentVmware, + ExportEnvironmentMicrosoft, + } +} + +const ( 
+ // ExportTaskStateActive is a ExportTaskState enum value + ExportTaskStateActive = "active" + + // ExportTaskStateCancelling is a ExportTaskState enum value + ExportTaskStateCancelling = "cancelling" + + // ExportTaskStateCancelled is a ExportTaskState enum value + ExportTaskStateCancelled = "cancelled" + + // ExportTaskStateCompleted is a ExportTaskState enum value + ExportTaskStateCompleted = "completed" +) + +// ExportTaskState_Values returns all elements of the ExportTaskState enum +func ExportTaskState_Values() []string { + return []string{ + ExportTaskStateActive, + ExportTaskStateCancelling, + ExportTaskStateCancelled, + ExportTaskStateCompleted, + } +} + +const ( + // FastSnapshotRestoreStateCodeEnabling is a FastSnapshotRestoreStateCode enum value + FastSnapshotRestoreStateCodeEnabling = "enabling" + + // FastSnapshotRestoreStateCodeOptimizing is a FastSnapshotRestoreStateCode enum value + FastSnapshotRestoreStateCodeOptimizing = "optimizing" + + // FastSnapshotRestoreStateCodeEnabled is a FastSnapshotRestoreStateCode enum value + FastSnapshotRestoreStateCodeEnabled = "enabled" + + // FastSnapshotRestoreStateCodeDisabling is a FastSnapshotRestoreStateCode enum value + FastSnapshotRestoreStateCodeDisabling = "disabling" + + // FastSnapshotRestoreStateCodeDisabled is a FastSnapshotRestoreStateCode enum value + FastSnapshotRestoreStateCodeDisabled = "disabled" +) + +// FastSnapshotRestoreStateCode_Values returns all elements of the FastSnapshotRestoreStateCode enum +func FastSnapshotRestoreStateCode_Values() []string { + return []string{ + FastSnapshotRestoreStateCodeEnabling, + FastSnapshotRestoreStateCodeOptimizing, + FastSnapshotRestoreStateCodeEnabled, + FastSnapshotRestoreStateCodeDisabling, + FastSnapshotRestoreStateCodeDisabled, + } +} + +const ( + // FleetActivityStatusError is a FleetActivityStatus enum value + FleetActivityStatusError = "error" + + // FleetActivityStatusPendingFulfillment is a FleetActivityStatus enum value + 
FleetActivityStatusPendingFulfillment = "pending_fulfillment" + + // FleetActivityStatusPendingTermination is a FleetActivityStatus enum value + FleetActivityStatusPendingTermination = "pending_termination" + + // FleetActivityStatusFulfilled is a FleetActivityStatus enum value + FleetActivityStatusFulfilled = "fulfilled" +) + +// FleetActivityStatus_Values returns all elements of the FleetActivityStatus enum +func FleetActivityStatus_Values() []string { + return []string{ + FleetActivityStatusError, + FleetActivityStatusPendingFulfillment, + FleetActivityStatusPendingTermination, + FleetActivityStatusFulfilled, + } +} + +const ( + // FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst is a FleetCapacityReservationUsageStrategy enum value + FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst = "use-capacity-reservations-first" +) + +// FleetCapacityReservationUsageStrategy_Values returns all elements of the FleetCapacityReservationUsageStrategy enum +func FleetCapacityReservationUsageStrategy_Values() []string { + return []string{ + FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst, + } +} + +const ( + // FleetEventTypeInstanceChange is a FleetEventType enum value + FleetEventTypeInstanceChange = "instance-change" + + // FleetEventTypeFleetChange is a FleetEventType enum value + FleetEventTypeFleetChange = "fleet-change" + + // FleetEventTypeServiceError is a FleetEventType enum value + FleetEventTypeServiceError = "service-error" +) + +// FleetEventType_Values returns all elements of the FleetEventType enum +func FleetEventType_Values() []string { + return []string{ + FleetEventTypeInstanceChange, + FleetEventTypeFleetChange, + FleetEventTypeServiceError, + } +} + +const ( + // FleetExcessCapacityTerminationPolicyNoTermination is a FleetExcessCapacityTerminationPolicy enum value + FleetExcessCapacityTerminationPolicyNoTermination = "no-termination" + + // FleetExcessCapacityTerminationPolicyTermination is a 
FleetExcessCapacityTerminationPolicy enum value + FleetExcessCapacityTerminationPolicyTermination = "termination" +) + +// FleetExcessCapacityTerminationPolicy_Values returns all elements of the FleetExcessCapacityTerminationPolicy enum +func FleetExcessCapacityTerminationPolicy_Values() []string { + return []string{ + FleetExcessCapacityTerminationPolicyNoTermination, + FleetExcessCapacityTerminationPolicyTermination, + } +} + +const ( + // FleetOnDemandAllocationStrategyLowestPrice is a FleetOnDemandAllocationStrategy enum value + FleetOnDemandAllocationStrategyLowestPrice = "lowest-price" + + // FleetOnDemandAllocationStrategyPrioritized is a FleetOnDemandAllocationStrategy enum value + FleetOnDemandAllocationStrategyPrioritized = "prioritized" +) + +// FleetOnDemandAllocationStrategy_Values returns all elements of the FleetOnDemandAllocationStrategy enum +func FleetOnDemandAllocationStrategy_Values() []string { + return []string{ + FleetOnDemandAllocationStrategyLowestPrice, + FleetOnDemandAllocationStrategyPrioritized, + } +} + +const ( + // FleetReplacementStrategyLaunch is a FleetReplacementStrategy enum value + FleetReplacementStrategyLaunch = "launch" +) + +// FleetReplacementStrategy_Values returns all elements of the FleetReplacementStrategy enum +func FleetReplacementStrategy_Values() []string { + return []string{ + FleetReplacementStrategyLaunch, + } +} + +const ( + // FleetStateCodeSubmitted is a FleetStateCode enum value + FleetStateCodeSubmitted = "submitted" + + // FleetStateCodeActive is a FleetStateCode enum value + FleetStateCodeActive = "active" + + // FleetStateCodeDeleted is a FleetStateCode enum value + FleetStateCodeDeleted = "deleted" + + // FleetStateCodeFailed is a FleetStateCode enum value + FleetStateCodeFailed = "failed" + + // FleetStateCodeDeletedRunning is a FleetStateCode enum value + FleetStateCodeDeletedRunning = "deleted_running" + + // FleetStateCodeDeletedTerminating is a FleetStateCode enum value + 
FleetStateCodeDeletedTerminating = "deleted_terminating" + + // FleetStateCodeModifying is a FleetStateCode enum value + FleetStateCodeModifying = "modifying" +) + +// FleetStateCode_Values returns all elements of the FleetStateCode enum +func FleetStateCode_Values() []string { + return []string{ + FleetStateCodeSubmitted, + FleetStateCodeActive, + FleetStateCodeDeleted, + FleetStateCodeFailed, + FleetStateCodeDeletedRunning, + FleetStateCodeDeletedTerminating, + FleetStateCodeModifying, + } +} + +const ( + // FleetTypeRequest is a FleetType enum value + FleetTypeRequest = "request" + + // FleetTypeMaintain is a FleetType enum value + FleetTypeMaintain = "maintain" + + // FleetTypeInstant is a FleetType enum value + FleetTypeInstant = "instant" +) + +// FleetType_Values returns all elements of the FleetType enum +func FleetType_Values() []string { + return []string{ + FleetTypeRequest, + FleetTypeMaintain, + FleetTypeInstant, + } +} + +const ( + // FlowLogsResourceTypeVpc is a FlowLogsResourceType enum value + FlowLogsResourceTypeVpc = "VPC" + + // FlowLogsResourceTypeSubnet is a FlowLogsResourceType enum value + FlowLogsResourceTypeSubnet = "Subnet" + + // FlowLogsResourceTypeNetworkInterface is a FlowLogsResourceType enum value + FlowLogsResourceTypeNetworkInterface = "NetworkInterface" +) + +// FlowLogsResourceType_Values returns all elements of the FlowLogsResourceType enum +func FlowLogsResourceType_Values() []string { + return []string{ + FlowLogsResourceTypeVpc, + FlowLogsResourceTypeSubnet, + FlowLogsResourceTypeNetworkInterface, + } +} + +const ( + // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value + FpgaImageAttributeNameDescription = "description" + + // FpgaImageAttributeNameName is a FpgaImageAttributeName enum value + FpgaImageAttributeNameName = "name" + + // FpgaImageAttributeNameLoadPermission is a FpgaImageAttributeName enum value + FpgaImageAttributeNameLoadPermission = "loadPermission" + + // 
FpgaImageAttributeNameProductCodes is a FpgaImageAttributeName enum value + FpgaImageAttributeNameProductCodes = "productCodes" +) + +// FpgaImageAttributeName_Values returns all elements of the FpgaImageAttributeName enum +func FpgaImageAttributeName_Values() []string { + return []string{ + FpgaImageAttributeNameDescription, + FpgaImageAttributeNameName, + FpgaImageAttributeNameLoadPermission, + FpgaImageAttributeNameProductCodes, + } +} + +const ( + // FpgaImageStateCodePending is a FpgaImageStateCode enum value + FpgaImageStateCodePending = "pending" + + // FpgaImageStateCodeFailed is a FpgaImageStateCode enum value + FpgaImageStateCodeFailed = "failed" + + // FpgaImageStateCodeAvailable is a FpgaImageStateCode enum value + FpgaImageStateCodeAvailable = "available" + + // FpgaImageStateCodeUnavailable is a FpgaImageStateCode enum value + FpgaImageStateCodeUnavailable = "unavailable" +) + +// FpgaImageStateCode_Values returns all elements of the FpgaImageStateCode enum +func FpgaImageStateCode_Values() []string { + return []string{ + FpgaImageStateCodePending, + FpgaImageStateCodeFailed, + FpgaImageStateCodeAvailable, + FpgaImageStateCodeUnavailable, + } +} + +const ( + // GatewayTypeIpsec1 is a GatewayType enum value + GatewayTypeIpsec1 = "ipsec.1" +) + +// GatewayType_Values returns all elements of the GatewayType enum +func GatewayType_Values() []string { + return []string{ + GatewayTypeIpsec1, + } +} + +const ( + // HostRecoveryOn is a HostRecovery enum value + HostRecoveryOn = "on" + + // HostRecoveryOff is a HostRecovery enum value + HostRecoveryOff = "off" +) + +// HostRecovery_Values returns all elements of the HostRecovery enum +func HostRecovery_Values() []string { + return []string{ + HostRecoveryOn, + HostRecoveryOff, + } +} + +const ( + // HostTenancyDedicated is a HostTenancy enum value + HostTenancyDedicated = "dedicated" + + // HostTenancyHost is a HostTenancy enum value + HostTenancyHost = "host" +) + +// HostTenancy_Values returns all elements 
of the HostTenancy enum +func HostTenancy_Values() []string { + return []string{ + HostTenancyDedicated, + HostTenancyHost, + } +} + +const ( + // HttpTokensStateOptional is a HttpTokensState enum value + HttpTokensStateOptional = "optional" + + // HttpTokensStateRequired is a HttpTokensState enum value + HttpTokensStateRequired = "required" +) + +// HttpTokensState_Values returns all elements of the HttpTokensState enum +func HttpTokensState_Values() []string { + return []string{ + HttpTokensStateOptional, + HttpTokensStateRequired, + } +} + +const ( + // HypervisorTypeOvm is a HypervisorType enum value + HypervisorTypeOvm = "ovm" + + // HypervisorTypeXen is a HypervisorType enum value + HypervisorTypeXen = "xen" +) + +// HypervisorType_Values returns all elements of the HypervisorType enum +func HypervisorType_Values() []string { + return []string{ + HypervisorTypeOvm, + HypervisorTypeXen, + } +} + +const ( + // IamInstanceProfileAssociationStateAssociating is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateAssociating = "associating" + + // IamInstanceProfileAssociationStateAssociated is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateAssociated = "associated" + + // IamInstanceProfileAssociationStateDisassociating is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateDisassociating = "disassociating" + + // IamInstanceProfileAssociationStateDisassociated is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateDisassociated = "disassociated" +) + +// IamInstanceProfileAssociationState_Values returns all elements of the IamInstanceProfileAssociationState enum +func IamInstanceProfileAssociationState_Values() []string { + return []string{ + IamInstanceProfileAssociationStateAssociating, + IamInstanceProfileAssociationStateAssociated, + IamInstanceProfileAssociationStateDisassociating, + 
IamInstanceProfileAssociationStateDisassociated, + } +} + +const ( + // ImageAttributeNameDescription is a ImageAttributeName enum value + ImageAttributeNameDescription = "description" + + // ImageAttributeNameKernel is a ImageAttributeName enum value + ImageAttributeNameKernel = "kernel" + + // ImageAttributeNameRamdisk is a ImageAttributeName enum value + ImageAttributeNameRamdisk = "ramdisk" + + // ImageAttributeNameLaunchPermission is a ImageAttributeName enum value + ImageAttributeNameLaunchPermission = "launchPermission" + + // ImageAttributeNameProductCodes is a ImageAttributeName enum value + ImageAttributeNameProductCodes = "productCodes" + + // ImageAttributeNameBlockDeviceMapping is a ImageAttributeName enum value + ImageAttributeNameBlockDeviceMapping = "blockDeviceMapping" + + // ImageAttributeNameSriovNetSupport is a ImageAttributeName enum value + ImageAttributeNameSriovNetSupport = "sriovNetSupport" +) + +// ImageAttributeName_Values returns all elements of the ImageAttributeName enum +func ImageAttributeName_Values() []string { + return []string{ + ImageAttributeNameDescription, + ImageAttributeNameKernel, + ImageAttributeNameRamdisk, + ImageAttributeNameLaunchPermission, + ImageAttributeNameProductCodes, + ImageAttributeNameBlockDeviceMapping, + ImageAttributeNameSriovNetSupport, + } +} + +const ( + // ImageStatePending is a ImageState enum value + ImageStatePending = "pending" + + // ImageStateAvailable is a ImageState enum value + ImageStateAvailable = "available" + + // ImageStateInvalid is a ImageState enum value + ImageStateInvalid = "invalid" + + // ImageStateDeregistered is a ImageState enum value + ImageStateDeregistered = "deregistered" + + // ImageStateTransient is a ImageState enum value + ImageStateTransient = "transient" + + // ImageStateFailed is a ImageState enum value + ImageStateFailed = "failed" + + // ImageStateError is a ImageState enum value + ImageStateError = "error" +) + +// ImageState_Values returns all elements of the 
ImageState enum +func ImageState_Values() []string { + return []string{ + ImageStatePending, + ImageStateAvailable, + ImageStateInvalid, + ImageStateDeregistered, + ImageStateTransient, + ImageStateFailed, + ImageStateError, + } +} + +const ( + // ImageTypeValuesMachine is a ImageTypeValues enum value + ImageTypeValuesMachine = "machine" + + // ImageTypeValuesKernel is a ImageTypeValues enum value + ImageTypeValuesKernel = "kernel" + + // ImageTypeValuesRamdisk is a ImageTypeValues enum value + ImageTypeValuesRamdisk = "ramdisk" +) + +// ImageTypeValues_Values returns all elements of the ImageTypeValues enum +func ImageTypeValues_Values() []string { + return []string{ + ImageTypeValuesMachine, + ImageTypeValuesKernel, + ImageTypeValuesRamdisk, + } +} + +const ( + // InstanceAttributeNameInstanceType is a InstanceAttributeName enum value + InstanceAttributeNameInstanceType = "instanceType" + + // InstanceAttributeNameKernel is a InstanceAttributeName enum value + InstanceAttributeNameKernel = "kernel" + + // InstanceAttributeNameRamdisk is a InstanceAttributeName enum value + InstanceAttributeNameRamdisk = "ramdisk" + + // InstanceAttributeNameUserData is a InstanceAttributeName enum value + InstanceAttributeNameUserData = "userData" + + // InstanceAttributeNameDisableApiTermination is a InstanceAttributeName enum value + InstanceAttributeNameDisableApiTermination = "disableApiTermination" + + // InstanceAttributeNameInstanceInitiatedShutdownBehavior is a InstanceAttributeName enum value + InstanceAttributeNameInstanceInitiatedShutdownBehavior = "instanceInitiatedShutdownBehavior" + + // InstanceAttributeNameRootDeviceName is a InstanceAttributeName enum value + InstanceAttributeNameRootDeviceName = "rootDeviceName" + + // InstanceAttributeNameBlockDeviceMapping is a InstanceAttributeName enum value + InstanceAttributeNameBlockDeviceMapping = "blockDeviceMapping" + + // InstanceAttributeNameProductCodes is a InstanceAttributeName enum value + 
InstanceAttributeNameProductCodes = "productCodes" + + // InstanceAttributeNameSourceDestCheck is a InstanceAttributeName enum value + InstanceAttributeNameSourceDestCheck = "sourceDestCheck" + + // InstanceAttributeNameGroupSet is a InstanceAttributeName enum value + InstanceAttributeNameGroupSet = "groupSet" + + // InstanceAttributeNameEbsOptimized is a InstanceAttributeName enum value + InstanceAttributeNameEbsOptimized = "ebsOptimized" + + // InstanceAttributeNameSriovNetSupport is a InstanceAttributeName enum value + InstanceAttributeNameSriovNetSupport = "sriovNetSupport" + + // InstanceAttributeNameEnaSupport is a InstanceAttributeName enum value + InstanceAttributeNameEnaSupport = "enaSupport" + + // InstanceAttributeNameEnclaveOptions is a InstanceAttributeName enum value + InstanceAttributeNameEnclaveOptions = "enclaveOptions" +) + +// InstanceAttributeName_Values returns all elements of the InstanceAttributeName enum +func InstanceAttributeName_Values() []string { + return []string{ + InstanceAttributeNameInstanceType, + InstanceAttributeNameKernel, + InstanceAttributeNameRamdisk, + InstanceAttributeNameUserData, + InstanceAttributeNameDisableApiTermination, + InstanceAttributeNameInstanceInitiatedShutdownBehavior, + InstanceAttributeNameRootDeviceName, + InstanceAttributeNameBlockDeviceMapping, + InstanceAttributeNameProductCodes, + InstanceAttributeNameSourceDestCheck, + InstanceAttributeNameGroupSet, + InstanceAttributeNameEbsOptimized, + InstanceAttributeNameSriovNetSupport, + InstanceAttributeNameEnaSupport, + InstanceAttributeNameEnclaveOptions, + } +} + +const ( + // InstanceHealthStatusHealthy is a InstanceHealthStatus enum value + InstanceHealthStatusHealthy = "healthy" + + // InstanceHealthStatusUnhealthy is a InstanceHealthStatus enum value + InstanceHealthStatusUnhealthy = "unhealthy" +) + +// InstanceHealthStatus_Values returns all elements of the InstanceHealthStatus enum +func InstanceHealthStatus_Values() []string { + return []string{ + 
InstanceHealthStatusHealthy, + InstanceHealthStatusUnhealthy, + } +} + +const ( + // InstanceInterruptionBehaviorHibernate is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorHibernate = "hibernate" + + // InstanceInterruptionBehaviorStop is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorStop = "stop" + + // InstanceInterruptionBehaviorTerminate is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorTerminate = "terminate" +) + +// InstanceInterruptionBehavior_Values returns all elements of the InstanceInterruptionBehavior enum +func InstanceInterruptionBehavior_Values() []string { + return []string{ + InstanceInterruptionBehaviorHibernate, + InstanceInterruptionBehaviorStop, + InstanceInterruptionBehaviorTerminate, + } +} + +const ( + // InstanceLifecycleSpot is a InstanceLifecycle enum value + InstanceLifecycleSpot = "spot" + + // InstanceLifecycleOnDemand is a InstanceLifecycle enum value + InstanceLifecycleOnDemand = "on-demand" +) + +// InstanceLifecycle_Values returns all elements of the InstanceLifecycle enum +func InstanceLifecycle_Values() []string { + return []string{ + InstanceLifecycleSpot, + InstanceLifecycleOnDemand, + } +} + +const ( + // InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value + InstanceLifecycleTypeSpot = "spot" + + // InstanceLifecycleTypeScheduled is a InstanceLifecycleType enum value + InstanceLifecycleTypeScheduled = "scheduled" +) + +// InstanceLifecycleType_Values returns all elements of the InstanceLifecycleType enum +func InstanceLifecycleType_Values() []string { + return []string{ + InstanceLifecycleTypeSpot, + InstanceLifecycleTypeScheduled, + } +} + +const ( + // InstanceMatchCriteriaOpen is a InstanceMatchCriteria enum value + InstanceMatchCriteriaOpen = "open" + + // InstanceMatchCriteriaTargeted is a InstanceMatchCriteria enum value + InstanceMatchCriteriaTargeted = "targeted" +) + +// InstanceMatchCriteria_Values returns all elements of the 
InstanceMatchCriteria enum +func InstanceMatchCriteria_Values() []string { + return []string{ + InstanceMatchCriteriaOpen, + InstanceMatchCriteriaTargeted, + } +} + +const ( + // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateDisabled = "disabled" + + // InstanceMetadataEndpointStateEnabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateEnabled = "enabled" +) + +// InstanceMetadataEndpointState_Values returns all elements of the InstanceMetadataEndpointState enum +func InstanceMetadataEndpointState_Values() []string { + return []string{ + InstanceMetadataEndpointStateDisabled, + InstanceMetadataEndpointStateEnabled, + } +} + +const ( + // InstanceMetadataOptionsStatePending is a InstanceMetadataOptionsState enum value + InstanceMetadataOptionsStatePending = "pending" + + // InstanceMetadataOptionsStateApplied is a InstanceMetadataOptionsState enum value + InstanceMetadataOptionsStateApplied = "applied" +) + +// InstanceMetadataOptionsState_Values returns all elements of the InstanceMetadataOptionsState enum +func InstanceMetadataOptionsState_Values() []string { + return []string{ + InstanceMetadataOptionsStatePending, + InstanceMetadataOptionsStateApplied, + } +} + +const ( + // InstanceStateNamePending is a InstanceStateName enum value + InstanceStateNamePending = "pending" + + // InstanceStateNameRunning is a InstanceStateName enum value + InstanceStateNameRunning = "running" + + // InstanceStateNameShuttingDown is a InstanceStateName enum value + InstanceStateNameShuttingDown = "shutting-down" + + // InstanceStateNameTerminated is a InstanceStateName enum value + InstanceStateNameTerminated = "terminated" + + // InstanceStateNameStopping is a InstanceStateName enum value + InstanceStateNameStopping = "stopping" + + // InstanceStateNameStopped is a InstanceStateName enum value + InstanceStateNameStopped = "stopped" +) + +// InstanceStateName_Values returns all 
elements of the InstanceStateName enum +func InstanceStateName_Values() []string { + return []string{ + InstanceStateNamePending, + InstanceStateNameRunning, + InstanceStateNameShuttingDown, + InstanceStateNameTerminated, + InstanceStateNameStopping, + InstanceStateNameStopped, + } +} + +const ( + // InstanceTypeT1Micro is a InstanceType enum value + InstanceTypeT1Micro = "t1.micro" + + // InstanceTypeT2Nano is a InstanceType enum value + InstanceTypeT2Nano = "t2.nano" + + // InstanceTypeT2Micro is a InstanceType enum value + InstanceTypeT2Micro = "t2.micro" + + // InstanceTypeT2Small is a InstanceType enum value + InstanceTypeT2Small = "t2.small" + + // InstanceTypeT2Medium is a InstanceType enum value + InstanceTypeT2Medium = "t2.medium" + + // InstanceTypeT2Large is a InstanceType enum value + InstanceTypeT2Large = "t2.large" + + // InstanceTypeT2Xlarge is a InstanceType enum value + InstanceTypeT2Xlarge = "t2.xlarge" + + // InstanceTypeT22xlarge is a InstanceType enum value + InstanceTypeT22xlarge = "t2.2xlarge" + + // InstanceTypeT3Nano is a InstanceType enum value + InstanceTypeT3Nano = "t3.nano" + + // InstanceTypeT3Micro is a InstanceType enum value + InstanceTypeT3Micro = "t3.micro" + + // InstanceTypeT3Small is a InstanceType enum value + InstanceTypeT3Small = "t3.small" + + // InstanceTypeT3Medium is a InstanceType enum value + InstanceTypeT3Medium = "t3.medium" + + // InstanceTypeT3Large is a InstanceType enum value + InstanceTypeT3Large = "t3.large" + + // InstanceTypeT3Xlarge is a InstanceType enum value + InstanceTypeT3Xlarge = "t3.xlarge" + + // InstanceTypeT32xlarge is a InstanceType enum value + InstanceTypeT32xlarge = "t3.2xlarge" + + // InstanceTypeT3aNano is a InstanceType enum value + InstanceTypeT3aNano = "t3a.nano" + + // InstanceTypeT3aMicro is a InstanceType enum value + InstanceTypeT3aMicro = "t3a.micro" + + // InstanceTypeT3aSmall is a InstanceType enum value + InstanceTypeT3aSmall = "t3a.small" + + // InstanceTypeT3aMedium is a 
InstanceType enum value + InstanceTypeT3aMedium = "t3a.medium" + + // InstanceTypeT3aLarge is a InstanceType enum value + InstanceTypeT3aLarge = "t3a.large" + + // InstanceTypeT3aXlarge is a InstanceType enum value + InstanceTypeT3aXlarge = "t3a.xlarge" + + // InstanceTypeT3a2xlarge is a InstanceType enum value + InstanceTypeT3a2xlarge = "t3a.2xlarge" + + // InstanceTypeT4gNano is a InstanceType enum value + InstanceTypeT4gNano = "t4g.nano" + + // InstanceTypeT4gMicro is a InstanceType enum value + InstanceTypeT4gMicro = "t4g.micro" + + // InstanceTypeT4gSmall is a InstanceType enum value + InstanceTypeT4gSmall = "t4g.small" + + // InstanceTypeT4gMedium is a InstanceType enum value + InstanceTypeT4gMedium = "t4g.medium" + + // InstanceTypeT4gLarge is a InstanceType enum value + InstanceTypeT4gLarge = "t4g.large" + + // InstanceTypeT4gXlarge is a InstanceType enum value + InstanceTypeT4gXlarge = "t4g.xlarge" + + // InstanceTypeT4g2xlarge is a InstanceType enum value + InstanceTypeT4g2xlarge = "t4g.2xlarge" + + // InstanceTypeM1Small is a InstanceType enum value + InstanceTypeM1Small = "m1.small" + + // InstanceTypeM1Medium is a InstanceType enum value + InstanceTypeM1Medium = "m1.medium" + + // InstanceTypeM1Large is a InstanceType enum value + InstanceTypeM1Large = "m1.large" + + // InstanceTypeM1Xlarge is a InstanceType enum value + InstanceTypeM1Xlarge = "m1.xlarge" + + // InstanceTypeM3Medium is a InstanceType enum value + InstanceTypeM3Medium = "m3.medium" + + // InstanceTypeM3Large is a InstanceType enum value + InstanceTypeM3Large = "m3.large" + + // InstanceTypeM3Xlarge is a InstanceType enum value + InstanceTypeM3Xlarge = "m3.xlarge" + + // InstanceTypeM32xlarge is a InstanceType enum value + InstanceTypeM32xlarge = "m3.2xlarge" + + // InstanceTypeM4Large is a InstanceType enum value + InstanceTypeM4Large = "m4.large" + + // InstanceTypeM4Xlarge is a InstanceType enum value + InstanceTypeM4Xlarge = "m4.xlarge" + + // InstanceTypeM42xlarge is a InstanceType 
enum value + InstanceTypeM42xlarge = "m4.2xlarge" + + // InstanceTypeM44xlarge is a InstanceType enum value + InstanceTypeM44xlarge = "m4.4xlarge" + + // InstanceTypeM410xlarge is a InstanceType enum value + InstanceTypeM410xlarge = "m4.10xlarge" + + // InstanceTypeM416xlarge is a InstanceType enum value + InstanceTypeM416xlarge = "m4.16xlarge" + + // InstanceTypeM2Xlarge is a InstanceType enum value + InstanceTypeM2Xlarge = "m2.xlarge" + + // InstanceTypeM22xlarge is a InstanceType enum value + InstanceTypeM22xlarge = "m2.2xlarge" + + // InstanceTypeM24xlarge is a InstanceType enum value + InstanceTypeM24xlarge = "m2.4xlarge" + + // InstanceTypeCr18xlarge is a InstanceType enum value + InstanceTypeCr18xlarge = "cr1.8xlarge" + + // InstanceTypeR3Large is a InstanceType enum value + InstanceTypeR3Large = "r3.large" + + // InstanceTypeR3Xlarge is a InstanceType enum value + InstanceTypeR3Xlarge = "r3.xlarge" + + // InstanceTypeR32xlarge is a InstanceType enum value + InstanceTypeR32xlarge = "r3.2xlarge" + + // InstanceTypeR34xlarge is a InstanceType enum value + InstanceTypeR34xlarge = "r3.4xlarge" + + // InstanceTypeR38xlarge is a InstanceType enum value + InstanceTypeR38xlarge = "r3.8xlarge" + + // InstanceTypeR4Large is a InstanceType enum value + InstanceTypeR4Large = "r4.large" + + // InstanceTypeR4Xlarge is a InstanceType enum value + InstanceTypeR4Xlarge = "r4.xlarge" + + // InstanceTypeR42xlarge is a InstanceType enum value + InstanceTypeR42xlarge = "r4.2xlarge" + + // InstanceTypeR44xlarge is a InstanceType enum value + InstanceTypeR44xlarge = "r4.4xlarge" + + // InstanceTypeR48xlarge is a InstanceType enum value + InstanceTypeR48xlarge = "r4.8xlarge" + + // InstanceTypeR416xlarge is a InstanceType enum value + InstanceTypeR416xlarge = "r4.16xlarge" + + // InstanceTypeR5Large is a InstanceType enum value + InstanceTypeR5Large = "r5.large" + + // InstanceTypeR5Xlarge is a InstanceType enum value + InstanceTypeR5Xlarge = "r5.xlarge" + + // 
InstanceTypeR52xlarge is a InstanceType enum value + InstanceTypeR52xlarge = "r5.2xlarge" + + // InstanceTypeR54xlarge is a InstanceType enum value + InstanceTypeR54xlarge = "r5.4xlarge" + + // InstanceTypeR58xlarge is a InstanceType enum value + InstanceTypeR58xlarge = "r5.8xlarge" + + // InstanceTypeR512xlarge is a InstanceType enum value + InstanceTypeR512xlarge = "r5.12xlarge" + + // InstanceTypeR516xlarge is a InstanceType enum value + InstanceTypeR516xlarge = "r5.16xlarge" + + // InstanceTypeR524xlarge is a InstanceType enum value + InstanceTypeR524xlarge = "r5.24xlarge" + + // InstanceTypeR5Metal is a InstanceType enum value + InstanceTypeR5Metal = "r5.metal" + + // InstanceTypeR5aLarge is a InstanceType enum value + InstanceTypeR5aLarge = "r5a.large" + + // InstanceTypeR5aXlarge is a InstanceType enum value + InstanceTypeR5aXlarge = "r5a.xlarge" + + // InstanceTypeR5a2xlarge is a InstanceType enum value + InstanceTypeR5a2xlarge = "r5a.2xlarge" + + // InstanceTypeR5a4xlarge is a InstanceType enum value + InstanceTypeR5a4xlarge = "r5a.4xlarge" + + // InstanceTypeR5a8xlarge is a InstanceType enum value + InstanceTypeR5a8xlarge = "r5a.8xlarge" + + // InstanceTypeR5a12xlarge is a InstanceType enum value + InstanceTypeR5a12xlarge = "r5a.12xlarge" + + // InstanceTypeR5a16xlarge is a InstanceType enum value + InstanceTypeR5a16xlarge = "r5a.16xlarge" + + // InstanceTypeR5a24xlarge is a InstanceType enum value + InstanceTypeR5a24xlarge = "r5a.24xlarge" + + // InstanceTypeR5bLarge is a InstanceType enum value + InstanceTypeR5bLarge = "r5b.large" + + // InstanceTypeR5bXlarge is a InstanceType enum value + InstanceTypeR5bXlarge = "r5b.xlarge" + + // InstanceTypeR5b2xlarge is a InstanceType enum value + InstanceTypeR5b2xlarge = "r5b.2xlarge" + + // InstanceTypeR5b4xlarge is a InstanceType enum value + InstanceTypeR5b4xlarge = "r5b.4xlarge" + + // InstanceTypeR5b8xlarge is a InstanceType enum value + InstanceTypeR5b8xlarge = "r5b.8xlarge" + + // InstanceTypeR5b12xlarge is 
a InstanceType enum value + InstanceTypeR5b12xlarge = "r5b.12xlarge" + + // InstanceTypeR5b16xlarge is a InstanceType enum value + InstanceTypeR5b16xlarge = "r5b.16xlarge" + + // InstanceTypeR5b24xlarge is a InstanceType enum value + InstanceTypeR5b24xlarge = "r5b.24xlarge" + + // InstanceTypeR5bMetal is a InstanceType enum value + InstanceTypeR5bMetal = "r5b.metal" + + // InstanceTypeR5dLarge is a InstanceType enum value + InstanceTypeR5dLarge = "r5d.large" + + // InstanceTypeR5dXlarge is a InstanceType enum value + InstanceTypeR5dXlarge = "r5d.xlarge" + + // InstanceTypeR5d2xlarge is a InstanceType enum value + InstanceTypeR5d2xlarge = "r5d.2xlarge" + + // InstanceTypeR5d4xlarge is a InstanceType enum value + InstanceTypeR5d4xlarge = "r5d.4xlarge" + + // InstanceTypeR5d8xlarge is a InstanceType enum value + InstanceTypeR5d8xlarge = "r5d.8xlarge" + + // InstanceTypeR5d12xlarge is a InstanceType enum value + InstanceTypeR5d12xlarge = "r5d.12xlarge" + + // InstanceTypeR5d16xlarge is a InstanceType enum value + InstanceTypeR5d16xlarge = "r5d.16xlarge" + + // InstanceTypeR5d24xlarge is a InstanceType enum value + InstanceTypeR5d24xlarge = "r5d.24xlarge" + + // InstanceTypeR5dMetal is a InstanceType enum value + InstanceTypeR5dMetal = "r5d.metal" + + // InstanceTypeR5adLarge is a InstanceType enum value + InstanceTypeR5adLarge = "r5ad.large" + + // InstanceTypeR5adXlarge is a InstanceType enum value + InstanceTypeR5adXlarge = "r5ad.xlarge" + + // InstanceTypeR5ad2xlarge is a InstanceType enum value + InstanceTypeR5ad2xlarge = "r5ad.2xlarge" + + // InstanceTypeR5ad4xlarge is a InstanceType enum value + InstanceTypeR5ad4xlarge = "r5ad.4xlarge" + + // InstanceTypeR5ad8xlarge is a InstanceType enum value + InstanceTypeR5ad8xlarge = "r5ad.8xlarge" + + // InstanceTypeR5ad12xlarge is a InstanceType enum value + InstanceTypeR5ad12xlarge = "r5ad.12xlarge" + + // InstanceTypeR5ad16xlarge is a InstanceType enum value + InstanceTypeR5ad16xlarge = "r5ad.16xlarge" + + // 
InstanceTypeR5ad24xlarge is a InstanceType enum value + InstanceTypeR5ad24xlarge = "r5ad.24xlarge" + + // InstanceTypeR6gMetal is a InstanceType enum value + InstanceTypeR6gMetal = "r6g.metal" + + // InstanceTypeR6gMedium is a InstanceType enum value + InstanceTypeR6gMedium = "r6g.medium" + + // InstanceTypeR6gLarge is a InstanceType enum value + InstanceTypeR6gLarge = "r6g.large" + + // InstanceTypeR6gXlarge is a InstanceType enum value + InstanceTypeR6gXlarge = "r6g.xlarge" + + // InstanceTypeR6g2xlarge is a InstanceType enum value + InstanceTypeR6g2xlarge = "r6g.2xlarge" + + // InstanceTypeR6g4xlarge is a InstanceType enum value + InstanceTypeR6g4xlarge = "r6g.4xlarge" + + // InstanceTypeR6g8xlarge is a InstanceType enum value + InstanceTypeR6g8xlarge = "r6g.8xlarge" + + // InstanceTypeR6g12xlarge is a InstanceType enum value + InstanceTypeR6g12xlarge = "r6g.12xlarge" + + // InstanceTypeR6g16xlarge is a InstanceType enum value + InstanceTypeR6g16xlarge = "r6g.16xlarge" + + // InstanceTypeR6gdMetal is a InstanceType enum value + InstanceTypeR6gdMetal = "r6gd.metal" + + // InstanceTypeR6gdMedium is a InstanceType enum value + InstanceTypeR6gdMedium = "r6gd.medium" + + // InstanceTypeR6gdLarge is a InstanceType enum value + InstanceTypeR6gdLarge = "r6gd.large" + + // InstanceTypeR6gdXlarge is a InstanceType enum value + InstanceTypeR6gdXlarge = "r6gd.xlarge" + + // InstanceTypeR6gd2xlarge is a InstanceType enum value + InstanceTypeR6gd2xlarge = "r6gd.2xlarge" + + // InstanceTypeR6gd4xlarge is a InstanceType enum value + InstanceTypeR6gd4xlarge = "r6gd.4xlarge" + + // InstanceTypeR6gd8xlarge is a InstanceType enum value + InstanceTypeR6gd8xlarge = "r6gd.8xlarge" + + // InstanceTypeR6gd12xlarge is a InstanceType enum value + InstanceTypeR6gd12xlarge = "r6gd.12xlarge" + + // InstanceTypeR6gd16xlarge is a InstanceType enum value + InstanceTypeR6gd16xlarge = "r6gd.16xlarge" + + // InstanceTypeX116xlarge is a InstanceType enum value + InstanceTypeX116xlarge = 
"x1.16xlarge" + + // InstanceTypeX132xlarge is a InstanceType enum value + InstanceTypeX132xlarge = "x1.32xlarge" + + // InstanceTypeX1eXlarge is a InstanceType enum value + InstanceTypeX1eXlarge = "x1e.xlarge" + + // InstanceTypeX1e2xlarge is a InstanceType enum value + InstanceTypeX1e2xlarge = "x1e.2xlarge" + + // InstanceTypeX1e4xlarge is a InstanceType enum value + InstanceTypeX1e4xlarge = "x1e.4xlarge" + + // InstanceTypeX1e8xlarge is a InstanceType enum value + InstanceTypeX1e8xlarge = "x1e.8xlarge" + + // InstanceTypeX1e16xlarge is a InstanceType enum value + InstanceTypeX1e16xlarge = "x1e.16xlarge" + + // InstanceTypeX1e32xlarge is a InstanceType enum value + InstanceTypeX1e32xlarge = "x1e.32xlarge" + + // InstanceTypeI2Xlarge is a InstanceType enum value + InstanceTypeI2Xlarge = "i2.xlarge" + + // InstanceTypeI22xlarge is a InstanceType enum value + InstanceTypeI22xlarge = "i2.2xlarge" + + // InstanceTypeI24xlarge is a InstanceType enum value + InstanceTypeI24xlarge = "i2.4xlarge" + + // InstanceTypeI28xlarge is a InstanceType enum value + InstanceTypeI28xlarge = "i2.8xlarge" + + // InstanceTypeI3Large is a InstanceType enum value + InstanceTypeI3Large = "i3.large" + + // InstanceTypeI3Xlarge is a InstanceType enum value + InstanceTypeI3Xlarge = "i3.xlarge" + + // InstanceTypeI32xlarge is a InstanceType enum value + InstanceTypeI32xlarge = "i3.2xlarge" + + // InstanceTypeI34xlarge is a InstanceType enum value + InstanceTypeI34xlarge = "i3.4xlarge" + + // InstanceTypeI38xlarge is a InstanceType enum value + InstanceTypeI38xlarge = "i3.8xlarge" + + // InstanceTypeI316xlarge is a InstanceType enum value + InstanceTypeI316xlarge = "i3.16xlarge" + + // InstanceTypeI3Metal is a InstanceType enum value + InstanceTypeI3Metal = "i3.metal" + + // InstanceTypeI3enLarge is a InstanceType enum value + InstanceTypeI3enLarge = "i3en.large" + + // InstanceTypeI3enXlarge is a InstanceType enum value + InstanceTypeI3enXlarge = "i3en.xlarge" + + // InstanceTypeI3en2xlarge is 
a InstanceType enum value + InstanceTypeI3en2xlarge = "i3en.2xlarge" + + // InstanceTypeI3en3xlarge is a InstanceType enum value + InstanceTypeI3en3xlarge = "i3en.3xlarge" + + // InstanceTypeI3en6xlarge is a InstanceType enum value + InstanceTypeI3en6xlarge = "i3en.6xlarge" + + // InstanceTypeI3en12xlarge is a InstanceType enum value + InstanceTypeI3en12xlarge = "i3en.12xlarge" + + // InstanceTypeI3en24xlarge is a InstanceType enum value + InstanceTypeI3en24xlarge = "i3en.24xlarge" + + // InstanceTypeI3enMetal is a InstanceType enum value + InstanceTypeI3enMetal = "i3en.metal" + + // InstanceTypeHi14xlarge is a InstanceType enum value + InstanceTypeHi14xlarge = "hi1.4xlarge" + + // InstanceTypeHs18xlarge is a InstanceType enum value + InstanceTypeHs18xlarge = "hs1.8xlarge" + + // InstanceTypeC1Medium is a InstanceType enum value + InstanceTypeC1Medium = "c1.medium" + + // InstanceTypeC1Xlarge is a InstanceType enum value + InstanceTypeC1Xlarge = "c1.xlarge" + + // InstanceTypeC3Large is a InstanceType enum value + InstanceTypeC3Large = "c3.large" + + // InstanceTypeC3Xlarge is a InstanceType enum value + InstanceTypeC3Xlarge = "c3.xlarge" + + // InstanceTypeC32xlarge is a InstanceType enum value + InstanceTypeC32xlarge = "c3.2xlarge" + + // InstanceTypeC34xlarge is a InstanceType enum value + InstanceTypeC34xlarge = "c3.4xlarge" + + // InstanceTypeC38xlarge is a InstanceType enum value + InstanceTypeC38xlarge = "c3.8xlarge" + + // InstanceTypeC4Large is a InstanceType enum value + InstanceTypeC4Large = "c4.large" + + // InstanceTypeC4Xlarge is a InstanceType enum value + InstanceTypeC4Xlarge = "c4.xlarge" + + // InstanceTypeC42xlarge is a InstanceType enum value + InstanceTypeC42xlarge = "c4.2xlarge" + + // InstanceTypeC44xlarge is a InstanceType enum value + InstanceTypeC44xlarge = "c4.4xlarge" + + // InstanceTypeC48xlarge is a InstanceType enum value + InstanceTypeC48xlarge = "c4.8xlarge" + + // InstanceTypeC5Large is a InstanceType enum value + 
InstanceTypeC5Large = "c5.large" + + // InstanceTypeC5Xlarge is a InstanceType enum value + InstanceTypeC5Xlarge = "c5.xlarge" + + // InstanceTypeC52xlarge is a InstanceType enum value + InstanceTypeC52xlarge = "c5.2xlarge" + + // InstanceTypeC54xlarge is a InstanceType enum value + InstanceTypeC54xlarge = "c5.4xlarge" + + // InstanceTypeC59xlarge is a InstanceType enum value + InstanceTypeC59xlarge = "c5.9xlarge" + + // InstanceTypeC512xlarge is a InstanceType enum value + InstanceTypeC512xlarge = "c5.12xlarge" + + // InstanceTypeC518xlarge is a InstanceType enum value + InstanceTypeC518xlarge = "c5.18xlarge" + + // InstanceTypeC524xlarge is a InstanceType enum value + InstanceTypeC524xlarge = "c5.24xlarge" + + // InstanceTypeC5Metal is a InstanceType enum value + InstanceTypeC5Metal = "c5.metal" + + // InstanceTypeC5aLarge is a InstanceType enum value + InstanceTypeC5aLarge = "c5a.large" + + // InstanceTypeC5aXlarge is a InstanceType enum value + InstanceTypeC5aXlarge = "c5a.xlarge" + + // InstanceTypeC5a2xlarge is a InstanceType enum value + InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // InstanceTypeC5a4xlarge is a InstanceType enum value + InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // InstanceTypeC5a8xlarge is a InstanceType enum value + InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // InstanceTypeC5a12xlarge is a InstanceType enum value + InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // InstanceTypeC5a16xlarge is a InstanceType enum value + InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // InstanceTypeC5a24xlarge is a InstanceType enum value + InstanceTypeC5a24xlarge = "c5a.24xlarge" + + // InstanceTypeC5adLarge is a InstanceType enum value + InstanceTypeC5adLarge = "c5ad.large" + + // InstanceTypeC5adXlarge is a InstanceType enum value + InstanceTypeC5adXlarge = "c5ad.xlarge" + + // InstanceTypeC5ad2xlarge is a InstanceType enum value + InstanceTypeC5ad2xlarge = "c5ad.2xlarge" + + // InstanceTypeC5ad4xlarge is a InstanceType enum value + InstanceTypeC5ad4xlarge = 
"c5ad.4xlarge" + + // InstanceTypeC5ad8xlarge is a InstanceType enum value + InstanceTypeC5ad8xlarge = "c5ad.8xlarge" + + // InstanceTypeC5ad12xlarge is a InstanceType enum value + InstanceTypeC5ad12xlarge = "c5ad.12xlarge" + + // InstanceTypeC5ad16xlarge is a InstanceType enum value + InstanceTypeC5ad16xlarge = "c5ad.16xlarge" + + // InstanceTypeC5ad24xlarge is a InstanceType enum value + InstanceTypeC5ad24xlarge = "c5ad.24xlarge" + + // InstanceTypeC5dLarge is a InstanceType enum value + InstanceTypeC5dLarge = "c5d.large" + + // InstanceTypeC5dXlarge is a InstanceType enum value + InstanceTypeC5dXlarge = "c5d.xlarge" + + // InstanceTypeC5d2xlarge is a InstanceType enum value + InstanceTypeC5d2xlarge = "c5d.2xlarge" + + // InstanceTypeC5d4xlarge is a InstanceType enum value + InstanceTypeC5d4xlarge = "c5d.4xlarge" + + // InstanceTypeC5d9xlarge is a InstanceType enum value + InstanceTypeC5d9xlarge = "c5d.9xlarge" + + // InstanceTypeC5d12xlarge is a InstanceType enum value + InstanceTypeC5d12xlarge = "c5d.12xlarge" + + // InstanceTypeC5d18xlarge is a InstanceType enum value + InstanceTypeC5d18xlarge = "c5d.18xlarge" + + // InstanceTypeC5d24xlarge is a InstanceType enum value + InstanceTypeC5d24xlarge = "c5d.24xlarge" + + // InstanceTypeC5dMetal is a InstanceType enum value + InstanceTypeC5dMetal = "c5d.metal" + + // InstanceTypeC5nLarge is a InstanceType enum value + InstanceTypeC5nLarge = "c5n.large" + + // InstanceTypeC5nXlarge is a InstanceType enum value + InstanceTypeC5nXlarge = "c5n.xlarge" + + // InstanceTypeC5n2xlarge is a InstanceType enum value + InstanceTypeC5n2xlarge = "c5n.2xlarge" + + // InstanceTypeC5n4xlarge is a InstanceType enum value + InstanceTypeC5n4xlarge = "c5n.4xlarge" + + // InstanceTypeC5n9xlarge is a InstanceType enum value + InstanceTypeC5n9xlarge = "c5n.9xlarge" + + // InstanceTypeC5n18xlarge is a InstanceType enum value + InstanceTypeC5n18xlarge = "c5n.18xlarge" + + // InstanceTypeC6gMetal is a InstanceType enum value + 
InstanceTypeC6gMetal = "c6g.metal" + + // InstanceTypeC6gMedium is a InstanceType enum value + InstanceTypeC6gMedium = "c6g.medium" + + // InstanceTypeC6gLarge is a InstanceType enum value + InstanceTypeC6gLarge = "c6g.large" + + // InstanceTypeC6gXlarge is a InstanceType enum value + InstanceTypeC6gXlarge = "c6g.xlarge" + + // InstanceTypeC6g2xlarge is a InstanceType enum value + InstanceTypeC6g2xlarge = "c6g.2xlarge" + + // InstanceTypeC6g4xlarge is a InstanceType enum value + InstanceTypeC6g4xlarge = "c6g.4xlarge" + + // InstanceTypeC6g8xlarge is a InstanceType enum value + InstanceTypeC6g8xlarge = "c6g.8xlarge" + + // InstanceTypeC6g12xlarge is a InstanceType enum value + InstanceTypeC6g12xlarge = "c6g.12xlarge" + + // InstanceTypeC6g16xlarge is a InstanceType enum value + InstanceTypeC6g16xlarge = "c6g.16xlarge" + + // InstanceTypeC6gdMetal is a InstanceType enum value + InstanceTypeC6gdMetal = "c6gd.metal" + + // InstanceTypeC6gdMedium is a InstanceType enum value + InstanceTypeC6gdMedium = "c6gd.medium" + + // InstanceTypeC6gdLarge is a InstanceType enum value + InstanceTypeC6gdLarge = "c6gd.large" + + // InstanceTypeC6gdXlarge is a InstanceType enum value + InstanceTypeC6gdXlarge = "c6gd.xlarge" + + // InstanceTypeC6gd2xlarge is a InstanceType enum value + InstanceTypeC6gd2xlarge = "c6gd.2xlarge" + + // InstanceTypeC6gd4xlarge is a InstanceType enum value + InstanceTypeC6gd4xlarge = "c6gd.4xlarge" + + // InstanceTypeC6gd8xlarge is a InstanceType enum value + InstanceTypeC6gd8xlarge = "c6gd.8xlarge" + + // InstanceTypeC6gd12xlarge is a InstanceType enum value + InstanceTypeC6gd12xlarge = "c6gd.12xlarge" + + // InstanceTypeC6gd16xlarge is a InstanceType enum value + InstanceTypeC6gd16xlarge = "c6gd.16xlarge" + + // InstanceTypeCc14xlarge is a InstanceType enum value + InstanceTypeCc14xlarge = "cc1.4xlarge" + + // InstanceTypeCc28xlarge is a InstanceType enum value + InstanceTypeCc28xlarge = "cc2.8xlarge" + + // InstanceTypeG22xlarge is a InstanceType enum 
value + InstanceTypeG22xlarge = "g2.2xlarge" + + // InstanceTypeG28xlarge is a InstanceType enum value + InstanceTypeG28xlarge = "g2.8xlarge" + + // InstanceTypeG34xlarge is a InstanceType enum value + InstanceTypeG34xlarge = "g3.4xlarge" + + // InstanceTypeG38xlarge is a InstanceType enum value + InstanceTypeG38xlarge = "g3.8xlarge" + + // InstanceTypeG316xlarge is a InstanceType enum value + InstanceTypeG316xlarge = "g3.16xlarge" + + // InstanceTypeG3sXlarge is a InstanceType enum value + InstanceTypeG3sXlarge = "g3s.xlarge" + + // InstanceTypeG4dnXlarge is a InstanceType enum value + InstanceTypeG4dnXlarge = "g4dn.xlarge" + + // InstanceTypeG4dn2xlarge is a InstanceType enum value + InstanceTypeG4dn2xlarge = "g4dn.2xlarge" + + // InstanceTypeG4dn4xlarge is a InstanceType enum value + InstanceTypeG4dn4xlarge = "g4dn.4xlarge" + + // InstanceTypeG4dn8xlarge is a InstanceType enum value + InstanceTypeG4dn8xlarge = "g4dn.8xlarge" + + // InstanceTypeG4dn12xlarge is a InstanceType enum value + InstanceTypeG4dn12xlarge = "g4dn.12xlarge" + + // InstanceTypeG4dn16xlarge is a InstanceType enum value + InstanceTypeG4dn16xlarge = "g4dn.16xlarge" + + // InstanceTypeG4dnMetal is a InstanceType enum value + InstanceTypeG4dnMetal = "g4dn.metal" + + // InstanceTypeCg14xlarge is a InstanceType enum value + InstanceTypeCg14xlarge = "cg1.4xlarge" + + // InstanceTypeP2Xlarge is a InstanceType enum value + InstanceTypeP2Xlarge = "p2.xlarge" + + // InstanceTypeP28xlarge is a InstanceType enum value + InstanceTypeP28xlarge = "p2.8xlarge" + + // InstanceTypeP216xlarge is a InstanceType enum value + InstanceTypeP216xlarge = "p2.16xlarge" + + // InstanceTypeP32xlarge is a InstanceType enum value + InstanceTypeP32xlarge = "p3.2xlarge" + + // InstanceTypeP38xlarge is a InstanceType enum value + InstanceTypeP38xlarge = "p3.8xlarge" + + // InstanceTypeP316xlarge is a InstanceType enum value + InstanceTypeP316xlarge = "p3.16xlarge" + + // InstanceTypeP3dn24xlarge is a InstanceType enum value + 
InstanceTypeP3dn24xlarge = "p3dn.24xlarge" + + // InstanceTypeP4d24xlarge is a InstanceType enum value + InstanceTypeP4d24xlarge = "p4d.24xlarge" + + // InstanceTypeD2Xlarge is a InstanceType enum value + InstanceTypeD2Xlarge = "d2.xlarge" + + // InstanceTypeD22xlarge is a InstanceType enum value + InstanceTypeD22xlarge = "d2.2xlarge" + + // InstanceTypeD24xlarge is a InstanceType enum value + InstanceTypeD24xlarge = "d2.4xlarge" + + // InstanceTypeD28xlarge is a InstanceType enum value + InstanceTypeD28xlarge = "d2.8xlarge" + + // InstanceTypeD3Xlarge is a InstanceType enum value + InstanceTypeD3Xlarge = "d3.xlarge" + + // InstanceTypeD32xlarge is a InstanceType enum value + InstanceTypeD32xlarge = "d3.2xlarge" + + // InstanceTypeD34xlarge is a InstanceType enum value + InstanceTypeD34xlarge = "d3.4xlarge" + + // InstanceTypeD38xlarge is a InstanceType enum value + InstanceTypeD38xlarge = "d3.8xlarge" + + // InstanceTypeD3enXlarge is a InstanceType enum value + InstanceTypeD3enXlarge = "d3en.xlarge" + + // InstanceTypeD3en2xlarge is a InstanceType enum value + InstanceTypeD3en2xlarge = "d3en.2xlarge" + + // InstanceTypeD3en4xlarge is a InstanceType enum value + InstanceTypeD3en4xlarge = "d3en.4xlarge" + + // InstanceTypeD3en6xlarge is a InstanceType enum value + InstanceTypeD3en6xlarge = "d3en.6xlarge" + + // InstanceTypeD3en8xlarge is a InstanceType enum value + InstanceTypeD3en8xlarge = "d3en.8xlarge" + + // InstanceTypeD3en12xlarge is a InstanceType enum value + InstanceTypeD3en12xlarge = "d3en.12xlarge" + + // InstanceTypeF12xlarge is a InstanceType enum value + InstanceTypeF12xlarge = "f1.2xlarge" + + // InstanceTypeF14xlarge is a InstanceType enum value + InstanceTypeF14xlarge = "f1.4xlarge" + + // InstanceTypeF116xlarge is a InstanceType enum value + InstanceTypeF116xlarge = "f1.16xlarge" + + // InstanceTypeM5Large is a InstanceType enum value + InstanceTypeM5Large = "m5.large" + + // InstanceTypeM5Xlarge is a InstanceType enum value + InstanceTypeM5Xlarge 
= "m5.xlarge" + + // InstanceTypeM52xlarge is a InstanceType enum value + InstanceTypeM52xlarge = "m5.2xlarge" + + // InstanceTypeM54xlarge is a InstanceType enum value + InstanceTypeM54xlarge = "m5.4xlarge" + + // InstanceTypeM58xlarge is a InstanceType enum value + InstanceTypeM58xlarge = "m5.8xlarge" + + // InstanceTypeM512xlarge is a InstanceType enum value + InstanceTypeM512xlarge = "m5.12xlarge" + + // InstanceTypeM516xlarge is a InstanceType enum value + InstanceTypeM516xlarge = "m5.16xlarge" + + // InstanceTypeM524xlarge is a InstanceType enum value + InstanceTypeM524xlarge = "m5.24xlarge" + + // InstanceTypeM5Metal is a InstanceType enum value + InstanceTypeM5Metal = "m5.metal" + + // InstanceTypeM5aLarge is a InstanceType enum value + InstanceTypeM5aLarge = "m5a.large" + + // InstanceTypeM5aXlarge is a InstanceType enum value + InstanceTypeM5aXlarge = "m5a.xlarge" + + // InstanceTypeM5a2xlarge is a InstanceType enum value + InstanceTypeM5a2xlarge = "m5a.2xlarge" + + // InstanceTypeM5a4xlarge is a InstanceType enum value + InstanceTypeM5a4xlarge = "m5a.4xlarge" + + // InstanceTypeM5a8xlarge is a InstanceType enum value + InstanceTypeM5a8xlarge = "m5a.8xlarge" + + // InstanceTypeM5a12xlarge is a InstanceType enum value + InstanceTypeM5a12xlarge = "m5a.12xlarge" + + // InstanceTypeM5a16xlarge is a InstanceType enum value + InstanceTypeM5a16xlarge = "m5a.16xlarge" + + // InstanceTypeM5a24xlarge is a InstanceType enum value + InstanceTypeM5a24xlarge = "m5a.24xlarge" + + // InstanceTypeM5dLarge is a InstanceType enum value + InstanceTypeM5dLarge = "m5d.large" + + // InstanceTypeM5dXlarge is a InstanceType enum value + InstanceTypeM5dXlarge = "m5d.xlarge" + + // InstanceTypeM5d2xlarge is a InstanceType enum value + InstanceTypeM5d2xlarge = "m5d.2xlarge" + + // InstanceTypeM5d4xlarge is a InstanceType enum value + InstanceTypeM5d4xlarge = "m5d.4xlarge" + + // InstanceTypeM5d8xlarge is a InstanceType enum value + InstanceTypeM5d8xlarge = "m5d.8xlarge" + + // 
InstanceTypeM5d12xlarge is a InstanceType enum value + InstanceTypeM5d12xlarge = "m5d.12xlarge" + + // InstanceTypeM5d16xlarge is a InstanceType enum value + InstanceTypeM5d16xlarge = "m5d.16xlarge" + + // InstanceTypeM5d24xlarge is a InstanceType enum value + InstanceTypeM5d24xlarge = "m5d.24xlarge" + + // InstanceTypeM5dMetal is a InstanceType enum value + InstanceTypeM5dMetal = "m5d.metal" + + // InstanceTypeM5adLarge is a InstanceType enum value + InstanceTypeM5adLarge = "m5ad.large" + + // InstanceTypeM5adXlarge is a InstanceType enum value + InstanceTypeM5adXlarge = "m5ad.xlarge" + + // InstanceTypeM5ad2xlarge is a InstanceType enum value + InstanceTypeM5ad2xlarge = "m5ad.2xlarge" + + // InstanceTypeM5ad4xlarge is a InstanceType enum value + InstanceTypeM5ad4xlarge = "m5ad.4xlarge" + + // InstanceTypeM5ad8xlarge is a InstanceType enum value + InstanceTypeM5ad8xlarge = "m5ad.8xlarge" + + // InstanceTypeM5ad12xlarge is a InstanceType enum value + InstanceTypeM5ad12xlarge = "m5ad.12xlarge" + + // InstanceTypeM5ad16xlarge is a InstanceType enum value + InstanceTypeM5ad16xlarge = "m5ad.16xlarge" + + // InstanceTypeM5ad24xlarge is a InstanceType enum value + InstanceTypeM5ad24xlarge = "m5ad.24xlarge" + + // InstanceTypeM5znLarge is a InstanceType enum value + InstanceTypeM5znLarge = "m5zn.large" + + // InstanceTypeM5znXlarge is a InstanceType enum value + InstanceTypeM5znXlarge = "m5zn.xlarge" + + // InstanceTypeM5zn2xlarge is a InstanceType enum value + InstanceTypeM5zn2xlarge = "m5zn.2xlarge" + + // InstanceTypeM5zn3xlarge is a InstanceType enum value + InstanceTypeM5zn3xlarge = "m5zn.3xlarge" + + // InstanceTypeM5zn6xlarge is a InstanceType enum value + InstanceTypeM5zn6xlarge = "m5zn.6xlarge" + + // InstanceTypeM5zn12xlarge is a InstanceType enum value + InstanceTypeM5zn12xlarge = "m5zn.12xlarge" + + // InstanceTypeM5znMetal is a InstanceType enum value + InstanceTypeM5znMetal = "m5zn.metal" + + // InstanceTypeH12xlarge is a InstanceType enum value + 
InstanceTypeH12xlarge = "h1.2xlarge" + + // InstanceTypeH14xlarge is a InstanceType enum value + InstanceTypeH14xlarge = "h1.4xlarge" + + // InstanceTypeH18xlarge is a InstanceType enum value + InstanceTypeH18xlarge = "h1.8xlarge" + + // InstanceTypeH116xlarge is a InstanceType enum value + InstanceTypeH116xlarge = "h1.16xlarge" + + // InstanceTypeZ1dLarge is a InstanceType enum value + InstanceTypeZ1dLarge = "z1d.large" + + // InstanceTypeZ1dXlarge is a InstanceType enum value + InstanceTypeZ1dXlarge = "z1d.xlarge" + + // InstanceTypeZ1d2xlarge is a InstanceType enum value + InstanceTypeZ1d2xlarge = "z1d.2xlarge" + + // InstanceTypeZ1d3xlarge is a InstanceType enum value + InstanceTypeZ1d3xlarge = "z1d.3xlarge" + + // InstanceTypeZ1d6xlarge is a InstanceType enum value + InstanceTypeZ1d6xlarge = "z1d.6xlarge" + + // InstanceTypeZ1d12xlarge is a InstanceType enum value + InstanceTypeZ1d12xlarge = "z1d.12xlarge" + + // InstanceTypeZ1dMetal is a InstanceType enum value + InstanceTypeZ1dMetal = "z1d.metal" + + // InstanceTypeU6tb1Metal is a InstanceType enum value + InstanceTypeU6tb1Metal = "u-6tb1.metal" + + // InstanceTypeU9tb1Metal is a InstanceType enum value + InstanceTypeU9tb1Metal = "u-9tb1.metal" + + // InstanceTypeU12tb1Metal is a InstanceType enum value + InstanceTypeU12tb1Metal = "u-12tb1.metal" + + // InstanceTypeU18tb1Metal is a InstanceType enum value + InstanceTypeU18tb1Metal = "u-18tb1.metal" + + // InstanceTypeU24tb1Metal is a InstanceType enum value + InstanceTypeU24tb1Metal = "u-24tb1.metal" + + // InstanceTypeA1Medium is a InstanceType enum value + InstanceTypeA1Medium = "a1.medium" + + // InstanceTypeA1Large is a InstanceType enum value + InstanceTypeA1Large = "a1.large" + + // InstanceTypeA1Xlarge is a InstanceType enum value + InstanceTypeA1Xlarge = "a1.xlarge" + + // InstanceTypeA12xlarge is a InstanceType enum value + InstanceTypeA12xlarge = "a1.2xlarge" + + // InstanceTypeA14xlarge is a InstanceType enum value + InstanceTypeA14xlarge = 
"a1.4xlarge" + + // InstanceTypeA1Metal is a InstanceType enum value + InstanceTypeA1Metal = "a1.metal" + + // InstanceTypeM5dnLarge is a InstanceType enum value + InstanceTypeM5dnLarge = "m5dn.large" + + // InstanceTypeM5dnXlarge is a InstanceType enum value + InstanceTypeM5dnXlarge = "m5dn.xlarge" + + // InstanceTypeM5dn2xlarge is a InstanceType enum value + InstanceTypeM5dn2xlarge = "m5dn.2xlarge" + + // InstanceTypeM5dn4xlarge is a InstanceType enum value + InstanceTypeM5dn4xlarge = "m5dn.4xlarge" + + // InstanceTypeM5dn8xlarge is a InstanceType enum value + InstanceTypeM5dn8xlarge = "m5dn.8xlarge" + + // InstanceTypeM5dn12xlarge is a InstanceType enum value + InstanceTypeM5dn12xlarge = "m5dn.12xlarge" + + // InstanceTypeM5dn16xlarge is a InstanceType enum value + InstanceTypeM5dn16xlarge = "m5dn.16xlarge" + + // InstanceTypeM5dn24xlarge is a InstanceType enum value + InstanceTypeM5dn24xlarge = "m5dn.24xlarge" + + // InstanceTypeM5nLarge is a InstanceType enum value + InstanceTypeM5nLarge = "m5n.large" + + // InstanceTypeM5nXlarge is a InstanceType enum value + InstanceTypeM5nXlarge = "m5n.xlarge" + + // InstanceTypeM5n2xlarge is a InstanceType enum value + InstanceTypeM5n2xlarge = "m5n.2xlarge" + + // InstanceTypeM5n4xlarge is a InstanceType enum value + InstanceTypeM5n4xlarge = "m5n.4xlarge" + + // InstanceTypeM5n8xlarge is a InstanceType enum value + InstanceTypeM5n8xlarge = "m5n.8xlarge" + + // InstanceTypeM5n12xlarge is a InstanceType enum value + InstanceTypeM5n12xlarge = "m5n.12xlarge" + + // InstanceTypeM5n16xlarge is a InstanceType enum value + InstanceTypeM5n16xlarge = "m5n.16xlarge" + + // InstanceTypeM5n24xlarge is a InstanceType enum value + InstanceTypeM5n24xlarge = "m5n.24xlarge" + + // InstanceTypeR5dnLarge is a InstanceType enum value + InstanceTypeR5dnLarge = "r5dn.large" + + // InstanceTypeR5dnXlarge is a InstanceType enum value + InstanceTypeR5dnXlarge = "r5dn.xlarge" + + // InstanceTypeR5dn2xlarge is a InstanceType enum value + 
InstanceTypeR5dn2xlarge = "r5dn.2xlarge" + + // InstanceTypeR5dn4xlarge is a InstanceType enum value + InstanceTypeR5dn4xlarge = "r5dn.4xlarge" + + // InstanceTypeR5dn8xlarge is a InstanceType enum value + InstanceTypeR5dn8xlarge = "r5dn.8xlarge" + + // InstanceTypeR5dn12xlarge is a InstanceType enum value + InstanceTypeR5dn12xlarge = "r5dn.12xlarge" + + // InstanceTypeR5dn16xlarge is a InstanceType enum value + InstanceTypeR5dn16xlarge = "r5dn.16xlarge" + + // InstanceTypeR5dn24xlarge is a InstanceType enum value + InstanceTypeR5dn24xlarge = "r5dn.24xlarge" + + // InstanceTypeR5nLarge is a InstanceType enum value + InstanceTypeR5nLarge = "r5n.large" + + // InstanceTypeR5nXlarge is a InstanceType enum value + InstanceTypeR5nXlarge = "r5n.xlarge" + + // InstanceTypeR5n2xlarge is a InstanceType enum value + InstanceTypeR5n2xlarge = "r5n.2xlarge" + + // InstanceTypeR5n4xlarge is a InstanceType enum value + InstanceTypeR5n4xlarge = "r5n.4xlarge" + + // InstanceTypeR5n8xlarge is a InstanceType enum value + InstanceTypeR5n8xlarge = "r5n.8xlarge" + + // InstanceTypeR5n12xlarge is a InstanceType enum value + InstanceTypeR5n12xlarge = "r5n.12xlarge" + + // InstanceTypeR5n16xlarge is a InstanceType enum value + InstanceTypeR5n16xlarge = "r5n.16xlarge" + + // InstanceTypeR5n24xlarge is a InstanceType enum value + InstanceTypeR5n24xlarge = "r5n.24xlarge" + + // InstanceTypeInf1Xlarge is a InstanceType enum value + InstanceTypeInf1Xlarge = "inf1.xlarge" + + // InstanceTypeInf12xlarge is a InstanceType enum value + InstanceTypeInf12xlarge = "inf1.2xlarge" + + // InstanceTypeInf16xlarge is a InstanceType enum value + InstanceTypeInf16xlarge = "inf1.6xlarge" + + // InstanceTypeInf124xlarge is a InstanceType enum value + InstanceTypeInf124xlarge = "inf1.24xlarge" + + // InstanceTypeM6gMetal is a InstanceType enum value + InstanceTypeM6gMetal = "m6g.metal" + + // InstanceTypeM6gMedium is a InstanceType enum value + InstanceTypeM6gMedium = "m6g.medium" + + // InstanceTypeM6gLarge is 
a InstanceType enum value + InstanceTypeM6gLarge = "m6g.large" + + // InstanceTypeM6gXlarge is a InstanceType enum value + InstanceTypeM6gXlarge = "m6g.xlarge" + + // InstanceTypeM6g2xlarge is a InstanceType enum value + InstanceTypeM6g2xlarge = "m6g.2xlarge" + + // InstanceTypeM6g4xlarge is a InstanceType enum value + InstanceTypeM6g4xlarge = "m6g.4xlarge" + + // InstanceTypeM6g8xlarge is a InstanceType enum value + InstanceTypeM6g8xlarge = "m6g.8xlarge" + + // InstanceTypeM6g12xlarge is a InstanceType enum value + InstanceTypeM6g12xlarge = "m6g.12xlarge" + + // InstanceTypeM6g16xlarge is a InstanceType enum value + InstanceTypeM6g16xlarge = "m6g.16xlarge" + + // InstanceTypeM6gdMetal is a InstanceType enum value + InstanceTypeM6gdMetal = "m6gd.metal" + + // InstanceTypeM6gdMedium is a InstanceType enum value + InstanceTypeM6gdMedium = "m6gd.medium" + + // InstanceTypeM6gdLarge is a InstanceType enum value + InstanceTypeM6gdLarge = "m6gd.large" + + // InstanceTypeM6gdXlarge is a InstanceType enum value + InstanceTypeM6gdXlarge = "m6gd.xlarge" + + // InstanceTypeM6gd2xlarge is a InstanceType enum value + InstanceTypeM6gd2xlarge = "m6gd.2xlarge" + + // InstanceTypeM6gd4xlarge is a InstanceType enum value + InstanceTypeM6gd4xlarge = "m6gd.4xlarge" + + // InstanceTypeM6gd8xlarge is a InstanceType enum value + InstanceTypeM6gd8xlarge = "m6gd.8xlarge" + + // InstanceTypeM6gd12xlarge is a InstanceType enum value + InstanceTypeM6gd12xlarge = "m6gd.12xlarge" + + // InstanceTypeM6gd16xlarge is a InstanceType enum value + InstanceTypeM6gd16xlarge = "m6gd.16xlarge" + + // InstanceTypeMac1Metal is a InstanceType enum value + InstanceTypeMac1Metal = "mac1.metal" +) + +// InstanceType_Values returns all elements of the InstanceType enum +func InstanceType_Values() []string { + return []string{ + InstanceTypeT1Micro, + InstanceTypeT2Nano, + InstanceTypeT2Micro, + InstanceTypeT2Small, + InstanceTypeT2Medium, + InstanceTypeT2Large, + InstanceTypeT2Xlarge, + InstanceTypeT22xlarge, + 
InstanceTypeT3Nano, + InstanceTypeT3Micro, + InstanceTypeT3Small, + InstanceTypeT3Medium, + InstanceTypeT3Large, + InstanceTypeT3Xlarge, + InstanceTypeT32xlarge, + InstanceTypeT3aNano, + InstanceTypeT3aMicro, + InstanceTypeT3aSmall, + InstanceTypeT3aMedium, + InstanceTypeT3aLarge, + InstanceTypeT3aXlarge, + InstanceTypeT3a2xlarge, + InstanceTypeT4gNano, + InstanceTypeT4gMicro, + InstanceTypeT4gSmall, + InstanceTypeT4gMedium, + InstanceTypeT4gLarge, + InstanceTypeT4gXlarge, + InstanceTypeT4g2xlarge, + InstanceTypeM1Small, + InstanceTypeM1Medium, + InstanceTypeM1Large, + InstanceTypeM1Xlarge, + InstanceTypeM3Medium, + InstanceTypeM3Large, + InstanceTypeM3Xlarge, + InstanceTypeM32xlarge, + InstanceTypeM4Large, + InstanceTypeM4Xlarge, + InstanceTypeM42xlarge, + InstanceTypeM44xlarge, + InstanceTypeM410xlarge, + InstanceTypeM416xlarge, + InstanceTypeM2Xlarge, + InstanceTypeM22xlarge, + InstanceTypeM24xlarge, + InstanceTypeCr18xlarge, + InstanceTypeR3Large, + InstanceTypeR3Xlarge, + InstanceTypeR32xlarge, + InstanceTypeR34xlarge, + InstanceTypeR38xlarge, + InstanceTypeR4Large, + InstanceTypeR4Xlarge, + InstanceTypeR42xlarge, + InstanceTypeR44xlarge, + InstanceTypeR48xlarge, + InstanceTypeR416xlarge, + InstanceTypeR5Large, + InstanceTypeR5Xlarge, + InstanceTypeR52xlarge, + InstanceTypeR54xlarge, + InstanceTypeR58xlarge, + InstanceTypeR512xlarge, + InstanceTypeR516xlarge, + InstanceTypeR524xlarge, + InstanceTypeR5Metal, + InstanceTypeR5aLarge, + InstanceTypeR5aXlarge, + InstanceTypeR5a2xlarge, + InstanceTypeR5a4xlarge, + InstanceTypeR5a8xlarge, + InstanceTypeR5a12xlarge, + InstanceTypeR5a16xlarge, + InstanceTypeR5a24xlarge, + InstanceTypeR5bLarge, + InstanceTypeR5bXlarge, + InstanceTypeR5b2xlarge, + InstanceTypeR5b4xlarge, + InstanceTypeR5b8xlarge, + InstanceTypeR5b12xlarge, + InstanceTypeR5b16xlarge, + InstanceTypeR5b24xlarge, + InstanceTypeR5bMetal, + InstanceTypeR5dLarge, + InstanceTypeR5dXlarge, + InstanceTypeR5d2xlarge, + InstanceTypeR5d4xlarge, + 
InstanceTypeR5d8xlarge, + InstanceTypeR5d12xlarge, + InstanceTypeR5d16xlarge, + InstanceTypeR5d24xlarge, + InstanceTypeR5dMetal, + InstanceTypeR5adLarge, + InstanceTypeR5adXlarge, + InstanceTypeR5ad2xlarge, + InstanceTypeR5ad4xlarge, + InstanceTypeR5ad8xlarge, + InstanceTypeR5ad12xlarge, + InstanceTypeR5ad16xlarge, + InstanceTypeR5ad24xlarge, + InstanceTypeR6gMetal, + InstanceTypeR6gMedium, + InstanceTypeR6gLarge, + InstanceTypeR6gXlarge, + InstanceTypeR6g2xlarge, + InstanceTypeR6g4xlarge, + InstanceTypeR6g8xlarge, + InstanceTypeR6g12xlarge, + InstanceTypeR6g16xlarge, + InstanceTypeR6gdMetal, + InstanceTypeR6gdMedium, + InstanceTypeR6gdLarge, + InstanceTypeR6gdXlarge, + InstanceTypeR6gd2xlarge, + InstanceTypeR6gd4xlarge, + InstanceTypeR6gd8xlarge, + InstanceTypeR6gd12xlarge, + InstanceTypeR6gd16xlarge, + InstanceTypeX116xlarge, + InstanceTypeX132xlarge, + InstanceTypeX1eXlarge, + InstanceTypeX1e2xlarge, + InstanceTypeX1e4xlarge, + InstanceTypeX1e8xlarge, + InstanceTypeX1e16xlarge, + InstanceTypeX1e32xlarge, + InstanceTypeI2Xlarge, + InstanceTypeI22xlarge, + InstanceTypeI24xlarge, + InstanceTypeI28xlarge, + InstanceTypeI3Large, + InstanceTypeI3Xlarge, + InstanceTypeI32xlarge, + InstanceTypeI34xlarge, + InstanceTypeI38xlarge, + InstanceTypeI316xlarge, + InstanceTypeI3Metal, + InstanceTypeI3enLarge, + InstanceTypeI3enXlarge, + InstanceTypeI3en2xlarge, + InstanceTypeI3en3xlarge, + InstanceTypeI3en6xlarge, + InstanceTypeI3en12xlarge, + InstanceTypeI3en24xlarge, + InstanceTypeI3enMetal, + InstanceTypeHi14xlarge, + InstanceTypeHs18xlarge, + InstanceTypeC1Medium, + InstanceTypeC1Xlarge, + InstanceTypeC3Large, + InstanceTypeC3Xlarge, + InstanceTypeC32xlarge, + InstanceTypeC34xlarge, + InstanceTypeC38xlarge, + InstanceTypeC4Large, + InstanceTypeC4Xlarge, + InstanceTypeC42xlarge, + InstanceTypeC44xlarge, + InstanceTypeC48xlarge, + InstanceTypeC5Large, + InstanceTypeC5Xlarge, + InstanceTypeC52xlarge, + InstanceTypeC54xlarge, + InstanceTypeC59xlarge, + InstanceTypeC512xlarge, + 
InstanceTypeC518xlarge, + InstanceTypeC524xlarge, + InstanceTypeC5Metal, + InstanceTypeC5aLarge, + InstanceTypeC5aXlarge, + InstanceTypeC5a2xlarge, + InstanceTypeC5a4xlarge, + InstanceTypeC5a8xlarge, + InstanceTypeC5a12xlarge, + InstanceTypeC5a16xlarge, + InstanceTypeC5a24xlarge, + InstanceTypeC5adLarge, + InstanceTypeC5adXlarge, + InstanceTypeC5ad2xlarge, + InstanceTypeC5ad4xlarge, + InstanceTypeC5ad8xlarge, + InstanceTypeC5ad12xlarge, + InstanceTypeC5ad16xlarge, + InstanceTypeC5ad24xlarge, + InstanceTypeC5dLarge, + InstanceTypeC5dXlarge, + InstanceTypeC5d2xlarge, + InstanceTypeC5d4xlarge, + InstanceTypeC5d9xlarge, + InstanceTypeC5d12xlarge, + InstanceTypeC5d18xlarge, + InstanceTypeC5d24xlarge, + InstanceTypeC5dMetal, + InstanceTypeC5nLarge, + InstanceTypeC5nXlarge, + InstanceTypeC5n2xlarge, + InstanceTypeC5n4xlarge, + InstanceTypeC5n9xlarge, + InstanceTypeC5n18xlarge, + InstanceTypeC6gMetal, + InstanceTypeC6gMedium, + InstanceTypeC6gLarge, + InstanceTypeC6gXlarge, + InstanceTypeC6g2xlarge, + InstanceTypeC6g4xlarge, + InstanceTypeC6g8xlarge, + InstanceTypeC6g12xlarge, + InstanceTypeC6g16xlarge, + InstanceTypeC6gdMetal, + InstanceTypeC6gdMedium, + InstanceTypeC6gdLarge, + InstanceTypeC6gdXlarge, + InstanceTypeC6gd2xlarge, + InstanceTypeC6gd4xlarge, + InstanceTypeC6gd8xlarge, + InstanceTypeC6gd12xlarge, + InstanceTypeC6gd16xlarge, + InstanceTypeCc14xlarge, + InstanceTypeCc28xlarge, + InstanceTypeG22xlarge, + InstanceTypeG28xlarge, + InstanceTypeG34xlarge, + InstanceTypeG38xlarge, + InstanceTypeG316xlarge, + InstanceTypeG3sXlarge, + InstanceTypeG4dnXlarge, + InstanceTypeG4dn2xlarge, + InstanceTypeG4dn4xlarge, + InstanceTypeG4dn8xlarge, + InstanceTypeG4dn12xlarge, + InstanceTypeG4dn16xlarge, + InstanceTypeG4dnMetal, + InstanceTypeCg14xlarge, + InstanceTypeP2Xlarge, + InstanceTypeP28xlarge, + InstanceTypeP216xlarge, + InstanceTypeP32xlarge, + InstanceTypeP38xlarge, + InstanceTypeP316xlarge, + InstanceTypeP3dn24xlarge, + InstanceTypeP4d24xlarge, + InstanceTypeD2Xlarge, 
+ InstanceTypeD22xlarge, + InstanceTypeD24xlarge, + InstanceTypeD28xlarge, + InstanceTypeD3Xlarge, + InstanceTypeD32xlarge, + InstanceTypeD34xlarge, + InstanceTypeD38xlarge, + InstanceTypeD3enXlarge, + InstanceTypeD3en2xlarge, + InstanceTypeD3en4xlarge, + InstanceTypeD3en6xlarge, + InstanceTypeD3en8xlarge, + InstanceTypeD3en12xlarge, + InstanceTypeF12xlarge, + InstanceTypeF14xlarge, + InstanceTypeF116xlarge, + InstanceTypeM5Large, + InstanceTypeM5Xlarge, + InstanceTypeM52xlarge, + InstanceTypeM54xlarge, + InstanceTypeM58xlarge, + InstanceTypeM512xlarge, + InstanceTypeM516xlarge, + InstanceTypeM524xlarge, + InstanceTypeM5Metal, + InstanceTypeM5aLarge, + InstanceTypeM5aXlarge, + InstanceTypeM5a2xlarge, + InstanceTypeM5a4xlarge, + InstanceTypeM5a8xlarge, + InstanceTypeM5a12xlarge, + InstanceTypeM5a16xlarge, + InstanceTypeM5a24xlarge, + InstanceTypeM5dLarge, + InstanceTypeM5dXlarge, + InstanceTypeM5d2xlarge, + InstanceTypeM5d4xlarge, + InstanceTypeM5d8xlarge, + InstanceTypeM5d12xlarge, + InstanceTypeM5d16xlarge, + InstanceTypeM5d24xlarge, + InstanceTypeM5dMetal, + InstanceTypeM5adLarge, + InstanceTypeM5adXlarge, + InstanceTypeM5ad2xlarge, + InstanceTypeM5ad4xlarge, + InstanceTypeM5ad8xlarge, + InstanceTypeM5ad12xlarge, + InstanceTypeM5ad16xlarge, + InstanceTypeM5ad24xlarge, + InstanceTypeM5znLarge, + InstanceTypeM5znXlarge, + InstanceTypeM5zn2xlarge, + InstanceTypeM5zn3xlarge, + InstanceTypeM5zn6xlarge, + InstanceTypeM5zn12xlarge, + InstanceTypeM5znMetal, + InstanceTypeH12xlarge, + InstanceTypeH14xlarge, + InstanceTypeH18xlarge, + InstanceTypeH116xlarge, + InstanceTypeZ1dLarge, + InstanceTypeZ1dXlarge, + InstanceTypeZ1d2xlarge, + InstanceTypeZ1d3xlarge, + InstanceTypeZ1d6xlarge, + InstanceTypeZ1d12xlarge, + InstanceTypeZ1dMetal, + InstanceTypeU6tb1Metal, + InstanceTypeU9tb1Metal, + InstanceTypeU12tb1Metal, + InstanceTypeU18tb1Metal, + InstanceTypeU24tb1Metal, + InstanceTypeA1Medium, + InstanceTypeA1Large, + InstanceTypeA1Xlarge, + InstanceTypeA12xlarge, + 
InstanceTypeA14xlarge, + InstanceTypeA1Metal, + InstanceTypeM5dnLarge, + InstanceTypeM5dnXlarge, + InstanceTypeM5dn2xlarge, + InstanceTypeM5dn4xlarge, + InstanceTypeM5dn8xlarge, + InstanceTypeM5dn12xlarge, + InstanceTypeM5dn16xlarge, + InstanceTypeM5dn24xlarge, + InstanceTypeM5nLarge, + InstanceTypeM5nXlarge, + InstanceTypeM5n2xlarge, + InstanceTypeM5n4xlarge, + InstanceTypeM5n8xlarge, + InstanceTypeM5n12xlarge, + InstanceTypeM5n16xlarge, + InstanceTypeM5n24xlarge, + InstanceTypeR5dnLarge, + InstanceTypeR5dnXlarge, + InstanceTypeR5dn2xlarge, + InstanceTypeR5dn4xlarge, + InstanceTypeR5dn8xlarge, + InstanceTypeR5dn12xlarge, + InstanceTypeR5dn16xlarge, + InstanceTypeR5dn24xlarge, + InstanceTypeR5nLarge, + InstanceTypeR5nXlarge, + InstanceTypeR5n2xlarge, + InstanceTypeR5n4xlarge, + InstanceTypeR5n8xlarge, + InstanceTypeR5n12xlarge, + InstanceTypeR5n16xlarge, + InstanceTypeR5n24xlarge, + InstanceTypeInf1Xlarge, + InstanceTypeInf12xlarge, + InstanceTypeInf16xlarge, + InstanceTypeInf124xlarge, + InstanceTypeM6gMetal, + InstanceTypeM6gMedium, + InstanceTypeM6gLarge, + InstanceTypeM6gXlarge, + InstanceTypeM6g2xlarge, + InstanceTypeM6g4xlarge, + InstanceTypeM6g8xlarge, + InstanceTypeM6g12xlarge, + InstanceTypeM6g16xlarge, + InstanceTypeM6gdMetal, + InstanceTypeM6gdMedium, + InstanceTypeM6gdLarge, + InstanceTypeM6gdXlarge, + InstanceTypeM6gd2xlarge, + InstanceTypeM6gd4xlarge, + InstanceTypeM6gd8xlarge, + InstanceTypeM6gd12xlarge, + InstanceTypeM6gd16xlarge, + InstanceTypeMac1Metal, + } +} + +const ( + // InstanceTypeHypervisorNitro is a InstanceTypeHypervisor enum value + InstanceTypeHypervisorNitro = "nitro" + + // InstanceTypeHypervisorXen is a InstanceTypeHypervisor enum value + InstanceTypeHypervisorXen = "xen" +) + +// InstanceTypeHypervisor_Values returns all elements of the InstanceTypeHypervisor enum +func InstanceTypeHypervisor_Values() []string { + return []string{ + InstanceTypeHypervisorNitro, + InstanceTypeHypervisorXen, + } +} + +const ( + // 
InterfacePermissionTypeInstanceAttach is a InterfacePermissionType enum value + InterfacePermissionTypeInstanceAttach = "INSTANCE-ATTACH" + + // InterfacePermissionTypeEipAssociate is a InterfacePermissionType enum value + InterfacePermissionTypeEipAssociate = "EIP-ASSOCIATE" +) + +// InterfacePermissionType_Values returns all elements of the InterfacePermissionType enum +func InterfacePermissionType_Values() []string { + return []string{ + InterfacePermissionTypeInstanceAttach, + InterfacePermissionTypeEipAssociate, + } +} + +const ( + // Ipv6SupportValueEnable is a Ipv6SupportValue enum value + Ipv6SupportValueEnable = "enable" + + // Ipv6SupportValueDisable is a Ipv6SupportValue enum value + Ipv6SupportValueDisable = "disable" +) + +// Ipv6SupportValue_Values returns all elements of the Ipv6SupportValue enum +func Ipv6SupportValue_Values() []string { + return []string{ + Ipv6SupportValueEnable, + Ipv6SupportValueDisable, + } +} + +const ( + // LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist is a LaunchTemplateErrorCode enum value + LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist = "launchTemplateIdDoesNotExist" + + // LaunchTemplateErrorCodeLaunchTemplateIdMalformed is a LaunchTemplateErrorCode enum value + LaunchTemplateErrorCodeLaunchTemplateIdMalformed = "launchTemplateIdMalformed" + + // LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist is a LaunchTemplateErrorCode enum value + LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist = "launchTemplateNameDoesNotExist" + + // LaunchTemplateErrorCodeLaunchTemplateNameMalformed is a LaunchTemplateErrorCode enum value + LaunchTemplateErrorCodeLaunchTemplateNameMalformed = "launchTemplateNameMalformed" + + // LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist is a LaunchTemplateErrorCode enum value + LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist = "launchTemplateVersionDoesNotExist" + + // LaunchTemplateErrorCodeUnexpectedError is a LaunchTemplateErrorCode enum value + 
LaunchTemplateErrorCodeUnexpectedError = "unexpectedError" +) + +// LaunchTemplateErrorCode_Values returns all elements of the LaunchTemplateErrorCode enum +func LaunchTemplateErrorCode_Values() []string { + return []string{ + LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateIdMalformed, + LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateNameMalformed, + LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist, + LaunchTemplateErrorCodeUnexpectedError, + } +} + +const ( + // LaunchTemplateHttpTokensStateOptional is a LaunchTemplateHttpTokensState enum value + LaunchTemplateHttpTokensStateOptional = "optional" + + // LaunchTemplateHttpTokensStateRequired is a LaunchTemplateHttpTokensState enum value + LaunchTemplateHttpTokensStateRequired = "required" +) + +// LaunchTemplateHttpTokensState_Values returns all elements of the LaunchTemplateHttpTokensState enum +func LaunchTemplateHttpTokensState_Values() []string { + return []string{ + LaunchTemplateHttpTokensStateOptional, + LaunchTemplateHttpTokensStateRequired, + } +} + +const ( + // LaunchTemplateInstanceMetadataEndpointStateDisabled is a LaunchTemplateInstanceMetadataEndpointState enum value + LaunchTemplateInstanceMetadataEndpointStateDisabled = "disabled" + + // LaunchTemplateInstanceMetadataEndpointStateEnabled is a LaunchTemplateInstanceMetadataEndpointState enum value + LaunchTemplateInstanceMetadataEndpointStateEnabled = "enabled" +) + +// LaunchTemplateInstanceMetadataEndpointState_Values returns all elements of the LaunchTemplateInstanceMetadataEndpointState enum +func LaunchTemplateInstanceMetadataEndpointState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataEndpointStateDisabled, + LaunchTemplateInstanceMetadataEndpointStateEnabled, + } +} + +const ( + // LaunchTemplateInstanceMetadataOptionsStatePending is a LaunchTemplateInstanceMetadataOptionsState enum value + 
LaunchTemplateInstanceMetadataOptionsStatePending = "pending" + + // LaunchTemplateInstanceMetadataOptionsStateApplied is a LaunchTemplateInstanceMetadataOptionsState enum value + LaunchTemplateInstanceMetadataOptionsStateApplied = "applied" +) + +// LaunchTemplateInstanceMetadataOptionsState_Values returns all elements of the LaunchTemplateInstanceMetadataOptionsState enum +func LaunchTemplateInstanceMetadataOptionsState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataOptionsStatePending, + LaunchTemplateInstanceMetadataOptionsStateApplied, + } +} + +const ( + // ListingStateAvailable is a ListingState enum value + ListingStateAvailable = "available" + + // ListingStateSold is a ListingState enum value + ListingStateSold = "sold" + + // ListingStateCancelled is a ListingState enum value + ListingStateCancelled = "cancelled" + + // ListingStatePending is a ListingState enum value + ListingStatePending = "pending" +) + +// ListingState_Values returns all elements of the ListingState enum +func ListingState_Values() []string { + return []string{ + ListingStateAvailable, + ListingStateSold, + ListingStateCancelled, + ListingStatePending, + } +} + +const ( + // ListingStatusActive is a ListingStatus enum value + ListingStatusActive = "active" + + // ListingStatusPending is a ListingStatus enum value + ListingStatusPending = "pending" + + // ListingStatusCancelled is a ListingStatus enum value + ListingStatusCancelled = "cancelled" + + // ListingStatusClosed is a ListingStatus enum value + ListingStatusClosed = "closed" +) + +// ListingStatus_Values returns all elements of the ListingStatus enum +func ListingStatus_Values() []string { + return []string{ + ListingStatusActive, + ListingStatusPending, + ListingStatusCancelled, + ListingStatusClosed, + } +} + +const ( + // LocalGatewayRouteStatePending is a LocalGatewayRouteState enum value + LocalGatewayRouteStatePending = "pending" + + // LocalGatewayRouteStateActive is a LocalGatewayRouteState 
enum value + LocalGatewayRouteStateActive = "active" + + // LocalGatewayRouteStateBlackhole is a LocalGatewayRouteState enum value + LocalGatewayRouteStateBlackhole = "blackhole" + + // LocalGatewayRouteStateDeleting is a LocalGatewayRouteState enum value + LocalGatewayRouteStateDeleting = "deleting" + + // LocalGatewayRouteStateDeleted is a LocalGatewayRouteState enum value + LocalGatewayRouteStateDeleted = "deleted" +) + +// LocalGatewayRouteState_Values returns all elements of the LocalGatewayRouteState enum +func LocalGatewayRouteState_Values() []string { + return []string{ + LocalGatewayRouteStatePending, + LocalGatewayRouteStateActive, + LocalGatewayRouteStateBlackhole, + LocalGatewayRouteStateDeleting, + LocalGatewayRouteStateDeleted, + } +} + +const ( + // LocalGatewayRouteTypeStatic is a LocalGatewayRouteType enum value + LocalGatewayRouteTypeStatic = "static" + + // LocalGatewayRouteTypePropagated is a LocalGatewayRouteType enum value + LocalGatewayRouteTypePropagated = "propagated" +) + +// LocalGatewayRouteType_Values returns all elements of the LocalGatewayRouteType enum +func LocalGatewayRouteType_Values() []string { + return []string{ + LocalGatewayRouteTypeStatic, + LocalGatewayRouteTypePropagated, + } +} + +const ( + // LocationTypeRegion is a LocationType enum value + LocationTypeRegion = "region" + + // LocationTypeAvailabilityZone is a LocationType enum value + LocationTypeAvailabilityZone = "availability-zone" + + // LocationTypeAvailabilityZoneId is a LocationType enum value + LocationTypeAvailabilityZoneId = "availability-zone-id" +) + +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeRegion, + LocationTypeAvailabilityZone, + LocationTypeAvailabilityZoneId, + } +} + +const ( + // LogDestinationTypeCloudWatchLogs is a LogDestinationType enum value + LogDestinationTypeCloudWatchLogs = "cloud-watch-logs" + + // LogDestinationTypeS3 is a LogDestinationType 
enum value + LogDestinationTypeS3 = "s3" +) + +// LogDestinationType_Values returns all elements of the LogDestinationType enum +func LogDestinationType_Values() []string { + return []string{ + LogDestinationTypeCloudWatchLogs, + LogDestinationTypeS3, + } +} + +const ( + // MarketTypeSpot is a MarketType enum value + MarketTypeSpot = "spot" +) + +// MarketType_Values returns all elements of the MarketType enum +func MarketType_Values() []string { + return []string{ + MarketTypeSpot, + } +} + +const ( + // MembershipTypeStatic is a MembershipType enum value + MembershipTypeStatic = "static" + + // MembershipTypeIgmp is a MembershipType enum value + MembershipTypeIgmp = "igmp" +) + +// MembershipType_Values returns all elements of the MembershipType enum +func MembershipType_Values() []string { + return []string{ + MembershipTypeStatic, + MembershipTypeIgmp, + } +} + +const ( + // ModifyAvailabilityZoneOptInStatusOptedIn is a ModifyAvailabilityZoneOptInStatus enum value + ModifyAvailabilityZoneOptInStatusOptedIn = "opted-in" + + // ModifyAvailabilityZoneOptInStatusNotOptedIn is a ModifyAvailabilityZoneOptInStatus enum value + ModifyAvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" +) + +// ModifyAvailabilityZoneOptInStatus_Values returns all elements of the ModifyAvailabilityZoneOptInStatus enum +func ModifyAvailabilityZoneOptInStatus_Values() []string { + return []string{ + ModifyAvailabilityZoneOptInStatusOptedIn, + ModifyAvailabilityZoneOptInStatusNotOptedIn, + } +} + +const ( + // MonitoringStateDisabled is a MonitoringState enum value + MonitoringStateDisabled = "disabled" + + // MonitoringStateDisabling is a MonitoringState enum value + MonitoringStateDisabling = "disabling" + + // MonitoringStateEnabled is a MonitoringState enum value + MonitoringStateEnabled = "enabled" + + // MonitoringStatePending is a MonitoringState enum value + MonitoringStatePending = "pending" +) + +// MonitoringState_Values returns all elements of the MonitoringState enum +func 
MonitoringState_Values() []string { + return []string{ + MonitoringStateDisabled, + MonitoringStateDisabling, + MonitoringStateEnabled, + MonitoringStatePending, + } +} + +const ( + // MoveStatusMovingToVpc is a MoveStatus enum value + MoveStatusMovingToVpc = "movingToVpc" + + // MoveStatusRestoringToClassic is a MoveStatus enum value + MoveStatusRestoringToClassic = "restoringToClassic" +) + +// MoveStatus_Values returns all elements of the MoveStatus enum +func MoveStatus_Values() []string { + return []string{ + MoveStatusMovingToVpc, + MoveStatusRestoringToClassic, + } +} + +const ( + // MulticastSupportValueEnable is a MulticastSupportValue enum value + MulticastSupportValueEnable = "enable" + + // MulticastSupportValueDisable is a MulticastSupportValue enum value + MulticastSupportValueDisable = "disable" +) + +// MulticastSupportValue_Values returns all elements of the MulticastSupportValue enum +func MulticastSupportValue_Values() []string { + return []string{ + MulticastSupportValueEnable, + MulticastSupportValueDisable, + } +} + +const ( + // NatGatewayStatePending is a NatGatewayState enum value + NatGatewayStatePending = "pending" + + // NatGatewayStateFailed is a NatGatewayState enum value + NatGatewayStateFailed = "failed" + + // NatGatewayStateAvailable is a NatGatewayState enum value + NatGatewayStateAvailable = "available" + + // NatGatewayStateDeleting is a NatGatewayState enum value + NatGatewayStateDeleting = "deleting" + + // NatGatewayStateDeleted is a NatGatewayState enum value + NatGatewayStateDeleted = "deleted" +) + +// NatGatewayState_Values returns all elements of the NatGatewayState enum +func NatGatewayState_Values() []string { + return []string{ + NatGatewayStatePending, + NatGatewayStateFailed, + NatGatewayStateAvailable, + NatGatewayStateDeleting, + NatGatewayStateDeleted, + } +} + +const ( + // NetworkInterfaceAttributeDescription is a NetworkInterfaceAttribute enum value + NetworkInterfaceAttributeDescription = "description" + + // 
NetworkInterfaceAttributeGroupSet is a NetworkInterfaceAttribute enum value + NetworkInterfaceAttributeGroupSet = "groupSet" + + // NetworkInterfaceAttributeSourceDestCheck is a NetworkInterfaceAttribute enum value + NetworkInterfaceAttributeSourceDestCheck = "sourceDestCheck" + + // NetworkInterfaceAttributeAttachment is a NetworkInterfaceAttribute enum value + NetworkInterfaceAttributeAttachment = "attachment" +) + +// NetworkInterfaceAttribute_Values returns all elements of the NetworkInterfaceAttribute enum +func NetworkInterfaceAttribute_Values() []string { + return []string{ + NetworkInterfaceAttributeDescription, + NetworkInterfaceAttributeGroupSet, + NetworkInterfaceAttributeSourceDestCheck, + NetworkInterfaceAttributeAttachment, + } +} + +const ( + // NetworkInterfaceCreationTypeEfa is a NetworkInterfaceCreationType enum value + NetworkInterfaceCreationTypeEfa = "efa" +) + +// NetworkInterfaceCreationType_Values returns all elements of the NetworkInterfaceCreationType enum +func NetworkInterfaceCreationType_Values() []string { + return []string{ + NetworkInterfaceCreationTypeEfa, + } +} + +const ( + // NetworkInterfacePermissionStateCodePending is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodePending = "pending" + + // NetworkInterfacePermissionStateCodeGranted is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeGranted = "granted" + + // NetworkInterfacePermissionStateCodeRevoking is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoking = "revoking" + + // NetworkInterfacePermissionStateCodeRevoked is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoked = "revoked" +) + +// NetworkInterfacePermissionStateCode_Values returns all elements of the NetworkInterfacePermissionStateCode enum +func NetworkInterfacePermissionStateCode_Values() []string { + return []string{ + 
NetworkInterfacePermissionStateCodePending, + NetworkInterfacePermissionStateCodeGranted, + NetworkInterfacePermissionStateCodeRevoking, + NetworkInterfacePermissionStateCodeRevoked, + } +} + +const ( + // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusAvailable = "available" + + // NetworkInterfaceStatusAssociated is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusAssociated = "associated" + + // NetworkInterfaceStatusAttaching is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusAttaching = "attaching" + + // NetworkInterfaceStatusInUse is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusInUse = "in-use" + + // NetworkInterfaceStatusDetaching is a NetworkInterfaceStatus enum value + NetworkInterfaceStatusDetaching = "detaching" +) + +// NetworkInterfaceStatus_Values returns all elements of the NetworkInterfaceStatus enum +func NetworkInterfaceStatus_Values() []string { + return []string{ + NetworkInterfaceStatusAvailable, + NetworkInterfaceStatusAssociated, + NetworkInterfaceStatusAttaching, + NetworkInterfaceStatusInUse, + NetworkInterfaceStatusDetaching, + } +} + +const ( + // NetworkInterfaceTypeInterface is a NetworkInterfaceType enum value + NetworkInterfaceTypeInterface = "interface" + + // NetworkInterfaceTypeNatGateway is a NetworkInterfaceType enum value + NetworkInterfaceTypeNatGateway = "natGateway" + + // NetworkInterfaceTypeEfa is a NetworkInterfaceType enum value + NetworkInterfaceTypeEfa = "efa" +) + +// NetworkInterfaceType_Values returns all elements of the NetworkInterfaceType enum +func NetworkInterfaceType_Values() []string { + return []string{ + NetworkInterfaceTypeInterface, + NetworkInterfaceTypeNatGateway, + NetworkInterfaceTypeEfa, + } +} + +const ( + // OfferingClassTypeStandard is a OfferingClassType enum value + OfferingClassTypeStandard = "standard" + + // OfferingClassTypeConvertible is a OfferingClassType enum value + OfferingClassTypeConvertible = 
"convertible" +) + +// OfferingClassType_Values returns all elements of the OfferingClassType enum +func OfferingClassType_Values() []string { + return []string{ + OfferingClassTypeStandard, + OfferingClassTypeConvertible, + } +} + +const ( + // OfferingTypeValuesHeavyUtilization is a OfferingTypeValues enum value + OfferingTypeValuesHeavyUtilization = "Heavy Utilization" + + // OfferingTypeValuesMediumUtilization is a OfferingTypeValues enum value + OfferingTypeValuesMediumUtilization = "Medium Utilization" + + // OfferingTypeValuesLightUtilization is a OfferingTypeValues enum value + OfferingTypeValuesLightUtilization = "Light Utilization" + + // OfferingTypeValuesNoUpfront is a OfferingTypeValues enum value + OfferingTypeValuesNoUpfront = "No Upfront" + + // OfferingTypeValuesPartialUpfront is a OfferingTypeValues enum value + OfferingTypeValuesPartialUpfront = "Partial Upfront" + + // OfferingTypeValuesAllUpfront is a OfferingTypeValues enum value + OfferingTypeValuesAllUpfront = "All Upfront" +) + +// OfferingTypeValues_Values returns all elements of the OfferingTypeValues enum +func OfferingTypeValues_Values() []string { + return []string{ + OfferingTypeValuesHeavyUtilization, + OfferingTypeValuesMediumUtilization, + OfferingTypeValuesLightUtilization, + OfferingTypeValuesNoUpfront, + OfferingTypeValuesPartialUpfront, + OfferingTypeValuesAllUpfront, + } +} + +const ( + // OnDemandAllocationStrategyLowestPrice is a OnDemandAllocationStrategy enum value + OnDemandAllocationStrategyLowestPrice = "lowestPrice" + + // OnDemandAllocationStrategyPrioritized is a OnDemandAllocationStrategy enum value + OnDemandAllocationStrategyPrioritized = "prioritized" +) + +// OnDemandAllocationStrategy_Values returns all elements of the OnDemandAllocationStrategy enum +func OnDemandAllocationStrategy_Values() []string { + return []string{ + OnDemandAllocationStrategyLowestPrice, + OnDemandAllocationStrategyPrioritized, + } +} + +const ( + // OperationTypeAdd is a OperationType 
enum value + OperationTypeAdd = "add" + + // OperationTypeRemove is a OperationType enum value + OperationTypeRemove = "remove" +) + +// OperationType_Values returns all elements of the OperationType enum +func OperationType_Values() []string { + return []string{ + OperationTypeAdd, + OperationTypeRemove, + } +} + +const ( + // PaymentOptionAllUpfront is a PaymentOption enum value + PaymentOptionAllUpfront = "AllUpfront" + + // PaymentOptionPartialUpfront is a PaymentOption enum value + PaymentOptionPartialUpfront = "PartialUpfront" + + // PaymentOptionNoUpfront is a PaymentOption enum value + PaymentOptionNoUpfront = "NoUpfront" +) + +// PaymentOption_Values returns all elements of the PaymentOption enum +func PaymentOption_Values() []string { + return []string{ + PaymentOptionAllUpfront, + PaymentOptionPartialUpfront, + PaymentOptionNoUpfront, + } +} + +const ( + // PermissionGroupAll is a PermissionGroup enum value + PermissionGroupAll = "all" +) + +// PermissionGroup_Values returns all elements of the PermissionGroup enum +func PermissionGroup_Values() []string { + return []string{ + PermissionGroupAll, + } +} + +const ( + // PlacementGroupStatePending is a PlacementGroupState enum value + PlacementGroupStatePending = "pending" + + // PlacementGroupStateAvailable is a PlacementGroupState enum value + PlacementGroupStateAvailable = "available" + + // PlacementGroupStateDeleting is a PlacementGroupState enum value + PlacementGroupStateDeleting = "deleting" + + // PlacementGroupStateDeleted is a PlacementGroupState enum value + PlacementGroupStateDeleted = "deleted" +) + +// PlacementGroupState_Values returns all elements of the PlacementGroupState enum +func PlacementGroupState_Values() []string { + return []string{ + PlacementGroupStatePending, + PlacementGroupStateAvailable, + PlacementGroupStateDeleting, + PlacementGroupStateDeleted, + } +} + +const ( + // PlacementGroupStrategyCluster is a PlacementGroupStrategy enum value + PlacementGroupStrategyCluster = 
"cluster" + + // PlacementGroupStrategyPartition is a PlacementGroupStrategy enum value + PlacementGroupStrategyPartition = "partition" + + // PlacementGroupStrategySpread is a PlacementGroupStrategy enum value + PlacementGroupStrategySpread = "spread" +) + +// PlacementGroupStrategy_Values returns all elements of the PlacementGroupStrategy enum +func PlacementGroupStrategy_Values() []string { + return []string{ + PlacementGroupStrategyCluster, + PlacementGroupStrategyPartition, + PlacementGroupStrategySpread, + } +} + +const ( + // PlacementStrategyCluster is a PlacementStrategy enum value + PlacementStrategyCluster = "cluster" + + // PlacementStrategySpread is a PlacementStrategy enum value + PlacementStrategySpread = "spread" + + // PlacementStrategyPartition is a PlacementStrategy enum value + PlacementStrategyPartition = "partition" +) + +// PlacementStrategy_Values returns all elements of the PlacementStrategy enum +func PlacementStrategy_Values() []string { + return []string{ + PlacementStrategyCluster, + PlacementStrategySpread, + PlacementStrategyPartition, + } +} + +const ( + // PlatformValuesWindows is a PlatformValues enum value + PlatformValuesWindows = "Windows" +) + +// PlatformValues_Values returns all elements of the PlatformValues enum +func PlatformValues_Values() []string { + return []string{ + PlatformValuesWindows, + } +} + +const ( + // PrefixListStateCreateInProgress is a PrefixListState enum value + PrefixListStateCreateInProgress = "create-in-progress" + + // PrefixListStateCreateComplete is a PrefixListState enum value + PrefixListStateCreateComplete = "create-complete" + + // PrefixListStateCreateFailed is a PrefixListState enum value + PrefixListStateCreateFailed = "create-failed" + + // PrefixListStateModifyInProgress is a PrefixListState enum value + PrefixListStateModifyInProgress = "modify-in-progress" + + // PrefixListStateModifyComplete is a PrefixListState enum value + PrefixListStateModifyComplete = "modify-complete" + + // 
PrefixListStateModifyFailed is a PrefixListState enum value + PrefixListStateModifyFailed = "modify-failed" + + // PrefixListStateRestoreInProgress is a PrefixListState enum value + PrefixListStateRestoreInProgress = "restore-in-progress" + + // PrefixListStateRestoreComplete is a PrefixListState enum value + PrefixListStateRestoreComplete = "restore-complete" + + // PrefixListStateRestoreFailed is a PrefixListState enum value + PrefixListStateRestoreFailed = "restore-failed" + + // PrefixListStateDeleteInProgress is a PrefixListState enum value + PrefixListStateDeleteInProgress = "delete-in-progress" + + // PrefixListStateDeleteComplete is a PrefixListState enum value + PrefixListStateDeleteComplete = "delete-complete" + + // PrefixListStateDeleteFailed is a PrefixListState enum value + PrefixListStateDeleteFailed = "delete-failed" +) + +// PrefixListState_Values returns all elements of the PrefixListState enum +func PrefixListState_Values() []string { + return []string{ + PrefixListStateCreateInProgress, + PrefixListStateCreateComplete, + PrefixListStateCreateFailed, + PrefixListStateModifyInProgress, + PrefixListStateModifyComplete, + PrefixListStateModifyFailed, + PrefixListStateRestoreInProgress, + PrefixListStateRestoreComplete, + PrefixListStateRestoreFailed, + PrefixListStateDeleteInProgress, + PrefixListStateDeleteComplete, + PrefixListStateDeleteFailed, + } +} + +const ( + // PrincipalTypeAll is a PrincipalType enum value + PrincipalTypeAll = "All" + + // PrincipalTypeService is a PrincipalType enum value + PrincipalTypeService = "Service" + + // PrincipalTypeOrganizationUnit is a PrincipalType enum value + PrincipalTypeOrganizationUnit = "OrganizationUnit" + + // PrincipalTypeAccount is a PrincipalType enum value + PrincipalTypeAccount = "Account" + + // PrincipalTypeUser is a PrincipalType enum value + PrincipalTypeUser = "User" + + // PrincipalTypeRole is a PrincipalType enum value + PrincipalTypeRole = "Role" +) + +// PrincipalType_Values returns all 
elements of the PrincipalType enum +func PrincipalType_Values() []string { + return []string{ + PrincipalTypeAll, + PrincipalTypeService, + PrincipalTypeOrganizationUnit, + PrincipalTypeAccount, + PrincipalTypeUser, + PrincipalTypeRole, + } +} + +const ( + // ProductCodeValuesDevpay is a ProductCodeValues enum value + ProductCodeValuesDevpay = "devpay" + + // ProductCodeValuesMarketplace is a ProductCodeValues enum value + ProductCodeValuesMarketplace = "marketplace" +) + +// ProductCodeValues_Values returns all elements of the ProductCodeValues enum +func ProductCodeValues_Values() []string { + return []string{ + ProductCodeValuesDevpay, + ProductCodeValuesMarketplace, + } +} + +const ( + // RIProductDescriptionLinuxUnix is a RIProductDescription enum value + RIProductDescriptionLinuxUnix = "Linux/UNIX" + + // RIProductDescriptionLinuxUnixamazonVpc is a RIProductDescription enum value + RIProductDescriptionLinuxUnixamazonVpc = "Linux/UNIX (Amazon VPC)" + + // RIProductDescriptionWindows is a RIProductDescription enum value + RIProductDescriptionWindows = "Windows" + + // RIProductDescriptionWindowsAmazonVpc is a RIProductDescription enum value + RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" +) + +// RIProductDescription_Values returns all elements of the RIProductDescription enum +func RIProductDescription_Values() []string { + return []string{ + RIProductDescriptionLinuxUnix, + RIProductDescriptionLinuxUnixamazonVpc, + RIProductDescriptionWindows, + RIProductDescriptionWindowsAmazonVpc, + } +} + +const ( + // RecurringChargeFrequencyHourly is a RecurringChargeFrequency enum value + RecurringChargeFrequencyHourly = "Hourly" +) + +// RecurringChargeFrequency_Values returns all elements of the RecurringChargeFrequency enum +func RecurringChargeFrequency_Values() []string { + return []string{ + RecurringChargeFrequencyHourly, + } +} + +const ( + // ReplacementStrategyLaunch is a ReplacementStrategy enum value + ReplacementStrategyLaunch = "launch" +) 
+ +// ReplacementStrategy_Values returns all elements of the ReplacementStrategy enum +func ReplacementStrategy_Values() []string { + return []string{ + ReplacementStrategyLaunch, + } +} + +const ( + // ReportInstanceReasonCodesInstanceStuckInState is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" + + // ReportInstanceReasonCodesUnresponsive is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesUnresponsive = "unresponsive" + + // ReportInstanceReasonCodesNotAcceptingCredentials is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesNotAcceptingCredentials = "not-accepting-credentials" + + // ReportInstanceReasonCodesPasswordNotAvailable is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesPasswordNotAvailable = "password-not-available" + + // ReportInstanceReasonCodesPerformanceNetwork is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesPerformanceNetwork = "performance-network" + + // ReportInstanceReasonCodesPerformanceInstanceStore is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesPerformanceInstanceStore = "performance-instance-store" + + // ReportInstanceReasonCodesPerformanceEbsVolume is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesPerformanceEbsVolume = "performance-ebs-volume" + + // ReportInstanceReasonCodesPerformanceOther is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesPerformanceOther = "performance-other" + + // ReportInstanceReasonCodesOther is a ReportInstanceReasonCodes enum value + ReportInstanceReasonCodesOther = "other" +) + +// ReportInstanceReasonCodes_Values returns all elements of the ReportInstanceReasonCodes enum +func ReportInstanceReasonCodes_Values() []string { + return []string{ + ReportInstanceReasonCodesInstanceStuckInState, + ReportInstanceReasonCodesUnresponsive, + ReportInstanceReasonCodesNotAcceptingCredentials, + 
ReportInstanceReasonCodesPasswordNotAvailable, + ReportInstanceReasonCodesPerformanceNetwork, + ReportInstanceReasonCodesPerformanceInstanceStore, + ReportInstanceReasonCodesPerformanceEbsVolume, + ReportInstanceReasonCodesPerformanceOther, + ReportInstanceReasonCodesOther, + } +} + +const ( + // ReportStatusTypeOk is a ReportStatusType enum value + ReportStatusTypeOk = "ok" + + // ReportStatusTypeImpaired is a ReportStatusType enum value + ReportStatusTypeImpaired = "impaired" +) + +// ReportStatusType_Values returns all elements of the ReportStatusType enum +func ReportStatusType_Values() []string { + return []string{ + ReportStatusTypeOk, + ReportStatusTypeImpaired, + } +} + +const ( + // ReservationStatePaymentPending is a ReservationState enum value + ReservationStatePaymentPending = "payment-pending" + + // ReservationStatePaymentFailed is a ReservationState enum value + ReservationStatePaymentFailed = "payment-failed" + + // ReservationStateActive is a ReservationState enum value + ReservationStateActive = "active" + + // ReservationStateRetired is a ReservationState enum value + ReservationStateRetired = "retired" +) + +// ReservationState_Values returns all elements of the ReservationState enum +func ReservationState_Values() []string { + return []string{ + ReservationStatePaymentPending, + ReservationStatePaymentFailed, + ReservationStateActive, + ReservationStateRetired, + } +} + +const ( + // ReservedInstanceStatePaymentPending is a ReservedInstanceState enum value + ReservedInstanceStatePaymentPending = "payment-pending" + + // ReservedInstanceStateActive is a ReservedInstanceState enum value + ReservedInstanceStateActive = "active" + + // ReservedInstanceStatePaymentFailed is a ReservedInstanceState enum value + ReservedInstanceStatePaymentFailed = "payment-failed" + + // ReservedInstanceStateRetired is a ReservedInstanceState enum value + ReservedInstanceStateRetired = "retired" + + // ReservedInstanceStateQueued is a ReservedInstanceState enum value 
+ ReservedInstanceStateQueued = "queued" + + // ReservedInstanceStateQueuedDeleted is a ReservedInstanceState enum value + ReservedInstanceStateQueuedDeleted = "queued-deleted" +) + +// ReservedInstanceState_Values returns all elements of the ReservedInstanceState enum +func ReservedInstanceState_Values() []string { + return []string{ + ReservedInstanceStatePaymentPending, + ReservedInstanceStateActive, + ReservedInstanceStatePaymentFailed, + ReservedInstanceStateRetired, + ReservedInstanceStateQueued, + ReservedInstanceStateQueuedDeleted, + } +} + +const ( + // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value + ResetFpgaImageAttributeNameLoadPermission = "loadPermission" +) + +// ResetFpgaImageAttributeName_Values returns all elements of the ResetFpgaImageAttributeName enum +func ResetFpgaImageAttributeName_Values() []string { + return []string{ + ResetFpgaImageAttributeNameLoadPermission, + } +} + +const ( + // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value + ResetImageAttributeNameLaunchPermission = "launchPermission" +) + +// ResetImageAttributeName_Values returns all elements of the ResetImageAttributeName enum +func ResetImageAttributeName_Values() []string { + return []string{ + ResetImageAttributeNameLaunchPermission, + } +} + +const ( + // ResourceTypeClientVpnEndpoint is a ResourceType enum value + ResourceTypeClientVpnEndpoint = "client-vpn-endpoint" + + // ResourceTypeCustomerGateway is a ResourceType enum value + ResourceTypeCustomerGateway = "customer-gateway" + + // ResourceTypeDedicatedHost is a ResourceType enum value + ResourceTypeDedicatedHost = "dedicated-host" + + // ResourceTypeDhcpOptions is a ResourceType enum value + ResourceTypeDhcpOptions = "dhcp-options" + + // ResourceTypeEgressOnlyInternetGateway is a ResourceType enum value + ResourceTypeEgressOnlyInternetGateway = "egress-only-internet-gateway" + + // ResourceTypeElasticIp is a ResourceType enum value + 
ResourceTypeElasticIp = "elastic-ip" + + // ResourceTypeElasticGpu is a ResourceType enum value + ResourceTypeElasticGpu = "elastic-gpu" + + // ResourceTypeExportImageTask is a ResourceType enum value + ResourceTypeExportImageTask = "export-image-task" + + // ResourceTypeExportInstanceTask is a ResourceType enum value + ResourceTypeExportInstanceTask = "export-instance-task" + + // ResourceTypeFleet is a ResourceType enum value + ResourceTypeFleet = "fleet" + + // ResourceTypeFpgaImage is a ResourceType enum value + ResourceTypeFpgaImage = "fpga-image" + + // ResourceTypeHostReservation is a ResourceType enum value + ResourceTypeHostReservation = "host-reservation" + + // ResourceTypeImage is a ResourceType enum value + ResourceTypeImage = "image" + + // ResourceTypeImportImageTask is a ResourceType enum value + ResourceTypeImportImageTask = "import-image-task" + + // ResourceTypeImportSnapshotTask is a ResourceType enum value + ResourceTypeImportSnapshotTask = "import-snapshot-task" + + // ResourceTypeInstance is a ResourceType enum value + ResourceTypeInstance = "instance" + + // ResourceTypeInternetGateway is a ResourceType enum value + ResourceTypeInternetGateway = "internet-gateway" + + // ResourceTypeKeyPair is a ResourceType enum value + ResourceTypeKeyPair = "key-pair" + + // ResourceTypeLaunchTemplate is a ResourceType enum value + ResourceTypeLaunchTemplate = "launch-template" + + // ResourceTypeLocalGatewayRouteTableVpcAssociation is a ResourceType enum value + ResourceTypeLocalGatewayRouteTableVpcAssociation = "local-gateway-route-table-vpc-association" + + // ResourceTypeNatgateway is a ResourceType enum value + ResourceTypeNatgateway = "natgateway" + + // ResourceTypeNetworkAcl is a ResourceType enum value + ResourceTypeNetworkAcl = "network-acl" + + // ResourceTypeNetworkInterface is a ResourceType enum value + ResourceTypeNetworkInterface = "network-interface" + + // ResourceTypePlacementGroup is a ResourceType enum value + 
ResourceTypePlacementGroup = "placement-group" + + // ResourceTypeReservedInstances is a ResourceType enum value + ResourceTypeReservedInstances = "reserved-instances" + + // ResourceTypeRouteTable is a ResourceType enum value + ResourceTypeRouteTable = "route-table" + + // ResourceTypeSecurityGroup is a ResourceType enum value + ResourceTypeSecurityGroup = "security-group" + + // ResourceTypeSnapshot is a ResourceType enum value + ResourceTypeSnapshot = "snapshot" + + // ResourceTypeSpotFleetRequest is a ResourceType enum value + ResourceTypeSpotFleetRequest = "spot-fleet-request" + + // ResourceTypeSpotInstancesRequest is a ResourceType enum value + ResourceTypeSpotInstancesRequest = "spot-instances-request" + + // ResourceTypeSubnet is a ResourceType enum value + ResourceTypeSubnet = "subnet" + + // ResourceTypeTrafficMirrorFilter is a ResourceType enum value + ResourceTypeTrafficMirrorFilter = "traffic-mirror-filter" + + // ResourceTypeTrafficMirrorSession is a ResourceType enum value + ResourceTypeTrafficMirrorSession = "traffic-mirror-session" + + // ResourceTypeTrafficMirrorTarget is a ResourceType enum value + ResourceTypeTrafficMirrorTarget = "traffic-mirror-target" + + // ResourceTypeTransitGateway is a ResourceType enum value + ResourceTypeTransitGateway = "transit-gateway" + + // ResourceTypeTransitGatewayAttachment is a ResourceType enum value + ResourceTypeTransitGatewayAttachment = "transit-gateway-attachment" + + // ResourceTypeTransitGatewayMulticastDomain is a ResourceType enum value + ResourceTypeTransitGatewayMulticastDomain = "transit-gateway-multicast-domain" + + // ResourceTypeTransitGatewayRouteTable is a ResourceType enum value + ResourceTypeTransitGatewayRouteTable = "transit-gateway-route-table" + + // ResourceTypeVolume is a ResourceType enum value + ResourceTypeVolume = "volume" + + // ResourceTypeVpc is a ResourceType enum value + ResourceTypeVpc = "vpc" + + // ResourceTypeVpcPeeringConnection is a ResourceType enum value + 
ResourceTypeVpcPeeringConnection = "vpc-peering-connection" + + // ResourceTypeVpnConnection is a ResourceType enum value + ResourceTypeVpnConnection = "vpn-connection" + + // ResourceTypeVpnGateway is a ResourceType enum value + ResourceTypeVpnGateway = "vpn-gateway" + + // ResourceTypeVpcFlowLog is a ResourceType enum value + ResourceTypeVpcFlowLog = "vpc-flow-log" +) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeClientVpnEndpoint, + ResourceTypeCustomerGateway, + ResourceTypeDedicatedHost, + ResourceTypeDhcpOptions, + ResourceTypeEgressOnlyInternetGateway, + ResourceTypeElasticIp, + ResourceTypeElasticGpu, + ResourceTypeExportImageTask, + ResourceTypeExportInstanceTask, + ResourceTypeFleet, + ResourceTypeFpgaImage, + ResourceTypeHostReservation, + ResourceTypeImage, + ResourceTypeImportImageTask, + ResourceTypeImportSnapshotTask, + ResourceTypeInstance, + ResourceTypeInternetGateway, + ResourceTypeKeyPair, + ResourceTypeLaunchTemplate, + ResourceTypeLocalGatewayRouteTableVpcAssociation, + ResourceTypeNatgateway, + ResourceTypeNetworkAcl, + ResourceTypeNetworkInterface, + ResourceTypePlacementGroup, + ResourceTypeReservedInstances, + ResourceTypeRouteTable, + ResourceTypeSecurityGroup, + ResourceTypeSnapshot, + ResourceTypeSpotFleetRequest, + ResourceTypeSpotInstancesRequest, + ResourceTypeSubnet, + ResourceTypeTrafficMirrorFilter, + ResourceTypeTrafficMirrorSession, + ResourceTypeTrafficMirrorTarget, + ResourceTypeTransitGateway, + ResourceTypeTransitGatewayAttachment, + ResourceTypeTransitGatewayMulticastDomain, + ResourceTypeTransitGatewayRouteTable, + ResourceTypeVolume, + ResourceTypeVpc, + ResourceTypeVpcPeeringConnection, + ResourceTypeVpnConnection, + ResourceTypeVpnGateway, + ResourceTypeVpcFlowLog, + } +} + +const ( + // RootDeviceTypeEbs is a RootDeviceType enum value + RootDeviceTypeEbs = "ebs" + + // RootDeviceTypeInstanceStore is a RootDeviceType enum 
value + RootDeviceTypeInstanceStore = "instance-store" +) + +// RootDeviceType_Values returns all elements of the RootDeviceType enum +func RootDeviceType_Values() []string { + return []string{ + RootDeviceTypeEbs, + RootDeviceTypeInstanceStore, + } +} + +const ( + // RouteOriginCreateRouteTable is a RouteOrigin enum value + RouteOriginCreateRouteTable = "CreateRouteTable" + + // RouteOriginCreateRoute is a RouteOrigin enum value + RouteOriginCreateRoute = "CreateRoute" + + // RouteOriginEnableVgwRoutePropagation is a RouteOrigin enum value + RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" +) + +// RouteOrigin_Values returns all elements of the RouteOrigin enum +func RouteOrigin_Values() []string { + return []string{ + RouteOriginCreateRouteTable, + RouteOriginCreateRoute, + RouteOriginEnableVgwRoutePropagation, + } +} + +const ( + // RouteStateActive is a RouteState enum value + RouteStateActive = "active" + + // RouteStateBlackhole is a RouteState enum value + RouteStateBlackhole = "blackhole" +) + +// RouteState_Values returns all elements of the RouteState enum +func RouteState_Values() []string { + return []string{ + RouteStateActive, + RouteStateBlackhole, + } +} + +const ( + // RouteTableAssociationStateCodeAssociating is a RouteTableAssociationStateCode enum value + RouteTableAssociationStateCodeAssociating = "associating" + + // RouteTableAssociationStateCodeAssociated is a RouteTableAssociationStateCode enum value + RouteTableAssociationStateCodeAssociated = "associated" + + // RouteTableAssociationStateCodeDisassociating is a RouteTableAssociationStateCode enum value + RouteTableAssociationStateCodeDisassociating = "disassociating" + + // RouteTableAssociationStateCodeDisassociated is a RouteTableAssociationStateCode enum value + RouteTableAssociationStateCodeDisassociated = "disassociated" + + // RouteTableAssociationStateCodeFailed is a RouteTableAssociationStateCode enum value + RouteTableAssociationStateCodeFailed = "failed" +) + 
+// RouteTableAssociationStateCode_Values returns all elements of the RouteTableAssociationStateCode enum +func RouteTableAssociationStateCode_Values() []string { + return []string{ + RouteTableAssociationStateCodeAssociating, + RouteTableAssociationStateCodeAssociated, + RouteTableAssociationStateCodeDisassociating, + RouteTableAssociationStateCodeDisassociated, + RouteTableAssociationStateCodeFailed, + } +} + +const ( + // RuleActionAllow is a RuleAction enum value + RuleActionAllow = "allow" + + // RuleActionDeny is a RuleAction enum value + RuleActionDeny = "deny" +) + +// RuleAction_Values returns all elements of the RuleAction enum +func RuleAction_Values() []string { + return []string{ + RuleActionAllow, + RuleActionDeny, + } +} + +const ( + // ScopeAvailabilityZone is a Scope enum value + ScopeAvailabilityZone = "Availability Zone" + + // ScopeRegion is a Scope enum value + ScopeRegion = "Region" +) + +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeAvailabilityZone, + ScopeRegion, + } +} + +const ( + // SelfServicePortalEnabled is a SelfServicePortal enum value + SelfServicePortalEnabled = "enabled" + + // SelfServicePortalDisabled is a SelfServicePortal enum value + SelfServicePortalDisabled = "disabled" +) + +// SelfServicePortal_Values returns all elements of the SelfServicePortal enum +func SelfServicePortal_Values() []string { + return []string{ + SelfServicePortalEnabled, + SelfServicePortalDisabled, + } +} + +const ( + // ServiceStatePending is a ServiceState enum value + ServiceStatePending = "Pending" + + // ServiceStateAvailable is a ServiceState enum value + ServiceStateAvailable = "Available" + + // ServiceStateDeleting is a ServiceState enum value + ServiceStateDeleting = "Deleting" + + // ServiceStateDeleted is a ServiceState enum value + ServiceStateDeleted = "Deleted" + + // ServiceStateFailed is a ServiceState enum value + ServiceStateFailed = "Failed" +) + +// 
ServiceState_Values returns all elements of the ServiceState enum +func ServiceState_Values() []string { + return []string{ + ServiceStatePending, + ServiceStateAvailable, + ServiceStateDeleting, + ServiceStateDeleted, + ServiceStateFailed, + } +} + +const ( + // ServiceTypeInterface is a ServiceType enum value + ServiceTypeInterface = "Interface" + + // ServiceTypeGateway is a ServiceType enum value + ServiceTypeGateway = "Gateway" + + // ServiceTypeGatewayLoadBalancer is a ServiceType enum value + ServiceTypeGatewayLoadBalancer = "GatewayLoadBalancer" +) + +// ServiceType_Values returns all elements of the ServiceType enum +func ServiceType_Values() []string { + return []string{ + ServiceTypeInterface, + ServiceTypeGateway, + ServiceTypeGatewayLoadBalancer, + } +} + +const ( + // ShutdownBehaviorStop is a ShutdownBehavior enum value + ShutdownBehaviorStop = "stop" + + // ShutdownBehaviorTerminate is a ShutdownBehavior enum value + ShutdownBehaviorTerminate = "terminate" +) + +// ShutdownBehavior_Values returns all elements of the ShutdownBehavior enum +func ShutdownBehavior_Values() []string { + return []string{ + ShutdownBehaviorStop, + ShutdownBehaviorTerminate, + } +} + +const ( + // SnapshotAttributeNameProductCodes is a SnapshotAttributeName enum value + SnapshotAttributeNameProductCodes = "productCodes" + + // SnapshotAttributeNameCreateVolumePermission is a SnapshotAttributeName enum value + SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" +) + +// SnapshotAttributeName_Values returns all elements of the SnapshotAttributeName enum +func SnapshotAttributeName_Values() []string { + return []string{ + SnapshotAttributeNameProductCodes, + SnapshotAttributeNameCreateVolumePermission, + } +} + +const ( + // SnapshotStatePending is a SnapshotState enum value + SnapshotStatePending = "pending" + + // SnapshotStateCompleted is a SnapshotState enum value + SnapshotStateCompleted = "completed" + + // SnapshotStateError is a SnapshotState enum 
value + SnapshotStateError = "error" +) + +// SnapshotState_Values returns all elements of the SnapshotState enum +func SnapshotState_Values() []string { + return []string{ + SnapshotStatePending, + SnapshotStateCompleted, + SnapshotStateError, + } +} + +const ( + // SpotAllocationStrategyLowestPrice is a SpotAllocationStrategy enum value + SpotAllocationStrategyLowestPrice = "lowest-price" + + // SpotAllocationStrategyDiversified is a SpotAllocationStrategy enum value + SpotAllocationStrategyDiversified = "diversified" + + // SpotAllocationStrategyCapacityOptimized is a SpotAllocationStrategy enum value + SpotAllocationStrategyCapacityOptimized = "capacity-optimized" +) + +// SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum +func SpotAllocationStrategy_Values() []string { + return []string{ + SpotAllocationStrategyLowestPrice, + SpotAllocationStrategyDiversified, + SpotAllocationStrategyCapacityOptimized, + } +} + +const ( + // SpotInstanceInterruptionBehaviorHibernate is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorHibernate = "hibernate" + + // SpotInstanceInterruptionBehaviorStop is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorStop = "stop" + + // SpotInstanceInterruptionBehaviorTerminate is a SpotInstanceInterruptionBehavior enum value + SpotInstanceInterruptionBehaviorTerminate = "terminate" +) + +// SpotInstanceInterruptionBehavior_Values returns all elements of the SpotInstanceInterruptionBehavior enum +func SpotInstanceInterruptionBehavior_Values() []string { + return []string{ + SpotInstanceInterruptionBehaviorHibernate, + SpotInstanceInterruptionBehaviorStop, + SpotInstanceInterruptionBehaviorTerminate, + } +} + +const ( + // SpotInstanceStateOpen is a SpotInstanceState enum value + SpotInstanceStateOpen = "open" + + // SpotInstanceStateActive is a SpotInstanceState enum value + SpotInstanceStateActive = "active" + + // SpotInstanceStateClosed is a 
SpotInstanceState enum value + SpotInstanceStateClosed = "closed" + + // SpotInstanceStateCancelled is a SpotInstanceState enum value + SpotInstanceStateCancelled = "cancelled" + + // SpotInstanceStateFailed is a SpotInstanceState enum value + SpotInstanceStateFailed = "failed" +) + +// SpotInstanceState_Values returns all elements of the SpotInstanceState enum +func SpotInstanceState_Values() []string { + return []string{ + SpotInstanceStateOpen, + SpotInstanceStateActive, + SpotInstanceStateClosed, + SpotInstanceStateCancelled, + SpotInstanceStateFailed, + } +} + +const ( + // SpotInstanceTypeOneTime is a SpotInstanceType enum value + SpotInstanceTypeOneTime = "one-time" + + // SpotInstanceTypePersistent is a SpotInstanceType enum value + SpotInstanceTypePersistent = "persistent" +) + +// SpotInstanceType_Values returns all elements of the SpotInstanceType enum +func SpotInstanceType_Values() []string { + return []string{ + SpotInstanceTypeOneTime, + SpotInstanceTypePersistent, + } +} + +const ( + // StatePendingAcceptance is a State enum value + StatePendingAcceptance = "PendingAcceptance" + + // StatePending is a State enum value + StatePending = "Pending" + + // StateAvailable is a State enum value + StateAvailable = "Available" + + // StateDeleting is a State enum value + StateDeleting = "Deleting" + + // StateDeleted is a State enum value + StateDeleted = "Deleted" + + // StateRejected is a State enum value + StateRejected = "Rejected" + + // StateFailed is a State enum value + StateFailed = "Failed" + + // StateExpired is a State enum value + StateExpired = "Expired" +) + +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StatePendingAcceptance, + StatePending, + StateAvailable, + StateDeleting, + StateDeleted, + StateRejected, + StateFailed, + StateExpired, + } +} + +const ( + // StatusMoveInProgress is a Status enum value + StatusMoveInProgress = "MoveInProgress" + + // StatusInVpc is a Status enum 
value + StatusInVpc = "InVpc" + + // StatusInClassic is a Status enum value + StatusInClassic = "InClassic" +) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusMoveInProgress, + StatusInVpc, + StatusInClassic, + } +} + +const ( + // StatusNameReachability is a StatusName enum value + StatusNameReachability = "reachability" +) + +// StatusName_Values returns all elements of the StatusName enum +func StatusName_Values() []string { + return []string{ + StatusNameReachability, + } +} + +const ( + // StatusTypePassed is a StatusType enum value + StatusTypePassed = "passed" + + // StatusTypeFailed is a StatusType enum value + StatusTypeFailed = "failed" + + // StatusTypeInsufficientData is a StatusType enum value + StatusTypeInsufficientData = "insufficient-data" + + // StatusTypeInitializing is a StatusType enum value + StatusTypeInitializing = "initializing" +) + +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypePassed, + StatusTypeFailed, + StatusTypeInsufficientData, + StatusTypeInitializing, + } +} + +const ( + // SubnetCidrBlockStateCodeAssociating is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeAssociating = "associating" + + // SubnetCidrBlockStateCodeAssociated is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeAssociated = "associated" + + // SubnetCidrBlockStateCodeDisassociating is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeDisassociating = "disassociating" + + // SubnetCidrBlockStateCodeDisassociated is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeDisassociated = "disassociated" + + // SubnetCidrBlockStateCodeFailing is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeFailing = "failing" + + // SubnetCidrBlockStateCodeFailed is a SubnetCidrBlockStateCode enum value + SubnetCidrBlockStateCodeFailed = "failed" +) + +// 
SubnetCidrBlockStateCode_Values returns all elements of the SubnetCidrBlockStateCode enum +func SubnetCidrBlockStateCode_Values() []string { + return []string{ + SubnetCidrBlockStateCodeAssociating, + SubnetCidrBlockStateCodeAssociated, + SubnetCidrBlockStateCodeDisassociating, + SubnetCidrBlockStateCodeDisassociated, + SubnetCidrBlockStateCodeFailing, + SubnetCidrBlockStateCodeFailed, + } +} + +const ( + // SubnetStatePending is a SubnetState enum value + SubnetStatePending = "pending" + + // SubnetStateAvailable is a SubnetState enum value + SubnetStateAvailable = "available" +) + +// SubnetState_Values returns all elements of the SubnetState enum +func SubnetState_Values() []string { + return []string{ + SubnetStatePending, + SubnetStateAvailable, + } +} + +const ( + // SummaryStatusOk is a SummaryStatus enum value + SummaryStatusOk = "ok" + + // SummaryStatusImpaired is a SummaryStatus enum value + SummaryStatusImpaired = "impaired" + + // SummaryStatusInsufficientData is a SummaryStatus enum value + SummaryStatusInsufficientData = "insufficient-data" + + // SummaryStatusNotApplicable is a SummaryStatus enum value + SummaryStatusNotApplicable = "not-applicable" + + // SummaryStatusInitializing is a SummaryStatus enum value + SummaryStatusInitializing = "initializing" +) + +// SummaryStatus_Values returns all elements of the SummaryStatus enum +func SummaryStatus_Values() []string { + return []string{ + SummaryStatusOk, + SummaryStatusImpaired, + SummaryStatusInsufficientData, + SummaryStatusNotApplicable, + SummaryStatusInitializing, + } +} + +const ( + // TelemetryStatusUp is a TelemetryStatus enum value + TelemetryStatusUp = "UP" + + // TelemetryStatusDown is a TelemetryStatus enum value + TelemetryStatusDown = "DOWN" +) + +// TelemetryStatus_Values returns all elements of the TelemetryStatus enum +func TelemetryStatus_Values() []string { + return []string{ + TelemetryStatusUp, + TelemetryStatusDown, + } +} + +const ( + // TenancyDefault is a Tenancy enum 
value + TenancyDefault = "default" + + // TenancyDedicated is a Tenancy enum value + TenancyDedicated = "dedicated" + + // TenancyHost is a Tenancy enum value + TenancyHost = "host" +) + +// Tenancy_Values returns all elements of the Tenancy enum +func Tenancy_Values() []string { + return []string{ + TenancyDefault, + TenancyDedicated, + TenancyHost, + } +} + +const ( + // TrafficDirectionIngress is a TrafficDirection enum value + TrafficDirectionIngress = "ingress" + + // TrafficDirectionEgress is a TrafficDirection enum value + TrafficDirectionEgress = "egress" +) + +// TrafficDirection_Values returns all elements of the TrafficDirection enum +func TrafficDirection_Values() []string { + return []string{ + TrafficDirectionIngress, + TrafficDirectionEgress, + } +} + +const ( + // TrafficMirrorFilterRuleFieldDestinationPortRange is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldDestinationPortRange = "destination-port-range" + + // TrafficMirrorFilterRuleFieldSourcePortRange is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldSourcePortRange = "source-port-range" + + // TrafficMirrorFilterRuleFieldProtocol is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldProtocol = "protocol" + + // TrafficMirrorFilterRuleFieldDescription is a TrafficMirrorFilterRuleField enum value + TrafficMirrorFilterRuleFieldDescription = "description" +) + +// TrafficMirrorFilterRuleField_Values returns all elements of the TrafficMirrorFilterRuleField enum +func TrafficMirrorFilterRuleField_Values() []string { + return []string{ + TrafficMirrorFilterRuleFieldDestinationPortRange, + TrafficMirrorFilterRuleFieldSourcePortRange, + TrafficMirrorFilterRuleFieldProtocol, + TrafficMirrorFilterRuleFieldDescription, + } +} + +const ( + // TrafficMirrorNetworkServiceAmazonDns is a TrafficMirrorNetworkService enum value + TrafficMirrorNetworkServiceAmazonDns = "amazon-dns" +) + +// TrafficMirrorNetworkService_Values returns all 
elements of the TrafficMirrorNetworkService enum +func TrafficMirrorNetworkService_Values() []string { + return []string{ + TrafficMirrorNetworkServiceAmazonDns, + } +} + +const ( + // TrafficMirrorRuleActionAccept is a TrafficMirrorRuleAction enum value + TrafficMirrorRuleActionAccept = "accept" + + // TrafficMirrorRuleActionReject is a TrafficMirrorRuleAction enum value + TrafficMirrorRuleActionReject = "reject" +) + +// TrafficMirrorRuleAction_Values returns all elements of the TrafficMirrorRuleAction enum +func TrafficMirrorRuleAction_Values() []string { + return []string{ + TrafficMirrorRuleActionAccept, + TrafficMirrorRuleActionReject, + } +} + +const ( + // TrafficMirrorSessionFieldPacketLength is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldPacketLength = "packet-length" + + // TrafficMirrorSessionFieldDescription is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldDescription = "description" + + // TrafficMirrorSessionFieldVirtualNetworkId is a TrafficMirrorSessionField enum value + TrafficMirrorSessionFieldVirtualNetworkId = "virtual-network-id" +) + +// TrafficMirrorSessionField_Values returns all elements of the TrafficMirrorSessionField enum +func TrafficMirrorSessionField_Values() []string { + return []string{ + TrafficMirrorSessionFieldPacketLength, + TrafficMirrorSessionFieldDescription, + TrafficMirrorSessionFieldVirtualNetworkId, + } +} + +const ( + // TrafficMirrorTargetTypeNetworkInterface is a TrafficMirrorTargetType enum value + TrafficMirrorTargetTypeNetworkInterface = "network-interface" + + // TrafficMirrorTargetTypeNetworkLoadBalancer is a TrafficMirrorTargetType enum value + TrafficMirrorTargetTypeNetworkLoadBalancer = "network-load-balancer" +) + +// TrafficMirrorTargetType_Values returns all elements of the TrafficMirrorTargetType enum +func TrafficMirrorTargetType_Values() []string { + return []string{ + TrafficMirrorTargetTypeNetworkInterface, + TrafficMirrorTargetTypeNetworkLoadBalancer, + } +} + 
+const ( + // TrafficTypeAccept is a TrafficType enum value + TrafficTypeAccept = "ACCEPT" + + // TrafficTypeReject is a TrafficType enum value + TrafficTypeReject = "REJECT" + + // TrafficTypeAll is a TrafficType enum value + TrafficTypeAll = "ALL" +) + +// TrafficType_Values returns all elements of the TrafficType enum +func TrafficType_Values() []string { + return []string{ + TrafficTypeAccept, + TrafficTypeReject, + TrafficTypeAll, + } +} + +const ( + // TransitGatewayAssociationStateAssociating is a TransitGatewayAssociationState enum value + TransitGatewayAssociationStateAssociating = "associating" + + // TransitGatewayAssociationStateAssociated is a TransitGatewayAssociationState enum value + TransitGatewayAssociationStateAssociated = "associated" + + // TransitGatewayAssociationStateDisassociating is a TransitGatewayAssociationState enum value + TransitGatewayAssociationStateDisassociating = "disassociating" + + // TransitGatewayAssociationStateDisassociated is a TransitGatewayAssociationState enum value + TransitGatewayAssociationStateDisassociated = "disassociated" +) + +// TransitGatewayAssociationState_Values returns all elements of the TransitGatewayAssociationState enum +func TransitGatewayAssociationState_Values() []string { + return []string{ + TransitGatewayAssociationStateAssociating, + TransitGatewayAssociationStateAssociated, + TransitGatewayAssociationStateDisassociating, + TransitGatewayAssociationStateDisassociated, + } +} + +const ( + // TransitGatewayAttachmentResourceTypeVpc is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypeVpc = "vpc" + + // TransitGatewayAttachmentResourceTypeVpn is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypeVpn = "vpn" + + // TransitGatewayAttachmentResourceTypeDirectConnectGateway is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypeDirectConnectGateway = "direct-connect-gateway" + + // 
TransitGatewayAttachmentResourceTypePeering is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypePeering = "peering" + + // TransitGatewayAttachmentResourceTypeTgwPeering is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypeTgwPeering = "tgw-peering" +) + +// TransitGatewayAttachmentResourceType_Values returns all elements of the TransitGatewayAttachmentResourceType enum +func TransitGatewayAttachmentResourceType_Values() []string { + return []string{ + TransitGatewayAttachmentResourceTypeVpc, + TransitGatewayAttachmentResourceTypeVpn, + TransitGatewayAttachmentResourceTypeDirectConnectGateway, + TransitGatewayAttachmentResourceTypePeering, + TransitGatewayAttachmentResourceTypeTgwPeering, + } +} + +const ( + // TransitGatewayAttachmentStateInitiating is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateInitiating = "initiating" + + // TransitGatewayAttachmentStateInitiatingRequest is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateInitiatingRequest = "initiatingRequest" + + // TransitGatewayAttachmentStatePendingAcceptance is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStatePendingAcceptance = "pendingAcceptance" + + // TransitGatewayAttachmentStateRollingBack is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateRollingBack = "rollingBack" + + // TransitGatewayAttachmentStatePending is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStatePending = "pending" + + // TransitGatewayAttachmentStateAvailable is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateAvailable = "available" + + // TransitGatewayAttachmentStateModifying is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateModifying = "modifying" + + // TransitGatewayAttachmentStateDeleting is a TransitGatewayAttachmentState enum value + 
TransitGatewayAttachmentStateDeleting = "deleting" + + // TransitGatewayAttachmentStateDeleted is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateDeleted = "deleted" + + // TransitGatewayAttachmentStateFailed is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateFailed = "failed" + + // TransitGatewayAttachmentStateRejected is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateRejected = "rejected" + + // TransitGatewayAttachmentStateRejecting is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateRejecting = "rejecting" + + // TransitGatewayAttachmentStateFailing is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateFailing = "failing" +) + +// TransitGatewayAttachmentState_Values returns all elements of the TransitGatewayAttachmentState enum +func TransitGatewayAttachmentState_Values() []string { + return []string{ + TransitGatewayAttachmentStateInitiating, + TransitGatewayAttachmentStateInitiatingRequest, + TransitGatewayAttachmentStatePendingAcceptance, + TransitGatewayAttachmentStateRollingBack, + TransitGatewayAttachmentStatePending, + TransitGatewayAttachmentStateAvailable, + TransitGatewayAttachmentStateModifying, + TransitGatewayAttachmentStateDeleting, + TransitGatewayAttachmentStateDeleted, + TransitGatewayAttachmentStateFailed, + TransitGatewayAttachmentStateRejected, + TransitGatewayAttachmentStateRejecting, + TransitGatewayAttachmentStateFailing, + } +} + +const ( + // TransitGatewayMulitcastDomainAssociationStateAssociating is a TransitGatewayMulitcastDomainAssociationState enum value + TransitGatewayMulitcastDomainAssociationStateAssociating = "associating" + + // TransitGatewayMulitcastDomainAssociationStateAssociated is a TransitGatewayMulitcastDomainAssociationState enum value + TransitGatewayMulitcastDomainAssociationStateAssociated = "associated" + + // TransitGatewayMulitcastDomainAssociationStateDisassociating is a 
TransitGatewayMulitcastDomainAssociationState enum value + TransitGatewayMulitcastDomainAssociationStateDisassociating = "disassociating" + + // TransitGatewayMulitcastDomainAssociationStateDisassociated is a TransitGatewayMulitcastDomainAssociationState enum value + TransitGatewayMulitcastDomainAssociationStateDisassociated = "disassociated" +) + +// TransitGatewayMulitcastDomainAssociationState_Values returns all elements of the TransitGatewayMulitcastDomainAssociationState enum +func TransitGatewayMulitcastDomainAssociationState_Values() []string { + return []string{ + TransitGatewayMulitcastDomainAssociationStateAssociating, + TransitGatewayMulitcastDomainAssociationStateAssociated, + TransitGatewayMulitcastDomainAssociationStateDisassociating, + TransitGatewayMulitcastDomainAssociationStateDisassociated, + } +} + +const ( + // TransitGatewayMulticastDomainStatePending is a TransitGatewayMulticastDomainState enum value + TransitGatewayMulticastDomainStatePending = "pending" + + // TransitGatewayMulticastDomainStateAvailable is a TransitGatewayMulticastDomainState enum value + TransitGatewayMulticastDomainStateAvailable = "available" + + // TransitGatewayMulticastDomainStateDeleting is a TransitGatewayMulticastDomainState enum value + TransitGatewayMulticastDomainStateDeleting = "deleting" + + // TransitGatewayMulticastDomainStateDeleted is a TransitGatewayMulticastDomainState enum value + TransitGatewayMulticastDomainStateDeleted = "deleted" +) + +// TransitGatewayMulticastDomainState_Values returns all elements of the TransitGatewayMulticastDomainState enum +func TransitGatewayMulticastDomainState_Values() []string { + return []string{ + TransitGatewayMulticastDomainStatePending, + TransitGatewayMulticastDomainStateAvailable, + TransitGatewayMulticastDomainStateDeleting, + TransitGatewayMulticastDomainStateDeleted, + } +} + +const ( + // TransitGatewayPrefixListReferenceStatePending is a TransitGatewayPrefixListReferenceState enum value + 
TransitGatewayPrefixListReferenceStatePending = "pending" + + // TransitGatewayPrefixListReferenceStateAvailable is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateAvailable = "available" + + // TransitGatewayPrefixListReferenceStateModifying is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateModifying = "modifying" + + // TransitGatewayPrefixListReferenceStateDeleting is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateDeleting = "deleting" +) + +// TransitGatewayPrefixListReferenceState_Values returns all elements of the TransitGatewayPrefixListReferenceState enum +func TransitGatewayPrefixListReferenceState_Values() []string { + return []string{ + TransitGatewayPrefixListReferenceStatePending, + TransitGatewayPrefixListReferenceStateAvailable, + TransitGatewayPrefixListReferenceStateModifying, + TransitGatewayPrefixListReferenceStateDeleting, + } +} + +const ( + // TransitGatewayPropagationStateEnabling is a TransitGatewayPropagationState enum value + TransitGatewayPropagationStateEnabling = "enabling" + + // TransitGatewayPropagationStateEnabled is a TransitGatewayPropagationState enum value + TransitGatewayPropagationStateEnabled = "enabled" + + // TransitGatewayPropagationStateDisabling is a TransitGatewayPropagationState enum value + TransitGatewayPropagationStateDisabling = "disabling" + + // TransitGatewayPropagationStateDisabled is a TransitGatewayPropagationState enum value + TransitGatewayPropagationStateDisabled = "disabled" +) + +// TransitGatewayPropagationState_Values returns all elements of the TransitGatewayPropagationState enum +func TransitGatewayPropagationState_Values() []string { + return []string{ + TransitGatewayPropagationStateEnabling, + TransitGatewayPropagationStateEnabled, + TransitGatewayPropagationStateDisabling, + TransitGatewayPropagationStateDisabled, + } +} + +const ( + // 
TransitGatewayRouteStatePending is a TransitGatewayRouteState enum value + TransitGatewayRouteStatePending = "pending" + + // TransitGatewayRouteStateActive is a TransitGatewayRouteState enum value + TransitGatewayRouteStateActive = "active" + + // TransitGatewayRouteStateBlackhole is a TransitGatewayRouteState enum value + TransitGatewayRouteStateBlackhole = "blackhole" + + // TransitGatewayRouteStateDeleting is a TransitGatewayRouteState enum value + TransitGatewayRouteStateDeleting = "deleting" + + // TransitGatewayRouteStateDeleted is a TransitGatewayRouteState enum value + TransitGatewayRouteStateDeleted = "deleted" +) + +// TransitGatewayRouteState_Values returns all elements of the TransitGatewayRouteState enum +func TransitGatewayRouteState_Values() []string { + return []string{ + TransitGatewayRouteStatePending, + TransitGatewayRouteStateActive, + TransitGatewayRouteStateBlackhole, + TransitGatewayRouteStateDeleting, + TransitGatewayRouteStateDeleted, + } +} + +const ( + // TransitGatewayRouteTableStatePending is a TransitGatewayRouteTableState enum value + TransitGatewayRouteTableStatePending = "pending" + + // TransitGatewayRouteTableStateAvailable is a TransitGatewayRouteTableState enum value + TransitGatewayRouteTableStateAvailable = "available" + + // TransitGatewayRouteTableStateDeleting is a TransitGatewayRouteTableState enum value + TransitGatewayRouteTableStateDeleting = "deleting" + + // TransitGatewayRouteTableStateDeleted is a TransitGatewayRouteTableState enum value + TransitGatewayRouteTableStateDeleted = "deleted" +) + +// TransitGatewayRouteTableState_Values returns all elements of the TransitGatewayRouteTableState enum +func TransitGatewayRouteTableState_Values() []string { + return []string{ + TransitGatewayRouteTableStatePending, + TransitGatewayRouteTableStateAvailable, + TransitGatewayRouteTableStateDeleting, + TransitGatewayRouteTableStateDeleted, + } +} + +const ( + // TransitGatewayRouteTypeStatic is a TransitGatewayRouteType enum 
value + TransitGatewayRouteTypeStatic = "static" + + // TransitGatewayRouteTypePropagated is a TransitGatewayRouteType enum value + TransitGatewayRouteTypePropagated = "propagated" +) + +// TransitGatewayRouteType_Values returns all elements of the TransitGatewayRouteType enum +func TransitGatewayRouteType_Values() []string { + return []string{ + TransitGatewayRouteTypeStatic, + TransitGatewayRouteTypePropagated, + } +} + +const ( + // TransitGatewayStatePending is a TransitGatewayState enum value + TransitGatewayStatePending = "pending" + + // TransitGatewayStateAvailable is a TransitGatewayState enum value + TransitGatewayStateAvailable = "available" + + // TransitGatewayStateModifying is a TransitGatewayState enum value + TransitGatewayStateModifying = "modifying" + + // TransitGatewayStateDeleting is a TransitGatewayState enum value + TransitGatewayStateDeleting = "deleting" + + // TransitGatewayStateDeleted is a TransitGatewayState enum value + TransitGatewayStateDeleted = "deleted" +) + +// TransitGatewayState_Values returns all elements of the TransitGatewayState enum +func TransitGatewayState_Values() []string { + return []string{ + TransitGatewayStatePending, + TransitGatewayStateAvailable, + TransitGatewayStateModifying, + TransitGatewayStateDeleting, + TransitGatewayStateDeleted, + } +} + +const ( + // TransportProtocolTcp is a TransportProtocol enum value + TransportProtocolTcp = "tcp" + + // TransportProtocolUdp is a TransportProtocol enum value + TransportProtocolUdp = "udp" +) + +// TransportProtocol_Values returns all elements of the TransportProtocol enum +func TransportProtocol_Values() []string { + return []string{ + TransportProtocolTcp, + TransportProtocolUdp, + } +} + +const ( + // TunnelInsideIpVersionIpv4 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv4 = "ipv4" + + // TunnelInsideIpVersionIpv6 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv6 = "ipv6" +) + +// TunnelInsideIpVersion_Values returns all 
elements of the TunnelInsideIpVersion enum +func TunnelInsideIpVersion_Values() []string { + return []string{ + TunnelInsideIpVersionIpv4, + TunnelInsideIpVersionIpv6, + } +} + +const ( + // UnlimitedSupportedInstanceFamilyT2 is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT2 = "t2" + + // UnlimitedSupportedInstanceFamilyT3 is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT3 = "t3" + + // UnlimitedSupportedInstanceFamilyT3a is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT3a = "t3a" + + // UnlimitedSupportedInstanceFamilyT4g is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT4g = "t4g" +) + +// UnlimitedSupportedInstanceFamily_Values returns all elements of the UnlimitedSupportedInstanceFamily enum +func UnlimitedSupportedInstanceFamily_Values() []string { + return []string{ + UnlimitedSupportedInstanceFamilyT2, + UnlimitedSupportedInstanceFamilyT3, + UnlimitedSupportedInstanceFamilyT3a, + UnlimitedSupportedInstanceFamilyT4g, + } +} + +const ( + // UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed is a UnsuccessfulInstanceCreditSpecificationErrorCode enum value + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed = "InvalidInstanceID.Malformed" + + // UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdNotFound is a UnsuccessfulInstanceCreditSpecificationErrorCode enum value + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdNotFound = "InvalidInstanceID.NotFound" + + // UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState is a UnsuccessfulInstanceCreditSpecificationErrorCode enum value + UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState = "IncorrectInstanceState" + + // UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported is a UnsuccessfulInstanceCreditSpecificationErrorCode enum 
value + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported = "InstanceCreditSpecification.NotSupported" +) + +// UnsuccessfulInstanceCreditSpecificationErrorCode_Values returns all elements of the UnsuccessfulInstanceCreditSpecificationErrorCode enum +func UnsuccessfulInstanceCreditSpecificationErrorCode_Values() []string { + return []string{ + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed, + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdNotFound, + UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState, + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported, + } +} + +const ( + // UsageClassTypeSpot is a UsageClassType enum value + UsageClassTypeSpot = "spot" + + // UsageClassTypeOnDemand is a UsageClassType enum value + UsageClassTypeOnDemand = "on-demand" +) + +// UsageClassType_Values returns all elements of the UsageClassType enum +func UsageClassType_Values() []string { + return []string{ + UsageClassTypeSpot, + UsageClassTypeOnDemand, + } +} + +const ( + // VirtualizationTypeHvm is a VirtualizationType enum value + VirtualizationTypeHvm = "hvm" + + // VirtualizationTypeParavirtual is a VirtualizationType enum value + VirtualizationTypeParavirtual = "paravirtual" +) + +// VirtualizationType_Values returns all elements of the VirtualizationType enum +func VirtualizationType_Values() []string { + return []string{ + VirtualizationTypeHvm, + VirtualizationTypeParavirtual, + } +} + +const ( + // VolumeAttachmentStateAttaching is a VolumeAttachmentState enum value + VolumeAttachmentStateAttaching = "attaching" + + // VolumeAttachmentStateAttached is a VolumeAttachmentState enum value + VolumeAttachmentStateAttached = "attached" + + // VolumeAttachmentStateDetaching is a VolumeAttachmentState enum value + VolumeAttachmentStateDetaching = "detaching" + + // VolumeAttachmentStateDetached is a VolumeAttachmentState enum value + 
VolumeAttachmentStateDetached = "detached" + + // VolumeAttachmentStateBusy is a VolumeAttachmentState enum value + VolumeAttachmentStateBusy = "busy" +) + +// VolumeAttachmentState_Values returns all elements of the VolumeAttachmentState enum +func VolumeAttachmentState_Values() []string { + return []string{ + VolumeAttachmentStateAttaching, + VolumeAttachmentStateAttached, + VolumeAttachmentStateDetaching, + VolumeAttachmentStateDetached, + VolumeAttachmentStateBusy, + } +} + +const ( + // VolumeAttributeNameAutoEnableIo is a VolumeAttributeName enum value + VolumeAttributeNameAutoEnableIo = "autoEnableIO" + + // VolumeAttributeNameProductCodes is a VolumeAttributeName enum value + VolumeAttributeNameProductCodes = "productCodes" +) + +// VolumeAttributeName_Values returns all elements of the VolumeAttributeName enum +func VolumeAttributeName_Values() []string { + return []string{ + VolumeAttributeNameAutoEnableIo, + VolumeAttributeNameProductCodes, + } +} + +const ( + // VolumeModificationStateModifying is a VolumeModificationState enum value + VolumeModificationStateModifying = "modifying" + + // VolumeModificationStateOptimizing is a VolumeModificationState enum value + VolumeModificationStateOptimizing = "optimizing" + + // VolumeModificationStateCompleted is a VolumeModificationState enum value + VolumeModificationStateCompleted = "completed" + + // VolumeModificationStateFailed is a VolumeModificationState enum value + VolumeModificationStateFailed = "failed" +) + +// VolumeModificationState_Values returns all elements of the VolumeModificationState enum +func VolumeModificationState_Values() []string { + return []string{ + VolumeModificationStateModifying, + VolumeModificationStateOptimizing, + VolumeModificationStateCompleted, + VolumeModificationStateFailed, + } +} + +const ( + // VolumeStateCreating is a VolumeState enum value + VolumeStateCreating = "creating" + + // VolumeStateAvailable is a VolumeState enum value + VolumeStateAvailable = "available" 
+ + // VolumeStateInUse is a VolumeState enum value + VolumeStateInUse = "in-use" + + // VolumeStateDeleting is a VolumeState enum value + VolumeStateDeleting = "deleting" + + // VolumeStateDeleted is a VolumeState enum value + VolumeStateDeleted = "deleted" + + // VolumeStateError is a VolumeState enum value + VolumeStateError = "error" +) + +// VolumeState_Values returns all elements of the VolumeState enum +func VolumeState_Values() []string { + return []string{ + VolumeStateCreating, + VolumeStateAvailable, + VolumeStateInUse, + VolumeStateDeleting, + VolumeStateDeleted, + VolumeStateError, + } +} + +const ( + // VolumeStatusInfoStatusOk is a VolumeStatusInfoStatus enum value + VolumeStatusInfoStatusOk = "ok" + + // VolumeStatusInfoStatusImpaired is a VolumeStatusInfoStatus enum value + VolumeStatusInfoStatusImpaired = "impaired" + + // VolumeStatusInfoStatusInsufficientData is a VolumeStatusInfoStatus enum value + VolumeStatusInfoStatusInsufficientData = "insufficient-data" +) + +// VolumeStatusInfoStatus_Values returns all elements of the VolumeStatusInfoStatus enum +func VolumeStatusInfoStatus_Values() []string { + return []string{ + VolumeStatusInfoStatusOk, + VolumeStatusInfoStatusImpaired, + VolumeStatusInfoStatusInsufficientData, + } +} + +const ( + // VolumeStatusNameIoEnabled is a VolumeStatusName enum value + VolumeStatusNameIoEnabled = "io-enabled" + + // VolumeStatusNameIoPerformance is a VolumeStatusName enum value + VolumeStatusNameIoPerformance = "io-performance" +) + +// VolumeStatusName_Values returns all elements of the VolumeStatusName enum +func VolumeStatusName_Values() []string { + return []string{ + VolumeStatusNameIoEnabled, + VolumeStatusNameIoPerformance, + } +} + +const ( + // VolumeTypeStandard is a VolumeType enum value + VolumeTypeStandard = "standard" + + // VolumeTypeIo1 is a VolumeType enum value + VolumeTypeIo1 = "io1" + + // VolumeTypeIo2 is a VolumeType enum value + VolumeTypeIo2 = "io2" + + // VolumeTypeGp2 is a VolumeType 
enum value + VolumeTypeGp2 = "gp2" + + // VolumeTypeSc1 is a VolumeType enum value + VolumeTypeSc1 = "sc1" + + // VolumeTypeSt1 is a VolumeType enum value + VolumeTypeSt1 = "st1" + + // VolumeTypeGp3 is a VolumeType enum value + VolumeTypeGp3 = "gp3" +) + +// VolumeType_Values returns all elements of the VolumeType enum +func VolumeType_Values() []string { + return []string{ + VolumeTypeStandard, + VolumeTypeIo1, + VolumeTypeIo2, + VolumeTypeGp2, + VolumeTypeSc1, + VolumeTypeSt1, + VolumeTypeGp3, + } +} + +const ( + // VpcAttributeNameEnableDnsSupport is a VpcAttributeName enum value + VpcAttributeNameEnableDnsSupport = "enableDnsSupport" + + // VpcAttributeNameEnableDnsHostnames is a VpcAttributeName enum value + VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" +) + +// VpcAttributeName_Values returns all elements of the VpcAttributeName enum +func VpcAttributeName_Values() []string { + return []string{ + VpcAttributeNameEnableDnsSupport, + VpcAttributeNameEnableDnsHostnames, + } +} + +const ( + // VpcCidrBlockStateCodeAssociating is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeAssociating = "associating" + + // VpcCidrBlockStateCodeAssociated is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeAssociated = "associated" + + // VpcCidrBlockStateCodeDisassociating is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeDisassociating = "disassociating" + + // VpcCidrBlockStateCodeDisassociated is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeDisassociated = "disassociated" + + // VpcCidrBlockStateCodeFailing is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeFailing = "failing" + + // VpcCidrBlockStateCodeFailed is a VpcCidrBlockStateCode enum value + VpcCidrBlockStateCodeFailed = "failed" +) + +// VpcCidrBlockStateCode_Values returns all elements of the VpcCidrBlockStateCode enum +func VpcCidrBlockStateCode_Values() []string { + return []string{ + VpcCidrBlockStateCodeAssociating, + 
VpcCidrBlockStateCodeAssociated, + VpcCidrBlockStateCodeDisassociating, + VpcCidrBlockStateCodeDisassociated, + VpcCidrBlockStateCodeFailing, + VpcCidrBlockStateCodeFailed, + } +} + +const ( + // VpcEndpointTypeInterface is a VpcEndpointType enum value + VpcEndpointTypeInterface = "Interface" + + // VpcEndpointTypeGateway is a VpcEndpointType enum value + VpcEndpointTypeGateway = "Gateway" + + // VpcEndpointTypeGatewayLoadBalancer is a VpcEndpointType enum value + VpcEndpointTypeGatewayLoadBalancer = "GatewayLoadBalancer" +) + +// VpcEndpointType_Values returns all elements of the VpcEndpointType enum +func VpcEndpointType_Values() []string { + return []string{ + VpcEndpointTypeInterface, + VpcEndpointTypeGateway, + VpcEndpointTypeGatewayLoadBalancer, + } +} + +const ( + // VpcPeeringConnectionStateReasonCodeInitiatingRequest is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" + + // VpcPeeringConnectionStateReasonCodePendingAcceptance is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodePendingAcceptance = "pending-acceptance" + + // VpcPeeringConnectionStateReasonCodeActive is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeActive = "active" + + // VpcPeeringConnectionStateReasonCodeDeleted is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeDeleted = "deleted" + + // VpcPeeringConnectionStateReasonCodeRejected is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeRejected = "rejected" + + // VpcPeeringConnectionStateReasonCodeFailed is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeFailed = "failed" + + // VpcPeeringConnectionStateReasonCodeExpired is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeExpired = "expired" + + // 
VpcPeeringConnectionStateReasonCodeProvisioning is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeProvisioning = "provisioning" + + // VpcPeeringConnectionStateReasonCodeDeleting is a VpcPeeringConnectionStateReasonCode enum value + VpcPeeringConnectionStateReasonCodeDeleting = "deleting" +) + +// VpcPeeringConnectionStateReasonCode_Values returns all elements of the VpcPeeringConnectionStateReasonCode enum +func VpcPeeringConnectionStateReasonCode_Values() []string { + return []string{ + VpcPeeringConnectionStateReasonCodeInitiatingRequest, + VpcPeeringConnectionStateReasonCodePendingAcceptance, + VpcPeeringConnectionStateReasonCodeActive, + VpcPeeringConnectionStateReasonCodeDeleted, + VpcPeeringConnectionStateReasonCodeRejected, + VpcPeeringConnectionStateReasonCodeFailed, + VpcPeeringConnectionStateReasonCodeExpired, + VpcPeeringConnectionStateReasonCodeProvisioning, + VpcPeeringConnectionStateReasonCodeDeleting, + } +} + +const ( + // VpcStatePending is a VpcState enum value + VpcStatePending = "pending" + + // VpcStateAvailable is a VpcState enum value + VpcStateAvailable = "available" +) + +// VpcState_Values returns all elements of the VpcState enum +func VpcState_Values() []string { + return []string{ + VpcStatePending, + VpcStateAvailable, + } +} + +const ( + // VpcTenancyDefault is a VpcTenancy enum value + VpcTenancyDefault = "default" +) + +// VpcTenancy_Values returns all elements of the VpcTenancy enum +func VpcTenancy_Values() []string { + return []string{ + VpcTenancyDefault, + } +} + +const ( + // VpnEcmpSupportValueEnable is a VpnEcmpSupportValue enum value + VpnEcmpSupportValueEnable = "enable" + + // VpnEcmpSupportValueDisable is a VpnEcmpSupportValue enum value + VpnEcmpSupportValueDisable = "disable" +) + +// VpnEcmpSupportValue_Values returns all elements of the VpnEcmpSupportValue enum +func VpnEcmpSupportValue_Values() []string { + return []string{ + VpnEcmpSupportValueEnable, + 
VpnEcmpSupportValueDisable, + } +} + +const ( + // VpnProtocolOpenvpn is a VpnProtocol enum value + VpnProtocolOpenvpn = "openvpn" +) + +// VpnProtocol_Values returns all elements of the VpnProtocol enum +func VpnProtocol_Values() []string { + return []string{ + VpnProtocolOpenvpn, + } +} + +const ( + // VpnStatePending is a VpnState enum value + VpnStatePending = "pending" + + // VpnStateAvailable is a VpnState enum value + VpnStateAvailable = "available" + + // VpnStateDeleting is a VpnState enum value + VpnStateDeleting = "deleting" + + // VpnStateDeleted is a VpnState enum value + VpnStateDeleted = "deleted" +) + +// VpnState_Values returns all elements of the VpnState enum +func VpnState_Values() []string { + return []string{ + VpnStatePending, + VpnStateAvailable, + VpnStateDeleting, + VpnStateDeleted, + } +} + +const ( + // VpnStaticRouteSourceStatic is a VpnStaticRouteSource enum value + VpnStaticRouteSourceStatic = "Static" +) + +// VpnStaticRouteSource_Values returns all elements of the VpnStaticRouteSource enum +func VpnStaticRouteSource_Values() []string { + return []string{ + VpnStaticRouteSourceStatic, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go new file mode 100644 index 00000000000..3ad30591894 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -0,0 +1,91 @@ +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // customRetryerMinRetryDelay sets min retry delay + customRetryerMinRetryDelay = 1 * time.Second + + // customRetryerMaxRetryDelay sets max retry delay + customRetryerMaxRetryDelay = 8 * time.Second +) + +func init() { + initRequest = func(r *request.Request) { + if r.Operation.Name == 
opCopySnapshot { // fill the PresignedURL parameter + r.Handlers.Build.PushFront(fillPresignedURL) + } + + // only set the retryer on request if config doesn't have a retryer + if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || r.Operation.Name == opAssignPrivateIpAddresses) { + maxRetries := client.DefaultRetryerMaxNumRetries + if m := r.Config.MaxRetries; m != nil && *m != aws.UseServiceDefaultRetries { + maxRetries = *m + } + r.Retryer = client.DefaultRetryer{ + NumMaxRetries: maxRetries, + MinRetryDelay: customRetryerMinRetryDelay, + MinThrottleDelay: customRetryerMinRetryDelay, + MaxRetryDelay: customRetryerMaxRetryDelay, + MaxThrottleDelay: customRetryerMaxRetryDelay, + } + } + } +} + +func fillPresignedURL(r *request.Request) { + if !r.ParamsFilled() { + return + } + + origParams := r.Params.(*CopySnapshotInput) + + // Stop if PresignedURL/DestinationRegion is set + if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil { + return + } + + origParams.DestinationRegion = r.Config.Region + newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput) + + // Create a new request based on the existing request. We will use this to + // presign the CopySnapshot request against the source region. + cfg := r.Config.Copy(aws.NewConfig(). + WithEndpoint(""). 
+ WithRegion(aws.StringValue(origParams.SourceRegion))) + + clientInfo := r.ClientInfo + resolved, err := r.Config.EndpointResolver.EndpointFor( + clientInfo.ServiceName, aws.StringValue(cfg.Region), + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + }, + ) + if err != nil { + r.Error = err + return + } + + clientInfo.Endpoint = resolved.URL + clientInfo.SigningRegion = resolved.SigningRegion + + // Presign a CopySnapshot request with modified params + req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) + url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough. + if err != nil { // bubble error back up to original request + r.Error = err + return + } + + // We have our URL, set it on params + origParams.PresignedUrl = &url +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go new file mode 100644 index 00000000000..31c314e0e5f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ec2 provides the client and types for making API +// requests to Amazon Elastic Compute Cloud. +// +// Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing +// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest +// in hardware up front, so you can develop and deploy applications faster. 
+// +// To learn more, see the following resources: +// +// * Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon +// EC2 documentation (http://aws.amazon.com/documentation/ec2) +// +// * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon +// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) +// +// * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon +// VPC documentation (http://aws.amazon.com/documentation/vpc) +// +// * AWS VPN: AWS VPN product page (http://aws.amazon.com/vpn), AWS VPN documentation +// (http://aws.amazon.com/documentation/vpn) +// +// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service. +// +// See ec2 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/ +// +// Using the Client +// +// To contact Amazon Elastic Compute Cloud with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Elastic Compute Cloud client EC2 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New +package ec2 diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go new file mode 100644 index 00000000000..3d61d7e357e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go @@ -0,0 +1,3 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package ec2 diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go new file mode 100644 index 00000000000..1bde2c2f5c5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/ec2query" +) + +// EC2 provides the API operation methods for making requests to +// Amazon Elastic Compute Cloud. See this package's package overview docs +// for details on the service. +// +// EC2 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type EC2 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ec2" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "EC2" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the EC2 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a EC2 client from just a session. 
+// svc := ec2.New(mySession) +// +// // Create a EC2 client with additional configuration +// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EC2 { + svc := &EC2{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2016-11-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a EC2 operation and runs any +// custom request initialization. 
+func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go new file mode 100644 index 00000000000..b9bdbde157f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -0,0 +1,1677 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilBundleTaskComplete uses the Amazon EC2 API operation +// DescribeBundleTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { + return c.WaitUntilBundleTaskCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBundleTaskCompleteWithContext is an extended version of WaitUntilBundleTaskComplete. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilBundleTaskCompleteWithContext(ctx aws.Context, input *DescribeBundleTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBundleTaskComplete", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "BundleTasks[].State", + Expected: "complete", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "BundleTasks[].State", + Expected: "failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeBundleTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBundleTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation +// DescribeConversionTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { + return c.WaitUntilConversionTaskCancelledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskCancelledWithContext is an extended version of WaitUntilConversionTaskCancelled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilConversionTaskCancelledWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskCancelled", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation +// DescribeConversionTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { + return c.WaitUntilConversionTaskCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskCompletedWithContext is an extended version of WaitUntilConversionTaskCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilConversionTaskCompletedWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskCompleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", + Expected: "completed", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "ConversionTasks[].State", + Expected: "cancelled", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "ConversionTasks[].State", + Expected: "cancelling", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation +// DescribeConversionTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { + return c.WaitUntilConversionTaskDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskDeletedWithContext is an extended version of WaitUntilConversionTaskDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilConversionTaskDeletedWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", + Expected: "deleted", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation +// DescribeCustomerGateways to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { + return c.WaitUntilCustomerGatewayAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilCustomerGatewayAvailableWithContext is an extended version of WaitUntilCustomerGatewayAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilCustomerGatewayAvailableWithContext(ctx aws.Context, input *DescribeCustomerGatewaysInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilCustomerGatewayAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "CustomerGateways[].State", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "CustomerGateways[].State", + Expected: "deleted", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "CustomerGateways[].State", + Expected: "deleting", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeCustomerGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCustomerGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilExportTaskCancelled uses the Amazon EC2 API operation +// DescribeExportTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { + return c.WaitUntilExportTaskCancelledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilExportTaskCancelledWithContext is an extended version of WaitUntilExportTaskCancelled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilExportTaskCancelledWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilExportTaskCancelled", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ExportTasks[].State", + Expected: "cancelled", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeExportTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeExportTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilExportTaskCompleted uses the Amazon EC2 API operation +// DescribeExportTasks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { + return c.WaitUntilExportTaskCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilExportTaskCompletedWithContext is an extended version of WaitUntilExportTaskCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilExportTaskCompletedWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilExportTaskCompleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ExportTasks[].State", + Expected: "completed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeExportTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeExportTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilImageAvailable uses the Amazon EC2 API operation +// DescribeImages to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { + return c.WaitUntilImageAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilImageAvailableWithContext is an extended version of WaitUntilImageAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilImageAvailableWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilImageAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Images[].State", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Images[].State", + Expected: "failed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilImageExists uses the Amazon EC2 API operation +// DescribeImages to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error { + return c.WaitUntilImageExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilImageExistsWithContext is an extended version of WaitUntilImageExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilImageExistsWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilImageExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(Images[]) > `0`", + Expected: true, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidAMIID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInstanceExists uses the Amazon EC2 API operation +// DescribeInstances to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { + return c.WaitUntilInstanceExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceExistsWithContext is an extended version of WaitUntilInstanceExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceExistsWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(Reservations[]) > `0`", + Expected: true, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidInstanceID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInstanceRunning uses the Amazon EC2 API operation +// DescribeInstances to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { + return c.WaitUntilInstanceRunningWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceRunningWithContext is an extended version of WaitUntilInstanceRunning. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceRunningWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceRunning", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "running", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "shutting-down", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidInstanceID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInstanceStatusOk uses the Amazon EC2 API operation +// DescribeInstanceStatus to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { + return c.WaitUntilInstanceStatusOkWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceStatusOkWithContext is an extended version of WaitUntilInstanceStatusOk. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilInstanceStatusOkWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceStatusOk", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStatuses[].InstanceStatus.Status", + Expected: "ok", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidInstanceID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInstanceStopped uses the Amazon EC2 API operation +// DescribeInstances to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { + return c.WaitUntilInstanceStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceStoppedWithContext is an extended version of WaitUntilInstanceStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilInstanceStoppedWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceStopped", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "stopped", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInstanceTerminated uses the Amazon EC2 API operation +// DescribeInstances to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { + return c.WaitUntilInstanceTerminatedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceTerminatedWithContext is an extended version of WaitUntilInstanceTerminated. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilInstanceTerminatedWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceTerminated", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "terminated", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "pending", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", + Expected: "stopping", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilKeyPairExists uses the Amazon EC2 API operation +// DescribeKeyPairs to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
+func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { + return c.WaitUntilKeyPairExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilKeyPairExistsWithContext is an extended version of WaitUntilKeyPairExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilKeyPairExistsWithContext(ctx aws.Context, input *DescribeKeyPairsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilKeyPairExists", + MaxAttempts: 6, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(KeyPairs[].KeyName) > `0`", + Expected: true, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidKeyPair.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeKeyPairsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeKeyPairsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation +// DescribeNatGateways to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
+func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error { + return c.WaitUntilNatGatewayAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNatGatewayAvailableWithContext is an extended version of WaitUntilNatGatewayAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilNatGatewayAvailableWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNatGatewayAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "NatGateways[].State", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", + Expected: "failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", + Expected: "deleting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", + Expected: "deleted", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "NatGatewayNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNatGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNatGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation +// DescribeNetworkInterfaces to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { + return c.WaitUntilNetworkInterfaceAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNetworkInterfaceAvailableWithContext is an extended version of WaitUntilNetworkInterfaceAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilNetworkInterfaceAvailableWithContext(ctx aws.Context, input *DescribeNetworkInterfacesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNetworkInterfaceAvailable", + MaxAttempts: 10, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "NetworkInterfaces[].Status", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidNetworkInterfaceID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNetworkInterfacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNetworkInterfacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation +// GetPasswordData to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { + return c.WaitUntilPasswordDataAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilPasswordDataAvailableWithContext is an extended version of WaitUntilPasswordDataAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilPasswordDataAvailableWithContext(ctx aws.Context, input *GetPasswordDataInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilPasswordDataAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(PasswordData) > `0`", + Expected: true, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetPasswordDataInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPasswordDataRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilSecurityGroupExists uses the Amazon EC2 API operation +// DescribeSecurityGroups to wait for a condition to be met before returning. 
+// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilSecurityGroupExists(input *DescribeSecurityGroupsInput) error { + return c.WaitUntilSecurityGroupExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSecurityGroupExistsWithContext is an extended version of WaitUntilSecurityGroupExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilSecurityGroupExistsWithContext(ctx aws.Context, input *DescribeSecurityGroupsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSecurityGroupExists", + MaxAttempts: 6, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(SecurityGroups[].GroupId) > `0`", + Expected: true, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidGroupNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSecurityGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSecurityGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilSnapshotCompleted uses the Amazon EC2 API operation +// DescribeSnapshots to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
+func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { + return c.WaitUntilSnapshotCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSnapshotCompletedWithContext is an extended version of WaitUntilSnapshotCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilSnapshotCompletedWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSnapshotCompleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Snapshots[].State", + Expected: "completed", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation +// DescribeSpotInstanceRequests to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
+func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { + return c.WaitUntilSpotInstanceRequestFulfilledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSpotInstanceRequestFulfilledWithContext is an extended version of WaitUntilSpotInstanceRequestFulfilled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context, input *DescribeSpotInstanceRequestsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSpotInstanceRequestFulfilled", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "fulfilled", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "request-canceled-and-instance-running", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "schedule-expired", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "canceled-before-fulfillment", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "bad-parameters", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: 
"SpotInstanceRequests[].Status.Code", + Expected: "system-error", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidSpotInstanceRequestID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSpotInstanceRequestsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotInstanceRequestsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilSubnetAvailable uses the Amazon EC2 API operation +// DescribeSubnets to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { + return c.WaitUntilSubnetAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSubnetAvailableWithContext is an extended version of WaitUntilSubnetAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilSubnetAvailableWithContext(ctx aws.Context, input *DescribeSubnetsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSubnetAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Subnets[].State", + Expected: "available", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSubnetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubnetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilSystemStatusOk uses the Amazon EC2 API operation +// DescribeInstanceStatus to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { + return c.WaitUntilSystemStatusOkWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSystemStatusOkWithContext is an extended version of WaitUntilSystemStatusOk. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilSystemStatusOkWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSystemStatusOk", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStatuses[].SystemStatus.Status", + Expected: "ok", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVolumeAvailable uses the Amazon EC2 API operation +// DescribeVolumes to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { + return c.WaitUntilVolumeAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeAvailableWithContext is an extended version of WaitUntilVolumeAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVolumeAvailableWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVolumeDeleted uses the Amazon EC2 API operation +// DescribeVolumes to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { + return c.WaitUntilVolumeDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeDeletedWithContext is an extended version of WaitUntilVolumeDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVolumeDeletedWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", + Expected: "deleted", + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidVolume.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVolumeInUse uses the Amazon EC2 API operation +// DescribeVolumes to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { + return c.WaitUntilVolumeInUseWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeInUseWithContext is an extended version of WaitUntilVolumeInUse. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVolumeInUseWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeInUse", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", + Expected: "in-use", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Volumes[].State", + Expected: "deleted", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpcAvailable uses the Amazon EC2 API operation +// DescribeVpcs to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { + return c.WaitUntilVpcAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcAvailableWithContext is an extended version of WaitUntilVpcAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpcAvailableWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Vpcs[].State", + Expected: "available", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpcExists uses the Amazon EC2 API operation +// DescribeVpcs to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error { + return c.WaitUntilVpcExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcExistsWithContext is an extended version of WaitUntilVpcExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpcExistsWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcExists", + MaxAttempts: 5, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidVpcID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpcPeeringConnectionDeleted uses the Amazon EC2 API operation +// DescribeVpcPeeringConnections to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpcPeeringConnectionDeleted(input *DescribeVpcPeeringConnectionsInput) error { + return c.WaitUntilVpcPeeringConnectionDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcPeeringConnectionDeletedWithContext is an extended version of WaitUntilVpcPeeringConnectionDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpcPeeringConnectionDeletedWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcPeeringConnectionDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpcPeeringConnections[].Status.Code", + Expected: "deleted", + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidVpcPeeringConnectionID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcPeeringConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcPeeringConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation +// DescribeVpcPeeringConnections to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error { + return c.WaitUntilVpcPeeringConnectionExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcPeeringConnectionExistsWithContext is an extended version of WaitUntilVpcPeeringConnectionExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVpcPeeringConnectionExistsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcPeeringConnectionExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidVpcPeeringConnectionID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcPeeringConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcPeeringConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation +// DescribeVpnConnections to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { + return c.WaitUntilVpnConnectionAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpnConnectionAvailableWithContext is an extended version of WaitUntilVpnConnectionAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpnConnectionAvailableWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpnConnectionAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpnConnections[].State", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", + Expected: "deleting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", + Expected: "deleted", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpnConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpnConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation +// DescribeVpnConnections to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { + return c.WaitUntilVpnConnectionDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpnConnectionDeletedWithContext is an extended version of WaitUntilVpnConnectionDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVpnConnectionDeletedWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpnConnectionDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpnConnections[].State", + Expected: "deleted", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", + Expected: "pending", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpnConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpnConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go new file mode 100644 index 00000000000..1a4a41c401d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go @@ -0,0 +1,8660 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package fsx + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAssociateFileSystemAliases = "AssociateFileSystemAliases" + +// AssociateFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the AssociateFileSystemAliases operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateFileSystemAliases for more information on using the AssociateFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateFileSystemAliasesRequest method. +// req, resp := client.AssociateFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/AssociateFileSystemAliases +func (c *FSx) AssociateFileSystemAliasesRequest(input *AssociateFileSystemAliasesInput) (req *request.Request, output *AssociateFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opAssociateFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateFileSystemAliasesInput{} + } + + output = &AssociateFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateFileSystemAliases API operation for Amazon FSx. +// +// Use this action to associate one or more Domain Name Server (DNS) aliases +// with an existing Amazon FSx for Windows File Server file system. A file systen +// can have a maximum of 50 DNS aliases associated with it at any one time. +// If you try to associate a DNS alias that is already associated with the file +// system, FSx takes no action on that alias in the request. 
For more information, +// see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) +// and Walkthrough 5: Using DNS aliases to access your file system (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html), +// including additional steps you must take to be able to access your file system +// using a DNS alias. +// +// The system response shows the DNS aliases that Amazon FSx is attempting to +// associate with the file system. Use the API operation to monitor the status +// of the aliases Amazon FSx is associating with the file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation AssociateFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/AssociateFileSystemAliases +func (c *FSx) AssociateFileSystemAliases(input *AssociateFileSystemAliasesInput) (*AssociateFileSystemAliasesOutput, error) { + req, out := c.AssociateFileSystemAliasesRequest(input) + return out, req.Send() +} + +// AssociateFileSystemAliasesWithContext is the same as AssociateFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) AssociateFileSystemAliasesWithContext(ctx aws.Context, input *AssociateFileSystemAliasesInput, opts ...request.Option) (*AssociateFileSystemAliasesOutput, error) { + req, out := c.AssociateFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelDataRepositoryTask = "CancelDataRepositoryTask" + +// CancelDataRepositoryTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelDataRepositoryTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelDataRepositoryTask for more information on using the CancelDataRepositoryTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelDataRepositoryTaskRequest method. 
+// req, resp := client.CancelDataRepositoryTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CancelDataRepositoryTask +func (c *FSx) CancelDataRepositoryTaskRequest(input *CancelDataRepositoryTaskInput) (req *request.Request, output *CancelDataRepositoryTaskOutput) { + op := &request.Operation{ + Name: opCancelDataRepositoryTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelDataRepositoryTaskInput{} + } + + output = &CancelDataRepositoryTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelDataRepositoryTask API operation for Amazon FSx. +// +// Cancels an existing Amazon FSx for Lustre data repository task if that task +// is in either the PENDING or EXECUTING state. When you cancel a task, Amazon +// FSx does the following. +// +// * Any files that FSx has already exported are not reverted. +// +// * FSx continues to export any files that are "in-flight" when the cancel +// operation is received. +// +// * FSx does not export any files that have not yet been exported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation CancelDataRepositoryTask for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * UnsupportedOperation +// The requested operation is not supported for this resource or API. +// +// * DataRepositoryTaskNotFound +// The data repository task or tasks you specified could not be found. +// +// * DataRepositoryTaskEnded +// The data repository task could not be canceled because the task has already +// ended. 
+// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CancelDataRepositoryTask +func (c *FSx) CancelDataRepositoryTask(input *CancelDataRepositoryTaskInput) (*CancelDataRepositoryTaskOutput, error) { + req, out := c.CancelDataRepositoryTaskRequest(input) + return out, req.Send() +} + +// CancelDataRepositoryTaskWithContext is the same as CancelDataRepositoryTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelDataRepositoryTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) CancelDataRepositoryTaskWithContext(ctx aws.Context, input *CancelDataRepositoryTaskInput, opts ...request.Option) (*CancelDataRepositoryTaskOutput, error) { + req, out := c.CancelDataRepositoryTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBackup = "CreateBackup" + +// CreateBackupRequest generates a "aws/request.Request" representing the +// client's request for the CreateBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBackup for more information on using the CreateBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateBackupRequest method. +// req, resp := client.CreateBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateBackup +func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { + op := &request.Operation{ + Name: opCreateBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBackupInput{} + } + + output = &CreateBackupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBackup API operation for Amazon FSx. +// +// Creates a backup of an existing Amazon FSx file system. Creating regular +// backups for your file system is a best practice, enabling you to restore +// a file system from a backup if an issue arises with the original file system. +// +// For Amazon FSx for Lustre file systems, you can create a backup only for +// file systems with the following configuration: +// +// * a Persistent deployment type +// +// * is not linked to a data respository. +// +// For more information about backing up Amazon FSx for Lustre file systems, +// see Working with FSx for Lustre backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). +// +// For more information about backing up Amazon FSx for Windows file systems, +// see Working with FSx for Windows backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html). +// +// If a backup with the specified client request token exists, and the parameters +// match, this operation returns the description of the existing backup. If +// a backup specified client request token exists, and the parameters don't +// match, this operation returns IncompatibleParameterError. 
If a backup with +// the specified client request token doesn't exist, CreateBackup does the following: +// +// * Creates a new Amazon FSx backup with an assigned ID, and an initial +// lifecycle state of CREATING. +// +// * Returns the description of the backup. +// +// By using the idempotent operation, you can retry a CreateBackup operation +// without the risk of creating an extra backup. This approach can be useful +// when an initial call fails in a way that makes it unclear whether a backup +// was created. If you use the same client request token and the initial call +// created a backup, the operation returns a successful result because all the +// parameters are the same. +// +// The CreateBackup operation returns while the backup's lifecycle state is +// still CREATING. You can check the backup creation status by calling the DescribeBackups +// operation, which returns the backup state along with other information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation CreateBackup for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * UnsupportedOperation +// The requested operation is not supported for this resource or API. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * BackupInProgress +// Another backup is already under way. Wait for completion before initiating +// additional backups of this file system. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. 
+// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateBackup +func (c *FSx) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + return out, req.Send() +} + +// CreateBackupWithContext is the same as CreateBackup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDataRepositoryTask = "CreateDataRepositoryTask" + +// CreateDataRepositoryTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataRepositoryTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataRepositoryTask for more information on using the CreateDataRepositoryTask +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDataRepositoryTaskRequest method. +// req, resp := client.CreateDataRepositoryTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateDataRepositoryTask +func (c *FSx) CreateDataRepositoryTaskRequest(input *CreateDataRepositoryTaskInput) (req *request.Request, output *CreateDataRepositoryTaskOutput) { + op := &request.Operation{ + Name: opCreateDataRepositoryTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataRepositoryTaskInput{} + } + + output = &CreateDataRepositoryTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDataRepositoryTask API operation for Amazon FSx. +// +// Creates an Amazon FSx for Lustre data repository task. You use data repository +// tasks to perform bulk operations between your Amazon FSx file system and +// its linked data repository. An example of a data repository task is exporting +// any data and metadata changes, including POSIX metadata, to files, directories, +// and symbolic links (symlinks) from your FSx file system to its linked data +// repository. A CreateDataRepositoryTask operation will fail if a data repository +// is not linked to the FSx file system. To learn more about data repository +// tasks, see Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). +// To learn more about linking a data repository to your file system, see Linking +// your file system to an S3 bucket (https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation CreateDataRepositoryTask for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * UnsupportedOperation +// The requested operation is not supported for this resource or API. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * DataRepositoryTaskExecuting +// An existing data repository task is currently executing on the file system. +// Wait until the existing task has completed, then create the new task. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateDataRepositoryTask +func (c *FSx) CreateDataRepositoryTask(input *CreateDataRepositoryTaskInput) (*CreateDataRepositoryTaskOutput, error) { + req, out := c.CreateDataRepositoryTaskRequest(input) + return out, req.Send() +} + +// CreateDataRepositoryTaskWithContext is the same as CreateDataRepositoryTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataRepositoryTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) CreateDataRepositoryTaskWithContext(ctx aws.Context, input *CreateDataRepositoryTaskInput, opts ...request.Option) (*CreateDataRepositoryTaskOutput, error) { + req, out := c.CreateDataRepositoryTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFileSystem = "CreateFileSystem" + +// CreateFileSystemRequest generates a "aws/request.Request" representing the +// client's request for the CreateFileSystem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFileSystem for more information on using the CreateFileSystem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFileSystemRequest method. +// req, resp := client.CreateFileSystemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateFileSystem +func (c *FSx) CreateFileSystemRequest(input *CreateFileSystemInput) (req *request.Request, output *CreateFileSystemOutput) { + op := &request.Operation{ + Name: opCreateFileSystem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFileSystemInput{} + } + + output = &CreateFileSystemOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFileSystem API operation for Amazon FSx. 
+// +// Creates a new, empty Amazon FSx file system. +// +// If a file system with the specified client request token exists and the parameters +// match, CreateFileSystem returns the description of the existing file system. +// If a file system specified client request token exists and the parameters +// don't match, this call returns IncompatibleParameterError. If a file system +// with the specified client request token doesn't exist, CreateFileSystem does +// the following: +// +// * Creates a new, empty Amazon FSx file system with an assigned ID, and +// an initial lifecycle state of CREATING. +// +// * Returns the description of the file system. +// +// This operation requires a client request token in the request that Amazon +// FSx uses to ensure idempotent creation. This means that calling the operation +// multiple times with the same client request token has no effect. By using +// the idempotent operation, you can retry a CreateFileSystem operation without +// the risk of creating an extra file system. This approach can be useful when +// an initial call fails in a way that makes it unclear whether a file system +// was created. Examples are if a transport level timeout occurred, or your +// connection was reset. If you use the same client request token and the initial +// call created a file system, the client receives success as long as the parameters +// are the same. +// +// The CreateFileSystem call returns while the file system's lifecycle state +// is still CREATING. You can check the file-system creation status by calling +// the DescribeFileSystems operation, which returns the file system state along +// with other information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation CreateFileSystem for usage and error information. 
+// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * ActiveDirectoryError +// An Active Directory error. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * InvalidImportPath +// The path provided for data repository import isn't valid. +// +// * InvalidExportPath +// The path provided for data repository export isn't valid. +// +// * InvalidNetworkSettings +// One or more network settings specified in the request are invalid. InvalidVpcId +// means that the ID passed for the virtual private cloud (VPC) is invalid. +// InvalidSubnetIds returns the list of IDs for subnets that are either invalid +// or not part of the VPC specified. InvalidSecurityGroupIds returns the list +// of IDs for security groups that are either invalid or not part of the VPC +// specified. +// +// * InvalidPerUnitStorageThroughput +// An invalid value for PerUnitStorageThroughput was provided. Please create +// your file system again, using a valid value. +// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * MissingFileSystemConfiguration +// A file system configuration is required for this operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateFileSystem +func (c *FSx) CreateFileSystem(input *CreateFileSystemInput) (*CreateFileSystemOutput, error) { + req, out := c.CreateFileSystemRequest(input) + return out, req.Send() +} + +// CreateFileSystemWithContext is the same as CreateFileSystem with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateFileSystem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) CreateFileSystemWithContext(ctx aws.Context, input *CreateFileSystemInput, opts ...request.Option) (*CreateFileSystemOutput, error) { + req, out := c.CreateFileSystemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFileSystemFromBackup = "CreateFileSystemFromBackup" + +// CreateFileSystemFromBackupRequest generates a "aws/request.Request" representing the +// client's request for the CreateFileSystemFromBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFileSystemFromBackup for more information on using the CreateFileSystemFromBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFileSystemFromBackupRequest method. 
+// req, resp := client.CreateFileSystemFromBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateFileSystemFromBackup +func (c *FSx) CreateFileSystemFromBackupRequest(input *CreateFileSystemFromBackupInput) (req *request.Request, output *CreateFileSystemFromBackupOutput) { + op := &request.Operation{ + Name: opCreateFileSystemFromBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFileSystemFromBackupInput{} + } + + output = &CreateFileSystemFromBackupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFileSystemFromBackup API operation for Amazon FSx. +// +// Creates a new Amazon FSx file system from an existing Amazon FSx backup. +// +// If a file system with the specified client request token exists and the parameters +// match, this operation returns the description of the file system. If a client +// request token specified by the file system exists and the parameters don't +// match, this call returns IncompatibleParameterError. If a file system with +// the specified client request token doesn't exist, this operation does the +// following: +// +// * Creates a new Amazon FSx file system from backup with an assigned ID, +// and an initial lifecycle state of CREATING. +// +// * Returns the description of the file system. +// +// Parameters like Active Directory, default share name, automatic backup, and +// backup settings default to the parameters of the file system that was backed +// up, unless overridden. You can explicitly supply other settings. +// +// By using the idempotent operation, you can retry a CreateFileSystemFromBackup +// call without the risk of creating an extra file system. This approach can +// be useful when an initial call fails in a way that makes it unclear whether +// a file system was created. 
Examples are if a transport level timeout occurred, +// or your connection was reset. If you use the same client request token and +// the initial call created a file system, the client receives success as long +// as the parameters are the same. +// +// The CreateFileSystemFromBackup call returns while the file system's lifecycle +// state is still CREATING. You can check the file-system creation status by +// calling the DescribeFileSystems operation, which returns the file system +// state along with other information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation CreateFileSystemFromBackup for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * ActiveDirectoryError +// An Active Directory error. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * InvalidNetworkSettings +// One or more network settings specified in the request are invalid. InvalidVpcId +// means that the ID passed for the virtual private cloud (VPC) is invalid. +// InvalidSubnetIds returns the list of IDs for subnets that are either invalid +// or not part of the VPC specified. InvalidSecurityGroupIds returns the list +// of IDs for security groups that are either invalid or not part of the VPC +// specified. +// +// * InvalidPerUnitStorageThroughput +// An invalid value for PerUnitStorageThroughput was provided. Please create +// your file system again, using a valid value. +// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. 
You can +// increase some service limits by contacting AWS Support. +// +// * BackupNotFound +// No Amazon FSx backups were found based upon the supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * MissingFileSystemConfiguration +// A file system configuration is required for this operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/CreateFileSystemFromBackup +func (c *FSx) CreateFileSystemFromBackup(input *CreateFileSystemFromBackupInput) (*CreateFileSystemFromBackupOutput, error) { + req, out := c.CreateFileSystemFromBackupRequest(input) + return out, req.Send() +} + +// CreateFileSystemFromBackupWithContext is the same as CreateFileSystemFromBackup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFileSystemFromBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) CreateFileSystemFromBackupWithContext(ctx aws.Context, input *CreateFileSystemFromBackupInput, opts ...request.Option) (*CreateFileSystemFromBackupOutput, error) { + req, out := c.CreateFileSystemFromBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBackup = "DeleteBackup" + +// DeleteBackupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteBackup for more information on using the DeleteBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBackupRequest method. +// req, resp := client.DeleteBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DeleteBackup +func (c *FSx) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { + op := &request.Operation{ + Name: opDeleteBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBackupInput{} + } + + output = &DeleteBackupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBackup API operation for Amazon FSx. +// +// Deletes an Amazon FSx backup, deleting its contents. After deletion, the +// backup no longer exists, and its data is gone. +// +// The DeleteBackup call returns instantly. The backup will not show up in later +// DescribeBackups calls. +// +// The data in a deleted backup is also deleted and can't be recovered by any +// means. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DeleteBackup for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * BackupInProgress +// Another backup is already under way. Wait for completion before initiating +// additional backups of this file system. +// +// * BackupNotFound +// No Amazon FSx backups were found based upon the supplied parameters. 
+// +// * BackupRestoring +// You can't delete a backup while it's being used to restore a file system. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DeleteBackup +func (c *FSx) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + return out, req.Send() +} + +// DeleteBackupWithContext is the same as DeleteBackup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) { + req, out := c.DeleteBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFileSystem = "DeleteFileSystem" + +// DeleteFileSystemRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFileSystem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteFileSystem for more information on using the DeleteFileSystem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFileSystemRequest method. +// req, resp := client.DeleteFileSystemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DeleteFileSystem +func (c *FSx) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *request.Request, output *DeleteFileSystemOutput) { + op := &request.Operation{ + Name: opDeleteFileSystem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFileSystemInput{} + } + + output = &DeleteFileSystemOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFileSystem API operation for Amazon FSx. +// +// Deletes a file system, deleting its contents. After deletion, the file system +// no longer exists, and its data is gone. Any existing automatic backups will +// also be deleted. +// +// By default, when you delete an Amazon FSx for Windows File Server file system, +// a final backup is created upon deletion. This final backup is not subject +// to the file system's retention policy, and must be manually deleted. +// +// The DeleteFileSystem action returns while the file system has the DELETING +// status. You can check the file system deletion status by calling the DescribeFileSystems +// action, which returns a list of file systems in your account. If you pass +// the file system ID for a deleted file system, the DescribeFileSystems returns +// a FileSystemNotFound error. +// +// Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest +// if a data repository task is in a PENDING or EXECUTING state. 
+// +// The data in a deleted file system is also deleted and can't be recovered +// by any means. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DeleteFileSystem for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DeleteFileSystem +func (c *FSx) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemOutput, error) { + req, out := c.DeleteFileSystemRequest(input) + return out, req.Send() +} + +// DeleteFileSystemWithContext is the same as DeleteFileSystem with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFileSystem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) DeleteFileSystemWithContext(ctx aws.Context, input *DeleteFileSystemInput, opts ...request.Option) (*DeleteFileSystemOutput, error) { + req, out := c.DeleteFileSystemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeBackups = "DescribeBackups" + +// DescribeBackupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBackups for more information on using the DescribeBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBackupsRequest method. +// req, resp := client.DescribeBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeBackups +func (c *FSx) DescribeBackupsRequest(input *DescribeBackupsInput) (req *request.Request, output *DescribeBackupsOutput) { + op := &request.Operation{ + Name: opDescribeBackups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeBackupsInput{} + } + + output = &DescribeBackupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBackups API operation for Amazon FSx. 
+// +// Returns the description of specific Amazon FSx backups, if a BackupIds value +// is provided for that backup. Otherwise, it returns all backups owned by your +// AWS account in the AWS Region of the endpoint that you're calling. +// +// When retrieving all backups, you can optionally specify the MaxResults parameter +// to limit the number of backups in a response. If more backups remain, Amazon +// FSx returns a NextToken value in the response. In this case, send a later +// request with the NextToken request parameter set to the value of NextToken +// from the last response. +// +// This action is used in an iterative process to retrieve a list of your backups. +// DescribeBackups is called first without a NextTokenvalue. Then the action +// continues to be called with the NextToken parameter set to the value of the +// last NextToken value until a response has no NextToken. +// +// When using this action, keep the following in mind: +// +// * The implementation might return fewer than MaxResults file system descriptions +// while still including a NextToken value. +// +// * The order of backups returned in the response of one DescribeBackups +// call and the order of backups returned across the responses of a multi-call +// iteration is unspecified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DescribeBackups for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * BackupNotFound +// No Amazon FSx backups were found based upon the supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeBackups +func (c *FSx) DescribeBackups(input *DescribeBackupsInput) (*DescribeBackupsOutput, error) { + req, out := c.DescribeBackupsRequest(input) + return out, req.Send() +} + +// DescribeBackupsWithContext is the same as DescribeBackups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeBackupsWithContext(ctx aws.Context, input *DescribeBackupsInput, opts ...request.Option) (*DescribeBackupsOutput, error) { + req, out := c.DescribeBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeBackupsPages iterates over the pages of a DescribeBackups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBackups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBackups operation. 
+// pageNum := 0 +// err := client.DescribeBackupsPages(params, +// func(page *fsx.DescribeBackupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FSx) DescribeBackupsPages(input *DescribeBackupsInput, fn func(*DescribeBackupsOutput, bool) bool) error { + return c.DescribeBackupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeBackupsPagesWithContext same as DescribeBackupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeBackupsPagesWithContext(ctx aws.Context, input *DescribeBackupsInput, fn func(*DescribeBackupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBackupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBackupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBackupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeDataRepositoryTasks = "DescribeDataRepositoryTasks" + +// DescribeDataRepositoryTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataRepositoryTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeDataRepositoryTasks for more information on using the DescribeDataRepositoryTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDataRepositoryTasksRequest method. +// req, resp := client.DescribeDataRepositoryTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeDataRepositoryTasks +func (c *FSx) DescribeDataRepositoryTasksRequest(input *DescribeDataRepositoryTasksInput) (req *request.Request, output *DescribeDataRepositoryTasksOutput) { + op := &request.Operation{ + Name: opDescribeDataRepositoryTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeDataRepositoryTasksInput{} + } + + output = &DescribeDataRepositoryTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDataRepositoryTasks API operation for Amazon FSx. +// +// Returns the description of specific Amazon FSx for Lustre data repository +// tasks, if one or more TaskIds values are provided in the request, or if filters +// are used in the request. You can use filters to narrow the response to include +// just tasks for specific file systems, or tasks in a specific lifecycle state. +// Otherwise, it returns all data repository tasks owned by your AWS account +// in the AWS Region of the endpoint that you're calling. +// +// When retrieving all tasks, you can paginate the response by using the optional +// MaxResults parameter to limit the number of tasks returned in a response. 
+// If more tasks remain, Amazon FSx returns a NextToken value in the response. +// In this case, send a later request with the NextToken request parameter set +// to the value of NextToken from the last response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DescribeDataRepositoryTasks for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * DataRepositoryTaskNotFound +// The data repository task or tasks you specified could not be found. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeDataRepositoryTasks +func (c *FSx) DescribeDataRepositoryTasks(input *DescribeDataRepositoryTasksInput) (*DescribeDataRepositoryTasksOutput, error) { + req, out := c.DescribeDataRepositoryTasksRequest(input) + return out, req.Send() +} + +// DescribeDataRepositoryTasksWithContext is the same as DescribeDataRepositoryTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDataRepositoryTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) DescribeDataRepositoryTasksWithContext(ctx aws.Context, input *DescribeDataRepositoryTasksInput, opts ...request.Option) (*DescribeDataRepositoryTasksOutput, error) { + req, out := c.DescribeDataRepositoryTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeDataRepositoryTasksPages iterates over the pages of a DescribeDataRepositoryTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDataRepositoryTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDataRepositoryTasks operation. +// pageNum := 0 +// err := client.DescribeDataRepositoryTasksPages(params, +// func(page *fsx.DescribeDataRepositoryTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FSx) DescribeDataRepositoryTasksPages(input *DescribeDataRepositoryTasksInput, fn func(*DescribeDataRepositoryTasksOutput, bool) bool) error { + return c.DescribeDataRepositoryTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDataRepositoryTasksPagesWithContext same as DescribeDataRepositoryTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) DescribeDataRepositoryTasksPagesWithContext(ctx aws.Context, input *DescribeDataRepositoryTasksInput, fn func(*DescribeDataRepositoryTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDataRepositoryTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDataRepositoryTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDataRepositoryTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFileSystemAliases = "DescribeFileSystemAliases" + +// DescribeFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFileSystemAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFileSystemAliases for more information on using the DescribeFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFileSystemAliasesRequest method. 
+// req, resp := client.DescribeFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystemAliases +func (c *FSx) DescribeFileSystemAliasesRequest(input *DescribeFileSystemAliasesInput) (req *request.Request, output *DescribeFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opDescribeFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFileSystemAliasesInput{} + } + + output = &DescribeFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFileSystemAliases API operation for Amazon FSx. +// +// Returns the DNS aliases that are associated with the specified Amazon FSx +// for Windows File Server file system. A history of all DNS aliases that have +// been associated with and disassociated from the file system is available +// in the list of AdministrativeAction provided in the DescribeFileSystems operation +// response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DescribeFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystemAliases +func (c *FSx) DescribeFileSystemAliases(input *DescribeFileSystemAliasesInput) (*DescribeFileSystemAliasesOutput, error) { + req, out := c.DescribeFileSystemAliasesRequest(input) + return out, req.Send() +} + +// DescribeFileSystemAliasesWithContext is the same as DescribeFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeFileSystemAliasesWithContext(ctx aws.Context, input *DescribeFileSystemAliasesInput, opts ...request.Option) (*DescribeFileSystemAliasesOutput, error) { + req, out := c.DescribeFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFileSystemAliasesPages iterates over the pages of a DescribeFileSystemAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFileSystemAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFileSystemAliases operation. 
+// pageNum := 0 +// err := client.DescribeFileSystemAliasesPages(params, +// func(page *fsx.DescribeFileSystemAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FSx) DescribeFileSystemAliasesPages(input *DescribeFileSystemAliasesInput, fn func(*DescribeFileSystemAliasesOutput, bool) bool) error { + return c.DescribeFileSystemAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFileSystemAliasesPagesWithContext same as DescribeFileSystemAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeFileSystemAliasesPagesWithContext(ctx aws.Context, input *DescribeFileSystemAliasesInput, fn func(*DescribeFileSystemAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFileSystemAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFileSystemAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFileSystemAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFileSystems = "DescribeFileSystems" + +// DescribeFileSystemsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFileSystems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFileSystems for more information on using the DescribeFileSystems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFileSystemsRequest method. +// req, resp := client.DescribeFileSystemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystems +func (c *FSx) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *request.Request, output *DescribeFileSystemsOutput) { + op := &request.Operation{ + Name: opDescribeFileSystems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFileSystemsInput{} + } + + output = &DescribeFileSystemsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFileSystems API operation for Amazon FSx. +// +// Returns the description of specific Amazon FSx file systems, if a FileSystemIds +// value is provided for that file system. Otherwise, it returns descriptions +// of all file systems owned by your AWS account in the AWS Region of the endpoint +// that you're calling. +// +// When retrieving all file system descriptions, you can optionally specify +// the MaxResults parameter to limit the number of descriptions in a response. +// If more file system descriptions remain, Amazon FSx returns a NextToken value +// in the response. In this case, send a later request with the NextToken request +// parameter set to the value of NextToken from the last response. 
+// +// This action is used in an iterative process to retrieve a list of your file +// system descriptions. DescribeFileSystems is called first without a NextTokenvalue. +// Then the action continues to be called with the NextToken parameter set to +// the value of the last NextToken value until a response has no NextToken. +// +// When using this action, keep the following in mind: +// +// * The implementation might return fewer than MaxResults file system descriptions +// while still including a NextToken value. +// +// * The order of file systems returned in the response of one DescribeFileSystems +// call and the order of file systems returned across the responses of a +// multicall iteration is unspecified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DescribeFileSystems for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystems +func (c *FSx) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFileSystemsOutput, error) { + req, out := c.DescribeFileSystemsRequest(input) + return out, req.Send() +} + +// DescribeFileSystemsWithContext is the same as DescribeFileSystems with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFileSystems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeFileSystemsWithContext(ctx aws.Context, input *DescribeFileSystemsInput, opts ...request.Option) (*DescribeFileSystemsOutput, error) { + req, out := c.DescribeFileSystemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFileSystemsPages iterates over the pages of a DescribeFileSystems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFileSystems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFileSystems operation. +// pageNum := 0 +// err := client.DescribeFileSystemsPages(params, +// func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FSx) DescribeFileSystemsPages(input *DescribeFileSystemsInput, fn func(*DescribeFileSystemsOutput, bool) bool) error { + return c.DescribeFileSystemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFileSystemsPagesWithContext same as DescribeFileSystemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) DescribeFileSystemsPagesWithContext(ctx aws.Context, input *DescribeFileSystemsInput, fn func(*DescribeFileSystemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFileSystemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFileSystemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFileSystemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisassociateFileSystemAliases = "DisassociateFileSystemAliases" + +// DisassociateFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateFileSystemAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateFileSystemAliases for more information on using the DisassociateFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateFileSystemAliasesRequest method. 
+// req, resp := client.DisassociateFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DisassociateFileSystemAliases +func (c *FSx) DisassociateFileSystemAliasesRequest(input *DisassociateFileSystemAliasesInput) (req *request.Request, output *DisassociateFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opDisassociateFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateFileSystemAliasesInput{} + } + + output = &DisassociateFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateFileSystemAliases API operation for Amazon FSx. +// +// Use this action to disassociate, or remove, one or more Domain Name Service +// (DNS) aliases from an Amazon FSx for Windows File Server file system. If +// you attempt to disassociate a DNS alias that is not associated with the file +// system, Amazon FSx responds with a 400 Bad Request. For more information, +// see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html). +// +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to disassociate from the file system. Use the API operation to +// monitor the status of the aliases Amazon FSx is disassociating with the file +// system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DisassociateFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. 
+// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DisassociateFileSystemAliases +func (c *FSx) DisassociateFileSystemAliases(input *DisassociateFileSystemAliasesInput) (*DisassociateFileSystemAliasesOutput, error) { + req, out := c.DisassociateFileSystemAliasesRequest(input) + return out, req.Send() +} + +// DisassociateFileSystemAliasesWithContext is the same as DisassociateFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DisassociateFileSystemAliasesWithContext(ctx aws.Context, input *DisassociateFileSystemAliasesInput, opts ...request.Option) (*DisassociateFileSystemAliasesOutput, error) { + req, out := c.DisassociateFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/ListTagsForResource +func (c *FSx) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon FSx. +// +// Lists tags for an Amazon FSx file systems and backups in the case of Amazon +// FSx for Windows File Server. +// +// When retrieving all tags, you can optionally specify the MaxResults parameter +// to limit the number of tags in a response. If more tags remain, Amazon FSx +// returns a NextToken value in the response. In this case, send a later request +// with the NextToken request parameter set to the value of NextToken from the +// last response. +// +// This action is used in an iterative process to retrieve a list of your tags. +// ListTagsForResource is called first without a NextTokenvalue. Then the action +// continues to be called with the NextToken parameter set to the value of the +// last NextToken value until a response has no NextToken. +// +// When using this action, keep the following in mind: +// +// * The implementation might return fewer than MaxResults file system descriptions +// while still including a NextToken value. 
+// +// * The order of tags returned in the response of one ListTagsForResource +// call and the order of tags returned across the responses of a multi-call +// iteration is unspecified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * ResourceNotFound +// The resource specified by the Amazon Resource Name (ARN) can't be found. +// +// * NotServiceResourceError +// The resource specified for the tagging operation is not a resource type owned +// by Amazon FSx. Use the API of the relevant service to perform the operation. +// +// * ResourceDoesNotSupportTagging +// The resource specified does not support tagging. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/ListTagsForResource +func (c *FSx) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/TagResource +func (c *FSx) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon FSx. +// +// Tags an Amazon FSx resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * ResourceNotFound +// The resource specified by the Amazon Resource Name (ARN) can't be found. +// +// * NotServiceResourceError +// The resource specified for the tagging operation is not a resource type owned +// by Amazon FSx. Use the API of the relevant service to perform the operation. +// +// * ResourceDoesNotSupportTagging +// The resource specified does not support tagging. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/TagResource +func (c *FSx) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UntagResource +func (c *FSx) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon FSx. +// +// This action removes a tag from an Amazon FSx resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * InternalServerError +// A generic error indicating a server-side failure. 
+// +// * ResourceNotFound +// The resource specified by the Amazon Resource Name (ARN) can't be found. +// +// * NotServiceResourceError +// The resource specified for the tagging operation is not a resource type owned +// by Amazon FSx. Use the API of the relevant service to perform the operation. +// +// * ResourceDoesNotSupportTagging +// The resource specified does not support tagging. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UntagResource +func (c *FSx) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFileSystem = "UpdateFileSystem" + +// UpdateFileSystemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFileSystem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFileSystem for more information on using the UpdateFileSystem +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFileSystemRequest method. +// req, resp := client.UpdateFileSystemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UpdateFileSystem +func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *request.Request, output *UpdateFileSystemOutput) { + op := &request.Operation{ + Name: opUpdateFileSystem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateFileSystemInput{} + } + + output = &UpdateFileSystemOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFileSystem API operation for Amazon FSx. +// +// Use this operation to update the configuration of an existing Amazon FSx +// file system. You can update multiple properties in a single request. +// +// For Amazon FSx for Windows File Server file systems, you can update the following +// properties: +// +// * AutomaticBackupRetentionDays +// +// * DailyAutomaticBackupStartTime +// +// * SelfManagedActiveDirectoryConfiguration +// +// * StorageCapacity +// +// * ThroughputCapacity +// +// * WeeklyMaintenanceStartTime +// +// For Amazon FSx for Lustre file systems, you can update the following properties: +// +// * AutoImportPolicy +// +// * AutomaticBackupRetentionDays +// +// * DailyAutomaticBackupStartTime +// +// * StorageCapacity +// +// * WeeklyMaintenanceStartTime +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation UpdateFileSystem for usage and error information. 
+// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * UnsupportedOperation +// The requested operation is not supported for this resource or API. +// +// * IncompatibleParameterError +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * MissingFileSystemConfiguration +// A file system configuration is required for this operation. +// +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UpdateFileSystem +func (c *FSx) UpdateFileSystem(input *UpdateFileSystemInput) (*UpdateFileSystemOutput, error) { + req, out := c.UpdateFileSystemRequest(input) + return out, req.Send() +} + +// UpdateFileSystemWithContext is the same as UpdateFileSystem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFileSystem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) UpdateFileSystemWithContext(ctx aws.Context, input *UpdateFileSystemInput, opts ...request.Option) (*UpdateFileSystemOutput, error) { + req, out := c.UpdateFileSystemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// The Microsoft AD attributes of the Amazon FSx for Windows File Server file +// system. +type ActiveDirectoryBackupAttributes struct { + _ struct{} `type:"structure"` + + // The ID of the AWS Managed Microsoft Active Directory instance to which the + // file system is joined. + ActiveDirectoryId *string `min:"12" type:"string"` + + // The fully qualified domain name of the self-managed AD directory. + DomainName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ActiveDirectoryBackupAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveDirectoryBackupAttributes) GoString() string { + return s.String() +} + +// SetActiveDirectoryId sets the ActiveDirectoryId field's value. +func (s *ActiveDirectoryBackupAttributes) SetActiveDirectoryId(v string) *ActiveDirectoryBackupAttributes { + s.ActiveDirectoryId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *ActiveDirectoryBackupAttributes) SetDomainName(v string) *ActiveDirectoryBackupAttributes { + s.DomainName = &v + return s +} + +// An Active Directory error. +type ActiveDirectoryError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The directory ID of the directory that an error pertains to. + // + // ActiveDirectoryId is a required field + ActiveDirectoryId *string `min:"12" type:"string" required:"true"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` + + // The type of Active Directory error. 
+ Type *string `type:"string" enum:"ActiveDirectoryErrorType"` +} + +// String returns the string representation +func (s ActiveDirectoryError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveDirectoryError) GoString() string { + return s.String() +} + +func newErrorActiveDirectoryError(v protocol.ResponseMetadata) error { + return &ActiveDirectoryError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ActiveDirectoryError) Code() string { + return "ActiveDirectoryError" +} + +// Message returns the exception's message. +func (s *ActiveDirectoryError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ActiveDirectoryError) OrigErr() error { + return nil +} + +func (s *ActiveDirectoryError) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ActiveDirectoryError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ActiveDirectoryError) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes a specific Amazon FSx administrative action for the current Windows +// or Lustre file system. +type AdministrativeAction struct { + _ struct{} `type:"structure"` + + // Describes the type of administrative action, as follows: + // + // * FILE_SYSTEM_UPDATE - A file system update administrative action initiated + // by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI + // (update-file-system). + // + // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase + // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION + // task starts. 
For Windows, storage optimization is the process of migrating + // the file system data to the new, larger disks. For Lustre, storage optimization + // consists of rebalancing the data across the existing and newly added file + // servers. You can track the storage optimization progress using the ProgressPercent + // property. When STORAGE_OPTIMIZATION completes successfully, the parent + // FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. + // + // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a + // new DNS alias with the file system. For more information, see . + // + // * FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate + // a DNS alias from the file system. For more information, see . + AdministrativeActionType *string `type:"string" enum:"AdministrativeActionType"` + + // Provides information about a failed administrative action. + FailureDetails *AdministrativeActionFailureDetails `type:"structure"` + + // Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. + // Does not apply to any other administrative action type. + ProgressPercent *int64 `type:"integer"` + + // Time that the administrative action request was received. + RequestTime *time.Time `type:"timestamp"` + + // Describes the status of the administrative action, as follows: + // + // * FAILED - Amazon FSx failed to process the administrative action successfully. + // + // * IN_PROGRESS - Amazon FSx is processing the administrative action. + // + // * PENDING - Amazon FSx is waiting to process the administrative action. 
+ // + // * COMPLETED - Amazon FSx has finished processing the administrative task. + // + // * UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon + // FSx has updated the file system with the new storage capacity, and is + // now performing the storage optimization process. For more information, + // see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. + Status *string `type:"string" enum:"Status"` + + // Describes the target value for the administration action, provided in the + // UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative + // actions. + TargetFileSystemValues *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s AdministrativeAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeAction) GoString() string { + return s.String() +} + +// SetAdministrativeActionType sets the AdministrativeActionType field's value. +func (s *AdministrativeAction) SetAdministrativeActionType(v string) *AdministrativeAction { + s.AdministrativeActionType = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *AdministrativeAction) SetFailureDetails(v *AdministrativeActionFailureDetails) *AdministrativeAction { + s.FailureDetails = v + return s +} + +// SetProgressPercent sets the ProgressPercent field's value. +func (s *AdministrativeAction) SetProgressPercent(v int64) *AdministrativeAction { + s.ProgressPercent = &v + return s +} + +// SetRequestTime sets the RequestTime field's value. 
+func (s *AdministrativeAction) SetRequestTime(v time.Time) *AdministrativeAction { + s.RequestTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AdministrativeAction) SetStatus(v string) *AdministrativeAction { + s.Status = &v + return s +} + +// SetTargetFileSystemValues sets the TargetFileSystemValues field's value. +func (s *AdministrativeAction) SetTargetFileSystemValues(v *FileSystem) *AdministrativeAction { + s.TargetFileSystemValues = v + return s +} + +// Provides information about a failed administrative action. +type AdministrativeActionFailureDetails struct { + _ struct{} `type:"structure"` + + // Error message providing details about the failed administrative action. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdministrativeActionFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeActionFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *AdministrativeActionFailureDetails) SetMessage(v string) *AdministrativeActionFailureDetails { + s.Message = &v + return s +} + +// A DNS alias that is associated with the file system. You can use a DNS alias +// to access a file system using user-defined DNS names, in addition to the +// default DNS name that Amazon FSx assigns to the file system. For more information, +// see DNS aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) +// in the FSx for Windows File Server User Guide. +type Alias struct { + _ struct{} `type:"structure"` + + // Describes the state of the DNS alias. + // + // * AVAILABLE - The DNS alias is associated with an Amazon FSx file system. + // + // * CREATING - Amazon FSx is creating the DNS alias and associating it with + // the file system. 
+ // + // * CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with + // the file system. + // + // * DELETING - Amazon FSx is disassociating the DNS alias from the file + // system and deleting it. + // + // * DELETE_FAILED - Amazon FSx was unable to disassocate the DNS alias from + // the file system. + Lifecycle *string `type:"string" enum:"AliasLifecycle"` + + // The name of the DNS alias. The alias name has to meet the following requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain, + // for example, accounting.example.com. + // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS names, Amazon FSx stores alphabetic characters as lowercase letters + // (a-z), regardless of how you specify them: as uppercase letters, lowercase + // letters, or the corresponding letters in escape codes. + Name *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s Alias) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alias) GoString() string { + return s.String() +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *Alias) SetLifecycle(v string) *Alias { + s.Lifecycle = &v + return s +} + +// SetName sets the Name field's value. +func (s *Alias) SetName(v string) *Alias { + s.Name = &v + return s +} + +// The request object specifying one or more DNS alias names to associate with +// an Amazon FSx for Windows File Server file system. +type AssociateFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS alias names to associate with the file system. + // The alias name has to comply with the following formatting requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain , + // for example, accounting.corp.example.com. 
+ // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS alias names, Amazon FSx stores alphabetic characters as lowercase + // letters (a-z), regardless of how you specify them: as uppercase letters, + // lowercase letters, or the corresponding letters in escape codes. + // + // Aliases is a required field + Aliases []*string `type:"list" required:"true"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Specifies the file system with which you want to associate one or more DNS + // aliases. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateFileSystemAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateFileSystemAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociateFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateFileSystemAliasesInput"} + if s.Aliases == nil { + invalidParams.Add(request.NewErrParamRequired("Aliases")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliases sets the Aliases field's value. +func (s *AssociateFileSystemAliasesInput) SetAliases(v []*string) *AssociateFileSystemAliasesInput { + s.Aliases = v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *AssociateFileSystemAliasesInput) SetClientRequestToken(v string) *AssociateFileSystemAliasesInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *AssociateFileSystemAliasesInput) SetFileSystemId(v string) *AssociateFileSystemAliasesInput { + s.FileSystemId = &v + return s +} + +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to associate with the file system. Use the API operation to monitor +// the status of the aliases Amazon FSx is associating with the file system. +// It can take up to 2.5 minutes for the alias status to change from CREATING +// to AVAILABLE. +type AssociateFileSystemAliasesOutput struct { + _ struct{} `type:"structure"` + + // An array of the DNS aliases that Amazon FSx is associating with the file + // system. 
+ Aliases []*Alias `type:"list"` +} + +// String returns the string representation +func (s AssociateFileSystemAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateFileSystemAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. +func (s *AssociateFileSystemAliasesOutput) SetAliases(v []*Alias) *AssociateFileSystemAliasesOutput { + s.Aliases = v + return s +} + +// A backup of an Amazon FSx file system. For more information see: +// +// * Working with backups for Windows file systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html) +// +// * Working with backups for Lustre file systems (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html) +type Backup struct { + _ struct{} `type:"structure"` + + // The ID of the backup. + // + // BackupId is a required field + BackupId *string `min:"12" type:"string" required:"true"` + + // The time when a particular backup was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" required:"true"` + + // The configuration of the self-managed Microsoft Active Directory (AD) to + // which the Windows File Server instance is joined. + DirectoryInformation *ActiveDirectoryBackupAttributes `type:"structure"` + + // Details explaining any failures that occur when creating a backup. + FailureDetails *BackupFailureDetails `type:"structure"` + + // Metadata of the file system associated with the backup. This metadata is + // persisted even if the file system is deleted. + // + // FileSystem is a required field + FileSystem *FileSystem `type:"structure" required:"true"` + + // The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the + // backup of the Amazon FSx file system's data at rest. + KmsKeyId *string `min:"1" type:"string"` + + // The lifecycle status of the backup. 
+ // + // * AVAILABLE - The backup is fully available. + // + // * PENDING - For user-initiated backups on Lustre file systems only; Amazon + // FSx has not started creating the backup. + // + // * CREATING - Amazon FSx is creating the backup. + // + // * TRANSFERRING - For user-initiated backups on Lustre file systems only; + // Amazon FSx is transferring the backup to S3. + // + // * DELETED - Amazon FSx deleted the backup and it is no longer available. + // + // * FAILED - Amazon FSx could not complete the backup. + // + // Lifecycle is a required field + Lifecycle *string `type:"string" required:"true" enum:"BackupLifecycle"` + + // The current percent of progress of an asynchronous task. + ProgressPercent *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) for the backup resource. + ResourceARN *string `min:"8" type:"string"` + + // Tags associated with a particular file system. + Tags []*Tag `min:"1" type:"list"` + + // The type of the file system backup. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"BackupType"` +} + +// String returns the string representation +func (s Backup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Backup) GoString() string { + return s.String() +} + +// SetBackupId sets the BackupId field's value. +func (s *Backup) SetBackupId(v string) *Backup { + s.BackupId = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *Backup) SetCreationTime(v time.Time) *Backup { + s.CreationTime = &v + return s +} + +// SetDirectoryInformation sets the DirectoryInformation field's value. +func (s *Backup) SetDirectoryInformation(v *ActiveDirectoryBackupAttributes) *Backup { + s.DirectoryInformation = v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. 
+func (s *Backup) SetFailureDetails(v *BackupFailureDetails) *Backup { + s.FailureDetails = v + return s +} + +// SetFileSystem sets the FileSystem field's value. +func (s *Backup) SetFileSystem(v *FileSystem) *Backup { + s.FileSystem = v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *Backup) SetKmsKeyId(v string) *Backup { + s.KmsKeyId = &v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *Backup) SetLifecycle(v string) *Backup { + s.Lifecycle = &v + return s +} + +// SetProgressPercent sets the ProgressPercent field's value. +func (s *Backup) SetProgressPercent(v int64) *Backup { + s.ProgressPercent = &v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *Backup) SetResourceARN(v string) *Backup { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Backup) SetTags(v []*Tag) *Backup { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *Backup) SetType(v string) *Backup { + s.Type = &v + return s +} + +// If backup creation fails, this structure contains the details of that failure. +type BackupFailureDetails struct { + _ struct{} `type:"structure"` + + // A message describing the backup creation failure. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s BackupFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *BackupFailureDetails) SetMessage(v string) *BackupFailureDetails { + s.Message = &v + return s +} + +// Another backup is already under way. Wait for completion before initiating +// additional backups of this file system. 
+type BackupInProgress struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s BackupInProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupInProgress) GoString() string { + return s.String() +} + +func newErrorBackupInProgress(v protocol.ResponseMetadata) error { + return &BackupInProgress{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BackupInProgress) Code() string { + return "BackupInProgress" +} + +// Message returns the exception's message. +func (s *BackupInProgress) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BackupInProgress) OrigErr() error { + return nil +} + +func (s *BackupInProgress) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BackupInProgress) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BackupInProgress) RequestID() string { + return s.RespMetadata.RequestID +} + +// No Amazon FSx backups were found based upon the supplied parameters. +type BackupNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s BackupNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupNotFound) GoString() string { + return s.String() +} + +func newErrorBackupNotFound(v protocol.ResponseMetadata) error { + return &BackupNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BackupNotFound) Code() string { + return "BackupNotFound" +} + +// Message returns the exception's message. +func (s *BackupNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BackupNotFound) OrigErr() error { + return nil +} + +func (s *BackupNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BackupNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BackupNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// You can't delete a backup while it's being used to restore a file system. +type BackupRestoring struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The ID of a file system being restored from the backup. + FileSystemId *string `min:"11" type:"string"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s BackupRestoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupRestoring) GoString() string { + return s.String() +} + +func newErrorBackupRestoring(v protocol.ResponseMetadata) error { + return &BackupRestoring{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BackupRestoring) Code() string { + return "BackupRestoring" +} + +// Message returns the exception's message. +func (s *BackupRestoring) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BackupRestoring) OrigErr() error { + return nil +} + +func (s *BackupRestoring) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BackupRestoring) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BackupRestoring) RequestID() string { + return s.RespMetadata.RequestID +} + +// A generic error indicating a failure with a client request. +type BadRequest struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s BadRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequest) GoString() string { + return s.String() +} + +func newErrorBadRequest(v protocol.ResponseMetadata) error { + return &BadRequest{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *BadRequest) Code() string { + return "BadRequest" +} + +// Message returns the exception's message. +func (s *BadRequest) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequest) OrigErr() error { + return nil +} + +func (s *BadRequest) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequest) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequest) RequestID() string { + return s.RespMetadata.RequestID +} + +// Cancels a data repository task. +type CancelDataRepositoryTaskInput struct { + _ struct{} `type:"structure"` + + // Specifies the data repository task to cancel. + // + // TaskId is a required field + TaskId *string `min:"12" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelDataRepositoryTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelDataRepositoryTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelDataRepositoryTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelDataRepositoryTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 12)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. 
+func (s *CancelDataRepositoryTaskInput) SetTaskId(v string) *CancelDataRepositoryTaskInput { + s.TaskId = &v + return s +} + +type CancelDataRepositoryTaskOutput struct { + _ struct{} `type:"structure"` + + // The lifecycle status of the data repository task, as follows: + // + // * PENDING - Amazon FSx has not started the task. + // + // * EXECUTING - Amazon FSx is processing the task. + // + // * FAILED - Amazon FSx was not able to complete the task. For example, + // there may be files the task failed to process. The DataRepositoryTaskFailureDetails + // property provides more information about task failures. + // + // * SUCCEEDED - FSx completed the task successfully. + // + // * CANCELED - Amazon FSx canceled the task and it did not complete. + // + // * CANCELING - FSx is in process of canceling the task. + Lifecycle *string `type:"string" enum:"DataRepositoryTaskLifecycle"` + + // The ID of the task being canceled. + TaskId *string `min:"12" type:"string"` +} + +// String returns the string representation +func (s CancelDataRepositoryTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelDataRepositoryTaskOutput) GoString() string { + return s.String() +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *CancelDataRepositoryTaskOutput) SetLifecycle(v string) *CancelDataRepositoryTaskOutput { + s.Lifecycle = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *CancelDataRepositoryTaskOutput) SetTaskId(v string) *CancelDataRepositoryTaskOutput { + s.TaskId = &v + return s +} + +// Provides a report detailing the data repository task results of the files +// processed that match the criteria specified in the report Scope parameter. +// FSx delivers the report to the file system's linked data repository in Amazon +// S3, using the path specified in the report Path parameter. 
You can specify +// whether or not a report gets generated for a task using the Enabled parameter. +type CompletionReport struct { + _ struct{} `type:"structure"` + + // Set Enabled to True to generate a CompletionReport when the task completes. + // If set to true, then you need to provide a report Scope, Path, and Format. + // Set Enabled to False if you do not want a CompletionReport generated when + // the task completes. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // Required if Enabled is set to true. Specifies the format of the CompletionReport. + // REPORT_CSV_20191124 is the only format currently supported. When Format is + // set to REPORT_CSV_20191124, the CompletionReport is provided in CSV format, + // and is delivered to {path}/task-{id}/failures.csv. + Format *string `type:"string" enum:"ReportFormat"` + + // Required if Enabled is set to true. Specifies the location of the report + // on the file system's linked S3 data repository. An absolute path that defines + // where the completion report will be stored in the destination location. The + // Path you provide must be located within the file system’s ExportPath. An + // example Path value is "s3://myBucket/myExportPath/optionalPrefix". The report + // provides the following information for each file in the report: FilePath, + // FileStatus, and ErrorCode. To learn more about a file system's ExportPath, + // see . + Path *string `min:"3" type:"string"` + + // Required if Enabled is set to true. Specifies the scope of the CompletionReport; + // FAILED_FILES_ONLY is the only scope currently supported. When Scope is set + // to FAILED_FILES_ONLY, the CompletionReport only contains information about + // files that the data repository task failed to process. 
+ Scope *string `type:"string" enum:"ReportScope"` +} + +// String returns the string representation +func (s CompletionReport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletionReport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompletionReport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompletionReport"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Path != nil && len(*s.Path) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Path", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *CompletionReport) SetEnabled(v bool) *CompletionReport { + s.Enabled = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *CompletionReport) SetFormat(v string) *CompletionReport { + s.Format = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CompletionReport) SetPath(v string) *CompletionReport { + s.Path = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CompletionReport) SetScope(v string) *CompletionReport { + s.Scope = &v + return s +} + +// The request object for the CreateBackup operation. +type CreateBackupInput struct { + _ struct{} `type:"structure"` + + // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to + // ensure idempotent creation. This string is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The ID of the file system to back up. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // (Optional) The tags to apply to the backup at backup creation. 
The key value + // of the Name tag appears in the console as the backup name. If you have set + // CopyTagsToBackups to true, and you specify one or more tags using the CreateBackup + // action, no existing file system tags are copied from the file system to the + // backup. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateBackupInput) SetClientRequestToken(v string) *CreateBackupInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *CreateBackupInput) SetFileSystemId(v string) *CreateBackupInput { + s.FileSystemId = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateBackupInput) SetTags(v []*Tag) *CreateBackupInput { + s.Tags = v + return s +} + +// The response object for the CreateBackup operation. +type CreateBackupOutput struct { + _ struct{} `type:"structure"` + + // A description of the backup. + Backup *Backup `type:"structure"` +} + +// String returns the string representation +func (s CreateBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBackupOutput) GoString() string { + return s.String() +} + +// SetBackup sets the Backup field's value. +func (s *CreateBackupOutput) SetBackup(v *Backup) *CreateBackupOutput { + s.Backup = v + return s +} + +type CreateDataRepositoryTaskInput struct { + _ struct{} `type:"structure"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The globally unique ID of the file system, assigned by Amazon FSx. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // (Optional) The path or paths on the Amazon FSx file system to use when the + // data repository task is processed. The default path is the file system root + // directory. The paths you provide need to be relative to the mount point of + // the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory + // or file on the file system you want to export, then the path to provide is + // path1. If a path that you provide isn't valid, the task fails. + Paths []*string `type:"list"` + + // Defines whether or not Amazon FSx provides a CompletionReport once the task + // has completed. 
A CompletionReport provides a detailed report on the files + // that Amazon FSx processed that meet the criteria specified by the Scope parameter. + // For more information, see Working with Task Completion Reports (https://docs.aws.amazon.com/fsx/latest/LustreGuide/task-completion-report.html). + // + // Report is a required field + Report *CompletionReport `type:"structure" required:"true"` + + // A list of Tag values, with a maximum of 50 elements. + Tags []*Tag `min:"1" type:"list"` + + // Specifies the type of data repository task to create. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataRepositoryTaskType"` +} + +// String returns the string representation +func (s CreateDataRepositoryTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataRepositoryTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDataRepositoryTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataRepositoryTaskInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.Report == nil { + invalidParams.Add(request.NewErrParamRequired("Report")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Report != nil { + if err := s.Report.Validate(); err != nil { + invalidParams.AddNested("Report", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateDataRepositoryTaskInput) SetClientRequestToken(v string) *CreateDataRepositoryTaskInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *CreateDataRepositoryTaskInput) SetFileSystemId(v string) *CreateDataRepositoryTaskInput { + s.FileSystemId = &v + return s +} + +// SetPaths sets the Paths field's value. +func (s *CreateDataRepositoryTaskInput) SetPaths(v []*string) *CreateDataRepositoryTaskInput { + s.Paths = v + return s +} + +// SetReport sets the Report field's value. 
+func (s *CreateDataRepositoryTaskInput) SetReport(v *CompletionReport) *CreateDataRepositoryTaskInput { + s.Report = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataRepositoryTaskInput) SetTags(v []*Tag) *CreateDataRepositoryTaskInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateDataRepositoryTaskInput) SetType(v string) *CreateDataRepositoryTaskInput { + s.Type = &v + return s +} + +type CreateDataRepositoryTaskOutput struct { + _ struct{} `type:"structure"` + + // The description of the data repository task that you just created. + DataRepositoryTask *DataRepositoryTask `type:"structure"` +} + +// String returns the string representation +func (s CreateDataRepositoryTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataRepositoryTaskOutput) GoString() string { + return s.String() +} + +// SetDataRepositoryTask sets the DataRepositoryTask field's value. +func (s *CreateDataRepositoryTaskOutput) SetDataRepositoryTask(v *DataRepositoryTask) *CreateDataRepositoryTaskOutput { + s.DataRepositoryTask = v + return s +} + +// The request object for the CreateFileSystemFromBackup operation. +type CreateFileSystemFromBackupInput struct { + _ struct{} `type:"structure"` + + // The ID of the backup. Specifies the backup to use if you're creating a file + // system from an existing backup. + // + // BackupId is a required field + BackupId *string `min:"12" type:"string" required:"true"` + + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // creation. This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The Lustre configuration for the file system being created. 
+ LustreConfiguration *CreateFileSystemLustreConfiguration `type:"structure"` + + // A list of IDs for the security groups that apply to the specified network + // interfaces created for file system access. These security groups apply to + // all network interfaces. This value isn't returned in later DescribeFileSystem + // requests. + SecurityGroupIds []*string `type:"list"` + + // Sets the storage type for the Windows file system you're creating from a + // backup. Valid values are SSD and HDD. + // + // * Set to SSD to use solid state drive storage. Supported on all Windows + // deployment types. + // + // * Set to HDD to use hard disk drive storage. Supported on SINGLE_AZ_2 + // and MULTI_AZ_1 Windows file system deployment types. + // + // Default value is SSD. + // + // HDD and SSD storage types have different minimum storage capacity requirements. + // A restored file system's storage capacity is tied to the file system that + // was backed up. You can create a file system that uses HDD storage from a + // backup of a file system that used SSD storage only if the original SSD file + // system had a storage capacity of at least 2000 GiB. + StorageType *string `type:"string" enum:"StorageType"` + + // Specifies the IDs of the subnets that the file system will be accessible + // from. For Windows MULTI_AZ_1 file system deployment types, provide exactly + // two subnet IDs, one for the preferred file server and one for the standby + // file server. You specify one of these subnets as the preferred subnet using + // the WindowsConfiguration > PreferredSubnetID property. + // + // For Windows SINGLE_AZ_1 and SINGLE_AZ_2 deployment types and Lustre file + // systems, provide exactly one subnet ID. The file server is launched in that + // subnet's Availability Zone. + // + // SubnetIds is a required field + SubnetIds []*string `type:"list" required:"true"` + + // The tags to be applied to the file system at file system creation. 
The key + // value of the Name tag appears in the console as the file system name. + Tags []*Tag `min:"1" type:"list"` + + // The configuration for this Microsoft Windows file system. + WindowsConfiguration *CreateFileSystemWindowsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateFileSystemFromBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemFromBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFileSystemFromBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemFromBackupInput"} + if s.BackupId == nil { + invalidParams.Add(request.NewErrParamRequired("BackupId")) + } + if s.BackupId != nil && len(*s.BackupId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("BackupId", 12)) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.WindowsConfiguration != nil { + if err := s.WindowsConfiguration.Validate(); err != nil { + invalidParams.AddNested("WindowsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// 
SetBackupId sets the BackupId field's value. +func (s *CreateFileSystemFromBackupInput) SetBackupId(v string) *CreateFileSystemFromBackupInput { + s.BackupId = &v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateFileSystemFromBackupInput) SetClientRequestToken(v string) *CreateFileSystemFromBackupInput { + s.ClientRequestToken = &v + return s +} + +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *CreateFileSystemFromBackupInput) SetLustreConfiguration(v *CreateFileSystemLustreConfiguration) *CreateFileSystemFromBackupInput { + s.LustreConfiguration = v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateFileSystemFromBackupInput) SetSecurityGroupIds(v []*string) *CreateFileSystemFromBackupInput { + s.SecurityGroupIds = v + return s +} + +// SetStorageType sets the StorageType field's value. +func (s *CreateFileSystemFromBackupInput) SetStorageType(v string) *CreateFileSystemFromBackupInput { + s.StorageType = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *CreateFileSystemFromBackupInput) SetSubnetIds(v []*string) *CreateFileSystemFromBackupInput { + s.SubnetIds = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateFileSystemFromBackupInput) SetTags(v []*Tag) *CreateFileSystemFromBackupInput { + s.Tags = v + return s +} + +// SetWindowsConfiguration sets the WindowsConfiguration field's value. +func (s *CreateFileSystemFromBackupInput) SetWindowsConfiguration(v *CreateFileSystemWindowsConfiguration) *CreateFileSystemFromBackupInput { + s.WindowsConfiguration = v + return s +} + +// The response object for the CreateFileSystemFromBackup operation. +type CreateFileSystemFromBackupOutput struct { + _ struct{} `type:"structure"` + + // A description of the file system. 
+ FileSystem *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s CreateFileSystemFromBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemFromBackupOutput) GoString() string { + return s.String() +} + +// SetFileSystem sets the FileSystem field's value. +func (s *CreateFileSystemFromBackupOutput) SetFileSystem(v *FileSystem) *CreateFileSystemFromBackupOutput { + s.FileSystem = v + return s +} + +// The request object used to create a new Amazon FSx file system. +type CreateFileSystemInput struct { + _ struct{} `type:"structure"` + + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // creation. This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The type of Amazon FSx file system to create, either WINDOWS or LUSTRE. + // + // FileSystemType is a required field + FileSystemType *string `type:"string" required:"true" enum:"FileSystemType"` + + // The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the + // file system's data for Amazon FSx for Windows File Server file systems and + // Amazon FSx for Lustre PERSISTENT_1 file systems at rest. In either case, + // if not specified, the Amazon FSx managed key is used. The Amazon FSx for + // Lustre SCRATCH_1 and SCRATCH_2 file systems are always encrypted at rest + // using Amazon FSx managed keys. For more information, see Encrypt (https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html) + // in the AWS Key Management Service API Reference. + KmsKeyId *string `min:"1" type:"string"` + + // The Lustre configuration for the file system being created. 
+ LustreConfiguration *CreateFileSystemLustreConfiguration `type:"structure"` + + // A list of IDs specifying the security groups to apply to all network interfaces + // created for file system access. This list isn't returned in later requests + // to describe the file system. + SecurityGroupIds []*string `type:"list"` + + // Sets the storage capacity of the file system that you're creating. + // + // For Lustre file systems: + // + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are + // 1200 GiB, 2400 GiB, and increments of 2400 GiB. + // + // * For PERSISTENT HDD file systems, valid values are increments of 6000 + // GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB + // file systems. + // + // * For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, + // and increments of 3600 GiB. + // + // For Windows file systems: + // + // * If StorageType=SSD, valid values are 32 GiB - 65,536 GiB (64 TiB). + // + // * If StorageType=HDD, valid values are 2000 GiB - 65,536 GiB (64 TiB). + // + // StorageCapacity is a required field + StorageCapacity *int64 `type:"integer" required:"true"` + + // Sets the storage type for the file system you're creating. Valid values are + // SSD and HDD. + // + // * Set to SSD to use solid state drive storage. SSD is supported on all + // Windows and Lustre deployment types. + // + // * Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 + // and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT + // Lustre file system deployment types. + // + // Default value is SSD. For more information, see Storage Type Options (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) + // in the Amazon FSx for Windows User Guide and Multiple Storage Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) + // in the Amazon FSx for Lustre User Guide. 
+ StorageType *string `type:"string" enum:"StorageType"` + + // Specifies the IDs of the subnets that the file system will be accessible + // from. For Windows MULTI_AZ_1 file system deployment types, provide exactly + // two subnet IDs, one for the preferred file server and one for the standby + // file server. You specify one of these subnets as the preferred subnet using + // the WindowsConfiguration > PreferredSubnetID property. + // + // For Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment types and + // Lustre file systems, provide exactly one subnet ID. The file server is launched + // in that subnet's Availability Zone. + // + // SubnetIds is a required field + SubnetIds []*string `type:"list" required:"true"` + + // The tags to apply to the file system being created. The key value of the + // Name tag appears in the console as the file system name. + Tags []*Tag `min:"1" type:"list"` + + // The Microsoft Windows configuration for the file system being created. + WindowsConfiguration *CreateFileSystemWindowsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateFileSystemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemType == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemType")) + } + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) + } + if s.StorageCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("StorageCapacity")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.WindowsConfiguration != nil { + if err := s.WindowsConfiguration.Validate(); err != nil { + invalidParams.AddNested("WindowsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateFileSystemInput) SetClientRequestToken(v string) *CreateFileSystemInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemType sets the FileSystemType field's value. +func (s *CreateFileSystemInput) SetFileSystemType(v string) *CreateFileSystemInput { + s.FileSystemType = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
+func (s *CreateFileSystemInput) SetKmsKeyId(v string) *CreateFileSystemInput { + s.KmsKeyId = &v + return s +} + +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *CreateFileSystemInput) SetLustreConfiguration(v *CreateFileSystemLustreConfiguration) *CreateFileSystemInput { + s.LustreConfiguration = v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateFileSystemInput) SetSecurityGroupIds(v []*string) *CreateFileSystemInput { + s.SecurityGroupIds = v + return s +} + +// SetStorageCapacity sets the StorageCapacity field's value. +func (s *CreateFileSystemInput) SetStorageCapacity(v int64) *CreateFileSystemInput { + s.StorageCapacity = &v + return s +} + +// SetStorageType sets the StorageType field's value. +func (s *CreateFileSystemInput) SetStorageType(v string) *CreateFileSystemInput { + s.StorageType = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *CreateFileSystemInput) SetSubnetIds(v []*string) *CreateFileSystemInput { + s.SubnetIds = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateFileSystemInput) SetTags(v []*Tag) *CreateFileSystemInput { + s.Tags = v + return s +} + +// SetWindowsConfiguration sets the WindowsConfiguration field's value. +func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindowsConfiguration) *CreateFileSystemInput { + s.WindowsConfiguration = v + return s +} + +// The Lustre configuration for the file system being created. +type CreateFileSystemLustreConfiguration struct { + _ struct{} `type:"structure"` + + // (Optional) When you create your file system, your existing S3 objects appear + // as file and directory listings. Use this property to choose how Amazon FSx + // keeps your file and directory listings up to date as you add or modify objects + // in your linked S3 bucket. AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. 
Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. + // FSx does not update file and directory listings for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. + // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). + AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // (Optional) Not available to use with file systems that are linked to a data + // repository. A boolean flag indicating whether tags for the file system should + // be copied to backups. The default value is false. If it's set to true, all + // file system tags are copied to all automatic and user-initiated backups when + // the user doesn't specify any backup-specific tags. If this value is true, + // and you specify one or more backup tags, only the specified tags are copied + // to backups. If you specify one or more tags when creating a user-initiated + // backup, no tags are copied from the file system, regardless of this value. + // + // For more information, see Working with backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). 
+ CopyTagsToBackups *bool `type:"boolean"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage + // and shorter-term processing of data. The SCRATCH_2 deployment type provides + // in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. + // + // Choose PERSISTENT_1 deployment type for longer-term storage and workloads + // and encryption of data in transit. To learn more about deployment types, + // see FSx for Lustre Deployment Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html). + // + // Encryption of data in-transit is automatically enabled when you access a + // SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support + // this feature (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/data- protection.html). + // (Default = SCRATCH_1) + // + // Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types + // is supported when accessed from supported instance types in supported AWS + // Regions. To learn more, Encrypting Data in Transit (https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html). + DeploymentType *string `type:"string" enum:"LustreDeploymentType"` + + // The type of drive cache used by PERSISTENT_1 file systems that are provisioned + // with HDD storage devices. This parameter is required when storage type is + // HDD. Set to READ, improve the performance for frequently accessed files and + // allows 20% of the total storage capacity of the file system to be cached. + // + // This parameter is required when StorageType is set to HDD. 
+ DriveCacheType *string `type:"string" enum:"DriveCacheType"` + + // (Optional) The path in Amazon S3 where the root of your Amazon FSx file system + // is exported. The path must use the same Amazon S3 bucket as specified in + // ImportPath. You can provide an optional prefix to which new and changed data + // is to be exported from your Amazon FSx for Lustre file system. If an ExportPath + // value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. + // The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. + // + // The Amazon S3 export bucket must be the same as the import bucket specified + // by ImportPath. If you only specify a bucket name, such as s3://import-bucket, + // you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping + // means that the input data in S3 is overwritten on export. If you provide + // a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], + // Amazon FSx exports the contents of your file system to that export prefix + // in the Amazon S3 bucket. + ExportPath *string `min:"3" type:"string"` + + // (Optional) The path to the Amazon S3 bucket (including the optional prefix) + // that you're using as the data repository for your Amazon FSx for Lustre file + // system. The root of your FSx for Lustre file system will be mapped to the + // root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. + // If you specify a prefix after the Amazon S3 bucket name, only object keys + // with that prefix are loaded into the file system. + ImportPath *string `min:"3" type:"string"` + + // (Optional) For files imported from a data repository, this value determines + // the stripe count and maximum amount of data per file (in MiB) stored on a + // single physical disk. 
The maximum number of disks that a single file can + // be striped across is limited by the total number of disks that make up the + // file system. + // + // The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 + // MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. + ImportedFileChunkSize *int64 `min:"1" type:"integer"` + + // Required for the PERSISTENT_1 deployment type, describes the amount of read + // and write throughput for each 1 tebibyte of storage, in MB/s/TiB. File system + // throughput capacity is calculated by multiplying file system storage capacity + // (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, + // provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file + // system throughput. You pay for the amount of throughput that you provision. + // + // Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: + // 12, 40. + PerUnitStorageThroughput *int64 `min:"12" type:"integer"` + + // (Optional) The preferred start time to perform weekly maintenance, formatted + // d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through + // 7, beginning with Monday and ending with Sunday. + WeeklyMaintenanceStartTime *string `min:"7" type:"string"` +} + +// String returns the string representation +func (s CreateFileSystemLustreConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemLustreConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateFileSystemLustreConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemLustreConfiguration"} + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } + if s.ExportPath != nil && len(*s.ExportPath) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExportPath", 3)) + } + if s.ImportPath != nil && len(*s.ImportPath) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ImportPath", 3)) + } + if s.ImportedFileChunkSize != nil && *s.ImportedFileChunkSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("ImportedFileChunkSize", 1)) + } + if s.PerUnitStorageThroughput != nil && *s.PerUnitStorageThroughput < 12 { + invalidParams.Add(request.NewErrParamMinValue("PerUnitStorageThroughput", 12)) + } + if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoImportPolicy sets the AutoImportPolicy field's value. +func (s *CreateFileSystemLustreConfiguration) SetAutoImportPolicy(v string) *CreateFileSystemLustreConfiguration { + s.AutoImportPolicy = &v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *CreateFileSystemLustreConfiguration) SetAutomaticBackupRetentionDays(v int64) *CreateFileSystemLustreConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetCopyTagsToBackups sets the CopyTagsToBackups field's value. +func (s *CreateFileSystemLustreConfiguration) SetCopyTagsToBackups(v bool) *CreateFileSystemLustreConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. 
+func (s *CreateFileSystemLustreConfiguration) SetDailyAutomaticBackupStartTime(v string) *CreateFileSystemLustreConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetDeploymentType sets the DeploymentType field's value. +func (s *CreateFileSystemLustreConfiguration) SetDeploymentType(v string) *CreateFileSystemLustreConfiguration { + s.DeploymentType = &v + return s +} + +// SetDriveCacheType sets the DriveCacheType field's value. +func (s *CreateFileSystemLustreConfiguration) SetDriveCacheType(v string) *CreateFileSystemLustreConfiguration { + s.DriveCacheType = &v + return s +} + +// SetExportPath sets the ExportPath field's value. +func (s *CreateFileSystemLustreConfiguration) SetExportPath(v string) *CreateFileSystemLustreConfiguration { + s.ExportPath = &v + return s +} + +// SetImportPath sets the ImportPath field's value. +func (s *CreateFileSystemLustreConfiguration) SetImportPath(v string) *CreateFileSystemLustreConfiguration { + s.ImportPath = &v + return s +} + +// SetImportedFileChunkSize sets the ImportedFileChunkSize field's value. +func (s *CreateFileSystemLustreConfiguration) SetImportedFileChunkSize(v int64) *CreateFileSystemLustreConfiguration { + s.ImportedFileChunkSize = &v + return s +} + +// SetPerUnitStorageThroughput sets the PerUnitStorageThroughput field's value. +func (s *CreateFileSystemLustreConfiguration) SetPerUnitStorageThroughput(v int64) *CreateFileSystemLustreConfiguration { + s.PerUnitStorageThroughput = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *CreateFileSystemLustreConfiguration) SetWeeklyMaintenanceStartTime(v string) *CreateFileSystemLustreConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// The response object returned after the file system is created. +type CreateFileSystemOutput struct { + _ struct{} `type:"structure"` + + // The configuration of the file system that was created. 
+ FileSystem *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s CreateFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemOutput) GoString() string { + return s.String() +} + +// SetFileSystem sets the FileSystem field's value. +func (s *CreateFileSystemOutput) SetFileSystem(v *FileSystem) *CreateFileSystemOutput { + s.FileSystem = v + return s +} + +// The configuration object for the Microsoft Windows file system used in CreateFileSystem +// and CreateFileSystemFromBackup operations. +type CreateFileSystemWindowsConfiguration struct { + _ struct{} `type:"structure"` + + // The ID for an existing AWS Managed Microsoft Active Directory (AD) instance + // that the file system should join when it's created. + ActiveDirectoryId *string `min:"12" type:"string"` + + // An array of one or more DNS alias names that you want to associate with the + // Amazon FSx file system. Aliases allow you to use existing DNS names to access + // the data in your Amazon FSx file system. You can associate up to 50 aliases + // with a file system at any time. You can associate additional DNS aliases + // after you create the file system using the AssociateFileSystemAliases operation. + // You can remove DNS aliases from the file system after it is created using + // the DisassociateFileSystemAliases operation. You only need to specify the + // alias name in the request payload. + // + // For more information, see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) + // and Walkthrough 5: Using DNS aliases to access your file system (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html), + // including additional steps you must take to be able to access your file system + // using a DNS alias. 
+ // + // An alias name has to meet the following requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain, + // for example, accounting.example.com. + // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS alias names, Amazon FSx stores alphabetic characters as lowercase + // letters (a-z), regardless of how you specify them: as uppercase letters, + // lowercase letters, or the corresponding letters in escape codes. + Aliases []*string `type:"list"` + + // The number of days to retain automatic backups. The default is to retain + // backups for 7 days. Setting this value to 0 disables the creation of automatic + // backups. The maximum retention period for backups is 90 days. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A boolean flag indicating whether tags for the file system should be copied + // to backups. This value defaults to false. If it's set to true, all tags for + // the file system are copied to all automatic and user-initiated backups where + // the user doesn't specify tags. If this value is true, and you specify one + // or more tags, only the specified tags are copied to backups. If you specify + // one or more tags when creating a user-initiated backup, no tags are copied + // from the file system, regardless of this value. + CopyTagsToBackups *bool `type:"boolean"` + + // The preferred time to take daily automatic backups, formatted HH:MM in the + // UTC time zone. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // Specifies the file system deployment type, valid values are the following: + // + // * MULTI_AZ_1 - Deploys a high availability file system that is configured + // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. 
+ // You can only deploy a Multi-AZ file system in AWS Regions that have a + // minimum of three Availability Zones. Also supports HDD storage type + // + // * SINGLE_AZ_1 - (Default) Choose to deploy a file system that is configured + // for single AZ redundancy. + // + // * SINGLE_AZ_2 - The latest generation Single AZ file system. Specifies + // a file system that is configured for single AZ redundancy and supports + // HDD storage type. + // + // For more information, see Availability and Durability: Single-AZ and Multi-AZ + // File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). + DeploymentType *string `type:"string" enum:"WindowsDeploymentType"` + + // Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet + // in which you want the preferred file server to be located. For in-AWS applications, + // we recommend that you launch your clients in the same Availability Zone (AZ) + // as your preferred file server to reduce cross-AZ data transfer costs and + // minimize latency. + PreferredSubnetId *string `min:"15" type:"string"` + + // The configuration that Amazon FSx uses to join the Windows File Server instance + // to your self-managed (including on-premises) Microsoft Active Directory (AD) + // directory. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfiguration `type:"structure"` + + // The throughput of an Amazon FSx file system, measured in megabytes per second, + // in 2 to the nth increments, between 2^3 (8) and 2^11 (2048). + // + // ThroughputCapacity is a required field + ThroughputCapacity *int64 `min:"8" type:"integer" required:"true"` + + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone, where d is the weekday number, from 1 through 7, beginning + // with Monday and ending with Sunday. 
+ WeeklyMaintenanceStartTime *string `min:"7" type:"string"` +} + +// String returns the string representation +func (s CreateFileSystemWindowsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFileSystemWindowsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFileSystemWindowsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemWindowsConfiguration"} + if s.ActiveDirectoryId != nil && len(*s.ActiveDirectoryId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("ActiveDirectoryId", 12)) + } + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } + if s.PreferredSubnetId != nil && len(*s.PreferredSubnetId) < 15 { + invalidParams.Add(request.NewErrParamMinLen("PreferredSubnetId", 15)) + } + if s.ThroughputCapacity == nil { + invalidParams.Add(request.NewErrParamRequired("ThroughputCapacity")) + } + if s.ThroughputCapacity != nil && *s.ThroughputCapacity < 8 { + invalidParams.Add(request.NewErrParamMinValue("ThroughputCapacity", 8)) + } + if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) + } + if s.SelfManagedActiveDirectoryConfiguration != nil { + if err := s.SelfManagedActiveDirectoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("SelfManagedActiveDirectoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActiveDirectoryId sets the ActiveDirectoryId field's value. 
+func (s *CreateFileSystemWindowsConfiguration) SetActiveDirectoryId(v string) *CreateFileSystemWindowsConfiguration { + s.ActiveDirectoryId = &v + return s +} + +// SetAliases sets the Aliases field's value. +func (s *CreateFileSystemWindowsConfiguration) SetAliases(v []*string) *CreateFileSystemWindowsConfiguration { + s.Aliases = v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *CreateFileSystemWindowsConfiguration) SetAutomaticBackupRetentionDays(v int64) *CreateFileSystemWindowsConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetCopyTagsToBackups sets the CopyTagsToBackups field's value. +func (s *CreateFileSystemWindowsConfiguration) SetCopyTagsToBackups(v bool) *CreateFileSystemWindowsConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *CreateFileSystemWindowsConfiguration) SetDailyAutomaticBackupStartTime(v string) *CreateFileSystemWindowsConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetDeploymentType sets the DeploymentType field's value. +func (s *CreateFileSystemWindowsConfiguration) SetDeploymentType(v string) *CreateFileSystemWindowsConfiguration { + s.DeploymentType = &v + return s +} + +// SetPreferredSubnetId sets the PreferredSubnetId field's value. +func (s *CreateFileSystemWindowsConfiguration) SetPreferredSubnetId(v string) *CreateFileSystemWindowsConfiguration { + s.PreferredSubnetId = &v + return s +} + +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *CreateFileSystemWindowsConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryConfiguration) *CreateFileSystemWindowsConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + +// SetThroughputCapacity sets the ThroughputCapacity field's value. 
+func (s *CreateFileSystemWindowsConfiguration) SetThroughputCapacity(v int64) *CreateFileSystemWindowsConfiguration { + s.ThroughputCapacity = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *CreateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v string) *CreateFileSystemWindowsConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// The data repository configuration object for Lustre file systems returned +// in the response of the CreateFileSystem operation. +type DataRepositoryConfiguration struct { + _ struct{} `type:"structure"` + + // Describes the file system's linked S3 data repository's AutoImportPolicy. + // The AutoImportPolicy configures how Amazon FSx keeps your file and directory + // listings up to date as you add or modify objects in your linked S3 bucket. + // AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. + // FSx does not update file and directory listings for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. + // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). 
+	AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"`
+
+	// The export path to the Amazon S3 bucket (and prefix) that you are using to
+	// store new and changed Lustre file system files in S3.
+	ExportPath *string `min:"3" type:"string"`
+
+	// Provides detailed information about the data repository if its Lifecycle
+	// is set to MISCONFIGURED.
+	FailureDetails *DataRepositoryFailureDetails `type:"structure"`
+
+	// The import path to the Amazon S3 bucket (and optional prefix) that you're
+	// using as the data repository for your FSx for Lustre file system, for example
+	// s3://import-bucket/optional-prefix. If a prefix is specified after the Amazon
+	// S3 bucket name, only object keys with that prefix are loaded into the file
+	// system.
+	ImportPath *string `min:"3" type:"string"`
+
+	// For files imported from a data repository, this value determines the stripe
+	// count and maximum amount of data per file (in MiB) stored on a single physical
+	// disk. The maximum number of disks that a single file can be striped across
+	// is limited by the total number of disks that make up the file system.
+	//
+	// The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000
+	// MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
+	ImportedFileChunkSize *int64 `min:"1" type:"integer"`
+
+	// Describes the state of the file system's S3 durable data repository, if it
+	// is configured with an S3 repository. The lifecycle can have the following
+	// values:
+	//
+	//    * CREATING - The data repository configuration between the FSx file system
+	//    and the linked S3 data repository is being created. The data repository
+	//    is unavailable.
+	//
+	//    * AVAILABLE - The data repository is available for use.
+	//
+	//    * MISCONFIGURED - Amazon FSx cannot automatically import updates from
+	//    the S3 bucket until the data repository configuration is corrected.
For
+	//    more information, see Troubleshooting a Misconfigured linked S3 bucket
+	//    (https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository).
+	//
+	//    * UPDATING - The data repository is undergoing a customer initiated update
+	//    and availability may be impacted.
+	Lifecycle *string `type:"string" enum:"DataRepositoryLifecycle"`
+}
+
+// String returns the string representation
+func (s DataRepositoryConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRepositoryConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetAutoImportPolicy sets the AutoImportPolicy field's value.
+func (s *DataRepositoryConfiguration) SetAutoImportPolicy(v string) *DataRepositoryConfiguration {
+	s.AutoImportPolicy = &v
+	return s
+}
+
+// SetExportPath sets the ExportPath field's value.
+func (s *DataRepositoryConfiguration) SetExportPath(v string) *DataRepositoryConfiguration {
+	s.ExportPath = &v
+	return s
+}
+
+// SetFailureDetails sets the FailureDetails field's value.
+func (s *DataRepositoryConfiguration) SetFailureDetails(v *DataRepositoryFailureDetails) *DataRepositoryConfiguration {
+	s.FailureDetails = v
+	return s
+}
+
+// SetImportPath sets the ImportPath field's value.
+func (s *DataRepositoryConfiguration) SetImportPath(v string) *DataRepositoryConfiguration {
+	s.ImportPath = &v
+	return s
+}
+
+// SetImportedFileChunkSize sets the ImportedFileChunkSize field's value.
+func (s *DataRepositoryConfiguration) SetImportedFileChunkSize(v int64) *DataRepositoryConfiguration {
+	s.ImportedFileChunkSize = &v
+	return s
+}
+
+// SetLifecycle sets the Lifecycle field's value.
+func (s *DataRepositoryConfiguration) SetLifecycle(v string) *DataRepositoryConfiguration {
+	s.Lifecycle = &v
+	return s
+}
+
+// Provides detailed information about the data repository if its Lifecycle
+// is set to MISCONFIGURED.
+type DataRepositoryFailureDetails struct { + _ struct{} `type:"structure"` + + // A detailed error message. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DataRepositoryFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *DataRepositoryFailureDetails) SetMessage(v string) *DataRepositoryFailureDetails { + s.Message = &v + return s +} + +// A description of the data repository task. You use data repository tasks +// to perform bulk transfer operations between your Amazon FSx file system and +// its linked data repository. +type DataRepositoryTask struct { + _ struct{} `type:"structure"` + + // The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), + // also known as Unix time. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" required:"true"` + + // The time that Amazon FSx completed processing the task, populated after the + // task is complete. + EndTime *time.Time `type:"timestamp"` + + // Failure message describing why the task failed, it is populated only when + // Lifecycle is set to FAILED. + FailureDetails *DataRepositoryTaskFailureDetails `type:"structure"` + + // The globally unique ID of the file system, assigned by Amazon FSx. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // The lifecycle status of the data repository task, as follows: + // + // * PENDING - Amazon FSx has not started the task. + // + // * EXECUTING - Amazon FSx is processing the task. + // + // * FAILED - Amazon FSx was not able to complete the task. For example, + // there may be files the task failed to process. 
The DataRepositoryTaskFailureDetails + // property provides more information about task failures. + // + // * SUCCEEDED - FSx completed the task successfully. + // + // * CANCELED - Amazon FSx canceled the task and it did not complete. + // + // * CANCELING - FSx is in process of canceling the task. + // + // You cannot delete an FSx for Lustre file system if there are data repository + // tasks for the file system in the PENDING or EXECUTING states. Please retry + // when the data repository task is finished (with a status of CANCELED, SUCCEEDED, + // or FAILED). You can use the DescribeDataRepositoryTask action to monitor + // the task status. Contact the FSx team if you need to delete your file system + // immediately. + // + // Lifecycle is a required field + Lifecycle *string `type:"string" required:"true" enum:"DataRepositoryTaskLifecycle"` + + // An array of paths on the Amazon FSx for Lustre file system that specify the + // data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY + // task, the paths specify which data to export to the linked data repository. + // + // (Default) If Paths is not specified, Amazon FSx uses the file system root + // directory. + Paths []*string `type:"list"` + + // Provides a report detailing the data repository task results of the files + // processed that match the criteria specified in the report Scope parameter. + // FSx delivers the report to the file system's linked data repository in Amazon + // S3, using the path specified in the report Path parameter. You can specify + // whether or not a report gets generated for a task using the Enabled parameter. + Report *CompletionReport `type:"structure"` + + // The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify + // AWS resources. We require an ARN when you need to specify a resource unambiguously + // across all of AWS. 
For more information, see Amazon Resource Names (ARNs) + // and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceARN *string `min:"8" type:"string"` + + // The time that Amazon FSx began processing the task. + StartTime *time.Time `type:"timestamp"` + + // Provides the status of the number of files that the task has processed successfully + // and failed to process. + Status *DataRepositoryTaskStatus `type:"structure"` + + // A list of Tag values, with a maximum of 50 elements. + Tags []*Tag `min:"1" type:"list"` + + // The system-generated, unique 17-digit ID of the data repository task. + // + // TaskId is a required field + TaskId *string `min:"12" type:"string" required:"true"` + + // The type of data repository task; EXPORT_TO_REPOSITORY is the only type currently + // supported. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataRepositoryTaskType"` +} + +// String returns the string representation +func (s DataRepositoryTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryTask) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DataRepositoryTask) SetCreationTime(v time.Time) *DataRepositoryTask { + s.CreationTime = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DataRepositoryTask) SetEndTime(v time.Time) *DataRepositoryTask { + s.EndTime = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *DataRepositoryTask) SetFailureDetails(v *DataRepositoryTaskFailureDetails) *DataRepositoryTask { + s.FailureDetails = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. 
+func (s *DataRepositoryTask) SetFileSystemId(v string) *DataRepositoryTask { + s.FileSystemId = &v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *DataRepositoryTask) SetLifecycle(v string) *DataRepositoryTask { + s.Lifecycle = &v + return s +} + +// SetPaths sets the Paths field's value. +func (s *DataRepositoryTask) SetPaths(v []*string) *DataRepositoryTask { + s.Paths = v + return s +} + +// SetReport sets the Report field's value. +func (s *DataRepositoryTask) SetReport(v *CompletionReport) *DataRepositoryTask { + s.Report = v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *DataRepositoryTask) SetResourceARN(v string) *DataRepositoryTask { + s.ResourceARN = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DataRepositoryTask) SetStartTime(v time.Time) *DataRepositoryTask { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DataRepositoryTask) SetStatus(v *DataRepositoryTaskStatus) *DataRepositoryTask { + s.Status = v + return s +} + +// SetTags sets the Tags field's value. +func (s *DataRepositoryTask) SetTags(v []*Tag) *DataRepositoryTask { + s.Tags = v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *DataRepositoryTask) SetTaskId(v string) *DataRepositoryTask { + s.TaskId = &v + return s +} + +// SetType sets the Type field's value. +func (s *DataRepositoryTask) SetType(v string) *DataRepositoryTask { + s.Type = &v + return s +} + +// The data repository task could not be canceled because the task has already +// ended. +type DataRepositoryTaskEnded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s DataRepositoryTaskEnded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryTaskEnded) GoString() string { + return s.String() +} + +func newErrorDataRepositoryTaskEnded(v protocol.ResponseMetadata) error { + return &DataRepositoryTaskEnded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DataRepositoryTaskEnded) Code() string { + return "DataRepositoryTaskEnded" +} + +// Message returns the exception's message. +func (s *DataRepositoryTaskEnded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DataRepositoryTaskEnded) OrigErr() error { + return nil +} + +func (s *DataRepositoryTaskEnded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DataRepositoryTaskEnded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DataRepositoryTaskEnded) RequestID() string { + return s.RespMetadata.RequestID +} + +// An existing data repository task is currently executing on the file system. +// Wait until the existing task has completed, then create the new task. +type DataRepositoryTaskExecuting struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s DataRepositoryTaskExecuting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryTaskExecuting) GoString() string { + return s.String() +} + +func newErrorDataRepositoryTaskExecuting(v protocol.ResponseMetadata) error { + return &DataRepositoryTaskExecuting{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DataRepositoryTaskExecuting) Code() string { + return "DataRepositoryTaskExecuting" +} + +// Message returns the exception's message. +func (s *DataRepositoryTaskExecuting) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DataRepositoryTaskExecuting) OrigErr() error { + return nil +} + +func (s *DataRepositoryTaskExecuting) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DataRepositoryTaskExecuting) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DataRepositoryTaskExecuting) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about why a data repository task failed. Only populated +// when the task Lifecycle is set to FAILED. +type DataRepositoryTaskFailureDetails struct { + _ struct{} `type:"structure"` + + // A detailed error message. 
+	Message *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DataRepositoryTaskFailureDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRepositoryTaskFailureDetails) GoString() string {
+	return s.String()
+}
+
+// SetMessage sets the Message field's value.
+func (s *DataRepositoryTaskFailureDetails) SetMessage(v string) *DataRepositoryTaskFailureDetails {
+	s.Message = &v
+	return s
+}
+
+// (Optional) An array of filter objects you can use to filter the response
+// of data repository tasks you will see in the response. You can filter
+// the tasks returned in the response by one or more file system IDs, task lifecycles,
+// and by task type. A filter object consists of a filter Name, and one or more
+// Values for the filter.
+type DataRepositoryTaskFilter struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the task property to use in filtering the tasks returned in the response.
+	//
+	//    * Use file-system-id to retrieve data repository tasks for specific file
+	//    systems.
+	//
+	//    * Use task-lifecycle to retrieve data repository tasks with one or more
+	//    specific lifecycle states, as follows: CANCELED, EXECUTING, FAILED, PENDING,
+	//    and SUCCEEDED.
+	Name *string `type:"string" enum:"DataRepositoryTaskFilterName"`
+
+	// Use Values to include the specific file system IDs and task lifecycle states
+	// for the filters you are using.
+	Values []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s DataRepositoryTaskFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DataRepositoryTaskFilter) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *DataRepositoryTaskFilter) SetName(v string) *DataRepositoryTaskFilter {
+	s.Name = &v
+	return s
+}
+
+// SetValues sets the Values field's value.
+func (s *DataRepositoryTaskFilter) SetValues(v []*string) *DataRepositoryTaskFilter { + s.Values = v + return s +} + +// The data repository task or tasks you specified could not be found. +type DataRepositoryTaskNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s DataRepositoryTaskNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryTaskNotFound) GoString() string { + return s.String() +} + +func newErrorDataRepositoryTaskNotFound(v protocol.ResponseMetadata) error { + return &DataRepositoryTaskNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DataRepositoryTaskNotFound) Code() string { + return "DataRepositoryTaskNotFound" +} + +// Message returns the exception's message. +func (s *DataRepositoryTaskNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DataRepositoryTaskNotFound) OrigErr() error { + return nil +} + +func (s *DataRepositoryTaskNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DataRepositoryTaskNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DataRepositoryTaskNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides the task status showing a running total of the total number of files +// to be processed, the number successfully processed, and the number of files +// the task failed to process. 
+type DataRepositoryTaskStatus struct { + _ struct{} `type:"structure"` + + // A running total of the number of files that the task failed to process. + FailedCount *int64 `type:"long"` + + // The time at which the task status was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A running total of the number of files that the task has successfully processed. + SucceededCount *int64 `type:"long"` + + // The total number of files that the task will process. While a task is executing, + // the sum of SucceededCount plus FailedCount may not equal TotalCount. When + // the task is complete, TotalCount equals the sum of SucceededCount plus FailedCount. + TotalCount *int64 `type:"long"` +} + +// String returns the string representation +func (s DataRepositoryTaskStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryTaskStatus) GoString() string { + return s.String() +} + +// SetFailedCount sets the FailedCount field's value. +func (s *DataRepositoryTaskStatus) SetFailedCount(v int64) *DataRepositoryTaskStatus { + s.FailedCount = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *DataRepositoryTaskStatus) SetLastUpdatedTime(v time.Time) *DataRepositoryTaskStatus { + s.LastUpdatedTime = &v + return s +} + +// SetSucceededCount sets the SucceededCount field's value. +func (s *DataRepositoryTaskStatus) SetSucceededCount(v int64) *DataRepositoryTaskStatus { + s.SucceededCount = &v + return s +} + +// SetTotalCount sets the TotalCount field's value. +func (s *DataRepositoryTaskStatus) SetTotalCount(v int64) *DataRepositoryTaskStatus { + s.TotalCount = &v + return s +} + +// The request object for DeleteBackup operation. +type DeleteBackupInput struct { + _ struct{} `type:"structure"` + + // The ID of the backup you want to delete. 
+ // + // BackupId is a required field + BackupId *string `min:"12" type:"string" required:"true"` + + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // deletion. This is automatically filled on your behalf when using the AWS + // CLI or SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` +} + +// String returns the string representation +func (s DeleteBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} + if s.BackupId == nil { + invalidParams.Add(request.NewErrParamRequired("BackupId")) + } + if s.BackupId != nil && len(*s.BackupId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("BackupId", 12)) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupId sets the BackupId field's value. +func (s *DeleteBackupInput) SetBackupId(v string) *DeleteBackupInput { + s.BackupId = &v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DeleteBackupInput) SetClientRequestToken(v string) *DeleteBackupInput { + s.ClientRequestToken = &v + return s +} + +// The response object for DeleteBackup operation. +type DeleteBackupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the backup deleted. + BackupId *string `min:"12" type:"string"` + + // The lifecycle of the backup. Should be DELETED. 
+ Lifecycle *string `type:"string" enum:"BackupLifecycle"` +} + +// String returns the string representation +func (s DeleteBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBackupOutput) GoString() string { + return s.String() +} + +// SetBackupId sets the BackupId field's value. +func (s *DeleteBackupOutput) SetBackupId(v string) *DeleteBackupOutput { + s.BackupId = &v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *DeleteBackupOutput) SetLifecycle(v string) *DeleteBackupOutput { + s.Lifecycle = &v + return s +} + +// The request object for DeleteFileSystem operation. +type DeleteFileSystemInput struct { + _ struct{} `type:"structure"` + + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // deletion. This is automatically filled on your behalf when using the AWS + // CLI or SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The ID of the file system you want to delete. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // The configuration object for the Amazon FSx for Lustre file system being + // deleted in the DeleteFileSystem operation. + LustreConfiguration *DeleteFileSystemLustreConfiguration `type:"structure"` + + // The configuration object for the Microsoft Windows file system used in the + // DeleteFileSystem operation. + WindowsConfiguration *DeleteFileSystemWindowsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s DeleteFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteFileSystemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileSystemInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.WindowsConfiguration != nil { + if err := s.WindowsConfiguration.Validate(); err != nil { + invalidParams.AddNested("WindowsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DeleteFileSystemInput) SetClientRequestToken(v string) *DeleteFileSystemInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DeleteFileSystemInput) SetFileSystemId(v string) *DeleteFileSystemInput { + s.FileSystemId = &v + return s +} + +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *DeleteFileSystemInput) SetLustreConfiguration(v *DeleteFileSystemLustreConfiguration) *DeleteFileSystemInput { + s.LustreConfiguration = v + return s +} + +// SetWindowsConfiguration sets the WindowsConfiguration field's value. +func (s *DeleteFileSystemInput) SetWindowsConfiguration(v *DeleteFileSystemWindowsConfiguration) *DeleteFileSystemInput { + s.WindowsConfiguration = v + return s +} + +// The configuration object for the Amazon FSx for Lustre file system being +// deleted in the DeleteFileSystem operation. 
+type DeleteFileSystemLustreConfiguration struct { + _ struct{} `type:"structure"` + + // Use if SkipFinalBackup is set to false, and you want to apply an array of + // tags to the final backup. If you have set the file system property CopyTagsToBackups + // to true, and you specify one or more FinalBackupTags when deleting a file + // system, Amazon FSx will not copy any existing file system tags to the backup. + FinalBackupTags []*Tag `min:"1" type:"list"` + + // Set SkipFinalBackup to false if you want to take a final backup of the file + // system you are deleting. By default, Amazon FSx will not take a final backup + // on your behalf when the DeleteFileSystem operation is invoked. (Default = + // true) + SkipFinalBackup *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteFileSystemLustreConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemLustreConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileSystemLustreConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileSystemLustreConfiguration"} + if s.FinalBackupTags != nil && len(s.FinalBackupTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FinalBackupTags", 1)) + } + if s.FinalBackupTags != nil { + for i, v := range s.FinalBackupTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FinalBackupTags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFinalBackupTags sets the FinalBackupTags field's value. 
+func (s *DeleteFileSystemLustreConfiguration) SetFinalBackupTags(v []*Tag) *DeleteFileSystemLustreConfiguration { + s.FinalBackupTags = v + return s +} + +// SetSkipFinalBackup sets the SkipFinalBackup field's value. +func (s *DeleteFileSystemLustreConfiguration) SetSkipFinalBackup(v bool) *DeleteFileSystemLustreConfiguration { + s.SkipFinalBackup = &v + return s +} + +// The response object for the Amazon FSx for Lustre file system being deleted +// in the DeleteFileSystem operation. +type DeleteFileSystemLustreResponse struct { + _ struct{} `type:"structure"` + + // The ID of the final backup for this file system. + FinalBackupId *string `min:"12" type:"string"` + + // The set of tags applied to the final backup. + FinalBackupTags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s DeleteFileSystemLustreResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemLustreResponse) GoString() string { + return s.String() +} + +// SetFinalBackupId sets the FinalBackupId field's value. +func (s *DeleteFileSystemLustreResponse) SetFinalBackupId(v string) *DeleteFileSystemLustreResponse { + s.FinalBackupId = &v + return s +} + +// SetFinalBackupTags sets the FinalBackupTags field's value. +func (s *DeleteFileSystemLustreResponse) SetFinalBackupTags(v []*Tag) *DeleteFileSystemLustreResponse { + s.FinalBackupTags = v + return s +} + +// The response object for the DeleteFileSystem operation. +type DeleteFileSystemOutput struct { + _ struct{} `type:"structure"` + + // The ID of the file system being deleted. + FileSystemId *string `min:"11" type:"string"` + + // The file system lifecycle for the deletion request. Should be DELETING. + Lifecycle *string `type:"string" enum:"FileSystemLifecycle"` + + // The response object for the Amazon FSx for Lustre file system being deleted + // in the DeleteFileSystem operation. 
+ LustreResponse *DeleteFileSystemLustreResponse `type:"structure"` + + // The response object for the Microsoft Windows file system used in the DeleteFileSystem + // operation. + WindowsResponse *DeleteFileSystemWindowsResponse `type:"structure"` +} + +// String returns the string representation +func (s DeleteFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemOutput) GoString() string { + return s.String() +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DeleteFileSystemOutput) SetFileSystemId(v string) *DeleteFileSystemOutput { + s.FileSystemId = &v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *DeleteFileSystemOutput) SetLifecycle(v string) *DeleteFileSystemOutput { + s.Lifecycle = &v + return s +} + +// SetLustreResponse sets the LustreResponse field's value. +func (s *DeleteFileSystemOutput) SetLustreResponse(v *DeleteFileSystemLustreResponse) *DeleteFileSystemOutput { + s.LustreResponse = v + return s +} + +// SetWindowsResponse sets the WindowsResponse field's value. +func (s *DeleteFileSystemOutput) SetWindowsResponse(v *DeleteFileSystemWindowsResponse) *DeleteFileSystemOutput { + s.WindowsResponse = v + return s +} + +// The configuration object for the Microsoft Windows file system used in the +// DeleteFileSystem operation. +type DeleteFileSystemWindowsConfiguration struct { + _ struct{} `type:"structure"` + + // A set of tags for your final backup. + FinalBackupTags []*Tag `min:"1" type:"list"` + + // By default, Amazon FSx for Windows takes a final backup on your behalf when + // the DeleteFileSystem operation is invoked. Doing this helps protect you from + // data loss, and we highly recommend taking the final backup. If you want to + // skip this backup, use this flag to do so. 
+ SkipFinalBackup *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteFileSystemWindowsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemWindowsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileSystemWindowsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileSystemWindowsConfiguration"} + if s.FinalBackupTags != nil && len(s.FinalBackupTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FinalBackupTags", 1)) + } + if s.FinalBackupTags != nil { + for i, v := range s.FinalBackupTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FinalBackupTags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFinalBackupTags sets the FinalBackupTags field's value. +func (s *DeleteFileSystemWindowsConfiguration) SetFinalBackupTags(v []*Tag) *DeleteFileSystemWindowsConfiguration { + s.FinalBackupTags = v + return s +} + +// SetSkipFinalBackup sets the SkipFinalBackup field's value. +func (s *DeleteFileSystemWindowsConfiguration) SetSkipFinalBackup(v bool) *DeleteFileSystemWindowsConfiguration { + s.SkipFinalBackup = &v + return s +} + +// The response object for the Microsoft Windows file system used in the DeleteFileSystem +// operation. +type DeleteFileSystemWindowsResponse struct { + _ struct{} `type:"structure"` + + // The ID of the final backup for this file system. + FinalBackupId *string `min:"12" type:"string"` + + // The set of tags applied to the final backup. 
+	FinalBackupTags []*Tag `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s DeleteFileSystemWindowsResponse) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteFileSystemWindowsResponse) GoString() string {
+	return s.String()
+}
+
+// SetFinalBackupId sets the FinalBackupId field's value.
+func (s *DeleteFileSystemWindowsResponse) SetFinalBackupId(v string) *DeleteFileSystemWindowsResponse {
+	s.FinalBackupId = &v
+	return s
+}
+
+// SetFinalBackupTags sets the FinalBackupTags field's value.
+func (s *DeleteFileSystemWindowsResponse) SetFinalBackupTags(v []*Tag) *DeleteFileSystemWindowsResponse {
+	s.FinalBackupTags = v
+	return s
+}
+
+// The request object for DescribeBackups operation.
+type DescribeBackupsInput struct {
+	_ struct{} `type:"structure"`
+
+	// IDs of the backups you want to retrieve (String). This overrides any filters.
+	// If any IDs are not found, BackupNotFound will be thrown.
+	BackupIds []*string `type:"list"`
+
+	// Filters structure. Supported names are file-system-id and backup-type.
+	Filters []*Filter `type:"list"`
+
+	// Maximum number of backups to return in the response (integer). This parameter
+	// value must be greater than 0. The number of items that Amazon FSx returns
+	// is the minimum of the MaxResults parameter specified in the request and the
+	// service's internal maximum number of items per page.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// Opaque pagination token returned from a previous DescribeBackups operation
+	// (String). If a token is present, the action continues the list from where the
+	// returning call left off.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeBackupsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBackupsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeBackupsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeBackupsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBackupIds sets the BackupIds field's value.
+func (s *DescribeBackupsInput) SetBackupIds(v []*string) *DescribeBackupsInput {
+	s.BackupIds = v
+	return s
+}
+
+// SetFilters sets the Filters field's value.
+func (s *DescribeBackupsInput) SetFilters(v []*Filter) *DescribeBackupsInput {
+	s.Filters = v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeBackupsInput) SetMaxResults(v int64) *DescribeBackupsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeBackupsInput) SetNextToken(v string) *DescribeBackupsInput {
+	s.NextToken = &v
+	return s
+}
+
+// Response object for DescribeBackups operation.
+type DescribeBackupsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of backups.
+	Backups []*Backup `type:"list"`
+
+	// This is present if there are more backups than returned in the response (String).
+	// You can use the NextToken value in the later request to fetch the backups.
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupsOutput) GoString() string { + return s.String() +} + +// SetBackups sets the Backups field's value. +func (s *DescribeBackupsOutput) SetBackups(v []*Backup) *DescribeBackupsOutput { + s.Backups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeBackupsOutput) SetNextToken(v string) *DescribeBackupsOutput { + s.NextToken = &v + return s +} + +type DescribeDataRepositoryTasksInput struct { + _ struct{} `type:"structure"` + + // (Optional) You can use filters to narrow the DescribeDataRepositoryTasks + // response to include just tasks for specific file systems, or tasks in a specific + // lifecycle state. + Filters []*DataRepositoryTaskFilter `type:"list"` + + // The maximum number of resources to return in the response. This value must + // be an integer greater than zero. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) Opaque pagination token returned from a previous operation (String). + // If present, this token indicates from what point you can continue processing + // the request, where the previous NextToken value left off. + NextToken *string `min:"1" type:"string"` + + // (Optional) IDs of the tasks whose descriptions you want to retrieve (String). + TaskIds []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeDataRepositoryTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataRepositoryTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDataRepositoryTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataRepositoryTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeDataRepositoryTasksInput) SetFilters(v []*DataRepositoryTaskFilter) *DescribeDataRepositoryTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeDataRepositoryTasksInput) SetMaxResults(v int64) *DescribeDataRepositoryTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDataRepositoryTasksInput) SetNextToken(v string) *DescribeDataRepositoryTasksInput { + s.NextToken = &v + return s +} + +// SetTaskIds sets the TaskIds field's value. +func (s *DescribeDataRepositoryTasksInput) SetTaskIds(v []*string) *DescribeDataRepositoryTasksInput { + s.TaskIds = v + return s +} + +type DescribeDataRepositoryTasksOutput struct { + _ struct{} `type:"structure"` + + // The collection of data repository task descriptions returned. + DataRepositoryTasks []*DataRepositoryTask `type:"list"` + + // (Optional) Opaque pagination token returned from a previous operation (String). + // If present, this token indicates from what point you can continue processing + // the request, where the previous NextToken value left off. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeDataRepositoryTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataRepositoryTasksOutput) GoString() string { + return s.String() +} + +// SetDataRepositoryTasks sets the DataRepositoryTasks field's value. +func (s *DescribeDataRepositoryTasksOutput) SetDataRepositoryTasks(v []*DataRepositoryTask) *DescribeDataRepositoryTasksOutput { + s.DataRepositoryTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeDataRepositoryTasksOutput) SetNextToken(v string) *DescribeDataRepositoryTasksOutput { + s.NextToken = &v + return s +} + +// The request object for DescribeFileSystemAliases operation. +type DescribeFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The ID of the file system to return the associated DNS aliases for (String). + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // Maximum number of DNS aliases to return in the response (integer). This parameter + // value must be greater than 0. The number of items that Amazon FSx returns + // is the minimum of the MaxResults parameter specified in the request and the + // service's internal maximum number of items per page. + MaxResults *int64 `min:"1" type:"integer"` + + // Opaque pagination token returned from a previous DescribeFileSystemAliases + // operation (String). If a token is included in the request, the action continues + // the list from where the previous returning call left off. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFileSystemAliasesInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DescribeFileSystemAliasesInput) SetClientRequestToken(v string) *DescribeFileSystemAliasesInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DescribeFileSystemAliasesInput) SetFileSystemId(v string) *DescribeFileSystemAliasesInput { + s.FileSystemId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFileSystemAliasesInput) SetMaxResults(v int64) *DescribeFileSystemAliasesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeFileSystemAliasesInput) SetNextToken(v string) *DescribeFileSystemAliasesInput { + s.NextToken = &v + return s +} + +// The response object for DescribeFileSystemAliases operation. +type DescribeFileSystemAliasesOutput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS aliases currently associated with the specified + // file system. + Aliases []*Alias `type:"list"` + + // Present if there are more DNS aliases than returned in the response (String). + // You can use the NextToken value in a later request to fetch additional descriptions. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. +func (s *DescribeFileSystemAliasesOutput) SetAliases(v []*Alias) *DescribeFileSystemAliasesOutput { + s.Aliases = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFileSystemAliasesOutput) SetNextToken(v string) *DescribeFileSystemAliasesOutput { + s.NextToken = &v + return s +} + +// The request object for DescribeFileSystems operation. +type DescribeFileSystemsInput struct { + _ struct{} `type:"structure"` + + // IDs of the file systems whose descriptions you want to retrieve (String). + FileSystemIds []*string `type:"list"` + + // Maximum number of file systems to return in the response (integer). This + // parameter value must be greater than 0. The number of items that Amazon FSx + // returns is the minimum of the MaxResults parameter specified in the request + // and the service's internal maximum number of items per page. + MaxResults *int64 `min:"1" type:"integer"` + + // Opaque pagination token returned from a previous DescribeFileSystems operation + // (String). 
If a token is present, the action continues the list from where the
+	// returning call left off.
+	NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeFileSystemsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeFileSystemsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeFileSystemsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeFileSystemsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFileSystemIds sets the FileSystemIds field's value.
+func (s *DescribeFileSystemsInput) SetFileSystemIds(v []*string) *DescribeFileSystemsInput {
+	s.FileSystemIds = v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeFileSystemsInput) SetMaxResults(v int64) *DescribeFileSystemsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeFileSystemsInput) SetNextToken(v string) *DescribeFileSystemsInput {
+	s.NextToken = &v
+	return s
+}
+
+// The response object for DescribeFileSystems operation.
+type DescribeFileSystemsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of file system descriptions.
+	FileSystems []*FileSystem `type:"list"`
+
+	// Present if there are more file systems than returned in the response (String).
+	// You can use the NextToken value in the later request to fetch the descriptions.
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemsOutput) GoString() string { + return s.String() +} + +// SetFileSystems sets the FileSystems field's value. +func (s *DescribeFileSystemsOutput) SetFileSystems(v []*FileSystem) *DescribeFileSystemsOutput { + s.FileSystems = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFileSystemsOutput) SetNextToken(v string) *DescribeFileSystemsOutput { + s.NextToken = &v + return s +} + +// The request object of DNS aliases to disassociate from an Amazon FSx for +// Windows File Server file system. +type DisassociateFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS alias names to disassociate, or remove, from + // the file system. + // + // Aliases is a required field + Aliases []*string `type:"list" required:"true"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Specifies the file system from which to disassociate the DNS aliases. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateFileSystemAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateFileSystemAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateFileSystemAliasesInput"} + if s.Aliases == nil { + invalidParams.Add(request.NewErrParamRequired("Aliases")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliases sets the Aliases field's value. +func (s *DisassociateFileSystemAliasesInput) SetAliases(v []*string) *DisassociateFileSystemAliasesInput { + s.Aliases = v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DisassociateFileSystemAliasesInput) SetClientRequestToken(v string) *DisassociateFileSystemAliasesInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DisassociateFileSystemAliasesInput) SetFileSystemId(v string) *DisassociateFileSystemAliasesInput { + s.FileSystemId = &v + return s +} + +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to disassociate from the file system. Use the API operation to +// monitor the status of the aliases Amazon FSx is removing from the file system. +type DisassociateFileSystemAliasesOutput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS aliases that Amazon FSx is attempting to disassociate + // from the file system. 
+	Aliases []*Alias `type:"list"`
+}
+
+// String returns the string representation
+func (s DisassociateFileSystemAliasesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateFileSystemAliasesOutput) GoString() string {
+	return s.String()
+}
+
+// SetAliases sets the Aliases field's value.
+func (s *DisassociateFileSystemAliasesOutput) SetAliases(v []*Alias) *DisassociateFileSystemAliasesOutput {
+	s.Aliases = v
+	return s
+}
+
+// A description of a specific Amazon FSx file system.
+type FileSystem struct {
+	_ struct{} `type:"structure"`
+
+	// A list of administrative actions for the file system that are in process
+	// or waiting to be processed. Administrative actions describe changes to the
+	// Windows file system that you have initiated using the UpdateFileSystem action.
+	AdministrativeActions []*AdministrativeAction `type:"list"`
+
+	// The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z),
+	// also known as Unix time.
+	CreationTime *time.Time `type:"timestamp"`
+
+	// The DNS name for the file system.
+	DNSName *string `min:"16" type:"string"`
+
+	// A structure providing details of any failures that occurred when creating
+	// the file system.
+	FailureDetails *FileSystemFailureDetails `type:"structure"`
+
+	// The system-generated, unique 17-digit ID of the file system.
+	FileSystemId *string `min:"11" type:"string"`
+
+	// The type of Amazon FSx file system, either LUSTRE or WINDOWS.
+	FileSystemType *string `type:"string" enum:"FileSystemType"`
+
+	// The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the
+	// file system's data for Amazon FSx for Windows File Server file systems and
+	// persistent Amazon FSx for Lustre file systems at rest. In either case, if
+	// not specified, the Amazon FSx managed key is used.
The scratch Amazon FSx + // for Lustre file systems are always encrypted at rest using Amazon FSx managed + // keys. For more information, see Encrypt (https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html) + // in the AWS Key Management Service API Reference. + KmsKeyId *string `min:"1" type:"string"` + + // The lifecycle status of the file system, following are the possible values + // and what they mean: + // + // * AVAILABLE - The file system is in a healthy state, and is reachable + // and available for use. + // + // * CREATING - Amazon FSx is creating the new file system. + // + // * DELETING - Amazon FSx is deleting an existing file system. + // + // * FAILED - An existing file system has experienced an unrecoverable failure. + // When creating a new file system, Amazon FSx was unable to create the file + // system. + // + // * MISCONFIGURED indicates that the file system is in a failed but recoverable + // state. + // + // * UPDATING indicates that the file system is undergoing a customer initiated + // update. + Lifecycle *string `type:"string" enum:"FileSystemLifecycle"` + + // The configuration for the Amazon FSx for Lustre file system. + LustreConfiguration *LustreFileSystemConfiguration `type:"structure"` + + // The IDs of the elastic network interface from which a specific file system + // is accessible. The elastic network interface is automatically created in + // the same VPC that the Amazon FSx file system was created in. For more information, + // see Elastic Network Interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) + // in the Amazon EC2 User Guide. + // + // For an Amazon FSx for Windows File Server file system, you can have one network + // interface ID. For an Amazon FSx for Lustre file system, you can have more + // than one. + NetworkInterfaceIds []*string `type:"list"` + + // The AWS account that created the file system. 
If the file system was created + // by an AWS Identity and Access Management (IAM) user, the AWS account to which + // the IAM user belongs is the owner. + OwnerId *string `min:"12" type:"string"` + + // The Amazon Resource Name (ARN) for the file system resource. + ResourceARN *string `min:"8" type:"string"` + + // The storage capacity of the file system in gibibytes (GiB). + StorageCapacity *int64 `type:"integer"` + + // The storage type of the file system. Valid values are SSD and HDD. If set + // to SSD, the file system uses solid state drive storage. If set to HDD, the + // file system uses hard disk drive storage. + StorageType *string `type:"string" enum:"StorageType"` + + // Specifies the IDs of the subnets that the file system is accessible from. + // For Windows MULTI_AZ_1 file system deployment type, there are two subnet + // IDs, one for the preferred file server and one for the standby file server. + // The preferred file server subnet identified in the PreferredSubnetID property. + // All other file systems have only one subnet ID. + // + // For Lustre file systems, and Single-AZ Windows file systems, this is the + // ID of the subnet that contains the endpoint for the file system. For MULTI_AZ_1 + // Windows file systems, the endpoint for the file system is available in the + // PreferredSubnetID. + SubnetIds []*string `type:"list"` + + // The tags to associate with the file system. For more information, see Tagging + // Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) + // in the Amazon EC2 User Guide. + Tags []*Tag `min:"1" type:"list"` + + // The ID of the primary VPC for the file system. + VpcId *string `min:"12" type:"string"` + + // The configuration for this Microsoft Windows file system. 
+ WindowsConfiguration *WindowsFileSystemConfiguration `type:"structure"` +} + +// String returns the string representation +func (s FileSystem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystem) GoString() string { + return s.String() +} + +// SetAdministrativeActions sets the AdministrativeActions field's value. +func (s *FileSystem) SetAdministrativeActions(v []*AdministrativeAction) *FileSystem { + s.AdministrativeActions = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *FileSystem) SetCreationTime(v time.Time) *FileSystem { + s.CreationTime = &v + return s +} + +// SetDNSName sets the DNSName field's value. +func (s *FileSystem) SetDNSName(v string) *FileSystem { + s.DNSName = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *FileSystem) SetFailureDetails(v *FileSystemFailureDetails) *FileSystem { + s.FailureDetails = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *FileSystem) SetFileSystemId(v string) *FileSystem { + s.FileSystemId = &v + return s +} + +// SetFileSystemType sets the FileSystemType field's value. +func (s *FileSystem) SetFileSystemType(v string) *FileSystem { + s.FileSystemType = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *FileSystem) SetKmsKeyId(v string) *FileSystem { + s.KmsKeyId = &v + return s +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *FileSystem) SetLifecycle(v string) *FileSystem { + s.Lifecycle = &v + return s +} + +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *FileSystem) SetLustreConfiguration(v *LustreFileSystemConfiguration) *FileSystem { + s.LustreConfiguration = v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. 
+func (s *FileSystem) SetNetworkInterfaceIds(v []*string) *FileSystem { + s.NetworkInterfaceIds = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *FileSystem) SetOwnerId(v string) *FileSystem { + s.OwnerId = &v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *FileSystem) SetResourceARN(v string) *FileSystem { + s.ResourceARN = &v + return s +} + +// SetStorageCapacity sets the StorageCapacity field's value. +func (s *FileSystem) SetStorageCapacity(v int64) *FileSystem { + s.StorageCapacity = &v + return s +} + +// SetStorageType sets the StorageType field's value. +func (s *FileSystem) SetStorageType(v string) *FileSystem { + s.StorageType = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *FileSystem) SetSubnetIds(v []*string) *FileSystem { + s.SubnetIds = v + return s +} + +// SetTags sets the Tags field's value. +func (s *FileSystem) SetTags(v []*Tag) *FileSystem { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *FileSystem) SetVpcId(v string) *FileSystem { + s.VpcId = &v + return s +} + +// SetWindowsConfiguration sets the WindowsConfiguration field's value. +func (s *FileSystem) SetWindowsConfiguration(v *WindowsFileSystemConfiguration) *FileSystem { + s.WindowsConfiguration = v + return s +} + +// A structure providing details of any failures that occur when creating the +// file system has failed. +type FileSystemFailureDetails struct { + _ struct{} `type:"structure"` + + // A message describing any failures that occurred during file system creation. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s FileSystemFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. 
+func (s *FileSystemFailureDetails) SetMessage(v string) *FileSystemFailureDetails { + s.Message = &v + return s +} + +// No Amazon FSx file systems were found based upon supplied parameters. +type FileSystemNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s FileSystemNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemNotFound) GoString() string { + return s.String() +} + +func newErrorFileSystemNotFound(v protocol.ResponseMetadata) error { + return &FileSystemNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *FileSystemNotFound) Code() string { + return "FileSystemNotFound" +} + +// Message returns the exception's message. +func (s *FileSystemNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *FileSystemNotFound) OrigErr() error { + return nil +} + +func (s *FileSystemNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *FileSystemNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *FileSystemNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// A filter used to restrict the results of describe calls. You can use multiple +// filters to return results that meet all applied filter requirements. +type Filter struct { + _ struct{} `type:"structure"` + + // The name for this filter. + Name *string `type:"string" enum:"FilterName"` + + // The values of the filter. 
These are all the values for any of the applied + // filters. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *Filter) SetValues(v []*string) *Filter { + s.Values = v + return s +} + +// The error returned when a second request is received with the same client +// request token but different parameters settings. A client request token should +// always uniquely identify a single request. +type IncompatibleParameterError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` + + // A parameter that is incompatible with the earlier request. + // + // Parameter is a required field + Parameter *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IncompatibleParameterError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncompatibleParameterError) GoString() string { + return s.String() +} + +func newErrorIncompatibleParameterError(v protocol.ResponseMetadata) error { + return &IncompatibleParameterError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IncompatibleParameterError) Code() string { + return "IncompatibleParameterError" +} + +// Message returns the exception's message. +func (s *IncompatibleParameterError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *IncompatibleParameterError) OrigErr() error { + return nil +} + +func (s *IncompatibleParameterError) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IncompatibleParameterError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IncompatibleParameterError) RequestID() string { + return s.RespMetadata.RequestID +} + +// A generic error indicating a server-side failure. +type InternalServerError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InternalServerError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerError) GoString() string { + return s.String() +} + +func newErrorInternalServerError(v protocol.ResponseMetadata) error { + return &InternalServerError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerError) Code() string { + return "InternalServerError" +} + +// Message returns the exception's message. +func (s *InternalServerError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerError) OrigErr() error { + return nil +} + +func (s *InternalServerError) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID +} + +// The path provided for data repository export isn't valid. +type InvalidExportPath struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidExportPath) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidExportPath) GoString() string { + return s.String() +} + +func newErrorInvalidExportPath(v protocol.ResponseMetadata) error { + return &InvalidExportPath{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportPath) Code() string { + return "InvalidExportPath" +} + +// Message returns the exception's message. +func (s *InvalidExportPath) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportPath) OrigErr() error { + return nil +} + +func (s *InvalidExportPath) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportPath) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportPath) RequestID() string { + return s.RespMetadata.RequestID +} + +// The path provided for data repository import isn't valid. +type InvalidImportPath struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidImportPath) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidImportPath) GoString() string { + return s.String() +} + +func newErrorInvalidImportPath(v protocol.ResponseMetadata) error { + return &InvalidImportPath{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidImportPath) Code() string { + return "InvalidImportPath" +} + +// Message returns the exception's message. +func (s *InvalidImportPath) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidImportPath) OrigErr() error { + return nil +} + +func (s *InvalidImportPath) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidImportPath) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidImportPath) RequestID() string { + return s.RespMetadata.RequestID +} + +// One or more network settings specified in the request are invalid. InvalidVpcId +// means that the ID passed for the virtual private cloud (VPC) is invalid. +// InvalidSubnetIds returns the list of IDs for subnets that are either invalid +// or not part of the VPC specified. InvalidSecurityGroupIds returns the list +// of IDs for security groups that are either invalid or not part of the VPC +// specified. +type InvalidNetworkSettings struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The ID of your Amazon EC2 security group. This ID is used to control network + // access to the endpoint that Amazon FSx creates on your behalf in each subnet. 
+ // For more information, see Amazon EC2 Security Groups for Linux Instances + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) + // in the Amazon EC2 User Guide. + InvalidSecurityGroupId *string `min:"11" type:"string"` + + // The ID for a subnet. A subnet is a range of IP addresses in your virtual + // private cloud (VPC). For more information, see VPC and Subnets (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html) + // in the Amazon VPC User Guide. + InvalidSubnetId *string `min:"15" type:"string"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidNetworkSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidNetworkSettings) GoString() string { + return s.String() +} + +func newErrorInvalidNetworkSettings(v protocol.ResponseMetadata) error { + return &InvalidNetworkSettings{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidNetworkSettings) Code() string { + return "InvalidNetworkSettings" +} + +// Message returns the exception's message. +func (s *InvalidNetworkSettings) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidNetworkSettings) OrigErr() error { + return nil +} + +func (s *InvalidNetworkSettings) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidNetworkSettings) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidNetworkSettings) RequestID() string { + return s.RespMetadata.RequestID +} + +// An invalid value for PerUnitStorageThroughput was provided. Please create +// your file system again, using a valid value. +type InvalidPerUnitStorageThroughput struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s InvalidPerUnitStorageThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidPerUnitStorageThroughput) GoString() string { + return s.String() +} + +func newErrorInvalidPerUnitStorageThroughput(v protocol.ResponseMetadata) error { + return &InvalidPerUnitStorageThroughput{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidPerUnitStorageThroughput) Code() string { + return "InvalidPerUnitStorageThroughput" +} + +// Message returns the exception's message. +func (s *InvalidPerUnitStorageThroughput) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidPerUnitStorageThroughput) OrigErr() error { + return nil +} + +func (s *InvalidPerUnitStorageThroughput) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidPerUnitStorageThroughput) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidPerUnitStorageThroughput) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request object for ListTagsForResource operation. 
+type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Maximum number of tags to return in the response (integer). This parameter + // value must be greater than 0. The number of items that Amazon FSx returns + // is the minimum of the MaxResults parameter specified in the request and the + // service's internal maximum number of items per page. + MaxResults *int64 `min:"1" type:"integer"` + + // Opaque pagination token returned from a previous ListTagsForResource operation + // (String). If a token present, the action continues the list from where the + // returning call left off. + NextToken *string `min:"1" type:"string"` + + // The ARN of the Amazon FSx resource that will have its tags listed. + // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 8 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 8)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListTagsForResourceInput) SetMaxResults(v int64) *ListTagsForResourceInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +// The response object for ListTagsForResource operation. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // This is present if there are more tags than returned in the response (String). + // You can use the NextToken value in the later request to fetch the tags. + NextToken *string `min:"1" type:"string"` + + // A list of tags on the resource. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// The configuration for the Amazon FSx for Lustre file system. +type LustreFileSystemConfiguration struct { + _ struct{} `type:"structure"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A boolean flag indicating whether tags on the file system should be copied + // to backups. 
If it's set to true, all tags on the file system are copied to + // all automatic backups and any user-initiated backups where the user doesn't + // specify any tags. If this value is true, and you specify one or more tags, + // only the specified tags are copied to backups. If you specify one or more + // tags when creating a user-initiated backup, no tags are copied from the file + // system, regardless of this value. (Default = false) + CopyTagsToBackups *bool `type:"boolean"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // The data repository configuration object for Lustre file systems returned + // in the response of the CreateFileSystem operation. + DataRepositoryConfiguration *DataRepositoryConfiguration `type:"structure"` + + // The deployment type of the FSX for Lustre file system. Scratch deployment + // type is designed for temporary storage and shorter-term processing of data. + // + // SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need + // temporary storage and shorter-term processing of data. The SCRATCH_2 deployment + // type provides in-transit encryption of data and higher burst throughput capacity + // than SCRATCH_1. + // + // The PERSISTENT_1 deployment type is used for longer-term storage and workloads + // and encryption of data in transit. To learn more about deployment types, + // see FSx for Lustre Deployment Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html). + // (Default = SCRATCH_1) + DeploymentType *string `type:"string" enum:"LustreDeploymentType"` + + // The type of drive cache used by PERSISTENT_1 file systems that are provisioned + // with HDD storage devices. This parameter is required when storage type is + // HDD. 
Set to READ to improve the performance for frequently accessed files
+	// and allow 20% of the total storage capacity of the file system to be cached.
+func (s *LustreFileSystemConfiguration) SetCopyTagsToBackups(v bool) *LustreFileSystemConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *LustreFileSystemConfiguration) SetDailyAutomaticBackupStartTime(v string) *LustreFileSystemConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetDataRepositoryConfiguration sets the DataRepositoryConfiguration field's value. +func (s *LustreFileSystemConfiguration) SetDataRepositoryConfiguration(v *DataRepositoryConfiguration) *LustreFileSystemConfiguration { + s.DataRepositoryConfiguration = v + return s +} + +// SetDeploymentType sets the DeploymentType field's value. +func (s *LustreFileSystemConfiguration) SetDeploymentType(v string) *LustreFileSystemConfiguration { + s.DeploymentType = &v + return s +} + +// SetDriveCacheType sets the DriveCacheType field's value. +func (s *LustreFileSystemConfiguration) SetDriveCacheType(v string) *LustreFileSystemConfiguration { + s.DriveCacheType = &v + return s +} + +// SetMountName sets the MountName field's value. +func (s *LustreFileSystemConfiguration) SetMountName(v string) *LustreFileSystemConfiguration { + s.MountName = &v + return s +} + +// SetPerUnitStorageThroughput sets the PerUnitStorageThroughput field's value. +func (s *LustreFileSystemConfiguration) SetPerUnitStorageThroughput(v int64) *LustreFileSystemConfiguration { + s.PerUnitStorageThroughput = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *LustreFileSystemConfiguration) SetWeeklyMaintenanceStartTime(v string) *LustreFileSystemConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// A file system configuration is required for this operation. 
+type MissingFileSystemConfiguration struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s MissingFileSystemConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MissingFileSystemConfiguration) GoString() string { + return s.String() +} + +func newErrorMissingFileSystemConfiguration(v protocol.ResponseMetadata) error { + return &MissingFileSystemConfiguration{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *MissingFileSystemConfiguration) Code() string { + return "MissingFileSystemConfiguration" +} + +// Message returns the exception's message. +func (s *MissingFileSystemConfiguration) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MissingFileSystemConfiguration) OrigErr() error { + return nil +} + +func (s *MissingFileSystemConfiguration) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MissingFileSystemConfiguration) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MissingFileSystemConfiguration) RequestID() string { + return s.RespMetadata.RequestID +} + +// The resource specified for the tagging operation is not a resource type owned +// by Amazon FSx. Use the API of the relevant service to perform the operation. +type NotServiceResourceError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. 
+ Message_ *string `locationName:"Message" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the non-Amazon FSx resource. + // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s NotServiceResourceError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotServiceResourceError) GoString() string { + return s.String() +} + +func newErrorNotServiceResourceError(v protocol.ResponseMetadata) error { + return &NotServiceResourceError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *NotServiceResourceError) Code() string { + return "NotServiceResourceError" +} + +// Message returns the exception's message. +func (s *NotServiceResourceError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *NotServiceResourceError) OrigErr() error { + return nil +} + +func (s *NotServiceResourceError) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *NotServiceResourceError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *NotServiceResourceError) RequestID() string { + return s.RespMetadata.RequestID +} + +// The resource specified does not support tagging. +type ResourceDoesNotSupportTagging struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the resource that doesn't support tagging. 
+ // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceDoesNotSupportTagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDoesNotSupportTagging) GoString() string { + return s.String() +} + +func newErrorResourceDoesNotSupportTagging(v protocol.ResponseMetadata) error { + return &ResourceDoesNotSupportTagging{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDoesNotSupportTagging) Code() string { + return "ResourceDoesNotSupportTagging" +} + +// Message returns the exception's message. +func (s *ResourceDoesNotSupportTagging) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDoesNotSupportTagging) OrigErr() error { + return nil +} + +func (s *ResourceDoesNotSupportTagging) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDoesNotSupportTagging) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDoesNotSupportTagging) RequestID() string { + return s.RespMetadata.RequestID +} + +// The resource specified by the Amazon Resource Name (ARN) can't be found. +type ResourceNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` + + // The resource ARN of the resource that can't be found. 
+ // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFound) GoString() string { + return s.String() +} + +func newErrorResourceNotFound(v protocol.ResponseMetadata) error { + return &ResourceNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFound) Code() string { + return "ResourceNotFound" +} + +// Message returns the exception's message. +func (s *ResourceNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFound) OrigErr() error { + return nil +} + +func (s *ResourceNotFound) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// The configuration of the self-managed Microsoft Active Directory (AD) directory +// to which the Windows File Server instance is joined. +type SelfManagedActiveDirectoryAttributes struct { + _ struct{} `type:"structure"` + + // A list of up to two IP addresses of DNS servers or domain controllers in + // the self-managed AD directory. + DnsIps []*string `min:"1" type:"list"` + + // The fully qualified domain name of the self-managed AD directory. + DomainName *string `min:"1" type:"string"` + + // The name of the domain group whose members have administrative privileges + // for the FSx file system. 
+ FileSystemAdministratorsGroup *string `min:"1" type:"string"` + + // The fully qualified distinguished name of the organizational unit within + // the self-managed AD directory to which the Windows File Server instance is + // joined. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // The user name for the service account on your self-managed AD domain that + // FSx uses to join to your AD domain. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryAttributes) GoString() string { + return s.String() +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetDnsIps(v []*string) *SelfManagedActiveDirectoryAttributes { + s.DnsIps = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetDomainName(v string) *SelfManagedActiveDirectoryAttributes { + s.DomainName = &v + return s +} + +// SetFileSystemAdministratorsGroup sets the FileSystemAdministratorsGroup field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetFileSystemAdministratorsGroup(v string) *SelfManagedActiveDirectoryAttributes { + s.FileSystemAdministratorsGroup = &v + return s +} + +// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value. +func (s *SelfManagedActiveDirectoryAttributes) SetOrganizationalUnitDistinguishedName(v string) *SelfManagedActiveDirectoryAttributes { + s.OrganizationalUnitDistinguishedName = &v + return s +} + +// SetUserName sets the UserName field's value. 
+func (s *SelfManagedActiveDirectoryAttributes) SetUserName(v string) *SelfManagedActiveDirectoryAttributes { + s.UserName = &v + return s +} + +// The configuration that Amazon FSx uses to join the Windows File Server instance +// to your self-managed (including on-premises) Microsoft Active Directory (AD) +// directory. +type SelfManagedActiveDirectoryConfiguration struct { + _ struct{} `type:"structure"` + + // A list of up to two IP addresses of DNS servers or domain controllers in + // the self-managed AD directory. The IP addresses need to be either in the + // same VPC CIDR range as the one in which your Amazon FSx file system is being + // created, or in the private IP version 4 (IPv4) address ranges, as specified + // in RFC 1918 (http://www.faqs.org/rfcs/rfc1918.html): + // + // * 10.0.0.0 - 10.255.255.255 (10/8 prefix) + // + // * 172.16.0.0 - 172.31.255.255 (172.16/12 prefix) + // + // * 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) + // + // DnsIps is a required field + DnsIps []*string `min:"1" type:"list" required:"true"` + + // The fully qualified domain name of the self-managed AD directory, such as + // corp.example.com. + // + // DomainName is a required field + DomainName *string `min:"1" type:"string" required:"true"` + + // (Optional) The name of the domain group whose members are granted administrative + // privileges for the file system. Administrative privileges include taking + // ownership of files and folders, setting audit controls (audit ACLs) on files + // and folders, and administering the file system remotely by using the FSx + // Remote PowerShell. The group that you specify must already exist in your + // domain. If you don't provide one, your AD domain's Domain Admins group is + // used. 
+ FileSystemAdministratorsGroup *string `min:"1" type:"string"` + + // (Optional) The fully qualified distinguished name of the organizational unit + // within your self-managed AD directory that the Windows File Server instance + // will join. Amazon FSx only accepts OU as the direct parent of the file system. + // An example is OU=FSx,DC=yourdomain,DC=corp,DC=com. To learn more, see RFC + // 2253 (https://tools.ietf.org/html/rfc2253). If none is provided, the FSx + // file system is created in the default location of your self-managed AD directory. + // + // Only Organizational Unit (OU) objects can be the direct parent of the file + // system that you're creating. + OrganizationalUnitDistinguishedName *string `min:"1" type:"string"` + + // The password for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. + // + // Password is a required field + Password *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The user name for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. This account must have the + // permission to join computers to the domain in the organizational unit provided + // in OrganizationalUnitDistinguishedName, or in the default location of your + // AD domain. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SelfManagedActiveDirectoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelfManagedActiveDirectoryConfiguration"} + if s.DnsIps == nil { + invalidParams.Add(request.NewErrParamRequired("DnsIps")) + } + if s.DnsIps != nil && len(s.DnsIps) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DnsIps", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } + if s.FileSystemAdministratorsGroup != nil && len(*s.FileSystemAdministratorsGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemAdministratorsGroup", 1)) + } + if s.OrganizationalUnitDistinguishedName != nil && len(*s.OrganizationalUnitDistinguishedName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnitDistinguishedName", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetDnsIps(v []*string) *SelfManagedActiveDirectoryConfiguration { + s.DnsIps = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetDomainName(v string) *SelfManagedActiveDirectoryConfiguration { + s.DomainName = &v + return s +} + +// SetFileSystemAdministratorsGroup sets the FileSystemAdministratorsGroup field's value. 
+func (s *SelfManagedActiveDirectoryConfiguration) SetFileSystemAdministratorsGroup(v string) *SelfManagedActiveDirectoryConfiguration { + s.FileSystemAdministratorsGroup = &v + return s +} + +// SetOrganizationalUnitDistinguishedName sets the OrganizationalUnitDistinguishedName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetOrganizationalUnitDistinguishedName(v string) *SelfManagedActiveDirectoryConfiguration { + s.OrganizationalUnitDistinguishedName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetPassword(v string) *SelfManagedActiveDirectoryConfiguration { + s.Password = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SelfManagedActiveDirectoryConfiguration) SetUserName(v string) *SelfManagedActiveDirectoryConfiguration { + s.UserName = &v + return s +} + +// The configuration that Amazon FSx uses to join the Windows File Server instance +// to a self-managed Microsoft Active Directory (AD) directory. +type SelfManagedActiveDirectoryConfigurationUpdates struct { + _ struct{} `type:"structure"` + + // A list of up to two IP addresses of DNS servers or domain controllers in + // the self-managed AD directory. + DnsIps []*string `min:"1" type:"list"` + + // The password for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. + Password *string `min:"1" type:"string" sensitive:"true"` + + // The user name for the service account on your self-managed AD domain that + // Amazon FSx will use to join to your AD domain. This account must have the + // permission to join computers to the domain in the organizational unit provided + // in OrganizationalUnitDistinguishedName. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SelfManagedActiveDirectoryConfigurationUpdates) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelfManagedActiveDirectoryConfigurationUpdates) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelfManagedActiveDirectoryConfigurationUpdates"} + if s.DnsIps != nil && len(s.DnsIps) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DnsIps", 1)) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDnsIps sets the DnsIps field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetDnsIps(v []*string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.DnsIps = v + return s +} + +// SetPassword sets the Password field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetPassword(v string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.Password = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetUserName(v string) *SelfManagedActiveDirectoryConfigurationUpdates { + s.UserName = &v + return s +} + +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +type ServiceLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Enumeration of the service limit that was exceeded. 
+ // + // Limit is a required field + Limit *string `type:"string" required:"true" enum:"ServiceLimit"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s ServiceLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceLimitExceeded) GoString() string { + return s.String() +} + +func newErrorServiceLimitExceeded(v protocol.ResponseMetadata) error { + return &ServiceLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServiceLimitExceeded) Code() string { + return "ServiceLimitExceeded" +} + +// Message returns the exception's message. +func (s *ServiceLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceLimitExceeded) OrigErr() error { + return nil +} + +func (s *ServiceLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Specifies a key-value pair for a resource tag. +type Tag struct { + _ struct{} `type:"structure"` + + // A value that specifies the TagKey, the name of the tag. Tag keys must be + // unique for the resource to which they are attached. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // A value that specifies the TagValue, the value assigned to the corresponding + // tag key. Tag values can be null and don't have to be unique in a tag set. 
+ // For example, you can have a key-value pair in a tag set of finances : April + // and also of payroll : April. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// The request object for the TagResource operation. +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon FSx resource that you want to + // tag. + // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` + + // A list of tags for the resource. If a tag with a given key already exists, + // the value is replaced by the one specified in this parameter. 
+ // + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 8 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 8)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +// The response object for the TagResource operation. +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The requested operation is not supported for this resource or API. 
+type UnsupportedOperation struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A detailed error message. + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s UnsupportedOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedOperation) GoString() string { + return s.String() +} + +func newErrorUnsupportedOperation(v protocol.ResponseMetadata) error { + return &UnsupportedOperation{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedOperation) Code() string { + return "UnsupportedOperation" +} + +// Message returns the exception's message. +func (s *UnsupportedOperation) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedOperation) OrigErr() error { + return nil +} + +func (s *UnsupportedOperation) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedOperation) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedOperation) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request object for UntagResource action. +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon FSx resource to untag. + // + // ResourceARN is a required field + ResourceARN *string `min:"8" type:"string" required:"true"` + + // A list of keys of tags on the resource to untag. In case the tag key doesn't + // exist, the call will still succeed to be idempotent. 
+ // + // TagKeys is a required field + TagKeys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 8 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 8)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +// The response object for UntagResource action. +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// The request object for the UpdateFileSystem operation. +type UpdateFileSystemInput struct { + _ struct{} `type:"structure"` + + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // updates. 
This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Identifies the file system that you are updating. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // The configuration object for Amazon FSx for Lustre file systems used in the + // UpdateFileSystem operation. + LustreConfiguration *UpdateFileSystemLustreConfiguration `type:"structure"` + + // Use this parameter to increase the storage capacity of an Amazon FSx file + // system. Specifies the storage capacity target value, GiB, to increase the + // storage capacity for the file system that you're updating. You cannot make + // a storage capacity increase request if there is an existing storage capacity + // increase request in progress. + // + // For Windows file systems, the storage capacity target value must be at least + // 10 percent (%) greater than the current storage capacity value. In order + // to increase storage capacity, the file system must have at least 16 MB/s + // of throughput capacity. + // + // For Lustre file systems, the storage capacity target value can be the following: + // + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are + // in multiples of 2400 GiB. The value must be greater than the current storage + // capacity. + // + // * For PERSISTENT HDD file systems, valid values are multiples of 6000 + // GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB + // file systems. The values must be greater than the current storage capacity. + // + // * For SCRATCH_1 file systems, you cannot increase the storage capacity. 
+ // + // For more information, see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) + // in the Amazon FSx for Windows File Server User Guide and Managing storage + // and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) + // in the Amazon FSx for Lustre User Guide. + StorageCapacity *int64 `type:"integer"` + + // The configuration updates for an Amazon FSx for Windows File Server file + // system. + WindowsConfiguration *UpdateFileSystemWindowsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s UpdateFileSystemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFileSystemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFileSystemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFileSystemInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.WindowsConfiguration != nil { + if err := s.WindowsConfiguration.Validate(); err != nil { + invalidParams.AddNested("WindowsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. 
+func (s *UpdateFileSystemInput) SetClientRequestToken(v string) *UpdateFileSystemInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *UpdateFileSystemInput) SetFileSystemId(v string) *UpdateFileSystemInput { + s.FileSystemId = &v + return s +} + +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *UpdateFileSystemInput) SetLustreConfiguration(v *UpdateFileSystemLustreConfiguration) *UpdateFileSystemInput { + s.LustreConfiguration = v + return s +} + +// SetStorageCapacity sets the StorageCapacity field's value. +func (s *UpdateFileSystemInput) SetStorageCapacity(v int64) *UpdateFileSystemInput { + s.StorageCapacity = &v + return s +} + +// SetWindowsConfiguration sets the WindowsConfiguration field's value. +func (s *UpdateFileSystemInput) SetWindowsConfiguration(v *UpdateFileSystemWindowsConfiguration) *UpdateFileSystemInput { + s.WindowsConfiguration = v + return s +} + +// The configuration object for Amazon FSx for Lustre file systems used in the +// UpdateFileSystem operation. +type UpdateFileSystemLustreConfiguration struct { + _ struct{} `type:"structure"` + + // (Optional) When you create your file system, your existing S3 objects appear + // as file and directory listings. Use this property to choose how Amazon FSx + // keeps your file and directory listing up to date as you add or modify objects + // in your linked S3 bucket. AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. + // FSx does not update the file and directory listing for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. 
+ // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). + AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // (Optional) The preferred start time to perform weekly maintenance, formatted + // d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, + // beginning with Monday and ending with Sunday. + WeeklyMaintenanceStartTime *string `min:"7" type:"string"` +} + +// String returns the string representation +func (s UpdateFileSystemLustreConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFileSystemLustreConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateFileSystemLustreConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFileSystemLustreConfiguration"} + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } + if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoImportPolicy sets the AutoImportPolicy field's value. +func (s *UpdateFileSystemLustreConfiguration) SetAutoImportPolicy(v string) *UpdateFileSystemLustreConfiguration { + s.AutoImportPolicy = &v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *UpdateFileSystemLustreConfiguration) SetAutomaticBackupRetentionDays(v int64) *UpdateFileSystemLustreConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *UpdateFileSystemLustreConfiguration) SetDailyAutomaticBackupStartTime(v string) *UpdateFileSystemLustreConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *UpdateFileSystemLustreConfiguration) SetWeeklyMaintenanceStartTime(v string) *UpdateFileSystemLustreConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// The response object for the UpdateFileSystem operation. +type UpdateFileSystemOutput struct { + _ struct{} `type:"structure"` + + // A description of the file system that was updated. 
+ FileSystem *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s UpdateFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFileSystemOutput) GoString() string { + return s.String() +} + +// SetFileSystem sets the FileSystem field's value. +func (s *UpdateFileSystemOutput) SetFileSystem(v *FileSystem) *UpdateFileSystemOutput { + s.FileSystem = v + return s +} + +// Updates the configuration for an existing Amazon FSx for Windows File Server +// file system. Amazon FSx only overwrites existing properties with non-null +// values provided in the request. +type UpdateFileSystemWindowsConfiguration struct { + _ struct{} `type:"structure"` + + // The number of days to retain automatic daily backups. Setting this to zero + // (0) disables automatic daily backups. You can retain automatic daily backups + // for a maximum of 90 days. For more information, see Working with Automatic + // Daily Backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#automatic-backups). + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // The preferred time to start the daily automatic backup, in the UTC time zone, + // for example, 02:00 + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // The configuration Amazon FSx uses to join the Windows File Server instance + // to the self-managed Microsoft AD directory. You cannot make a self-managed + // Microsoft AD update request if there is an existing self-managed Microsoft + // AD update request in progress. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationUpdates `type:"structure"` + + // Sets the target value for a file system's throughput capacity, in MB/s, that + // you are updating the file system to. Valid values are 8, 16, 32, 64, 128, + // 256, 512, 1024, 2048. 
You cannot make a throughput capacity update request + // if there is an existing throughput capacity update request in progress. For + // more information, see Managing Throughput Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-throughput-capacity.html). + ThroughputCapacity *int64 `min:"8" type:"integer"` + + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. Where d is the weekday number, from 1 through 7, with + // 1 = Monday and 7 = Sunday. + WeeklyMaintenanceStartTime *string `min:"7" type:"string"` +} + +// String returns the string representation +func (s UpdateFileSystemWindowsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFileSystemWindowsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFileSystemWindowsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFileSystemWindowsConfiguration"} + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } + if s.ThroughputCapacity != nil && *s.ThroughputCapacity < 8 { + invalidParams.Add(request.NewErrParamMinValue("ThroughputCapacity", 8)) + } + if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { + invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) + } + if s.SelfManagedActiveDirectoryConfiguration != nil { + if err := s.SelfManagedActiveDirectoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("SelfManagedActiveDirectoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays 
field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetAutomaticBackupRetentionDays(v int64) *UpdateFileSystemWindowsConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetDailyAutomaticBackupStartTime(v string) *UpdateFileSystemWindowsConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryConfigurationUpdates) *UpdateFileSystemWindowsConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + +// SetThroughputCapacity sets the ThroughputCapacity field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetThroughputCapacity(v int64) *UpdateFileSystemWindowsConfiguration { + s.ThroughputCapacity = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v string) *UpdateFileSystemWindowsConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// The configuration for this Microsoft Windows file system. +type WindowsFileSystemConfiguration struct { + _ struct{} `type:"structure"` + + // The ID for an existing Microsoft Active Directory instance that the file + // system should join when it's created. + ActiveDirectoryId *string `min:"12" type:"string"` + + // An array of one or more DNS aliases that are currently associated with the + // Amazon FSx file system. Aliases allow you to use existing DNS names to access + // the data in your Amazon FSx file system. You can associate up to 50 aliases + // with a file system at any time. 
You can associate additional DNS aliases + // after you create the file system using the AssociateFileSystemAliases operation. + // You can remove DNS aliases from the file system after it is created using + // the DisassociateFileSystemAliases operation. You only need to specify the + // alias name in the request payload. For more information, see DNS aliases + // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html). + Aliases []*Alias `type:"list"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A boolean flag indicating whether tags on the file system should be copied + // to backups. This value defaults to false. If it's set to true, all tags on + // the file system are copied to all automatic backups and any user-initiated + // backups where the user doesn't specify any tags. If this value is true, and + // you specify one or more tags, only the specified tags are copied to backups. + // If you specify one or more tags when creating a user-initiated backup, no + // tags are copied from the file system, regardless of this value. + CopyTagsToBackups *bool `type:"boolean"` + + // The preferred time to take daily automatic backups, in the UTC time zone. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // Specifies the file system deployment type, valid values are the following: + // + // * MULTI_AZ_1 - Specifies a high availability file system that is configured + // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, + // and supports SSD and HDD storage. + // + // * SINGLE_AZ_1 - (Default) Specifies a file system that is configured for + // single AZ redundancy, only supports SSD storage. + // + // * SINGLE_AZ_2 - Latest generation Single AZ file system. 
Specifies a file + // system that is configured for single AZ redundancy and supports SSD and + // HDD storage. + // + // For more information, see Single-AZ and Multi-AZ File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). + DeploymentType *string `type:"string" enum:"WindowsDeploymentType"` + + // The list of maintenance operations in progress for this file system. + MaintenanceOperationsInProgress []*string `type:"list"` + + // For MULTI_AZ_1 deployment types, the IP address of the primary, or preferred, + // file server. + // + // Use this IP address when mounting the file system on Linux SMB clients or + // Windows SMB clients that are not joined to a Microsoft Active Directory. + // Applicable for all Windows file system deployment types. This IP address + // is temporarily unavailable when the file system is undergoing maintenance. + // For Linux and Windows SMB clients that are joined to an Active Directory, + // use the file system's DNSName instead. For more information on mapping and + // mounting file shares, see Accessing File Shares (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html). + PreferredFileServerIp *string `min:"7" type:"string"` + + // For MULTI_AZ_1 deployment types, it specifies the ID of the subnet where + // the preferred file server is located. Must be one of the two subnet IDs specified + // in SubnetIds property. Amazon FSx serves traffic from this subnet except + // in the event of a failover to the secondary file server. + // + // For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this value is the same + // as that for SubnetIDs. 
For more information, see Availability and Durability: + // Single-AZ and Multi-AZ File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html#single-multi-az-resources) + PreferredSubnetId *string `min:"15" type:"string"` + + // For MULTI_AZ_1 deployment types, use this endpoint when performing administrative + // tasks on the file system using Amazon FSx Remote PowerShell. + // + // For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this is the DNS name of + // the file system. + // + // This endpoint is temporarily unavailable when the file system is undergoing + // maintenance. + RemoteAdministrationEndpoint *string `min:"16" type:"string"` + + // The configuration of the self-managed Microsoft Active Directory (AD) directory + // to which the Windows File Server instance is joined. + SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryAttributes `type:"structure"` + + // The throughput of an Amazon FSx file system, measured in megabytes per second. + ThroughputCapacity *int64 `min:"8" type:"integer"` + + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. d is the weekday number, from 1 through 7, beginning + // with Monday and ending with Sunday. + WeeklyMaintenanceStartTime *string `min:"7" type:"string"` +} + +// String returns the string representation +func (s WindowsFileSystemConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WindowsFileSystemConfiguration) GoString() string { + return s.String() +} + +// SetActiveDirectoryId sets the ActiveDirectoryId field's value. +func (s *WindowsFileSystemConfiguration) SetActiveDirectoryId(v string) *WindowsFileSystemConfiguration { + s.ActiveDirectoryId = &v + return s +} + +// SetAliases sets the Aliases field's value. 
+func (s *WindowsFileSystemConfiguration) SetAliases(v []*Alias) *WindowsFileSystemConfiguration { + s.Aliases = v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *WindowsFileSystemConfiguration) SetAutomaticBackupRetentionDays(v int64) *WindowsFileSystemConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetCopyTagsToBackups sets the CopyTagsToBackups field's value. +func (s *WindowsFileSystemConfiguration) SetCopyTagsToBackups(v bool) *WindowsFileSystemConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *WindowsFileSystemConfiguration) SetDailyAutomaticBackupStartTime(v string) *WindowsFileSystemConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + +// SetDeploymentType sets the DeploymentType field's value. +func (s *WindowsFileSystemConfiguration) SetDeploymentType(v string) *WindowsFileSystemConfiguration { + s.DeploymentType = &v + return s +} + +// SetMaintenanceOperationsInProgress sets the MaintenanceOperationsInProgress field's value. +func (s *WindowsFileSystemConfiguration) SetMaintenanceOperationsInProgress(v []*string) *WindowsFileSystemConfiguration { + s.MaintenanceOperationsInProgress = v + return s +} + +// SetPreferredFileServerIp sets the PreferredFileServerIp field's value. +func (s *WindowsFileSystemConfiguration) SetPreferredFileServerIp(v string) *WindowsFileSystemConfiguration { + s.PreferredFileServerIp = &v + return s +} + +// SetPreferredSubnetId sets the PreferredSubnetId field's value. +func (s *WindowsFileSystemConfiguration) SetPreferredSubnetId(v string) *WindowsFileSystemConfiguration { + s.PreferredSubnetId = &v + return s +} + +// SetRemoteAdministrationEndpoint sets the RemoteAdministrationEndpoint field's value. 
+func (s *WindowsFileSystemConfiguration) SetRemoteAdministrationEndpoint(v string) *WindowsFileSystemConfiguration { + s.RemoteAdministrationEndpoint = &v + return s +} + +// SetSelfManagedActiveDirectoryConfiguration sets the SelfManagedActiveDirectoryConfiguration field's value. +func (s *WindowsFileSystemConfiguration) SetSelfManagedActiveDirectoryConfiguration(v *SelfManagedActiveDirectoryAttributes) *WindowsFileSystemConfiguration { + s.SelfManagedActiveDirectoryConfiguration = v + return s +} + +// SetThroughputCapacity sets the ThroughputCapacity field's value. +func (s *WindowsFileSystemConfiguration) SetThroughputCapacity(v int64) *WindowsFileSystemConfiguration { + s.ThroughputCapacity = &v + return s +} + +// SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. +func (s *WindowsFileSystemConfiguration) SetWeeklyMaintenanceStartTime(v string) *WindowsFileSystemConfiguration { + s.WeeklyMaintenanceStartTime = &v + return s +} + +// The type of error relating to Microsoft Active Directory. NOT_FOUND means +// that no directory was found by specifying the given directory. INCOMPATIBLE_MODE +// means that the directory specified is not a Microsoft AD directory. WRONG_VPC +// means that the specified directory isn't accessible from the specified VPC. +// WRONG_STAGE means that the specified directory isn't currently in the ACTIVE +// state. 
+const ( + // ActiveDirectoryErrorTypeDomainNotFound is a ActiveDirectoryErrorType enum value + ActiveDirectoryErrorTypeDomainNotFound = "DOMAIN_NOT_FOUND" + + // ActiveDirectoryErrorTypeIncompatibleDomainMode is a ActiveDirectoryErrorType enum value + ActiveDirectoryErrorTypeIncompatibleDomainMode = "INCOMPATIBLE_DOMAIN_MODE" + + // ActiveDirectoryErrorTypeWrongVpc is a ActiveDirectoryErrorType enum value + ActiveDirectoryErrorTypeWrongVpc = "WRONG_VPC" + + // ActiveDirectoryErrorTypeInvalidDomainStage is a ActiveDirectoryErrorType enum value + ActiveDirectoryErrorTypeInvalidDomainStage = "INVALID_DOMAIN_STAGE" +) + +// ActiveDirectoryErrorType_Values returns all elements of the ActiveDirectoryErrorType enum +func ActiveDirectoryErrorType_Values() []string { + return []string{ + ActiveDirectoryErrorTypeDomainNotFound, + ActiveDirectoryErrorTypeIncompatibleDomainMode, + ActiveDirectoryErrorTypeWrongVpc, + ActiveDirectoryErrorTypeInvalidDomainStage, + } +} + +// Describes the type of administrative action, as follows: +// +// * FILE_SYSTEM_UPDATE - A file system update administrative action initiated +// by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI +// (update-file-system). +// +// * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase +// a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION +// task starts. For Windows, storage optimization is the process of migrating +// the file system data to the new, larger disks. For Lustre, storage optimization +// consists of rebalancing the data across the existing and newly added file +// servers. You can track the storage optimization progress using the ProgressPercent +// property. When STORAGE_OPTIMIZATION completes successfully, the parent +// FILE_SYSTEM_UPDATE action status changes to COMPLETED. 
For more information, +// see Managing storage capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html) +// in the Amazon FSx for Windows File Server User Guide and Managing storage +// and throughput capacity (https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html) +// in the Amazon FSx for Lustre User Guide. +// +// * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a +// new DNS alias with the file system. For more information, see . +// +// * FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate +// a DNS alias from the file system. For more information, see . +const ( + // AdministrativeActionTypeFileSystemUpdate is a AdministrativeActionType enum value + AdministrativeActionTypeFileSystemUpdate = "FILE_SYSTEM_UPDATE" + + // AdministrativeActionTypeStorageOptimization is a AdministrativeActionType enum value + AdministrativeActionTypeStorageOptimization = "STORAGE_OPTIMIZATION" + + // AdministrativeActionTypeFileSystemAliasAssociation is a AdministrativeActionType enum value + AdministrativeActionTypeFileSystemAliasAssociation = "FILE_SYSTEM_ALIAS_ASSOCIATION" + + // AdministrativeActionTypeFileSystemAliasDisassociation is a AdministrativeActionType enum value + AdministrativeActionTypeFileSystemAliasDisassociation = "FILE_SYSTEM_ALIAS_DISASSOCIATION" +) + +// AdministrativeActionType_Values returns all elements of the AdministrativeActionType enum +func AdministrativeActionType_Values() []string { + return []string{ + AdministrativeActionTypeFileSystemUpdate, + AdministrativeActionTypeStorageOptimization, + AdministrativeActionTypeFileSystemAliasAssociation, + AdministrativeActionTypeFileSystemAliasDisassociation, + } +} + +const ( + // AliasLifecycleAvailable is a AliasLifecycle enum value + AliasLifecycleAvailable = "AVAILABLE" + + // AliasLifecycleCreating is a AliasLifecycle enum value + AliasLifecycleCreating = "CREATING" + + // AliasLifecycleDeleting is a 
AliasLifecycle enum value
+	AliasLifecycleDeleting = "DELETING"
+
+	// AliasLifecycleCreateFailed is a AliasLifecycle enum value
+	AliasLifecycleCreateFailed = "CREATE_FAILED"
+
+	// AliasLifecycleDeleteFailed is a AliasLifecycle enum value
+	AliasLifecycleDeleteFailed = "DELETE_FAILED"
+)
+
+// AliasLifecycle_Values returns all elements of the AliasLifecycle enum
+func AliasLifecycle_Values() []string {
+	return []string{
+		AliasLifecycleAvailable,
+		AliasLifecycleCreating,
+		AliasLifecycleDeleting,
+		AliasLifecycleCreateFailed,
+		AliasLifecycleDeleteFailed,
+	}
+}
+
+const (
+	// AutoImportPolicyTypeNone is a AutoImportPolicyType enum value
+	AutoImportPolicyTypeNone = "NONE"
+
+	// AutoImportPolicyTypeNew is a AutoImportPolicyType enum value
+	AutoImportPolicyTypeNew = "NEW"
+
+	// AutoImportPolicyTypeNewChanged is a AutoImportPolicyType enum value
+	AutoImportPolicyTypeNewChanged = "NEW_CHANGED"
+)
+
+// AutoImportPolicyType_Values returns all elements of the AutoImportPolicyType enum
+func AutoImportPolicyType_Values() []string {
+	return []string{
+		AutoImportPolicyTypeNone,
+		AutoImportPolicyTypeNew,
+		AutoImportPolicyTypeNewChanged,
+	}
+}
+
+// The lifecycle status of the backup.
+//
+// * AVAILABLE - The backup is fully available.
+//
+// * PENDING - For user-initiated backups on Lustre file systems only; Amazon
+// FSx has not started creating the backup.
+//
+// * CREATING - Amazon FSx is creating the new user-initiated backup.
+//
+// * TRANSFERRING - For user-initiated backups on Lustre file systems only;
+// Amazon FSx is backing up the file system.
+//
+// * DELETED - Amazon FSx deleted the backup and it is no longer available.
+//
+// * FAILED - Amazon FSx could not complete the backup.
+const ( + // BackupLifecycleAvailable is a BackupLifecycle enum value + BackupLifecycleAvailable = "AVAILABLE" + + // BackupLifecycleCreating is a BackupLifecycle enum value + BackupLifecycleCreating = "CREATING" + + // BackupLifecycleTransferring is a BackupLifecycle enum value + BackupLifecycleTransferring = "TRANSFERRING" + + // BackupLifecycleDeleted is a BackupLifecycle enum value + BackupLifecycleDeleted = "DELETED" + + // BackupLifecycleFailed is a BackupLifecycle enum value + BackupLifecycleFailed = "FAILED" + + // BackupLifecyclePending is a BackupLifecycle enum value + BackupLifecyclePending = "PENDING" +) + +// BackupLifecycle_Values returns all elements of the BackupLifecycle enum +func BackupLifecycle_Values() []string { + return []string{ + BackupLifecycleAvailable, + BackupLifecycleCreating, + BackupLifecycleTransferring, + BackupLifecycleDeleted, + BackupLifecycleFailed, + BackupLifecyclePending, + } +} + +// The type of the backup. +const ( + // BackupTypeAutomatic is a BackupType enum value + BackupTypeAutomatic = "AUTOMATIC" + + // BackupTypeUserInitiated is a BackupType enum value + BackupTypeUserInitiated = "USER_INITIATED" + + // BackupTypeAwsBackup is a BackupType enum value + BackupTypeAwsBackup = "AWS_BACKUP" +) + +// BackupType_Values returns all elements of the BackupType enum +func BackupType_Values() []string { + return []string{ + BackupTypeAutomatic, + BackupTypeUserInitiated, + BackupTypeAwsBackup, + } +} + +const ( + // DataRepositoryLifecycleCreating is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleCreating = "CREATING" + + // DataRepositoryLifecycleAvailable is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleAvailable = "AVAILABLE" + + // DataRepositoryLifecycleMisconfigured is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleMisconfigured = "MISCONFIGURED" + + // DataRepositoryLifecycleUpdating is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleUpdating = "UPDATING" + + // 
DataRepositoryLifecycleDeleting is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleDeleting = "DELETING" +) + +// DataRepositoryLifecycle_Values returns all elements of the DataRepositoryLifecycle enum +func DataRepositoryLifecycle_Values() []string { + return []string{ + DataRepositoryLifecycleCreating, + DataRepositoryLifecycleAvailable, + DataRepositoryLifecycleMisconfigured, + DataRepositoryLifecycleUpdating, + DataRepositoryLifecycleDeleting, + } +} + +const ( + // DataRepositoryTaskFilterNameFileSystemId is a DataRepositoryTaskFilterName enum value + DataRepositoryTaskFilterNameFileSystemId = "file-system-id" + + // DataRepositoryTaskFilterNameTaskLifecycle is a DataRepositoryTaskFilterName enum value + DataRepositoryTaskFilterNameTaskLifecycle = "task-lifecycle" +) + +// DataRepositoryTaskFilterName_Values returns all elements of the DataRepositoryTaskFilterName enum +func DataRepositoryTaskFilterName_Values() []string { + return []string{ + DataRepositoryTaskFilterNameFileSystemId, + DataRepositoryTaskFilterNameTaskLifecycle, + } +} + +const ( + // DataRepositoryTaskLifecyclePending is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecyclePending = "PENDING" + + // DataRepositoryTaskLifecycleExecuting is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecycleExecuting = "EXECUTING" + + // DataRepositoryTaskLifecycleFailed is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecycleFailed = "FAILED" + + // DataRepositoryTaskLifecycleSucceeded is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecycleSucceeded = "SUCCEEDED" + + // DataRepositoryTaskLifecycleCanceled is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecycleCanceled = "CANCELED" + + // DataRepositoryTaskLifecycleCanceling is a DataRepositoryTaskLifecycle enum value + DataRepositoryTaskLifecycleCanceling = "CANCELING" +) + +// DataRepositoryTaskLifecycle_Values returns all elements of the 
DataRepositoryTaskLifecycle enum +func DataRepositoryTaskLifecycle_Values() []string { + return []string{ + DataRepositoryTaskLifecyclePending, + DataRepositoryTaskLifecycleExecuting, + DataRepositoryTaskLifecycleFailed, + DataRepositoryTaskLifecycleSucceeded, + DataRepositoryTaskLifecycleCanceled, + DataRepositoryTaskLifecycleCanceling, + } +} + +const ( + // DataRepositoryTaskTypeExportToRepository is a DataRepositoryTaskType enum value + DataRepositoryTaskTypeExportToRepository = "EXPORT_TO_REPOSITORY" +) + +// DataRepositoryTaskType_Values returns all elements of the DataRepositoryTaskType enum +func DataRepositoryTaskType_Values() []string { + return []string{ + DataRepositoryTaskTypeExportToRepository, + } +} + +const ( + // DriveCacheTypeNone is a DriveCacheType enum value + DriveCacheTypeNone = "NONE" + + // DriveCacheTypeRead is a DriveCacheType enum value + DriveCacheTypeRead = "READ" +) + +// DriveCacheType_Values returns all elements of the DriveCacheType enum +func DriveCacheType_Values() []string { + return []string{ + DriveCacheTypeNone, + DriveCacheTypeRead, + } +} + +// The lifecycle status of the file system. 
+const ( + // FileSystemLifecycleAvailable is a FileSystemLifecycle enum value + FileSystemLifecycleAvailable = "AVAILABLE" + + // FileSystemLifecycleCreating is a FileSystemLifecycle enum value + FileSystemLifecycleCreating = "CREATING" + + // FileSystemLifecycleFailed is a FileSystemLifecycle enum value + FileSystemLifecycleFailed = "FAILED" + + // FileSystemLifecycleDeleting is a FileSystemLifecycle enum value + FileSystemLifecycleDeleting = "DELETING" + + // FileSystemLifecycleMisconfigured is a FileSystemLifecycle enum value + FileSystemLifecycleMisconfigured = "MISCONFIGURED" + + // FileSystemLifecycleUpdating is a FileSystemLifecycle enum value + FileSystemLifecycleUpdating = "UPDATING" +) + +// FileSystemLifecycle_Values returns all elements of the FileSystemLifecycle enum +func FileSystemLifecycle_Values() []string { + return []string{ + FileSystemLifecycleAvailable, + FileSystemLifecycleCreating, + FileSystemLifecycleFailed, + FileSystemLifecycleDeleting, + FileSystemLifecycleMisconfigured, + FileSystemLifecycleUpdating, + } +} + +// An enumeration specifying the currently ongoing maintenance operation. +const ( + // FileSystemMaintenanceOperationPatching is a FileSystemMaintenanceOperation enum value + FileSystemMaintenanceOperationPatching = "PATCHING" + + // FileSystemMaintenanceOperationBackingUp is a FileSystemMaintenanceOperation enum value + FileSystemMaintenanceOperationBackingUp = "BACKING_UP" +) + +// FileSystemMaintenanceOperation_Values returns all elements of the FileSystemMaintenanceOperation enum +func FileSystemMaintenanceOperation_Values() []string { + return []string{ + FileSystemMaintenanceOperationPatching, + FileSystemMaintenanceOperationBackingUp, + } +} + +// The type of file system. 
+const ( + // FileSystemTypeWindows is a FileSystemType enum value + FileSystemTypeWindows = "WINDOWS" + + // FileSystemTypeLustre is a FileSystemType enum value + FileSystemTypeLustre = "LUSTRE" +) + +// FileSystemType_Values returns all elements of the FileSystemType enum +func FileSystemType_Values() []string { + return []string{ + FileSystemTypeWindows, + FileSystemTypeLustre, + } +} + +// The name for a filter. +const ( + // FilterNameFileSystemId is a FilterName enum value + FilterNameFileSystemId = "file-system-id" + + // FilterNameBackupType is a FilterName enum value + FilterNameBackupType = "backup-type" + + // FilterNameFileSystemType is a FilterName enum value + FilterNameFileSystemType = "file-system-type" +) + +// FilterName_Values returns all elements of the FilterName enum +func FilterName_Values() []string { + return []string{ + FilterNameFileSystemId, + FilterNameBackupType, + FilterNameFileSystemType, + } +} + +const ( + // LustreDeploymentTypeScratch1 is a LustreDeploymentType enum value + LustreDeploymentTypeScratch1 = "SCRATCH_1" + + // LustreDeploymentTypeScratch2 is a LustreDeploymentType enum value + LustreDeploymentTypeScratch2 = "SCRATCH_2" + + // LustreDeploymentTypePersistent1 is a LustreDeploymentType enum value + LustreDeploymentTypePersistent1 = "PERSISTENT_1" +) + +// LustreDeploymentType_Values returns all elements of the LustreDeploymentType enum +func LustreDeploymentType_Values() []string { + return []string{ + LustreDeploymentTypeScratch1, + LustreDeploymentTypeScratch2, + LustreDeploymentTypePersistent1, + } +} + +const ( + // ReportFormatReportCsv20191124 is a ReportFormat enum value + ReportFormatReportCsv20191124 = "REPORT_CSV_20191124" +) + +// ReportFormat_Values returns all elements of the ReportFormat enum +func ReportFormat_Values() []string { + return []string{ + ReportFormatReportCsv20191124, + } +} + +const ( + // ReportScopeFailedFilesOnly is a ReportScope enum value + ReportScopeFailedFilesOnly = 
"FAILED_FILES_ONLY" +) + +// ReportScope_Values returns all elements of the ReportScope enum +func ReportScope_Values() []string { + return []string{ + ReportScopeFailedFilesOnly, + } +} + +// The types of limits on your service utilization. Limits include file system +// count, total throughput capacity, total storage, and total user-initiated +// backups. These limits apply for a specific account in a specific AWS Region. +// You can increase some of them by contacting AWS Support. +const ( + // ServiceLimitFileSystemCount is a ServiceLimit enum value + ServiceLimitFileSystemCount = "FILE_SYSTEM_COUNT" + + // ServiceLimitTotalThroughputCapacity is a ServiceLimit enum value + ServiceLimitTotalThroughputCapacity = "TOTAL_THROUGHPUT_CAPACITY" + + // ServiceLimitTotalStorage is a ServiceLimit enum value + ServiceLimitTotalStorage = "TOTAL_STORAGE" + + // ServiceLimitTotalUserInitiatedBackups is a ServiceLimit enum value + ServiceLimitTotalUserInitiatedBackups = "TOTAL_USER_INITIATED_BACKUPS" +) + +// ServiceLimit_Values returns all elements of the ServiceLimit enum +func ServiceLimit_Values() []string { + return []string{ + ServiceLimitFileSystemCount, + ServiceLimitTotalThroughputCapacity, + ServiceLimitTotalStorage, + ServiceLimitTotalUserInitiatedBackups, + } +} + +const ( + // StatusFailed is a Status enum value + StatusFailed = "FAILED" + + // StatusInProgress is a Status enum value + StatusInProgress = "IN_PROGRESS" + + // StatusPending is a Status enum value + StatusPending = "PENDING" + + // StatusCompleted is a Status enum value + StatusCompleted = "COMPLETED" + + // StatusUpdatedOptimizing is a Status enum value + StatusUpdatedOptimizing = "UPDATED_OPTIMIZING" +) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusFailed, + StatusInProgress, + StatusPending, + StatusCompleted, + StatusUpdatedOptimizing, + } +} + +// The storage type for your Amazon FSx file system. 
+const ( + // StorageTypeSsd is a StorageType enum value + StorageTypeSsd = "SSD" + + // StorageTypeHdd is a StorageType enum value + StorageTypeHdd = "HDD" +) + +// StorageType_Values returns all elements of the StorageType enum +func StorageType_Values() []string { + return []string{ + StorageTypeSsd, + StorageTypeHdd, + } +} + +const ( + // WindowsDeploymentTypeMultiAz1 is a WindowsDeploymentType enum value + WindowsDeploymentTypeMultiAz1 = "MULTI_AZ_1" + + // WindowsDeploymentTypeSingleAz1 is a WindowsDeploymentType enum value + WindowsDeploymentTypeSingleAz1 = "SINGLE_AZ_1" + + // WindowsDeploymentTypeSingleAz2 is a WindowsDeploymentType enum value + WindowsDeploymentTypeSingleAz2 = "SINGLE_AZ_2" +) + +// WindowsDeploymentType_Values returns all elements of the WindowsDeploymentType enum +func WindowsDeploymentType_Values() []string { + return []string{ + WindowsDeploymentTypeMultiAz1, + WindowsDeploymentTypeSingleAz1, + WindowsDeploymentTypeSingleAz2, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/doc.go new file mode 100644 index 00000000000..0c23c396c48 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/doc.go @@ -0,0 +1,29 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package fsx provides the client and types for making API +// requests to Amazon FSx. +// +// Amazon FSx is a fully managed service that makes it easy for storage and +// application administrators to launch and use shared file storage. +// +// See https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01 for more information on this service. +// +// See fsx package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/fsx/ +// +// Using the Client +// +// To contact Amazon FSx with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. 
+// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon FSx client FSx for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/fsx/#New +package fsx diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go new file mode 100644 index 00000000000..a827e7345c6 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/errors.go @@ -0,0 +1,173 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package fsx + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeActiveDirectoryError for service response error code + // "ActiveDirectoryError". + // + // An Active Directory error. + ErrCodeActiveDirectoryError = "ActiveDirectoryError" + + // ErrCodeBackupInProgress for service response error code + // "BackupInProgress". + // + // Another backup is already under way. Wait for completion before initiating + // additional backups of this file system. + ErrCodeBackupInProgress = "BackupInProgress" + + // ErrCodeBackupNotFound for service response error code + // "BackupNotFound". + // + // No Amazon FSx backups were found based upon the supplied parameters. + ErrCodeBackupNotFound = "BackupNotFound" + + // ErrCodeBackupRestoring for service response error code + // "BackupRestoring". + // + // You can't delete a backup while it's being used to restore a file system. + ErrCodeBackupRestoring = "BackupRestoring" + + // ErrCodeBadRequest for service response error code + // "BadRequest". + // + // A generic error indicating a failure with a client request. 
+ ErrCodeBadRequest = "BadRequest" + + // ErrCodeDataRepositoryTaskEnded for service response error code + // "DataRepositoryTaskEnded". + // + // The data repository task could not be canceled because the task has already + // ended. + ErrCodeDataRepositoryTaskEnded = "DataRepositoryTaskEnded" + + // ErrCodeDataRepositoryTaskExecuting for service response error code + // "DataRepositoryTaskExecuting". + // + // An existing data repository task is currently executing on the file system. + // Wait until the existing task has completed, then create the new task. + ErrCodeDataRepositoryTaskExecuting = "DataRepositoryTaskExecuting" + + // ErrCodeDataRepositoryTaskNotFound for service response error code + // "DataRepositoryTaskNotFound". + // + // The data repository task or tasks you specified could not be found. + ErrCodeDataRepositoryTaskNotFound = "DataRepositoryTaskNotFound" + + // ErrCodeFileSystemNotFound for service response error code + // "FileSystemNotFound". + // + // No Amazon FSx file systems were found based upon supplied parameters. + ErrCodeFileSystemNotFound = "FileSystemNotFound" + + // ErrCodeIncompatibleParameterError for service response error code + // "IncompatibleParameterError". + // + // The error returned when a second request is received with the same client + // request token but different parameters settings. A client request token should + // always uniquely identify a single request. + ErrCodeIncompatibleParameterError = "IncompatibleParameterError" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // A generic error indicating a server-side failure. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidExportPath for service response error code + // "InvalidExportPath". + // + // The path provided for data repository export isn't valid. 
+ ErrCodeInvalidExportPath = "InvalidExportPath" + + // ErrCodeInvalidImportPath for service response error code + // "InvalidImportPath". + // + // The path provided for data repository import isn't valid. + ErrCodeInvalidImportPath = "InvalidImportPath" + + // ErrCodeInvalidNetworkSettings for service response error code + // "InvalidNetworkSettings". + // + // One or more network settings specified in the request are invalid. InvalidVpcId + // means that the ID passed for the virtual private cloud (VPC) is invalid. + // InvalidSubnetIds returns the list of IDs for subnets that are either invalid + // or not part of the VPC specified. InvalidSecurityGroupIds returns the list + // of IDs for security groups that are either invalid or not part of the VPC + // specified. + ErrCodeInvalidNetworkSettings = "InvalidNetworkSettings" + + // ErrCodeInvalidPerUnitStorageThroughput for service response error code + // "InvalidPerUnitStorageThroughput". + // + // An invalid value for PerUnitStorageThroughput was provided. Please create + // your file system again, using a valid value. + ErrCodeInvalidPerUnitStorageThroughput = "InvalidPerUnitStorageThroughput" + + // ErrCodeMissingFileSystemConfiguration for service response error code + // "MissingFileSystemConfiguration". + // + // A file system configuration is required for this operation. + ErrCodeMissingFileSystemConfiguration = "MissingFileSystemConfiguration" + + // ErrCodeNotServiceResourceError for service response error code + // "NotServiceResourceError". + // + // The resource specified for the tagging operation is not a resource type owned + // by Amazon FSx. Use the API of the relevant service to perform the operation. + ErrCodeNotServiceResourceError = "NotServiceResourceError" + + // ErrCodeResourceDoesNotSupportTagging for service response error code + // "ResourceDoesNotSupportTagging". + // + // The resource specified does not support tagging. 
+ ErrCodeResourceDoesNotSupportTagging = "ResourceDoesNotSupportTagging" + + // ErrCodeResourceNotFound for service response error code + // "ResourceNotFound". + // + // The resource specified by the Amazon Resource Name (ARN) can't be found. + ErrCodeResourceNotFound = "ResourceNotFound" + + // ErrCodeServiceLimitExceeded for service response error code + // "ServiceLimitExceeded". + // + // An error indicating that a particular service limit was exceeded. You can + // increase some service limits by contacting AWS Support. + ErrCodeServiceLimitExceeded = "ServiceLimitExceeded" + + // ErrCodeUnsupportedOperation for service response error code + // "UnsupportedOperation". + // + // The requested operation is not supported for this resource or API. + ErrCodeUnsupportedOperation = "UnsupportedOperation" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "ActiveDirectoryError": newErrorActiveDirectoryError, + "BackupInProgress": newErrorBackupInProgress, + "BackupNotFound": newErrorBackupNotFound, + "BackupRestoring": newErrorBackupRestoring, + "BadRequest": newErrorBadRequest, + "DataRepositoryTaskEnded": newErrorDataRepositoryTaskEnded, + "DataRepositoryTaskExecuting": newErrorDataRepositoryTaskExecuting, + "DataRepositoryTaskNotFound": newErrorDataRepositoryTaskNotFound, + "FileSystemNotFound": newErrorFileSystemNotFound, + "IncompatibleParameterError": newErrorIncompatibleParameterError, + "InternalServerError": newErrorInternalServerError, + "InvalidExportPath": newErrorInvalidExportPath, + "InvalidImportPath": newErrorInvalidImportPath, + "InvalidNetworkSettings": newErrorInvalidNetworkSettings, + "InvalidPerUnitStorageThroughput": newErrorInvalidPerUnitStorageThroughput, + "MissingFileSystemConfiguration": newErrorMissingFileSystemConfiguration, + "NotServiceResourceError": newErrorNotServiceResourceError, + "ResourceDoesNotSupportTagging": newErrorResourceDoesNotSupportTagging, + "ResourceNotFound": newErrorResourceNotFound, + 
"ServiceLimitExceeded": newErrorServiceLimitExceeded, + "UnsupportedOperation": newErrorUnsupportedOperation, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go new file mode 100644 index 00000000000..41c2a1d5d4f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package fsx + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// FSx provides the API operation methods for making requests to +// Amazon FSx. See this package's package overview docs +// for details on the service. +// +// FSx methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type FSx struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "FSx" // Name of service. + EndpointsID = "fsx" // ID to lookup a service endpoint with. + ServiceID = "FSx" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the FSx client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a FSx client from just a session. 
+// svc := fsx.New(mySession) +// +// // Create a FSx client with additional configuration +// svc := fsx.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *FSx { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "fsx" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FSx { + svc := &FSx{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2018-03-01", + JSONVersion: "1.1", + TargetPrefix: "AWSSimbaAPIService_v20180301", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a FSx operation and runs any +// custom request initialization. 
+func (c *FSx) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 00000000000..66700cce13d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,35976 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// operation and ensure that the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. 
+// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this operation to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This operation +// concatenates the parts that you provide in the list. For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
+// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// CompleteMultipartUpload has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic operation using +// this API. However, to copy an object greater than 5 GB, you must use the +// multipart upload Upload Part - Copy API. For more information, see Copy Object +// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// operation starts, you receive a standard Amazon S3 error. 
If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (https://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. 
For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Server-side encryption +// +// When you perform a CopyObject operation, you can optionally use the appropriate +// encryption-related headers to encrypt the object using server-side encryption +// with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. 
With server-side encryption, Amazon S3 encrypts your data +// as it writes it to disks in its data centers and decrypts the data when you +// access it. For more information about server-side encryption, see Using Server-Side +// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the +// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// You can use the CopyObject operation to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. 
+// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// +// The following operations are related to CopyObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon S3 Glacier. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new S3 bucket. To create a bucket, you must register with Amazon +// S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you +// become the bucket owner. +// +// Not every string is an acceptable bucket name. For information about bucket +// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. For more information, see +// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. 
Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access control list (ACL) overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. 
California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation initiates a multipart upload and returns an upload ID. This +// upload ID is used to associate all of the parts in the specific multipart +// upload. You specify this upload ID in each of your subsequent upload part +// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. +// +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort operation and Amazon S3 aborts the multipart upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). 
+// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (AWS Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. +// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or +// Amazon S3-managed encryption keys. If you choose to provide your own encryption +// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload. 
+// +// To perform a multipart upload with encryption using an AWS KMS CMK, the requester +// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, +// and kms:DescribeKey actions on the key. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before +// it completes the multipart upload. +// +// If your AWS Identity and Access Management (IAM) user or role is in the same +// AWS account as the AWS KMS CMK, then you must have these permissions on the +// key policy. If your IAM user or role belongs to a different account than +// the key, then you must have the permissions on both the key policy and your +// IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. 
Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information about server-side +// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. 
When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the access control list (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. 
California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
+// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. 
+// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. 
+// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// This implementation of the DELETE operation removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. 
+// +// Related Resources +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. 
+// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. +// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. 
+// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). 
+// +// Related actions include: +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. 
The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. +// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to DeleteBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the DeleteBucketPolicy permissions on the specified bucket and +// belong to the bucket owner's account to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. 
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// For more information about bucket policies, see Using Bucket Policies and
+// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// The following operations are related to DeleteBucketPolicy:
+//
+//    * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+//    * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+	req, out := c.DeleteBucketPolicyRequest(input)
+	return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. 
+// +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. 
+// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. 
However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. 
If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+//    req, resp := client.DeleteObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+	op := &request.Operation{
+		Name:       opDeleteObjectTagging,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}/{Key+}?tagging",
+	}
+
+	if input == nil {
+		input = &DeleteObjectTaggingInput{}
+	}
+
+	output = &DeleteObjectTaggingOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+//    * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+//    * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectsRequest method. 
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. 
For a +// successful deletion, the operation does not return any information about +// the delete in the response body. +// +// When performing this operation on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. 
+// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. 
+// +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. 
+// +// Related Resources +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). 
+// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. 
For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
+// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are using a previous version +// of the lifecycle configuration, it still works. For the earlier API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. 
+// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. 
+// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// The following operations are related to GetBucketLogging: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
+// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
+// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationRequest method. 
+// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). 
+// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. +// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). 
+// +// The following operations are related to GetBucketOwnershipControls: +// +// * PutBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. 
+// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. 
For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + output = &GetBucketReplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete +// can return a wrong result. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. 
+// +// For information about GetBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). 
+// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// Returns the website configuration for a bucket. To host website on Amazon +// S3, you can configure a bucket as website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET operation requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to DeleteBucketWebsite: +// +// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRequest method. 
+// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For +// more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// +// To distribute large files to many people, you can save bandwidth costs by +// using BitTorrent. 
For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// For more information about returning the ACL of an object, see GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). +// +// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier +// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering +// Deep Archive tiers, before you can retrieve the object you must first restore +// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectStateError error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you GET the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 
+// +// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging +// action), the response also returns the x-amz-tagging-count header that provides +// the count of number of tags associated with the object. You can use GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// to retrieve the tag set associated with an object. +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// an HTTP status code 403 ("access denied") error. +// +// Versioning +// +// By default, the GET operation returns the current version of an object. To +// return a different version, use the versionId subresource. +// +// If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. 
These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// * ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLegalHoldRequest method. 
+// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. 
+// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRetentionRequest method. 
+// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This action is not supported by Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. 
You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Returns torrent files from a bucket. 
BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. 
To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. 
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). 
If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. +// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following operation is related to HeadObject: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. +// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. 
The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. 
+// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. 
For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. You can have up +// to 1,000 analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. 
An in-progress multipart +// upload is a multipart upload that has been initiated using the Initiate Multipart +// Upload request, but has not yet been completed or aborted. +// +// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// multipart uploads is the maximum number of uploads a response can include, +// which is also the default value. You can further limit the number of uploads +// in a response by specifying the max-uploads parameter in the response. If +// additional multipart uploads satisfy the list criteria, the response will +// contain an IsTruncated element with the value true. To list the additional +// multipart uploads, use the key-marker and upload-id-marker request parameters. +// +// In the response, the uploads are sorted by key. If your application has initiated +// more than one multipart upload using the same object key, then uploads in +// the response are first sorted by key. Additionally, uploads are sorted in +// ascending order within each key by the upload initiation time. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. 
+// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// This action is not supported by Amazon S3 on Outposts. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. 
+// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. We recommend that you use the newer version, ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. 
+// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// To use this operation, you must have READ access to the bucket. 
+// +// To use this operation in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// This section describes the latest revision of the API. We recommend that +// you use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// The following operations are related to ListObjectsV2: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPartsRequest method. 
+// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
+// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. 
+// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. 
+// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// operation returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. 
+// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. 
Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to +// set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. 
For example, the following x-amz-grant-write +// header grants create, overwrite, and delete objects permission to LogDelivery +// group predefined by Amazon S3 and two AWS accounts identified by their +// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// id="111122223333", id="555566667777" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
+// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. 
+// +// Related Resources +// +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. 
By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. 
+// +// Related Resources +// +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Key for an existing bucket. +// +// Default encryption for a bucket can use server-side encryption with Amazon +// S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you +// specify default encryption using SSE-KMS, you can also configure Amazon S3 +// Bucket Key. 
For information about default encryption, see Amazon S3 default +// bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. 
+// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. 
+// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. 
+// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. 
+// +// Related Resources +// +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycle API operation for Amazon 
Simple Storage Service. +// +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). 
+// +// Related Resources +// +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// +// * By default, a resource owner—in this case, a bucket owner, which is +// the AWS account that created the bucket—can perform any of the operations. +// A resource owner can also grant others permission to perform the operation. +// For more information, see the following topics in the Amazon Simple Storage +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// +// Rules +// +// You specify the lifecycle configuration in your request body. 
The lifecycle +// configuration is specified as XML consisting of one or more rules. Each rule +// consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. 
If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. All logs are saved to buckets +// in the same AWS Region as the source bucket. To set the logging status of +// a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. 
+// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable logging, you use LoggingEnabled and its children request elements. +// To disable logging, you use an empty BucketLoggingStatus request element: +// +// +// +// For more information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// +// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// For more information about returning the logging status of a bucket, see +// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). +// +// The following operations are related to PutBucketLogging: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. 
+// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to PutBucketMetricsConfiguration: +// +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// GetBucketLifecycle has the following special error: +// +// * Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationRequest method. 
+// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an +// event notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration. 
+// +// +// +// +// +// This operation replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon +// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon +// SQS) destination exists, and that the bucket owner has permission to publish +// to it by sending a test notification. In the case of AWS Lambda destinations, +// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission +// to invoke the function from the Amazon S3 bucket. For more information, see +// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. 
+// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" + +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketOwnershipControlsRequest method. +// req, resp := client.PutBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opPutBucketOwnershipControls, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &PutBucketOwnershipControlsInput{} + } + + output = &PutBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. 
+// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to PutBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. 
+// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). 
+// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 Developer Guide. +// +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. 
+// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. 
For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. 
+// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. 
For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. 
+// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. 
+// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketWebsiteRequest method. 
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. 
+// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. 
+// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. 
+// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. For more information +// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// in the Amazon Simple Storage Service Developer Guide. 
+// +// Server-side Encryption +// +// You can optionally request server-side encryption. With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// If you request server-side encryption using AWS Key Management Service (SSE-KMS), +// you can enable an S3 Bucket Key at the object-level. For more information, +// see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Access Control List (ACL)-Specific Request Headers +// +// You can use headers to grant ACL- based permissions. By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD Storage Class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different Storage Class. +// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, +// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. 
+// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// Uses the acl subresource to set the access control list (ACL) permissions +// for a new or existing object in an S3 bucket. 
You must have WRITE_ACP permission +// to set the ACL of an object. For more information, see What permissions can +// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon Simple Storage Service Developer Guide. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. 
For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants list objects permission to the two AWS accounts identified +// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. 
California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. 
+// +// Applies a Legal Hold configuration to the specified object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. +// +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// Places an Object Retention configuration on an object. +// +// This action is not supported by Amazon S3 on Outposts. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. 
+// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. 
+// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. 
+// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. 
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// This action is not supported by Amazon S3 on Outposts. 
+// +// This action performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. 
For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about the S3 structure in the request body, see the following: PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing +// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. 
For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring objects +// +// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage +// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers are not accessible in real time. For objects in Archive Access +// or Deep Archive Access tiers you must first initiate a restore request, and +// then wait until the object is moved into the Frequent Access tier. For objects +// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate +// a restore request, and then wait until a temporary copy of the object is +// available. To access an archived object, you must restore the object for +// the duration (number of days) that you specify. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. 
+// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive +// tier when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. Expedited retrievals and provisioned capacity +// are not available for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// storage class or S3 Intelligent-Tiering Archive tier. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals +// are free for objects stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively. +// Bulk retrievals typically finish within 5–12 hours for objects stored +// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. +// They typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. 
+// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. 
For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon Simple Storage Service Developer Guide. +// +// Responses +// +// A successful operation returns either the 200 OK or 202 Accepted status code. +// +// * If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. +// +// * If the object is previously restored, Amazon S3 returns 200 OK in the +// response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + + es := NewSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// This action is not supported by Amazon S3 on Outposts. 
+// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). 
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon Simple Storage Service Developer Guide. For objects that +// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) . +// +// GetObject Support +// +// The SelectObjectContent operation does not support the following GetObject +// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// * Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an +// object to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot +// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. 
+// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Special Errors
+//
+// For a list of special errors for this operation, see List of SELECT Object
+// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList)
+//
+// Related Resources
+//
+//    * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+//    * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+//    * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation SelectObjectContent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
+	req, out := c.SelectObjectContentRequest(input)
+	return out, req.Send()
+}
+
+// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SelectObjectContent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +var _ awserr.Error + +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSelectObjectContentEventStream constructor function. Using the functional options +// to pass in nested mock behavior. +type SelectObjectContentEventStream struct { + + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. +// This function should only be used for testing and mocking the SelectObjectContentEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. http.Response.Body), that will be closed when the stream Close method +// is called. 
+//
+//   es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream) {
+//       o.Reader = myMockStreamReader
+//       o.StreamCloser = myMockStreamCloser
+//   })
+func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+	es := &SelectObjectContentEventStream{
+		done: make(chan struct{}),
+		err:  eventstreamapi.NewOnceError(),
+	}
+
+	for _, fn := range opts {
+		fn(es)
+	}
+
+	return es
+}
+
+func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) {
+	es.StreamCloser = r.HTTPResponse.Body
+}
+
+func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) {
+	if es.done == nil {
+		return
+	}
+	go es.waitStreamPartClose()
+
+}
+
+func (es *SelectObjectContentEventStream) waitStreamPartClose() {
+	var outputErrCh <-chan struct{}
+	if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok {
+		outputErrCh = v.ErrorSet()
+	}
+	var outputClosedCh <-chan struct{}
+	if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+		outputClosedCh = v.Closed()
+	}
+
+	select {
+	case <-es.done:
+	case <-outputErrCh:
+		es.err.SetError(es.Reader.Err())
+		es.Close()
+	case <-outputClosedCh:
+		if err := es.Reader.Err(); err != nil {
+			es.err.SetError(es.Reader.Err())
+		}
+		es.Close()
+	}
+}
+
+// Events returns a channel to read events from.
+// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. 
+func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. 
To upload a part from an existing object, you +// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon +// S3 returns an upload ID, a unique identifier, that you must include in your +// upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// +// If the upload request is signed with Signature Version 4, then AWS S3 uses +// the x-amz-content-sha256 header as a checksum instead of Content-MD5. For +// more information see Authenticating Requests: Using the Authorization Header +// (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. 
+// +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide . +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the AWS managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. 
+// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. 
You
+// specify the data source by adding the request header x-amz-copy-source in
+// your request and a byte range by adding the request header x-amz-copy-source-range
+// in your request.
+//
+// The minimum allowable part size for a multipart upload is 5 MB. For more
+// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Instead of using an existing object as part data, you might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation
+// and provide data in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In response
+// to your initiate request, Amazon S3 returns a unique identifier, the upload
+// ID, that you must include in your upload part request.
+//
+// For more information about using the UploadPartCopy operation, see the following:
+//
+//    * For conceptual information about multipart uploads, see Uploading Objects
+//    Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+//    in the Amazon Simple Storage Service Developer Guide.
+//
+//    * For information about permissions required to use the multipart upload
+//    API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+//    in the Amazon Simple Storage Service Developer Guide.
+//
+//    * For information about copying objects using a single atomic operation
+//    vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+//    in the Amazon Simple Storage Service Developer Guide.
+// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and +// x-amz-copy-source-if-modified-since: +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; Amazon S3 returns 200 OK and copies the +// data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. 
+// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. 
+// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. 
+ // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. 
+ Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. 
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. This date can change when making changes to + // your bucket, such as editing its bucket policy. 
+ CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. 
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. 
+ // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. 
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. + Comments *string `type:"string"` + + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). 
+ // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV + QuoteCharacter *string `type:"string"` + + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". + QuoteEscapeCharacter *string `type:"string"` + + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. 
+func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. 
+func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// Container for specifying the AWS Lambda notification configuration. +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // Bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The role supporting the invocation of the Lambda function + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. 
+func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
	s.UploadId = &v
	return s
}

// getEndpointARN parses the Bucket member as an ARN resource for endpoint
// resolution. It returns an error when Bucket is unset.
func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member is set and contains an ARN.
func (s *CompleteMultipartUploadInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	// The receiver is passed by value, so assigning here mutates only the copy
	// that is returned to the caller.
	s.Bucket = aws.String(v)
	return &s, nil
}

type CompleteMultipartUploadOutput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket that contains the newly created object.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// When using this API with Amazon S3 on Outposts, you must direct requests
	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
	// using this operation using S3 on Outposts through the AWS SDKs, you provide
	// the Outposts bucket ARN in place of the bucket name. For more information
	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
	// in the Amazon Simple Storage Service Developer Guide.
	Bucket *string `type:"string"`

	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
	// encryption with AWS KMS (SSE-KMS).
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Entity tag that identifies the newly created object's data. Objects with
	// different object data will have different entity tags. The entity tag is
	// an opaque string. The entity tag may or may not be an MD5 digest of the object
	// data. If the entity tag is not an MD5 digest of the object data, it will
	// contain one or more nonhexadecimal characters and/or will consist of less
	// than 32 or more than 32 hexadecimal digits.
	ETag *string `type:"string"`

	// If the object expiration is configured, this will contain the expiration
	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`

	// The object key of the newly created object.
	Key *string `min:"1" type:"string"`

	// The URI that identifies the newly created object.
	Location *string `type:"string"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
	// symmetric customer managed customer master key (CMK) that was used for the
	// object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// If you specified server-side encryption either with an Amazon S3-managed
	// encryption key or an AWS KMS customer master key (CMK) in your initiate multipart
	// upload request, the response includes this header. It confirms the encryption
	// algorithm that Amazon S3 used to encrypt the object.
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// Version ID of the newly created object, in case the bucket has versioning
	// turned on.
	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}

// String returns the string representation
func (s CompleteMultipartUploadOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CompleteMultipartUploadOutput) GoString() string {
	return s.String()
}

// SetBucket sets the Bucket field's value.
func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
	s.Bucket = &v
	return s
}

// getBucket returns the Bucket value, or the empty string when Bucket is unset.
func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput {
	s.BucketKeyEnabled = &v
	return s
}

// SetETag sets the ETag field's value.
func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
	s.ETag = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
	s.Expiration = &v
	return s
}

// SetKey sets the Key field's value.
func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
	s.Key = &v
	return s
}

// SetLocation sets the Location field's value.
func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
	s.Location = &v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
	s.RequestCharged = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
	s.ServerSideEncryption = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
	s.VersionId = &v
	return s
}

// The container for the completed multipart upload details.
type CompletedMultipartUpload struct {
	_ struct{} `type:"structure"`

	// Array of CompletedPart data types.
	Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
}

// String returns the string representation
func (s CompletedMultipartUpload) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CompletedMultipartUpload) GoString() string {
	return s.String()
}

// SetParts sets the Parts field's value.
func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
	s.Parts = v
	return s
}

// Details of the parts that were uploaded.
type CompletedPart struct {
	_ struct{} `type:"structure"`

	// Entity tag returned when the part was uploaded.
	ETag *string `type:"string"`

	// Part number that identifies the part. This is a positive integer between
	// 1 and 10,000.
	PartNumber *int64 `type:"integer"`
}

// String returns the string representation
func (s CompletedPart) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CompletedPart) GoString() string {
	return s.String()
}

// SetETag sets the ETag field's value.
func (s *CompletedPart) SetETag(v string) *CompletedPart {
	s.ETag = &v
	return s
}

// SetPartNumber sets the PartNumber field's value.
func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
	s.PartNumber = &v
	return s
}

// A container for describing a condition that must be met for the specified
// redirect to apply. For example, 1. If request is for pages in the /docs folder,
// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
// redirect request to another host where you might process the error.
type Condition struct {
	_ struct{} `type:"structure"`

	// The HTTP error code when the redirect is applied. In the event of an error,
	// if the error code equals this value, then the specified redirect is applied.
	// Required when parent element Condition is specified and sibling KeyPrefixEquals
	// is not specified. If both are specified, then both must be true for the redirect
	// to be applied.
	HttpErrorCodeReturnedEquals *string `type:"string"`

	// The object key name prefix when the redirect is applied. For example, to
	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
	// To redirect request for all pages with the prefix docs/, the key prefix will
	// be /docs, which identifies all objects in the docs/ folder. Required when
	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
	// is not specified. If both conditions are specified, both must be true for
	// the redirect to be applied.
	KeyPrefixEquals *string `type:"string"`
}

// String returns the string representation
func (s Condition) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Condition) GoString() string {
	return s.String()
}

// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
	s.HttpErrorCodeReturnedEquals = &v
	return s
}

// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
	s.KeyPrefixEquals = &v
	return s
}

type ContinuationEvent struct {
	_ struct{} `locationName:"ContinuationEvent" type:"structure"`
}

// String returns the string representation
func (s ContinuationEvent) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ContinuationEvent) GoString() string {
	return s.String()
}

// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}

// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
// This method is only used internally within the SDK's EventStream handling.
// ContinuationEvent carries no payload, so there is nothing to decode.
func (s *ContinuationEvent) UnmarshalEvent(
	payloadUnmarshaler protocol.PayloadUnmarshaler,
	msg eventstream.Message,
) error {
	return nil
}

// MarshalEvent marshals the type into a stream event value. This method
// should only be used internally within the SDK's EventStream handling.
func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
	// Only the message-type header is set; a ContinuationEvent has no payload.
	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
	return msg, err
}

type CopyObjectInput struct {
	_ struct{} `locationName:"CopyObjectRequest" type:"structure"`

	// The canned ACL to apply to the object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

	// The name of the destination bucket.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// When using this API with Amazon S3 on Outposts, you must direct requests
	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
	// using this operation using S3 on Outposts through the AWS SDKs, you provide
	// the Outposts bucket ARN in place of the bucket name. For more information
	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
	// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
	// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
	// SSE-KMS.
	//
	// Specifying this header with a COPY operation doesn’t affect bucket-level
	// settings for S3 Bucket Key.
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// Specifies the source object for the copy operation. You specify the value
	// in one of two formats, depending on whether you want to access the source
	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
	//
	//    * For objects not accessed through an access point, specify the name of
	//    the source bucket and the key of the source object, separated by a slash
	//    (/). For example, to copy the object reports/january.pdf from the bucket
	//    awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
	//    must be URL encoded.
	//
	//    * For objects accessed through access points, specify the Amazon Resource
	//    Name (ARN) of the object as accessed through the access point, in the
	//    format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
	//    For example, to copy the object reports/january.pdf through access point
	//    my-access-point owned by account 123456789012 in Region us-west-2, use
	//    the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
	//    The value must be URL encoded. Amazon S3 supports copy operations using
	//    access points only when the source and destination buckets are in the
	//    same AWS Region. Alternatively, for objects accessed through Amazon S3
	//    on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
	//    For example, to copy the object reports/january.pdf through outpost my-outpost
	//    owned by account 123456789012 in Region us-west-2, use the URL encoding
	//    of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
	//    The value must be URL encoded.
	//
	// To copy a specific version of an object, append ?versionId=<version-id> to
	// the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
	// If you don't specify a version ID, Amazon S3 copies the latest version of
	// the source object.
	//
	// CopySource is a required field
	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`

	// Copies the object if its entity tag (ETag) matches the specified tag.
	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`

	// Copies the object if it has been modified since the specified time.
	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`

	// Copies the object if its entity tag (ETag) is different than the specified
	// ETag.
	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`

	// Copies the object if it hasn't been modified since the specified time.
	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`

	// Specifies the algorithm to use when decrypting the source object (for example,
	// AES256).
	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
	// the source object. The encryption key provided in this header must be one
	// that was used when the source object was created.
	CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure that the
	// encryption key was transmitted without error.
	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`

	// The account id of the expected destination bucket owner. If the destination
	// bucket is owned by a different account, the request will fail with an HTTP
	// 403 (Access Denied) error.
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

	// The account id of the expected source bucket owner. If the source bucket
	// is owned by a different account, the request will fail with an HTTP 403 (Access
	// Denied) error.
	ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`

	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

	// Allows grantee to read the object data and its metadata.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

	// Allows grantee to read the object ACL.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

	// Allows grantee to write the ACL for the applicable object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

	// The key of the destination object.
	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// A map of metadata to store with the object in S3.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// Specifies whether the metadata is copied from the source object or replaced
	// with metadata provided in the request.
	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`

	// Specifies whether you want to apply a Legal Hold to the copied object.
	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

	// The Object Lock mode that you want to apply to the copied object.
	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

	// The date and time when you want the copied object's Object Lock to expire.
	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. For information
	// about downloading objects from requester pays buckets, see Downloading Objects
	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Specifies the algorithm to use when encrypting the object (for example,
	// AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
	// S3 does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure that the
	// encryption key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// Specifies the AWS KMS Encryption Context to use for object encryption. The
	// value of this header is a base64-encoded UTF-8 string holding JSON with the
	// encryption context key-value pairs.
	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
	// requests for an object protected by AWS KMS will fail if not made via SSL
	// or using SigV4. For information about configuring using any of the officially
	// supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request
	// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
	// in the Amazon S3 Developer Guide.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in Amazon
	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
	// objects. The STANDARD storage class provides high durability and high availability.
	// Depending on performance needs, you can specify a different Storage Class.
	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
	// in the Amazon S3 Service Developer Guide.
	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// The tag-set for the destination object; this value must be used in
	// conjunction with the TaggingDirective. The tag-set must be encoded as URL
	// Query parameters.
	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`

	// Specifies whether the object tag-set are copied from the source object or
	// replaced with tag-set provided in the request.
	TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

// String returns the string representation
func (s CopyObjectInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CopyObjectInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CopyObjectInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}
	if s.CopySource == nil {
		invalidParams.Add(request.NewErrParamRequired("CopySource"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. 
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. 
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
	s.WebsiteRedirectLocation = &v
	return s
}

// getEndpointARN returns the Bucket member parsed as an endpoint ARN
// (via parseEndpointARN). It returns an error if Bucket is nil.
func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member is non-nil and holds a
// value in ARN format.
func (s *CopyObjectInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

type CopyObjectOutput struct {
	_ struct{} `type:"structure" payload:"CopyObjectResult"`

	// Indicates whether the copied object uses an S3 Bucket Key for server-side
	// encryption with AWS KMS (SSE-KMS).
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Container for all response elements.
	CopyObjectResult *CopyObjectResult `type:"structure"`

	// Version of the copied object in the destination bucket.
	CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`

	// If the object expiration is configured, the response includes this header.
	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm
	// used.
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round-trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// If present, specifies the AWS KMS Encryption Context to use for object encryption.
	// The value of this header is a base64-encoded UTF-8 string holding JSON with
	// the encryption context key-value pairs.
	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
	// symmetric customer managed customer master key (CMK) that was used for the
	// object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in Amazon
	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// Version ID of the newly created copy.
	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}

// String returns the string representation
func (s CopyObjectOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CopyObjectOutput) GoString() string {
	return s.String()
}

// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput {
	s.BucketKeyEnabled = &v
	return s
}

// SetCopyObjectResult sets the CopyObjectResult field's value.
func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
	s.CopyObjectResult = v
	return s
}

// SetCopySourceVersionId sets the CopySourceVersionId field's value.
func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
	s.CopySourceVersionId = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
	s.Expiration = &v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
	s.RequestCharged = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput {
	s.SSEKMSEncryptionContext = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
	s.ServerSideEncryption = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
	s.VersionId = &v
	return s
}

// Container for all response elements.
type CopyObjectResult struct {
	_ struct{} `type:"structure"`

	// Returns the ETag of the new object. The ETag reflects only changes to the
	// contents of an object, not its metadata. The source and destination ETag
	// is identical for a successfully copied object.
	ETag *string `type:"string"`

	// Returns the date that the object was last modified.
	LastModified *time.Time `type:"timestamp"`
}

// String returns the string representation
func (s CopyObjectResult) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CopyObjectResult) GoString() string {
	return s.String()
}

// SetETag sets the ETag field's value.
func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
	s.ETag = &v
	return s
}

// SetLastModified sets the LastModified field's value.
func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
	s.LastModified = &v
	return s
}

// Container for all response elements.
type CopyPartResult struct {
	_ struct{} `type:"structure"`

	// Entity tag of the object.
	ETag *string `type:"string"`

	// Date and time at which the object was uploaded.
	LastModified *time.Time `type:"timestamp"`
}

// String returns the string representation
func (s CopyPartResult) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CopyPartResult) GoString() string {
	return s.String()
}

// SetETag sets the ETag field's value.
func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
	s.ETag = &v
	return s
}

// SetLastModified sets the LastModified field's value.
func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
	s.LastModified = &v
	return s
}

// The configuration information for the bucket.
type CreateBucketConfiguration struct {
	_ struct{} `type:"structure"`

	// Specifies the Region where the bucket will be created. If you don't specify
	// a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}

// String returns the string representation
func (s CreateBucketConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateBucketConfiguration) GoString() string {
	return s.String()
}

// SetLocationConstraint sets the LocationConstraint field's value.
func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
	s.LocationConstraint = &v
	return s
}

type CreateBucketInput struct {
	_ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`

	// The canned ACL to apply to the bucket.
	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`

	// The name of the bucket to create.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The configuration information for the bucket.
	CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

	// Allows grantee the read, write, read ACP, and write ACP permissions on the
	// bucket.
	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

	// Allows grantee to list the objects in the bucket.
	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

	// Allows grantee to read the bucket ACL.
	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

	// Allows grantee to create, overwrite, and delete any object in the bucket.
	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`

	// Allows grantee to write the ACL for the applicable bucket.
	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

	// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
	ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
}

// String returns the string representation
func (s CreateBucketInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateBucketInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only client-side constraints are checked here (required members and
// minimum lengths); the service may still reject the request.
func (s *CreateBucketInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetACL sets the ACL field's value.
func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
	s.ACL = &v
	return s
}

// SetBucket sets the Bucket field's value.
func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
	s.Bucket = &v
	return s
}

// getBucket returns the value of the Bucket field, or the empty string if
// the field is nil. Unexported accessor; not part of the public API surface.
func (s *CreateBucketInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
	s.CreateBucketConfiguration = v
	return s
}

// SetGrantFullControl sets the GrantFullControl field's value.
func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
	s.GrantFullControl = &v
	return s
}

// SetGrantRead sets the GrantRead field's value.
func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
	s.GrantRead = &v
	return s
}

// SetGrantReadACP sets the GrantReadACP field's value.
func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
	s.GrantReadACP = &v
	return s
}

// SetGrantWrite sets the GrantWrite field's value.
func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
	s.GrantWrite = &v
	return s
}

// SetGrantWriteACP sets the GrantWriteACP field's value.
func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
	s.GrantWriteACP = &v
	return s
}

// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value.
func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput {
	s.ObjectLockEnabledForBucket = &v
	return s
}

type CreateBucketOutput struct {
	_ struct{} `type:"structure"`

	// Specifies the Region where the bucket will be created. If you are creating
	// a bucket on the US East (N. Virginia) Region (us-east-1), you do not need
	// to specify the location.
	Location *string `location:"header" locationName:"Location" type:"string"`
}

// String returns the string representation
func (s CreateBucketOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateBucketOutput) GoString() string {
	return s.String()
}

// SetLocation sets the Location field's value.
func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
	s.Location = &v
	return s
}

type CreateMultipartUploadInput struct {
	_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`

	// The canned ACL to apply to the object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

	// The name of the bucket to which to initiate the upload
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// When using this API with Amazon S3 on Outposts, you must direct requests
	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
	// using this operation using S3 on Outposts through the AWS SDKs, you provide
	// the Outposts bucket ARN in place of the bucket name. For more information
	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
	// with server-side encryption using AWS KMS (SSE-KMS). Setting this header
	// to true causes Amazon S3 to use an S3 Bucket Key for object encryption with
	// SSE-KMS.
	//
	// Specifying this header with an object operation doesn’t affect bucket-level
	// settings for S3 Bucket Key.
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// The account id of the expected bucket owner. If the bucket is owned by a
	// different account, the request will fail with an HTTP 403 (Access Denied)
	// error.
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`

	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

	// Allows grantee to read the object data and its metadata.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

	// Allows grantee to read the object ACL.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

	// Allows grantee to write the ACL for the applicable object.
	//
	// This action is not supported by Amazon S3 on Outposts.
	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

	// Object key for which the multipart upload is to be initiated.
	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// A map of metadata to store with the object in S3.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// Specifies whether you want to apply a Legal Hold to the uploaded object.
	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

	// Specifies the Object Lock mode that you want to apply to the uploaded object.
	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

	// Specifies the date and time when you want the Object Lock to expire.
	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. For information
	// about downloading objects from requester pays buckets, see Downloading Objects
	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Specifies the algorithm to use to when encrypting the object (for example,
	// AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
	// S3 does not store the encryption key. The key must be appropriate for use
	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure that the
	// encryption key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// Specifies the AWS KMS Encryption Context to use for object encryption. The
	// value of this header is a base64-encoded UTF-8 string holding JSON with the
	// encryption context key-value pairs.
	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

	// Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
	// object encryption. All GET and PUT requests for an object protected by AWS
	// KMS will fail if not made via SSL or using SigV4. For information about configuring
	// using any of the officially supported AWS SDKs and AWS CLI, see Specifying
	// the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
	// in the Amazon S3 Developer Guide.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in Amazon
	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
	// objects. The STANDARD storage class provides high durability and high availability.
	// Depending on performance needs, you can specify a different Storage Class.
	// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
	// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
	// in the Amazon S3 Service Developer Guide.
	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

// String returns the string representation
func (s CreateMultipartUploadInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateMultipartUploadInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only client-side constraints are checked here (required members and
// minimum lengths); the service may still reject the request.
func (s *CreateMultipartUploadInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetACL sets the ACL field's value.
func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
	s.ACL = &v
	return s
}

// SetBucket sets the Bucket field's value.
func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
	s.Bucket = &v
	return s
}

// getBucket returns the value of the Bucket field, or the empty string if
// the field is nil. Unexported accessor; not part of the public API surface.
func (s *CreateMultipartUploadInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput {
	s.BucketKeyEnabled = &v
	return s
}

// SetCacheControl sets the CacheControl field's value.
func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
	s.CacheControl = &v
	return s
}

// SetContentDisposition sets the ContentDisposition field's value.
func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
	s.ContentDisposition = &v
	return s
}

// SetContentEncoding sets the ContentEncoding field's value.
func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
	s.ContentEncoding = &v
	return s
}

// SetContentLanguage sets the ContentLanguage field's value.
func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
	s.ContentLanguage = &v
	return s
}

// SetContentType sets the ContentType field's value.
func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
	s.ContentType = &v
	return s
}

// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput {
	s.ExpectedBucketOwner = &v
	return s
}

// SetExpires sets the Expires field's value.
func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
	s.Expires = &v
	return s
}

// SetGrantFullControl sets the GrantFullControl field's value.
func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
	s.GrantFullControl = &v
	return s
}

// SetGrantRead sets the GrantRead field's value.
func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
	s.GrantRead = &v
	return s
}

// SetGrantReadACP sets the GrantReadACP field's value.
func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
	s.GrantReadACP = &v
	return s
}

// SetGrantWriteACP sets the GrantWriteACP field's value.
func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
	s.GrantWriteACP = &v
	return s
}

// SetKey sets the Key field's value.
func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
	s.Key = &v
	return s
}

// SetMetadata sets the Metadata field's value.
func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
	s.Metadata = v
	return s
}

// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput {
	s.ObjectLockLegalHoldStatus = &v
	return s
}

// SetObjectLockMode sets the ObjectLockMode field's value.
func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput {
	s.ObjectLockMode = &v
	return s
}

// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput {
	s.ObjectLockRetainUntilDate = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
	s.RequestPayer = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKey sets the SSECustomerKey field's value.
func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
	s.SSECustomerKey = &v
	return s
}

// getSSECustomerKey returns the value of the SSECustomerKey field, or the
// empty string if the field is nil. Unexported accessor; not part of the
// public API surface of this type.
func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) {
	if s.SSECustomerKey == nil {
		return v
	}
	return *s.SSECustomerKey
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput {
	s.SSEKMSEncryptionContext = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
	s.ServerSideEncryption = &v
	return s
}

// SetStorageClass sets the StorageClass field's value.
func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
	s.StorageClass = &v
	return s
}

// SetTagging sets the Tagging field's value.
func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput {
	s.Tagging = &v
	return s
}

// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
	s.WebsiteRedirectLocation = &v
	return s
}

// getEndpointARN returns the Bucket member parsed as an endpoint ARN
// (via parseEndpointARN). It returns an error if Bucket is nil.
func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member is non-nil and holds a
// value in ARN format.
func (s *CreateMultipartUploadInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

type CreateMultipartUploadOutput struct {
	_ struct{} `type:"structure"`

	// If the bucket has a lifecycle rule configured with an action to abort incomplete
	// multipart uploads and the prefix in the lifecycle rule matches the object
	// name in the request, the response includes this header. The header indicates
	// when the initiated multipart upload becomes eligible for an abort operation.
	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
	//
	// The response also includes the x-amz-abort-rule-id header that provides the
	// ID of the lifecycle configuration rule that defines this action.
	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`

	// This header is returned along with the x-amz-abort-date header. It identifies
	// the applicable lifecycle configuration rule that defines the action to abort
	// incomplete multipart uploads.
	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`

	// The name of the bucket to which the multipart upload was initiated.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// When using this API with Amazon S3 on Outposts, you must direct requests
	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
	// using this operation using S3 on Outposts through the AWS SDKs, you provide
	// the Outposts bucket ARN in place of the bucket name. For more information
	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
	// in the Amazon Simple Storage Service Developer Guide.
	Bucket *string `locationName:"Bucket" type:"string"`

	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
	// encryption with AWS KMS (SSE-KMS).
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Object key for which the multipart upload was initiated.
	Key *string `min:"1" type:"string"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm
	// used.
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round-trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// If present, specifies the AWS KMS Encryption Context to use for object encryption.
	// The value of this header is a base64-encoded UTF-8 string holding JSON with
	// the encryption context key-value pairs.
	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
	// symmetric customer managed customer master key (CMK) that was used for the
	// object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in Amazon
	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// ID for the initiated multipart upload.
	UploadId *string `type:"string"`
}

// String returns the string representation of s, as rendered by
// awsutil.Prettify.
func (s CreateMultipartUploadOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s CreateMultipartUploadOutput) GoString() string {
	return s.String()
}

// SetAbortDate sets the AbortDate field's value.
func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
	s.AbortDate = &v
	return s
}

// SetAbortRuleId sets the AbortRuleId field's value.
func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
	s.AbortRuleId = &v
	return s
}

// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The objects to delete. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. 
+ Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketOwnershipControlsInput struct { + _ struct{} 
`locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The bucket that has the tag set to be removed. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type DeleteBucketWebsiteOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of an object.
+	IsLatest *bool `type:"boolean"`
+
+	// The object key.
+	Key *string `min:"1" type:"string"`
+
+	// Date and time the object was last modified.
+	LastModified *time.Time `type:"timestamp"`
+
+	// The account that created the delete marker.
+	Owner *Owner `type:"structure"`
+
+	// Version ID of an object.
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+	return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+	s.IsLatest = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+	s.Key = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+	s.LastModified = &v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+	s.Owner = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Delete Marker Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to replicate delete markers.
+	Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerReplication) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerReplication) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { + s.Status = &v + return s +} + +type DeleteObjectInput struct { + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + + // The bucket name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // to process this operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Key name of the object to delete.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device. Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
+	MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. 
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // The bucket name containing the objects from which to remove the tags. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // The bucket name containing the objects to delete. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether you want to delete this object even if it has a Governance-type + // Object Lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Container for the request. 
+ // + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + // Container element for a successful delete. It identifies the object that + // was successfully deleted. + Deleted []*DeletedObject `type:"list" flattened:"true"` + + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. 
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Information about the deleted object. +type DeletedObject struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. 
In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. + DeleteMarker *bool `type:"boolean"` + + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. + DeleteMarkerVersionId *string `type:"string"` + + // The name of the deleted object. + Key *string `min:"1" type:"string"` + + // The version ID of the deleted object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). +type Destination struct { + _ struct{} `type:"structure"` + + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. 
If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. + Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store the results. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // A container specifying replication metrics-related settings enabling replication + // metrics and events. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. 
+ // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Contains the type of server-side encryption used. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (for example, AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. + // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. 
+ KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. 
+ ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. +func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v + return s +} + +// A message that indicates the request is complete and no more messages will +// be sent. You should not assume that the request is complete until the client +// receives an EndEvent. +type EndEvent struct { + _ struct{} `locationName:"EndEvent" type:"structure"` +} + +// String returns the string representation +func (s EndEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndEvent) GoString() string { + return s.String() +} + +// The EndEvent is and event in the SelectObjectContentEventStream group of events. +func (s *EndEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *EndEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. +type Error struct { + _ struct{} `type:"structure"` + + // The error code is a string that uniquely identifies an error condition. 
It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. 
For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. 
HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). 
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) 
in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. 
HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. 
HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. 
HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. 
HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. 
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. + Message *string `type:"string"` + + // The version ID of the error. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. 
+type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // The name of the bucket for which the accelerate configuration is retrieved. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // The bucket name for which to get the cors configuration. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. 
+func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. 
+ IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. 
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
+func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. 
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. 
+ MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the notification configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in + // effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The bucket name for which to get the bucket policy. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // The bucket name for which to get the replication information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + + // The name of the bucket for which to get the payment request configuration + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The name of the bucket for which to get the tagging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. 
+ // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. 
+ Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. 
+ ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key of the object for which to get the ACL information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectInput struct { + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). 
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // Key of the object to get. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. 
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. 
func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
	s.IfNoneMatch = &v
	return s
}

// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
	s.IfUnmodifiedSince = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
	s.Key = &v
	return s
}

// SetPartNumber sets the PartNumber field's value.
func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
	s.PartNumber = &v
	return s
}

// SetRange sets the Range field's value.
func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
	s.Range = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
	s.RequestPayer = &v
	return s
}

// SetResponseCacheControl sets the ResponseCacheControl field's value.
func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
	s.ResponseCacheControl = &v
	return s
}

// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
	s.ResponseContentDisposition = &v
	return s
}

// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
	s.ResponseContentEncoding = &v
	return s
}

// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
	s.ResponseContentLanguage = &v
	return s
}

// SetResponseContentType sets the ResponseContentType field's value.
func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
	s.ResponseContentType = &v
	return s
}

// SetResponseExpires sets the ResponseExpires field's value.
func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
	s.ResponseExpires = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKey sets the SSECustomerKey field's value.
func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
	s.SSECustomerKey = &v
	return s
}

// getSSECustomerKey returns the SSECustomerKey field's value, or the empty
// string when the field is unset.
func (s *GetObjectInput) getSSECustomerKey() (v string) {
	if s.SSECustomerKey == nil {
		return v
	}
	return *s.SSECustomerKey
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
	s.VersionId = &v
	return s
}

// getEndpointARN parses the Bucket member as an AWS resource ARN; it returns
// an error when the Bucket member is nil.
func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member contains an ARN.
func (s *GetObjectInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s GetObjectInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

type GetObjectLegalHoldInput struct {
	_ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`

	// The bucket name containing the object whose Legal Hold status you want to
	// retrieve.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The account id of the expected bucket owner. If the bucket is owned by a
	// different account, the request will fail with an HTTP 403 (Access Denied)
	// error.
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

	// The key name for the object whose Legal Hold status you want to retrieve.
	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. For information
	// about downloading objects from requester pays buckets, see Downloading Objects
	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// The version ID of the object whose Legal Hold status you want to retrieve.
	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s GetObjectLegalHoldInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectLegalHoldInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectLegalHoldInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput {
	s.Bucket = &v
	return s
}

// getBucket returns the Bucket field's value, or the empty string when the
// field is unset.
func (s *GetObjectLegalHoldInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput {
	s.ExpectedBucketOwner = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput {
	s.Key = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput {
	s.RequestPayer = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput {
	s.VersionId = &v
	return s
}

// getEndpointARN parses the Bucket member as an AWS resource ARN; it returns
// an error when the Bucket member is nil.
func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member contains an ARN.
func (s *GetObjectLegalHoldInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

type GetObjectLegalHoldOutput struct {
	_ struct{} `type:"structure" payload:"LegalHold"`

	// The current Legal Hold status for the specified object.
	LegalHold *ObjectLockLegalHold `type:"structure"`
}

// String returns the string representation
func (s GetObjectLegalHoldOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectLegalHoldOutput) GoString() string {
	return s.String()
}

// SetLegalHold sets the LegalHold field's value.
func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput {
	s.LegalHold = v
	return s
}

type GetObjectLockConfigurationInput struct {
	_ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`

	// The bucket whose Object Lock configuration you want to retrieve.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The account id of the expected bucket owner. If the bucket is owned by a
	// different account, the request will fail with an HTTP 403 (Access Denied)
	// error.
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}

// String returns the string representation
func (s GetObjectLockConfigurationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectLockConfigurationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectLockConfigurationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput {
	s.Bucket = &v
	return s
}

// getBucket returns the Bucket field's value, or the empty string when the
// field is unset.
func (s *GetObjectLockConfigurationInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput {
	s.ExpectedBucketOwner = &v
	return s
}

// getEndpointARN parses the Bucket member as an AWS resource ARN; it returns
// an error when the Bucket member is nil.
func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	return parseEndpointARN(*s.Bucket)
}

// hasEndpointARN reports whether the Bucket member contains an ARN.
func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool {
	if s.Bucket == nil {
		return false
	}
	return arn.IsARN(*s.Bucket)
}

// updateArnableField updates the value of the input field that
// takes an ARN as an input. This method is useful to backfill
// the parsed resource name from ARN into the input member.
// It returns a pointer to a modified copy of input and an error.
// Note that original input is not modified.
func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

type GetObjectLockConfigurationOutput struct {
	_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`

	// The specified bucket's Object Lock configuration.
	ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"`
}

// String returns the string representation
func (s GetObjectLockConfigurationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectLockConfigurationOutput) GoString() string {
	return s.String()
}

// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value.
func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput {
	s.ObjectLockConfiguration = v
	return s
}

type GetObjectOutput struct {
	_ struct{} `type:"structure" payload:"Body"`

	// Indicates that a range of bytes was specified.
	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`

	// Object data.
	//
	// NOTE(review): Body is a streaming io.ReadCloser; callers should Close it
	// when finished reading — confirm against the SDK's GetObject usage guidance.
	Body io.ReadCloser `type:"blob"`

	// Indicates whether the object uses an S3 Bucket Key for server-side encryption
	// with AWS KMS (SSE-KMS).
	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// Size of the body in bytes.
	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

	// The portion of the object returned in the response.
	ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// Specifies whether the object retrieved was (true) or was not (false) a Delete
	// Marker. If false, this response header does not appear in the response.
	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`

	// An ETag is an opaque identifier assigned by a web server to a specific version
	// of a resource found at a URL.
	ETag *string `location:"header" locationName:"ETag" type:"string"`

	// If the object expiration is configured (see PUT Bucket lifecycle), the response
	// includes this header. It includes the expiry-date and rule-id key-value pairs
	// providing object expiration information. The value of the rule-id is URL
	// encoded.
	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *string `location:"header" locationName:"Expires" type:"string"`

	// Last modified date of the object
	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`

	// A map of metadata to store with the object in S3.
	//
	// By default unmarshaled keys are written as a map keys in following canonicalized format:
	// the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase.
	// Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// This is set to the number of metadata entries not returned in x-amz-meta
	// headers. This can happen if you create metadata using an API like SOAP that
	// supports more flexible metadata than the REST API. For example, using SOAP,
	// you can create metadata whose values are not legal HTTP headers.
	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`

	// Indicates whether this object has an active legal hold. This field is only
	// returned if you have permission to view an object's legal hold status.
	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

	// The Object Lock mode currently in place for this object.
	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

	// The date and time when this object's Object Lock will expire.
	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

	// The count of parts this object has.
	PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`

	// Amazon S3 can return this if your request involves a bucket that is either
	// a source or destination in a replication rule.
	ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// Provides information about object restoration operation and expiration time
	// of the restored object copy.
	Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm
	// used.
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round-trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
	// symmetric customer managed customer master key (CMK) that was used for the
	// object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

	// The server-side encryption algorithm used when storing this object in Amazon
	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	// Provides storage class information of the object. Amazon S3 returns this
	// header for all objects except for S3 Standard storage class objects.
	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// The number of tags, if any, on the object.
	TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`

	// Version of the object.
	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

// String returns the string representation
func (s GetObjectOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectOutput) GoString() string {
	return s.String()
}

// SetAcceptRanges sets the AcceptRanges field's value.
func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
	s.AcceptRanges = &v
	return s
}

// SetBody sets the Body field's value.
func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
	s.Body = v
	return s
}

// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput {
	s.BucketKeyEnabled = &v
	return s
}

// SetCacheControl sets the CacheControl field's value.
func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
	s.CacheControl = &v
	return s
}

// SetContentDisposition sets the ContentDisposition field's value.
func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
	s.ContentDisposition = &v
	return s
}

// SetContentEncoding sets the ContentEncoding field's value.
func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
	s.ContentEncoding = &v
	return s
}

// SetContentLanguage sets the ContentLanguage field's value.
func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
	s.ContentLanguage = &v
	return s
}

// SetContentLength sets the ContentLength field's value.
func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
	s.ContentLength = &v
	return s
}

// SetContentRange sets the ContentRange field's value.
func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
	s.ContentRange = &v
	return s
}

// SetContentType sets the ContentType field's value.
func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
	s.ContentType = &v
	return s
}

// SetDeleteMarker sets the DeleteMarker field's value.
func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
	s.DeleteMarker = &v
	return s
}

// SetETag sets the ETag field's value.
func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
	s.ETag = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
	s.Expiration = &v
	return s
}

// SetExpires sets the Expires field's value.
func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
	s.Expires = &v
	return s
}

// SetLastModified sets the LastModified field's value.
func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
	s.LastModified = &v
	return s
}

// SetMetadata sets the Metadata field's value.
func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
	s.Metadata = v
	return s
}

// SetMissingMeta sets the MissingMeta field's value.
func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
	s.MissingMeta = &v
	return s
}

// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput {
	s.ObjectLockLegalHoldStatus = &v
	return s
}

// SetObjectLockMode sets the ObjectLockMode field's value.
func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput {
	s.ObjectLockMode = &v
	return s
}

// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput {
	s.ObjectLockRetainUntilDate = &v
	return s
}

// SetPartsCount sets the PartsCount field's value.
func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
	s.PartsCount = &v
	return s
}

// SetReplicationStatus sets the ReplicationStatus field's value.
func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
	s.ReplicationStatus = &v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
	s.RequestCharged = &v
	return s
}

// SetRestore sets the Restore field's value.
func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
	s.Restore = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
	s.ServerSideEncryption = &v
	return s
}

// SetStorageClass sets the StorageClass field's value.
func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
	s.StorageClass = &v
	return s
}

// SetTagCount sets the TagCount field's value.
func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
	s.TagCount = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
	s.VersionId = &v
	return s
}

// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
	s.WebsiteRedirectLocation = &v
	return s
}

type GetObjectRetentionInput struct {
	_ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`

	// The bucket name containing the object whose retention settings you want to
	// retrieve.
	//
	// When using this API with an access point, you must direct requests to the
	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
	// When using this operation with an access point through the AWS SDKs, you
	// provide the access point ARN in place of the bucket name. For more information
	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
	// in the Amazon Simple Storage Service Developer Guide.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The account id of the expected bucket owner. If the bucket is owned by a
	// different account, the request will fail with an HTTP 403 (Access Denied)
	// error.
	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`

	// The key name for the object whose retention settings you want to retrieve.
	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. For information
	// about downloading objects from requester pays buckets, see Downloading Objects
	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// The version ID for the object whose retention settings you want to retrieve.
	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s GetObjectRetentionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectRetentionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectRetentionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Bucket != nil && len(*s.Bucket) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput {
	s.Bucket = &v
	return s
}

// getBucket returns the Bucket field's value, or the empty string when the
// field is unset.
func (s *GetObjectRetentionInput) getBucket() (v string) {
	if s.Bucket == nil {
		return v
	}
	return *s.Bucket
}

// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput {
	s.ExpectedBucketOwner = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput {
	s.Key = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput {
	s.RequestPayer = &v
	return s
}

// SetVersionId sets the VersionId field's value.
+func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object for which to get the tagging information. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // The name of the bucket containing the object for which to get the torrent + // files. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key for which to get the information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. 
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. 
California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. 
+func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // The name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. 
+ Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. 
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. 
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. 
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket or buckets where Amazon S3 stores object replicas. 
+ // When you request an object (GetObject) or object metadata (HeadObject) from + // these buckets, Amazon S3 will return the x-amz-replication-status header + // in the response as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // + // * If requesting an object from a destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created and there is no replica + // modification replication in progress. + // + // * When replicating objects to multiple destination buckets the x-amz-replication-status + // header acts differently. The header of the source object will only return + // a value of COMPLETED when replication is successful to all destinations. + // The header will remain at value PENDING until replication has completed + // for all destinations. If one or more destinations fails replication the + // header will return FAILED. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetArchiveStatus sets the ArchiveStatus field's value. 
+func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. 
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Container for the Suffix element. +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (for example,if the suffix is index.html and you make a request + // to samplebucket/images/ the data that is returned will be for the object + // with the key name images/index.html) The suffix must not be empty and must + // not include a slash character. 
+ // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Container element that identifies who initiated the multipart upload. +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. 
Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. 
+ // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. 
+func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. 
If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. 
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. 
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. 
+ // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + AccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
+func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Specifies JSON as object's input serialization format. +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +// Specifies JSON as request's output serialization format. +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. 
If no value + // is specified, Amazon S3 uses a newline character ('\n'). + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key-value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for AWS Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. 
For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. 
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Container for lifecycle rules. You can add as many as 1000 rules. +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Container for the expiration for the lifecycle of the object. +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. 
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// A lifecycle rule for individual objects in an Amazon S3 bucket. +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. 
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // No longer used; use Filter instead. + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. 
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. 
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. 
The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. 
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. 
Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. 
+ IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. 
+ MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. 
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requestor. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. 
Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. 
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. 
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. 
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. 
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // The bucket name that contains the objects. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. 
For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // true. To return the additional keys, see key-marker + // and version-id-marker. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. 
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. 
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. 
All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. 
+ NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. + Prefix *string `type:"string"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. 
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. 
+ // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker *string `type:"string"` + + // The maximum number of keys returned in the response body. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. 
If response does not include the NextMarker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. 
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. 
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true. + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket. 
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. 
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by a delimiter. 
+ // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // If ContinuationToken was sent with the request, it is included in the response. + ContinuationToken *string `type:"string"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. 
Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true, which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Keys that begin with the indicated prefix. 
+ Prefix *string `type:"string"` + + // If StartAfter was sent with the request, it is included in the response. + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. 
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. 
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + Owner *Owner `type:"structure"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + PartNumberMarker *int64 `type:"integer"` + + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. 
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Contains the type of server-side encryption used. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. 
+ StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. 
+func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // A prefix for all log object keys. 
If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + // Name of the Object. + Name *string `type:"string"` + + // Value of the Object. 
+ Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + EventThreshold *ReplicationTimeValue `type:"structure"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. 
If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). 
+type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. 
+ Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. 
+type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. 
+ NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. 
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. 
+func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Container for specifying the AWS Lambda notification configuration. + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. 
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// An object consists of data and its descriptive metadata. +type Object struct { + _ struct{} `type:"structure"` + + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, + // have ETags that are an MD5 digest of their object data. 
+ // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, + // have ETags that are not an MD5 digest of their object data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. + ETag *string `type:"string"` + + // The name that you assign to an object. You use the object key to retrieve + // the object. + Key *string `min:"1" type:"string"` + + // The date the Object was Last Modified + LastModified *time.Time `type:"timestamp"` + + // The owner of the object + Owner *Owner `type:"structure"` + + // Size in bytes of the object + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Object Identifier is unique value to identify objects. 
+type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The container element for Object Lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an Object Lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // The Object Lock rule in place for the specified object. 
+ Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. +func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. 
+func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. + DisplayName *string `type:"string"` + + // Container for the ID of the owner. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. 
+func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. 
+ // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + +// Container for Parquet. +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() +} + +// Container for elements related to a part. +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. 
+ Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. 
+ BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v + return s +} + +// The ProgressEvent is and event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. 
+func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // * PUT Bucket calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. 
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS service principals and authorized users within this account if the bucket + // has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. 
+func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` + + // Container for setting the transfer acceleration state. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket to which to apply the ACL. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. 
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + + // Specifies the bucket impacted by the corsconfiguration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the default server-side-encryption configuration. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
+func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + + // The name of the bucket for which to set the configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1000 rules. + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. + // + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The container for the configuration. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want + // to apply to this Amazon S3 bucket. + // + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The bucket policy as a JSON document. 
+ // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + + // The name of the bucket + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. 
+ // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + + // The bucket name. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for Payer. + // + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the TagSet and Tag elements. + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Container for setting the versioning state. + // + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key for which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with a PUT operation doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). 
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. 
For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. 
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If x-amz-server-side-encryption is present and has the value of aws:kms,
+ // this header specifies the ID of the AWS Key Management Service (AWS KMS)
+ // symmetrical customer managed customer master key (CMK) that was used for
+ // the object.
+ //
+ // If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+ // the ID of the symmetric customer managed AWS KMS CMK that will be used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+ // provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+ // managed CMK in AWS to protect the data.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high availability.
+ // Depending on performance needs, you can specify a different Storage Class.
+ // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
+ // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 Service Developer Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. 
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. 
+func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket name containing the object that you want to place a Legal Hold + // on. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the Legal Hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a Legal Hold on. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Object Lock to be enabled for an existing bucket. 
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. 
+func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. 
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether this operation should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. 
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container for the TagSet and Tag elements + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The versionId of the object that the tag-set will be added to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. 
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // A collection of bucket events for which to send notifications + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. 
+ // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. 
+ // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of bucket events for which to send notifications + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { + s.Id = &v + return s +} + +// SetQueue sets the Queue field's value. +func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { + s.Queue = &v + return s +} + +// The container for the records event. +type RecordsEvent struct { + _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` + + // The byte array of partial, one or more result records. + // + // Payload is automatically base64 encoded/decoded by the SDK. 
+ Payload []byte `type:"blob"` +} + +// String returns the string representation +func (s RecordsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordsEvent) GoString() string { + return s.String() +} + +// SetPayload sets the Payload field's value. +func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { + s.Payload = v + return s +} + +// The RecordsEvent is and event in the SelectObjectContentEventStream group of events. +func (s *RecordsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *RecordsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.Payload = make([]byte, len(msg.Payload)) + copy(s.Payload, msg.Payload) + return nil +} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. 
+ Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. 
+type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// A filter that you can specify for selection for modifications on replicas. +// Amazon S3 doesn't replicate replica modifications by default. In the latest +// version of replication configuration (when Filter is specified), you can +// specify this element and set the status to Enabled to replicate modifications +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. 
+type ReplicaModifications struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates modifications on replicas. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` +} + +// String returns the string representation +func (s ReplicaModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaModifications) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaModifications) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications { + s.Status = &v + return s +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. 
+ // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. 
For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + // + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule. The maximum value is 255 characters. + ID *string `type:"string"` + + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. 
+ // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority indicates which rule has precedence whenever two or more replication + // rules conflict. Amazon S3 will attempt to replicate objects according to + // all replication rules. However, if there are two or more rules with the same + // destination bucket, then objects will be replicated according to the rule + // with the highest priority. The higher the number, the higher the priority. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon Simple Storage Service Developer Guide. + Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. 
+func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. 
+ // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid values: 15 minutes. + Minutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // The bucket name containing the object to restore. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. 
+ GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. 
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon Simple Storage Service Developer Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. 
+ // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. 
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object. + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. 
For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. 
+func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// Specifies the byte range of the object to get the records from. A record +// is processed when its first byte is contained by the range. 
This parameter +// is optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + _ struct{} `type:"structure"` + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. If only the End parameter is supplied, it is interpreted + // to mean scan the last N bytes of the file. For example, 50 + // means scan the last 50 bytes. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is supplied, + // it means scan from that point to the end of the file.For example; 50 + // means scan from byte 50 until the end of the file. + Start *int64 `type:"long"` +} + +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} + +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events writes for SelectObjectContentEventStream. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +// SelectObjectContentEventStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be SelectObjectContentEventStreamData. 
+// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + eventReader: eventReader, + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + go r.readEventStream() + + return r +} + +// Close will close the underlying event stream reader. 
+func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { + return r.done +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) +} + +func (r *readSelectObjectContentEventStream) Err() error { + return r.err.Err() +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +type unmarshalerForSelectObjectContentEventStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + case "End": + return &EndEvent{}, nil + case "Progress": + return &ProgressEvent{}, nil + case "Records": + return &RecordsEvent{}, nil + case "Stats": + return &StatsEvent{}, nil + default: + return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil + } +} + +// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the +// SelectObjectContentEventStream group of events when an unknown event is received. 
+type SelectObjectContentEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SelectObjectContentEventStreamUnknownEvent is and event in the SelectObjectContentEventStream +// group of events. +func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The SSE Algorithm used to encrypt the object. For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRangemay be used in the following ways: + // + // * 50100 - process only + // the records starting between the bytes 50 and 100 (inclusive, counting + // from zero) + // + // * 50 - process only the records + // starting after the byte 50 + // + // * 50 - process only the records within + // the last 50 bytes of the file. + ScanRange *ScanRange `type:"structure"` +} + +// String returns the string representation +func (s SelectObjectContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s +} + +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. 
+func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetScanRange sets the ScanRange field's value. 
+func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + EventStream *SelectObjectContentEventStream +} + +// String returns the string representation +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. 
+ // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. 
+func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. 
For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. 
+ // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. 
+ // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon Simple Storage Service Developer Guide. + BucketKeyEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { + s.BucketKeyEnabled = &v + return s +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). 
+type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed + ReplicaModifications *ReplicaModifications `type:"structure"` + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicaModifications sets the ReplicaModifications field's value. 
+func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using a customer master key (CMK) stored in AWS Key Management Service. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// Container for the stats details. +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The total number of object bytes scanned. 
+ BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Stats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stats) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Stats) SetBytesProcessed(v int64) *Stats { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Stats) SetBytesReturned(v int64) *Stats { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Stats) SetBytesScanned(v int64) *Stats { + s.BytesScanned = &v + return s +} + +// Container for the Stats Event. +type StatsEvent struct { + _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` + + // The Stats event details. + Details *Stats `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s StatsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatsEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent { + s.Details = v + return s +} + +// The StatsEvent is and event in the SelectObjectContentEventStream group of events. +func (s *StatsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *StatsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into an stream event value. 
This method +// should only used internally within the SDK's EventStream handling. +func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. +type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. 
+type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// A container of a key value name pair. +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the object key. 
+ // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for TagSet elements. +type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. 
+func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days). + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. 
+func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of events related to objects + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. 
+ Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. 
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // The bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and key of the source object, separated by a slash (/). + // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, + // use awsexamplebucket/reports/january.pdf. The value must be URL encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:::accesspoint//object/. + // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same AWS Region. Alternatively, for objects accessed through Amazon S3 + // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL encoded. 
+ // + // To copy a specific version of an object, append ?versionId= to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). 
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account id of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request will fail with an HTTP + // 403 (Access Denied) error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account id of the expected source bucket owner. If the source bucket + // is owned by a different account, the request will fail with an HTTP 403 (Access + // Denied) error. + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. 
+ // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. 
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. 
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. 
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. 
+func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website. + IndexDocument *IndexDocument `type:"structure"` + + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. 
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func 
BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" + + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 
is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" +) + +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + 
BucketLocationConstraintApSoutheast2, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum 
value + CompressionTypeBzip2 = "BZIP2" +) + +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + +// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum +func DeleteMarkerReplicationStatus_Values() []string { + return []string{ + DeleteMarkerReplicationStatusEnabled, + DeleteMarkerReplicationStatusDisabled, + } +} + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + +// The bucket event for which to send notifications. 
+const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // 
EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" +) + +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + } +} + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" +) + +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +// 
ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const ( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the 
IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +// InventoryFormat_Values returns all elements of the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a 
InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" +) + +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + } +} + +const ( + // JSONTypeDocument is a 
JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + +// MetricsStatus_Values returns all elements of the MetricsStatus enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is 
a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // 
ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. +// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. 
+const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + 
ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value 
+ QuoteFieldsAsneeded = "ASNEEDED" +) + +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + } +} + +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum 
value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + +// ReplicationTimeStatus_Values returns all elements of the ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. 
+const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + } +} + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" 
+ + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + StorageClassDeepArchive, + StorageClassOutposts, + } +} + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a 
TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000000..407f06b6ede --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + contentMD5Header = 
"Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation set +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provide header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. 
+ if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +// Adds the x-amz-te: append_md5 header to the request. This requests the service +// responds with a trailing MD5 checksum. +// +// Will not ask for append MD5 if disabled, the request is presigned or, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength hiden the trailing MD5 checksum. 
+ tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git 
a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 00000000000..9ba8a788720 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,107 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + 
in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 00000000000..f1959b03a95 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,77 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(endpointHandler) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) +} + +func defaultInitRequestFn(r *request.Request) { + // Add request handlers for specific platforms. + // e.g. 
100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + } +} + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} + +// endpointARNGetter is an accessor interface to grab the +// the field corresponding to an endpoint ARN input. +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 00000000000..0def02255ac --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 00000000000..7f7aca20859 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,110 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports both +// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. 
+// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrently downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in. +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// See the s3crypto package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 00000000000..403aebb688c --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + if a.Service != "s3" { + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"} + } + return arn.ParseAccessPointResource(a, resParts[1:]) + case "outpost": + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + return parseOutpostAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +// parseOutpostAccessPointResource attempts to parse the ARNs resource as an +// outpost access-point resource. 
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + 
Resource: resource, + Request: req, + } + + if resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if resReq.HasCustomEndpoint() { + req.Error = s3shared.NewInvalidARNWithCustomEndpointError(resource, nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // outposts does not support FIPS regions + if resReq.ResourceConfiguredForFIPS() { + req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. 
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Dualstack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 00000000000..c1c77da9adb --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,177 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = 
"accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. +// +func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
+ return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dual stack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. +type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. 
+// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name. +// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == "s3-outposts" { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { 
+ opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 00000000000..f64b55135ee --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,60 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). 
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier. + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY operation is not in the active tier and is + // only stored in Amazon S3 Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 00000000000..81cdec1ae75 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,136 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. 
+type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." +func updateEndpointForS3Config(r *request.Request, bucketName string) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r, bucketName) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r, bucketName) + } +} + +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), + nil) + return + } + + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterExecption", + fmt.Sprintf("unable to update endpoint 
host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + + moveBucketToHost(r.HTTPRequest.URL, bucketName) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." 
+ u.Host + removeBucketFromPath(u) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 00000000000..8e6f3307d41 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 00000000000..14d05f7b75a --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 00000000000..7c622187843 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,471 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client +// for testing your code. 
+// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API provides an interface to enable mocking the +// s3.S3 service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon Simple Storage Service. +// func myFunc(svc s3iface.S3API) bool { +// // Make svc.AbortMultipartUpload request +// } +// +// func main() { +// sess := session.New() +// svc := s3.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockS3Client struct { +// s3iface.S3API +// } +// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockS3Client{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. 
+type S3API interface { + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error) + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error) + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error) + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error) + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error) + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error) + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) 
(*s3.DeleteBucketAnalyticsConfigurationOutput, error) + DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) + DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error) + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) + DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) + 
DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) + + DeleteBucketOwnershipControls(*s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsWithContext(aws.Context, *s3.DeleteBucketOwnershipControlsInput, ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsRequest(*s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error) + 
DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error) + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) 
(*request.Request, *s3.DeletePublicAccessBlockOutput) + + GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error) + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + 
GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error) + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + 
GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error) + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) + GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) + GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketOwnershipControls(*s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) + GetBucketOwnershipControlsWithContext(aws.Context, *s3.GetBucketOwnershipControlsInput, ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) + GetBucketOwnershipControlsRequest(*s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + 
GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error) + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) 
(*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error) + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) + + GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) + + GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) + + GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, 
...request.Option) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error) + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error) + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + 
ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) + + ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) 
bool) error + ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error) + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error + + ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) + ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) + ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) + + ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error + ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error) + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) 
+ + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error + + PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) + PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) + PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error) + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + + 
PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) 
(*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketOwnershipControls(*s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsWithContext(aws.Context, *s3.PutBucketOwnershipControlsInput, ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsRequest(*s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + 
+ PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error) + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error) + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + 
PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) + + PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) + + PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) + + PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) + + PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + 
RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error) + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) + SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error) + SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) + UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error) + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + WaitUntilBucketExists(*s3.HeadBucketInput) error + WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilBucketNotExists(*s3.HeadBucketInput) error + WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilObjectExists(*s3.HeadObjectInput) error + WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error + + WaitUntilObjectNotExists(*s3.HeadObjectInput) error + WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error +} + +var _ S3API = (*s3.S3)(nil) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go new file mode 100644 index 00000000000..22bd0b7ce59 --- /dev/null +++ 
b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go @@ -0,0 +1,529 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +const ( + // DefaultBatchSize is the batch size we initialize when constructing a batch delete client. + // This value is used when calling DeleteObjects. This represents how many objects to delete + // per DeleteObjects call. + DefaultBatchSize = 100 +) + +// BatchError will contain the key and bucket of the object that failed to +// either upload or download. +type BatchError struct { + Errors Errors + code string + message string +} + +// Errors is a typed alias for a slice of errors to satisfy the error +// interface. +type Errors []Error + +func (errs Errors) Error() string { + buf := bytes.NewBuffer(nil) + for i, err := range errs { + buf.WriteString(err.Error()) + if i+1 < len(errs) { + buf.WriteString("\n") + } + } + return buf.String() +} + +// Error will contain the original error, bucket, and key of the operation that failed +// during batch operations. +type Error struct { + OrigErr error + Bucket *string + Key *string +} + +func newError(err error, bucket, key *string) Error { + return Error{ + err, + bucket, + key, + } +} + +func (err *Error) Error() string { + origErr := "" + if err.OrigErr != nil { + origErr = ":\n" + err.OrigErr.Error() + } + return fmt.Sprintf("failed to perform batch operation on %q to %q%s", + aws.StringValue(err.Key), + aws.StringValue(err.Bucket), + origErr, + ) +} + +// NewBatchError will return a BatchError that satisfies the awserr.Error interface. 
+func NewBatchError(code, message string, err []Error) awserr.Error { + return &BatchError{ + Errors: err, + code: code, + message: message, + } +} + +// Code will return the code associated with the batch error. +func (err *BatchError) Code() string { + return err.code +} + +// Message will return the message associated with the batch error. +func (err *BatchError) Message() string { + return err.message +} + +func (err *BatchError) Error() string { + return awserr.SprintError(err.Code(), err.Message(), "", err.Errors) +} + +// OrigErr will return the original error. Which, in this case, will always be nil +// for batched operations. +func (err *BatchError) OrigErr() error { + return err.Errors +} + +// BatchDeleteIterator is an interface that uses the scanner pattern to +// iterate through what needs to be deleted. +type BatchDeleteIterator interface { + Next() bool + Err() error + DeleteObject() BatchDeleteObject +} + +// DeleteListIterator is an alternative iterator for the BatchDelete client. This will +// iterate through a list of objects and delete the objects. +// +// Example: +// iter := &s3manager.DeleteListIterator{ +// Client: svc, +// Input: &s3.ListObjectsInput{ +// Bucket: aws.String("bucket"), +// MaxKeys: aws.Int64(5), +// }, +// Paginator: request.Pagination{ +// NewRequest: func() (*request.Request, error) { +// var inCpy *ListObjectsInput +// if input != nil { +// tmp := *input +// inCpy = &tmp +// } +// req, _ := c.ListObjectsRequest(inCpy) +// return req, nil +// }, +// }, +// } +// +// batcher := s3manager.NewBatchDeleteWithClient(svc) +// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil { +// return err +// } +type DeleteListIterator struct { + Bucket *string + Paginator request.Pagination + objects []*s3.Object +} + +// NewDeleteListIterator will return a new DeleteListIterator. 
+func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator { + iter := &DeleteListIterator{ + Bucket: input.Bucket, + Paginator: request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *s3.ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := svc.ListObjectsRequest(inCpy) + return req, nil + }, + }, + } + + for _, opt := range opts { + opt(iter) + } + return iter +} + +// Next will use the S3API client to iterate through a list of objects. +func (iter *DeleteListIterator) Next() bool { + if len(iter.objects) > 0 { + iter.objects = iter.objects[1:] + } + + if len(iter.objects) == 0 && iter.Paginator.Next() { + iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents + } + + return len(iter.objects) > 0 +} + +// Err will return the last known error from Next. +func (iter *DeleteListIterator) Err() error { + return iter.Paginator.Err() +} + +// DeleteObject will return the current object to be deleted. +func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject { + return BatchDeleteObject{ + Object: &s3.DeleteObjectInput{ + Bucket: iter.Bucket, + Key: iter.objects[0].Key, + }, + } +} + +// BatchDelete will use the s3 package's service client to perform a batch +// delete. +type BatchDelete struct { + Client s3iface.S3API + BatchSize int +} + +// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of +// objects. 
+// +// Example: +// batcher := s3manager.NewBatchDeleteWithClient(client, size) +// +// objects := []BatchDeleteObject{ +// { +// Object: &s3.DeleteObjectInput { +// Key: aws.String("key"), +// Bucket: aws.String("bucket"), +// }, +// }, +// } +// +// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ +// Objects: objects, +// }); err != nil { +// return err +// } +func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete { + svc := &BatchDelete{ + Client: client, + BatchSize: DefaultBatchSize, + } + + for _, opt := range options { + opt(svc) + } + + return svc +} + +// NewBatchDelete will return a new delete client that can delete a batched amount of +// objects. +// +// Example: +// batcher := s3manager.NewBatchDelete(sess, size) +// +// objects := []BatchDeleteObject{ +// { +// Object: &s3.DeleteObjectInput { +// Key: aws.String("key"), +// Bucket: aws.String("bucket"), +// }, +// }, +// } +// +// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{ +// Objects: objects, +// }); err != nil { +// return err +// } +func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete { + client := s3.New(c) + return NewBatchDeleteWithClient(client, options...) +} + +// BatchDeleteObject is a wrapper object for calling the batch delete operation. +type BatchDeleteObject struct { + Object *s3.DeleteObjectInput + // After will run after each iteration during the batch process. This function will + // be executed whether or not the request was successful. + After func() error +} + +// DeleteObjectsIterator is an interface that uses the scanner pattern to iterate +// through a series of objects to be deleted. +type DeleteObjectsIterator struct { + Objects []BatchDeleteObject + index int + inc bool +} + +// Next will increment the default iterator's index and ensure that there +// is another object to iterator to. 
+func (iter *DeleteObjectsIterator) Next() bool { + if iter.inc { + iter.index++ + } else { + iter.inc = true + } + return iter.index < len(iter.Objects) +} + +// Err will return an error. Since this is just used to satisfy the BatchDeleteIterator interface +// this will only return nil. +func (iter *DeleteObjectsIterator) Err() error { + return nil +} + +// DeleteObject will return the BatchDeleteObject at the current batched index. +func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject { + object := iter.Objects[iter.index] + return object +} + +// Delete will use the iterator to queue up objects that need to be deleted. +// Once the batch size is met, this will call the deleteBatch function. +func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error { + var errs []Error + objects := []BatchDeleteObject{} + var input *s3.DeleteObjectsInput + + for iter.Next() { + o := iter.DeleteObject() + + if input == nil { + input = initDeleteObjectsInput(o.Object) + } + + parity := hasParity(input, o) + if parity { + input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{ + Key: o.Object.Key, + VersionId: o.Object.VersionId, + }) + objects = append(objects, o) + } + + if len(input.Delete.Objects) == d.BatchSize || !parity { + if err := deleteBatch(ctx, d, input, objects); err != nil { + errs = append(errs, err...) + } + + objects = objects[:0] + input = nil + + if !parity { + objects = append(objects, o) + input = initDeleteObjectsInput(o.Object) + input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{ + Key: o.Object.Key, + VersionId: o.Object.VersionId, + }) + } + } + } + + // iter.Next() could return false (above) plus populate iter.Err() + if iter.Err() != nil { + errs = append(errs, newError(iter.Err(), nil, nil)) + } + + if input != nil && len(input.Delete.Objects) > 0 { + if err := deleteBatch(ctx, d, input, objects); err != nil { + errs = append(errs, err...) 
+ } + } + + if len(errs) > 0 { + return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs) + } + return nil +} + +func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput { + return &s3.DeleteObjectsInput{ + Bucket: o.Bucket, + MFA: o.MFA, + RequestPayer: o.RequestPayer, + Delete: &s3.Delete{}, + } +} + +const ( + // ErrDeleteBatchFailCode represents an error code which will be returned + // only when DeleteObjects.Errors has an error that does not contain a code. + ErrDeleteBatchFailCode = "DeleteBatchError" + errDefaultDeleteBatchMessage = "failed to delete" +) + +// deleteBatch will delete a batch of items in the objects parameters. +func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error { + errs := []Error{} + + if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil { + for i := 0; i < len(input.Delete.Objects); i++ { + errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key)) + } + } else if len(result.Errors) > 0 { + for i := 0; i < len(result.Errors); i++ { + code := ErrDeleteBatchFailCode + msg := errDefaultDeleteBatchMessage + if result.Errors[i].Message != nil { + msg = *result.Errors[i].Message + } + if result.Errors[i].Code != nil { + code = *result.Errors[i].Code + } + + errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key)) + } + } + for _, object := range objects { + if object.After == nil { + continue + } + if err := object.After(); err != nil { + errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) + } + } + + return errs +} + +func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool { + if o1.Bucket != nil && o2.Object.Bucket != nil { + if *o1.Bucket != *o2.Object.Bucket { + return false + } + } else if o1.Bucket != o2.Object.Bucket { + return false + } + + if o1.MFA != nil && o2.Object.MFA != nil { + if *o1.MFA != 
*o2.Object.MFA { + return false + } + } else if o1.MFA != o2.Object.MFA { + return false + } + + if o1.RequestPayer != nil && o2.Object.RequestPayer != nil { + if *o1.RequestPayer != *o2.Object.RequestPayer { + return false + } + } else if o1.RequestPayer != o2.Object.RequestPayer { + return false + } + + return true +} + +// BatchDownloadIterator is an interface that uses the scanner pattern to iterate +// through a series of objects to be downloaded. +type BatchDownloadIterator interface { + Next() bool + Err() error + DownloadObject() BatchDownloadObject +} + +// BatchDownloadObject contains all necessary information to run a batch operation once. +type BatchDownloadObject struct { + Object *s3.GetObjectInput + Writer io.WriterAt + // After will run after each iteration during the batch process. This function will + // be executed whether or not the request was successful. + After func() error +} + +// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched +// download of objects. +type DownloadObjectsIterator struct { + Objects []BatchDownloadObject + index int + inc bool +} + +// Next will increment the default iterator's index and ensure that there +// is another object to iterate to. +func (batcher *DownloadObjectsIterator) Next() bool { + if batcher.inc { + batcher.index++ + } else { + batcher.inc = true + } + return batcher.index < len(batcher.Objects) +} + +// DownloadObject will return the BatchDownloadObject at the current batched index. +func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject { + object := batcher.Objects[batcher.index] + return object +} + +// Err will return an error. Since this is just used to satisfy the BatchDownloadIterator interface +// this will only return nil. +func (batcher *DownloadObjectsIterator) Err() error { + return nil +} + +// BatchUploadIterator is an interface that uses the scanner pattern to +// iterate through what needs to be uploaded. 
+type BatchUploadIterator interface { + Next() bool + Err() error + UploadObject() BatchUploadObject +} + +// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched +// upload of objects. +type UploadObjectsIterator struct { + Objects []BatchUploadObject + index int + inc bool +} + +// Next will increment the default iterator's index and ensure that there +// is another object to iterator to. +func (batcher *UploadObjectsIterator) Next() bool { + if batcher.inc { + batcher.index++ + } else { + batcher.inc = true + } + return batcher.index < len(batcher.Objects) +} + +// Err will return an error. Since this is just used to satisfy the BatchUploadIterator interface +// this will only return nil. +func (batcher *UploadObjectsIterator) Err() error { + return nil +} + +// UploadObject will return the BatchUploadObject at the current batched index. +func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject { + object := batcher.Objects[batcher.index] + return object +} + +// BatchUploadObject contains all necessary information to run a batch operation once. +type BatchUploadObject struct { + Object *UploadInput + // After will run after each iteration during the batch process. This function will + // be executed whether or not the request was successful. 
+ After func() error +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go new file mode 100644 index 00000000000..9cc1e5970c1 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go @@ -0,0 +1,159 @@ +package s3manager + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// GetBucketRegion will attempt to get the region for a bucket using the +// regionHint to determine which AWS partition to perform the query on. +// +// The request will not be signed, and will not use your AWS credentials. +// +// A "NotFound" error code will be returned if the bucket does not exist in the +// AWS partition the regionHint belongs to. If the regionHint parameter is an +// empty string GetBucketRegion will fallback to the ConfigProvider's region +// config. If the regionHint is empty, and the ConfigProvider does not have a +// region value, an error will be returned.. +// +// For example to get the region of a bucket which exists in "eu-central-1" +// you could provide a region hint of "us-west-2". +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// By default the request will be made to the Amazon S3 endpoint using the Path +// style addressing. 
+// +// s3.us-west-2.amazonaws.com/bucketname +// +// This is not compatible with Amazon S3's FIPS endpoints. To override this +// behavior to use Virtual Host style addressing, provide a functional option +// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false). +// +// region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) { +// r.S3ForcePathStyle = aws.Bool(false) +// }) +// +// To configure the GetBucketRegion to make a request via the Amazon +// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g. +// fips-us-gov-west-1) set the Config.Endpoint on the Session, or client the +// utility is called with. The hint region will be ignored if an endpoint URL +// is configured on the session or client. +// +// sess, err := session.NewSession(&aws.Config{ +// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"), +// }) +// +// region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "") +func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) { + var cfg aws.Config + if len(regionHint) != 0 { + cfg.Region = aws.String(regionHint) + } + svc := s3.New(c, &cfg) + return GetBucketRegionWithClient(ctx, svc, bucket, opts...) +} + +const bucketRegionHeader = "X-Amz-Bucket-Region" + +// GetBucketRegionWithClient is the same as GetBucketRegion with the exception +// that it takes a S3 service client instead of a Session. The regionHint is +// derived from the region the S3 service client was created in. +// +// By default the request will be made to the Amazon S3 endpoint using the Path +// style addressing. +// +// s3.us-west-2.amazonaws.com/bucketname +// +// This is not compatible with Amazon S3's FIPS endpoints. 
To override this +// behavior to use Virtual Host style addressing, provide a functional option +// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false). +// +// region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) { +// r.S3ForcePathStyle = aws.Bool(false) +// }) +// +// To configure the GetBucketRegion to make a request via the Amazon +// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g. +// fips-us-gov-west-1) set the Config.Endpoint on the Session, or client the +// utility is called with. The hint region will be ignored if an endpoint URL +// is configured on the session or client. +// +// region, err := s3manager.GetBucketRegionWithClient(context.Background(), +// s3.New(sess, &aws.Config{ +// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"), +// }), +// "bucketname") +// +// See GetBucketRegion for more information. +func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) { + req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{ + Bucket: aws.String(bucket), + }) + req.Config.S3ForcePathStyle = aws.Bool(true) + + req.Config.Credentials = credentials.AnonymousCredentials + req.SetContext(ctx) + + // Disable HTTP redirects to prevent an invalid 301 from eating the response + // because Go's HTTP client will fail, and drop the response if an 301 is + // received without a location header. S3 will return a 301 without the + // location header for HeadObject API calls. + req.DisableFollowRedirects = true + + var bucketRegion string + req.Handlers.Send.PushBack(func(r *request.Request) { + bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader) + if len(bucketRegion) == 0 { + return + } + r.HTTPResponse.StatusCode = 200 + r.HTTPResponse.Status = "OK" + r.Error = nil + }) + // Replace the endpoint validation handler to not require a region if an + // endpoint URL was specified. 
Since these requests are not authenticated, + // requiring a region is not needed when an endpoint URL is provided. + req.Handlers.Validate.Swap( + corehandlers.ValidateEndpointHandler.Name, + request.NamedHandler{ + Name: "validateEndpointWithoutRegion", + Fn: validateEndpointWithoutRegion, + }, + ) + + req.ApplyOptions(opts...) + + if err := req.Send(); err != nil { + return "", err + } + + bucketRegion = s3.NormalizeBucketLocation(bucketRegion) + + return bucketRegion, nil +} + +func validateEndpointWithoutRegion(r *request.Request) { + // Check if the caller provided an explicit URL instead of one derived by + // the SDK's endpoint resolver. For GetBucketRegion, with an explicit + // endpoint URL, a region is not needed. If no endpoint URL is provided, + // fallback the SDK's standard endpoint validation handler. + if len(aws.StringValue(r.Config.Endpoint)) == 0 { + corehandlers.ValidateEndpointHandler.Fn(r) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go new file mode 100644 index 00000000000..f1d9e85c7b3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go @@ -0,0 +1,81 @@ +package s3manager + +import ( + "io" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// BufferedReadSeeker is buffered io.ReadSeeker +type BufferedReadSeeker struct { + r io.ReadSeeker + buffer []byte + readIdx, writeIdx int +} + +// NewBufferedReadSeeker returns a new BufferedReadSeeker +// if len(b) == 0 then the buffer will be initialized to 64 KiB. 
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker { + if len(b) == 0 { + b = make([]byte, 64*1024) + } + return &BufferedReadSeeker{r: r, buffer: b} +} + +func (b *BufferedReadSeeker) reset(r io.ReadSeeker) { + b.r = r + b.readIdx, b.writeIdx = 0, 0 +} + +// Read will read up len(p) bytes into p and will return +// the number of bytes read and any error that occurred. +// If the len(p) > the buffer size then a single read request +// will be issued to the underlying io.ReadSeeker for len(p) bytes. +// A Read request will at most perform a single Read to the underlying +// io.ReadSeeker, and may return < len(p) if serviced from the buffer. +func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return n, err + } + + if b.readIdx == b.writeIdx { + if len(p) >= len(b.buffer) { + n, err = b.r.Read(p) + return n, err + } + b.readIdx, b.writeIdx = 0, 0 + + n, err = b.r.Read(b.buffer) + if n == 0 { + return n, err + } + + b.writeIdx += n + } + + n = copy(p, b.buffer[b.readIdx:b.writeIdx]) + b.readIdx += n + + return n, err +} + +// Seek will position then underlying io.ReadSeeker to the given offset +// and will clear the buffer. +func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { + n, err := b.r.Seek(offset, whence) + + b.reset(b.r) + + return n, err +} + +// ReadAt will read up to len(p) bytes at the given file offset. +// This will result in the buffer being cleared. 
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { + _, err := b.Seek(off, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return b.Read(p) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go new file mode 100644 index 00000000000..42276530a8b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go new file mode 100644 index 00000000000..687082c3066 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return NewBufferedReadSeekerWriteToPool(1024 * 1024) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go new file mode 100644 index 00000000000..ada50c24355 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return nil +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go new file mode 100644 index 00000000000..7e9d9579f64 --- /dev/null +++ 
b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return NewPooledBufferedWriterReadFromProvider(1024 * 1024) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go new file mode 100644 index 00000000000..229c0d63bda --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go @@ -0,0 +1,3 @@ +// Package s3manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. +package s3manager diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go new file mode 100644 index 00000000000..4b54b7c033d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -0,0 +1,597 @@ +package s3manager + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). +const DefaultDownloadPartSize = 1024 * 1024 * 5 + +// DefaultDownloadConcurrency is the default number of goroutines to spin up +// when using Download(). 
+const DefaultDownloadConcurrency = 5 + +type errReadingBody struct { + err error +} + +func (e *errReadingBody) Error() string { + return fmt.Sprintf("failed to read part body: %v", e.err) +} + +func (e *errReadingBody) Unwrap() error { + return e.err +} + +// The Downloader structure that calls Download(). It is safe to call Download() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Downloader's properties is not safe to be done concurrently. +type Downloader struct { + // The size (in bytes) to request from S3 for each part. + // The minimum allowed part size is 5MB, and if this value is set to zero, + // the DefaultDownloadPartSize value will be used. + // + // PartSize is ignored if the Range input parameter is provided. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultDownloadConcurrency value will be used. + // + // Concurrency of 1 will download the parts sequentially. + // + // Concurrency is ignored if the Range input parameter is provided. + Concurrency int + + // An S3 client to use when performing downloads. + S3 s3iface.S3API + + // List of request options that will be passed down to individual API + // operation requests made by the downloader. + RequestOptions []request.Option + + // Defines the buffer strategy used when downloading a part. + // + // If a WriterReadFromProvider is given the Download manager + // will pass the io.WriterAt of the Download request to the provider + // and will use the returned WriterReadFrom from the provider as the + // destination writer when copying from http response body. + BufferProvider WriterReadFromProvider +} + +// WithDownloaderRequestOptions appends to the Downloader's API request options. +func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) { + return func(d *Downloader) { + d.RequestOptions = append(d.RequestOptions, opts...) 
+ } +} + +// NewDownloader creates a new Downloader instance to downloads objects from +// S3 in concurrent chunks. Pass in additional functional options to customize +// the downloader behavior. Requires a client.ConfigProvider in order to create +// a S3 service client. The session.Session satisfies the client.ConfigProvider +// interface. +// +// Example: +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a downloader with the session and custom options +// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + return newDownloader(s3.New(c), options...) +} + +func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: client, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + BufferProvider: defaultDownloadBufferProvider(), + } + for _, option := range options { + option(d) + } + + return d +} + +// NewDownloaderWithClient creates a new Downloader instance to downloads +// objects from S3 in concurrent chunks. Pass in additional functional +// options to customize the downloader behavior. Requires a S3 service client +// to make S3 API calls. 
+// +// Example: +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // The S3 client the S3 Downloader will use +// s3Svc := s3.New(sess) +// +// // Create a downloader with the s3 client and default options +// downloader := s3manager.NewDownloaderWithClient(s3Svc) +// +// // Create a downloader with the s3 client and custom options +// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { + return newDownloader(svc, options...) +} + +type maxRetrier interface { + MaxRetries() int +} + +// Download downloads an object in S3 and writes the payload into w using +// concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. +// +// Additional functional options can be provided to configure the individual +// download. These options are copies of the Downloader instance Download is called from. +// Modifying the options will not impact the original Downloader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. +// +// Specifying a Downloader.Concurrency of 1 will cause the Downloader to +// download the parts from S3 sequentially. +// +// If the GetObjectInput's Range value is provided that will cause the downloader +// to perform a single GetObjectInput request for that object's range. This will +// caused the part size, and concurrency configurations to be ignored. +func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...) 
+} + +// DownloadWithContext downloads an object in S3 and writes the payload into w +// using concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. +// +// DownloadWithContext is the same as Download with the additional support for +// Context input parameters. The Context must not be nil. A nil Context will +// cause a panic. Use the Context to add deadlining, timeouts, etc. The +// DownloadWithContext may create sub-contexts for individual underlying +// requests. +// +// Additional functional options can be provided to configure the individual +// download. These options are copies of the Downloader instance Download is +// called from. Modifying the options will not impact the original Downloader +// instance. Use the WithDownloaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this downloader. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. +// +// Specifying a Downloader.Concurrency of 1 will cause the Downloader to +// download the parts from S3 sequentially. +// +// It is safe to call this method concurrently across goroutines. +// +// If the GetObjectInput's Range value is provided that will cause the downloader +// to perform a single GetObjectInput request for that object's range. This will +// caused the part size, and concurrency configurations to be ignored. 
+func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + impl := downloader{w: w, in: input, cfg: d, ctx: ctx} + + for _, option := range options { + option(&impl.cfg) + } + impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) + + if s, ok := d.S3.(maxRetrier); ok { + impl.partBodyMaxRetries = s.MaxRetries() + } + + impl.totalBytes = -1 + if impl.cfg.Concurrency == 0 { + impl.cfg.Concurrency = DefaultDownloadConcurrency + } + + if impl.cfg.PartSize == 0 { + impl.cfg.PartSize = DefaultDownloadPartSize + } + + return impl.download() +} + +// DownloadWithIterator will download a batched amount of objects in S3 and writes them +// to the io.WriterAt specificed in the iterator. +// +// Example: +// svc := s3manager.NewDownloader(session) +// +// fooFile, err := os.Open("/tmp/foo.file") +// if err != nil { +// return err +// } +// +// barFile, err := os.Open("/tmp/bar.file") +// if err != nil { +// return err +// } +// +// objects := []s3manager.BatchDownloadObject { +// { +// Object: &s3.GetObjectInput { +// Bucket: aws.String("bucket"), +// Key: aws.String("foo"), +// }, +// Writer: fooFile, +// }, +// { +// Object: &s3.GetObjectInput { +// Bucket: aws.String("bucket"), +// Key: aws.String("bar"), +// }, +// Writer: barFile, +// }, +// } +// +// iter := &s3manager.DownloadObjectsIterator{Objects: objects} +// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil { +// return err +// } +func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error { + var errs []Error + for iter.Next() { + object := iter.DownloadObject() + if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil { + errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) + } + + if object.After == nil { + continue + } + + if err 
:= object.After(); err != nil { + errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key)) + } + } + + if len(errs) > 0 { + return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs) + } + return nil +} + +// downloader is the implementation structure used internally by Downloader. +type downloader struct { + ctx aws.Context + cfg Downloader + + in *s3.GetObjectInput + w io.WriterAt + + wg sync.WaitGroup + m sync.Mutex + + pos int64 + totalBytes int64 + written int64 + err error + + partBodyMaxRetries int +} + +// download performs the implementation of the object download across ranged +// GETs. +func (d *downloader) download() (n int64, err error) { + // If range is specified fall back to single download of that range + // this enables the functionality of ranged gets with the downloader but + // at the cost of no multipart downloads. + if rng := aws.StringValue(d.in.Range); len(rng) > 0 { + d.downloadRange(rng) + return d.written, d.err + } + + // Spin off first worker to check additional header information + d.getChunk() + + if total := d.getTotalBytes(); total >= 0 { + // Spin up workers + ch := make(chan dlchunk, d.cfg.Concurrency) + + for i := 0; i < d.cfg.Concurrency; i++ { + d.wg.Add(1) + go d.downloadPart(ch) + } + + // Assign work + for d.getErr() == nil { + if d.pos >= total { + break // We're finished queuing chunks + } + + // Queue the next range of bytes to read. + ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} + d.pos += d.cfg.PartSize + } + + // Wait for completion + close(ch) + d.wg.Wait() + } else { + // Checking if we read anything new + for d.err == nil { + d.getChunk() + } + + // We expect a 416 error letting us know we are done downloading the + // total bytes. Since we do not know the content's length, this will + // keep grabbing chunks of data until the range of bytes specified in + // the request is out of range of the content. Once, this happens, a + // 416 should occur. 
+ e, ok := d.err.(awserr.RequestFailure) + if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable { + d.err = nil + } + } + + // Return error + return d.written, d.err +} + +// downloadPart is an individual goroutine worker reading from the ch channel +// and performing a GetObject request on the data with a given byte range. +// +// If this is the first worker, this operation also resolves the total number +// of bytes to be read so that the worker manager knows when it is finished. +func (d *downloader) downloadPart(ch chan dlchunk) { + defer d.wg.Done() + for { + chunk, ok := <-ch + if !ok { + break + } + if d.getErr() != nil { + // Drain the channel if there is an error, to prevent deadlocking + // of download producer. + continue + } + + if err := d.downloadChunk(chunk); err != nil { + d.setErr(err) + } + } +} + +// getChunk grabs a chunk of data from the body. +// Not thread safe. Should only used when grabbing data on a single thread. +func (d *downloader) getChunk() { + if d.getErr() != nil { + return + } + + chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} + d.pos += d.cfg.PartSize + + if err := d.downloadChunk(chunk); err != nil { + d.setErr(err) + } +} + +// downloadRange downloads an Object given the passed in Byte-Range value. +// The chunk used down download the range will be configured for that range. +func (d *downloader) downloadRange(rng string) { + if d.getErr() != nil { + return + } + + chunk := dlchunk{w: d.w, start: d.pos} + // Ranges specified will short circuit the multipart download + chunk.withRange = rng + + if err := d.downloadChunk(chunk); err != nil { + d.setErr(err) + } + + // Update the position based on the amount of data received. 
+ d.pos = d.written +} + +// downloadChunk downloads the chunk from s3 +func (d *downloader) downloadChunk(chunk dlchunk) error { + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + + // Get the next byte range of data + in.Range = aws.String(chunk.ByteRange()) + + var n int64 + var err error + for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + n, err = d.tryDownloadChunk(in, &chunk) + if err == nil { + break + } + // Check if the returned error is an errReadingBody. + // If err is errReadingBody this indicates that an error + // occurred while copying the http response body. + // If this occurs we unwrap the err to set the underlying error + // and attempt any remaining retries. + if bodyErr, ok := err.(*errReadingBody); ok { + err = bodyErr.Unwrap() + } else { + return err + } + + chunk.cur = 0 + logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, + fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d", + aws.StringValue(in.Key), err, retry)) + } + + d.incrWritten(n) + + return err +} + +func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { + cleanup := func() {} + if d.cfg.BufferProvider != nil { + w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) + } + defer cleanup() + + resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) + if err != nil { + return 0, err + } + d.setTotalBytes(resp) // Set total if not yet set. + + n, err := io.Copy(w, resp.Body) + resp.Body.Close() + if err != nil { + return n, &errReadingBody{err: err} + } + + return n, nil +} + +func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { + s, ok := svc.(*s3.S3) + if !ok { + return + } + + if s.Config.Logger == nil { + return + } + + if s.Config.LogLevel.Matches(level) { + s.Config.Logger.Log(msg) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. 
+func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Will extract the object's total bytes from the Content-Range if the file +// will be chunked, or Content-Length. Content-Length is used when the response +// does not include a Content-Range. Meaning the object was not chunked. This +// occurs when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file contents is provided, and + // is not chunked. Use ContentLength instead. + if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). 
+type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 + + // specifies the byte range the chunk should be downloaded with. + withRange string +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). +// +// If a range is specified on the dlchunk the size will be ignored when writing. +// as the total size may not of be known ahead of time. +func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size && len(c.withRange) == 0 { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} + +// ByteRange returns a HTTP Byte-Range header value that should be used by the +// client to request the chunk's range. +func (c *dlchunk) ByteRange() string { + if len(c.withRange) != 0 { + return c.withRange + } + + return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go new file mode 100644 index 00000000000..f6f27fc48a1 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go @@ -0,0 +1,252 @@ +package s3manager + +import ( + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +type byteSlicePool interface { + Get(aws.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + // allocator is defined as a function pointer to allow + // for test cases to instrument custom tracers when allocations + // occur. 
+ allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // pass + } + + select { + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. 
+ // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. This specifically avoids a data race condition + // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. + c := p.capacityChange + + p.mtx.RUnlock() + + select { + case _ = <-c: + p.mtx.RLock() + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } +} + +func (p *maxSlicePool) Put(bs *[]byte) { + p.mtx.RLock() + defer p.mtx.RUnlock() + + if p.max == 0 { + return + } + + select { + case p.slices <- bs: + p.notifyCapacity() + default: + // If the new channel when attempting to add the slice then we drop the slice. + // The logic here is to prevent a deadlock situation if channel is already at max capacity. + // Allows us to reap allocations that are returned and are no longer needed. + } +} + +func (p *maxSlicePool) ModifyCapacity(delta int) { + if delta == 0 { + return + } + + p.mtx.Lock() + defer p.mtx.Unlock() + + p.max += delta + + if p.max == 0 { + p.empty() + return + } + + if p.capacityChange != nil { + close(p.capacityChange) + } + p.capacityChange = make(chan struct{}, p.max) + + origAllocations := p.allocations + p.allocations = make(chan struct{}, p.max) + + newAllocs := len(origAllocations) + delta + for i := 0; i < newAllocs; i++ { + p.allocations <- struct{}{} + } + + if origAllocations != nil { + close(origAllocations) + } + + origSlices := p.slices + p.slices = make(chan *[]byte, p.max) + if origSlices == nil { + return + } + + close(origSlices) + for bs := range origSlices { + select { + case p.slices <- bs: + default: + // If the new channel blocks while adding slices from the old channel + // then we drop the slice. The logic here is to prevent a deadlock situation + // if the new channel has a smaller capacity then the old. 
+ } + } +} + +func (p *maxSlicePool) notifyCapacity() { + select { + case p.capacityChange <- struct{}{}: + default: + // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized + // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. + } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go new file mode 100644 index 00000000000..f62e1a45eef --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go @@ -0,0 +1,65 @@ +package s3manager + +import ( + "io" + "sync" +) + +// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker +type 
ReadSeekerWriteTo interface { + io.ReadSeeker + io.WriterTo +} + +// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt +// implementation. +type BufferedReadSeekerWriteTo struct { + *BufferedReadSeeker +} + +// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or +// an error occurs. Returns the number of bytes written and any error encountered during the write. +func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { + return io.Copy(writer, b.BufferedReadSeeker) +} + +// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker +type ReadSeekerWriteToProvider interface { + GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) +} + +// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse +// []byte slices for buffering parts in memory +type BufferedReadSeekerWriteToPool struct { + pool sync.Pool +} + +// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create +// a pool of reusable buffers . If size is less then < 64 KiB then the buffer +// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom +// respectively will default to copying 32 KiB. +func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { + if size < 65536 { + size = 65536 + } + + return &BufferedReadSeekerWriteToPool{ + pool: sync.Pool{New: func() interface{} { + return make([]byte, size) + }}, + } +} + +// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. +// The provided cleanup must be called after operations have been completed on the +// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. 
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { + buffer := p.pool.Get().([]byte) + + r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} + cleanup = func() { + p.pool.Put(buffer) + } + + return r, cleanup +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go new file mode 100644 index 00000000000..8770d404119 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -0,0 +1,777 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. +const MaxUploadParts = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. +const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned +// will satisfy this interface when a multi part upload failed to upload all +// chucks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. 
+// +// Example: +// +// u := s3manager.NewUploader(opts) +// output, err := u.upload(input) +// if err != nil { +// if multierr, ok := err.(s3manager.MultiUploadFailure); ok { +// // Process error and its associated uploadID +// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) +// } else { +// // Process error generically +// fmt.Println("Error:", err.Error()) +// } +// } +// +type MultiUploadFailure interface { + awserr.Error + + // Returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// So that the Error interface type can be included as an anonymous field +// in the multiUploadError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A multiUploadError wraps the upload ID of a failed s3 multipart upload. +// Composed of BaseError for code, message, and original error +// +// Should be used for an error that occurred failing a S3 multipart upload, +// and a upload ID is available. If an uploadID is not available a more relevant +type multiUploadError struct { + awsError + + // ID for multipart upload which failed. + uploadID string +} + +// Error returns the string representation of the error. +// +// See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. +func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. 
+ Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string +} + +// WithUploaderRequestOptions appends to the Uploader's API request options. +func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) { + return func(u *Uploader) { + u.RequestOptions = append(u.RequestOptions, opts...) + } +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultUploadPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel per call to Upload when + // sending parts. If this is set to zero, the DefaultUploadConcurrency value + // will be used. + // + // The concurrency pool is not shared between calls to Upload. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3. + // Will be used to calculate the partsize of the object to be uploaded. 
+ // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file + // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). + // + // MaxUploadParts must not be used to limit the total number of bytes uploaded. + // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) + // instead. An io.LimitReader is helpful when uploading an unbounded reader + // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned + // error must be used to signal end of stream. + // + // Defaults to package const's MaxUploadParts value. + MaxUploadParts int + + // The client to use when uploading to S3. + S3 s3iface.S3API + + // List of request options that will be passed down to individual API + // operation requests made by the uploader. + RequestOptions []request.Option + + // Defines the buffer strategy used when uploading a part + BufferProvider ReadSeekerWriteToProvider + + // partPool allows for the re-usage of streaming payload part buffers between upload calls + partPool byteSlicePool +} + +// NewUploader creates a new Uploader instance to upload objects to S3. Pass In +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create a S3 service client. The session.Session +// satisfies the client.ConfigProvider interface. +// +// Example: +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// // Create an uploader with the session and custom options +// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + return newUploader(s3.New(c), options...) 
+} + +func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: client, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + BufferProvider: defaultUploadBufferProvider(), + } + + for _, option := range options { + option(u) + } + + u.partPool = newByteSlicePool(u.PartSize) + + return u +} + +// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires +// a S3 service client to make S3 API calls. +// +// Example: +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // S3 service client the Upload manager will use. +// s3Svc := s3.New(sess) +// +// // Create an uploader with S3 client and default options +// uploader := s3manager.NewUploaderWithClient(s3Svc) +// +// // Create an uploader with S3 client and custom options +// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { + return newUploader(svc, options...) +} + +// Upload uploads an object to S3, intelligently buffering large files into +// smaller chunks and sending them in parallel across multiple goroutines. You +// can configure the buffer size and concurrency through the Uploader's parameters. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// Use the WithUploaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this uploader. 
+// +// It is safe to call this method concurrently across goroutines. +// +// Example: +// // Upload input parameters +// upParams := &s3manager.UploadInput{ +// Bucket: &bucketName, +// Key: &keyName, +// Body: file, +// } +// +// // Perform an upload. +// result, err := uploader.Upload(upParams) +// +// // Perform upload with options different than the those in the Uploader. +// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { +// u.PartSize = 10 * 1024 * 1024 // 10MB part size +// u.LeavePartsOnError = true // Don't delete the parts if the upload fails. +// }) +func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { + return u.UploadWithContext(aws.BackgroundContext(), input, options...) +} + +// UploadWithContext uploads an object to S3, intelligently buffering large +// files into smaller chunks and sending them in parallel across multiple +// goroutines. You can configure the buffer size and concurrency through the +// Uploader's parameters. +// +// UploadWithContext is the same as Upload with the additional support for +// Context input parameters. The Context must not be nil. A nil Context will +// cause a panic. Use the context to add deadlining, timeouts, etc. The +// UploadWithContext may create sub-contexts for individual underlying requests. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// Use the WithUploaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this uploader. +// +// It is safe to call this method concurrently across goroutines. 
+func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) { + i := uploader{in: input, cfg: u, ctx: ctx} + + for _, opt := range opts { + opt(&i.cfg) + } + + i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) + + return i.upload() +} + +// UploadWithIterator will upload a batched amount of objects to S3. This operation uses +// the iterator pattern to know which object to upload next. Since this is an interface this +// allows for custom defined functionality. +// +// Example: +// svc:= s3manager.NewUploader(sess) +// +// objects := []BatchUploadObject{ +// { +// Object: &s3manager.UploadInput { +// Key: aws.String("key"), +// Bucket: aws.String("bucket"), +// }, +// }, +// } +// +// iter := &s3manager.UploadObjectsIterator{Objects: objects} +// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil { +// return err +// } +func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error { + var errs []Error + for iter.Next() { + object := iter.UploadObject() + if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil { + s3Err := Error{ + OrigErr: err, + Bucket: object.Object.Bucket, + Key: object.Object.Key, + } + + errs = append(errs, s3Err) + } + + if object.After == nil { + continue + } + + if err := object.After(); err != nil { + s3Err := Error{ + OrigErr: err, + Bucket: object.Object.Bucket, + Key: object.Object.Key, + } + + errs = append(errs, s3Err) + } + } + + if len(errs) > 0 { + return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs) + } + return nil +} + +// internal structure to manage an upload to S3. 
+type uploader struct { + ctx aws.Context + cfg Uploader + + in *UploadInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload. +func (u *uploader) upload() (*UploadOutput, error) { + if err := u.init(); err != nil { + return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err) + } + defer u.cfg.partPool.Close() + + if u.cfg.PartSize < MinUploadPartSize { + msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) + return nil, awserr.New("ConfigError", msg, nil) + } + + // Do one read to determine if we have more than one part + reader, _, cleanup, err := u.nextReader() + if err == io.EOF { // single part + return u.singlePart(reader, cleanup) + } else if err != nil { + cleanup() + return nil, awserr.New("ReadRequestBody", "read upload data failed", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(reader, cleanup) +} + +// init will initialize all default options. 
+func (u *uploader) init() error { + if u.cfg.Concurrency == 0 { + u.cfg.Concurrency = DefaultUploadConcurrency + } + if u.cfg.PartSize == 0 { + u.cfg.PartSize = DefaultUploadPartSize + } + if u.cfg.MaxUploadParts == 0 { + u.cfg.MaxUploadParts = MaxUploadParts + } + + // Try to get the total size for some optimizations + if err := u.initSize(); err != nil { + return err + } + + // If PartSize was changed or partPool was never setup then we need to allocated a new pool + // so that we return []byte slices of the correct size + poolCap := u.cfg.Concurrency + 1 + if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { + u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) + u.cfg.partPool.ModifyCapacity(poolCap) + } else { + u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} + u.cfg.partPool.ModifyCapacity(poolCap) + } + + return nil +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. +func (u *uploader) initSize() error { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + n, err := aws.SeekerLen(r) + if err != nil { + return err + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 + } + } + + return nil +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. 
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { + switch r := u.in.Body.(type) { + case readerAtSeeker: + var err error + + n := u.cfg.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + + if bytesLeft <= u.cfg.PartSize { + err = io.EOF + n = bytesLeft + } + } + + var ( + reader io.ReadSeeker + cleanup func() + ) + + reader = io.NewSectionReader(r, u.readerPos, n) + if u.cfg.BufferProvider != nil { + reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) + } else { + cleanup = func() {} + } + + u.readerPos += n + + return reader, int(n), cleanup, err + + default: + part, err := u.cfg.partPool.Get(u.ctx) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFillBuf(r, *part) + u.readerPos += int64(n) + + cleanup := func() { + u.cfg.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err + } +} + +func readFillBuf(r io.Reader, b []byte) (offset int, err error) { + for offset < len(b) && err == nil { + var n int + n, err = r.Read(b[offset:]) + offset += n + } + + return offset, err +} + +// singlePart contains upload logic for uploading a single chunk via +// a regular PutObject request. Multipart requests require at least two +// parts, or at least 5MB of data. +func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + defer cleanup() + + params := &s3.PutObjectInput{} + awsutil.Copy(params, u.in) + params.Body = r + + // Need to use request form because URL generated in request is + // used in return. + req, out := u.cfg.S3.PutObjectRequest(params) + req.SetContext(u.ctx) + req.ApplyOptions(u.cfg.RequestOptions...) + if err := req.Send(); err != nil { + return nil, err + } + + url := req.HTTPRequest.URL.String() + return &UploadOutput{ + Location: url, + VersionID: out.VersionId, + }, nil +} + +// internal structure to manage a specific multipart upload to S3. 
+type multiuploader struct { + *uploader + wg sync.WaitGroup + m sync.Mutex + err error + uploadID string + parts completedParts +} + +// keeps track of a single chunk of data being sent to S3. +type chunk struct { + buf io.ReadSeeker + num int64 + cleanup func() +} + +// completedParts is a wrapper to make parts sortable by their part number, +// since S3 required this list to be sent in sorted order. +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +// upload will perform a multipart upload using the firstBuf buffer containing +// the first chunk of data. +func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + params := &s3.CreateMultipartUploadInput{} + awsutil.Copy(params, u.in) + + // Create the multipart + resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) 
+ if err != nil { + cleanup() + return nil, err + } + u.uploadID = *resp.UploadId + + // Create the workers + ch := make(chan chunk, u.cfg.Concurrency) + for i := 0; i < u.cfg.Concurrency; i++ { + u.wg.Add(1) + go u.readChunk(ch) + } + + // Send part 1 to the workers + var num int64 = 1 + ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} + + // Read and queue the rest of the parts + for u.geterr() == nil && err == nil { + var ( + reader io.ReadSeeker + nextChunkLen int + ok bool + ) + + reader, nextChunkLen, cleanup, err = u.nextReader() + ok, err = u.shouldContinue(num, nextChunkLen, err) + if !ok { + cleanup() + if err != nil { + u.seterr(err) + } + break + } + + num++ + + ch <- chunk{buf: reader, num: num, cleanup: cleanup} + } + + // Close the channel, wait for workers, and complete upload + close(ch) + u.wg.Wait() + complete := u.complete() + + if err := u.geterr(); err != nil { + return nil, &multiUploadError{ + awsError: awserr.New( + "MultipartUpload", + "upload multipart failed", + err), + uploadID: u.uploadID, + } + } + + // Create a presigned URL of the S3 Get Object in order to have parity with + // single part upload. + getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + }) + getReq.Config.Credentials = credentials.AnonymousCredentials + getReq.SetContext(u.ctx) + uploadLocation, _, _ := getReq.PresignRequest(1) + + return &UploadOutput{ + Location: uploadLocation, + VersionID: complete.VersionId, + UploadID: u.uploadID, + }, nil +} + +func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) { + if err != nil && err != io.EOF { + return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err) + } + + if nextChunkLen == 0 { + // No need to upload empty part, if file was empty to start + // with empty single part would of been created and never + // started multipart upload. 
+ return false, nil + } + + part++ + // This upload exceeded maximum number of supported parts, error now. + if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) { + var msg string + if part > int64(u.cfg.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.cfg.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + return false, awserr.New("TotalPartsExceeded", msg, nil) + } + + return true, err +} + +// readChunk runs in worker goroutines to pull chunks off of the ch channel +// and send() them as UploadPart requests. +func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + + data.cleanup() + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information. +func (u *multiuploader) send(c chunk) error { + params := &s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + UploadId: &u.uploadID, + SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, + SSECustomerKey: u.in.SSECustomerKey, + PartNumber: &c.num, + } + + resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...) 
+ if err != nil { + return err + } + + n := c.num + completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.cfg.LeavePartsOnError { + return + } + + params := &s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + } + _, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) + if err != nil { + logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) + } +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. + sort.Sort(u.parts) + + params := &s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + } + resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) 
+ if err != nil { + u.seterr(err) + u.fail() + } + + return resp +} + +type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go new file mode 100644 index 00000000000..6cac26fa83d --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -0,0 +1,207 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3manager + +import ( + "io" + "time" +) + +// UploadInput provides the input parameters for uploading a stream or buffer +// to an object in an Amazon S3 bucket. This type is similar to the s3 +// package's PutObjectInput with the exception that the Body member is an +// io.Reader instead of an io.ReadSeeker. +type UploadInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // + // This action is not supported by Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The readable body payload to send to S3. + Body io.Reader + + // The bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with a PUT operation doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. 
For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. 
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetrical customer managed customer master key (CMK) that was used for + // the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the symmetric customer managed AWS KMS CMK that will be used for + // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not + // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS + // managed CMK in AWS to protect the data. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, + // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 Service Developer Guide. 
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go new file mode 100644 index 00000000000..765dc07ca32 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go @@ -0,0 +1,75 @@ +package s3manager + +import ( + "bufio" + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom +type WriterReadFrom interface { + io.Writer + io.ReaderFrom +} + +// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer +type WriterReadFromProvider interface { + GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) +} + +type bufferedWriter interface { + WriterReadFrom + Flush() error + Reset(io.Writer) +} + +type bufferedReadFrom struct { + bufferedWriter +} + +func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := b.bufferedWriter.ReadFrom(r) + if flushErr := b.Flush(); flushErr != nil && err == nil { + err = flushErr + } + return n, err +} + +// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool +// to manage allocation and reuse of *bufio.Writer structures. +type PooledBufferedReadFromProvider struct { + pool sync.Pool +} + +// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider +// Size is used to control the size of the underlying *bufio.Writer created for +// calls to GetReadFrom. 
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { + if size < int(32*sdkio.KibiByte) { + size = int(64 * sdkio.KibiByte) + } + + return &PooledBufferedReadFromProvider{ + pool: sync.Pool{ + New: func() interface{} { + return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} + }, + }, + } +} + +// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom +// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom +// has been completed in order to allow the reuse of the *bufio.Writer +func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { + buffer := p.pool.Get().(*bufferedReadFrom) + buffer.Reset(writer) + r = buffer + cleanup = func() { + buffer.Reset(nil) // Reset to nil writer to release reference + p.pool.Put(buffer) + } + return r, cleanup +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 00000000000..b4c07b4d47e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,103 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. 
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 00000000000..57a0bd92ca3 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,84 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } +} + +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() + } + + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, 
keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatiablity where user just set the header value instead + // of using the API parameter, or setting the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return + } + + // In backwards compatible, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 00000000000..247770e4c88 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,42 @@ +package s3 + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { + r.Error = nil + return + } + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = 
http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 00000000000..6eecf669107 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + msg := fmt.Sprintf( + "incorrect region, the bucket is not in '%s' region at endpoint '%s'", + aws.StringValue(r.Config.Region), + aws.StringValue(r.Config.Endpoint), + ) + if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { + msg += fmt.Sprintf(", bucket is in '%s' region", v) + } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Attempt to parse error from body if it is known + var errResp xmlErrorResponse + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) + } else { + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + } + + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } + + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + errorMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Fallback to status code converted to message if still no error code + if len(errResp.Code) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errResp.Code, errResp.Message, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formated error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. 
+type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debug, and contacting support + HostID() string +} + +// s3unmarshalXMLError is s3 specific xml error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlUtil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up. +// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return err +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 00000000000..2596c694b50 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilBucketExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go new file mode 100644 index 00000000000..b89f513d5cc --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go @@ -0,0 +1,6577 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package secretsmanager + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opCancelRotateSecret = "CancelRotateSecret" + +// CancelRotateSecretRequest generates a "aws/request.Request" representing the +// client's request for the CancelRotateSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CancelRotateSecret for more information on using the CancelRotateSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelRotateSecretRequest method. +// req, resp := client.CancelRotateSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/CancelRotateSecret +func (c *SecretsManager) CancelRotateSecretRequest(input *CancelRotateSecretInput) (req *request.Request, output *CancelRotateSecretOutput) { + op := &request.Operation{ + Name: opCancelRotateSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelRotateSecretInput{} + } + + output = &CancelRotateSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelRotateSecret API operation for AWS Secrets Manager. +// +// Disables automatic scheduled rotation and cancels the rotation of a secret +// if currently in progress. +// +// To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays +// set to a value greater than 0. This immediately rotates your secret and then +// enables the automatic schedule. +// +// If you cancel a rotation while in progress, it can leave the VersionStage +// labels in an unexpected state. Depending on the step of the rotation in progress, +// you might need to remove the staging label AWSPENDING from the partially +// created version, specified by the VersionId response value. You should also +// evaluate the partially rotated new version to see if it should be deleted, +// which you can do by removing all staging labels from the new version VersionStage +// field. 
+// +// To successfully start a rotation, the staging label AWSPENDING must be in +// one of the following states: +// +// * Not attached to any version at all +// +// * Attached to the same version as the staging label AWSCURRENT +// +// If the staging label AWSPENDING attached to a different version than the +// version with AWSCURRENT then the attempt to rotate fails. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:CancelRotateSecret +// +// Related operations +// +// * To configure rotation for a secret or to manually trigger a rotation, +// use RotateSecret. +// +// * To get the rotation configuration details for a secret, use DescribeSecret. +// +// * To list all of the currently available secrets, use ListSecrets. +// +// * To list all of the versions currently associated with a secret, use +// ListSecretVersionIds. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation CancelRotateSecret for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/CancelRotateSecret +func (c *SecretsManager) CancelRotateSecret(input *CancelRotateSecretInput) (*CancelRotateSecretOutput, error) { + req, out := c.CancelRotateSecretRequest(input) + return out, req.Send() +} + +// CancelRotateSecretWithContext is the same as CancelRotateSecret with the addition of +// the ability to pass a context and additional request options. +// +// See CancelRotateSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) CancelRotateSecretWithContext(ctx aws.Context, input *CancelRotateSecretInput, opts ...request.Option) (*CancelRotateSecretOutput, error) { + req, out := c.CancelRotateSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSecret = "CreateSecret" + +// CreateSecretRequest generates a "aws/request.Request" representing the +// client's request for the CreateSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSecret for more information on using the CreateSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSecretRequest method. 
+// req, resp := client.CreateSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/CreateSecret +func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *request.Request, output *CreateSecretOutput) { + op := &request.Operation{ + Name: opCreateSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSecretInput{} + } + + output = &CreateSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSecret API operation for AWS Secrets Manager. +// +// Creates a new secret. A secret in Secrets Manager consists of both the protected +// secret data and the important information needed to manage the secret. +// +// Secrets Manager stores the encrypted secret data in one of a collection of +// "versions" associated with the secret. Each version contains a copy of the +// encrypted secret data. Each version is associated with one or more "staging +// labels" that identify where the version is in the rotation cycle. The SecretVersionsToStages +// field of the secret contains the mapping of staging labels to the active +// versions of the secret. Versions without a staging label are considered deprecated +// and not included in the list. +// +// You provide the secret data to be encrypted by putting text in either the +// SecretString parameter or binary data in the SecretBinary parameter, but +// not both. If you include SecretString or SecretBinary then Secrets Manager +// also creates an initial secret version and automatically attaches the staging +// label AWSCURRENT to the new version. 
+// +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. +// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:CreateSecret +// +// * kms:GenerateDataKey - needed only if you use a customer-managed AWS +// KMS key to encrypt the secret. You do not need this permission to use +// the account default AWS managed CMK for Secrets Manager. +// +// * kms:Decrypt - needed only if you use a customer-managed AWS KMS key +// to encrypt the secret. 
You do not need this permission to use the account +// default AWS managed CMK for Secrets Manager. +// +// * secretsmanager:TagResource - needed only if you include the Tags parameter. +// +// Related operations +// +// * To delete a secret, use DeleteSecret. +// +// * To modify an existing secret, use UpdateSecret. +// +// * To create a new version of a secret, use PutSecretValue. +// +// * To retrieve the encrypted secure string and secure binary values, use +// GetSecretValue. +// +// * To retrieve all other details for a secret, use DescribeSecret. This +// does not include the encrypted secure string and secure binary values. +// +// * To retrieve the list of secret versions associated with the current +// secret, use DescribeSecret and examine the SecretVersionsToStages response +// value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation CreateSecret for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * LimitExceededException +// The request failed because it would exceed one of the Secrets Manager internal +// limits. +// +// * EncryptionFailure +// Secrets Manager can't encrypt the protected secret text using the provided +// KMS key. 
Check that the customer master key (CMK) is available, enabled, +// and not in an invalid state. For more information, see How Key State Affects +// Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). +// +// * ResourceExistsException +// A resource with the ID you requested already exists. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * MalformedPolicyDocumentException +// The policy document that you provided isn't valid. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * PreconditionNotMetException +// The request failed because you did not complete all the prerequisite steps. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/CreateSecret +func (c *SecretsManager) CreateSecret(input *CreateSecretInput) (*CreateSecretOutput, error) { + req, out := c.CreateSecretRequest(input) + return out, req.Send() +} + +// CreateSecretWithContext is the same as CreateSecret with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) CreateSecretWithContext(ctx aws.Context, input *CreateSecretInput, opts ...request.Option) (*CreateSecretOutput, error) { + req, out := c.CreateSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteResourcePolicy = "DeleteResourcePolicy" + +// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourcePolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteResourcePolicyRequest method. +// req, resp := client.DeleteResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DeleteResourcePolicy +func (c *SecretsManager) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) { + op := &request.Operation{ + Name: opDeleteResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourcePolicyInput{} + } + + output = &DeleteResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteResourcePolicy API operation for AWS Secrets Manager. +// +// Deletes the resource-based permission policy attached to the secret. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:DeleteResourcePolicy +// +// Related operations +// +// * To attach a resource policy to a secret, use PutResourcePolicy. +// +// * To retrieve the current resource-based policy that's attached to a secret, +// use GetResourcePolicy. +// +// * To list all of the currently available secrets, use ListSecrets. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation DeleteResourcePolicy for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DeleteResourcePolicy +func (c *SecretsManager) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + return out, req.Send() +} + +// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) { + req, out := c.DeleteResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeleteSecret = "DeleteSecret" + +// DeleteSecretRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSecret for more information on using the DeleteSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSecretRequest method. +// req, resp := client.DeleteSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DeleteSecret +func (c *SecretsManager) DeleteSecretRequest(input *DeleteSecretInput) (req *request.Request, output *DeleteSecretOutput) { + op := &request.Operation{ + Name: opDeleteSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSecretInput{} + } + + output = &DeleteSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteSecret API operation for AWS Secrets Manager. +// +// Deletes an entire secret and all of its versions. You can optionally include +// a recovery window during which you can restore the secret. If you don't specify +// a recovery window value, the operation defaults to 30 days. Secrets Manager +// attaches a DeletionDate stamp to the secret that specifies the end of the +// recovery window. At the end of the recovery window, Secrets Manager deletes +// the secret permanently. 
+// +// At any time before recovery window ends, you can use RestoreSecret to remove +// the DeletionDate and cancel the deletion of the secret. +// +// You cannot access the encrypted secret information in any secret that is +// scheduled for deletion. If you need to access that information, you must +// cancel the deletion with RestoreSecret and then retrieve the information. +// +// * There is no explicit operation to delete a version of a secret. Instead, +// remove all staging labels from the VersionStage field of a version. That +// marks the version as deprecated and allows Secrets Manager to delete it +// as needed. Versions that do not have any staging labels do not show up +// in ListSecretVersionIds unless you specify IncludeDeprecated. +// +// * The permanent secret deletion at the end of the waiting period is performed +// as a background task with low priority. There is no guarantee of a specific +// time after the recovery window for the actual delete operation to occur. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:DeleteSecret +// +// Related operations +// +// * To create a secret, use CreateSecret. +// +// * To cancel deletion of a version of a secret before the recovery window +// has expired, use RestoreSecret. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation DeleteSecret for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. 
+// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DeleteSecret +func (c *SecretsManager) DeleteSecret(input *DeleteSecretInput) (*DeleteSecretOutput, error) { + req, out := c.DeleteSecretRequest(input) + return out, req.Send() +} + +// DeleteSecretWithContext is the same as DeleteSecret with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) DeleteSecretWithContext(ctx aws.Context, input *DeleteSecretInput, opts ...request.Option) (*DeleteSecretOutput, error) { + req, out := c.DeleteSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeSecret = "DescribeSecret" + +// DescribeSecretRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSecret for more information on using the DescribeSecret +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSecretRequest method. +// req, resp := client.DescribeSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DescribeSecret +func (c *SecretsManager) DescribeSecretRequest(input *DescribeSecretInput) (req *request.Request, output *DescribeSecretOutput) { + op := &request.Operation{ + Name: opDescribeSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecretInput{} + } + + output = &DescribeSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSecret API operation for AWS Secrets Manager. +// +// Retrieves the details of a secret. It does not include the encrypted fields. +// Secrets Manager only returns fields populated with a value in the response. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:DescribeSecret +// +// Related operations +// +// * To create a secret, use CreateSecret. +// +// * To modify a secret, use UpdateSecret. +// +// * To retrieve the encrypted secret information in a version of the secret, +// use GetSecretValue. +// +// * To list all of the secrets in the AWS account, use ListSecrets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation DescribeSecret for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. 
+// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/DescribeSecret +func (c *SecretsManager) DescribeSecret(input *DescribeSecretInput) (*DescribeSecretOutput, error) { + req, out := c.DescribeSecretRequest(input) + return out, req.Send() +} + +// DescribeSecretWithContext is the same as DescribeSecret with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) DescribeSecretWithContext(ctx aws.Context, input *DescribeSecretInput, opts ...request.Option) (*DescribeSecretOutput, error) { + req, out := c.DescribeSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRandomPassword = "GetRandomPassword" + +// GetRandomPasswordRequest generates a "aws/request.Request" representing the +// client's request for the GetRandomPassword operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRandomPassword for more information on using the GetRandomPassword +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRandomPasswordRequest method. 
+// req, resp := client.GetRandomPasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetRandomPassword +func (c *SecretsManager) GetRandomPasswordRequest(input *GetRandomPasswordInput) (req *request.Request, output *GetRandomPasswordOutput) { + op := &request.Operation{ + Name: opGetRandomPassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRandomPasswordInput{} + } + + output = &GetRandomPasswordOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRandomPassword API operation for AWS Secrets Manager. +// +// Generates a random password of the specified complexity. This operation is +// intended for use in the Lambda rotation function. Per best practice, we recommend +// that you specify the maximum length and include every character type that +// the system you are generating a password for can support. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:GetRandomPassword +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation GetRandomPassword for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. 
+// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetRandomPassword +func (c *SecretsManager) GetRandomPassword(input *GetRandomPasswordInput) (*GetRandomPasswordOutput, error) { + req, out := c.GetRandomPasswordRequest(input) + return out, req.Send() +} + +// GetRandomPasswordWithContext is the same as GetRandomPassword with the addition of +// the ability to pass a context and additional request options. +// +// See GetRandomPassword for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) GetRandomPasswordWithContext(ctx aws.Context, input *GetRandomPasswordInput, opts ...request.Option) (*GetRandomPasswordOutput, error) { + req, out := c.GetRandomPasswordRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetResourcePolicy = "GetResourcePolicy" + +// GetResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetResourcePolicy for more information on using the GetResourcePolicy +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetResourcePolicyRequest method. +// req, resp := client.GetResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetResourcePolicy +func (c *SecretsManager) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) { + op := &request.Operation{ + Name: opGetResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetResourcePolicyInput{} + } + + output = &GetResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetResourcePolicy API operation for AWS Secrets Manager. +// +// Retrieves the JSON text of the resource-based policy document attached to +// the specified secret. The JSON request string input and response output displays +// formatted code with white space and line breaks for better readability. Submit +// your input as a single line JSON string. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:GetResourcePolicy +// +// Related operations +// +// * To attach a resource policy to a secret, use PutResourcePolicy. +// +// * To delete the resource-based policy attached to a secret, use DeleteResourcePolicy. +// +// * To list all of the currently available secrets, use ListSecrets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation GetResourcePolicy for usage and error information. 
+// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetResourcePolicy +func (c *SecretsManager) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) + return out, req.Send() +} + +// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) { + req, out := c.GetResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSecretValue = "GetSecretValue" + +// GetSecretValueRequest generates a "aws/request.Request" representing the +// client's request for the GetSecretValue operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSecretValue for more information on using the GetSecretValue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSecretValueRequest method. +// req, resp := client.GetSecretValueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetSecretValue +func (c *SecretsManager) GetSecretValueRequest(input *GetSecretValueInput) (req *request.Request, output *GetSecretValueOutput) { + op := &request.Operation{ + Name: opGetSecretValue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSecretValueInput{} + } + + output = &GetSecretValueOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSecretValue API operation for AWS Secrets Manager. +// +// Retrieves the contents of the encrypted fields SecretString or SecretBinary +// from the specified version of a secret, whichever contains content. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:GetSecretValue +// +// * kms:Decrypt - required only if you use a customer-managed AWS KMS key +// to encrypt the secret. You do not need this permission to use the account's +// default AWS managed CMK for Secrets Manager. +// +// Related operations +// +// * To create a new version of the secret with different encrypted information, +// use PutSecretValue. +// +// * To retrieve the non-encrypted details for the secret, use DescribeSecret. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation GetSecretValue for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * DecryptionFailure +// Secrets Manager can't decrypt the protected secret text using the provided +// KMS key. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/GetSecretValue +func (c *SecretsManager) GetSecretValue(input *GetSecretValueInput) (*GetSecretValueOutput, error) { + req, out := c.GetSecretValueRequest(input) + return out, req.Send() +} + +// GetSecretValueWithContext is the same as GetSecretValue with the addition of +// the ability to pass a context and additional request options. +// +// See GetSecretValue for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SecretsManager) GetSecretValueWithContext(ctx aws.Context, input *GetSecretValueInput, opts ...request.Option) (*GetSecretValueOutput, error) { + req, out := c.GetSecretValueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSecretVersionIds = "ListSecretVersionIds" + +// ListSecretVersionIdsRequest generates a "aws/request.Request" representing the +// client's request for the ListSecretVersionIds operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSecretVersionIds for more information on using the ListSecretVersionIds +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSecretVersionIdsRequest method. 
+// req, resp := client.ListSecretVersionIdsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ListSecretVersionIds +func (c *SecretsManager) ListSecretVersionIdsRequest(input *ListSecretVersionIdsInput) (req *request.Request, output *ListSecretVersionIdsOutput) { + op := &request.Operation{ + Name: opListSecretVersionIds, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSecretVersionIdsInput{} + } + + output = &ListSecretVersionIdsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSecretVersionIds API operation for AWS Secrets Manager. +// +// Lists all of the versions attached to the specified secret. The output does +// not include the SecretString or SecretBinary fields. By default, the list +// includes only versions that have at least one staging label in VersionStage +// attached. +// +// Always check the NextToken response parameter when calling any of the List* +// operations. These operations can occasionally return an empty or shorter +// than expected list of results even when there more results become available. +// When this happens, the NextToken response parameter contains a value to pass +// to the next call to the same API to request the next part of the list. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:ListSecretVersionIds +// +// Related operations +// +// * To list the secrets in an account, use ListSecrets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation ListSecretVersionIds for usage and error information. +// +// Returned Error Types: +// * InvalidNextTokenException +// You provided an invalid NextToken value. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ListSecretVersionIds +func (c *SecretsManager) ListSecretVersionIds(input *ListSecretVersionIdsInput) (*ListSecretVersionIdsOutput, error) { + req, out := c.ListSecretVersionIdsRequest(input) + return out, req.Send() +} + +// ListSecretVersionIdsWithContext is the same as ListSecretVersionIds with the addition of +// the ability to pass a context and additional request options. +// +// See ListSecretVersionIds for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ListSecretVersionIdsWithContext(ctx aws.Context, input *ListSecretVersionIdsInput, opts ...request.Option) (*ListSecretVersionIdsOutput, error) { + req, out := c.ListSecretVersionIdsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSecretVersionIdsPages iterates over the pages of a ListSecretVersionIds operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSecretVersionIds method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSecretVersionIds operation. 
+// pageNum := 0 +// err := client.ListSecretVersionIdsPages(params, +// func(page *secretsmanager.ListSecretVersionIdsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecretsManager) ListSecretVersionIdsPages(input *ListSecretVersionIdsInput, fn func(*ListSecretVersionIdsOutput, bool) bool) error { + return c.ListSecretVersionIdsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSecretVersionIdsPagesWithContext same as ListSecretVersionIdsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ListSecretVersionIdsPagesWithContext(ctx aws.Context, input *ListSecretVersionIdsInput, fn func(*ListSecretVersionIdsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSecretVersionIdsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSecretVersionIdsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSecretVersionIdsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListSecrets = "ListSecrets" + +// ListSecretsRequest generates a "aws/request.Request" representing the +// client's request for the ListSecrets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListSecrets for more information on using the ListSecrets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSecretsRequest method. +// req, resp := client.ListSecretsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ListSecrets +func (c *SecretsManager) ListSecretsRequest(input *ListSecretsInput) (req *request.Request, output *ListSecretsOutput) { + op := &request.Operation{ + Name: opListSecrets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSecretsInput{} + } + + output = &ListSecretsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSecrets API operation for AWS Secrets Manager. +// +// Lists all of the secrets that are stored by Secrets Manager in the AWS account. +// To list the versions currently stored for a specific secret, use ListSecretVersionIds. +// The encrypted fields SecretString and SecretBinary are not included in the +// output. To get that information, call the GetSecretValue operation. +// +// Always check the NextToken response parameter when calling any of the List* +// operations. These operations can occasionally return an empty or shorter +// than expected list of results even when there more results become available. +// When this happens, the NextToken response parameter contains a value to pass +// to the next call to the same API to request the next part of the list. 
+// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:ListSecrets +// +// Related operations +// +// * To list the versions attached to a secret, use ListSecretVersionIds. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation ListSecrets for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidNextTokenException +// You provided an invalid NextToken value. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ListSecrets +func (c *SecretsManager) ListSecrets(input *ListSecretsInput) (*ListSecretsOutput, error) { + req, out := c.ListSecretsRequest(input) + return out, req.Send() +} + +// ListSecretsWithContext is the same as ListSecrets with the addition of +// the ability to pass a context and additional request options. +// +// See ListSecrets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ListSecretsWithContext(ctx aws.Context, input *ListSecretsInput, opts ...request.Option) (*ListSecretsOutput, error) { + req, out := c.ListSecretsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSecretsPages iterates over the pages of a ListSecrets operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListSecrets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSecrets operation. +// pageNum := 0 +// err := client.ListSecretsPages(params, +// func(page *secretsmanager.ListSecretsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecretsManager) ListSecretsPages(input *ListSecretsInput, fn func(*ListSecretsOutput, bool) bool) error { + return c.ListSecretsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSecretsPagesWithContext same as ListSecretsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ListSecretsPagesWithContext(ctx aws.Context, input *ListSecretsInput, fn func(*ListSecretsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSecretsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSecretsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSecretsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutResourcePolicy = "PutResourcePolicy" + +// PutResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutResourcePolicy for more information on using the PutResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutResourcePolicyRequest method. +// req, resp := client.PutResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/PutResourcePolicy +func (c *SecretsManager) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { + op := &request.Operation{ + Name: opPutResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutResourcePolicyInput{} + } + + output = &PutResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutResourcePolicy API operation for AWS Secrets Manager. +// +// Attaches the contents of the specified resource-based permission policy to +// a secret. A resource-based policy is optional. Alternatively, you can use +// IAM identity-based policies that specify the secret's Amazon Resource Name +// (ARN) in the policy statement's Resources element. You can also use a combination +// of both identity-based and resource-based policies. The affected users and +// roles receive the permissions that are permitted by all of the relevant policies. +// For more information, see Using Resource-Based Policies for AWS Secrets Manager +// (http://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html). 
+// For the complete description of the AWS policy syntax and grammar, see IAM +// JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) +// in the IAM User Guide. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:PutResourcePolicy +// +// Related operations +// +// * To retrieve the resource policy attached to a secret, use GetResourcePolicy. +// +// * To delete the resource-based policy that's attached to a secret, use +// DeleteResourcePolicy. +// +// * To list all of the currently available secrets, use ListSecrets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation PutResourcePolicy for usage and error information. +// +// Returned Error Types: +// * MalformedPolicyDocumentException +// The policy document that you provided isn't valid. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * PublicPolicyException +// The resource policy did not prevent broad access to the secret. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/PutResourcePolicy +func (c *SecretsManager) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + return out, req.Send() +} + +// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { + req, out := c.PutResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutSecretValue = "PutSecretValue" + +// PutSecretValueRequest generates a "aws/request.Request" representing the +// client's request for the PutSecretValue operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutSecretValue for more information on using the PutSecretValue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutSecretValueRequest method. 
+// req, resp := client.PutSecretValueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/PutSecretValue +func (c *SecretsManager) PutSecretValueRequest(input *PutSecretValueInput) (req *request.Request, output *PutSecretValueOutput) { + op := &request.Operation{ + Name: opPutSecretValue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutSecretValueInput{} + } + + output = &PutSecretValueOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutSecretValue API operation for AWS Secrets Manager. +// +// Stores a new encrypted secret value in the specified secret. To do this, +// the operation creates a new version and attaches it to the secret. The version +// can contain a new SecretString value or a new SecretBinary value. You can +// also specify the staging labels that are initially attached to the new version. +// +// The Secrets Manager console uses only the SecretString field. To add binary +// data to a secret with the SecretBinary field you must use the AWS CLI or +// one of the AWS SDKs. +// +// * If this operation creates the first version for the secret then Secrets +// Manager automatically attaches the staging label AWSCURRENT to the new +// version. +// +// * If another version of this secret already exists, then this operation +// does not automatically move any staging labels other than those that you +// explicitly specify in the VersionStages parameter. +// +// * If this operation moves the staging label AWSCURRENT from another version +// to this version (because you included it in the StagingLabels parameter) +// then Secrets Manager also automatically moves the staging label AWSPREVIOUS +// to the version that AWSCURRENT was removed from. +// +// * This operation is idempotent. 
If a version with a VersionId with the +// same value as the ClientRequestToken parameter already exists and you +// specify the same secret data, the operation succeeds but does nothing. +// However, if the secret data is different, then the operation fails because +// you cannot modify an existing version; you can only create new ones. +// +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. +// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. 
+// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:PutSecretValue +// +// * kms:GenerateDataKey - needed only if you use a customer-managed AWS +// KMS key to encrypt the secret. You do not need this permission to use +// the account's default AWS managed CMK for Secrets Manager. +// +// Related operations +// +// * To retrieve the encrypted value you store in the version of a secret, +// use GetSecretValue. +// +// * To create a secret, use CreateSecret. +// +// * To get the details for a secret, use DescribeSecret. +// +// * To list the versions attached to a secret, use ListSecretVersionIds. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation PutSecretValue for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * LimitExceededException +// The request failed because it would exceed one of the Secrets Manager internal +// limits. +// +// * EncryptionFailure +// Secrets Manager can't encrypt the protected secret text using the provided +// KMS key. Check that the customer master key (CMK) is available, enabled, +// and not in an invalid state. 
For more information, see How Key State Affects +// Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). +// +// * ResourceExistsException +// A resource with the ID you requested already exists. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/PutSecretValue +func (c *SecretsManager) PutSecretValue(input *PutSecretValueInput) (*PutSecretValueOutput, error) { + req, out := c.PutSecretValueRequest(input) + return out, req.Send() +} + +// PutSecretValueWithContext is the same as PutSecretValue with the addition of +// the ability to pass a context and additional request options. +// +// See PutSecretValue for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) PutSecretValueWithContext(ctx aws.Context, input *PutSecretValueInput, opts ...request.Option) (*PutSecretValueOutput, error) { + req, out := c.PutSecretValueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreSecret = "RestoreSecret" + +// RestoreSecretRequest generates a "aws/request.Request" representing the +// client's request for the RestoreSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See RestoreSecret for more information on using the RestoreSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreSecretRequest method. +// req, resp := client.RestoreSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/RestoreSecret +func (c *SecretsManager) RestoreSecretRequest(input *RestoreSecretInput) (req *request.Request, output *RestoreSecretOutput) { + op := &request.Operation{ + Name: opRestoreSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreSecretInput{} + } + + output = &RestoreSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreSecret API operation for AWS Secrets Manager. +// +// Cancels the scheduled deletion of a secret by removing the DeletedDate time +// stamp. This makes the secret accessible to query once again. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:RestoreSecret +// +// Related operations +// +// * To delete a secret, use DeleteSecret. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation RestoreSecret for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. 
+// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/RestoreSecret +func (c *SecretsManager) RestoreSecret(input *RestoreSecretInput) (*RestoreSecretOutput, error) { + req, out := c.RestoreSecretRequest(input) + return out, req.Send() +} + +// RestoreSecretWithContext is the same as RestoreSecret with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) RestoreSecretWithContext(ctx aws.Context, input *RestoreSecretInput, opts ...request.Option) (*RestoreSecretOutput, error) { + req, out := c.RestoreSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRotateSecret = "RotateSecret" + +// RotateSecretRequest generates a "aws/request.Request" representing the +// client's request for the RotateSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See RotateSecret for more information on using the RotateSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RotateSecretRequest method. +// req, resp := client.RotateSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/RotateSecret +func (c *SecretsManager) RotateSecretRequest(input *RotateSecretInput) (req *request.Request, output *RotateSecretOutput) { + op := &request.Operation{ + Name: opRotateSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RotateSecretInput{} + } + + output = &RotateSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// RotateSecret API operation for AWS Secrets Manager. +// +// Configures and starts the asynchronous process of rotating this secret. If +// you include the configuration parameters, the operation sets those values +// for the secret and then immediately starts a rotation. If you do not include +// the configuration parameters, the operation starts a rotation with the values +// already stored in the secret. After the rotation completes, the protected +// service and its clients all use the new version of the secret. +// +// This required configuration information includes the ARN of an AWS Lambda +// function and the time between scheduled rotations. The Lambda rotation function +// creates a new version of the secret and creates or updates the credentials +// on the protected service to match. 
After testing the new credentials, the +// function marks the new secret with the staging label AWSCURRENT so that your +// clients all immediately begin to use the new version. For more information +// about rotating secrets and how to configure a Lambda function to rotate the +// secrets for your protected service, see Rotating Secrets in AWS Secrets Manager +// (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html) +// in the AWS Secrets Manager User Guide. +// +// Secrets Manager schedules the next rotation when the previous one completes. +// Secrets Manager schedules the date by adding the rotation interval (number +// of days) to the actual date of the last rotation. The service chooses the +// hour within that 24-hour date window randomly. The minute is also chosen +// somewhat randomly, but weighted towards the top of the hour and influenced +// by a variety of factors that help distribute load. +// +// The rotation function must end with the versions of the secret in one of +// two states: +// +// * The AWSPENDING and AWSCURRENT staging labels are attached to the same +// version of the secret, or +// +// * The AWSPENDING staging label is not attached to any version of the secret. +// +// If the AWSPENDING staging label is present but not attached to the same version +// as AWSCURRENT then any later invocation of RotateSecret assumes that a previous +// rotation request is still in progress and returns an error. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:RotateSecret +// +// * lambda:InvokeFunction (on the function specified in the secret's metadata) +// +// Related operations +// +// * To list the secrets in your account, use ListSecrets. +// +// * To get the details for a version of a secret, use DescribeSecret. +// +// * To create a new version of a secret, use CreateSecret. 
+// +// * To attach staging labels to or remove staging labels from a version +// of a secret, use UpdateSecretVersionStage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation RotateSecret for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/RotateSecret +func (c *SecretsManager) RotateSecret(input *RotateSecretInput) (*RotateSecretOutput, error) { + req, out := c.RotateSecretRequest(input) + return out, req.Send() +} + +// RotateSecretWithContext is the same as RotateSecret with the addition of +// the ability to pass a context and additional request options. +// +// See RotateSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SecretsManager) RotateSecretWithContext(ctx aws.Context, input *RotateSecretInput, opts ...request.Option) (*RotateSecretOutput, error) { + req, out := c.RotateSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/TagResource +func (c *SecretsManager) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Secrets Manager. +// +// Attaches one or more tags, each consisting of a key name and a value, to +// the specified secret. 
Tags are part of the secret's overall metadata, and +// are not associated with any specific version of the secret. This operation +// only appends tags to the existing list of tags. To remove tags, you must +// use UntagResource. +// +// The following basic restrictions apply to tags: +// +// * Maximum number of tags per secret—50 +// +// * Maximum key length—127 Unicode characters in UTF-8 +// +// * Maximum value length—255 Unicode characters in UTF-8 +// +// * Tag keys and values are case sensitive. +// +// * Do not use the aws: prefix in your tag names or values because AWS reserves +// it for AWS use. You can't edit or delete tag names or values with this +// prefix. Tags with this prefix do not count against your tags per secret +// limit. +// +// * If you use your tagging schema across multiple services and resources, +// remember other services might have restrictions on allowed characters. +// Generally allowed characters: letters, spaces, and numbers representable +// in UTF-8, plus the following special characters: + - = . _ : / @. +// +// If you use tags as part of your security strategy, then adding or removing +// a tag can change permissions. If successfully completing this operation would +// result in you losing your permissions for this secret, then the operation +// is blocked and returns an Access Denied error. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:TagResource +// +// Related operations +// +// * To remove one or more tags from the collection attached to a secret, +// use UntagResource. +// +// * To view the list of tags attached to a secret, use DescribeSecret. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/TagResource +func (c *SecretsManager) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UntagResource +func (c *SecretsManager) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Secrets Manager. +// +// Removes one or more tags from the specified secret. +// +// This operation is idempotent. If a requested tag is not attached to the secret, +// no error is returned and the secret metadata is unchanged. +// +// If you use tags as part of your security strategy, then removing a tag can +// change permissions. 
If successfully completing this operation would result +// in you losing your permissions for this secret, then the operation is blocked +// and returns an Access Denied error. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:UntagResource +// +// Related operations +// +// * To add one or more tags to the collection attached to a secret, use +// TagResource. +// +// * To view the list of tags attached to a secret, use DescribeSecret. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UntagResource +func (c *SecretsManager) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecret = "UpdateSecret" + +// UpdateSecretRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecret for more information on using the UpdateSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecretRequest method. 
+// req, resp := client.UpdateSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UpdateSecret +func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *request.Request, output *UpdateSecretOutput) { + op := &request.Operation{ + Name: opUpdateSecret, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecretInput{} + } + + output = &UpdateSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecret API operation for AWS Secrets Manager. +// +// Modifies many of the details of the specified secret. If you include a ClientRequestToken +// and either SecretString or SecretBinary then it also creates a new version +// attached to the secret. +// +// To modify the rotation configuration of a secret, use RotateSecret instead. +// +// The Secrets Manager console uses only the SecretString parameter and therefore +// limits you to encrypting and storing only a text string. To encrypt and store +// binary data as part of the version of a secret, you must use either the AWS +// CLI or one of the AWS SDKs. +// +// * If a version with a VersionId with the same value as the ClientRequestToken +// parameter already exists, the operation results in an error. You cannot +// modify an existing version, you can only create a new version. +// +// * If you include SecretString or SecretBinary to create a new secret version, +// Secrets Manager automatically attaches the staging label AWSCURRENT to +// the new version. +// +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. 
+// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access +// the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret +// or when you update it by including it in the KMSKeyId. If you call an +// API that must encrypt or decrypt SecretString or SecretBinary using credentials +// from a different account then the AWS KMS key policy must grant cross-account +// access to that other account's user or role for both the kms:GenerateDataKey +// and kms:Decrypt operations. +// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:UpdateSecret +// +// * kms:GenerateDataKey - needed only if you use a custom AWS KMS key to +// encrypt the secret. You do not need this permission to use the account's +// AWS managed CMK for Secrets Manager. +// +// * kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt +// the secret. You do not need this permission to use the account's AWS managed +// CMK for Secrets Manager. +// +// Related operations +// +// * To create a new secret, use CreateSecret. +// +// * To add only a new version to an existing secret, use PutSecretValue. +// +// * To get the details for a secret, use DescribeSecret. +// +// * To list the versions contained in a secret, use ListSecretVersionIds. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation UpdateSecret for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * LimitExceededException +// The request failed because it would exceed one of the Secrets Manager internal +// limits. +// +// * EncryptionFailure +// Secrets Manager can't encrypt the protected secret text using the provided +// KMS key. Check that the customer master key (CMK) is available, enabled, +// and not in an invalid state. For more information, see How Key State Affects +// Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). +// +// * ResourceExistsException +// A resource with the ID you requested already exists. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * MalformedPolicyDocumentException +// The policy document that you provided isn't valid. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * PreconditionNotMetException +// The request failed because you did not complete all the prerequisite steps. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UpdateSecret +func (c *SecretsManager) UpdateSecret(input *UpdateSecretInput) (*UpdateSecretOutput, error) { + req, out := c.UpdateSecretRequest(input) + return out, req.Send() +} + +// UpdateSecretWithContext is the same as UpdateSecret with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) UpdateSecretWithContext(ctx aws.Context, input *UpdateSecretInput, opts ...request.Option) (*UpdateSecretOutput, error) { + req, out := c.UpdateSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecretVersionStage = "UpdateSecretVersionStage" + +// UpdateSecretVersionStageRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecretVersionStage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecretVersionStage for more information on using the UpdateSecretVersionStage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecretVersionStageRequest method. 
+// req, resp := client.UpdateSecretVersionStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UpdateSecretVersionStage +func (c *SecretsManager) UpdateSecretVersionStageRequest(input *UpdateSecretVersionStageInput) (req *request.Request, output *UpdateSecretVersionStageOutput) { + op := &request.Operation{ + Name: opUpdateSecretVersionStage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecretVersionStageInput{} + } + + output = &UpdateSecretVersionStageOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecretVersionStage API operation for AWS Secrets Manager. +// +// Modifies the staging labels attached to a version of a secret. Staging labels +// are used to track a version as it progresses through the secret rotation +// process. You can attach a staging label to only one version of a secret at +// a time. If a staging label to be added is already attached to another version, +// then it is moved--removed from the other version first and then attached +// to this one. For more information about staging labels, see Staging Labels +// (https://docs.aws.amazon.com/secretsmanager/latest/userguide/terms-concepts.html#term_staging-label) +// in the AWS Secrets Manager User Guide. +// +// The staging labels that you specify in the VersionStage parameter are added +// to the existing list of staging labels--they don't replace it. +// +// You can move the AWSCURRENT staging label to this version by including it +// in this call. +// +// Whenever you move AWSCURRENT, Secrets Manager automatically moves the label +// AWSPREVIOUS to the version that AWSCURRENT was removed from. +// +// If this action results in the last label being removed from a version, then +// the version is considered to be 'deprecated' and can be deleted by Secrets +// Manager. 
+// +// Minimum permissions +// +// To run this command, you must have the following permissions: +// +// * secretsmanager:UpdateSecretVersionStage +// +// Related operations +// +// * To get the list of staging labels that are currently associated with +// a version of a secret, use DescribeSecret and examine the SecretVersionsToStages +// response value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation UpdateSecretVersionStage for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// * LimitExceededException +// The request failed because it would exceed one of the Secrets Manager internal +// limits. +// +// * InternalServiceError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/UpdateSecretVersionStage +func (c *SecretsManager) UpdateSecretVersionStage(input *UpdateSecretVersionStageInput) (*UpdateSecretVersionStageOutput, error) { + req, out := c.UpdateSecretVersionStageRequest(input) + return out, req.Send() +} + +// UpdateSecretVersionStageWithContext is the same as UpdateSecretVersionStage with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecretVersionStage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) UpdateSecretVersionStageWithContext(ctx aws.Context, input *UpdateSecretVersionStageInput, opts ...request.Option) (*UpdateSecretVersionStageOutput, error) { + req, out := c.UpdateSecretVersionStageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opValidateResourcePolicy = "ValidateResourcePolicy" + +// ValidateResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the ValidateResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ValidateResourcePolicy for more information on using the ValidateResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ValidateResourcePolicyRequest method. +// req, resp := client.ValidateResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ValidateResourcePolicy +func (c *SecretsManager) ValidateResourcePolicyRequest(input *ValidateResourcePolicyInput) (req *request.Request, output *ValidateResourcePolicyOutput) { + op := &request.Operation{ + Name: opValidateResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateResourcePolicyInput{} + } + + output = &ValidateResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ValidateResourcePolicy API operation for AWS Secrets Manager. +// +// Validates the JSON text of the resource-based policy document attached to +// the specified secret. The JSON request string input and response output displays +// formatted code with white space and line breaks for better readability. Submit +// your input as a single line JSON string. A resource-based policy is optional. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation ValidateResourcePolicy for usage and error information. +// +// Returned Error Types: +// * MalformedPolicyDocumentException +// The policy document that you provided isn't valid. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. 
+// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ValidateResourcePolicy +func (c *SecretsManager) ValidateResourcePolicy(input *ValidateResourcePolicyInput) (*ValidateResourcePolicyOutput, error) { + req, out := c.ValidateResourcePolicyRequest(input) + return out, req.Send() +} + +// ValidateResourcePolicyWithContext is the same as ValidateResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See ValidateResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ValidateResourcePolicyWithContext(ctx aws.Context, input *ValidateResourcePolicyInput, opts ...request.Option) (*ValidateResourcePolicyOutput, error) { + req, out := c.ValidateResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type CancelRotateSecretInput struct { + _ struct{} `type:"structure"` + + // Specifies the secret to cancel a rotation request. You can specify either + // the Amazon Resource Name (ARN) or the friendly name of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. 
You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelRotateSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRotateSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelRotateSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelRotateSecretInput"} + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecretId sets the SecretId field's value. +func (s *CancelRotateSecretInput) SetSecretId(v string) *CancelRotateSecretInput { + s.SecretId = &v + return s +} + +type CancelRotateSecretOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret for which rotation was canceled. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret for which rotation was canceled. + Name *string `min:"1" type:"string"` + + // The unique identifier of the version of the secret created during the rotation. + // This version might not be complete, and should be evaluated for possible + // deletion. At the very least, you should remove the VersionStage value AWSPENDING + // to enable this version to be deleted. Failing to clean up a cancelled rotation + // can block you from successfully starting future rotations. + VersionId *string `min:"32" type:"string"` +} + +// String returns the string representation +func (s CancelRotateSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelRotateSecretOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *CancelRotateSecretOutput) SetARN(v string) *CancelRotateSecretOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *CancelRotateSecretOutput) SetName(v string) *CancelRotateSecretOutput { + s.Name = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *CancelRotateSecretOutput) SetVersionId(v string) *CancelRotateSecretOutput { + s.VersionId = &v + return s +} + +type CreateSecretInput struct { + _ struct{} `type:"structure"` + + // (Optional) If you include SecretString or SecretBinary, then an initial version + // is created as part of the secret, and this parameter specifies a unique identifier + // for the new version. + // + // If you use the AWS CLI or one of the AWS SDK to call this operation, then + // you can leave this parameter empty. The CLI or SDK generates a random UUID + // for you and includes it as the value for this parameter in the request. If + // you don't use the SDK and instead generate a raw HTTP request to the Secrets + // Manager service endpoint, then you must generate a ClientRequestToken yourself + // for the new version and include the value in the request. + // + // This value helps ensure idempotency. Secrets Manager uses this value to prevent + // the accidental creation of duplicate versions if there are failures and retries + // during a rotation. We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) + // value to ensure uniqueness of your versions within the specified secret. + // + // * If the ClientRequestToken value isn't already associated with a version + // of the secret then a new version of the secret is created. + // + // * If a version with this value already exists and the version SecretString + // and SecretBinary values are the same as those in the request, then the + // request is ignored. + // + // * If a version with this value already exists and that version's SecretString + // and SecretBinary values are different from those in the request then the + // request fails because you cannot modify an existing version. Instead, + // use PutSecretValue to create a new version. + // + // This value becomes the VersionId of the new version. 
+ ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` + + // (Optional) Specifies a user-provided description of the secret. + Description *string `type:"string"` + + // (Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master + // key (CMK) to be used to encrypt the SecretString or SecretBinary values in + // the versions stored in this secret. + // + // You can specify any of the supported ways to identify a AWS KMS key ID. If + // you need to reference a CMK in a different account, you can use only the + // key ARN or the alias ARN. + // + // If you don't specify this value, then Secrets Manager defaults to using the + // AWS account's default CMK (the one named aws/secretsmanager). If a AWS KMS + // CMK with that name doesn't yet exist, then Secrets Manager creates it for + // you automatically the first time it needs to encrypt a version's SecretString + // or SecretBinary fields. + // + // You can use the account default CMK to encrypt and decrypt only if you call + // this operation using credentials from the same account that owns the secret. + // If the secret resides in a different account, then you must create a custom + // CMK and specify the ARN in this field. + KmsKeyId *string `type:"string"` + + // Specifies the friendly name of the new secret. + // + // The secret name must be ASCII letters, digits, or the following characters + // : /_+=.@- + // + // Do not end your secret name with a hyphen followed by six characters. If + // you do so, you risk confusion and unexpected results when searching for a + // secret by partial ARN. Secrets Manager automatically adds a hyphen and six + // random characters at the end of the ARN. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // (Optional) Specifies binary data that you want to encrypt and store in the + // new version of the secret. 
To use this parameter in the command-line tools, + // we recommend that you store your binary data in a file and then use the appropriate + // technique for your tool to pass the contents of the file as a parameter. + // + // Either SecretString or SecretBinary must have a value, but not both. They + // cannot both be empty. + // + // This parameter is not available using the Secrets Manager console. It can + // be accessed only by using the AWS CLI or one of the AWS SDKs. + // + // SecretBinary is automatically base64 encoded/decoded by the SDK. + SecretBinary []byte `type:"blob" sensitive:"true"` + + // (Optional) Specifies text data that you want to encrypt and store in this + // new version of the secret. + // + // Either SecretString or SecretBinary must have a value, but not both. They + // cannot both be empty. + // + // If you create a secret by using the Secrets Manager console then Secrets + // Manager puts the protected secret text in only the SecretString parameter. + // The Secrets Manager console stores the information as a JSON structure of + // key/value pairs that the Lambda rotation function knows how to parse. + // + // For storing multiple values, we recommend that you use a JSON text string + // argument and specify key/value pairs. For information on how to format a + // JSON parameter for the various command line tool environments, see Using + // JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json) + // in the AWS CLI User Guide. For example: + // + // {"username":"bob","password":"abc123xyz456"} + // + // If your command-line tool or SDK requires quotation marks around the parameter, + // you should use single quotes to avoid confusion with the double quotes required + // in the JSON text. + SecretString *string `type:"string" sensitive:"true"` + + // (Optional) Specifies a list of user-defined tags that are attached to the + // secret. 
Each tag is a "Key" and "Value" pair of strings. This operation only + // appends tags to the existing list of tags. To remove tags, you must use UntagResource. + // + // * Secrets Manager tag key names are case sensitive. A tag with the key + // "ABC" is a different tag from one with key "abc". + // + // * If you check tags in IAM policy Condition elements as part of your security + // strategy, then adding or removing a tag can change permissions. If the + // successful completion of this operation would result in you losing your + // permissions for this secret, then this operation is blocked and returns + // an Access Denied error. + // + // This parameter requires a JSON text string argument. For information on how + // to format a JSON parameter for the various command line tool environments, + // see Using JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json) + // in the AWS CLI User Guide. For example: + // + // [{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}] + // + // If your command-line tool or SDK requires quotation marks around the parameter, + // you should use single quotes to avoid confusion with the double quotes required + // in the JSON text. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per secret—50 + // + // * Maximum key length—127 Unicode characters in UTF-8 + // + // * Maximum value length—255 Unicode characters in UTF-8 + // + // * Tag keys and values are case sensitive. + // + // * Do not use the aws: prefix in your tag names or values because AWS reserves + // it for AWS use. You can't edit or delete tag names or values with this + // prefix. Tags with this prefix do not count against your tags per secret + // limit. + // + // * If you use your tagging schema across multiple services and resources, + // remember other services might have restrictions on allowed characters. 
+ // Generally allowed characters: letters, spaces, and numbers representable + // in UTF-8, plus the following special characters: + - = . _ : / @. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSecretInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateSecretInput) SetClientRequestToken(v string) *CreateSecretInput { + s.ClientRequestToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateSecretInput) SetDescription(v string) *CreateSecretInput { + s.Description = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateSecretInput) SetKmsKeyId(v string) *CreateSecretInput { + s.KmsKeyId = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSecretInput) SetName(v string) *CreateSecretInput { + s.Name = &v + return s +} + +// SetSecretBinary sets the SecretBinary field's value. 
+func (s *CreateSecretInput) SetSecretBinary(v []byte) *CreateSecretInput { + s.SecretBinary = v + return s +} + +// SetSecretString sets the SecretString field's value. +func (s *CreateSecretInput) SetSecretString(v string) *CreateSecretInput { + s.SecretString = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateSecretInput) SetTags(v []*Tag) *CreateSecretInput { + s.Tags = v + return s +} + +type CreateSecretOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the secret that you just created. + // + // Secrets Manager automatically adds several random characters to the name + // at the end of the ARN when you initially create a secret. This affects only + // the ARN and not the actual friendly name. This ensures that if you create + // a new secret with the same name as an old secret that you previously deleted, + // then users with access to the old secret don't automatically get access to + // the new secret because the ARNs are different. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret that you just created. + Name *string `min:"1" type:"string"` + + // The unique identifier associated with the version of the secret you just + // created. + VersionId *string `min:"32" type:"string"` +} + +// String returns the string representation +func (s CreateSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSecretOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *CreateSecretOutput) SetARN(v string) *CreateSecretOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSecretOutput) SetName(v string) *CreateSecretOutput { + s.Name = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *CreateSecretOutput) SetVersionId(v string) *CreateSecretOutput { + s.VersionId = &v + return s +} + +// Secrets Manager can't decrypt the protected secret text using the provided +// KMS key. +type DecryptionFailure struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DecryptionFailure) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecryptionFailure) GoString() string { + return s.String() +} + +func newErrorDecryptionFailure(v protocol.ResponseMetadata) error { + return &DecryptionFailure{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DecryptionFailure) Code() string { + return "DecryptionFailure" +} + +// Message returns the exception's message. +func (s *DecryptionFailure) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DecryptionFailure) OrigErr() error { + return nil +} + +func (s *DecryptionFailure) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DecryptionFailure) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DecryptionFailure) RequestID() string { + return s.RespMetadata.RequestID +} + +type DeleteResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies the secret that you want to delete the attached resource-based + // policy for. You can specify either the Amazon Resource Name (ARN) or the + // friendly name of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. 
You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecretId sets the SecretId field's value. +func (s *DeleteResourcePolicyInput) SetSecretId(v string) *DeleteResourcePolicyInput { + s.SecretId = &v + return s +} + +type DeleteResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret that the resource-based policy was deleted for. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret that the resource-based policy was deleted + // for. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *DeleteResourcePolicyOutput) SetARN(v string) *DeleteResourcePolicyOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteResourcePolicyOutput) SetName(v string) *DeleteResourcePolicyOutput { + s.Name = &v + return s +} + +type DeleteSecretInput struct { + _ struct{} `type:"structure"` + + // (Optional) Specifies that the secret is to be deleted without any recovery + // window. You can't use both this parameter and the RecoveryWindowInDays parameter + // in the same API call. + // + // An asynchronous background process performs the actual deletion, so there + // can be a short delay before the operation completes. 
If you write code to
+	// delete and then immediately recreate a secret with the same name, ensure
+	// that your code includes appropriate back off and retry logic.
+	//
+	// Use this parameter with caution. This parameter causes the operation to skip
+	// the normal waiting period before the permanent deletion that AWS would normally
+	// impose with the RecoveryWindowInDays parameter. If you delete a secret with
+	// the ForceDeleteWithoutRecovery parameter, then you have no opportunity to
+	// recover the secret. It is permanently lost.
+	ForceDeleteWithoutRecovery *bool `type:"boolean"`
+
+	// (Optional) Specifies the number of days that Secrets Manager waits before
+	// it can delete the secret. You can't use both this parameter and the ForceDeleteWithoutRecovery
+	// parameter in the same API call.
+	//
+	// This value can range from 7 to 30 days. The default value is 30.
+	RecoveryWindowInDays *int64 `type:"long"`
+
+	// Specifies the secret that you want to delete. You can specify either the
+	// Amazon Resource Name (ARN) or the friendly name of the secret.
+	//
+	// If you specify an ARN, we generally recommend that you specify a complete
+	// ARN. You can specify a partial ARN too—for example, if you don’t include
+	// the final hyphen and six random characters that Secrets Manager adds at the
+	// end of the ARN when you created the secret. A partial ARN match can work
+	// as long as it uniquely matches only one secret. However, if your secret has
+	// a name that ends in a hyphen followed by six characters (before Secrets Manager
+	// adds the hyphen and six characters to the ARN) and you try to use that as
+	// a partial ARN, then those characters cause Secrets Manager to assume that
+	// you’re specifying a complete ARN. This confusion can cause unexpected results.
+	// To avoid this situation, we recommend that you don’t create secret names
+	// ending with a hyphen followed by six characters. 
+ // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSecretInput"} + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForceDeleteWithoutRecovery sets the ForceDeleteWithoutRecovery field's value. +func (s *DeleteSecretInput) SetForceDeleteWithoutRecovery(v bool) *DeleteSecretInput { + s.ForceDeleteWithoutRecovery = &v + return s +} + +// SetRecoveryWindowInDays sets the RecoveryWindowInDays field's value. +func (s *DeleteSecretInput) SetRecoveryWindowInDays(v int64) *DeleteSecretInput { + s.RecoveryWindowInDays = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *DeleteSecretInput) SetSecretId(v string) *DeleteSecretInput { + s.SecretId = &v + return s +} + +type DeleteSecretOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret that is now scheduled for deletion. 
+ ARN *string `min:"20" type:"string"` + + // The date and time after which this secret can be deleted by Secrets Manager + // and can no longer be restored. This value is the date and time of the delete + // request plus the number of days specified in RecoveryWindowInDays. + DeletionDate *time.Time `type:"timestamp"` + + // The friendly name of the secret that is now scheduled for deletion. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSecretOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *DeleteSecretOutput) SetARN(v string) *DeleteSecretOutput { + s.ARN = &v + return s +} + +// SetDeletionDate sets the DeletionDate field's value. +func (s *DeleteSecretOutput) SetDeletionDate(v time.Time) *DeleteSecretOutput { + s.DeletionDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteSecretOutput) SetName(v string) *DeleteSecretOutput { + s.Name = &v + return s +} + +type DescribeSecretInput struct { + _ struct{} `type:"structure"` + + // The identifier of the secret whose details you want to retrieve. You can + // specify either the Amazon Resource Name (ARN) or the friendly name of the + // secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. 
However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecretInput"} + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecretId sets the SecretId field's value. +func (s *DescribeSecretInput) SetSecretId(v string) *DescribeSecretInput { + s.SecretId = &v + return s +} + +type DescribeSecretOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret. 
+	ARN *string `min:"20" type:"string"`
+
+	// The date that the secret was created.
+	CreatedDate *time.Time `type:"timestamp"`
+
+	// This value exists if the secret is scheduled for deletion. Some time after
+	// the specified date and time, Secrets Manager deletes the secret and all of
+	// its versions.
+	//
+	// If a secret is scheduled for deletion, then its details, including the encrypted
+	// secret information, is not accessible. To cancel a scheduled deletion and
+	// restore access, use RestoreSecret.
+	DeletedDate *time.Time `type:"timestamp"`
+
+	// The user-provided description of the secret.
+	Description *string `type:"string"`
+
+	// The ARN or alias of the AWS KMS customer master key (CMK) that's used to
+	// encrypt the SecretString or SecretBinary fields in each version of the secret.
+	// If you don't provide a key, then Secrets Manager defaults to encrypting the
+	// secret fields with the default AWS KMS CMK (the one named aws/secretsmanager)
+	// for this account.
+	KmsKeyId *string `type:"string"`
+
+	// The last date that this secret was accessed. This value is truncated to midnight
+	// of the date and therefore shows only the date, not the time.
+	LastAccessedDate *time.Time `type:"timestamp"`
+
+	// The last date and time that this secret was modified in any way.
+	LastChangedDate *time.Time `type:"timestamp"`
+
+	// The most recent date and time that the Secrets Manager rotation process was
+	// successfully completed. This value is null if the secret has never rotated.
+	LastRotatedDate *time.Time `type:"timestamp"`
+
+	// The user-provided friendly name of the secret.
+	Name *string `min:"1" type:"string"`
+
+	// Returns the name of the service that created this secret.
+	OwningService *string `min:"1" type:"string"`
+
+	// Specifies whether automatic rotation is enabled for this secret.
+	//
+	// To enable rotation, use RotateSecret with AutomaticallyRotateAfterDays set
+	// to a value greater than 0. 
To disable rotation, use CancelRotateSecret. + RotationEnabled *bool `type:"boolean"` + + // The ARN of a Lambda function that's invoked by Secrets Manager to rotate + // the secret either automatically per the schedule or manually by a call to + // RotateSecret. + RotationLambdaARN *string `type:"string"` + + // A structure that contains the rotation configuration for this secret. + RotationRules *RotationRulesType `type:"structure"` + + // The list of user-defined tags that are associated with the secret. To add + // tags to a secret, use TagResource. To remove tags, use UntagResource. + Tags []*Tag `type:"list"` + + // A list of all of the currently assigned VersionStage staging labels and the + // VersionId that each is attached to. Staging labels are used to keep track + // of the different versions during the rotation process. + // + // A version that does not have any staging labels attached is considered deprecated + // and subject to deletion. Such versions are not included in this list. + VersionIdsToStages map[string][]*string `type:"map"` +} + +// String returns the string representation +func (s DescribeSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecretOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *DescribeSecretOutput) SetARN(v string) *DescribeSecretOutput { + s.ARN = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *DescribeSecretOutput) SetCreatedDate(v time.Time) *DescribeSecretOutput { + s.CreatedDate = &v + return s +} + +// SetDeletedDate sets the DeletedDate field's value. +func (s *DescribeSecretOutput) SetDeletedDate(v time.Time) *DescribeSecretOutput { + s.DeletedDate = &v + return s +} + +// SetDescription sets the Description field's value. 
// NOTE(review): this appears to be generated AWS SDK code (one setter per field,
// chaining receiver) — confirm before hand-editing; regeneration would overwrite it.
// Each setter stores v and returns s so calls can be chained.

// SetDescription sets the Description field's value.
func (s *DescribeSecretOutput) SetDescription(v string) *DescribeSecretOutput {
	s.Description = &v
	return s
}

// SetKmsKeyId sets the KmsKeyId field's value.
func (s *DescribeSecretOutput) SetKmsKeyId(v string) *DescribeSecretOutput {
	s.KmsKeyId = &v
	return s
}

// SetLastAccessedDate sets the LastAccessedDate field's value.
func (s *DescribeSecretOutput) SetLastAccessedDate(v time.Time) *DescribeSecretOutput {
	s.LastAccessedDate = &v
	return s
}

// SetLastChangedDate sets the LastChangedDate field's value.
func (s *DescribeSecretOutput) SetLastChangedDate(v time.Time) *DescribeSecretOutput {
	s.LastChangedDate = &v
	return s
}

// SetLastRotatedDate sets the LastRotatedDate field's value.
func (s *DescribeSecretOutput) SetLastRotatedDate(v time.Time) *DescribeSecretOutput {
	s.LastRotatedDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *DescribeSecretOutput) SetName(v string) *DescribeSecretOutput {
	s.Name = &v
	return s
}

// SetOwningService sets the OwningService field's value.
func (s *DescribeSecretOutput) SetOwningService(v string) *DescribeSecretOutput {
	s.OwningService = &v
	return s
}

// SetRotationEnabled sets the RotationEnabled field's value.
func (s *DescribeSecretOutput) SetRotationEnabled(v bool) *DescribeSecretOutput {
	s.RotationEnabled = &v
	return s
}

// SetRotationLambdaARN sets the RotationLambdaARN field's value.
func (s *DescribeSecretOutput) SetRotationLambdaARN(v string) *DescribeSecretOutput {
	s.RotationLambdaARN = &v
	return s
}

// SetRotationRules sets the RotationRules field's value.
func (s *DescribeSecretOutput) SetRotationRules(v *RotationRulesType) *DescribeSecretOutput {
	s.RotationRules = v
	return s
}

// SetTags sets the Tags field's value.
func (s *DescribeSecretOutput) SetTags(v []*Tag) *DescribeSecretOutput {
	s.Tags = v
	return s
}

// SetVersionIdsToStages sets the VersionIdsToStages field's value.
func (s *DescribeSecretOutput) SetVersionIdsToStages(v map[string][]*string) *DescribeSecretOutput {
	s.VersionIdsToStages = v
	return s
}

// Secrets Manager can't encrypt the protected secret text using the provided
// KMS key. Check that the customer master key (CMK) is available, enabled,
// and not in an invalid state. For more information, see How Key State Affects
// Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html).
type EncryptionFailure struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s EncryptionFailure) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s EncryptionFailure) GoString() string {
	return s.String()
}

// newErrorEncryptionFailure wraps the response metadata in an awserr-compatible
// error value; used by the service's error-unmarshaling machinery.
func newErrorEncryptionFailure(v protocol.ResponseMetadata) error {
	return &EncryptionFailure{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *EncryptionFailure) Code() string {
	return "EncryptionFailure"
}

// Message returns the exception's message.
func (s *EncryptionFailure) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *EncryptionFailure) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *EncryptionFailure) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *EncryptionFailure) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *EncryptionFailure) RequestID() string {
	return s.RespMetadata.RequestID
}

// Allows you to filter your list of secrets.
// Allows you to filter your list of secrets.
type Filter struct {
	_ struct{} `type:"structure"`

	// Filters your list of secrets by a specific key.
	Key *string `type:"string" enum:"FilterNameStringType"`

	// Filters your list of secrets by a specific value.
	Values []*string `min:"1" type:"list"`
}

// String returns the string representation.
func (s Filter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s Filter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Filter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Filter"}
	// Values, when present, must contain at least one element (min:"1" tag).
	if s.Values != nil && len(s.Values) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Values", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *Filter) SetKey(v string) *Filter {
	s.Key = &v
	return s
}

// SetValues sets the Values field's value.
func (s *Filter) SetValues(v []*string) *Filter {
	s.Values = v
	return s
}

// Input shape for the GetRandomPassword API operation.
type GetRandomPasswordInput struct {
	_ struct{} `type:"structure"`

	// A string that includes characters that should not be included in the generated
	// password. The default is that all characters from the included sets can be
	// used.
	ExcludeCharacters *string `type:"string"`

	// Specifies that the generated password should not include lowercase letters.
	// The default if you do not include this switch parameter is that lowercase
	// letters can be included.
	ExcludeLowercase *bool `type:"boolean"`

	// Specifies that the generated password should not include digits. The default
	// if you do not include this switch parameter is that digits can be included.
	ExcludeNumbers *bool `type:"boolean"`

	// Specifies that the generated password should not include punctuation characters.
	// The default if you do not include this switch parameter is that punctuation
	// characters can be included.
	//
	// The following are the punctuation characters that can be included in the
	// generated password if you don't explicitly exclude them with ExcludeCharacters
	// or ExcludePunctuation:
	//
	// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
	ExcludePunctuation *bool `type:"boolean"`

	// Specifies that the generated password should not include uppercase letters.
	// The default if you do not include this switch parameter is that uppercase
	// letters can be included.
	ExcludeUppercase *bool `type:"boolean"`

	// Specifies that the generated password can include the space character. The
	// default if you do not include this switch parameter is that the space character
	// is not included.
	IncludeSpace *bool `type:"boolean"`

	// The desired length of the generated password. The default value if you do
	// not include this parameter is 32 characters.
	PasswordLength *int64 `min:"1" type:"long"`

	// A boolean value that specifies whether the generated password must include
	// at least one of every allowed character type. The default value is True and
	// the operation requires at least one of every character type.
	RequireEachIncludedType *bool `type:"boolean"`
}

// String returns the string representation.
func (s GetRandomPasswordInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetRandomPasswordInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetRandomPasswordInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetRandomPasswordInput"}
	// PasswordLength, when present, must be at least 1 (min:"1" tag).
	if s.PasswordLength != nil && *s.PasswordLength < 1 {
		invalidParams.Add(request.NewErrParamMinValue("PasswordLength", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetExcludeCharacters sets the ExcludeCharacters field's value.
func (s *GetRandomPasswordInput) SetExcludeCharacters(v string) *GetRandomPasswordInput {
	s.ExcludeCharacters = &v
	return s
}

// SetExcludeLowercase sets the ExcludeLowercase field's value.
func (s *GetRandomPasswordInput) SetExcludeLowercase(v bool) *GetRandomPasswordInput {
	s.ExcludeLowercase = &v
	return s
}

// SetExcludeNumbers sets the ExcludeNumbers field's value.
func (s *GetRandomPasswordInput) SetExcludeNumbers(v bool) *GetRandomPasswordInput {
	s.ExcludeNumbers = &v
	return s
}

// SetExcludePunctuation sets the ExcludePunctuation field's value.
func (s *GetRandomPasswordInput) SetExcludePunctuation(v bool) *GetRandomPasswordInput {
	s.ExcludePunctuation = &v
	return s
}

// SetExcludeUppercase sets the ExcludeUppercase field's value.
func (s *GetRandomPasswordInput) SetExcludeUppercase(v bool) *GetRandomPasswordInput {
	s.ExcludeUppercase = &v
	return s
}

// SetIncludeSpace sets the IncludeSpace field's value.
func (s *GetRandomPasswordInput) SetIncludeSpace(v bool) *GetRandomPasswordInput {
	s.IncludeSpace = &v
	return s
}

// SetPasswordLength sets the PasswordLength field's value.
func (s *GetRandomPasswordInput) SetPasswordLength(v int64) *GetRandomPasswordInput {
	s.PasswordLength = &v
	return s
}

// SetRequireEachIncludedType sets the RequireEachIncludedType field's value.
func (s *GetRandomPasswordInput) SetRequireEachIncludedType(v bool) *GetRandomPasswordInput {
	s.RequireEachIncludedType = &v
	return s
}

// Output shape for the GetRandomPassword API operation.
type GetRandomPasswordOutput struct {
	_ struct{} `type:"structure"`

	// A string with the generated password.
	RandomPassword *string `type:"string" sensitive:"true"`
}

// String returns the string representation.
func (s GetRandomPasswordOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetRandomPasswordOutput) GoString() string {
	return s.String()
}

// SetRandomPassword sets the RandomPassword field's value.
func (s *GetRandomPasswordOutput) SetRandomPassword(v string) *GetRandomPasswordOutput {
	s.RandomPassword = &v
	return s
}

// Input shape for the GetResourcePolicy API operation.
type GetResourcePolicyInput struct {
	_ struct{} `type:"structure"`

	// Specifies the secret that you want to retrieve the attached resource-based
	// policy for. You can specify either the Amazon Resource Name (ARN) or the
	// friendly name of the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation.
func (s GetResourcePolicyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetResourcePolicyInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetResourcePolicyInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSecretId sets the SecretId field's value.
func (s *GetResourcePolicyInput) SetSecretId(v string) *GetResourcePolicyInput {
	s.SecretId = &v
	return s
}

// Output shape for the GetResourcePolicy API operation.
type GetResourcePolicyOutput struct {
	_ struct{} `type:"structure"`

	// The ARN of the secret that the resource-based policy was retrieved for.
	ARN *string `min:"20" type:"string"`

	// The friendly name of the secret that the resource-based policy was retrieved
	// for.
	Name *string `min:"1" type:"string"`

	// A JSON-formatted string that describes the permissions that are associated
	// with the attached secret. These permissions are combined with any permissions
	// that are associated with the user or role that attempts to access this secret.
	// The combined permissions specify who can access the secret and what actions
	// they can perform. For more information, see Authentication and Access Control
	// for AWS Secrets Manager (http://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access.html)
	// in the AWS Secrets Manager User Guide.
	ResourcePolicy *string `min:"1" type:"string"`
}

// String returns the string representation.
func (s GetResourcePolicyOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetResourcePolicyOutput) GoString() string {
	return s.String()
}

// SetARN sets the ARN field's value.
func (s *GetResourcePolicyOutput) SetARN(v string) *GetResourcePolicyOutput {
	s.ARN = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetResourcePolicyOutput) SetName(v string) *GetResourcePolicyOutput {
	s.Name = &v
	return s
}

// SetResourcePolicy sets the ResourcePolicy field's value.
func (s *GetResourcePolicyOutput) SetResourcePolicy(v string) *GetResourcePolicyOutput {
	s.ResourcePolicy = &v
	return s
}

// Input shape for the GetSecretValue API operation.
type GetSecretValueInput struct {
	_ struct{} `type:"structure"`

	// Specifies the secret containing the version that you want to retrieve. You
	// can specify either the Amazon Resource Name (ARN) or the friendly name of
	// the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`

	// Specifies the unique identifier of the version of the secret that you want
	// to retrieve. If you specify this parameter then don't specify VersionStage.
	// If you don't specify either a VersionStage or VersionId then the default
	// is to perform the operation on the version with the VersionStage value of
	// AWSCURRENT.
	//
	// This value is typically a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier)
	// value with 32 hexadecimal digits.
	VersionId *string `min:"32" type:"string"`

	// Specifies the secret version that you want to retrieve by the staging label
	// attached to the version.
	//
	// Staging labels are used to keep track of different versions during the rotation
	// process. If you use this parameter then don't specify VersionId. If you don't
	// specify either a VersionStage or VersionId, then the default is to perform
	// the operation on the version with the VersionStage value of AWSCURRENT.
	VersionStage *string `min:"1" type:"string"`
}

// String returns the string representation.
func (s GetSecretValueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetSecretValueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetSecretValueInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetSecretValueInput"}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}
	if s.VersionId != nil && len(*s.VersionId) < 32 {
		invalidParams.Add(request.NewErrParamMinLen("VersionId", 32))
	}
	if s.VersionStage != nil && len(*s.VersionStage) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("VersionStage", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSecretId sets the SecretId field's value.
func (s *GetSecretValueInput) SetSecretId(v string) *GetSecretValueInput {
	s.SecretId = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetSecretValueInput) SetVersionId(v string) *GetSecretValueInput {
	s.VersionId = &v
	return s
}

// SetVersionStage sets the VersionStage field's value.
func (s *GetSecretValueInput) SetVersionStage(v string) *GetSecretValueInput {
	s.VersionStage = &v
	return s
}

// Output shape for the GetSecretValue API operation.
type GetSecretValueOutput struct {
	_ struct{} `type:"structure"`

	// The ARN of the secret.
	ARN *string `min:"20" type:"string"`

	// The date and time that this version of the secret was created.
	CreatedDate *time.Time `type:"timestamp"`

	// The friendly name of the secret.
	Name *string `min:"1" type:"string"`

	// The decrypted part of the protected secret information that was originally
	// provided as binary data in the form of a byte array. The response parameter
	// represents the binary data as a base64-encoded (https://tools.ietf.org/html/rfc4648#section-4)
	// string.
	//
	// This parameter is not used if the secret is created by the Secrets Manager
	// console.
	//
	// If you store custom information in this field of the secret, then you must
	// code your Lambda rotation function to parse and interpret whatever you store
	// in the SecretString or SecretBinary fields.
	//
	// SecretBinary is automatically base64 encoded/decoded by the SDK.
	SecretBinary []byte `type:"blob" sensitive:"true"`

	// The decrypted part of the protected secret information that was originally
	// provided as a string.
	//
	// If you create this secret by using the Secrets Manager console then only
	// the SecretString parameter contains data. Secrets Manager stores the information
	// as a JSON structure of key/value pairs that the Lambda rotation function
	// knows how to parse.
	//
	// If you store custom information in the secret by using the CreateSecret,
	// UpdateSecret, or PutSecretValue API operations instead of the Secrets Manager
	// console, or by using the Other secret type in the console, then you must
	// code your Lambda rotation function to parse and interpret those values.
	SecretString *string `type:"string" sensitive:"true"`

	// The unique identifier of this version of the secret.
	VersionId *string `min:"32" type:"string"`

	// A list of all of the staging labels currently attached to this version of
	// the secret.
	VersionStages []*string `min:"1" type:"list"`
}

// String returns the string representation.
func (s GetSecretValueOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s GetSecretValueOutput) GoString() string {
	return s.String()
}

// SetARN sets the ARN field's value.
func (s *GetSecretValueOutput) SetARN(v string) *GetSecretValueOutput {
	s.ARN = &v
	return s
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *GetSecretValueOutput) SetCreatedDate(v time.Time) *GetSecretValueOutput {
	s.CreatedDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetSecretValueOutput) SetName(v string) *GetSecretValueOutput {
	s.Name = &v
	return s
}

// SetSecretBinary sets the SecretBinary field's value.
func (s *GetSecretValueOutput) SetSecretBinary(v []byte) *GetSecretValueOutput {
	s.SecretBinary = v
	return s
}

// SetSecretString sets the SecretString field's value.
func (s *GetSecretValueOutput) SetSecretString(v string) *GetSecretValueOutput {
	s.SecretString = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetSecretValueOutput) SetVersionId(v string) *GetSecretValueOutput {
	s.VersionId = &v
	return s
}

// SetVersionStages sets the VersionStages field's value.
func (s *GetSecretValueOutput) SetVersionStages(v []*string) *GetSecretValueOutput {
	s.VersionStages = v
	return s
}

// An error occurred on the server side.
// An error occurred on the server side.
type InternalServiceError struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s InternalServiceError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s InternalServiceError) GoString() string {
	return s.String()
}

// newErrorInternalServiceError wraps the response metadata in an awserr-compatible
// error value; used by the service's error-unmarshaling machinery.
func newErrorInternalServiceError(v protocol.ResponseMetadata) error {
	return &InternalServiceError{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InternalServiceError) Code() string {
	return "InternalServiceError"
}

// Message returns the exception's message.
func (s *InternalServiceError) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InternalServiceError) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *InternalServiceError) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalServiceError) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InternalServiceError) RequestID() string {
	return s.RespMetadata.RequestID
}

// You provided an invalid NextToken value.
// You provided an invalid NextToken value.
type InvalidNextTokenException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s InvalidNextTokenException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s InvalidNextTokenException) GoString() string {
	return s.String()
}

// newErrorInvalidNextTokenException wraps the response metadata in an
// awserr-compatible error value; used by the service's error-unmarshaling machinery.
func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error {
	return &InvalidNextTokenException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidNextTokenException) Code() string {
	return "InvalidNextTokenException"
}

// Message returns the exception's message.
func (s *InvalidNextTokenException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidNextTokenException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *InvalidNextTokenException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidNextTokenException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidNextTokenException) RequestID() string {
	return s.RespMetadata.RequestID
}

// You provided an invalid value for a parameter.
// You provided an invalid value for a parameter.
type InvalidParameterException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s InvalidParameterException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s InvalidParameterException) GoString() string {
	return s.String()
}

// newErrorInvalidParameterException wraps the response metadata in an
// awserr-compatible error value; used by the service's error-unmarshaling machinery.
func newErrorInvalidParameterException(v protocol.ResponseMetadata) error {
	return &InvalidParameterException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidParameterException) Code() string {
	return "InvalidParameterException"
}

// Message returns the exception's message.
func (s *InvalidParameterException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidParameterException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *InvalidParameterException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidParameterException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidParameterException) RequestID() string {
	return s.RespMetadata.RequestID
}

// You provided a parameter value that is not valid for the current state of
// the resource.
//
// Possible causes:
//
//    * You tried to perform the operation on a secret that's currently marked
//    deleted.
//
//    * You tried to enable rotation on a secret that doesn't already have a
//    Lambda function ARN configured and you didn't include such an ARN as a
//    parameter in this call.
type InvalidRequestException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s InvalidRequestException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s InvalidRequestException) GoString() string {
	return s.String()
}

// newErrorInvalidRequestException wraps the response metadata in an
// awserr-compatible error value; used by the service's error-unmarshaling machinery.
func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
	return &InvalidRequestException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidRequestException) Code() string {
	return "InvalidRequestException"
}

// Message returns the exception's message.
func (s *InvalidRequestException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidRequestException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *InvalidRequestException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidRequestException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidRequestException) RequestID() string {
	return s.RespMetadata.RequestID
}

// The request failed because it would exceed one of the Secrets Manager internal
// limits.
type LimitExceededException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation.
func (s LimitExceededException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s LimitExceededException) GoString() string {
	return s.String()
}

// newErrorLimitExceededException wraps the response metadata in an
// awserr-compatible error value; used by the service's error-unmarshaling machinery.
func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
	return &LimitExceededException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *LimitExceededException) Code() string {
	return "LimitExceededException"
}

// Message returns the exception's message.
func (s *LimitExceededException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *LimitExceededException) OrigErr() error {
	return nil
}

// Error satisfies the error interface; formatted as "Code: Message".
func (s *LimitExceededException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *LimitExceededException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *LimitExceededException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Input shape for the ListSecretVersionIds API operation.
type ListSecretVersionIdsInput struct {
	_ struct{} `type:"structure"`

	// (Optional) Specifies that you want the results to include versions that do
	// not have any staging labels attached to them. Such versions are considered
	// deprecated and are subject to deletion by Secrets Manager as needed.
	IncludeDeprecated *bool `type:"boolean"`

	// (Optional) Limits the number of results you want to include in the response.
	// If you don't include this parameter, it defaults to a value that's specific
	// to the operation. If additional items exist beyond the maximum you specify,
	// the NextToken response element is present and has a value (isn't null). Include
	// that value as the NextToken request parameter in the next call to the operation
	// to get the next part of the results. Note that Secrets Manager might return
	// fewer results than the maximum even when there are more results available.
	// You should check NextToken after every operation to ensure that you receive
	// all of the results.
	MaxResults *int64 `min:"1" type:"integer"`

	// (Optional) Use this parameter in a request if you receive a NextToken response
	// in a previous request indicating there's more output available. In a subsequent
	// call, set it to the value of the previous call NextToken response to indicate
	// where the output should continue from.
	NextToken *string `min:"1" type:"string"`

	// The identifier for the secret containing the versions you want to list. You
	// can specify either the Amazon Resource Name (ARN) or the friendly name of
	// the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation.
func (s ListSecretVersionIdsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s ListSecretVersionIdsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListSecretVersionIdsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListSecretVersionIdsInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}
	if s.NextToken != nil && len(*s.NextToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
	}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetIncludeDeprecated sets the IncludeDeprecated field's value.
func (s *ListSecretVersionIdsInput) SetIncludeDeprecated(v bool) *ListSecretVersionIdsInput {
	s.IncludeDeprecated = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
+func (s *ListSecretVersionIdsInput) SetMaxResults(v int64) *ListSecretVersionIdsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecretVersionIdsInput) SetNextToken(v string) *ListSecretVersionIdsInput { + s.NextToken = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *ListSecretVersionIdsInput) SetSecretId(v string) *ListSecretVersionIdsInput { + s.SecretId = &v + return s +} + +type ListSecretVersionIdsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the secret. + // + // Secrets Manager automatically adds several random characters to the name + // at the end of the ARN when you initially create a secret. This affects only + // the ARN and not the actual friendly name. This ensures that if you create + // a new secret with the same name as an old secret that you previously deleted, + // then users with access to the old secret don't automatically get access to + // the new secret because the ARNs are different. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret. + Name *string `min:"1" type:"string"` + + // If present in the response, this value indicates that there's more output + // available than included in the current response. This can occur even when + // the response includes no values at all, such as when you ask for a filtered + // view of a very long list. Use this value in the NextToken request parameter + // in a subsequent call to the operation to continue processing and get the + // next part of the output. You should repeat this until the NextToken response + // element comes back empty (as null). + NextToken *string `min:"1" type:"string"` + + // The list of the currently available versions of the specified secret. 
+ Versions []*SecretVersionsListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSecretVersionIdsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSecretVersionIdsOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *ListSecretVersionIdsOutput) SetARN(v string) *ListSecretVersionIdsOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListSecretVersionIdsOutput) SetName(v string) *ListSecretVersionIdsOutput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecretVersionIdsOutput) SetNextToken(v string) *ListSecretVersionIdsOutput { + s.NextToken = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListSecretVersionIdsOutput) SetVersions(v []*SecretVersionsListEntry) *ListSecretVersionIdsOutput { + s.Versions = v + return s +} + +type ListSecretsInput struct { + _ struct{} `type:"structure"` + + // Lists the secret request filters. + Filters []*Filter `type:"list"` + + // (Optional) Limits the number of results you want to include in the response. + // If you don't include this parameter, it defaults to a value that's specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (isn't null). Include + // that value as the NextToken request parameter in the next call to the operation + // to get the next part of the results. Note that Secrets Manager might return + // fewer results than the maximum even when there are more results available. + // You should check NextToken after every operation to ensure that you receive + // all of the results. 
+ MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) Use this parameter in a request if you receive a NextToken response + // in a previous request indicating there's more output available. In a subsequent + // call, set it to the value of the previous call NextToken response to indicate + // where the output should continue from. + NextToken *string `min:"1" type:"string"` + + // Lists secrets in the requested order. + SortOrder *string `type:"string" enum:"SortOrderType"` +} + +// String returns the string representation +func (s ListSecretsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSecretsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSecretsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSecretsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListSecretsInput) SetFilters(v []*Filter) *ListSecretsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSecretsInput) SetMaxResults(v int64) *ListSecretsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecretsInput) SetNextToken(v string) *ListSecretsInput { + s.NextToken = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. 
+func (s *ListSecretsInput) SetSortOrder(v string) *ListSecretsInput { + s.SortOrder = &v + return s +} + +type ListSecretsOutput struct { + _ struct{} `type:"structure"` + + // If present in the response, this value indicates that there's more output + // available than included in the current response. This can occur even when + // the response includes no values at all, such as when you ask for a filtered + // view of a very long list. Use this value in the NextToken request parameter + // in a subsequent call to the operation to continue processing and get the + // next part of the output. You should repeat this until the NextToken response + // element comes back empty (as null). + NextToken *string `min:"1" type:"string"` + + // A list of the secrets in the account. + SecretList []*SecretListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSecretsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSecretsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecretsOutput) SetNextToken(v string) *ListSecretsOutput { + s.NextToken = &v + return s +} + +// SetSecretList sets the SecretList field's value. +func (s *ListSecretsOutput) SetSecretList(v []*SecretListEntry) *ListSecretsOutput { + s.SecretList = v + return s +} + +// The policy document that you provided isn't valid. 
+type MalformedPolicyDocumentException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s MalformedPolicyDocumentException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MalformedPolicyDocumentException) GoString() string {
+	return s.String()
+}
+
+func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error {
+	return &MalformedPolicyDocumentException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *MalformedPolicyDocumentException) Code() string {
+	return "MalformedPolicyDocumentException"
+}
+
+// Message returns the exception's message.
+func (s *MalformedPolicyDocumentException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *MalformedPolicyDocumentException) OrigErr() error {
+	return nil
+}
+
+func (s *MalformedPolicyDocumentException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *MalformedPolicyDocumentException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *MalformedPolicyDocumentException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The request failed because you did not complete all the prerequisite steps.
+type PreconditionNotMetException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s PreconditionNotMetException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PreconditionNotMetException) GoString() string {
+	return s.String()
+}
+
+func newErrorPreconditionNotMetException(v protocol.ResponseMetadata) error {
+	return &PreconditionNotMetException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *PreconditionNotMetException) Code() string {
+	return "PreconditionNotMetException"
+}
+
+// Message returns the exception's message.
+func (s *PreconditionNotMetException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *PreconditionNotMetException) OrigErr() error {
+	return nil
+}
+
+func (s *PreconditionNotMetException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *PreconditionNotMetException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *PreconditionNotMetException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The resource policy did not prevent broad access to the secret.
+type PublicPolicyException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s PublicPolicyException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PublicPolicyException) GoString() string {
+	return s.String()
+}
+
+func newErrorPublicPolicyException(v protocol.ResponseMetadata) error {
+	return &PublicPolicyException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *PublicPolicyException) Code() string {
+	return "PublicPolicyException"
+}
+
+// Message returns the exception's message.
+func (s *PublicPolicyException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *PublicPolicyException) OrigErr() error {
+	return nil
+}
+
+func (s *PublicPolicyException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *PublicPolicyException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *PublicPolicyException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type PutResourcePolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// Makes an optional API call to Zelkova to validate the Resource Policy to
+	// prevent broad access to your secret.
+	BlockPublicPolicy *bool `type:"boolean"`
+
+	// A JSON-formatted string that's constructed according to the grammar and syntax
+	// for an AWS resource-based policy. The policy in the string identifies who
+	// can access or manage this secret and its versions. For information on how
+	// to format a JSON parameter for the various command line tool environments,
+	// see Using JSON for Parameters (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json)
+	// in the AWS CLI User Guide.
+	//
+	// ResourcePolicy is a required field
+	ResourcePolicy *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the secret that you want to attach the resource-based policy to.
+	// You can specify either the ARN or the friendly name of the secret.
+	//
+	// If you specify an ARN, we generally recommend that you specify a complete
+	// ARN. You can specify a partial ARN too—for example, if you don’t include
+	// the final hyphen and six random characters that Secrets Manager adds at the
+	// end of the ARN when you created the secret. A partial ARN match can work
+	// as long as it uniquely matches only one secret. However, if your secret has
+	// a name that ends in a hyphen followed by six characters (before Secrets Manager
+	// adds the hyphen and six characters to the ARN) and you try to use that as
+	// a partial ARN, then those characters cause Secrets Manager to assume that
+	// you’re specifying a complete ARN. This confusion can cause unexpected results.
+	// To avoid this situation, we recommend that you don’t create secret names
+	// ending with a hyphen followed by six characters.
+	//
+	// If you specify an incomplete ARN without the random suffix, and instead provide
+	// the 'friendly name', you must not include the random suffix. If you do include
+	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
+	// or an AccessDeniedException error, depending on your permissions.
+	//
+	// SecretId is a required field
+	SecretId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutResourcePolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutResourcePolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutResourcePolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"}
+	if s.ResourcePolicy == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourcePolicy"))
+	}
+	if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1))
+	}
+	if s.SecretId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SecretId"))
+	}
+	if s.SecretId != nil && len(*s.SecretId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBlockPublicPolicy sets the BlockPublicPolicy field's value.
+func (s *PutResourcePolicyInput) SetBlockPublicPolicy(v bool) *PutResourcePolicyInput {
+	s.BlockPublicPolicy = &v
+	return s
+}
+
+// SetResourcePolicy sets the ResourcePolicy field's value.
+func (s *PutResourcePolicyInput) SetResourcePolicy(v string) *PutResourcePolicyInput {
+	s.ResourcePolicy = &v
+	return s
+}
+
+// SetSecretId sets the SecretId field's value.
+func (s *PutResourcePolicyInput) SetSecretId(v string) *PutResourcePolicyInput {
+	s.SecretId = &v
+	return s
+}
+
+type PutResourcePolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the secret retrieved by the resource-based policy.
+	ARN *string `min:"20" type:"string"`
+
+	// The friendly name of the secret that was retrieved by the resource-based
+	// policy.
+	Name *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s PutResourcePolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutResourcePolicyOutput) GoString() string {
+	return s.String()
+}
+
+// SetARN sets the ARN field's value.
+func (s *PutResourcePolicyOutput) SetARN(v string) *PutResourcePolicyOutput {
+	s.ARN = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *PutResourcePolicyOutput) SetName(v string) *PutResourcePolicyOutput {
+	s.Name = &v
+	return s
+}
+
+type PutSecretValueInput struct {
+	_ struct{} `type:"structure"`
+
+	// (Optional) Specifies a unique identifier for the new version of the secret.
+	//
+	// If you use the AWS CLI or one of the AWS SDK to call this operation, then
+	// you can leave this parameter empty. The CLI or SDK generates a random UUID
+	// for you and includes that in the request. If you don't use the SDK and instead
+	// generate a raw HTTP request to the Secrets Manager service endpoint, then
+	// you must generate a ClientRequestToken yourself for new versions and include
+	// that value in the request.
+	//
+	// This value helps ensure idempotency. Secrets Manager uses this value to prevent
+	// the accidental creation of duplicate versions if there are failures and retries
+	// during the Lambda rotation function's processing. We recommend that you generate
+	// a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) value
+	// to ensure uniqueness within the specified secret.
+	//
+	//    * If the ClientRequestToken value isn't already associated with a version
+	//    of the secret then a new version of the secret is created.
+	//
+	//    * If a version with this value already exists and that version's SecretString
+	//    or SecretBinary values are the same as those in the request then the request
+	//    is ignored (the operation is idempotent).
+	//
+	//    * If a version with this value already exists and the version of the SecretString
+	//    and SecretBinary values are different from those in the request then the
+	//    request fails because you cannot modify an existing secret version. You
+	//    can only create new versions to store new secret values.
+	//
+	// This value becomes the VersionId of the new version.
+	ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"`
+
+	// (Optional) Specifies binary data that you want to encrypt and store in the
+	// new version of the secret. To use this parameter in the command-line tools,
+	// we recommend that you store your binary data in a file and then use the appropriate
+	// technique for your tool to pass the contents of the file as a parameter.
+	// Either SecretBinary or SecretString must have a value, but not both. They
+	// cannot both be empty.
+	//
+	// This parameter is not accessible if you created the secret by using the
+	// Secrets Manager console.
+	//
+	// SecretBinary is automatically base64 encoded/decoded by the SDK.
+	SecretBinary []byte `type:"blob" sensitive:"true"`
+
+	// Specifies the secret to which you want to add a new version. You can specify
+	// either the Amazon Resource Name (ARN) or the friendly name of the secret.
+	// The secret must already exist.
+	//
+	// If you specify an ARN, we generally recommend that you specify a complete
+	// ARN. You can specify a partial ARN too—for example, if you don’t include
+	// the final hyphen and six random characters that Secrets Manager adds at the
+	// end of the ARN when you created the secret. A partial ARN match can work
+	// as long as it uniquely matches only one secret. However, if your secret has
+	// a name that ends in a hyphen followed by six characters (before Secrets Manager
+	// adds the hyphen and six characters to the ARN) and you try to use that as
+	// a partial ARN, then those characters cause Secrets Manager to assume that
+	// you’re specifying a complete ARN. This confusion can cause unexpected results.
+	// To avoid this situation, we recommend that you don’t create secret names
+	// ending with a hyphen followed by six characters.
+	//
+	// If you specify an incomplete ARN without the random suffix, and instead provide
+	// the 'friendly name', you must not include the random suffix. If you do include
+	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
+	// or an AccessDeniedException error, depending on your permissions.
+	//
+	// SecretId is a required field
+	SecretId *string `min:"1" type:"string" required:"true"`
+
+	// (Optional) Specifies text data that you want to encrypt and store in this
+	// new version of the secret. Either SecretString or SecretBinary must have
+	// a value, but not both. They cannot both be empty.
+	//
+	// If you create this secret by using the Secrets Manager console then Secrets
+	// Manager puts the protected secret text in only the SecretString parameter.
+	// The Secrets Manager console stores the information as a JSON structure of
+	// key/value pairs that the default Lambda rotation function knows how to parse.
+	//
+	// For storing multiple values, we recommend that you use a JSON text string
+	// argument and specify key/value pairs. For information on how to format a
+	// JSON parameter for the various command line tool environments, see Using
+	// JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json)
+	// in the AWS CLI User Guide.
+	//
+	// For example:
+	//
+	// [{"username":"bob"},{"password":"abc123xyz456"}]
+	//
+	// If your command-line tool or SDK requires quotation marks around the parameter,
+	// you should use single quotes to avoid confusion with the double quotes required
+	// in the JSON text.
+	SecretString *string `type:"string" sensitive:"true"`
+
+	// (Optional) Specifies a list of staging labels that are attached to this version
+	// of the secret. These staging labels are used to track the versions through
+	// the rotation process by the Lambda rotation function.
+	//
+	// A staging label must be unique to a single version of the secret. If you
+	// specify a staging label that's already associated with a different version
+	// of the same secret then that staging label is automatically removed from
+	// the other version and attached to this version.
+	//
+	// If you do not specify a value for VersionStages then Secrets Manager automatically
+	// moves the staging label AWSCURRENT to this new version.
+	VersionStages []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s PutSecretValueInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSecretValueInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutSecretValueInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutSecretValueInput"}
+	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 {
+		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32))
+	}
+	if s.SecretId == nil {
+		invalidParams.Add(request.NewErrParamRequired("SecretId"))
+	}
+	if s.SecretId != nil && len(*s.SecretId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
+	}
+	if s.VersionStages != nil && len(s.VersionStages) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("VersionStages", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetClientRequestToken sets the ClientRequestToken field's value.
+func (s *PutSecretValueInput) SetClientRequestToken(v string) *PutSecretValueInput {
+	s.ClientRequestToken = &v
+	return s
+}
+
+// SetSecretBinary sets the SecretBinary field's value.
+func (s *PutSecretValueInput) SetSecretBinary(v []byte) *PutSecretValueInput {
+	s.SecretBinary = v
+	return s
+}
+
+// SetSecretId sets the SecretId field's value.
+func (s *PutSecretValueInput) SetSecretId(v string) *PutSecretValueInput {
+	s.SecretId = &v
+	return s
+}
+
+// SetSecretString sets the SecretString field's value.
+func (s *PutSecretValueInput) SetSecretString(v string) *PutSecretValueInput {
+	s.SecretString = &v
+	return s
+}
+
+// SetVersionStages sets the VersionStages field's value.
+func (s *PutSecretValueInput) SetVersionStages(v []*string) *PutSecretValueInput {
+	s.VersionStages = v
+	return s
+}
+
+type PutSecretValueOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) for the secret for which you just created
+	// a version.
+	ARN *string `min:"20" type:"string"`
+
+	// The friendly name of the secret for which you just created or updated a version.
+	Name *string `min:"1" type:"string"`
+
+	// The unique identifier of the version of the secret you just created or updated.
+	VersionId *string `min:"32" type:"string"`
+
+	// The list of staging labels that are currently attached to this version of
+	// the secret. Staging labels are used to track a version as it progresses through
+	// the secret rotation process.
+	VersionStages []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s PutSecretValueOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutSecretValueOutput) GoString() string {
+	return s.String()
+}
+
+// SetARN sets the ARN field's value.
+func (s *PutSecretValueOutput) SetARN(v string) *PutSecretValueOutput {
+	s.ARN = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *PutSecretValueOutput) SetName(v string) *PutSecretValueOutput {
+	s.Name = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutSecretValueOutput) SetVersionId(v string) *PutSecretValueOutput {
+	s.VersionId = &v
+	return s
+}
+
+// SetVersionStages sets the VersionStages field's value.
+func (s *PutSecretValueOutput) SetVersionStages(v []*string) *PutSecretValueOutput {
+	s.VersionStages = v
+	return s
+}
+
+// A resource with the ID you requested already exists.
+type ResourceExistsException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceExistsException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceExistsException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceExistsException(v protocol.ResponseMetadata) error {
+	return &ResourceExistsException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceExistsException) Code() string {
+	return "ResourceExistsException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceExistsException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceExistsException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceExistsException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceExistsException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ResourceExistsException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// We can't find the resource that you asked for.
+type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RestoreSecretInput struct { + _ struct{} `type:"structure"` + + // Specifies the secret that you want to restore from a previously scheduled + // deletion. You can specify either the Amazon Resource Name (ARN) or the friendly + // name of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. 
You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RestoreSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreSecretInput"} + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecretId sets the SecretId field's value. 
func (s *RestoreSecretInput) SetSecretId(v string) *RestoreSecretInput {
	s.SecretId = &v
	return s
}

type RestoreSecretOutput struct {
	_ struct{} `type:"structure"`

	// The ARN of the secret that was restored.
	ARN *string `min:"20" type:"string"`

	// The friendly name of the secret that was restored.
	Name *string `min:"1" type:"string"`
}

// String returns the string representation
func (s RestoreSecretOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RestoreSecretOutput) GoString() string {
	return s.String()
}

// SetARN sets the ARN field's value.
func (s *RestoreSecretOutput) SetARN(v string) *RestoreSecretOutput {
	s.ARN = &v
	return s
}

// SetName sets the Name field's value.
func (s *RestoreSecretOutput) SetName(v string) *RestoreSecretOutput {
	s.Name = &v
	return s
}

type RotateSecretInput struct {
	_ struct{} `type:"structure"`

	// (Optional) Specifies a unique identifier for the new version of the secret
	// that helps ensure idempotency.
	//
	// If you use the AWS CLI or one of the AWS SDK to call this operation, then
	// you can leave this parameter empty. The CLI or SDK generates a random UUID
	// for you and includes that in the request for this parameter. If you don't
	// use the SDK and instead generate a raw HTTP request to the Secrets Manager
	// service endpoint, then you must generate a ClientRequestToken yourself for
	// new versions and include that value in the request.
	//
	// You only need to specify your own value if you implement your own retry logic
	// and want to ensure that a given secret is not created twice. We recommend
	// that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier)
	// value to ensure uniqueness within the specified secret.
	//
	// Secrets Manager uses this value to prevent the accidental creation of duplicate
	// versions if there are failures and retries during the function's processing.
	// This value becomes the VersionId of the new version.
	ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"`

	// (Optional) Specifies the ARN of the Lambda function that can rotate the secret.
	RotationLambdaARN *string `type:"string"`

	// A structure that defines the rotation configuration for this secret.
	RotationRules *RotationRulesType `type:"structure"`

	// Specifies the secret that you want to rotate. You can specify either the
	// Amazon Resource Name (ARN) or the friendly name of the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s RotateSecretInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RotateSecretInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RotateSecretInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RotateSecretInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32))
	}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}
	// Delegate validation of the nested rotation rules to their own Validate.
	if s.RotationRules != nil {
		if err := s.RotationRules.Validate(); err != nil {
			invalidParams.AddNested("RotationRules", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *RotateSecretInput) SetClientRequestToken(v string) *RotateSecretInput {
	s.ClientRequestToken = &v
	return s
}

// SetRotationLambdaARN sets the RotationLambdaARN field's value.
func (s *RotateSecretInput) SetRotationLambdaARN(v string) *RotateSecretInput {
	s.RotationLambdaARN = &v
	return s
}

// SetRotationRules sets the RotationRules field's value.
func (s *RotateSecretInput) SetRotationRules(v *RotationRulesType) *RotateSecretInput {
	s.RotationRules = v
	return s
}

// SetSecretId sets the SecretId field's value.
func (s *RotateSecretInput) SetSecretId(v string) *RotateSecretInput {
	s.SecretId = &v
	return s
}

type RotateSecretOutput struct {
	_ struct{} `type:"structure"`

	// The ARN of the secret.
	ARN *string `min:"20" type:"string"`

	// The friendly name of the secret.
	Name *string `min:"1" type:"string"`

	// The ID of the new version of the secret created by the rotation started by
	// this request.
	VersionId *string `min:"32" type:"string"`
}

// String returns the string representation
func (s RotateSecretOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RotateSecretOutput) GoString() string {
	return s.String()
}

// SetARN sets the ARN field's value.
func (s *RotateSecretOutput) SetARN(v string) *RotateSecretOutput {
	s.ARN = &v
	return s
}

// SetName sets the Name field's value.
func (s *RotateSecretOutput) SetName(v string) *RotateSecretOutput {
	s.Name = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *RotateSecretOutput) SetVersionId(v string) *RotateSecretOutput {
	s.VersionId = &v
	return s
}

// A structure that defines the rotation configuration for the secret.
type RotationRulesType struct {
	_ struct{} `type:"structure"`

	// Specifies the number of days between automatic scheduled rotations of the
	// secret.
	//
	// Secrets Manager schedules the next rotation when the previous one is complete.
	// Secrets Manager schedules the date by adding the rotation interval (number
	// of days) to the actual date of the last rotation. The service chooses the
	// hour within that 24-hour date window randomly. The minute is also chosen
	// somewhat randomly, but weighted towards the top of the hour and influenced
	// by a variety of factors that help distribute load.
	AutomaticallyAfterDays *int64 `min:"1" type:"long"`
}

// String returns the string representation
func (s RotationRulesType) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RotationRulesType) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *RotationRulesType) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RotationRulesType"}
	if s.AutomaticallyAfterDays != nil && *s.AutomaticallyAfterDays < 1 {
		invalidParams.Add(request.NewErrParamMinValue("AutomaticallyAfterDays", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAutomaticallyAfterDays sets the AutomaticallyAfterDays field's value.
func (s *RotationRulesType) SetAutomaticallyAfterDays(v int64) *RotationRulesType {
	s.AutomaticallyAfterDays = &v
	return s
}

// A structure that contains the details about a secret. It does not include
// the encrypted SecretString and SecretBinary values. To get those values,
// use the GetSecretValue operation.
type SecretListEntry struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the secret.
	//
	// For more information about ARNs in Secrets Manager, see Policy Resources
	// (https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_iam-permissions.html#iam-resources)
	// in the AWS Secrets Manager User Guide.
	ARN *string `min:"20" type:"string"`

	// The date and time when a secret was created.
	CreatedDate *time.Time `type:"timestamp"`

	// The date and time the deletion of the secret occurred. Not present on active
	// secrets. The secret can be recovered until the number of days in the recovery
	// window has passed, as specified in the RecoveryWindowInDays parameter of
	// the DeleteSecret operation.
	DeletedDate *time.Time `type:"timestamp"`

	// The user-provided description of the secret.
	Description *string `type:"string"`

	// The ARN or alias of the AWS KMS customer master key (CMK) used to encrypt
	// the SecretString and SecretBinary fields in each version of the secret. If
	// you don't provide a key, then Secrets Manager defaults to encrypting the
	// secret fields with the default KMS CMK, the key named awssecretsmanager,
	// for this account.
	KmsKeyId *string `type:"string"`

	// The last date that this secret was accessed. This value is truncated to midnight
	// of the date and therefore shows only the date, not the time.
	LastAccessedDate *time.Time `type:"timestamp"`

	// The last date and time that this secret was modified in any way.
	LastChangedDate *time.Time `type:"timestamp"`

	// The last date and time that the rotation process for this secret was invoked.
	LastRotatedDate *time.Time `type:"timestamp"`

	// The friendly name of the secret. You can use forward slashes in the name
	// to represent a path hierarchy. For example, /prod/databases/dbserver1 could
	// represent the secret for a server named dbserver1 in the folder databases
	// in the folder prod.
	Name *string `min:"1" type:"string"`

	// Returns the name of the service that created the secret.
	OwningService *string `min:"1" type:"string"`

	// Indicates whether automatic, scheduled rotation is enabled for this secret.
	RotationEnabled *bool `type:"boolean"`

	// The ARN of an AWS Lambda function invoked by Secrets Manager to rotate and
	// expire the secret either automatically per the schedule or manually by a
	// call to RotateSecret.
	RotationLambdaARN *string `type:"string"`

	// A structure that defines the rotation configuration for the secret.
	RotationRules *RotationRulesType `type:"structure"`

	// A list of all of the currently assigned SecretVersionStage staging labels
	// and the SecretVersionId attached to each one. Staging labels are used to
	// keep track of the different versions during the rotation process.
	//
	// A version that does not have any SecretVersionStage is considered deprecated
	// and subject to deletion. Such versions are not included in this list.
	SecretVersionsToStages map[string][]*string `type:"map"`

	// The list of user-defined tags associated with the secret. To add tags to
	// a secret, use TagResource. To remove tags, use UntagResource.
	Tags []*Tag `type:"list"`
}

// String returns the string representation
func (s SecretListEntry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SecretListEntry) GoString() string {
	return s.String()
}

// SetARN sets the ARN field's value.
func (s *SecretListEntry) SetARN(v string) *SecretListEntry {
	s.ARN = &v
	return s
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *SecretListEntry) SetCreatedDate(v time.Time) *SecretListEntry {
	s.CreatedDate = &v
	return s
}

// SetDeletedDate sets the DeletedDate field's value.
func (s *SecretListEntry) SetDeletedDate(v time.Time) *SecretListEntry {
	s.DeletedDate = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *SecretListEntry) SetDescription(v string) *SecretListEntry {
	s.Description = &v
	return s
}

// SetKmsKeyId sets the KmsKeyId field's value.
func (s *SecretListEntry) SetKmsKeyId(v string) *SecretListEntry {
	s.KmsKeyId = &v
	return s
}

// SetLastAccessedDate sets the LastAccessedDate field's value.
func (s *SecretListEntry) SetLastAccessedDate(v time.Time) *SecretListEntry {
	s.LastAccessedDate = &v
	return s
}

// SetLastChangedDate sets the LastChangedDate field's value.
func (s *SecretListEntry) SetLastChangedDate(v time.Time) *SecretListEntry {
	s.LastChangedDate = &v
	return s
}

// SetLastRotatedDate sets the LastRotatedDate field's value.
func (s *SecretListEntry) SetLastRotatedDate(v time.Time) *SecretListEntry {
	s.LastRotatedDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *SecretListEntry) SetName(v string) *SecretListEntry {
	s.Name = &v
	return s
}

// SetOwningService sets the OwningService field's value.
func (s *SecretListEntry) SetOwningService(v string) *SecretListEntry {
	s.OwningService = &v
	return s
}

// SetRotationEnabled sets the RotationEnabled field's value.
func (s *SecretListEntry) SetRotationEnabled(v bool) *SecretListEntry {
	s.RotationEnabled = &v
	return s
}

// SetRotationLambdaARN sets the RotationLambdaARN field's value.
func (s *SecretListEntry) SetRotationLambdaARN(v string) *SecretListEntry {
	s.RotationLambdaARN = &v
	return s
}

// SetRotationRules sets the RotationRules field's value.
func (s *SecretListEntry) SetRotationRules(v *RotationRulesType) *SecretListEntry {
	s.RotationRules = v
	return s
}

// SetSecretVersionsToStages sets the SecretVersionsToStages field's value.
func (s *SecretListEntry) SetSecretVersionsToStages(v map[string][]*string) *SecretListEntry {
	s.SecretVersionsToStages = v
	return s
}

// SetTags sets the Tags field's value.
func (s *SecretListEntry) SetTags(v []*Tag) *SecretListEntry {
	s.Tags = v
	return s
}

// A structure that contains information about one version of a secret.
type SecretVersionsListEntry struct {
	_ struct{} `type:"structure"`

	// The date and time this version of the secret was created.
	CreatedDate *time.Time `type:"timestamp"`

	// The date that this version of the secret was last accessed. Note that the
	// resolution of this field is at the date level and does not include the time.
	LastAccessedDate *time.Time `type:"timestamp"`

	// The unique version identifier of this version of the secret.
	VersionId *string `min:"32" type:"string"`

	// An array of staging labels that are currently associated with this version
	// of the secret.
	VersionStages []*string `min:"1" type:"list"`
}

// String returns the string representation
func (s SecretVersionsListEntry) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SecretVersionsListEntry) GoString() string {
	return s.String()
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *SecretVersionsListEntry) SetCreatedDate(v time.Time) *SecretVersionsListEntry {
	s.CreatedDate = &v
	return s
}

// SetLastAccessedDate sets the LastAccessedDate field's value.
func (s *SecretVersionsListEntry) SetLastAccessedDate(v time.Time) *SecretVersionsListEntry {
	s.LastAccessedDate = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *SecretVersionsListEntry) SetVersionId(v string) *SecretVersionsListEntry {
	s.VersionId = &v
	return s
}

// SetVersionStages sets the VersionStages field's value.
func (s *SecretVersionsListEntry) SetVersionStages(v []*string) *SecretVersionsListEntry {
	s.VersionStages = v
	return s
}

// A structure that contains information about a tag.
type Tag struct {
	_ struct{} `type:"structure"`

	// The key identifier, or name, of the tag.
	Key *string `min:"1" type:"string"`

	// The string value associated with the key of the tag.
	// Note: unlike Key, Value carries no minimum-length constraint.
	Value *string `type:"string"`
}

// String returns the string representation
func (s Tag) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Tag) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Tag) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Tag"}
	// Only Key has a client-side constraint (min length 1); Value is unconstrained.
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *Tag) SetKey(v string) *Tag {
	s.Key = &v
	return s
}

// SetValue sets the Value field's value.
func (s *Tag) SetValue(v string) *Tag {
	s.Value = &v
	return s
}

type TagResourceInput struct {
	_ struct{} `type:"structure"`

	// The identifier for the secret that you want to attach tags to. You can specify
	// either the Amazon Resource Name (ARN) or the friendly name of the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`

	// The tags to attach to the secret. Each element in the list consists of a
	// Key and a Value.
	//
	// This parameter to the API requires a JSON text string argument. For information
	// on how to format a JSON parameter for the various command line tool environments,
	// see Using JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json)
	// in the AWS CLI User Guide. For the AWS CLI, you can also use the syntax:
	// --Tags Key="Key1",Value="Value1",Key="Key2",Value="Value2"[,…]
	//
	// Tags is a required field
	Tags []*Tag `type:"list" required:"true"`
}

// String returns the string representation
func (s TagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}
	if s.Tags == nil {
		invalidParams.Add(request.NewErrParamRequired("Tags"))
	}
	if s.Tags != nil {
		// nil list entries are tolerated here; only non-nil tags are validated.
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSecretId sets the SecretId field's value.
func (s *TagResourceInput) SetSecretId(v string) *TagResourceInput {
	s.SecretId = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
	s.Tags = v
	return s
}

type TagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s TagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TagResourceOutput) GoString() string {
	return s.String()
}

type UntagResourceInput struct {
	_ struct{} `type:"structure"`

	// The identifier for the secret that you want to remove tags from. You can
	// specify either the Amazon Resource Name (ARN) or the friendly name of the
	// secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`

	// A list of tag key names to remove from the secret. You don't specify the
	// value. Both the key and its associated value are removed.
	//
	// This parameter to the API requires a JSON text string argument. For information
	// on how to format a JSON parameter for the various command line tool environments,
	// see Using JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json)
	// in the AWS CLI User Guide.
	//
	// TagKeys is a required field
	TagKeys []*string `type:"list" required:"true"`
}

// String returns the string representation
func (s UntagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UntagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}
	// Only the presence of TagKeys is checked client-side; individual key
	// values are not validated here.
	if s.TagKeys == nil {
		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSecretId sets the SecretId field's value.
func (s *UntagResourceInput) SetSecretId(v string) *UntagResourceInput {
	s.SecretId = &v
	return s
}

// SetTagKeys sets the TagKeys field's value.
func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
	s.TagKeys = v
	return s
}

type UntagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UntagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceOutput) GoString() string {
	return s.String()
}

type UpdateSecretInput struct {
	_ struct{} `type:"structure"`

	// (Optional) If you want to add a new version to the secret, this parameter
	// specifies a unique identifier for the new version that helps ensure idempotency.
	//
	// If you use the AWS CLI or one of the AWS SDK to call this operation, then
	// you can leave this parameter empty. The CLI or SDK generates a random UUID
	// for you and includes that in the request. If you don't use the SDK and instead
	// generate a raw HTTP request to the Secrets Manager service endpoint, then
	// you must generate a ClientRequestToken yourself for new versions and include
	// that value in the request.
	//
	// You typically only need to interact with this value if you implement your
	// own retry logic and want to ensure that a given secret is not created twice.
	// We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier)
	// value to ensure uniqueness within the specified secret.
	//
	// Secrets Manager uses this value to prevent the accidental creation of duplicate
	// versions if there are failures and retries during the Lambda rotation function's
	// processing.
	//
	//    * If the ClientRequestToken value isn't already associated with a version
	//    of the secret then a new version of the secret is created.
	//
	//    * If a version with this value already exists and that version's SecretString
	//    and SecretBinary values are the same as those in the request then the
	//    request is ignored (the operation is idempotent).
	//
	//    * If a version with this value already exists and that version's SecretString
	//    and SecretBinary values are different from the request then an error occurs
	//    because you cannot modify an existing secret value.
	//
	// This value becomes the VersionId of the new version.
	ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"`

	// (Optional) Specifies an updated user-provided description of the secret.
	Description *string `type:"string"`

	// (Optional) Specifies an updated ARN or alias of the AWS KMS customer master
	// key (CMK) to be used to encrypt the protected text in new versions of this
	// secret.
	//
	// You can only use the account's default CMK to encrypt and decrypt if you
	// call this operation using credentials from the same account that owns the
	// secret. If the secret is in a different account, then you must create a custom
	// CMK and provide the ARN of that CMK in this field. The user making the call
	// must have permissions to both the secret and the CMK in their respective
	// accounts.
	KmsKeyId *string `type:"string"`

	// (Optional) Specifies updated binary data that you want to encrypt and store
	// in the new version of the secret. To use this parameter in the command-line
	// tools, we recommend that you store your binary data in a file and then use
	// the appropriate technique for your tool to pass the contents of the file
	// as a parameter. Either SecretBinary or SecretString must have a value, but
	// not both. They cannot both be empty.
	//
	// This parameter is not accessible using the Secrets Manager console.
	//
	// SecretBinary is automatically base64 encoded/decoded by the SDK.
	SecretBinary []byte `type:"blob" sensitive:"true"`

	// Specifies the secret that you want to modify or to which you want to add
	// a new version. You can specify either the Amazon Resource Name (ARN) or the
	// friendly name of the secret.
	//
	// If you specify an ARN, we generally recommend that you specify a complete
	// ARN. You can specify a partial ARN too—for example, if you don’t include
	// the final hyphen and six random characters that Secrets Manager adds at the
	// end of the ARN when you created the secret. A partial ARN match can work
	// as long as it uniquely matches only one secret. However, if your secret has
	// a name that ends in a hyphen followed by six characters (before Secrets Manager
	// adds the hyphen and six characters to the ARN) and you try to use that as
	// a partial ARN, then those characters cause Secrets Manager to assume that
	// you’re specifying a complete ARN. This confusion can cause unexpected results.
	// To avoid this situation, we recommend that you don’t create secret names
	// ending with a hyphen followed by six characters.
	//
	// If you specify an incomplete ARN without the random suffix, and instead provide
	// the 'friendly name', you must not include the random suffix. If you do include
	// the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException
	// or an AccessDeniedException error, depending on your permissions.
	//
	// SecretId is a required field
	SecretId *string `min:"1" type:"string" required:"true"`

	// (Optional) Specifies updated text data that you want to encrypt and store
	// in this new version of the secret. Either SecretBinary or SecretString must
	// have a value, but not both. They cannot both be empty.
	//
	// If you create this secret by using the Secrets Manager console then Secrets
	// Manager puts the protected secret text in only the SecretString parameter.
	// The Secrets Manager console stores the information as a JSON structure of
	// key/value pairs that the default Lambda rotation function knows how to parse.
	//
	// For storing multiple values, we recommend that you use a JSON text string
	// argument and specify key/value pairs. For information on how to format a
	// JSON parameter for the various command line tool environments, see Using
	// JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json)
	// in the AWS CLI User Guide. For example:
	//
	// [{"username":"bob"},{"password":"abc123xyz456"}]
	//
	// If your command-line tool or SDK requires quotation marks around the parameter,
	// you should use single quotes to avoid confusion with the double quotes required
	// in the JSON text. You can also 'escape' the double quote character in the
	// embedded JSON text by prefacing each with a backslash. For example, the following
	// string is surrounded by double-quotes. All of the embedded double quotes
	// are escaped:
	//
	// "[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]"
	SecretString *string `type:"string" sensitive:"true"`
}

// String returns the string representation
func (s UpdateSecretInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateSecretInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
//
// NOTE: the SecretString/SecretBinary either-or rule described in the field
// documentation is enforced server-side, not by this client-side check.
func (s *UpdateSecretInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateSecretInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32))
	}
	if s.SecretId == nil {
		invalidParams.Add(request.NewErrParamRequired("SecretId"))
	}
	if s.SecretId != nil && len(*s.SecretId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SecretId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
+func (s *UpdateSecretInput) SetClientRequestToken(v string) *UpdateSecretInput { + s.ClientRequestToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateSecretInput) SetDescription(v string) *UpdateSecretInput { + s.Description = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *UpdateSecretInput) SetKmsKeyId(v string) *UpdateSecretInput { + s.KmsKeyId = &v + return s +} + +// SetSecretBinary sets the SecretBinary field's value. +func (s *UpdateSecretInput) SetSecretBinary(v []byte) *UpdateSecretInput { + s.SecretBinary = v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *UpdateSecretInput) SetSecretId(v string) *UpdateSecretInput { + s.SecretId = &v + return s +} + +// SetSecretString sets the SecretString field's value. +func (s *UpdateSecretInput) SetSecretString(v string) *UpdateSecretInput { + s.SecretString = &v + return s +} + +type UpdateSecretOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret that was updated. + // + // Secrets Manager automatically adds several random characters to the name + // at the end of the ARN when you initially create a secret. This affects only + // the ARN and not the actual friendly name. This ensures that if you create + // a new secret with the same name as an old secret that you previously deleted, + // then users with access to the old secret don't automatically get access to + // the new secret because the ARNs are different. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret that was updated. + Name *string `min:"1" type:"string"` + + // If a new version of the secret was created by this operation, then VersionId + // contains the unique identifier of the new version. 
+ VersionId *string `min:"32" type:"string"` +} + +// String returns the string representation +func (s UpdateSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecretOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *UpdateSecretOutput) SetARN(v string) *UpdateSecretOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateSecretOutput) SetName(v string) *UpdateSecretOutput { + s.Name = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *UpdateSecretOutput) SetVersionId(v string) *UpdateSecretOutput { + s.VersionId = &v + return s +} + +type UpdateSecretVersionStageInput struct { + _ struct{} `type:"structure"` + + // (Optional) The secret version ID that you want to add the staging label. + // If you want to remove a label from a version, then do not specify this parameter. + // + // If the staging label is already attached to a different version of the secret, + // then you must also specify the RemoveFromVersionId parameter. + MoveToVersionId *string `min:"32" type:"string"` + + // Specifies the secret version ID of the version that the staging label is + // to be removed from. If the staging label you are trying to attach to one + // version is already attached to a different version, then you must include + // this parameter and specify the version that the label is to be removed from. + // If the label is attached and you either do not specify this parameter, or + // the version ID does not match, then the operation fails. + RemoveFromVersionId *string `min:"32" type:"string"` + + // Specifies the secret with the version with the list of staging labels you + // want to modify. You can specify either the Amazon Resource Name (ARN) or + // the friendly name of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. 
You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + // + // SecretId is a required field + SecretId *string `min:"1" type:"string" required:"true"` + + // The staging label to add to this version. + // + // VersionStage is a required field + VersionStage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSecretVersionStageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecretVersionStageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateSecretVersionStageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSecretVersionStageInput"} + if s.MoveToVersionId != nil && len(*s.MoveToVersionId) < 32 { + invalidParams.Add(request.NewErrParamMinLen("MoveToVersionId", 32)) + } + if s.RemoveFromVersionId != nil && len(*s.RemoveFromVersionId) < 32 { + invalidParams.Add(request.NewErrParamMinLen("RemoveFromVersionId", 32)) + } + if s.SecretId == nil { + invalidParams.Add(request.NewErrParamRequired("SecretId")) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + if s.VersionStage == nil { + invalidParams.Add(request.NewErrParamRequired("VersionStage")) + } + if s.VersionStage != nil && len(*s.VersionStage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionStage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMoveToVersionId sets the MoveToVersionId field's value. +func (s *UpdateSecretVersionStageInput) SetMoveToVersionId(v string) *UpdateSecretVersionStageInput { + s.MoveToVersionId = &v + return s +} + +// SetRemoveFromVersionId sets the RemoveFromVersionId field's value. +func (s *UpdateSecretVersionStageInput) SetRemoveFromVersionId(v string) *UpdateSecretVersionStageInput { + s.RemoveFromVersionId = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *UpdateSecretVersionStageInput) SetSecretId(v string) *UpdateSecretVersionStageInput { + s.SecretId = &v + return s +} + +// SetVersionStage sets the VersionStage field's value. +func (s *UpdateSecretVersionStageInput) SetVersionStage(v string) *UpdateSecretVersionStageInput { + s.VersionStage = &v + return s +} + +type UpdateSecretVersionStageOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the secret with the modified staging label. + ARN *string `min:"20" type:"string"` + + // The friendly name of the secret with the modified staging label. 
+ Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateSecretVersionStageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecretVersionStageOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *UpdateSecretVersionStageOutput) SetARN(v string) *UpdateSecretVersionStageOutput { + s.ARN = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateSecretVersionStageOutput) SetName(v string) *UpdateSecretVersionStageOutput { + s.Name = &v + return s +} + +type ValidateResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // Identifies the Resource Policy attached to the secret. + // + // ResourcePolicy is a required field + ResourcePolicy *string `min:"1" type:"string" required:"true"` + + // The identifier for the secret that you want to validate a resource policy. + // You can specify either the Amazon Resource Name (ARN) or the friendly name + // of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. 
+ // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. + SecretId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidateResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValidateResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValidateResourcePolicyInput"} + if s.ResourcePolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ResourcePolicy")) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *ValidateResourcePolicyInput) SetResourcePolicy(v string) *ValidateResourcePolicyInput { + s.ResourcePolicy = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *ValidateResourcePolicyInput) SetSecretId(v string) *ValidateResourcePolicyInput { + s.SecretId = &v + return s +} + +type ValidateResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // Returns a message stating that your Reource Policy passed validation. + PolicyValidationPassed *bool `type:"boolean"` + + // Returns an error message if your policy doesn't pass validatation. 
+ ValidationErrors []*ValidationErrorsEntry `type:"list"` +} + +// String returns the string representation +func (s ValidateResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyValidationPassed sets the PolicyValidationPassed field's value. +func (s *ValidateResourcePolicyOutput) SetPolicyValidationPassed(v bool) *ValidateResourcePolicyOutput { + s.PolicyValidationPassed = &v + return s +} + +// SetValidationErrors sets the ValidationErrors field's value. +func (s *ValidateResourcePolicyOutput) SetValidationErrors(v []*ValidationErrorsEntry) *ValidateResourcePolicyOutput { + s.ValidationErrors = v + return s +} + +// Displays errors that occurred during validation of the resource policy. +type ValidationErrorsEntry struct { + _ struct{} `type:"structure"` + + // Checks the name of the policy. + CheckName *string `min:"1" type:"string"` + + // Displays error messages if validation encounters problems during validation + // of the resource policy. + ErrorMessage *string `type:"string"` +} + +// String returns the string representation +func (s ValidationErrorsEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationErrorsEntry) GoString() string { + return s.String() +} + +// SetCheckName sets the CheckName field's value. +func (s *ValidationErrorsEntry) SetCheckName(v string) *ValidationErrorsEntry { + s.CheckName = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. 
+func (s *ValidationErrorsEntry) SetErrorMessage(v string) *ValidationErrorsEntry { + s.ErrorMessage = &v + return s +} + +const ( + // FilterNameStringTypeDescription is a FilterNameStringType enum value + FilterNameStringTypeDescription = "description" + + // FilterNameStringTypeName is a FilterNameStringType enum value + FilterNameStringTypeName = "name" + + // FilterNameStringTypeTagKey is a FilterNameStringType enum value + FilterNameStringTypeTagKey = "tag-key" + + // FilterNameStringTypeTagValue is a FilterNameStringType enum value + FilterNameStringTypeTagValue = "tag-value" + + // FilterNameStringTypeAll is a FilterNameStringType enum value + FilterNameStringTypeAll = "all" +) + +// FilterNameStringType_Values returns all elements of the FilterNameStringType enum +func FilterNameStringType_Values() []string { + return []string{ + FilterNameStringTypeDescription, + FilterNameStringTypeName, + FilterNameStringTypeTagKey, + FilterNameStringTypeTagValue, + FilterNameStringTypeAll, + } +} + +const ( + // SortOrderTypeAsc is a SortOrderType enum value + SortOrderTypeAsc = "asc" + + // SortOrderTypeDesc is a SortOrderType enum value + SortOrderTypeDesc = "desc" +) + +// SortOrderType_Values returns all elements of the SortOrderType enum +func SortOrderType_Values() []string { + return []string{ + SortOrderTypeAsc, + SortOrderTypeDesc, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go new file mode 100644 index 00000000000..71cfce04145 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go @@ -0,0 +1,86 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package secretsmanager provides the client and types for making API +// requests to AWS Secrets Manager. +// +// AWS Secrets Manager provides a service to enable you to store, manage, and +// retrieve, secrets. 
+// +// This guide provides descriptions of the Secrets Manager API. For more information +// about using this service, see the AWS Secrets Manager User Guide (https://docs.aws.amazon.com/secretsmanager/latest/userguide/introduction.html). +// +// API Version +// +// This version of the Secrets Manager API Reference documents the Secrets Manager +// API version 2017-10-17. +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a +// convenient way to create programmatic access to AWS Secrets Manager. For +// example, the SDKs provide cryptographically signing requests, managing errors, +// and retrying requests automatically. For more information about the AWS SDKs, +// including downloading and installing them, see Tools for Amazon Web Services +// (http://aws.amazon.com/tools/). +// +// We recommend you use the AWS SDKs to make programmatic API calls to Secrets +// Manager. However, you also can use the Secrets Manager HTTP Query API to +// make direct calls to the Secrets Manager web service. To learn more about +// the Secrets Manager HTTP Query API, see Making Query Requests (https://docs.aws.amazon.com/secretsmanager/latest/userguide/query-requests.html) +// in the AWS Secrets Manager User Guide. +// +// Secrets Manager API supports GET and POST requests for all actions, and doesn't +// require you to use GET for some actions and POST for others. However, GET +// requests are subject to the limitation size of a URL. Therefore, for operations +// that require larger sizes, use a POST request. +// +// Support and Feedback for AWS Secrets Manager +// +// We welcome your feedback. 
Send your comments to awssecretsmanager-feedback@amazon.com +// (mailto:awssecretsmanager-feedback@amazon.com), or post your feedback and +// questions in the AWS Secrets Manager Discussion Forum (http://forums.aws.amazon.com/forum.jspa?forumID=296). +// For more information about the AWS Discussion Forums, see Forums Help (http://forums.aws.amazon.com/help.jspa). +// +// How examples are presented +// +// The JSON that AWS Secrets Manager expects as your request parameters and +// the service returns as a response to HTTP query requests contain single, +// long strings without line breaks or white space formatting. The JSON shown +// in the examples displays the code formatted with both line breaks and white +// space to improve readability. When example input parameters can also cause +// long strings extending beyond the screen, you can insert line breaks to enhance +// readability. You should always submit the input as a single JSON text string. +// +// Logging API Requests +// +// AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API +// calls for your AWS account and delivers log files to an Amazon S3 bucket. +// By using information that's collected by AWS CloudTrail, you can determine +// the requests successfully made to Secrets Manager, who made the request, +// when it was made, and so on. For more about AWS Secrets Manager and support +// for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail +// (http://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail) +// in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including +// enabling it and find your log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17 for more information on this service. 
+// +// See secretsmanager package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/secretsmanager/ +// +// Using the Client +// +// To contact AWS Secrets Manager with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Secrets Manager client SecretsManager for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/secretsmanager/#New +package secretsmanager diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go new file mode 100644 index 00000000000..ab90196bb65 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go @@ -0,0 +1,112 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package secretsmanager + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeDecryptionFailure for service response error code + // "DecryptionFailure". + // + // Secrets Manager can't decrypt the protected secret text using the provided + // KMS key. + ErrCodeDecryptionFailure = "DecryptionFailure" + + // ErrCodeEncryptionFailure for service response error code + // "EncryptionFailure". + // + // Secrets Manager can't encrypt the protected secret text using the provided + // KMS key. Check that the customer master key (CMK) is available, enabled, + // and not in an invalid state. 
For more information, see How Key State Affects + // Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). + ErrCodeEncryptionFailure = "EncryptionFailure" + + // ErrCodeInternalServiceError for service response error code + // "InternalServiceError". + // + // An error occurred on the server side. + ErrCodeInternalServiceError = "InternalServiceError" + + // ErrCodeInvalidNextTokenException for service response error code + // "InvalidNextTokenException". + // + // You provided an invalid NextToken value. + ErrCodeInvalidNextTokenException = "InvalidNextTokenException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // You provided an invalid value for a parameter. + ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // You provided a parameter value that is not valid for the current state of + // the resource. + // + // Possible causes: + // + // * You tried to perform the operation on a secret that's currently marked + // deleted. + // + // * You tried to enable rotation on a secret that doesn't already have a + // Lambda function ARN configured and you didn't include such an ARN as a + // parameter in this call. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The request failed because it would exceed one of the Secrets Manager internal + // limits. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocumentException". + // + // The policy document that you provided isn't valid. 
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocumentException" + + // ErrCodePreconditionNotMetException for service response error code + // "PreconditionNotMetException". + // + // The request failed because you did not complete all the prerequisite steps. + ErrCodePreconditionNotMetException = "PreconditionNotMetException" + + // ErrCodePublicPolicyException for service response error code + // "PublicPolicyException". + // + // The resource policy did not prevent broad access to the secret. + ErrCodePublicPolicyException = "PublicPolicyException" + + // ErrCodeResourceExistsException for service response error code + // "ResourceExistsException". + // + // A resource with the ID you requested already exists. + ErrCodeResourceExistsException = "ResourceExistsException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // We can't find the resource that you asked for. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "DecryptionFailure": newErrorDecryptionFailure, + "EncryptionFailure": newErrorEncryptionFailure, + "InternalServiceError": newErrorInternalServiceError, + "InvalidNextTokenException": newErrorInvalidNextTokenException, + "InvalidParameterException": newErrorInvalidParameterException, + "InvalidRequestException": newErrorInvalidRequestException, + "LimitExceededException": newErrorLimitExceededException, + "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException, + "PreconditionNotMetException": newErrorPreconditionNotMetException, + "PublicPolicyException": newErrorPublicPolicyException, + "ResourceExistsException": newErrorResourceExistsException, + "ResourceNotFoundException": newErrorResourceNotFoundException, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface/interface.go 
b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface/interface.go new file mode 100644 index 00000000000..fe386bd5820 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface/interface.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package secretsmanageriface provides an interface to enable mocking the AWS Secrets Manager service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package secretsmanageriface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/secretsmanager" +) + +// SecretsManagerAPI provides an interface to enable mocking the +// secretsmanager.SecretsManager service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Secrets Manager. +// func myFunc(svc secretsmanageriface.SecretsManagerAPI) bool { +// // Make svc.CancelRotateSecret request +// } +// +// func main() { +// sess := session.New() +// svc := secretsmanager.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSecretsManagerClient struct { +// secretsmanageriface.SecretsManagerAPI +// } +// func (m *mockSecretsManagerClient) CancelRotateSecret(input *secretsmanager.CancelRotateSecretInput) (*secretsmanager.CancelRotateSecretOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSecretsManagerClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type SecretsManagerAPI interface { + CancelRotateSecret(*secretsmanager.CancelRotateSecretInput) (*secretsmanager.CancelRotateSecretOutput, error) + CancelRotateSecretWithContext(aws.Context, *secretsmanager.CancelRotateSecretInput, ...request.Option) (*secretsmanager.CancelRotateSecretOutput, error) + CancelRotateSecretRequest(*secretsmanager.CancelRotateSecretInput) (*request.Request, *secretsmanager.CancelRotateSecretOutput) + + CreateSecret(*secretsmanager.CreateSecretInput) (*secretsmanager.CreateSecretOutput, error) + CreateSecretWithContext(aws.Context, *secretsmanager.CreateSecretInput, ...request.Option) (*secretsmanager.CreateSecretOutput, error) + CreateSecretRequest(*secretsmanager.CreateSecretInput) (*request.Request, *secretsmanager.CreateSecretOutput) + + DeleteResourcePolicy(*secretsmanager.DeleteResourcePolicyInput) (*secretsmanager.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyWithContext(aws.Context, *secretsmanager.DeleteResourcePolicyInput, ...request.Option) (*secretsmanager.DeleteResourcePolicyOutput, error) + DeleteResourcePolicyRequest(*secretsmanager.DeleteResourcePolicyInput) (*request.Request, *secretsmanager.DeleteResourcePolicyOutput) + + 
DeleteSecret(*secretsmanager.DeleteSecretInput) (*secretsmanager.DeleteSecretOutput, error) + DeleteSecretWithContext(aws.Context, *secretsmanager.DeleteSecretInput, ...request.Option) (*secretsmanager.DeleteSecretOutput, error) + DeleteSecretRequest(*secretsmanager.DeleteSecretInput) (*request.Request, *secretsmanager.DeleteSecretOutput) + + DescribeSecret(*secretsmanager.DescribeSecretInput) (*secretsmanager.DescribeSecretOutput, error) + DescribeSecretWithContext(aws.Context, *secretsmanager.DescribeSecretInput, ...request.Option) (*secretsmanager.DescribeSecretOutput, error) + DescribeSecretRequest(*secretsmanager.DescribeSecretInput) (*request.Request, *secretsmanager.DescribeSecretOutput) + + GetRandomPassword(*secretsmanager.GetRandomPasswordInput) (*secretsmanager.GetRandomPasswordOutput, error) + GetRandomPasswordWithContext(aws.Context, *secretsmanager.GetRandomPasswordInput, ...request.Option) (*secretsmanager.GetRandomPasswordOutput, error) + GetRandomPasswordRequest(*secretsmanager.GetRandomPasswordInput) (*request.Request, *secretsmanager.GetRandomPasswordOutput) + + GetResourcePolicy(*secretsmanager.GetResourcePolicyInput) (*secretsmanager.GetResourcePolicyOutput, error) + GetResourcePolicyWithContext(aws.Context, *secretsmanager.GetResourcePolicyInput, ...request.Option) (*secretsmanager.GetResourcePolicyOutput, error) + GetResourcePolicyRequest(*secretsmanager.GetResourcePolicyInput) (*request.Request, *secretsmanager.GetResourcePolicyOutput) + + GetSecretValue(*secretsmanager.GetSecretValueInput) (*secretsmanager.GetSecretValueOutput, error) + GetSecretValueWithContext(aws.Context, *secretsmanager.GetSecretValueInput, ...request.Option) (*secretsmanager.GetSecretValueOutput, error) + GetSecretValueRequest(*secretsmanager.GetSecretValueInput) (*request.Request, *secretsmanager.GetSecretValueOutput) + + ListSecretVersionIds(*secretsmanager.ListSecretVersionIdsInput) (*secretsmanager.ListSecretVersionIdsOutput, error) + 
ListSecretVersionIdsWithContext(aws.Context, *secretsmanager.ListSecretVersionIdsInput, ...request.Option) (*secretsmanager.ListSecretVersionIdsOutput, error) + ListSecretVersionIdsRequest(*secretsmanager.ListSecretVersionIdsInput) (*request.Request, *secretsmanager.ListSecretVersionIdsOutput) + + ListSecretVersionIdsPages(*secretsmanager.ListSecretVersionIdsInput, func(*secretsmanager.ListSecretVersionIdsOutput, bool) bool) error + ListSecretVersionIdsPagesWithContext(aws.Context, *secretsmanager.ListSecretVersionIdsInput, func(*secretsmanager.ListSecretVersionIdsOutput, bool) bool, ...request.Option) error + + ListSecrets(*secretsmanager.ListSecretsInput) (*secretsmanager.ListSecretsOutput, error) + ListSecretsWithContext(aws.Context, *secretsmanager.ListSecretsInput, ...request.Option) (*secretsmanager.ListSecretsOutput, error) + ListSecretsRequest(*secretsmanager.ListSecretsInput) (*request.Request, *secretsmanager.ListSecretsOutput) + + ListSecretsPages(*secretsmanager.ListSecretsInput, func(*secretsmanager.ListSecretsOutput, bool) bool) error + ListSecretsPagesWithContext(aws.Context, *secretsmanager.ListSecretsInput, func(*secretsmanager.ListSecretsOutput, bool) bool, ...request.Option) error + + PutResourcePolicy(*secretsmanager.PutResourcePolicyInput) (*secretsmanager.PutResourcePolicyOutput, error) + PutResourcePolicyWithContext(aws.Context, *secretsmanager.PutResourcePolicyInput, ...request.Option) (*secretsmanager.PutResourcePolicyOutput, error) + PutResourcePolicyRequest(*secretsmanager.PutResourcePolicyInput) (*request.Request, *secretsmanager.PutResourcePolicyOutput) + + PutSecretValue(*secretsmanager.PutSecretValueInput) (*secretsmanager.PutSecretValueOutput, error) + PutSecretValueWithContext(aws.Context, *secretsmanager.PutSecretValueInput, ...request.Option) (*secretsmanager.PutSecretValueOutput, error) + PutSecretValueRequest(*secretsmanager.PutSecretValueInput) (*request.Request, *secretsmanager.PutSecretValueOutput) + + 
RestoreSecret(*secretsmanager.RestoreSecretInput) (*secretsmanager.RestoreSecretOutput, error) + RestoreSecretWithContext(aws.Context, *secretsmanager.RestoreSecretInput, ...request.Option) (*secretsmanager.RestoreSecretOutput, error) + RestoreSecretRequest(*secretsmanager.RestoreSecretInput) (*request.Request, *secretsmanager.RestoreSecretOutput) + + RotateSecret(*secretsmanager.RotateSecretInput) (*secretsmanager.RotateSecretOutput, error) + RotateSecretWithContext(aws.Context, *secretsmanager.RotateSecretInput, ...request.Option) (*secretsmanager.RotateSecretOutput, error) + RotateSecretRequest(*secretsmanager.RotateSecretInput) (*request.Request, *secretsmanager.RotateSecretOutput) + + TagResource(*secretsmanager.TagResourceInput) (*secretsmanager.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *secretsmanager.TagResourceInput, ...request.Option) (*secretsmanager.TagResourceOutput, error) + TagResourceRequest(*secretsmanager.TagResourceInput) (*request.Request, *secretsmanager.TagResourceOutput) + + UntagResource(*secretsmanager.UntagResourceInput) (*secretsmanager.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *secretsmanager.UntagResourceInput, ...request.Option) (*secretsmanager.UntagResourceOutput, error) + UntagResourceRequest(*secretsmanager.UntagResourceInput) (*request.Request, *secretsmanager.UntagResourceOutput) + + UpdateSecret(*secretsmanager.UpdateSecretInput) (*secretsmanager.UpdateSecretOutput, error) + UpdateSecretWithContext(aws.Context, *secretsmanager.UpdateSecretInput, ...request.Option) (*secretsmanager.UpdateSecretOutput, error) + UpdateSecretRequest(*secretsmanager.UpdateSecretInput) (*request.Request, *secretsmanager.UpdateSecretOutput) + + UpdateSecretVersionStage(*secretsmanager.UpdateSecretVersionStageInput) (*secretsmanager.UpdateSecretVersionStageOutput, error) + UpdateSecretVersionStageWithContext(aws.Context, *secretsmanager.UpdateSecretVersionStageInput, ...request.Option) 
(*secretsmanager.UpdateSecretVersionStageOutput, error) + UpdateSecretVersionStageRequest(*secretsmanager.UpdateSecretVersionStageInput) (*request.Request, *secretsmanager.UpdateSecretVersionStageOutput) + + ValidateResourcePolicy(*secretsmanager.ValidateResourcePolicyInput) (*secretsmanager.ValidateResourcePolicyOutput, error) + ValidateResourcePolicyWithContext(aws.Context, *secretsmanager.ValidateResourcePolicyInput, ...request.Option) (*secretsmanager.ValidateResourcePolicyOutput, error) + ValidateResourcePolicyRequest(*secretsmanager.ValidateResourcePolicyInput) (*request.Request, *secretsmanager.ValidateResourcePolicyOutput) +} + +var _ SecretsManagerAPI = (*secretsmanager.SecretsManager)(nil) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go new file mode 100644 index 00000000000..abb0bee7ac5 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package secretsmanager + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// SecretsManager provides the API operation methods for making requests to +// AWS Secrets Manager. See this package's package overview docs +// for details on the service. +// +// SecretsManager methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type SecretsManager struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "secretsmanager" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "Secrets Manager" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SecretsManager client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a SecretsManager client from just a session. +// svc := secretsmanager.New(mySession) +// +// // Create a SecretsManager client with additional configuration +// svc := secretsmanager.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecretsManager { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "secretsmanager" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecretsManager { + svc := &SecretsManager{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2017-10-17", + JSONVersion: "1.1", + TargetPrefix: "secretsmanager", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SecretsManager operation and runs any +// custom request initialization. +func (c *SecretsManager) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go new file mode 100644 index 00000000000..538e40395c4 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -0,0 +1,49345 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package ssm + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddTagsToResource for more information on using the AddTagsToResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/AddTagsToResource +func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + output = &AddTagsToResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddTagsToResource API operation for Amazon Simple Systems Manager (SSM). +// +// Adds or overwrites one or more tags for the specified resource. 
Tags are +// metadata that you can assign to your documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. Tags enable you +// to categorize your resources in different ways, for example, by purpose, +// owner, or environment. Each tag consists of a key and an optional value, +// both of which you define. For example, you could define a set of tags for +// your account's managed instances that helps you track each instance's owner +// and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. +// Or Key=Stack and Value=Production, Pre-Production, or Test. +// +// Each resource can have a maximum of 50 tags. +// +// We recommend that you devise a set of tag keys that meets your needs for +// each resource type. Using a consistent set of tag keys makes it easier for +// you to manage your resources. You can search and filter the resources based +// on the tags you add. Tags don't have any semantic meaning to and are interpreted +// strictly as a string of characters. +// +// For more information about using tags with EC2 instances, see Tagging your +// Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// in the Amazon EC2 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation AddTagsToResource for usage and error information. +// +// Returned Error Types: +// * InvalidResourceType +// The resource type is not valid. For example, if you are attempting to tag +// an instance, the instance must be a registered, managed instance. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// * TooManyTagsError +// The Targets parameter includes too many tags. Remove one or more tags and +// try the command again. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/AddTagsToResource +func (c *SSM) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + return out, req.Send() +} + +// AddTagsToResourceWithContext is the same as AddTagsToResource with the addition of +// the ability to pass a context and additional request options. +// +// See AddTagsToResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) AddTagsToResourceWithContext(ctx aws.Context, input *AddTagsToResourceInput, opts ...request.Option) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelCommand = "CancelCommand" + +// CancelCommandRequest generates a "aws/request.Request" representing the +// client's request for the CancelCommand operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelCommand for more information on using the CancelCommand +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelCommandRequest method. +// req, resp := client.CancelCommandRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CancelCommand +func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Request, output *CancelCommandOutput) { + op := &request.Operation{ + Name: opCancelCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelCommandInput{} + } + + output = &CancelCommandOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelCommand API operation for Amazon Simple Systems Manager (SSM). +// +// Attempts to cancel the command specified by the Command ID. There is no guarantee +// that the command will be terminated and the underlying process stopped. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CancelCommand for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidCommandId +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. 
+// +// * DuplicateInstanceId +// You cannot specify an instance ID in more than one association. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CancelCommand +func (c *SSM) CancelCommand(input *CancelCommandInput) (*CancelCommandOutput, error) { + req, out := c.CancelCommandRequest(input) + return out, req.Send() +} + +// CancelCommandWithContext is the same as CancelCommand with the addition of +// the ability to pass a context and additional request options. +// +// See CancelCommand for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CancelCommandWithContext(ctx aws.Context, input *CancelCommandInput, opts ...request.Option) (*CancelCommandOutput, error) { + req, out := c.CancelCommandRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCancelMaintenanceWindowExecution = "CancelMaintenanceWindowExecution" + +// CancelMaintenanceWindowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the CancelMaintenanceWindowExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelMaintenanceWindowExecution for more information on using the CancelMaintenanceWindowExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CancelMaintenanceWindowExecutionRequest method. +// req, resp := client.CancelMaintenanceWindowExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CancelMaintenanceWindowExecution +func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWindowExecutionInput) (req *request.Request, output *CancelMaintenanceWindowExecutionOutput) { + op := &request.Operation{ + Name: opCancelMaintenanceWindowExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelMaintenanceWindowExecutionInput{} + } + + output = &CancelMaintenanceWindowExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM). +// +// Stops a maintenance window execution that is already in progress and cancels +// any tasks in the window that have not already starting running. (Tasks already +// in progress will continue to completion.) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CancelMaintenanceWindowExecution for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CancelMaintenanceWindowExecution +func (c *SSM) CancelMaintenanceWindowExecution(input *CancelMaintenanceWindowExecutionInput) (*CancelMaintenanceWindowExecutionOutput, error) { + req, out := c.CancelMaintenanceWindowExecutionRequest(input) + return out, req.Send() +} + +// CancelMaintenanceWindowExecutionWithContext is the same as CancelMaintenanceWindowExecution with the addition of +// the ability to pass a context and additional request options. +// +// See CancelMaintenanceWindowExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CancelMaintenanceWindowExecutionWithContext(ctx aws.Context, input *CancelMaintenanceWindowExecutionInput, opts ...request.Option) (*CancelMaintenanceWindowExecutionOutput, error) { + req, out := c.CancelMaintenanceWindowExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateActivation = "CreateActivation" + +// CreateActivationRequest generates a "aws/request.Request" representing the +// client's request for the CreateActivation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateActivation for more information on using the CreateActivation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateActivationRequest method. +// req, resp := client.CreateActivationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateActivation +func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *request.Request, output *CreateActivationOutput) { + op := &request.Operation{ + Name: opCreateActivation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateActivationInput{} + } + + output = &CreateActivationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateActivation API operation for Amazon Simple Systems Manager (SSM). +// +// Generates an activation code and activation ID you can use to register your +// on-premises server or virtual machine (VM) with Systems Manager. Registering +// these machines with Systems Manager makes it possible to manage them using +// Systems Manager capabilities. You use the activation code and ID when installing +// SSM Agent on machines in your hybrid environment. For more information about +// requirements for managing on-premises instances and VMs using Systems Manager, +// see Setting up AWS Systems Manager for hybrid environments (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html) +// in the AWS Systems Manager User Guide. +// +// On-premises servers or VMs that are registered with Systems Manager and EC2 +// instances that you manage with Systems Manager are all called managed instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateActivation for usage and error information. 
+// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateActivation +func (c *SSM) CreateActivation(input *CreateActivationInput) (*CreateActivationOutput, error) { + req, out := c.CreateActivationRequest(input) + return out, req.Send() +} + +// CreateActivationWithContext is the same as CreateActivation with the addition of +// the ability to pass a context and additional request options. +// +// See CreateActivation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateActivationWithContext(ctx aws.Context, input *CreateActivationInput, opts ...request.Option) (*CreateActivationOutput, error) { + req, out := c.CreateActivationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAssociation = "CreateAssociation" + +// CreateAssociationRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAssociation for more information on using the CreateAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAssociationRequest method. 
+// req, resp := client.CreateAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateAssociation +func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *request.Request, output *CreateAssociationOutput) { + op := &request.Operation{ + Name: opCreateAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationInput{} + } + + output = &CreateAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAssociation API operation for Amazon Simple Systems Manager (SSM). +// +// A State Manager association defines the state that you want to maintain on +// your instances. For example, an association can specify that anti-virus software +// must be installed and running on your instances, or that certain ports must +// be closed. For static targets, the association specifies a schedule for when +// the configuration is reapplied. For dynamic targets, such as an AWS Resource +// Group or an AWS Autoscaling Group, State Manager applies the configuration +// when new instances are added to the group. The association also specifies +// actions to take when applying the configuration. For example, an association +// for anti-virus software might run once a day. If the software is not installed, +// then State Manager installs it. If the software is installed, but the service +// is not running, then the association might instruct State Manager to start +// the service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateAssociation for usage and error information. 
+// +// Returned Error Types: +// * AssociationAlreadyExists +// The specified association already exists. +// +// * AssociationLimitExceeded +// You can have at most 2,000 active associations. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * UnsupportedPlatformType +// The document does not support the platform type of the given instance ID(s). +// For example, you sent an document for a Windows instance to a Linux instance. +// +// * InvalidOutputLocation +// The output location is not valid or does not exist. +// +// * InvalidParameters +// You must specify values for all required parameters in the Systems Manager +// document. You can only supply values to parameters defined in the Systems +// Manager document. +// +// * InvalidTarget +// The target is not valid or does not exist. It might not be configured for +// Systems Manager or you might not have permission to perform the operation. +// +// * InvalidSchedule +// The schedule is invalid. Verify your cron or rate expression and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateAssociation +func (c *SSM) CreateAssociation(input *CreateAssociationInput) (*CreateAssociationOutput, error) { + req, out := c.CreateAssociationRequest(input) + return out, req.Send() +} + +// CreateAssociationWithContext is the same as CreateAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateAssociationWithContext(ctx aws.Context, input *CreateAssociationInput, opts ...request.Option) (*CreateAssociationOutput, error) { + req, out := c.CreateAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAssociationBatch = "CreateAssociationBatch" + +// CreateAssociationBatchRequest generates a "aws/request.Request" representing the +// client's request for the CreateAssociationBatch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAssociationBatch for more information on using the CreateAssociationBatch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAssociationBatchRequest method. 
+// req, resp := client.CreateAssociationBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateAssociationBatch +func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) (req *request.Request, output *CreateAssociationBatchOutput) { + op := &request.Operation{ + Name: opCreateAssociationBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAssociationBatchInput{} + } + + output = &CreateAssociationBatchOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAssociationBatch API operation for Amazon Simple Systems Manager (SSM). +// +// Associates the specified Systems Manager document with the specified instances +// or targets. +// +// When you associate a document with one or more instances using instance IDs +// or tags, SSM Agent running on the instance processes the document and configures +// the instance as specified. +// +// If you associate a document with an instance that already has an associated +// document, the system returns the AssociationAlreadyExists exception. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateAssociationBatch for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. 
Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidParameters +// You must specify values for all required parameters in the Systems Manager +// document. You can only supply values to parameters defined in the Systems +// Manager document. +// +// * DuplicateInstanceId +// You cannot specify an instance ID in more than one association. +// +// * AssociationLimitExceeded +// You can have at most 2,000 active associations. +// +// * UnsupportedPlatformType +// The document does not support the platform type of the given instance ID(s). +// For example, you sent an document for a Windows instance to a Linux instance. +// +// * InvalidOutputLocation +// The output location is not valid or does not exist. +// +// * InvalidTarget +// The target is not valid or does not exist. It might not be configured for +// Systems Manager or you might not have permission to perform the operation. +// +// * InvalidSchedule +// The schedule is invalid. Verify your cron or rate expression and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateAssociationBatch +func (c *SSM) CreateAssociationBatch(input *CreateAssociationBatchInput) (*CreateAssociationBatchOutput, error) { + req, out := c.CreateAssociationBatchRequest(input) + return out, req.Send() +} + +// CreateAssociationBatchWithContext is the same as CreateAssociationBatch with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAssociationBatch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateAssociationBatchWithContext(ctx aws.Context, input *CreateAssociationBatchInput, opts ...request.Option) (*CreateAssociationBatchOutput, error) { + req, out := c.CreateAssociationBatchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDocument = "CreateDocument" + +// CreateDocumentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDocument operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDocument for more information on using the CreateDocument +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDocumentRequest method. +// req, resp := client.CreateDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateDocument +func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Request, output *CreateDocumentOutput) { + op := &request.Operation{ + Name: opCreateDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDocumentInput{} + } + + output = &CreateDocumentOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDocument API operation for Amazon Simple Systems Manager (SSM). +// +// Creates a Systems Manager (SSM) document. 
An SSM document defines the actions +// that Systems Manager performs on your managed instances. For more information +// about SSM documents, including information about supported schemas, features, +// and syntax, see AWS Systems Manager Documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-ssm-docs.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateDocument for usage and error information. +// +// Returned Error Types: +// * DocumentAlreadyExists +// The specified document already exists. +// +// * MaxDocumentSizeExceeded +// The size limit of a document is 64 KB. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocumentContent +// The content for the document is not valid. +// +// * DocumentLimitExceeded +// You can have at most 500 active Systems Manager documents. +// +// * InvalidDocumentSchemaVersion +// The version of the document schema is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateDocument +func (c *SSM) CreateDocument(input *CreateDocumentInput) (*CreateDocumentOutput, error) { + req, out := c.CreateDocumentRequest(input) + return out, req.Send() +} + +// CreateDocumentWithContext is the same as CreateDocument with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDocument for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) CreateDocumentWithContext(ctx aws.Context, input *CreateDocumentInput, opts ...request.Option) (*CreateDocumentOutput, error) { + req, out := c.CreateDocumentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMaintenanceWindow = "CreateMaintenanceWindow" + +// CreateMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the CreateMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMaintenanceWindow for more information on using the CreateMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMaintenanceWindowRequest method. +// req, resp := client.CreateMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateMaintenanceWindow +func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput) (req *request.Request, output *CreateMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opCreateMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateMaintenanceWindowInput{} + } + + output = &CreateMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Creates a new maintenance window. 
+// +// The value you specify for Duration determines the specific end time for the +// maintenance window based on the time it begins. No maintenance window tasks +// are permitted to start after the resulting endtime minus the number of hours +// you specify for Cutoff. For example, if the maintenance window starts at +// 3 PM, the duration is three hours, and the value you specify for Cutoff is +// one hour, no maintenance window tasks can start after 5 PM. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * IdempotentParameterMismatch +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +// +// * ResourceLimitExceededException +// Error returned when the caller has exceeded the default resource quotas. +// For example, too many maintenance windows or patch baselines have been created. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateMaintenanceWindow +func (c *SSM) CreateMaintenanceWindow(input *CreateMaintenanceWindowInput) (*CreateMaintenanceWindowOutput, error) { + req, out := c.CreateMaintenanceWindowRequest(input) + return out, req.Send() +} + +// CreateMaintenanceWindowWithContext is the same as CreateMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateMaintenanceWindowWithContext(ctx aws.Context, input *CreateMaintenanceWindowInput, opts ...request.Option) (*CreateMaintenanceWindowOutput, error) { + req, out := c.CreateMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateOpsItem = "CreateOpsItem" + +// CreateOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the CreateOpsItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateOpsItem for more information on using the CreateOpsItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateOpsItemRequest method. 
+// req, resp := client.CreateOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateOpsItem +func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Request, output *CreateOpsItemOutput) { + op := &request.Operation{ + Name: opCreateOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpsItemInput{} + } + + output = &CreateOpsItemOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Creates a new OpsItem. You must have permission in AWS Identity and Access +// Management (IAM) to create a new OpsItem. For more information, see Getting +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateOpsItem for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * OpsItemAlreadyExistsException +// The OpsItem already exists. +// +// * OpsItemLimitExceededException +// The request caused OpsItems to exceed one or more quotas. 
For information +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// +// * OpsItemInvalidParameterException +// A specified parameter argument isn't valid. Verify the available arguments +// and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateOpsItem +func (c *SSM) CreateOpsItem(input *CreateOpsItemInput) (*CreateOpsItemOutput, error) { + req, out := c.CreateOpsItemRequest(input) + return out, req.Send() +} + +// CreateOpsItemWithContext is the same as CreateOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateOpsItemWithContext(ctx aws.Context, input *CreateOpsItemInput, opts ...request.Option) (*CreateOpsItemOutput, error) { + req, out := c.CreateOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePatchBaseline = "CreatePatchBaseline" + +// CreatePatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePatchBaseline for more information on using the CreatePatchBaseline +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePatchBaselineRequest method. +// req, resp := client.CreatePatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreatePatchBaseline +func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req *request.Request, output *CreatePatchBaselineOutput) { + op := &request.Operation{ + Name: opCreatePatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePatchBaselineInput{} + } + + output = &CreatePatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Creates a patch baseline. +// +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreatePatchBaseline for usage and error information. +// +// Returned Error Types: +// * IdempotentParameterMismatch +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +// +// * ResourceLimitExceededException +// Error returned when the caller has exceeded the default resource quotas. +// For example, too many maintenance windows or patch baselines have been created. 
+// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreatePatchBaseline +func (c *SSM) CreatePatchBaseline(input *CreatePatchBaselineInput) (*CreatePatchBaselineOutput, error) { + req, out := c.CreatePatchBaselineRequest(input) + return out, req.Send() +} + +// CreatePatchBaselineWithContext is the same as CreatePatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreatePatchBaselineWithContext(ctx aws.Context, input *CreatePatchBaselineInput, opts ...request.Option) (*CreatePatchBaselineOutput, error) { + req, out := c.CreatePatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateResourceDataSync = "CreateResourceDataSync" + +// CreateResourceDataSyncRequest generates a "aws/request.Request" representing the +// client's request for the CreateResourceDataSync operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateResourceDataSync for more information on using the CreateResourceDataSync +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateResourceDataSyncRequest method. +// req, resp := client.CreateResourceDataSyncRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateResourceDataSync +func (c *SSM) CreateResourceDataSyncRequest(input *CreateResourceDataSyncInput) (req *request.Request, output *CreateResourceDataSyncOutput) { + op := &request.Operation{ + Name: opCreateResourceDataSync, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateResourceDataSyncInput{} + } + + output = &CreateResourceDataSyncOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateResourceDataSync API operation for Amazon Simple Systems Manager (SSM). +// +// A resource data sync helps you view data from multiple sources in a single +// location. Systems Manager offers two types of resource data sync: SyncToDestination +// and SyncFromSource. +// +// You can configure Systems Manager Inventory to use the SyncToDestination +// type to synchronize Inventory data from multiple AWS Regions to a single +// S3 bucket. For more information, see Configuring Resource Data Sync for Inventory +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html) +// in the AWS Systems Manager User Guide. +// +// You can configure Systems Manager Explorer to use the SyncFromSource type +// to synchronize operational work items (OpsItems) and operational data (OpsData) +// from multiple AWS Regions to a single S3 bucket. 
This type can synchronize +// OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization +// by using AWS Organizations. For more information, see Setting up Systems +// Manager Explorer to display data from multiple accounts and Regions (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html) +// in the AWS Systems Manager User Guide. +// +// A resource data sync is an asynchronous operation that returns immediately. +// After a successful initial sync is completed, the system continuously syncs +// data. To check the status of a sync, use the ListResourceDataSync. +// +// By default, data is not encrypted in Amazon S3. We strongly recommend that +// you enable encryption in Amazon S3 to ensure secure data storage. We also +// recommend that you secure access to the Amazon S3 bucket by creating a restrictive +// bucket policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation CreateResourceDataSync for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ResourceDataSyncCountExceededException +// You have exceeded the allowed maximum sync configurations. +// +// * ResourceDataSyncAlreadyExistsException +// A sync configuration with the same name already exists. +// +// * ResourceDataSyncInvalidConfigurationException +// The specified sync configuration is invalid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateResourceDataSync +func (c *SSM) CreateResourceDataSync(input *CreateResourceDataSyncInput) (*CreateResourceDataSyncOutput, error) { + req, out := c.CreateResourceDataSyncRequest(input) + return out, req.Send() +} + +// CreateResourceDataSyncWithContext is the same as CreateResourceDataSync with the addition of +// the ability to pass a context and additional request options. +// +// See CreateResourceDataSync for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) CreateResourceDataSyncWithContext(ctx aws.Context, input *CreateResourceDataSyncInput, opts ...request.Option) (*CreateResourceDataSyncOutput, error) { + req, out := c.CreateResourceDataSyncRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteActivation = "DeleteActivation" + +// DeleteActivationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteActivation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteActivation for more information on using the DeleteActivation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteActivationRequest method. 
+// req, resp := client.DeleteActivationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteActivation +func (c *SSM) DeleteActivationRequest(input *DeleteActivationInput) (req *request.Request, output *DeleteActivationOutput) { + op := &request.Operation{ + Name: opDeleteActivation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteActivationInput{} + } + + output = &DeleteActivationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteActivation API operation for Amazon Simple Systems Manager (SSM). +// +// Deletes an activation. You are not required to delete an activation. If you +// delete an activation, you can no longer use it to register additional managed +// instances. Deleting an activation does not de-register managed instances. +// You must manually de-register managed instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteActivation for usage and error information. +// +// Returned Error Types: +// * InvalidActivationId +// The activation ID is not valid. Verify the you entered the correct ActivationId +// or ActivationCode and try again. +// +// * InvalidActivation +// The activation is not valid. The activation might have been deleted, or the +// ActivationId and the ActivationCode do not match. +// +// * InternalServerError +// An error occurred on the server side. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteActivation +func (c *SSM) DeleteActivation(input *DeleteActivationInput) (*DeleteActivationOutput, error) { + req, out := c.DeleteActivationRequest(input) + return out, req.Send() +} + +// DeleteActivationWithContext is the same as DeleteActivation with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteActivation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteActivationWithContext(ctx aws.Context, input *DeleteActivationInput, opts ...request.Option) (*DeleteActivationOutput, error) { + req, out := c.DeleteActivationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAssociation = "DeleteAssociation" + +// DeleteAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAssociation for more information on using the DeleteAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAssociationRequest method. 
+// req, resp := client.DeleteAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteAssociation +func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *request.Request, output *DeleteAssociationOutput) { + op := &request.Operation{ + Name: opDeleteAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAssociationInput{} + } + + output = &DeleteAssociationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAssociation API operation for Amazon Simple Systems Manager (SSM). +// +// Disassociates the specified Systems Manager document from the specified instance. +// +// When you disassociate a document from an instance, it does not change the +// configuration of the instance. To change the configuration state of an instance +// after you disassociate a document, you must create a new document with the +// desired configuration and associate it with the instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteAssociation for usage and error information. +// +// Returned Error Types: +// * AssociationDoesNotExist +// The specified association does not exist. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. 
+// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteAssociation +func (c *SSM) DeleteAssociation(input *DeleteAssociationInput) (*DeleteAssociationOutput, error) { + req, out := c.DeleteAssociationRequest(input) + return out, req.Send() +} + +// DeleteAssociationWithContext is the same as DeleteAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteAssociationWithContext(ctx aws.Context, input *DeleteAssociationInput, opts ...request.Option) (*DeleteAssociationOutput, error) { + req, out := c.DeleteAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDocument = "DeleteDocument" + +// DeleteDocumentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDocument operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDocument for more information on using the DeleteDocument +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDocumentRequest method. +// req, resp := client.DeleteDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteDocument +func (c *SSM) DeleteDocumentRequest(input *DeleteDocumentInput) (req *request.Request, output *DeleteDocumentOutput) { + op := &request.Operation{ + Name: opDeleteDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDocumentInput{} + } + + output = &DeleteDocumentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDocument API operation for Amazon Simple Systems Manager (SSM). +// +// Deletes the Systems Manager document and all instance associations to the +// document. +// +// Before you delete the document, we recommend that you use DeleteAssociation +// to disassociate all instances that are associated with the document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteDocument for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentOperation +// You attempted to delete a document while it is still shared. You must stop +// sharing the document before you can delete it. 
+// +// * AssociatedInstances +// You must disassociate a document from all instances before you can delete +// it. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteDocument +func (c *SSM) DeleteDocument(input *DeleteDocumentInput) (*DeleteDocumentOutput, error) { + req, out := c.DeleteDocumentRequest(input) + return out, req.Send() +} + +// DeleteDocumentWithContext is the same as DeleteDocument with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDocument for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteDocumentWithContext(ctx aws.Context, input *DeleteDocumentInput, opts ...request.Option) (*DeleteDocumentOutput, error) { + req, out := c.DeleteDocumentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteInventory = "DeleteInventory" + +// DeleteInventoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInventory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInventory for more information on using the DeleteInventory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInventoryRequest method. 
+// req, resp := client.DeleteInventoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteInventory +func (c *SSM) DeleteInventoryRequest(input *DeleteInventoryInput) (req *request.Request, output *DeleteInventoryOutput) { + op := &request.Operation{ + Name: opDeleteInventory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInventoryInput{} + } + + output = &DeleteInventoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInventory API operation for Amazon Simple Systems Manager (SSM). +// +// Delete a custom inventory type or the data associated with a custom Inventory +// type. Deleting a custom inventory type is also referred to as deleting a +// custom inventory schema. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteInventory for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidTypeNameException +// The parameter type name is not valid. +// +// * InvalidOptionException +// The delete inventory option specified is not valid. Verify the option and +// try again. +// +// * InvalidDeleteInventoryParametersException +// One or more of the parameters specified for the delete operation is not valid. +// Verify all parameters and try again. +// +// * InvalidInventoryRequestException +// The request is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteInventory +func (c *SSM) DeleteInventory(input *DeleteInventoryInput) (*DeleteInventoryOutput, error) { + req, out := c.DeleteInventoryRequest(input) + return out, req.Send() +} + +// DeleteInventoryWithContext is the same as DeleteInventory with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInventory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteInventoryWithContext(ctx aws.Context, input *DeleteInventoryInput, opts ...request.Option) (*DeleteInventoryOutput, error) { + req, out := c.DeleteInventoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteMaintenanceWindow = "DeleteMaintenanceWindow" + +// DeleteMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMaintenanceWindow for more information on using the DeleteMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteMaintenanceWindowRequest method. 
+// req, resp := client.DeleteMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteMaintenanceWindow +func (c *SSM) DeleteMaintenanceWindowRequest(input *DeleteMaintenanceWindowInput) (req *request.Request, output *DeleteMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opDeleteMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMaintenanceWindowInput{} + } + + output = &DeleteMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Deletes a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteMaintenanceWindow +func (c *SSM) DeleteMaintenanceWindow(input *DeleteMaintenanceWindowInput) (*DeleteMaintenanceWindowOutput, error) { + req, out := c.DeleteMaintenanceWindowRequest(input) + return out, req.Send() +} + +// DeleteMaintenanceWindowWithContext is the same as DeleteMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteMaintenanceWindowWithContext(ctx aws.Context, input *DeleteMaintenanceWindowInput, opts ...request.Option) (*DeleteMaintenanceWindowOutput, error) { + req, out := c.DeleteMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteParameter = "DeleteParameter" + +// DeleteParameterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteParameter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteParameter for more information on using the DeleteParameter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteParameterRequest method. +// req, resp := client.DeleteParameterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParameter +func (c *SSM) DeleteParameterRequest(input *DeleteParameterInput) (req *request.Request, output *DeleteParameterOutput) { + op := &request.Operation{ + Name: opDeleteParameter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteParameterInput{} + } + + output = &DeleteParameterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteParameter API operation for Amazon Simple Systems Manager (SSM). 
+// +// Delete a parameter from the system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteParameter for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ParameterNotFound +// The parameter could not be found. Verify the name and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParameter +func (c *SSM) DeleteParameter(input *DeleteParameterInput) (*DeleteParameterOutput, error) { + req, out := c.DeleteParameterRequest(input) + return out, req.Send() +} + +// DeleteParameterWithContext is the same as DeleteParameter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteParameter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteParameterWithContext(ctx aws.Context, input *DeleteParameterInput, opts ...request.Option) (*DeleteParameterOutput, error) { + req, out := c.DeleteParameterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteParameters = "DeleteParameters" + +// DeleteParametersRequest generates a "aws/request.Request" representing the +// client's request for the DeleteParameters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteParameters for more information on using the DeleteParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteParametersRequest method. +// req, resp := client.DeleteParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParameters +func (c *SSM) DeleteParametersRequest(input *DeleteParametersInput) (req *request.Request, output *DeleteParametersOutput) { + op := &request.Operation{ + Name: opDeleteParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteParametersInput{} + } + + output = &DeleteParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteParameters API operation for Amazon Simple Systems Manager (SSM). +// +// Delete a list of parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteParameters for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParameters +func (c *SSM) DeleteParameters(input *DeleteParametersInput) (*DeleteParametersOutput, error) { + req, out := c.DeleteParametersRequest(input) + return out, req.Send() +} + +// DeleteParametersWithContext is the same as DeleteParameters with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteParametersWithContext(ctx aws.Context, input *DeleteParametersInput, opts ...request.Option) (*DeleteParametersOutput, error) { + req, out := c.DeleteParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePatchBaseline = "DeletePatchBaseline" + +// DeletePatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePatchBaseline for more information on using the DeletePatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePatchBaselineRequest method. 
+// req, resp := client.DeletePatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeletePatchBaseline +func (c *SSM) DeletePatchBaselineRequest(input *DeletePatchBaselineInput) (req *request.Request, output *DeletePatchBaselineOutput) { + op := &request.Operation{ + Name: opDeletePatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePatchBaselineInput{} + } + + output = &DeletePatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeletePatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Deletes a patch baseline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeletePatchBaseline for usage and error information. +// +// Returned Error Types: +// * ResourceInUseException +// Error returned if an attempt is made to delete a patch baseline that is registered +// for a patch group. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeletePatchBaseline +func (c *SSM) DeletePatchBaseline(input *DeletePatchBaselineInput) (*DeletePatchBaselineOutput, error) { + req, out := c.DeletePatchBaselineRequest(input) + return out, req.Send() +} + +// DeletePatchBaselineWithContext is the same as DeletePatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeletePatchBaselineWithContext(ctx aws.Context, input *DeletePatchBaselineInput, opts ...request.Option) (*DeletePatchBaselineOutput, error) { + req, out := c.DeletePatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteResourceDataSync = "DeleteResourceDataSync" + +// DeleteResourceDataSyncRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourceDataSync operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourceDataSync for more information on using the DeleteResourceDataSync +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteResourceDataSyncRequest method. 
+// req, resp := client.DeleteResourceDataSyncRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteResourceDataSync +func (c *SSM) DeleteResourceDataSyncRequest(input *DeleteResourceDataSyncInput) (req *request.Request, output *DeleteResourceDataSyncOutput) { + op := &request.Operation{ + Name: opDeleteResourceDataSync, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourceDataSyncInput{} + } + + output = &DeleteResourceDataSyncOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteResourceDataSync API operation for Amazon Simple Systems Manager (SSM). +// +// Deletes a Resource Data Sync configuration. After the configuration is deleted, +// changes to data on managed instances are no longer synced to or from the +// target. Deleting a sync configuration does not delete data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeleteResourceDataSync for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ResourceDataSyncNotFoundException +// The specified sync name was not found. +// +// * ResourceDataSyncInvalidConfigurationException +// The specified sync configuration is invalid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteResourceDataSync +func (c *SSM) DeleteResourceDataSync(input *DeleteResourceDataSyncInput) (*DeleteResourceDataSyncOutput, error) { + req, out := c.DeleteResourceDataSyncRequest(input) + return out, req.Send() +} + +// DeleteResourceDataSyncWithContext is the same as DeleteResourceDataSync with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourceDataSync for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeleteResourceDataSyncWithContext(ctx aws.Context, input *DeleteResourceDataSyncInput, opts ...request.Option) (*DeleteResourceDataSyncOutput, error) { + req, out := c.DeleteResourceDataSyncRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterManagedInstance = "DeregisterManagedInstance" + +// DeregisterManagedInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterManagedInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterManagedInstance for more information on using the DeregisterManagedInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeregisterManagedInstanceRequest method. +// req, resp := client.DeregisterManagedInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterManagedInstance +func (c *SSM) DeregisterManagedInstanceRequest(input *DeregisterManagedInstanceInput) (req *request.Request, output *DeregisterManagedInstanceOutput) { + op := &request.Operation{ + Name: opDeregisterManagedInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterManagedInstanceInput{} + } + + output = &DeregisterManagedInstanceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeregisterManagedInstance API operation for Amazon Simple Systems Manager (SSM). +// +// Removes the server or virtual machine from the list of registered servers. +// You can reregister the instance again at any time. If you don't plan to use +// Run Command on the server, we suggest uninstalling SSM Agent first. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeregisterManagedInstance for usage and error information. +// +// Returned Error Types: +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. 
Invalid states are: Shutting-down and Terminated. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterManagedInstance +func (c *SSM) DeregisterManagedInstance(input *DeregisterManagedInstanceInput) (*DeregisterManagedInstanceOutput, error) { + req, out := c.DeregisterManagedInstanceRequest(input) + return out, req.Send() +} + +// DeregisterManagedInstanceWithContext is the same as DeregisterManagedInstance with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterManagedInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeregisterManagedInstanceWithContext(ctx aws.Context, input *DeregisterManagedInstanceInput, opts ...request.Option) (*DeregisterManagedInstanceOutput, error) { + req, out := c.DeregisterManagedInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterPatchBaselineForPatchGroup = "DeregisterPatchBaselineForPatchGroup" + +// DeregisterPatchBaselineForPatchGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterPatchBaselineForPatchGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterPatchBaselineForPatchGroup for more information on using the DeregisterPatchBaselineForPatchGroup +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterPatchBaselineForPatchGroupRequest method. +// req, resp := client.DeregisterPatchBaselineForPatchGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterPatchBaselineForPatchGroup +func (c *SSM) DeregisterPatchBaselineForPatchGroupRequest(input *DeregisterPatchBaselineForPatchGroupInput) (req *request.Request, output *DeregisterPatchBaselineForPatchGroupOutput) { + op := &request.Operation{ + Name: opDeregisterPatchBaselineForPatchGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterPatchBaselineForPatchGroupInput{} + } + + output = &DeregisterPatchBaselineForPatchGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterPatchBaselineForPatchGroup API operation for Amazon Simple Systems Manager (SSM). +// +// Removes a patch group from a patch baseline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeregisterPatchBaselineForPatchGroup for usage and error information. +// +// Returned Error Types: +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterPatchBaselineForPatchGroup +func (c *SSM) DeregisterPatchBaselineForPatchGroup(input *DeregisterPatchBaselineForPatchGroupInput) (*DeregisterPatchBaselineForPatchGroupOutput, error) { + req, out := c.DeregisterPatchBaselineForPatchGroupRequest(input) + return out, req.Send() +} + +// DeregisterPatchBaselineForPatchGroupWithContext is the same as DeregisterPatchBaselineForPatchGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterPatchBaselineForPatchGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeregisterPatchBaselineForPatchGroupWithContext(ctx aws.Context, input *DeregisterPatchBaselineForPatchGroupInput, opts ...request.Option) (*DeregisterPatchBaselineForPatchGroupOutput, error) { + req, out := c.DeregisterPatchBaselineForPatchGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTargetFromMaintenanceWindow = "DeregisterTargetFromMaintenanceWindow" + +// DeregisterTargetFromMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTargetFromMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterTargetFromMaintenanceWindow for more information on using the DeregisterTargetFromMaintenanceWindow +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTargetFromMaintenanceWindowRequest method. +// req, resp := client.DeregisterTargetFromMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterTargetFromMaintenanceWindow +func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTargetFromMaintenanceWindowInput) (req *request.Request, output *DeregisterTargetFromMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opDeregisterTargetFromMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTargetFromMaintenanceWindowInput{} + } + + output = &DeregisterTargetFromMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTargetFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Removes a target from a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeregisterTargetFromMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. 
+// +// * InternalServerError +// An error occurred on the server side. +// +// * TargetInUseException +// You specified the Safe option for the DeregisterTargetFromMaintenanceWindow +// operation, but the target is still referenced in a task. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterTargetFromMaintenanceWindow +func (c *SSM) DeregisterTargetFromMaintenanceWindow(input *DeregisterTargetFromMaintenanceWindowInput) (*DeregisterTargetFromMaintenanceWindowOutput, error) { + req, out := c.DeregisterTargetFromMaintenanceWindowRequest(input) + return out, req.Send() +} + +// DeregisterTargetFromMaintenanceWindowWithContext is the same as DeregisterTargetFromMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTargetFromMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeregisterTargetFromMaintenanceWindowWithContext(ctx aws.Context, input *DeregisterTargetFromMaintenanceWindowInput, opts ...request.Option) (*DeregisterTargetFromMaintenanceWindowOutput, error) { + req, out := c.DeregisterTargetFromMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTaskFromMaintenanceWindow = "DeregisterTaskFromMaintenanceWindow" + +// DeregisterTaskFromMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTaskFromMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterTaskFromMaintenanceWindow for more information on using the DeregisterTaskFromMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTaskFromMaintenanceWindowRequest method. +// req, resp := client.DeregisterTaskFromMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterTaskFromMaintenanceWindow +func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFromMaintenanceWindowInput) (req *request.Request, output *DeregisterTaskFromMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opDeregisterTaskFromMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTaskFromMaintenanceWindowInput{} + } + + output = &DeregisterTaskFromMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTaskFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Removes a task from a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DeregisterTaskFromMaintenanceWindow for usage and error information. 
+// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeregisterTaskFromMaintenanceWindow +func (c *SSM) DeregisterTaskFromMaintenanceWindow(input *DeregisterTaskFromMaintenanceWindowInput) (*DeregisterTaskFromMaintenanceWindowOutput, error) { + req, out := c.DeregisterTaskFromMaintenanceWindowRequest(input) + return out, req.Send() +} + +// DeregisterTaskFromMaintenanceWindowWithContext is the same as DeregisterTaskFromMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTaskFromMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DeregisterTaskFromMaintenanceWindowWithContext(ctx aws.Context, input *DeregisterTaskFromMaintenanceWindowInput, opts ...request.Option) (*DeregisterTaskFromMaintenanceWindowOutput, error) { + req, out := c.DeregisterTaskFromMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeActivations = "DescribeActivations" + +// DescribeActivationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActivations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeActivations for more information on using the DescribeActivations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeActivationsRequest method. +// req, resp := client.DescribeActivationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeActivations +func (c *SSM) DescribeActivationsRequest(input *DescribeActivationsInput) (req *request.Request, output *DescribeActivationsOutput) { + op := &request.Operation{ + Name: opDescribeActivations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeActivationsInput{} + } + + output = &DescribeActivationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeActivations API operation for Amazon Simple Systems Manager (SSM). +// +// Describes details about the activation, such as the date and time the activation +// was created, its expiration date, the IAM role assigned to the instances +// in the activation, and the number of instances registered by using this activation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeActivations for usage and error information. +// +// Returned Error Types: +// * InvalidFilter +// The filter name is not valid. Verify the you entered the correct name and +// try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeActivations +func (c *SSM) DescribeActivations(input *DescribeActivationsInput) (*DescribeActivationsOutput, error) { + req, out := c.DescribeActivationsRequest(input) + return out, req.Send() +} + +// DescribeActivationsWithContext is the same as DescribeActivations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeActivations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeActivationsWithContext(ctx aws.Context, input *DescribeActivationsInput, opts ...request.Option) (*DescribeActivationsOutput, error) { + req, out := c.DescribeActivationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeActivationsPages iterates over the pages of a DescribeActivations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeActivations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeActivations operation. 
+// pageNum := 0 +// err := client.DescribeActivationsPages(params, +// func(page *ssm.DescribeActivationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeActivationsPages(input *DescribeActivationsInput, fn func(*DescribeActivationsOutput, bool) bool) error { + return c.DescribeActivationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeActivationsPagesWithContext same as DescribeActivationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeActivationsPagesWithContext(ctx aws.Context, input *DescribeActivationsInput, fn func(*DescribeActivationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeActivationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeActivationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeActivationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAssociation = "DescribeAssociation" + +// DescribeAssociationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAssociation for more information on using the DescribeAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAssociationRequest method. +// req, resp := client.DescribeAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociation +func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req *request.Request, output *DescribeAssociationOutput) { + op := &request.Operation{ + Name: opDescribeAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAssociationInput{} + } + + output = &DescribeAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAssociation API operation for Amazon Simple Systems Manager (SSM). +// +// Describes the association for the specified target or instance. If you created +// the association by using the Targets parameter, then you must retrieve the +// association by using the association ID. If you created the association by +// specifying an instance ID and a Systems Manager document, then you retrieve +// the association by specifying the document name and the instance ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAssociation for usage and error information. +// +// Returned Error Types: +// * AssociationDoesNotExist +// The specified association does not exist. 
+// +// * InvalidAssociationVersion +// The version you specified is not valid. Use ListAssociationVersions to view +// all versions of an association according to the association ID. Or, use the +// $LATEST parameter to view the latest version of the association. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociation +func (c *SSM) DescribeAssociation(input *DescribeAssociationInput) (*DescribeAssociationOutput, error) { + req, out := c.DescribeAssociationRequest(input) + return out, req.Send() +} + +// DescribeAssociationWithContext is the same as DescribeAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAssociationWithContext(ctx aws.Context, input *DescribeAssociationInput, opts ...request.Option) (*DescribeAssociationOutput, error) { + req, out := c.DescribeAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeAssociationExecutionTargets = "DescribeAssociationExecutionTargets" + +// DescribeAssociationExecutionTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssociationExecutionTargets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAssociationExecutionTargets for more information on using the DescribeAssociationExecutionTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAssociationExecutionTargetsRequest method. +// req, resp := client.DescribeAssociationExecutionTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociationExecutionTargets +func (c *SSM) DescribeAssociationExecutionTargetsRequest(input *DescribeAssociationExecutionTargetsInput) (req *request.Request, output *DescribeAssociationExecutionTargetsOutput) { + op := &request.Operation{ + Name: opDescribeAssociationExecutionTargets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAssociationExecutionTargetsInput{} + } + + output = &DescribeAssociationExecutionTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAssociationExecutionTargets API operation for 
Amazon Simple Systems Manager (SSM). +// +// Use this API action to view information about a specific execution of a specific +// association. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAssociationExecutionTargets for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * AssociationDoesNotExist +// The specified association does not exist. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * AssociationExecutionDoesNotExist +// The specified execution ID does not exist. Verify the ID number and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociationExecutionTargets +func (c *SSM) DescribeAssociationExecutionTargets(input *DescribeAssociationExecutionTargetsInput) (*DescribeAssociationExecutionTargetsOutput, error) { + req, out := c.DescribeAssociationExecutionTargetsRequest(input) + return out, req.Send() +} + +// DescribeAssociationExecutionTargetsWithContext is the same as DescribeAssociationExecutionTargets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAssociationExecutionTargets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeAssociationExecutionTargetsWithContext(ctx aws.Context, input *DescribeAssociationExecutionTargetsInput, opts ...request.Option) (*DescribeAssociationExecutionTargetsOutput, error) { + req, out := c.DescribeAssociationExecutionTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAssociationExecutionTargetsPages iterates over the pages of a DescribeAssociationExecutionTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAssociationExecutionTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAssociationExecutionTargets operation. +// pageNum := 0 +// err := client.DescribeAssociationExecutionTargetsPages(params, +// func(page *ssm.DescribeAssociationExecutionTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAssociationExecutionTargetsPages(input *DescribeAssociationExecutionTargetsInput, fn func(*DescribeAssociationExecutionTargetsOutput, bool) bool) error { + return c.DescribeAssociationExecutionTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAssociationExecutionTargetsPagesWithContext same as DescribeAssociationExecutionTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeAssociationExecutionTargetsPagesWithContext(ctx aws.Context, input *DescribeAssociationExecutionTargetsInput, fn func(*DescribeAssociationExecutionTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAssociationExecutionTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAssociationExecutionTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAssociationExecutionTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAssociationExecutions = "DescribeAssociationExecutions" + +// DescribeAssociationExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAssociationExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAssociationExecutions for more information on using the DescribeAssociationExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAssociationExecutionsRequest method. 
+// req, resp := client.DescribeAssociationExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociationExecutions +func (c *SSM) DescribeAssociationExecutionsRequest(input *DescribeAssociationExecutionsInput) (req *request.Request, output *DescribeAssociationExecutionsOutput) { + op := &request.Operation{ + Name: opDescribeAssociationExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAssociationExecutionsInput{} + } + + output = &DescribeAssociationExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAssociationExecutions API operation for Amazon Simple Systems Manager (SSM). +// +// Use this API action to view all executions for a specific association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAssociationExecutions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * AssociationDoesNotExist +// The specified association does not exist. +// +// * InvalidNextToken +// The specified token is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociationExecutions +func (c *SSM) DescribeAssociationExecutions(input *DescribeAssociationExecutionsInput) (*DescribeAssociationExecutionsOutput, error) { + req, out := c.DescribeAssociationExecutionsRequest(input) + return out, req.Send() +} + +// DescribeAssociationExecutionsWithContext is the same as DescribeAssociationExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAssociationExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAssociationExecutionsWithContext(ctx aws.Context, input *DescribeAssociationExecutionsInput, opts ...request.Option) (*DescribeAssociationExecutionsOutput, error) { + req, out := c.DescribeAssociationExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAssociationExecutionsPages iterates over the pages of a DescribeAssociationExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAssociationExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAssociationExecutions operation. 
+// pageNum := 0 +// err := client.DescribeAssociationExecutionsPages(params, +// func(page *ssm.DescribeAssociationExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAssociationExecutionsPages(input *DescribeAssociationExecutionsInput, fn func(*DescribeAssociationExecutionsOutput, bool) bool) error { + return c.DescribeAssociationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAssociationExecutionsPagesWithContext same as DescribeAssociationExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAssociationExecutionsPagesWithContext(ctx aws.Context, input *DescribeAssociationExecutionsInput, fn func(*DescribeAssociationExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAssociationExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAssociationExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAssociationExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAutomationExecutions = "DescribeAutomationExecutions" + +// DescribeAutomationExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutomationExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAutomationExecutions for more information on using the DescribeAutomationExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAutomationExecutionsRequest method. +// req, resp := client.DescribeAutomationExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAutomationExecutions +func (c *SSM) DescribeAutomationExecutionsRequest(input *DescribeAutomationExecutionsInput) (req *request.Request, output *DescribeAutomationExecutionsOutput) { + op := &request.Operation{ + Name: opDescribeAutomationExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutomationExecutionsInput{} + } + + output = &DescribeAutomationExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAutomationExecutions API operation for Amazon Simple Systems Manager (SSM). +// +// Provides details about all active and terminated Automation executions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAutomationExecutions for usage and error information. 
+// +// Returned Error Types: +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidFilterValue +// The filter value is not valid. Verify the value and try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAutomationExecutions +func (c *SSM) DescribeAutomationExecutions(input *DescribeAutomationExecutionsInput) (*DescribeAutomationExecutionsOutput, error) { + req, out := c.DescribeAutomationExecutionsRequest(input) + return out, req.Send() +} + +// DescribeAutomationExecutionsWithContext is the same as DescribeAutomationExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAutomationExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAutomationExecutionsWithContext(ctx aws.Context, input *DescribeAutomationExecutionsInput, opts ...request.Option) (*DescribeAutomationExecutionsOutput, error) { + req, out := c.DescribeAutomationExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAutomationExecutionsPages iterates over the pages of a DescribeAutomationExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutomationExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeAutomationExecutions operation. +// pageNum := 0 +// err := client.DescribeAutomationExecutionsPages(params, +// func(page *ssm.DescribeAutomationExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAutomationExecutionsPages(input *DescribeAutomationExecutionsInput, fn func(*DescribeAutomationExecutionsOutput, bool) bool) error { + return c.DescribeAutomationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutomationExecutionsPagesWithContext same as DescribeAutomationExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAutomationExecutionsPagesWithContext(ctx aws.Context, input *DescribeAutomationExecutionsInput, fn func(*DescribeAutomationExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutomationExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutomationExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAutomationExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAutomationStepExecutions = "DescribeAutomationStepExecutions" + +// DescribeAutomationStepExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutomationStepExecutions operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAutomationStepExecutions for more information on using the DescribeAutomationStepExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAutomationStepExecutionsRequest method. +// req, resp := client.DescribeAutomationStepExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAutomationStepExecutions +func (c *SSM) DescribeAutomationStepExecutionsRequest(input *DescribeAutomationStepExecutionsInput) (req *request.Request, output *DescribeAutomationStepExecutionsOutput) { + op := &request.Operation{ + Name: opDescribeAutomationStepExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAutomationStepExecutionsInput{} + } + + output = &DescribeAutomationStepExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAutomationStepExecutions API operation for Amazon Simple Systems Manager (SSM). +// +// Information about all active and terminated step executions in an Automation +// workflow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAutomationStepExecutions for usage and error information. +// +// Returned Error Types: +// * AutomationExecutionNotFoundException +// There is no automation execution information for the requested automation +// execution ID. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidFilterValue +// The filter value is not valid. Verify the value and try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAutomationStepExecutions +func (c *SSM) DescribeAutomationStepExecutions(input *DescribeAutomationStepExecutionsInput) (*DescribeAutomationStepExecutionsOutput, error) { + req, out := c.DescribeAutomationStepExecutionsRequest(input) + return out, req.Send() +} + +// DescribeAutomationStepExecutionsWithContext is the same as DescribeAutomationStepExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAutomationStepExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAutomationStepExecutionsWithContext(ctx aws.Context, input *DescribeAutomationStepExecutionsInput, opts ...request.Option) (*DescribeAutomationStepExecutionsOutput, error) { + req, out := c.DescribeAutomationStepExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeAutomationStepExecutionsPages iterates over the pages of a DescribeAutomationStepExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutomationStepExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutomationStepExecutions operation. +// pageNum := 0 +// err := client.DescribeAutomationStepExecutionsPages(params, +// func(page *ssm.DescribeAutomationStepExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAutomationStepExecutionsPages(input *DescribeAutomationStepExecutionsInput, fn func(*DescribeAutomationStepExecutionsOutput, bool) bool) error { + return c.DescribeAutomationStepExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutomationStepExecutionsPagesWithContext same as DescribeAutomationStepExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeAutomationStepExecutionsPagesWithContext(ctx aws.Context, input *DescribeAutomationStepExecutionsInput, fn func(*DescribeAutomationStepExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutomationStepExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutomationStepExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAutomationStepExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAvailablePatches = "DescribeAvailablePatches" + +// DescribeAvailablePatchesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAvailablePatches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAvailablePatches for more information on using the DescribeAvailablePatches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAvailablePatchesRequest method. 
+// req, resp := client.DescribeAvailablePatchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAvailablePatches +func (c *SSM) DescribeAvailablePatchesRequest(input *DescribeAvailablePatchesInput) (req *request.Request, output *DescribeAvailablePatchesOutput) { + op := &request.Operation{ + Name: opDescribeAvailablePatches, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAvailablePatchesInput{} + } + + output = &DescribeAvailablePatchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAvailablePatches API operation for Amazon Simple Systems Manager (SSM). +// +// Lists all patches eligible to be included in a patch baseline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeAvailablePatches for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAvailablePatches +func (c *SSM) DescribeAvailablePatches(input *DescribeAvailablePatchesInput) (*DescribeAvailablePatchesOutput, error) { + req, out := c.DescribeAvailablePatchesRequest(input) + return out, req.Send() +} + +// DescribeAvailablePatchesWithContext is the same as DescribeAvailablePatches with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeAvailablePatches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAvailablePatchesWithContext(ctx aws.Context, input *DescribeAvailablePatchesInput, opts ...request.Option) (*DescribeAvailablePatchesOutput, error) { + req, out := c.DescribeAvailablePatchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAvailablePatchesPages iterates over the pages of a DescribeAvailablePatches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAvailablePatches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAvailablePatches operation. +// pageNum := 0 +// err := client.DescribeAvailablePatchesPages(params, +// func(page *ssm.DescribeAvailablePatchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAvailablePatchesPages(input *DescribeAvailablePatchesInput, fn func(*DescribeAvailablePatchesOutput, bool) bool) error { + return c.DescribeAvailablePatchesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAvailablePatchesPagesWithContext same as DescribeAvailablePatchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAvailablePatchesPagesWithContext(ctx aws.Context, input *DescribeAvailablePatchesInput, fn func(*DescribeAvailablePatchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAvailablePatchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAvailablePatchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAvailablePatchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeDocument = "DescribeDocument" + +// DescribeDocumentRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDocument operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDocument for more information on using the DescribeDocument +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDocumentRequest method. 
+// req, resp := client.DescribeDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeDocument +func (c *SSM) DescribeDocumentRequest(input *DescribeDocumentInput) (req *request.Request, output *DescribeDocumentOutput) { + op := &request.Operation{ + Name: opDescribeDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDocumentInput{} + } + + output = &DescribeDocumentOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDocument API operation for Amazon Simple Systems Manager (SSM). +// +// Describes the specified Systems Manager document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeDocument for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeDocument +func (c *SSM) DescribeDocument(input *DescribeDocumentInput) (*DescribeDocumentOutput, error) { + req, out := c.DescribeDocumentRequest(input) + return out, req.Send() +} + +// DescribeDocumentWithContext is the same as DescribeDocument with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDocument for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeDocumentWithContext(ctx aws.Context, input *DescribeDocumentInput, opts ...request.Option) (*DescribeDocumentOutput, error) { + req, out := c.DescribeDocumentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeDocumentPermission = "DescribeDocumentPermission" + +// DescribeDocumentPermissionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDocumentPermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDocumentPermission for more information on using the DescribeDocumentPermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDocumentPermissionRequest method. 
+// req, resp := client.DescribeDocumentPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeDocumentPermission +func (c *SSM) DescribeDocumentPermissionRequest(input *DescribeDocumentPermissionInput) (req *request.Request, output *DescribeDocumentPermissionOutput) { + op := &request.Operation{ + Name: opDescribeDocumentPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDocumentPermissionInput{} + } + + output = &DescribeDocumentPermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDocumentPermission API operation for Amazon Simple Systems Manager (SSM). +// +// Describes the permissions for a Systems Manager document. If you created +// the document, you are the owner. If a document is shared, it can either be +// shared privately (by specifying a user's AWS account ID) or publicly (All). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeDocumentPermission for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidPermissionType +// The permission type is not supported. Share is the only supported permission +// type. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeDocumentPermission +func (c *SSM) DescribeDocumentPermission(input *DescribeDocumentPermissionInput) (*DescribeDocumentPermissionOutput, error) { + req, out := c.DescribeDocumentPermissionRequest(input) + return out, req.Send() +} + +// DescribeDocumentPermissionWithContext is the same as DescribeDocumentPermission with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDocumentPermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeDocumentPermissionWithContext(ctx aws.Context, input *DescribeDocumentPermissionInput, opts ...request.Option) (*DescribeDocumentPermissionOutput, error) { + req, out := c.DescribeDocumentPermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEffectiveInstanceAssociations = "DescribeEffectiveInstanceAssociations" + +// DescribeEffectiveInstanceAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEffectiveInstanceAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEffectiveInstanceAssociations for more information on using the DescribeEffectiveInstanceAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEffectiveInstanceAssociationsRequest method. +// req, resp := client.DescribeEffectiveInstanceAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeEffectiveInstanceAssociations +func (c *SSM) DescribeEffectiveInstanceAssociationsRequest(input *DescribeEffectiveInstanceAssociationsInput) (req *request.Request, output *DescribeEffectiveInstanceAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeEffectiveInstanceAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEffectiveInstanceAssociationsInput{} + } + + output = &DescribeEffectiveInstanceAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEffectiveInstanceAssociations API operation for Amazon Simple Systems Manager (SSM). +// +// All associations for the instance(s). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeEffectiveInstanceAssociations for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. 
+// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeEffectiveInstanceAssociations +func (c *SSM) DescribeEffectiveInstanceAssociations(input *DescribeEffectiveInstanceAssociationsInput) (*DescribeEffectiveInstanceAssociationsOutput, error) { + req, out := c.DescribeEffectiveInstanceAssociationsRequest(input) + return out, req.Send() +} + +// DescribeEffectiveInstanceAssociationsWithContext is the same as DescribeEffectiveInstanceAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEffectiveInstanceAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeEffectiveInstanceAssociationsWithContext(ctx aws.Context, input *DescribeEffectiveInstanceAssociationsInput, opts ...request.Option) (*DescribeEffectiveInstanceAssociationsOutput, error) { + req, out := c.DescribeEffectiveInstanceAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeEffectiveInstanceAssociationsPages iterates over the pages of a DescribeEffectiveInstanceAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEffectiveInstanceAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeEffectiveInstanceAssociations operation. +// pageNum := 0 +// err := client.DescribeEffectiveInstanceAssociationsPages(params, +// func(page *ssm.DescribeEffectiveInstanceAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeEffectiveInstanceAssociationsPages(input *DescribeEffectiveInstanceAssociationsInput, fn func(*DescribeEffectiveInstanceAssociationsOutput, bool) bool) error { + return c.DescribeEffectiveInstanceAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEffectiveInstanceAssociationsPagesWithContext same as DescribeEffectiveInstanceAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeEffectiveInstanceAssociationsPagesWithContext(ctx aws.Context, input *DescribeEffectiveInstanceAssociationsInput, fn func(*DescribeEffectiveInstanceAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEffectiveInstanceAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEffectiveInstanceAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEffectiveInstanceAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeEffectivePatchesForPatchBaseline = "DescribeEffectivePatchesForPatchBaseline" + +// DescribeEffectivePatchesForPatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEffectivePatchesForPatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEffectivePatchesForPatchBaseline for more information on using the DescribeEffectivePatchesForPatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEffectivePatchesForPatchBaselineRequest method. 
+// req, resp := client.DescribeEffectivePatchesForPatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeEffectivePatchesForPatchBaseline +func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEffectivePatchesForPatchBaselineInput) (req *request.Request, output *DescribeEffectivePatchesForPatchBaselineOutput) { + op := &request.Operation{ + Name: opDescribeEffectivePatchesForPatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeEffectivePatchesForPatchBaselineInput{} + } + + output = &DescribeEffectivePatchesForPatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEffectivePatchesForPatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the current effective patches (the patch and the approval state) +// for the specified patch baseline. Note that this API applies only to Windows +// patch baselines. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeEffectivePatchesForPatchBaseline for usage and error information. +// +// Returned Error Types: +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. 
+// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * UnsupportedOperatingSystem +// The operating systems you specified is not supported, or the operation is +// not supported for the operating system. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeEffectivePatchesForPatchBaseline +func (c *SSM) DescribeEffectivePatchesForPatchBaseline(input *DescribeEffectivePatchesForPatchBaselineInput) (*DescribeEffectivePatchesForPatchBaselineOutput, error) { + req, out := c.DescribeEffectivePatchesForPatchBaselineRequest(input) + return out, req.Send() +} + +// DescribeEffectivePatchesForPatchBaselineWithContext is the same as DescribeEffectivePatchesForPatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEffectivePatchesForPatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeEffectivePatchesForPatchBaselineWithContext(ctx aws.Context, input *DescribeEffectivePatchesForPatchBaselineInput, opts ...request.Option) (*DescribeEffectivePatchesForPatchBaselineOutput, error) { + req, out := c.DescribeEffectivePatchesForPatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeEffectivePatchesForPatchBaselinePages iterates over the pages of a DescribeEffectivePatchesForPatchBaseline operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeEffectivePatchesForPatchBaseline method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEffectivePatchesForPatchBaseline operation. +// pageNum := 0 +// err := client.DescribeEffectivePatchesForPatchBaselinePages(params, +// func(page *ssm.DescribeEffectivePatchesForPatchBaselineOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeEffectivePatchesForPatchBaselinePages(input *DescribeEffectivePatchesForPatchBaselineInput, fn func(*DescribeEffectivePatchesForPatchBaselineOutput, bool) bool) error { + return c.DescribeEffectivePatchesForPatchBaselinePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEffectivePatchesForPatchBaselinePagesWithContext same as DescribeEffectivePatchesForPatchBaselinePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeEffectivePatchesForPatchBaselinePagesWithContext(ctx aws.Context, input *DescribeEffectivePatchesForPatchBaselineInput, fn func(*DescribeEffectivePatchesForPatchBaselineOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEffectivePatchesForPatchBaselineInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEffectivePatchesForPatchBaselineRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEffectivePatchesForPatchBaselineOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceAssociationsStatus = "DescribeInstanceAssociationsStatus" + +// DescribeInstanceAssociationsStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceAssociationsStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceAssociationsStatus for more information on using the DescribeInstanceAssociationsStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceAssociationsStatusRequest method. 
+// req, resp := client.DescribeInstanceAssociationsStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceAssociationsStatus +func (c *SSM) DescribeInstanceAssociationsStatusRequest(input *DescribeInstanceAssociationsStatusInput) (req *request.Request, output *DescribeInstanceAssociationsStatusOutput) { + op := &request.Operation{ + Name: opDescribeInstanceAssociationsStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceAssociationsStatusInput{} + } + + output = &DescribeInstanceAssociationsStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceAssociationsStatus API operation for Amazon Simple Systems Manager (SSM). +// +// The status of the associations for the instance(s). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInstanceAssociationsStatus for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. 
+// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceAssociationsStatus +func (c *SSM) DescribeInstanceAssociationsStatus(input *DescribeInstanceAssociationsStatusInput) (*DescribeInstanceAssociationsStatusOutput, error) { + req, out := c.DescribeInstanceAssociationsStatusRequest(input) + return out, req.Send() +} + +// DescribeInstanceAssociationsStatusWithContext is the same as DescribeInstanceAssociationsStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceAssociationsStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstanceAssociationsStatusWithContext(ctx aws.Context, input *DescribeInstanceAssociationsStatusInput, opts ...request.Option) (*DescribeInstanceAssociationsStatusOutput, error) { + req, out := c.DescribeInstanceAssociationsStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceAssociationsStatusPages iterates over the pages of a DescribeInstanceAssociationsStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceAssociationsStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceAssociationsStatus operation. 
+// pageNum := 0 +// err := client.DescribeInstanceAssociationsStatusPages(params, +// func(page *ssm.DescribeInstanceAssociationsStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstanceAssociationsStatusPages(input *DescribeInstanceAssociationsStatusInput, fn func(*DescribeInstanceAssociationsStatusOutput, bool) bool) error { + return c.DescribeInstanceAssociationsStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceAssociationsStatusPagesWithContext same as DescribeInstanceAssociationsStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstanceAssociationsStatusPagesWithContext(ctx aws.Context, input *DescribeInstanceAssociationsStatusInput, fn func(*DescribeInstanceAssociationsStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceAssociationsStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceAssociationsStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceAssociationsStatusOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstanceInformation = "DescribeInstanceInformation" + +// DescribeInstanceInformationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceInformation operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceInformation for more information on using the DescribeInstanceInformation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceInformationRequest method. +// req, resp := client.DescribeInstanceInformationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceInformation +func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformationInput) (req *request.Request, output *DescribeInstanceInformationOutput) { + op := &request.Operation{ + Name: opDescribeInstanceInformation, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstanceInformationInput{} + } + + output = &DescribeInstanceInformationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceInformation API operation for Amazon Simple Systems Manager (SSM). +// +// Describes one or more of your instances, including information about the +// operating system platform, the version of SSM Agent installed on the instance, +// instance status, and so on. +// +// If you specify one or more instance IDs, it returns information for those +// instances. 
If you do not specify instance IDs, it returns information for +// all your instances. If you specify an instance ID that is not valid or an +// instance that you do not own, you receive an error. +// +// The IamRole field for this API action is the Amazon Identity and Access Management +// (IAM) role assigned to on-premises instances. This call does not return the +// IAM role for EC2 instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInstanceInformation for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidInstanceInformationFilterValue +// The specified filter value is not valid. +// +// * InvalidFilterKey +// The specified key is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceInformation +func (c *SSM) DescribeInstanceInformation(input *DescribeInstanceInformationInput) (*DescribeInstanceInformationOutput, error) { + req, out := c.DescribeInstanceInformationRequest(input) + return out, req.Send() +} + +// DescribeInstanceInformationWithContext is the same as DescribeInstanceInformation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceInformation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstanceInformationWithContext(ctx aws.Context, input *DescribeInstanceInformationInput, opts ...request.Option) (*DescribeInstanceInformationOutput, error) { + req, out := c.DescribeInstanceInformationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstanceInformationPages iterates over the pages of a DescribeInstanceInformation operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceInformation method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceInformation operation. 
+// pageNum := 0 +// err := client.DescribeInstanceInformationPages(params, +// func(page *ssm.DescribeInstanceInformationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstanceInformationPages(input *DescribeInstanceInformationInput, fn func(*DescribeInstanceInformationOutput, bool) bool) error { + return c.DescribeInstanceInformationPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceInformationPagesWithContext same as DescribeInstanceInformationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstanceInformationPagesWithContext(ctx aws.Context, input *DescribeInstanceInformationInput, fn func(*DescribeInstanceInformationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceInformationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceInformationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceInformationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstancePatchStates = "DescribeInstancePatchStates" + +// DescribeInstancePatchStatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstancePatchStates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstancePatchStates for more information on using the DescribeInstancePatchStates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstancePatchStatesRequest method. +// req, resp := client.DescribeInstancePatchStatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatchStates +func (c *SSM) DescribeInstancePatchStatesRequest(input *DescribeInstancePatchStatesInput) (req *request.Request, output *DescribeInstancePatchStatesOutput) { + op := &request.Operation{ + Name: opDescribeInstancePatchStates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancePatchStatesInput{} + } + + output = &DescribeInstancePatchStatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstancePatchStates API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the high-level patch state of one or more instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInstancePatchStates for usage and error information. 
+// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatchStates +func (c *SSM) DescribeInstancePatchStates(input *DescribeInstancePatchStatesInput) (*DescribeInstancePatchStatesOutput, error) { + req, out := c.DescribeInstancePatchStatesRequest(input) + return out, req.Send() +} + +// DescribeInstancePatchStatesWithContext is the same as DescribeInstancePatchStates with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstancePatchStates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchStatesWithContext(ctx aws.Context, input *DescribeInstancePatchStatesInput, opts ...request.Option) (*DescribeInstancePatchStatesOutput, error) { + req, out := c.DescribeInstancePatchStatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstancePatchStatesPages iterates over the pages of a DescribeInstancePatchStates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatchStates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatchStates operation. 
+// pageNum := 0 +// err := client.DescribeInstancePatchStatesPages(params, +// func(page *ssm.DescribeInstancePatchStatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchStatesPages(input *DescribeInstancePatchStatesInput, fn func(*DescribeInstancePatchStatesOutput, bool) bool) error { + return c.DescribeInstancePatchStatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchStatesPagesWithContext same as DescribeInstancePatchStatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchStatesPagesWithContext(ctx aws.Context, input *DescribeInstancePatchStatesInput, fn func(*DescribeInstancePatchStatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchStatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchStatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchStatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstancePatchStatesForPatchGroup = "DescribeInstancePatchStatesForPatchGroup" + +// DescribeInstancePatchStatesForPatchGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstancePatchStatesForPatchGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstancePatchStatesForPatchGroup for more information on using the DescribeInstancePatchStatesForPatchGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstancePatchStatesForPatchGroupRequest method. +// req, resp := client.DescribeInstancePatchStatesForPatchGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatchStatesForPatchGroup +func (c *SSM) DescribeInstancePatchStatesForPatchGroupRequest(input *DescribeInstancePatchStatesForPatchGroupInput) (req *request.Request, output *DescribeInstancePatchStatesForPatchGroupOutput) { + op := &request.Operation{ + Name: opDescribeInstancePatchStatesForPatchGroup, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancePatchStatesForPatchGroupInput{} + } + + output = &DescribeInstancePatchStatesForPatchGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstancePatchStatesForPatchGroup API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the high-level patch state for the instances in the specified patch +// group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInstancePatchStatesForPatchGroup for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilter +// The filter name is not valid. Verify the you entered the correct name and +// try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatchStatesForPatchGroup +func (c *SSM) DescribeInstancePatchStatesForPatchGroup(input *DescribeInstancePatchStatesForPatchGroupInput) (*DescribeInstancePatchStatesForPatchGroupOutput, error) { + req, out := c.DescribeInstancePatchStatesForPatchGroupRequest(input) + return out, req.Send() +} + +// DescribeInstancePatchStatesForPatchGroupWithContext is the same as DescribeInstancePatchStatesForPatchGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstancePatchStatesForPatchGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchStatesForPatchGroupWithContext(ctx aws.Context, input *DescribeInstancePatchStatesForPatchGroupInput, opts ...request.Option) (*DescribeInstancePatchStatesForPatchGroupOutput, error) { + req, out := c.DescribeInstancePatchStatesForPatchGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstancePatchStatesForPatchGroupPages iterates over the pages of a DescribeInstancePatchStatesForPatchGroup operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatchStatesForPatchGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatchStatesForPatchGroup operation. +// pageNum := 0 +// err := client.DescribeInstancePatchStatesForPatchGroupPages(params, +// func(page *ssm.DescribeInstancePatchStatesForPatchGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchStatesForPatchGroupPages(input *DescribeInstancePatchStatesForPatchGroupInput, fn func(*DescribeInstancePatchStatesForPatchGroupOutput, bool) bool) error { + return c.DescribeInstancePatchStatesForPatchGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchStatesForPatchGroupPagesWithContext same as DescribeInstancePatchStatesForPatchGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchStatesForPatchGroupPagesWithContext(ctx aws.Context, input *DescribeInstancePatchStatesForPatchGroupInput, fn func(*DescribeInstancePatchStatesForPatchGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchStatesForPatchGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchStatesForPatchGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchStatesForPatchGroupOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstancePatches = "DescribeInstancePatches" + +// DescribeInstancePatchesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstancePatches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstancePatches for more information on using the DescribeInstancePatches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstancePatchesRequest method. +// req, resp := client.DescribeInstancePatchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatches +func (c *SSM) DescribeInstancePatchesRequest(input *DescribeInstancePatchesInput) (req *request.Request, output *DescribeInstancePatchesOutput) { + op := &request.Operation{ + Name: opDescribeInstancePatches, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancePatchesInput{} + } + + output = &DescribeInstancePatchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstancePatches API operation for Amazon Simple Systems Manager (SSM). 
+// +// Retrieves information about the patches on the specified instance and their +// state relative to the patch baseline being used for the instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInstancePatches for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidFilter +// The filter name is not valid. Verify the you entered the correct name and +// try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstancePatches +func (c *SSM) DescribeInstancePatches(input *DescribeInstancePatchesInput) (*DescribeInstancePatchesOutput, error) { + req, out := c.DescribeInstancePatchesRequest(input) + return out, req.Send() +} + +// DescribeInstancePatchesWithContext is the same as DescribeInstancePatches with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstancePatches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchesWithContext(ctx aws.Context, input *DescribeInstancePatchesInput, opts ...request.Option) (*DescribeInstancePatchesOutput, error) { + req, out := c.DescribeInstancePatchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstancePatchesPages iterates over the pages of a DescribeInstancePatches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatches operation. +// pageNum := 0 +// err := client.DescribeInstancePatchesPages(params, +// func(page *ssm.DescribeInstancePatchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchesPages(input *DescribeInstancePatchesInput, fn func(*DescribeInstancePatchesOutput, bool) bool) error { + return c.DescribeInstancePatchesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchesPagesWithContext same as DescribeInstancePatchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeInstancePatchesPagesWithContext(ctx aws.Context, input *DescribeInstancePatchesInput, fn func(*DescribeInstancePatchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInventoryDeletions = "DescribeInventoryDeletions" + +// DescribeInventoryDeletionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInventoryDeletions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInventoryDeletions for more information on using the DescribeInventoryDeletions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInventoryDeletionsRequest method. 
+// req, resp := client.DescribeInventoryDeletionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInventoryDeletions +func (c *SSM) DescribeInventoryDeletionsRequest(input *DescribeInventoryDeletionsInput) (req *request.Request, output *DescribeInventoryDeletionsOutput) { + op := &request.Operation{ + Name: opDescribeInventoryDeletions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInventoryDeletionsInput{} + } + + output = &DescribeInventoryDeletionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInventoryDeletions API operation for Amazon Simple Systems Manager (SSM). +// +// Describes a specific delete inventory operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeInventoryDeletions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDeletionIdException +// The ID specified for the delete operation does not exist or is not valid. +// Verify the ID and try again. +// +// * InvalidNextToken +// The specified token is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInventoryDeletions +func (c *SSM) DescribeInventoryDeletions(input *DescribeInventoryDeletionsInput) (*DescribeInventoryDeletionsOutput, error) { + req, out := c.DescribeInventoryDeletionsRequest(input) + return out, req.Send() +} + +// DescribeInventoryDeletionsWithContext is the same as DescribeInventoryDeletions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInventoryDeletions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInventoryDeletionsWithContext(ctx aws.Context, input *DescribeInventoryDeletionsInput, opts ...request.Option) (*DescribeInventoryDeletionsOutput, error) { + req, out := c.DescribeInventoryDeletionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInventoryDeletionsPages iterates over the pages of a DescribeInventoryDeletions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInventoryDeletions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInventoryDeletions operation. 
+// pageNum := 0 +// err := client.DescribeInventoryDeletionsPages(params, +// func(page *ssm.DescribeInventoryDeletionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInventoryDeletionsPages(input *DescribeInventoryDeletionsInput, fn func(*DescribeInventoryDeletionsOutput, bool) bool) error { + return c.DescribeInventoryDeletionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInventoryDeletionsPagesWithContext same as DescribeInventoryDeletionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInventoryDeletionsPagesWithContext(ctx aws.Context, input *DescribeInventoryDeletionsInput, fn func(*DescribeInventoryDeletionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInventoryDeletionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInventoryDeletionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInventoryDeletionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowExecutionTaskInvocations = "DescribeMaintenanceWindowExecutionTaskInvocations" + +// DescribeMaintenanceWindowExecutionTaskInvocationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowExecutionTaskInvocations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowExecutionTaskInvocations for more information on using the DescribeMaintenanceWindowExecutionTaskInvocations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowExecutionTaskInvocationsRequest method. +// req, resp := client.DescribeMaintenanceWindowExecutionTaskInvocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutionTaskInvocations +func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *DescribeMaintenanceWindowExecutionTaskInvocationsInput) (req *request.Request, output *DescribeMaintenanceWindowExecutionTaskInvocationsOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowExecutionTaskInvocations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowExecutionTaskInvocationsInput{} + } + + output = &DescribeMaintenanceWindowExecutionTaskInvocationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowExecutionTaskInvocations API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the individual task executions (one per target) for a particular +// task run as part of a maintenance window execution. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowExecutionTaskInvocations for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutionTaskInvocations +func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocations(input *DescribeMaintenanceWindowExecutionTaskInvocationsInput) (*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionTaskInvocationsWithContext is the same as DescribeMaintenanceWindowExecutionTaskInvocations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowExecutionTaskInvocations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTaskInvocationsInput, opts ...request.Option) (*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionTaskInvocationsPages iterates over the pages of a DescribeMaintenanceWindowExecutionTaskInvocations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowExecutionTaskInvocations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutionTaskInvocations operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionTaskInvocationsPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionTaskInvocationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsPages(input *DescribeMaintenanceWindowExecutionTaskInvocationsInput, fn func(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext same as DescribeMaintenanceWindowExecutionTaskInvocationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTaskInvocationsInput, fn func(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionTaskInvocationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionTaskInvocationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowExecutionTasks = "DescribeMaintenanceWindowExecutionTasks" + +// DescribeMaintenanceWindowExecutionTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowExecutionTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowExecutionTasks for more information on using the DescribeMaintenanceWindowExecutionTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowExecutionTasksRequest method. 
+// req, resp := client.DescribeMaintenanceWindowExecutionTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutionTasks +func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMaintenanceWindowExecutionTasksInput) (req *request.Request, output *DescribeMaintenanceWindowExecutionTasksOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowExecutionTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowExecutionTasksInput{} + } + + output = &DescribeMaintenanceWindowExecutionTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowExecutionTasks API operation for Amazon Simple Systems Manager (SSM). +// +// For a given maintenance window execution, lists the tasks that were run. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowExecutionTasks for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutionTasks +func (c *SSM) DescribeMaintenanceWindowExecutionTasks(input *DescribeMaintenanceWindowExecutionTasksInput) (*DescribeMaintenanceWindowExecutionTasksOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionTasksRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionTasksWithContext is the same as DescribeMaintenanceWindowExecutionTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowExecutionTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionTasksWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTasksInput, opts ...request.Option) (*DescribeMaintenanceWindowExecutionTasksOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionTasksPages iterates over the pages of a DescribeMaintenanceWindowExecutionTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowExecutionTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutionTasks operation. 
+// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionTasksPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionTasksPages(input *DescribeMaintenanceWindowExecutionTasksInput, fn func(*DescribeMaintenanceWindowExecutionTasksOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionTasksPagesWithContext same as DescribeMaintenanceWindowExecutionTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionTasksPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTasksInput, fn func(*DescribeMaintenanceWindowExecutionTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowExecutions = "DescribeMaintenanceWindowExecutions" + +// DescribeMaintenanceWindowExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowExecutions operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowExecutions for more information on using the DescribeMaintenanceWindowExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowExecutionsRequest method. +// req, resp := client.DescribeMaintenanceWindowExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutions +func (c *SSM) DescribeMaintenanceWindowExecutionsRequest(input *DescribeMaintenanceWindowExecutionsInput) (req *request.Request, output *DescribeMaintenanceWindowExecutionsOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowExecutionsInput{} + } + + output = &DescribeMaintenanceWindowExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowExecutions API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the executions of a maintenance window. This includes information about +// when the maintenance window was scheduled to be active, and information about +// tasks registered and run with the maintenance window. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowExecutions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowExecutions +func (c *SSM) DescribeMaintenanceWindowExecutions(input *DescribeMaintenanceWindowExecutionsInput) (*DescribeMaintenanceWindowExecutionsOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionsRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionsWithContext is the same as DescribeMaintenanceWindowExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionsWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionsInput, opts ...request.Option) (*DescribeMaintenanceWindowExecutionsOutput, error) { + req, out := c.DescribeMaintenanceWindowExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowExecutionsPages iterates over the pages of a DescribeMaintenanceWindowExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeMaintenanceWindowExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutions operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionsPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionsPages(input *DescribeMaintenanceWindowExecutionsInput, fn func(*DescribeMaintenanceWindowExecutionsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionsPagesWithContext same as DescribeMaintenanceWindowExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionsInput, fn func(*DescribeMaintenanceWindowExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowSchedule = "DescribeMaintenanceWindowSchedule" + +// DescribeMaintenanceWindowScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowSchedule for more information on using the DescribeMaintenanceWindowSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowScheduleRequest method. 
+// req, resp := client.DescribeMaintenanceWindowScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowSchedule +func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanceWindowScheduleInput) (req *request.Request, output *DescribeMaintenanceWindowScheduleOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowScheduleInput{} + } + + output = &DescribeMaintenanceWindowScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowSchedule API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves information about upcoming executions of a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowSchedule for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowSchedule +func (c *SSM) DescribeMaintenanceWindowSchedule(input *DescribeMaintenanceWindowScheduleInput) (*DescribeMaintenanceWindowScheduleOutput, error) { + req, out := c.DescribeMaintenanceWindowScheduleRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowScheduleWithContext is the same as DescribeMaintenanceWindowSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowScheduleWithContext(ctx aws.Context, input *DescribeMaintenanceWindowScheduleInput, opts ...request.Option) (*DescribeMaintenanceWindowScheduleOutput, error) { + req, out := c.DescribeMaintenanceWindowScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowSchedulePages iterates over the pages of a DescribeMaintenanceWindowSchedule operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowSchedule method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowSchedule operation. 
+// pageNum := 0 +// err := client.DescribeMaintenanceWindowSchedulePages(params, +// func(page *ssm.DescribeMaintenanceWindowScheduleOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowSchedulePages(input *DescribeMaintenanceWindowScheduleInput, fn func(*DescribeMaintenanceWindowScheduleOutput, bool) bool) error { + return c.DescribeMaintenanceWindowSchedulePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowSchedulePagesWithContext same as DescribeMaintenanceWindowSchedulePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowSchedulePagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowScheduleInput, fn func(*DescribeMaintenanceWindowScheduleOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowScheduleInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowScheduleRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowScheduleOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowTargets = "DescribeMaintenanceWindowTargets" + +// DescribeMaintenanceWindowTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowTargets operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowTargets for more information on using the DescribeMaintenanceWindowTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowTargetsRequest method. +// req, resp := client.DescribeMaintenanceWindowTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowTargets +func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenanceWindowTargetsInput) (req *request.Request, output *DescribeMaintenanceWindowTargetsOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowTargets, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowTargetsInput{} + } + + output = &DescribeMaintenanceWindowTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowTargets API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the targets registered with the maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowTargets for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowTargets +func (c *SSM) DescribeMaintenanceWindowTargets(input *DescribeMaintenanceWindowTargetsInput) (*DescribeMaintenanceWindowTargetsOutput, error) { + req, out := c.DescribeMaintenanceWindowTargetsRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowTargetsWithContext is the same as DescribeMaintenanceWindowTargets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowTargets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowTargetsWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTargetsInput, opts ...request.Option) (*DescribeMaintenanceWindowTargetsOutput, error) { + req, out := c.DescribeMaintenanceWindowTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeMaintenanceWindowTargetsPages iterates over the pages of a DescribeMaintenanceWindowTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowTargets operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowTargetsPages(params, +// func(page *ssm.DescribeMaintenanceWindowTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowTargetsPages(input *DescribeMaintenanceWindowTargetsInput, fn func(*DescribeMaintenanceWindowTargetsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowTargetsPagesWithContext same as DescribeMaintenanceWindowTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeMaintenanceWindowTargetsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTargetsInput, fn func(*DescribeMaintenanceWindowTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowTasks = "DescribeMaintenanceWindowTasks" + +// DescribeMaintenanceWindowTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowTasks for more information on using the DescribeMaintenanceWindowTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowTasksRequest method. 
+// req, resp := client.DescribeMaintenanceWindowTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowTasks +func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWindowTasksInput) (req *request.Request, output *DescribeMaintenanceWindowTasksOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowTasks, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowTasksInput{} + } + + output = &DescribeMaintenanceWindowTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowTasks API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the tasks in a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowTasks for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowTasks +func (c *SSM) DescribeMaintenanceWindowTasks(input *DescribeMaintenanceWindowTasksInput) (*DescribeMaintenanceWindowTasksOutput, error) { + req, out := c.DescribeMaintenanceWindowTasksRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowTasksWithContext is the same as DescribeMaintenanceWindowTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowTasksWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTasksInput, opts ...request.Option) (*DescribeMaintenanceWindowTasksOutput, error) { + req, out := c.DescribeMaintenanceWindowTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowTasksPages iterates over the pages of a DescribeMaintenanceWindowTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowTasks operation. 
+// pageNum := 0 +// err := client.DescribeMaintenanceWindowTasksPages(params, +// func(page *ssm.DescribeMaintenanceWindowTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowTasksPages(input *DescribeMaintenanceWindowTasksInput, fn func(*DescribeMaintenanceWindowTasksOutput, bool) bool) error { + return c.DescribeMaintenanceWindowTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowTasksPagesWithContext same as DescribeMaintenanceWindowTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowTasksPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTasksInput, fn func(*DescribeMaintenanceWindowTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindows = "DescribeMaintenanceWindows" + +// DescribeMaintenanceWindowsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindows operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindows for more information on using the DescribeMaintenanceWindows +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowsRequest method. +// req, resp := client.DescribeMaintenanceWindowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindows +func (c *SSM) DescribeMaintenanceWindowsRequest(input *DescribeMaintenanceWindowsInput) (req *request.Request, output *DescribeMaintenanceWindowsOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindows, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowsInput{} + } + + output = &DescribeMaintenanceWindowsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindows API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the maintenance windows in an AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindows for usage and error information. 
+// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindows +func (c *SSM) DescribeMaintenanceWindows(input *DescribeMaintenanceWindowsInput) (*DescribeMaintenanceWindowsOutput, error) { + req, out := c.DescribeMaintenanceWindowsRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowsWithContext is the same as DescribeMaintenanceWindows with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindows for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsInput, opts ...request.Option) (*DescribeMaintenanceWindowsOutput, error) { + req, out := c.DescribeMaintenanceWindowsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowsPages iterates over the pages of a DescribeMaintenanceWindows operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindows method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindows operation. 
+// pageNum := 0 +// err := client.DescribeMaintenanceWindowsPages(params, +// func(page *ssm.DescribeMaintenanceWindowsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowsPages(input *DescribeMaintenanceWindowsInput, fn func(*DescribeMaintenanceWindowsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowsPagesWithContext same as DescribeMaintenanceWindowsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsInput, fn func(*DescribeMaintenanceWindowsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMaintenanceWindowsForTarget = "DescribeMaintenanceWindowsForTarget" + +// DescribeMaintenanceWindowsForTargetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMaintenanceWindowsForTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMaintenanceWindowsForTarget for more information on using the DescribeMaintenanceWindowsForTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMaintenanceWindowsForTargetRequest method. +// req, resp := client.DescribeMaintenanceWindowsForTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowsForTarget +func (c *SSM) DescribeMaintenanceWindowsForTargetRequest(input *DescribeMaintenanceWindowsForTargetInput) (req *request.Request, output *DescribeMaintenanceWindowsForTargetOutput) { + op := &request.Operation{ + Name: opDescribeMaintenanceWindowsForTarget, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMaintenanceWindowsForTargetInput{} + } + + output = &DescribeMaintenanceWindowsForTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMaintenanceWindowsForTarget API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves information about the maintenance window targets or tasks that +// an instance is associated with. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeMaintenanceWindowsForTarget for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowsForTarget +func (c *SSM) DescribeMaintenanceWindowsForTarget(input *DescribeMaintenanceWindowsForTargetInput) (*DescribeMaintenanceWindowsForTargetOutput, error) { + req, out := c.DescribeMaintenanceWindowsForTargetRequest(input) + return out, req.Send() +} + +// DescribeMaintenanceWindowsForTargetWithContext is the same as DescribeMaintenanceWindowsForTarget with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMaintenanceWindowsForTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsForTargetWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsForTargetInput, opts ...request.Option) (*DescribeMaintenanceWindowsForTargetOutput, error) { + req, out := c.DescribeMaintenanceWindowsForTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMaintenanceWindowsForTargetPages iterates over the pages of a DescribeMaintenanceWindowsForTarget operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowsForTarget method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowsForTarget operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowsForTargetPages(params, +// func(page *ssm.DescribeMaintenanceWindowsForTargetOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowsForTargetPages(input *DescribeMaintenanceWindowsForTargetInput, fn func(*DescribeMaintenanceWindowsForTargetOutput, bool) bool) error { + return c.DescribeMaintenanceWindowsForTargetPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowsForTargetPagesWithContext same as DescribeMaintenanceWindowsForTargetPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsForTargetPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsForTargetInput, fn func(*DescribeMaintenanceWindowsForTargetOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowsForTargetInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowsForTargetRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowsForTargetOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeOpsItems = "DescribeOpsItems" + +// DescribeOpsItemsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOpsItems operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeOpsItems for more information on using the DescribeOpsItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeOpsItemsRequest method. +// req, resp := client.DescribeOpsItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeOpsItems +func (c *SSM) DescribeOpsItemsRequest(input *DescribeOpsItemsInput) (req *request.Request, output *DescribeOpsItemsOutput) { + op := &request.Operation{ + Name: opDescribeOpsItems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeOpsItemsInput{} + } + + output = &DescribeOpsItemsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeOpsItems API operation for Amazon Simple Systems Manager (SSM). +// +// Query a set of OpsItems. You must have permission in AWS Identity and Access +// Management (IAM) to query a list of OpsItems. For more information, see Getting +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. 
+// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeOpsItems for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeOpsItems +func (c *SSM) DescribeOpsItems(input *DescribeOpsItemsInput) (*DescribeOpsItemsOutput, error) { + req, out := c.DescribeOpsItemsRequest(input) + return out, req.Send() +} + +// DescribeOpsItemsWithContext is the same as DescribeOpsItems with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeOpsItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeOpsItemsWithContext(ctx aws.Context, input *DescribeOpsItemsInput, opts ...request.Option) (*DescribeOpsItemsOutput, error) { + req, out := c.DescribeOpsItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeOpsItemsPages iterates over the pages of a DescribeOpsItems operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeOpsItems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOpsItems operation. +// pageNum := 0 +// err := client.DescribeOpsItemsPages(params, +// func(page *ssm.DescribeOpsItemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeOpsItemsPages(input *DescribeOpsItemsInput, fn func(*DescribeOpsItemsOutput, bool) bool) error { + return c.DescribeOpsItemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeOpsItemsPagesWithContext same as DescribeOpsItemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeOpsItemsPagesWithContext(ctx aws.Context, input *DescribeOpsItemsInput, fn func(*DescribeOpsItemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeOpsItemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeOpsItemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeOpsItemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeParameters = "DescribeParameters" + +// DescribeParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeParameters operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeParameters for more information on using the DescribeParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeParametersRequest method. +// req, resp := client.DescribeParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeParameters +func (c *SSM) DescribeParametersRequest(input *DescribeParametersInput) (req *request.Request, output *DescribeParametersOutput) { + op := &request.Operation{ + Name: opDescribeParameters, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeParametersInput{} + } + + output = &DescribeParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeParameters API operation for Amazon Simple Systems Manager (SSM). +// +// Get information about a parameter. +// +// Request results are returned on a best-effort basis. If you specify MaxResults +// in the request, the response includes information up to the limit specified. +// The number of items returned, however, can be between zero and the value +// of MaxResults. 
If the service reaches an internal limit while processing +// the results, it stops the operation and returns the matching values up to +// that point and a NextToken. You can specify the NextToken in a subsequent +// call to get the next set of results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeParameters for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidFilterOption +// The specified filter option is not valid. Valid options are Equals and BeginsWith. +// For Path filter, valid options are Recursive and OneLevel. +// +// * InvalidFilterValue +// The filter value is not valid. Verify the value and try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeParameters +func (c *SSM) DescribeParameters(input *DescribeParametersInput) (*DescribeParametersOutput, error) { + req, out := c.DescribeParametersRequest(input) + return out, req.Send() +} + +// DescribeParametersWithContext is the same as DescribeParameters with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeParametersWithContext(ctx aws.Context, input *DescribeParametersInput, opts ...request.Option) (*DescribeParametersOutput, error) { + req, out := c.DescribeParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeParametersPages iterates over the pages of a DescribeParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeParameters operation. +// pageNum := 0 +// err := client.DescribeParametersPages(params, +// func(page *ssm.DescribeParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeParametersPages(input *DescribeParametersInput, fn func(*DescribeParametersOutput, bool) bool) error { + return c.DescribeParametersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeParametersPagesWithContext same as DescribeParametersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeParametersPagesWithContext(ctx aws.Context, input *DescribeParametersInput, fn func(*DescribeParametersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeParametersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeParametersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeParametersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePatchBaselines = "DescribePatchBaselines" + +// DescribePatchBaselinesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePatchBaselines operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePatchBaselines for more information on using the DescribePatchBaselines +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePatchBaselinesRequest method. 
+// req, resp := client.DescribePatchBaselinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchBaselines +func (c *SSM) DescribePatchBaselinesRequest(input *DescribePatchBaselinesInput) (req *request.Request, output *DescribePatchBaselinesOutput) { + op := &request.Operation{ + Name: opDescribePatchBaselines, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePatchBaselinesInput{} + } + + output = &DescribePatchBaselinesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePatchBaselines API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the patch baselines in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribePatchBaselines for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchBaselines +func (c *SSM) DescribePatchBaselines(input *DescribePatchBaselinesInput) (*DescribePatchBaselinesOutput, error) { + req, out := c.DescribePatchBaselinesRequest(input) + return out, req.Send() +} + +// DescribePatchBaselinesWithContext is the same as DescribePatchBaselines with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePatchBaselines for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchBaselinesWithContext(ctx aws.Context, input *DescribePatchBaselinesInput, opts ...request.Option) (*DescribePatchBaselinesOutput, error) { + req, out := c.DescribePatchBaselinesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePatchBaselinesPages iterates over the pages of a DescribePatchBaselines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchBaselines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchBaselines operation. +// pageNum := 0 +// err := client.DescribePatchBaselinesPages(params, +// func(page *ssm.DescribePatchBaselinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchBaselinesPages(input *DescribePatchBaselinesInput, fn func(*DescribePatchBaselinesOutput, bool) bool) error { + return c.DescribePatchBaselinesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchBaselinesPagesWithContext same as DescribePatchBaselinesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribePatchBaselinesPagesWithContext(ctx aws.Context, input *DescribePatchBaselinesInput, fn func(*DescribePatchBaselinesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchBaselinesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchBaselinesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchBaselinesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePatchGroupState = "DescribePatchGroupState" + +// DescribePatchGroupStateRequest generates a "aws/request.Request" representing the +// client's request for the DescribePatchGroupState operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePatchGroupState for more information on using the DescribePatchGroupState +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePatchGroupStateRequest method. 
+// req, resp := client.DescribePatchGroupStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchGroupState +func (c *SSM) DescribePatchGroupStateRequest(input *DescribePatchGroupStateInput) (req *request.Request, output *DescribePatchGroupStateOutput) { + op := &request.Operation{ + Name: opDescribePatchGroupState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribePatchGroupStateInput{} + } + + output = &DescribePatchGroupStateOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePatchGroupState API operation for Amazon Simple Systems Manager (SSM). +// +// Returns high-level aggregated patch compliance state for a patch group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribePatchGroupState for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchGroupState +func (c *SSM) DescribePatchGroupState(input *DescribePatchGroupStateInput) (*DescribePatchGroupStateOutput, error) { + req, out := c.DescribePatchGroupStateRequest(input) + return out, req.Send() +} + +// DescribePatchGroupStateWithContext is the same as DescribePatchGroupState with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePatchGroupState for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchGroupStateWithContext(ctx aws.Context, input *DescribePatchGroupStateInput, opts ...request.Option) (*DescribePatchGroupStateOutput, error) { + req, out := c.DescribePatchGroupStateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribePatchGroups = "DescribePatchGroups" + +// DescribePatchGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePatchGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePatchGroups for more information on using the DescribePatchGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePatchGroupsRequest method. 
+// req, resp := client.DescribePatchGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchGroups +func (c *SSM) DescribePatchGroupsRequest(input *DescribePatchGroupsInput) (req *request.Request, output *DescribePatchGroupsOutput) { + op := &request.Operation{ + Name: opDescribePatchGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePatchGroupsInput{} + } + + output = &DescribePatchGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePatchGroups API operation for Amazon Simple Systems Manager (SSM). +// +// Lists all patch groups that have been registered with patch baselines. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribePatchGroups for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchGroups +func (c *SSM) DescribePatchGroups(input *DescribePatchGroupsInput) (*DescribePatchGroupsOutput, error) { + req, out := c.DescribePatchGroupsRequest(input) + return out, req.Send() +} + +// DescribePatchGroupsWithContext is the same as DescribePatchGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePatchGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchGroupsWithContext(ctx aws.Context, input *DescribePatchGroupsInput, opts ...request.Option) (*DescribePatchGroupsOutput, error) { + req, out := c.DescribePatchGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePatchGroupsPages iterates over the pages of a DescribePatchGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchGroups operation. +// pageNum := 0 +// err := client.DescribePatchGroupsPages(params, +// func(page *ssm.DescribePatchGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchGroupsPages(input *DescribePatchGroupsInput, fn func(*DescribePatchGroupsOutput, bool) bool) error { + return c.DescribePatchGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchGroupsPagesWithContext same as DescribePatchGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribePatchGroupsPagesWithContext(ctx aws.Context, input *DescribePatchGroupsInput, fn func(*DescribePatchGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribePatchProperties = "DescribePatchProperties" + +// DescribePatchPropertiesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePatchProperties operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribePatchProperties for more information on using the DescribePatchProperties +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribePatchPropertiesRequest method. 
+// req, resp := client.DescribePatchPropertiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchProperties +func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput) (req *request.Request, output *DescribePatchPropertiesOutput) { + op := &request.Operation{ + Name: opDescribePatchProperties, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribePatchPropertiesInput{} + } + + output = &DescribePatchPropertiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribePatchProperties API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the properties of available patches organized by product, product family, +// classification, severity, and other properties of available patches. You +// can use the reported properties in the filters you specify in requests for +// actions such as CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, +// and DescribePatchBaselines. 
+// +// The following section lists the properties that can be used in filters for +// each major operating system type: +// +// AMAZON_LINUX +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// AMAZON_LINUX_2 +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// CENTOS +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// DEBIAN +// +// Valid properties: PRODUCT, PRIORITY +// +// ORACLE_LINUX +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// REDHAT_ENTERPRISE_LINUX +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// SUSE +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// UBUNTU +// +// Valid properties: PRODUCT, PRIORITY +// +// WINDOWS +// +// Valid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribePatchProperties for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribePatchProperties +func (c *SSM) DescribePatchProperties(input *DescribePatchPropertiesInput) (*DescribePatchPropertiesOutput, error) { + req, out := c.DescribePatchPropertiesRequest(input) + return out, req.Send() +} + +// DescribePatchPropertiesWithContext is the same as DescribePatchProperties with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePatchProperties for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchPropertiesWithContext(ctx aws.Context, input *DescribePatchPropertiesInput, opts ...request.Option) (*DescribePatchPropertiesOutput, error) { + req, out := c.DescribePatchPropertiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePatchPropertiesPages iterates over the pages of a DescribePatchProperties operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchProperties method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchProperties operation. +// pageNum := 0 +// err := client.DescribePatchPropertiesPages(params, +// func(page *ssm.DescribePatchPropertiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchPropertiesPages(input *DescribePatchPropertiesInput, fn func(*DescribePatchPropertiesOutput, bool) bool) error { + return c.DescribePatchPropertiesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchPropertiesPagesWithContext same as DescribePatchPropertiesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribePatchPropertiesPagesWithContext(ctx aws.Context, input *DescribePatchPropertiesInput, fn func(*DescribePatchPropertiesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchPropertiesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchPropertiesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchPropertiesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSessions = "DescribeSessions" + +// DescribeSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSessions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSessions for more information on using the DescribeSessions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSessionsRequest method. 
+// req, resp := client.DescribeSessionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeSessions +func (c *SSM) DescribeSessionsRequest(input *DescribeSessionsInput) (req *request.Request, output *DescribeSessionsOutput) { + op := &request.Operation{ + Name: opDescribeSessions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSessionsInput{} + } + + output = &DescribeSessionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSessions API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves a list of all active sessions (both connected and disconnected) +// or terminated sessions from the past 30 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation DescribeSessions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeSessions +func (c *SSM) DescribeSessions(input *DescribeSessionsInput) (*DescribeSessionsOutput, error) { + req, out := c.DescribeSessionsRequest(input) + return out, req.Send() +} + +// DescribeSessionsWithContext is the same as DescribeSessions with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeSessionsWithContext(ctx aws.Context, input *DescribeSessionsInput, opts ...request.Option) (*DescribeSessionsOutput, error) { + req, out := c.DescribeSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSessionsPages iterates over the pages of a DescribeSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSessions operation. +// pageNum := 0 +// err := client.DescribeSessionsPages(params, +// func(page *ssm.DescribeSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeSessionsPages(input *DescribeSessionsInput, fn func(*DescribeSessionsOutput, bool) bool) error { + return c.DescribeSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSessionsPagesWithContext same as DescribeSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeSessionsPagesWithContext(ctx aws.Context, input *DescribeSessionsInput, fn func(*DescribeSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetAutomationExecution = "GetAutomationExecution" + +// GetAutomationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the GetAutomationExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAutomationExecution for more information on using the GetAutomationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAutomationExecutionRequest method. 
+// req, resp := client.GetAutomationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetAutomationExecution +func (c *SSM) GetAutomationExecutionRequest(input *GetAutomationExecutionInput) (req *request.Request, output *GetAutomationExecutionOutput) { + op := &request.Operation{ + Name: opGetAutomationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAutomationExecutionInput{} + } + + output = &GetAutomationExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAutomationExecution API operation for Amazon Simple Systems Manager (SSM). +// +// Get detailed information about a particular Automation execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetAutomationExecution for usage and error information. +// +// Returned Error Types: +// * AutomationExecutionNotFoundException +// There is no automation execution information for the requested automation +// execution ID. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetAutomationExecution +func (c *SSM) GetAutomationExecution(input *GetAutomationExecutionInput) (*GetAutomationExecutionOutput, error) { + req, out := c.GetAutomationExecutionRequest(input) + return out, req.Send() +} + +// GetAutomationExecutionWithContext is the same as GetAutomationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See GetAutomationExecution for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetAutomationExecutionWithContext(ctx aws.Context, input *GetAutomationExecutionInput, opts ...request.Option) (*GetAutomationExecutionOutput, error) { + req, out := c.GetAutomationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCalendarState = "GetCalendarState" + +// GetCalendarStateRequest generates a "aws/request.Request" representing the +// client's request for the GetCalendarState operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCalendarState for more information on using the GetCalendarState +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCalendarStateRequest method. 
+// req, resp := client.GetCalendarStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCalendarState +func (c *SSM) GetCalendarStateRequest(input *GetCalendarStateInput) (req *request.Request, output *GetCalendarStateOutput) { + op := &request.Operation{ + Name: opGetCalendarState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCalendarStateInput{} + } + + output = &GetCalendarStateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCalendarState API operation for Amazon Simple Systems Manager (SSM). +// +// Gets the state of the AWS Systems Manager Change Calendar at an optional, +// specified time. If you specify a time, GetCalendarState returns the state +// of the calendar at a specific time, and returns the next time that the Change +// Calendar state will transition. If you do not specify a time, GetCalendarState +// assumes the current time. Change Calendar entries have two possible states: +// OPEN or CLOSED. +// +// If you specify more than one calendar in a request, the command returns the +// status of OPEN only if all calendars in the request are open. If one or more +// calendars in the request are closed, the status returned is CLOSED. +// +// For more information about Systems Manager Change Calendar, see AWS Systems +// Manager Change Calendar (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetCalendarState for usage and error information. 
+// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentType +// The document type is not valid. Valid document types are described in the +// DocumentType property. +// +// * UnsupportedCalendarException +// The calendar entry contained in the specified Systems Manager document is +// not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCalendarState +func (c *SSM) GetCalendarState(input *GetCalendarStateInput) (*GetCalendarStateOutput, error) { + req, out := c.GetCalendarStateRequest(input) + return out, req.Send() +} + +// GetCalendarStateWithContext is the same as GetCalendarState with the addition of +// the ability to pass a context and additional request options. +// +// See GetCalendarState for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetCalendarStateWithContext(ctx aws.Context, input *GetCalendarStateInput, opts ...request.Option) (*GetCalendarStateOutput, error) { + req, out := c.GetCalendarStateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCommandInvocation = "GetCommandInvocation" + +// GetCommandInvocationRequest generates a "aws/request.Request" representing the +// client's request for the GetCommandInvocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetCommandInvocation for more information on using the GetCommandInvocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCommandInvocationRequest method. +// req, resp := client.GetCommandInvocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCommandInvocation +func (c *SSM) GetCommandInvocationRequest(input *GetCommandInvocationInput) (req *request.Request, output *GetCommandInvocationOutput) { + op := &request.Operation{ + Name: opGetCommandInvocation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCommandInvocationInput{} + } + + output = &GetCommandInvocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCommandInvocation API operation for Amazon Simple Systems Manager (SSM). +// +// Returns detailed information about command execution for an invocation or +// plugin. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetCommandInvocation for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidCommandId +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. 
+// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidPluginName +// The plugin name is not valid. +// +// * InvocationDoesNotExist +// The command ID and instance ID you specified did not match any invocations. +// Verify the command ID and the instance ID and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCommandInvocation +func (c *SSM) GetCommandInvocation(input *GetCommandInvocationInput) (*GetCommandInvocationOutput, error) { + req, out := c.GetCommandInvocationRequest(input) + return out, req.Send() +} + +// GetCommandInvocationWithContext is the same as GetCommandInvocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetCommandInvocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetCommandInvocationWithContext(ctx aws.Context, input *GetCommandInvocationInput, opts ...request.Option) (*GetCommandInvocationOutput, error) { + req, out := c.GetCommandInvocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetConnectionStatus = "GetConnectionStatus" + +// GetConnectionStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetConnectionStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetConnectionStatus for more information on using the GetConnectionStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetConnectionStatusRequest method. +// req, resp := client.GetConnectionStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetConnectionStatus +func (c *SSM) GetConnectionStatusRequest(input *GetConnectionStatusInput) (req *request.Request, output *GetConnectionStatusOutput) { + op := &request.Operation{ + Name: opGetConnectionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConnectionStatusInput{} + } + + output = &GetConnectionStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetConnectionStatus API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the Session Manager connection status for an instance to determine +// whether it is running and ready to receive Session Manager connections. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetConnectionStatus for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetConnectionStatus +func (c *SSM) GetConnectionStatus(input *GetConnectionStatusInput) (*GetConnectionStatusOutput, error) { + req, out := c.GetConnectionStatusRequest(input) + return out, req.Send() +} + +// GetConnectionStatusWithContext is the same as GetConnectionStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetConnectionStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetConnectionStatusWithContext(ctx aws.Context, input *GetConnectionStatusInput, opts ...request.Option) (*GetConnectionStatusOutput, error) { + req, out := c.GetConnectionStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDefaultPatchBaseline = "GetDefaultPatchBaseline" + +// GetDefaultPatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the GetDefaultPatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDefaultPatchBaseline for more information on using the GetDefaultPatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDefaultPatchBaselineRequest method. 
+// req, resp := client.GetDefaultPatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDefaultPatchBaseline +func (c *SSM) GetDefaultPatchBaselineRequest(input *GetDefaultPatchBaselineInput) (req *request.Request, output *GetDefaultPatchBaselineOutput) { + op := &request.Operation{ + Name: opGetDefaultPatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDefaultPatchBaselineInput{} + } + + output = &GetDefaultPatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDefaultPatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the default patch baseline. Note that Systems Manager supports +// creating multiple default patch baselines. For example, you can create a +// default patch baseline for each operating system. +// +// If you do not specify an operating system value, the default patch baseline +// for Windows is returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetDefaultPatchBaseline for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDefaultPatchBaseline +func (c *SSM) GetDefaultPatchBaseline(input *GetDefaultPatchBaselineInput) (*GetDefaultPatchBaselineOutput, error) { + req, out := c.GetDefaultPatchBaselineRequest(input) + return out, req.Send() +} + +// GetDefaultPatchBaselineWithContext is the same as GetDefaultPatchBaseline with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetDefaultPatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetDefaultPatchBaselineWithContext(ctx aws.Context, input *GetDefaultPatchBaselineInput, opts ...request.Option) (*GetDefaultPatchBaselineOutput, error) { + req, out := c.GetDefaultPatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDeployablePatchSnapshotForInstance = "GetDeployablePatchSnapshotForInstance" + +// GetDeployablePatchSnapshotForInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployablePatchSnapshotForInstance operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDeployablePatchSnapshotForInstance for more information on using the GetDeployablePatchSnapshotForInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDeployablePatchSnapshotForInstanceRequest method. 
+// req, resp := client.GetDeployablePatchSnapshotForInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance +func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployablePatchSnapshotForInstanceInput) (req *request.Request, output *GetDeployablePatchSnapshotForInstanceOutput) { + op := &request.Operation{ + Name: opGetDeployablePatchSnapshotForInstance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDeployablePatchSnapshotForInstanceInput{} + } + + output = &GetDeployablePatchSnapshotForInstanceOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDeployablePatchSnapshotForInstance API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the current snapshot for the patch baseline the instance uses. +// This API is primarily used by the AWS-RunPatchBaseline Systems Manager document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetDeployablePatchSnapshotForInstance for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * UnsupportedOperatingSystem +// The operating systems you specified is not supported, or the operation is +// not supported for the operating system. +// +// * UnsupportedFeatureRequiredException +// Microsoft application patching is only available on EC2 instances and advanced +// instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable advanced instances. 
For more information, see Using the advanced-instances +// tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// in the AWS Systems Manager User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance +func (c *SSM) GetDeployablePatchSnapshotForInstance(input *GetDeployablePatchSnapshotForInstanceInput) (*GetDeployablePatchSnapshotForInstanceOutput, error) { + req, out := c.GetDeployablePatchSnapshotForInstanceRequest(input) + return out, req.Send() +} + +// GetDeployablePatchSnapshotForInstanceWithContext is the same as GetDeployablePatchSnapshotForInstance with the addition of +// the ability to pass a context and additional request options. +// +// See GetDeployablePatchSnapshotForInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetDeployablePatchSnapshotForInstanceWithContext(ctx aws.Context, input *GetDeployablePatchSnapshotForInstanceInput, opts ...request.Option) (*GetDeployablePatchSnapshotForInstanceOutput, error) { + req, out := c.GetDeployablePatchSnapshotForInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDocument = "GetDocument" + +// GetDocumentRequest generates a "aws/request.Request" representing the +// client's request for the GetDocument operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetDocument for more information on using the GetDocument +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDocumentRequest method. +// req, resp := client.GetDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDocument +func (c *SSM) GetDocumentRequest(input *GetDocumentInput) (req *request.Request, output *GetDocumentOutput) { + op := &request.Operation{ + Name: opGetDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDocumentInput{} + } + + output = &GetDocumentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDocument API operation for Amazon Simple Systems Manager (SSM). +// +// Gets the contents of the specified Systems Manager document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetDocument for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDocument +func (c *SSM) GetDocument(input *GetDocumentInput) (*GetDocumentOutput, error) { + req, out := c.GetDocumentRequest(input) + return out, req.Send() +} + +// GetDocumentWithContext is the same as GetDocument with the addition of +// the ability to pass a context and additional request options. +// +// See GetDocument for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetDocumentWithContext(ctx aws.Context, input *GetDocumentInput, opts ...request.Option) (*GetDocumentOutput, error) { + req, out := c.GetDocumentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetInventory = "GetInventory" + +// GetInventoryRequest generates a "aws/request.Request" representing the +// client's request for the GetInventory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInventory for more information on using the GetInventory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetInventoryRequest method. 
+// req, resp := client.GetInventoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetInventory +func (c *SSM) GetInventoryRequest(input *GetInventoryInput) (req *request.Request, output *GetInventoryOutput) { + op := &request.Operation{ + Name: opGetInventory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetInventoryInput{} + } + + output = &GetInventoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInventory API operation for Amazon Simple Systems Manager (SSM). +// +// Query inventory information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetInventory for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilter +// The filter name is not valid. Verify the you entered the correct name and +// try again. +// +// * InvalidInventoryGroupException +// The specified inventory group is not valid. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidTypeNameException +// The parameter type name is not valid. +// +// * InvalidAggregatorException +// The specified aggregator is not valid for inventory groups. Verify that the +// aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation. +// +// * InvalidResultAttributeException +// The specified inventory item result attribute is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetInventory +func (c *SSM) GetInventory(input *GetInventoryInput) (*GetInventoryOutput, error) { + req, out := c.GetInventoryRequest(input) + return out, req.Send() +} + +// GetInventoryWithContext is the same as GetInventory with the addition of +// the ability to pass a context and additional request options. +// +// See GetInventory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventoryWithContext(ctx aws.Context, input *GetInventoryInput, opts ...request.Option) (*GetInventoryOutput, error) { + req, out := c.GetInventoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetInventoryPages iterates over the pages of a GetInventory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetInventory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetInventory operation. +// pageNum := 0 +// err := client.GetInventoryPages(params, +// func(page *ssm.GetInventoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetInventoryPages(input *GetInventoryInput, fn func(*GetInventoryOutput, bool) bool) error { + return c.GetInventoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetInventoryPagesWithContext same as GetInventoryPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventoryPagesWithContext(ctx aws.Context, input *GetInventoryInput, fn func(*GetInventoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetInventoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetInventoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetInventoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetInventorySchema = "GetInventorySchema" + +// GetInventorySchemaRequest generates a "aws/request.Request" representing the +// client's request for the GetInventorySchema operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInventorySchema for more information on using the GetInventorySchema +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetInventorySchemaRequest method. 
+// req, resp := client.GetInventorySchemaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetInventorySchema +func (c *SSM) GetInventorySchemaRequest(input *GetInventorySchemaInput) (req *request.Request, output *GetInventorySchemaOutput) { + op := &request.Operation{ + Name: opGetInventorySchema, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetInventorySchemaInput{} + } + + output = &GetInventorySchemaOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInventorySchema API operation for Amazon Simple Systems Manager (SSM). +// +// Return a list of inventory type names for the account, or return a list of +// attribute names for a specific Inventory item type. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetInventorySchema for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidTypeNameException +// The parameter type name is not valid. +// +// * InvalidNextToken +// The specified token is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetInventorySchema +func (c *SSM) GetInventorySchema(input *GetInventorySchemaInput) (*GetInventorySchemaOutput, error) { + req, out := c.GetInventorySchemaRequest(input) + return out, req.Send() +} + +// GetInventorySchemaWithContext is the same as GetInventorySchema with the addition of +// the ability to pass a context and additional request options. +// +// See GetInventorySchema for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventorySchemaWithContext(ctx aws.Context, input *GetInventorySchemaInput, opts ...request.Option) (*GetInventorySchemaOutput, error) { + req, out := c.GetInventorySchemaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetInventorySchemaPages iterates over the pages of a GetInventorySchema operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetInventorySchema method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetInventorySchema operation. 
+// pageNum := 0 +// err := client.GetInventorySchemaPages(params, +// func(page *ssm.GetInventorySchemaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetInventorySchemaPages(input *GetInventorySchemaInput, fn func(*GetInventorySchemaOutput, bool) bool) error { + return c.GetInventorySchemaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetInventorySchemaPagesWithContext same as GetInventorySchemaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventorySchemaPagesWithContext(ctx aws.Context, input *GetInventorySchemaInput, fn func(*GetInventorySchemaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetInventorySchemaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetInventorySchemaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetInventorySchemaOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetMaintenanceWindow = "GetMaintenanceWindow" + +// GetMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the GetMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetMaintenanceWindow for more information on using the GetMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMaintenanceWindowRequest method. +// req, resp := client.GetMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindow +func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req *request.Request, output *GetMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opGetMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMaintenanceWindowInput{} + } + + output = &GetMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindow +func (c *SSM) GetMaintenanceWindow(input *GetMaintenanceWindowInput) (*GetMaintenanceWindowOutput, error) { + req, out := c.GetMaintenanceWindowRequest(input) + return out, req.Send() +} + +// GetMaintenanceWindowWithContext is the same as GetMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See GetMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetMaintenanceWindowWithContext(ctx aws.Context, input *GetMaintenanceWindowInput, opts ...request.Option) (*GetMaintenanceWindowOutput, error) { + req, out := c.GetMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMaintenanceWindowExecution = "GetMaintenanceWindowExecution" + +// GetMaintenanceWindowExecutionRequest generates a "aws/request.Request" representing the +// client's request for the GetMaintenanceWindowExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMaintenanceWindowExecution for more information on using the GetMaintenanceWindowExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//
+//    // Example sending a request using the GetMaintenanceWindowExecutionRequest method.
+//    req, resp := client.GetMaintenanceWindowExecutionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecution
+func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowExecutionInput) (req *request.Request, output *GetMaintenanceWindowExecutionOutput) {
+	op := &request.Operation{
+		Name:       opGetMaintenanceWindowExecution,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetMaintenanceWindowExecutionInput{}
+	}
+
+	output = &GetMaintenanceWindowExecutionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM).
+//
+// Retrieves details about a specific maintenance window execution.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation GetMaintenanceWindowExecution for usage and error information.
+//
+// Returned Error Types:
+//   * DoesNotExistException
+//   Error returned when the ID specified for a resource, such as a maintenance
+//   window or Patch baseline, doesn't exist.
+//
+//   For information about resource quotas in Systems Manager, see Systems Manager
+//   service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm)
+//   in the AWS General Reference.
+//
+//   * InternalServerError
+//   An error occurred on the server side.
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecution +func (c *SSM) GetMaintenanceWindowExecution(input *GetMaintenanceWindowExecutionInput) (*GetMaintenanceWindowExecutionOutput, error) { + req, out := c.GetMaintenanceWindowExecutionRequest(input) + return out, req.Send() +} + +// GetMaintenanceWindowExecutionWithContext is the same as GetMaintenanceWindowExecution with the addition of +// the ability to pass a context and additional request options. +// +// See GetMaintenanceWindowExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetMaintenanceWindowExecutionWithContext(ctx aws.Context, input *GetMaintenanceWindowExecutionInput, opts ...request.Option) (*GetMaintenanceWindowExecutionOutput, error) { + req, out := c.GetMaintenanceWindowExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMaintenanceWindowExecutionTask = "GetMaintenanceWindowExecutionTask" + +// GetMaintenanceWindowExecutionTaskRequest generates a "aws/request.Request" representing the +// client's request for the GetMaintenanceWindowExecutionTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMaintenanceWindowExecutionTask for more information on using the GetMaintenanceWindowExecutionTask +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMaintenanceWindowExecutionTaskRequest method. +// req, resp := client.GetMaintenanceWindowExecutionTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecutionTask +func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWindowExecutionTaskInput) (req *request.Request, output *GetMaintenanceWindowExecutionTaskOutput) { + op := &request.Operation{ + Name: opGetMaintenanceWindowExecutionTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMaintenanceWindowExecutionTaskInput{} + } + + output = &GetMaintenanceWindowExecutionTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMaintenanceWindowExecutionTask API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the details about a specific task run as part of a maintenance +// window execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetMaintenanceWindowExecutionTask for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. 
+// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecutionTask +func (c *SSM) GetMaintenanceWindowExecutionTask(input *GetMaintenanceWindowExecutionTaskInput) (*GetMaintenanceWindowExecutionTaskOutput, error) { + req, out := c.GetMaintenanceWindowExecutionTaskRequest(input) + return out, req.Send() +} + +// GetMaintenanceWindowExecutionTaskWithContext is the same as GetMaintenanceWindowExecutionTask with the addition of +// the ability to pass a context and additional request options. +// +// See GetMaintenanceWindowExecutionTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetMaintenanceWindowExecutionTaskWithContext(ctx aws.Context, input *GetMaintenanceWindowExecutionTaskInput, opts ...request.Option) (*GetMaintenanceWindowExecutionTaskOutput, error) { + req, out := c.GetMaintenanceWindowExecutionTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMaintenanceWindowExecutionTaskInvocation = "GetMaintenanceWindowExecutionTaskInvocation" + +// GetMaintenanceWindowExecutionTaskInvocationRequest generates a "aws/request.Request" representing the +// client's request for the GetMaintenanceWindowExecutionTaskInvocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetMaintenanceWindowExecutionTaskInvocation for more information on using the GetMaintenanceWindowExecutionTaskInvocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMaintenanceWindowExecutionTaskInvocationRequest method. +// req, resp := client.GetMaintenanceWindowExecutionTaskInvocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecutionTaskInvocation +func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaintenanceWindowExecutionTaskInvocationInput) (req *request.Request, output *GetMaintenanceWindowExecutionTaskInvocationOutput) { + op := &request.Operation{ + Name: opGetMaintenanceWindowExecutionTaskInvocation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMaintenanceWindowExecutionTaskInvocationInput{} + } + + output = &GetMaintenanceWindowExecutionTaskInvocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMaintenanceWindowExecutionTaskInvocation API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves information about a specific task running on a specific target. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetMaintenanceWindowExecutionTaskInvocation for usage and error information. 
+// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowExecutionTaskInvocation +func (c *SSM) GetMaintenanceWindowExecutionTaskInvocation(input *GetMaintenanceWindowExecutionTaskInvocationInput) (*GetMaintenanceWindowExecutionTaskInvocationOutput, error) { + req, out := c.GetMaintenanceWindowExecutionTaskInvocationRequest(input) + return out, req.Send() +} + +// GetMaintenanceWindowExecutionTaskInvocationWithContext is the same as GetMaintenanceWindowExecutionTaskInvocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetMaintenanceWindowExecutionTaskInvocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationWithContext(ctx aws.Context, input *GetMaintenanceWindowExecutionTaskInvocationInput, opts ...request.Option) (*GetMaintenanceWindowExecutionTaskInvocationOutput, error) { + req, out := c.GetMaintenanceWindowExecutionTaskInvocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetMaintenanceWindowTask = "GetMaintenanceWindowTask" + +// GetMaintenanceWindowTaskRequest generates a "aws/request.Request" representing the +// client's request for the GetMaintenanceWindowTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMaintenanceWindowTask for more information on using the GetMaintenanceWindowTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMaintenanceWindowTaskRequest method. +// req, resp := client.GetMaintenanceWindowTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowTask +func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInput) (req *request.Request, output *GetMaintenanceWindowTaskOutput) { + op := &request.Operation{ + Name: opGetMaintenanceWindowTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMaintenanceWindowTaskInput{} + } + + output = &GetMaintenanceWindowTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the tasks in a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetMaintenanceWindowTask for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetMaintenanceWindowTask +func (c *SSM) GetMaintenanceWindowTask(input *GetMaintenanceWindowTaskInput) (*GetMaintenanceWindowTaskOutput, error) { + req, out := c.GetMaintenanceWindowTaskRequest(input) + return out, req.Send() +} + +// GetMaintenanceWindowTaskWithContext is the same as GetMaintenanceWindowTask with the addition of +// the ability to pass a context and additional request options. +// +// See GetMaintenanceWindowTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetMaintenanceWindowTaskWithContext(ctx aws.Context, input *GetMaintenanceWindowTaskInput, opts ...request.Option) (*GetMaintenanceWindowTaskOutput, error) { + req, out := c.GetMaintenanceWindowTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpsItem = "GetOpsItem" + +// GetOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the GetOpsItem operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOpsItem for more information on using the GetOpsItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOpsItemRequest method. +// req, resp := client.GetOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsItem +func (c *SSM) GetOpsItemRequest(input *GetOpsItemInput) (req *request.Request, output *GetOpsItemOutput) { + op := &request.Operation{ + Name: opGetOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpsItemInput{} + } + + output = &GetOpsItemOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Get information about an OpsItem by using the ID. You must have permission +// in AWS Identity and Access Management (IAM) to view information about an +// OpsItem. For more information, see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. 
For more information, see AWS Systems Manager OpsCenter +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetOpsItem for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * OpsItemNotFoundException +// The specified OpsItem ID doesn't exist. Verify the ID and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsItem +func (c *SSM) GetOpsItem(input *GetOpsItemInput) (*GetOpsItemOutput, error) { + req, out := c.GetOpsItemRequest(input) + return out, req.Send() +} + +// GetOpsItemWithContext is the same as GetOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetOpsItemWithContext(ctx aws.Context, input *GetOpsItemInput, opts ...request.Option) (*GetOpsItemOutput, error) { + req, out := c.GetOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpsSummary = "GetOpsSummary" + +// GetOpsSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetOpsSummary operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOpsSummary for more information on using the GetOpsSummary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOpsSummaryRequest method. +// req, resp := client.GetOpsSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsSummary +func (c *SSM) GetOpsSummaryRequest(input *GetOpsSummaryInput) (req *request.Request, output *GetOpsSummaryOutput) { + op := &request.Operation{ + Name: opGetOpsSummary, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetOpsSummaryInput{} + } + + output = &GetOpsSummaryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpsSummary API operation for Amazon Simple Systems Manager (SSM). +// +// View a summary of OpsItems based on specified filters and aggregators. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetOpsSummary for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ResourceDataSyncNotFoundException +// The specified sync name was not found. 
+//
+//   * InvalidFilter
+//   The filter name is not valid. Verify that you entered the correct name and
+//   try again.
+//
+//   * InvalidNextToken
+//   The specified token is not valid.
+//
+//   * InvalidTypeNameException
+//   The parameter type name is not valid.
+//
+//   * InvalidAggregatorException
+//   The specified aggregator is not valid for inventory groups. Verify that the
+//   aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsSummary
+func (c *SSM) GetOpsSummary(input *GetOpsSummaryInput) (*GetOpsSummaryOutput, error) {
+	req, out := c.GetOpsSummaryRequest(input)
+	return out, req.Send()
+}
+
+// GetOpsSummaryWithContext is the same as GetOpsSummary with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetOpsSummary for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) GetOpsSummaryWithContext(ctx aws.Context, input *GetOpsSummaryInput, opts ...request.Option) (*GetOpsSummaryOutput, error) {
+	req, out := c.GetOpsSummaryRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// GetOpsSummaryPages iterates over the pages of a GetOpsSummary operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetOpsSummary method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a GetOpsSummary operation.
+// pageNum := 0 +// err := client.GetOpsSummaryPages(params, +// func(page *ssm.GetOpsSummaryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetOpsSummaryPages(input *GetOpsSummaryInput, fn func(*GetOpsSummaryOutput, bool) bool) error { + return c.GetOpsSummaryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetOpsSummaryPagesWithContext same as GetOpsSummaryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetOpsSummaryPagesWithContext(ctx aws.Context, input *GetOpsSummaryInput, fn func(*GetOpsSummaryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetOpsSummaryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetOpsSummaryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetOpsSummaryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetParameter = "GetParameter" + +// GetParameterRequest generates a "aws/request.Request" representing the +// client's request for the GetParameter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetParameter for more information on using the GetParameter +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetParameterRequest method. +// req, resp := client.GetParameterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameter +func (c *SSM) GetParameterRequest(input *GetParameterInput) (req *request.Request, output *GetParameterOutput) { + op := &request.Operation{ + Name: opGetParameter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetParameterInput{} + } + + output = &GetParameterOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetParameter API operation for Amazon Simple Systems Manager (SSM). +// +// Get information about a parameter by using the parameter name. Don't confuse +// this API action with the GetParameters API action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetParameter for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidKeyId +// The query key ID is not valid. +// +// * ParameterNotFound +// The parameter could not be found. Verify the name and try again. +// +// * ParameterVersionNotFound +// The specified parameter version was not found. Verify the parameter name +// and version, and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameter +func (c *SSM) GetParameter(input *GetParameterInput) (*GetParameterOutput, error) { + req, out := c.GetParameterRequest(input) + return out, req.Send() +} + +// GetParameterWithContext is the same as GetParameter with the addition of +// the ability to pass a context and additional request options. +// +// See GetParameter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParameterWithContext(ctx aws.Context, input *GetParameterInput, opts ...request.Option) (*GetParameterOutput, error) { + req, out := c.GetParameterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetParameterHistory = "GetParameterHistory" + +// GetParameterHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetParameterHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetParameterHistory for more information on using the GetParameterHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetParameterHistoryRequest method. 
+// req, resp := client.GetParameterHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameterHistory +func (c *SSM) GetParameterHistoryRequest(input *GetParameterHistoryInput) (req *request.Request, output *GetParameterHistoryOutput) { + op := &request.Operation{ + Name: opGetParameterHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetParameterHistoryInput{} + } + + output = &GetParameterHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetParameterHistory API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the history of all changes to a parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetParameterHistory for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ParameterNotFound +// The parameter could not be found. Verify the name and try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidKeyId +// The query key ID is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameterHistory +func (c *SSM) GetParameterHistory(input *GetParameterHistoryInput) (*GetParameterHistoryOutput, error) { + req, out := c.GetParameterHistoryRequest(input) + return out, req.Send() +} + +// GetParameterHistoryWithContext is the same as GetParameterHistory with the addition of +// the ability to pass a context and additional request options. +// +// See GetParameterHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParameterHistoryWithContext(ctx aws.Context, input *GetParameterHistoryInput, opts ...request.Option) (*GetParameterHistoryOutput, error) { + req, out := c.GetParameterHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetParameterHistoryPages iterates over the pages of a GetParameterHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetParameterHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetParameterHistory operation. 
+// pageNum := 0 +// err := client.GetParameterHistoryPages(params, +// func(page *ssm.GetParameterHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetParameterHistoryPages(input *GetParameterHistoryInput, fn func(*GetParameterHistoryOutput, bool) bool) error { + return c.GetParameterHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetParameterHistoryPagesWithContext same as GetParameterHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParameterHistoryPagesWithContext(ctx aws.Context, input *GetParameterHistoryInput, fn func(*GetParameterHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetParameterHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetParameterHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetParameterHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetParameters = "GetParameters" + +// GetParametersRequest generates a "aws/request.Request" representing the +// client's request for the GetParameters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetParameters for more information on using the GetParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetParametersRequest method. +// req, resp := client.GetParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameters +func (c *SSM) GetParametersRequest(input *GetParametersInput) (req *request.Request, output *GetParametersOutput) { + op := &request.Operation{ + Name: opGetParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetParametersInput{} + } + + output = &GetParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetParameters API operation for Amazon Simple Systems Manager (SSM). +// +// Get details of a parameter. Don't confuse this API action with the GetParameter +// API action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetParameters for usage and error information. +// +// Returned Error Types: +// * InvalidKeyId +// The query key ID is not valid. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameters +func (c *SSM) GetParameters(input *GetParametersInput) (*GetParametersOutput, error) { + req, out := c.GetParametersRequest(input) + return out, req.Send() +} + +// GetParametersWithContext is the same as GetParameters with the addition of +// the ability to pass a context and additional request options. +// +// See GetParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParametersWithContext(ctx aws.Context, input *GetParametersInput, opts ...request.Option) (*GetParametersOutput, error) { + req, out := c.GetParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetParametersByPath = "GetParametersByPath" + +// GetParametersByPathRequest generates a "aws/request.Request" representing the +// client's request for the GetParametersByPath operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetParametersByPath for more information on using the GetParametersByPath +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetParametersByPathRequest method. 
+// req, resp := client.GetParametersByPathRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParametersByPath +func (c *SSM) GetParametersByPathRequest(input *GetParametersByPathInput) (req *request.Request, output *GetParametersByPathOutput) { + op := &request.Operation{ + Name: opGetParametersByPath, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetParametersByPathInput{} + } + + output = &GetParametersByPathOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetParametersByPath API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieve information about one or more parameters in a specific hierarchy. +// +// Request results are returned on a best-effort basis. If you specify MaxResults +// in the request, the response includes information up to the limit specified. +// The number of items returned, however, can be between zero and the value +// of MaxResults. If the service reaches an internal limit while processing +// the results, it stops the operation and returns the matching values up to +// that point and a NextToken. You can specify the NextToken in a subsequent +// call to get the next set of results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetParametersByPath for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilterKey +// The specified key is not valid. 
+// +// * InvalidFilterOption +// The specified filter option is not valid. Valid options are Equals and BeginsWith. +// For Path filter, valid options are Recursive and OneLevel. +// +// * InvalidFilterValue +// The filter value is not valid. Verify the value and try again. +// +// * InvalidKeyId +// The query key ID is not valid. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParametersByPath +func (c *SSM) GetParametersByPath(input *GetParametersByPathInput) (*GetParametersByPathOutput, error) { + req, out := c.GetParametersByPathRequest(input) + return out, req.Send() +} + +// GetParametersByPathWithContext is the same as GetParametersByPath with the addition of +// the ability to pass a context and additional request options. +// +// See GetParametersByPath for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParametersByPathWithContext(ctx aws.Context, input *GetParametersByPathInput, opts ...request.Option) (*GetParametersByPathOutput, error) { + req, out := c.GetParametersByPathRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetParametersByPathPages iterates over the pages of a GetParametersByPath operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetParametersByPath method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetParametersByPath operation. 
+// pageNum := 0 +// err := client.GetParametersByPathPages(params, +// func(page *ssm.GetParametersByPathOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetParametersByPathPages(input *GetParametersByPathInput, fn func(*GetParametersByPathOutput, bool) bool) error { + return c.GetParametersByPathPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetParametersByPathPagesWithContext same as GetParametersByPathPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetParametersByPathPagesWithContext(ctx aws.Context, input *GetParametersByPathInput, fn func(*GetParametersByPathOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetParametersByPathInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetParametersByPathRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetParametersByPathOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetPatchBaseline = "GetPatchBaseline" + +// GetPatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the GetPatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetPatchBaseline for more information on using the GetPatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPatchBaselineRequest method. +// req, resp := client.GetPatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetPatchBaseline +func (c *SSM) GetPatchBaselineRequest(input *GetPatchBaselineInput) (req *request.Request, output *GetPatchBaselineOutput) { + op := &request.Operation{ + Name: opGetPatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPatchBaselineInput{} + } + + output = &GetPatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves information about a patch baseline. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetPatchBaseline for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. 
+// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetPatchBaseline +func (c *SSM) GetPatchBaseline(input *GetPatchBaselineInput) (*GetPatchBaselineOutput, error) { + req, out := c.GetPatchBaselineRequest(input) + return out, req.Send() +} + +// GetPatchBaselineWithContext is the same as GetPatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See GetPatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetPatchBaselineWithContext(ctx aws.Context, input *GetPatchBaselineInput, opts ...request.Option) (*GetPatchBaselineOutput, error) { + req, out := c.GetPatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPatchBaselineForPatchGroup = "GetPatchBaselineForPatchGroup" + +// GetPatchBaselineForPatchGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetPatchBaselineForPatchGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPatchBaselineForPatchGroup for more information on using the GetPatchBaselineForPatchGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetPatchBaselineForPatchGroupRequest method. +// req, resp := client.GetPatchBaselineForPatchGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetPatchBaselineForPatchGroup +func (c *SSM) GetPatchBaselineForPatchGroupRequest(input *GetPatchBaselineForPatchGroupInput) (req *request.Request, output *GetPatchBaselineForPatchGroupOutput) { + op := &request.Operation{ + Name: opGetPatchBaselineForPatchGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPatchBaselineForPatchGroupInput{} + } + + output = &GetPatchBaselineForPatchGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPatchBaselineForPatchGroup API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves the patch baseline that should be used for the specified patch +// group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetPatchBaselineForPatchGroup for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetPatchBaselineForPatchGroup +func (c *SSM) GetPatchBaselineForPatchGroup(input *GetPatchBaselineForPatchGroupInput) (*GetPatchBaselineForPatchGroupOutput, error) { + req, out := c.GetPatchBaselineForPatchGroupRequest(input) + return out, req.Send() +} + +// GetPatchBaselineForPatchGroupWithContext is the same as GetPatchBaselineForPatchGroup with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetPatchBaselineForPatchGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetPatchBaselineForPatchGroupWithContext(ctx aws.Context, input *GetPatchBaselineForPatchGroupInput, opts ...request.Option) (*GetPatchBaselineForPatchGroupOutput, error) { + req, out := c.GetPatchBaselineForPatchGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceSetting = "GetServiceSetting" + +// GetServiceSettingRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceSetting operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceSetting for more information on using the GetServiceSetting +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceSettingRequest method. 
+// req, resp := client.GetServiceSettingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetServiceSetting +func (c *SSM) GetServiceSettingRequest(input *GetServiceSettingInput) (req *request.Request, output *GetServiceSettingOutput) { + op := &request.Operation{ + Name: opGetServiceSetting, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceSettingInput{} + } + + output = &GetServiceSettingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceSetting API operation for Amazon Simple Systems Manager (SSM). +// +// ServiceSetting is an account-level setting for an AWS service. This setting +// defines how a user interacts with or uses a service or a feature of a service. +// For example, if an AWS service charges money to the account based on feature +// or service usage, then the AWS service team might create a default setting +// of "false". This means the user can't use this feature unless they change +// the setting to "true" and intentionally opt in for a paid feature. +// +// Services map a SettingId object to a setting value. AWS services teams define +// the default value for a SettingId. You can't create a new SettingId, but +// you can overwrite the default value if you have the ssm:UpdateServiceSetting +// permission for the setting. Use the UpdateServiceSetting API action to change +// the default setting. Or use the ResetServiceSetting to change the value back +// to the original value defined by the AWS service team. +// +// Query the current service setting for the account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation GetServiceSetting for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ServiceSettingNotFound +// The specified service setting was not found. Either the service name or the +// setting has not been provisioned by the AWS service team. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetServiceSetting +func (c *SSM) GetServiceSetting(input *GetServiceSettingInput) (*GetServiceSettingOutput, error) { + req, out := c.GetServiceSettingRequest(input) + return out, req.Send() +} + +// GetServiceSettingWithContext is the same as GetServiceSetting with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceSetting for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetServiceSettingWithContext(ctx aws.Context, input *GetServiceSettingInput, opts ...request.Option) (*GetServiceSettingOutput, error) { + req, out := c.GetServiceSettingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opLabelParameterVersion = "LabelParameterVersion" + +// LabelParameterVersionRequest generates a "aws/request.Request" representing the +// client's request for the LabelParameterVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See LabelParameterVersion for more information on using the LabelParameterVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LabelParameterVersionRequest method. +// req, resp := client.LabelParameterVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/LabelParameterVersion +func (c *SSM) LabelParameterVersionRequest(input *LabelParameterVersionInput) (req *request.Request, output *LabelParameterVersionOutput) { + op := &request.Operation{ + Name: opLabelParameterVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &LabelParameterVersionInput{} + } + + output = &LabelParameterVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// LabelParameterVersion API operation for Amazon Simple Systems Manager (SSM). +// +// A parameter label is a user-defined alias to help you manage different versions +// of a parameter. When you modify a parameter, Systems Manager automatically +// saves a new version and increments the version number by one. A label can +// help you remember the purpose of a parameter when there are multiple versions. +// +// Parameter labels have the following requirements and restrictions. +// +// * A version of a parameter can have a maximum of 10 labels. +// +// * You can't attach the same label to different versions of the same parameter. +// For example, if version 1 has the label Production, then you can't attach +// Production to version 2. +// +// * You can move a label from one version of a parameter to another. +// +// * You can't create a label when you create a new parameter. You must attach +// a label to a specific version of a parameter. 
+// +// * You can't delete a parameter label. If you no longer want to use a parameter +// label, then you must move it to a different version of a parameter. +// +// * A label can have a maximum of 100 characters. +// +// * Labels can contain letters (case sensitive), numbers, periods (.), hyphens +// (-), or underscores (_). +// +// * Labels can't begin with a number, "aws," or "ssm" (not case sensitive). +// If a label fails to meet these requirements, then the label is not associated +// with a parameter and the system displays it in the list of InvalidLabels. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation LabelParameterVersion for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// * ParameterNotFound +// The parameter could not be found. Verify the name and try again. +// +// * ParameterVersionNotFound +// The specified parameter version was not found. Verify the parameter name +// and version, and try again. +// +// * ParameterVersionLabelLimitExceeded +// A parameter version can have a maximum of ten labels. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/LabelParameterVersion +func (c *SSM) LabelParameterVersion(input *LabelParameterVersionInput) (*LabelParameterVersionOutput, error) { + req, out := c.LabelParameterVersionRequest(input) + return out, req.Send() +} + +// LabelParameterVersionWithContext is the same as LabelParameterVersion with the addition of +// the ability to pass a context and additional request options. +// +// See LabelParameterVersion for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) LabelParameterVersionWithContext(ctx aws.Context, input *LabelParameterVersionInput, opts ...request.Option) (*LabelParameterVersionOutput, error) { + req, out := c.LabelParameterVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAssociationVersions = "ListAssociationVersions" + +// ListAssociationVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssociationVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAssociationVersions for more information on using the ListAssociationVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAssociationVersionsRequest method. 
+// req, resp := client.ListAssociationVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListAssociationVersions +func (c *SSM) ListAssociationVersionsRequest(input *ListAssociationVersionsInput) (req *request.Request, output *ListAssociationVersionsOutput) { + op := &request.Operation{ + Name: opListAssociationVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociationVersionsInput{} + } + + output = &ListAssociationVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAssociationVersions API operation for Amazon Simple Systems Manager (SSM). +// +// Retrieves all versions of an association for a specific association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListAssociationVersions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * AssociationDoesNotExist +// The specified association does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListAssociationVersions +func (c *SSM) ListAssociationVersions(input *ListAssociationVersionsInput) (*ListAssociationVersionsOutput, error) { + req, out := c.ListAssociationVersionsRequest(input) + return out, req.Send() +} + +// ListAssociationVersionsWithContext is the same as ListAssociationVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListAssociationVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListAssociationVersionsWithContext(ctx aws.Context, input *ListAssociationVersionsInput, opts ...request.Option) (*ListAssociationVersionsOutput, error) { + req, out := c.ListAssociationVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAssociationVersionsPages iterates over the pages of a ListAssociationVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAssociationVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAssociationVersions operation. 
+// pageNum := 0 +// err := client.ListAssociationVersionsPages(params, +// func(page *ssm.ListAssociationVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListAssociationVersionsPages(input *ListAssociationVersionsInput, fn func(*ListAssociationVersionsOutput, bool) bool) error { + return c.ListAssociationVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAssociationVersionsPagesWithContext same as ListAssociationVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListAssociationVersionsPagesWithContext(ctx aws.Context, input *ListAssociationVersionsInput, fn func(*ListAssociationVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAssociationVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAssociationVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAssociationVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAssociations = "ListAssociations" + +// ListAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the ListAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListAssociations for more information on using the ListAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAssociationsRequest method. +// req, resp := client.ListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListAssociations +func (c *SSM) ListAssociationsRequest(input *ListAssociationsInput) (req *request.Request, output *ListAssociationsOutput) { + op := &request.Operation{ + Name: opListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociationsInput{} + } + + output = &ListAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAssociations API operation for Amazon Simple Systems Manager (SSM). +// +// Returns all State Manager associations in the current AWS account and Region. +// You can limit the results to a specific State Manager association document +// or instance by specifying a filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListAssociations for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListAssociations +func (c *SSM) ListAssociations(input *ListAssociationsInput) (*ListAssociationsOutput, error) { + req, out := c.ListAssociationsRequest(input) + return out, req.Send() +} + +// ListAssociationsWithContext is the same as ListAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See ListAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListAssociationsWithContext(ctx aws.Context, input *ListAssociationsInput, opts ...request.Option) (*ListAssociationsOutput, error) { + req, out := c.ListAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAssociationsPages iterates over the pages of a ListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAssociations operation. 
+// pageNum := 0 +// err := client.ListAssociationsPages(params, +// func(page *ssm.ListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListAssociationsPages(input *ListAssociationsInput, fn func(*ListAssociationsOutput, bool) bool) error { + return c.ListAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAssociationsPagesWithContext same as ListAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListAssociationsPagesWithContext(ctx aws.Context, input *ListAssociationsInput, fn func(*ListAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListCommandInvocations = "ListCommandInvocations" + +// ListCommandInvocationsRequest generates a "aws/request.Request" representing the +// client's request for the ListCommandInvocations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListCommandInvocations for more information on using the ListCommandInvocations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListCommandInvocationsRequest method. +// req, resp := client.ListCommandInvocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListCommandInvocations +func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput) (req *request.Request, output *ListCommandInvocationsOutput) { + op := &request.Operation{ + Name: opListCommandInvocations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCommandInvocationsInput{} + } + + output = &ListCommandInvocationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCommandInvocations API operation for Amazon Simple Systems Manager (SSM). +// +// An invocation is a copy of a command sent to a specific instance. A command +// can apply to one or more instances. A command invocation applies to one instance. +// For example, if a user runs SendCommand against three instances, then a command +// invocation is created for each requested instance ID. ListCommandInvocations +// provide status about command execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListCommandInvocations for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidCommandId +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListCommandInvocations +func (c *SSM) ListCommandInvocations(input *ListCommandInvocationsInput) (*ListCommandInvocationsOutput, error) { + req, out := c.ListCommandInvocationsRequest(input) + return out, req.Send() +} + +// ListCommandInvocationsWithContext is the same as ListCommandInvocations with the addition of +// the ability to pass a context and additional request options. +// +// See ListCommandInvocations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListCommandInvocationsWithContext(ctx aws.Context, input *ListCommandInvocationsInput, opts ...request.Option) (*ListCommandInvocationsOutput, error) { + req, out := c.ListCommandInvocationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListCommandInvocationsPages iterates over the pages of a ListCommandInvocations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCommandInvocations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCommandInvocations operation. +// pageNum := 0 +// err := client.ListCommandInvocationsPages(params, +// func(page *ssm.ListCommandInvocationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListCommandInvocationsPages(input *ListCommandInvocationsInput, fn func(*ListCommandInvocationsOutput, bool) bool) error { + return c.ListCommandInvocationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCommandInvocationsPagesWithContext same as ListCommandInvocationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListCommandInvocationsPagesWithContext(ctx aws.Context, input *ListCommandInvocationsInput, fn func(*ListCommandInvocationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCommandInvocationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCommandInvocationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCommandInvocationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListCommands = "ListCommands" + +// ListCommandsRequest generates a "aws/request.Request" representing the +// client's request for the ListCommands operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListCommands for more information on using the ListCommands +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListCommandsRequest method. +// req, resp := client.ListCommandsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListCommands +func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Request, output *ListCommandsOutput) { + op := &request.Operation{ + Name: opListCommands, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCommandsInput{} + } + + output = &ListCommandsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCommands API operation for Amazon Simple Systems Manager (SSM). +// +// Lists the commands requested by users of the AWS account. +// +// Returns awserr.Error for service API and SDK errors. 
+// Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListCommands for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidCommandId +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidFilterKey +// The specified key is not valid. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListCommands +func (c *SSM) ListCommands(input *ListCommandsInput) (*ListCommandsOutput, error) { + req, out := c.ListCommandsRequest(input) + return out, req.Send() +} + +// ListCommandsWithContext is the same as ListCommands with the addition of +// the ability to pass a context and additional request options. +// +// See ListCommands for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListCommandsWithContext(ctx aws.Context, input *ListCommandsInput, opts ...request.Option) (*ListCommandsOutput, error) { + req, out := c.ListCommandsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListCommandsPages iterates over the pages of a ListCommands operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCommands method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCommands operation. +// pageNum := 0 +// err := client.ListCommandsPages(params, +// func(page *ssm.ListCommandsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListCommandsPages(input *ListCommandsInput, fn func(*ListCommandsOutput, bool) bool) error { + return c.ListCommandsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCommandsPagesWithContext same as ListCommandsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListCommandsPagesWithContext(ctx aws.Context, input *ListCommandsInput, fn func(*ListCommandsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCommandsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCommandsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCommandsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListComplianceItems = "ListComplianceItems" + +// ListComplianceItemsRequest generates a "aws/request.Request" representing the +// client's request for the ListComplianceItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListComplianceItems for more information on using the ListComplianceItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListComplianceItemsRequest method. +// req, resp := client.ListComplianceItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListComplianceItems +func (c *SSM) ListComplianceItemsRequest(input *ListComplianceItemsInput) (req *request.Request, output *ListComplianceItemsOutput) { + op := &request.Operation{ + Name: opListComplianceItems, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListComplianceItemsInput{} + } + + output = &ListComplianceItemsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListComplianceItems API operation for Amazon Simple Systems Manager (SSM). 
+// +// For a specified resource ID, this API action returns a list of compliance +// statuses for different resource types. Currently, you can only specify one +// resource ID per call. List results depend on the criteria specified in the +// filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListComplianceItems for usage and error information. +// +// Returned Error Types: +// * InvalidResourceType +// The resource type is not valid. For example, if you are attempting to tag +// an instance, the instance must be a registered, managed instance. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidFilter +// The filter name is not valid. Verify that you entered the correct name and +// try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListComplianceItems +func (c *SSM) ListComplianceItems(input *ListComplianceItemsInput) (*ListComplianceItemsOutput, error) { + req, out := c.ListComplianceItemsRequest(input) + return out, req.Send() +} + +// ListComplianceItemsWithContext is the same as ListComplianceItems with the addition of +// the ability to pass a context and additional request options. +// +// See ListComplianceItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) ListComplianceItemsWithContext(ctx aws.Context, input *ListComplianceItemsInput, opts ...request.Option) (*ListComplianceItemsOutput, error) { + req, out := c.ListComplianceItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListComplianceItemsPages iterates over the pages of a ListComplianceItems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListComplianceItems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComplianceItems operation. +// pageNum := 0 +// err := client.ListComplianceItemsPages(params, +// func(page *ssm.ListComplianceItemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListComplianceItemsPages(input *ListComplianceItemsInput, fn func(*ListComplianceItemsOutput, bool) bool) error { + return c.ListComplianceItemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComplianceItemsPagesWithContext same as ListComplianceItemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) ListComplianceItemsPagesWithContext(ctx aws.Context, input *ListComplianceItemsInput, fn func(*ListComplianceItemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListComplianceItemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListComplianceItemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListComplianceItemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListComplianceSummaries = "ListComplianceSummaries" + +// ListComplianceSummariesRequest generates a "aws/request.Request" representing the +// client's request for the ListComplianceSummaries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListComplianceSummaries for more information on using the ListComplianceSummaries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListComplianceSummariesRequest method. 
+// req, resp := client.ListComplianceSummariesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListComplianceSummaries +func (c *SSM) ListComplianceSummariesRequest(input *ListComplianceSummariesInput) (req *request.Request, output *ListComplianceSummariesOutput) { + op := &request.Operation{ + Name: opListComplianceSummaries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListComplianceSummariesInput{} + } + + output = &ListComplianceSummariesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListComplianceSummaries API operation for Amazon Simple Systems Manager (SSM). +// +// Returns a summary count of compliant and non-compliant resources for a compliance +// type. For example, this call can return State Manager associations, patches, +// or custom compliance types according to the filter criteria that you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListComplianceSummaries for usage and error information. +// +// Returned Error Types: +// * InvalidFilter +// The filter name is not valid. Verify that you entered the correct name and +// try again. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListComplianceSummaries +func (c *SSM) ListComplianceSummaries(input *ListComplianceSummariesInput) (*ListComplianceSummariesOutput, error) { + req, out := c.ListComplianceSummariesRequest(input) + return out, req.Send() +} + +// ListComplianceSummariesWithContext is the same as ListComplianceSummaries with the addition of +// the ability to pass a context and additional request options. +// +// See ListComplianceSummaries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListComplianceSummariesWithContext(ctx aws.Context, input *ListComplianceSummariesInput, opts ...request.Option) (*ListComplianceSummariesOutput, error) { + req, out := c.ListComplianceSummariesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListComplianceSummariesPages iterates over the pages of a ListComplianceSummaries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListComplianceSummaries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComplianceSummaries operation. 
+// pageNum := 0 +// err := client.ListComplianceSummariesPages(params, +// func(page *ssm.ListComplianceSummariesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListComplianceSummariesPages(input *ListComplianceSummariesInput, fn func(*ListComplianceSummariesOutput, bool) bool) error { + return c.ListComplianceSummariesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComplianceSummariesPagesWithContext same as ListComplianceSummariesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListComplianceSummariesPagesWithContext(ctx aws.Context, input *ListComplianceSummariesInput, fn func(*ListComplianceSummariesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListComplianceSummariesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListComplianceSummariesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListComplianceSummariesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDocumentVersions = "ListDocumentVersions" + +// ListDocumentVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDocumentVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListDocumentVersions for more information on using the ListDocumentVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDocumentVersionsRequest method. +// req, resp := client.ListDocumentVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocumentVersions +func (c *SSM) ListDocumentVersionsRequest(input *ListDocumentVersionsInput) (req *request.Request, output *ListDocumentVersionsOutput) { + op := &request.Operation{ + Name: opListDocumentVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDocumentVersionsInput{} + } + + output = &ListDocumentVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDocumentVersions API operation for Amazon Simple Systems Manager (SSM). +// +// List all versions for a document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListDocumentVersions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidDocument +// The specified document does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocumentVersions +func (c *SSM) ListDocumentVersions(input *ListDocumentVersionsInput) (*ListDocumentVersionsOutput, error) { + req, out := c.ListDocumentVersionsRequest(input) + return out, req.Send() +} + +// ListDocumentVersionsWithContext is the same as ListDocumentVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListDocumentVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListDocumentVersionsWithContext(ctx aws.Context, input *ListDocumentVersionsInput, opts ...request.Option) (*ListDocumentVersionsOutput, error) { + req, out := c.ListDocumentVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDocumentVersionsPages iterates over the pages of a ListDocumentVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDocumentVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDocumentVersions operation. 
+// pageNum := 0 +// err := client.ListDocumentVersionsPages(params, +// func(page *ssm.ListDocumentVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListDocumentVersionsPages(input *ListDocumentVersionsInput, fn func(*ListDocumentVersionsOutput, bool) bool) error { + return c.ListDocumentVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDocumentVersionsPagesWithContext same as ListDocumentVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListDocumentVersionsPagesWithContext(ctx aws.Context, input *ListDocumentVersionsInput, fn func(*ListDocumentVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDocumentVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDocumentVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDocumentVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDocuments = "ListDocuments" + +// ListDocumentsRequest generates a "aws/request.Request" representing the +// client's request for the ListDocuments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListDocuments for more information on using the ListDocuments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDocumentsRequest method. +// req, resp := client.ListDocumentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocuments +func (c *SSM) ListDocumentsRequest(input *ListDocumentsInput) (req *request.Request, output *ListDocumentsOutput) { + op := &request.Operation{ + Name: opListDocuments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDocumentsInput{} + } + + output = &ListDocumentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDocuments API operation for Amazon Simple Systems Manager (SSM). +// +// Returns all Systems Manager (SSM) documents in the current AWS account and +// Region. You can limit the results of this request by using a filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListDocuments for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// * InvalidFilterKey +// The specified key is not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListDocuments +func (c *SSM) ListDocuments(input *ListDocumentsInput) (*ListDocumentsOutput, error) { + req, out := c.ListDocumentsRequest(input) + return out, req.Send() +} + +// ListDocumentsWithContext is the same as ListDocuments with the addition of +// the ability to pass a context and additional request options. +// +// See ListDocuments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListDocumentsWithContext(ctx aws.Context, input *ListDocumentsInput, opts ...request.Option) (*ListDocumentsOutput, error) { + req, out := c.ListDocumentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDocumentsPages iterates over the pages of a ListDocuments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDocuments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDocuments operation. +// pageNum := 0 +// err := client.ListDocumentsPages(params, +// func(page *ssm.ListDocumentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListDocumentsPages(input *ListDocumentsInput, fn func(*ListDocumentsOutput, bool) bool) error { + return c.ListDocumentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDocumentsPagesWithContext same as ListDocumentsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListDocumentsPagesWithContext(ctx aws.Context, input *ListDocumentsInput, fn func(*ListDocumentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDocumentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDocumentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDocumentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListInventoryEntries = "ListInventoryEntries" + +// ListInventoryEntriesRequest generates a "aws/request.Request" representing the +// client's request for the ListInventoryEntries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInventoryEntries for more information on using the ListInventoryEntries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInventoryEntriesRequest method. 
+// req, resp := client.ListInventoryEntriesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListInventoryEntries
+func (c *SSM) ListInventoryEntriesRequest(input *ListInventoryEntriesInput) (req *request.Request, output *ListInventoryEntriesOutput) {
+ op := &request.Operation{
+ Name: opListInventoryEntries,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListInventoryEntriesInput{}
+ }
+
+ output = &ListInventoryEntriesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListInventoryEntries API operation for Amazon Simple Systems Manager (SSM).
+//
+// A list of inventory items returned by the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation ListInventoryEntries for usage and error information.
+//
+// Returned Error Types:
+// * InternalServerError
+// An error occurred on the server side.
+//
+// * InvalidInstanceId
+// The following problems can cause this exception:
+//
+// You do not have permission to access the instance.
+//
+// SSM Agent is not running. Verify that SSM Agent is running.
+//
+// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent.
+//
+// The instance is not in a valid state. Valid states are: Running, Pending, Stopped,
+// Stopping. Invalid states are: Shutting-down and Terminated.
+//
+// * InvalidTypeNameException
+// The parameter type name is not valid.
+//
+// * InvalidFilter
+// The filter name is not valid. Verify that you entered the correct name and
+// try again.
+//
+// * InvalidNextToken
+// The specified token is not valid.
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListInventoryEntries +func (c *SSM) ListInventoryEntries(input *ListInventoryEntriesInput) (*ListInventoryEntriesOutput, error) { + req, out := c.ListInventoryEntriesRequest(input) + return out, req.Send() +} + +// ListInventoryEntriesWithContext is the same as ListInventoryEntries with the addition of +// the ability to pass a context and additional request options. +// +// See ListInventoryEntries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListInventoryEntriesWithContext(ctx aws.Context, input *ListInventoryEntriesInput, opts ...request.Option) (*ListInventoryEntriesOutput, error) { + req, out := c.ListInventoryEntriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListResourceComplianceSummaries = "ListResourceComplianceSummaries" + +// ListResourceComplianceSummariesRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceComplianceSummaries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResourceComplianceSummaries for more information on using the ListResourceComplianceSummaries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+//
+//
+// // Example sending a request using the ListResourceComplianceSummariesRequest method.
+// req, resp := client.ListResourceComplianceSummariesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListResourceComplianceSummaries
+func (c *SSM) ListResourceComplianceSummariesRequest(input *ListResourceComplianceSummariesInput) (req *request.Request, output *ListResourceComplianceSummariesOutput) {
+ op := &request.Operation{
+ Name: opListResourceComplianceSummaries,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListResourceComplianceSummariesInput{}
+ }
+
+ output = &ListResourceComplianceSummariesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListResourceComplianceSummaries API operation for Amazon Simple Systems Manager (SSM).
+//
+// Returns a resource-level summary count. The summary includes information
+// about compliant and non-compliant statuses and detailed compliance-item severity
+// counts, according to the filter criteria you specify.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation ListResourceComplianceSummaries for usage and error information.
+//
+// Returned Error Types:
+// * InvalidFilter
+// The filter name is not valid. Verify that you entered the correct name and
+// try again.
+//
+// * InvalidNextToken
+// The specified token is not valid.
+//
+// * InternalServerError
+// An error occurred on the server side.
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListResourceComplianceSummaries +func (c *SSM) ListResourceComplianceSummaries(input *ListResourceComplianceSummariesInput) (*ListResourceComplianceSummariesOutput, error) { + req, out := c.ListResourceComplianceSummariesRequest(input) + return out, req.Send() +} + +// ListResourceComplianceSummariesWithContext is the same as ListResourceComplianceSummaries with the addition of +// the ability to pass a context and additional request options. +// +// See ListResourceComplianceSummaries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceComplianceSummariesWithContext(ctx aws.Context, input *ListResourceComplianceSummariesInput, opts ...request.Option) (*ListResourceComplianceSummariesOutput, error) { + req, out := c.ListResourceComplianceSummariesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListResourceComplianceSummariesPages iterates over the pages of a ListResourceComplianceSummaries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceComplianceSummaries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceComplianceSummaries operation. 
+// pageNum := 0 +// err := client.ListResourceComplianceSummariesPages(params, +// func(page *ssm.ListResourceComplianceSummariesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListResourceComplianceSummariesPages(input *ListResourceComplianceSummariesInput, fn func(*ListResourceComplianceSummariesOutput, bool) bool) error { + return c.ListResourceComplianceSummariesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResourceComplianceSummariesPagesWithContext same as ListResourceComplianceSummariesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceComplianceSummariesPagesWithContext(ctx aws.Context, input *ListResourceComplianceSummariesInput, fn func(*ListResourceComplianceSummariesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResourceComplianceSummariesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResourceComplianceSummariesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResourceComplianceSummariesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListResourceDataSync = "ListResourceDataSync" + +// ListResourceDataSyncRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceDataSync operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResourceDataSync for more information on using the ListResourceDataSync +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResourceDataSyncRequest method. +// req, resp := client.ListResourceDataSyncRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListResourceDataSync +func (c *SSM) ListResourceDataSyncRequest(input *ListResourceDataSyncInput) (req *request.Request, output *ListResourceDataSyncOutput) { + op := &request.Operation{ + Name: opListResourceDataSync, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListResourceDataSyncInput{} + } + + output = &ListResourceDataSyncOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResourceDataSync API operation for Amazon Simple Systems Manager (SSM). +// +// Lists your resource data sync configurations. Includes information about +// the last time a sync attempted to start, the last sync status, and the last +// time a sync successfully completed. +// +// The number of sync configurations might be too large to return using a single +// call to ListResourceDataSync. You can limit the number of sync configurations +// returned by using the MaxResults parameter. To determine whether there are +// more sync configurations to list, check the value of NextToken in the output. 
+// If there are more sync configurations to list, you can request them by specifying +// the NextToken returned in the call to the parameter of a subsequent call. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListResourceDataSync for usage and error information. +// +// Returned Error Types: +// * ResourceDataSyncInvalidConfigurationException +// The specified sync configuration is invalid. +// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidNextToken +// The specified token is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListResourceDataSync +func (c *SSM) ListResourceDataSync(input *ListResourceDataSyncInput) (*ListResourceDataSyncOutput, error) { + req, out := c.ListResourceDataSyncRequest(input) + return out, req.Send() +} + +// ListResourceDataSyncWithContext is the same as ListResourceDataSync with the addition of +// the ability to pass a context and additional request options. +// +// See ListResourceDataSync for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceDataSyncWithContext(ctx aws.Context, input *ListResourceDataSyncInput, opts ...request.Option) (*ListResourceDataSyncOutput, error) { + req, out := c.ListResourceDataSyncRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListResourceDataSyncPages iterates over the pages of a ListResourceDataSync operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceDataSync method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceDataSync operation. +// pageNum := 0 +// err := client.ListResourceDataSyncPages(params, +// func(page *ssm.ListResourceDataSyncOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListResourceDataSyncPages(input *ListResourceDataSyncInput, fn func(*ListResourceDataSyncOutput, bool) bool) error { + return c.ListResourceDataSyncPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResourceDataSyncPagesWithContext same as ListResourceDataSyncPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceDataSyncPagesWithContext(ctx aws.Context, input *ListResourceDataSyncInput, fn func(*ListResourceDataSyncOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResourceDataSyncInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResourceDataSyncRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResourceDataSyncOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListTagsForResource +func (c *SSM) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Simple Systems Manager (SSM). +// +// Returns a list of the tags assigned to the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * InvalidResourceType +// The resource type is not valid. For example, if you are attempting to tag +// an instance, the instance must be a registered, managed instance. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListTagsForResource +func (c *SSM) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyDocumentPermission = "ModifyDocumentPermission" + +// ModifyDocumentPermissionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDocumentPermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyDocumentPermission for more information on using the ModifyDocumentPermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyDocumentPermissionRequest method. +// req, resp := client.ModifyDocumentPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ModifyDocumentPermission +func (c *SSM) ModifyDocumentPermissionRequest(input *ModifyDocumentPermissionInput) (req *request.Request, output *ModifyDocumentPermissionOutput) { + op := &request.Operation{ + Name: opModifyDocumentPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDocumentPermissionInput{} + } + + output = &ModifyDocumentPermissionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyDocumentPermission API operation for Amazon Simple Systems Manager (SSM). +// +// Shares a Systems Manager document publicly or privately. If you share a document +// privately, you must specify the AWS user account IDs for those people who +// can use the document. If you share a document publicly, you must specify +// All as the account ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ModifyDocumentPermission for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidPermissionType +// The permission type is not supported. Share is the only supported permission +// type. +// +// * DocumentPermissionLimit +// The document cannot be shared with more AWS user accounts. You can share +// a document with a maximum of 20 accounts. You can publicly share up to five +// documents. If you need to increase this limit, contact AWS Support. +// +// * DocumentLimitExceeded +// You can have at most 500 active Systems Manager documents. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ModifyDocumentPermission +func (c *SSM) ModifyDocumentPermission(input *ModifyDocumentPermissionInput) (*ModifyDocumentPermissionOutput, error) { + req, out := c.ModifyDocumentPermissionRequest(input) + return out, req.Send() +} + +// ModifyDocumentPermissionWithContext is the same as ModifyDocumentPermission with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyDocumentPermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ModifyDocumentPermissionWithContext(ctx aws.Context, input *ModifyDocumentPermissionInput, opts ...request.Option) (*ModifyDocumentPermissionOutput, error) { + req, out := c.ModifyDocumentPermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutComplianceItems = "PutComplianceItems" + +// PutComplianceItemsRequest generates a "aws/request.Request" representing the +// client's request for the PutComplianceItems operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutComplianceItems for more information on using the PutComplianceItems +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutComplianceItemsRequest method. +// req, resp := client.PutComplianceItemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutComplianceItems +func (c *SSM) PutComplianceItemsRequest(input *PutComplianceItemsInput) (req *request.Request, output *PutComplianceItemsOutput) { + op := &request.Operation{ + Name: opPutComplianceItems, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutComplianceItemsInput{} + } + + output = &PutComplianceItemsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutComplianceItems API operation for Amazon Simple Systems Manager (SSM). +// +// Registers a compliance type and other compliance details on a designated +// resource. This action lets you register custom compliance details with a +// resource. 
This call overwrites existing compliance information on the resource, +// so you must provide a full list of compliance items each time that you send +// the request. +// +// ComplianceType can be one of the following: +// +// * ExecutionId: The execution ID when the patch, association, or custom +// compliance item was applied. +// +// * ExecutionType: Specify patch, association, or Custom:string. +// +// * ExecutionTime. The time the patch, association, or custom compliance +// item was applied to the instance. +// +// * Id: The patch, association, or custom compliance ID. +// +// * Title: A title. +// +// * Status: The status of the compliance item. For example, approved for +// patches, or Failed for associations. +// +// * Severity: A patch severity. For example, critical. +// +// * DocumentName: A SSM document name. For example, AWS-RunPatchBaseline. +// +// * DocumentVersion: An SSM document version number. For example, 4. +// +// * Classification: A patch classification. For example, security updates. +// +// * PatchBaselineId: A patch baseline ID. +// +// * PatchSeverity: A patch severity. For example, Critical. +// +// * PatchState: A patch state. For example, InstancesWithFailedPatches. +// +// * PatchGroup: The name of a patch group. +// +// * InstalledTime: The time the association, patch, or custom compliance +// item was applied to the resource. Specify the time by using the following +// format: yyyy-MM-dd'T'HH:mm:ss'Z' +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation PutComplianceItems for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidItemContentException +// One or more content items is not valid. 
+// +// * TotalSizeLimitExceededException +// The size of inventory data has exceeded the total size limit for the resource. +// +// * ItemSizeLimitExceededException +// The inventory item size has exceeded the size limit. +// +// * ComplianceTypeCountLimitExceededException +// You specified too many custom compliance types. You can specify a maximum +// of 10 different types. +// +// * InvalidResourceType +// The resource type is not valid. For example, if you are attempting to tag +// an instance, the instance must be a registered, managed instance. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutComplianceItems +func (c *SSM) PutComplianceItems(input *PutComplianceItemsInput) (*PutComplianceItemsOutput, error) { + req, out := c.PutComplianceItemsRequest(input) + return out, req.Send() +} + +// PutComplianceItemsWithContext is the same as PutComplianceItems with the addition of +// the ability to pass a context and additional request options. +// +// See PutComplianceItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) PutComplianceItemsWithContext(ctx aws.Context, input *PutComplianceItemsInput, opts ...request.Option) (*PutComplianceItemsOutput, error) { + req, out := c.PutComplianceItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutInventory = "PutInventory" + +// PutInventoryRequest generates a "aws/request.Request" representing the +// client's request for the PutInventory operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutInventory for more information on using the PutInventory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutInventoryRequest method. +// req, resp := client.PutInventoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutInventory +func (c *SSM) PutInventoryRequest(input *PutInventoryInput) (req *request.Request, output *PutInventoryOutput) { + op := &request.Operation{ + Name: opPutInventory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutInventoryInput{} + } + + output = &PutInventoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutInventory API operation for Amazon Simple Systems Manager (SSM). +// +// Bulk update custom inventory items on one more instance. The request adds +// an inventory item, if it doesn't already exist, or updates an inventory item, +// if it does exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation PutInventory for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. 
+// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidTypeNameException +// The parameter type name is not valid. +// +// * InvalidItemContentException +// One or more content items is not valid. +// +// * TotalSizeLimitExceededException +// The size of inventory data has exceeded the total size limit for the resource. +// +// * ItemSizeLimitExceededException +// The inventory item size has exceeded the size limit. +// +// * ItemContentMismatchException +// The inventory item has invalid content. +// +// * CustomSchemaCountLimitExceededException +// You have exceeded the limit for custom schemas. Delete one or more custom +// schemas and try again. +// +// * UnsupportedInventorySchemaVersionException +// Inventory item type schema version has to match supported versions in the +// service. Check output of GetInventorySchema to see the available schema version +// for each type. +// +// * UnsupportedInventoryItemContextException +// The Context attribute that you specified for the InventoryItem is not allowed +// for this inventory type. You can only use the Context attribute with inventory +// types like AWS:ComplianceItem. +// +// * InvalidInventoryItemContextException +// You specified invalid keys or values in the Context attribute for InventoryItem. +// Verify the keys and values, and try again. +// +// * SubTypeCountLimitExceededException +// The sub-type count exceeded the limit for the inventory type. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutInventory +func (c *SSM) PutInventory(input *PutInventoryInput) (*PutInventoryOutput, error) { + req, out := c.PutInventoryRequest(input) + return out, req.Send() +} + +// PutInventoryWithContext is the same as PutInventory with the addition of +// the ability to pass a context and additional request options. +// +// See PutInventory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) PutInventoryWithContext(ctx aws.Context, input *PutInventoryInput, opts ...request.Option) (*PutInventoryOutput, error) { + req, out := c.PutInventoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutParameter = "PutParameter" + +// PutParameterRequest generates a "aws/request.Request" representing the +// client's request for the PutParameter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutParameter for more information on using the PutParameter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutParameterRequest method. 
+// req, resp := client.PutParameterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutParameter +func (c *SSM) PutParameterRequest(input *PutParameterInput) (req *request.Request, output *PutParameterOutput) { + op := &request.Operation{ + Name: opPutParameter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutParameterInput{} + } + + output = &PutParameterOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutParameter API operation for Amazon Simple Systems Manager (SSM). +// +// Add a parameter to the system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation PutParameter for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidKeyId +// The query key ID is not valid. +// +// * ParameterLimitExceeded +// You have exceeded the number of parameters for this AWS account. Delete one +// or more parameters and try again. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// * ParameterAlreadyExists +// The parameter already exists. You can't create duplicate parameters. +// +// * HierarchyLevelLimitExceededException +// A hierarchy can have a maximum of 15 levels. For more information, see Requirements +// and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) +// in the AWS Systems Manager User Guide. +// +// * HierarchyTypeMismatchException +// Parameter Store does not support changing a parameter type in a hierarchy. 
+// For example, you can't change a parameter from a String type to a SecureString +// type. You must create a new, unique parameter. +// +// * InvalidAllowedPatternException +// The request does not meet the regular expression requirement. +// +// * ParameterMaxVersionLimitExceeded +// Parameter Store retains the 100 most recently created versions of a parameter. +// After this number of versions has been created, Parameter Store deletes the +// oldest version when a new one is created. However, if the oldest version +// has a label attached to it, Parameter Store will not delete the version and +// instead presents this error message: +// +// An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter +// operation: You attempted to create a new version of parameter-name by calling +// the PutParameter API with the overwrite flag. Version version-number, the +// oldest version, can't be deleted because it has a label associated with it. +// Move the label to another version of the parameter, and try again. +// +// This safeguard is to prevent parameter versions with mission critical labels +// assigned to them from being deleted. To continue creating new parameters, +// first move the label from the oldest version of the parameter to a newer +// one for use in your operations. For information about moving parameter labels, +// see Move a parameter label (console) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-console-move) +// or Move a parameter label (CLI) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-cli-move) +// in the AWS Systems Manager User Guide. +// +// * ParameterPatternMismatchException +// The parameter name is not valid. +// +// * UnsupportedParameterType +// The parameter type is not supported. 
+// +// * PoliciesLimitExceededException +// You specified more than the maximum number of allowed policies for the parameter. +// The maximum is 10. +// +// * InvalidPolicyTypeException +// The policy type is not supported. Parameter Store supports the following +// policy types: Expiration, ExpirationNotification, and NoChangeNotification. +// +// * InvalidPolicyAttributeException +// A policy attribute or its value is invalid. +// +// * IncompatiblePolicyException +// There is a conflict in the policies specified for this parameter. You can't, +// for example, specify two Expiration policies for a parameter. Review your +// policies, and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutParameter +func (c *SSM) PutParameter(input *PutParameterInput) (*PutParameterOutput, error) { + req, out := c.PutParameterRequest(input) + return out, req.Send() +} + +// PutParameterWithContext is the same as PutParameter with the addition of +// the ability to pass a context and additional request options. +// +// See PutParameter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) PutParameterWithContext(ctx aws.Context, input *PutParameterInput, opts ...request.Option) (*PutParameterOutput, error) { + req, out := c.PutParameterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterDefaultPatchBaseline = "RegisterDefaultPatchBaseline" + +// RegisterDefaultPatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the RegisterDefaultPatchBaseline operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterDefaultPatchBaseline for more information on using the RegisterDefaultPatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterDefaultPatchBaselineRequest method. +// req, resp := client.RegisterDefaultPatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterDefaultPatchBaseline +func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBaselineInput) (req *request.Request, output *RegisterDefaultPatchBaselineOutput) { + op := &request.Operation{ + Name: opRegisterDefaultPatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterDefaultPatchBaselineInput{} + } + + output = &RegisterDefaultPatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterDefaultPatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Defines the default patch baseline for the relevant operating system. +// +// To reset the AWS predefined patch baseline as the default, specify the full +// patch baseline ARN as the baseline ID value. For example, for CentOS, specify +// arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead +// of pb-0574b43a65ea646ed. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation RegisterDefaultPatchBaseline for usage and error information. +// +// Returned Error Types: +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterDefaultPatchBaseline +func (c *SSM) RegisterDefaultPatchBaseline(input *RegisterDefaultPatchBaselineInput) (*RegisterDefaultPatchBaselineOutput, error) { + req, out := c.RegisterDefaultPatchBaselineRequest(input) + return out, req.Send() +} + +// RegisterDefaultPatchBaselineWithContext is the same as RegisterDefaultPatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterDefaultPatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) RegisterDefaultPatchBaselineWithContext(ctx aws.Context, input *RegisterDefaultPatchBaselineInput, opts ...request.Option) (*RegisterDefaultPatchBaselineOutput, error) { + req, out := c.RegisterDefaultPatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterPatchBaselineForPatchGroup = "RegisterPatchBaselineForPatchGroup" + +// RegisterPatchBaselineForPatchGroupRequest generates a "aws/request.Request" representing the +// client's request for the RegisterPatchBaselineForPatchGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterPatchBaselineForPatchGroup for more information on using the RegisterPatchBaselineForPatchGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterPatchBaselineForPatchGroupRequest method. 
+// req, resp := client.RegisterPatchBaselineForPatchGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterPatchBaselineForPatchGroup +func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBaselineForPatchGroupInput) (req *request.Request, output *RegisterPatchBaselineForPatchGroupOutput) { + op := &request.Operation{ + Name: opRegisterPatchBaselineForPatchGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterPatchBaselineForPatchGroupInput{} + } + + output = &RegisterPatchBaselineForPatchGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterPatchBaselineForPatchGroup API operation for Amazon Simple Systems Manager (SSM). +// +// Registers a patch baseline for a patch group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation RegisterPatchBaselineForPatchGroup for usage and error information. +// +// Returned Error Types: +// * AlreadyExistsException +// Error returned if an attempt is made to register a patch group with a patch +// baseline that is already registered with a different patch baseline. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. 
+// +// * ResourceLimitExceededException +// Error returned when the caller has exceeded the default resource quotas. +// For example, too many maintenance windows or patch baselines have been created. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterPatchBaselineForPatchGroup +func (c *SSM) RegisterPatchBaselineForPatchGroup(input *RegisterPatchBaselineForPatchGroupInput) (*RegisterPatchBaselineForPatchGroupOutput, error) { + req, out := c.RegisterPatchBaselineForPatchGroupRequest(input) + return out, req.Send() +} + +// RegisterPatchBaselineForPatchGroupWithContext is the same as RegisterPatchBaselineForPatchGroup with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterPatchBaselineForPatchGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) RegisterPatchBaselineForPatchGroupWithContext(ctx aws.Context, input *RegisterPatchBaselineForPatchGroupInput, opts ...request.Option) (*RegisterPatchBaselineForPatchGroupOutput, error) { + req, out := c.RegisterPatchBaselineForPatchGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterTargetWithMaintenanceWindow = "RegisterTargetWithMaintenanceWindow" + +// RegisterTargetWithMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTargetWithMaintenanceWindow operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterTargetWithMaintenanceWindow for more information on using the RegisterTargetWithMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterTargetWithMaintenanceWindowRequest method. +// req, resp := client.RegisterTargetWithMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterTargetWithMaintenanceWindow +func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWithMaintenanceWindowInput) (req *request.Request, output *RegisterTargetWithMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opRegisterTargetWithMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTargetWithMaintenanceWindowInput{} + } + + output = &RegisterTargetWithMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterTargetWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Registers a target with a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation RegisterTargetWithMaintenanceWindow for usage and error information. 
+// +// Returned Error Types: +// * IdempotentParameterMismatch +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * ResourceLimitExceededException +// Error returned when the caller has exceeded the default resource quotas. +// For example, too many maintenance windows or patch baselines have been created. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterTargetWithMaintenanceWindow +func (c *SSM) RegisterTargetWithMaintenanceWindow(input *RegisterTargetWithMaintenanceWindowInput) (*RegisterTargetWithMaintenanceWindowOutput, error) { + req, out := c.RegisterTargetWithMaintenanceWindowRequest(input) + return out, req.Send() +} + +// RegisterTargetWithMaintenanceWindowWithContext is the same as RegisterTargetWithMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTargetWithMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) RegisterTargetWithMaintenanceWindowWithContext(ctx aws.Context, input *RegisterTargetWithMaintenanceWindowInput, opts ...request.Option) (*RegisterTargetWithMaintenanceWindowOutput, error) { + req, out := c.RegisterTargetWithMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterTaskWithMaintenanceWindow = "RegisterTaskWithMaintenanceWindow" + +// RegisterTaskWithMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTaskWithMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterTaskWithMaintenanceWindow for more information on using the RegisterTaskWithMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterTaskWithMaintenanceWindowRequest method. 
+// req, resp := client.RegisterTaskWithMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterTaskWithMaintenanceWindow +func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMaintenanceWindowInput) (req *request.Request, output *RegisterTaskWithMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opRegisterTaskWithMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTaskWithMaintenanceWindowInput{} + } + + output = &RegisterTaskWithMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterTaskWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Adds a new task to a maintenance window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation RegisterTaskWithMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * IdempotentParameterMismatch +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +// +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * ResourceLimitExceededException +// Error returned when the caller has exceeded the default resource quotas. 
+// For example, too many maintenance windows or patch baselines have been created. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * FeatureNotAvailableException +// You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where +// the corresponding service is not available. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterTaskWithMaintenanceWindow +func (c *SSM) RegisterTaskWithMaintenanceWindow(input *RegisterTaskWithMaintenanceWindowInput) (*RegisterTaskWithMaintenanceWindowOutput, error) { + req, out := c.RegisterTaskWithMaintenanceWindowRequest(input) + return out, req.Send() +} + +// RegisterTaskWithMaintenanceWindowWithContext is the same as RegisterTaskWithMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTaskWithMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) RegisterTaskWithMaintenanceWindowWithContext(ctx aws.Context, input *RegisterTaskWithMaintenanceWindowInput, opts ...request.Option) (*RegisterTaskWithMaintenanceWindowOutput, error) { + req, out := c.RegisterTaskWithMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTagsFromResource for more information on using the RemoveTagsFromResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RemoveTagsFromResource +func (c *SSM) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + output = &RemoveTagsFromResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveTagsFromResource API operation for Amazon Simple Systems Manager (SSM). +// +// Removes tag keys from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation RemoveTagsFromResource for usage and error information. 
+// +// Returned Error Types: +// * InvalidResourceType +// The resource type is not valid. For example, if you are attempting to tag +// an instance, the instance must be a registered, managed instance. +// +// * InvalidResourceId +// The resource ID is not valid. Verify that you entered the correct ID and +// try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RemoveTagsFromResource +func (c *SSM) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + return out, req.Send() +} + +// RemoveTagsFromResourceWithContext is the same as RemoveTagsFromResource with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveTagsFromResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) RemoveTagsFromResourceWithContext(ctx aws.Context, input *RemoveTagsFromResourceInput, opts ...request.Option) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetServiceSetting = "ResetServiceSetting" + +// ResetServiceSettingRequest generates a "aws/request.Request" representing the +// client's request for the ResetServiceSetting operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetServiceSetting for more information on using the ResetServiceSetting +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetServiceSettingRequest method. +// req, resp := client.ResetServiceSettingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResetServiceSetting +func (c *SSM) ResetServiceSettingRequest(input *ResetServiceSettingInput) (req *request.Request, output *ResetServiceSettingOutput) { + op := &request.Operation{ + Name: opResetServiceSetting, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetServiceSettingInput{} + } + + output = &ResetServiceSettingOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetServiceSetting API operation for Amazon Simple Systems Manager (SSM). +// +// ServiceSetting is an account-level setting for an AWS service. This setting +// defines how a user interacts with or uses a service or a feature of a service. +// For example, if an AWS service charges money to the account based on feature +// or service usage, then the AWS service team might create a default setting +// of "false". This means the user can't use this feature unless they change +// the setting to "true" and intentionally opt in for a paid feature. +// +// Services map a SettingId object to a setting value. AWS services teams define +// the default value for a SettingId. 
You can't create a new SettingId, but +// you can overwrite the default value if you have the ssm:UpdateServiceSetting +// permission for the setting. Use the GetServiceSetting API action to view +// the current value. Use the UpdateServiceSetting API action to change the +// default setting. +// +// Reset the service setting for the account to the default value as provisioned +// by the AWS service team. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ResetServiceSetting for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ServiceSettingNotFound +// The specified service setting was not found. Either the service name or the +// setting has not been provisioned by the AWS service team. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResetServiceSetting +func (c *SSM) ResetServiceSetting(input *ResetServiceSettingInput) (*ResetServiceSettingOutput, error) { + req, out := c.ResetServiceSettingRequest(input) + return out, req.Send() +} + +// ResetServiceSettingWithContext is the same as ResetServiceSetting with the addition of +// the ability to pass a context and additional request options. +// +// See ResetServiceSetting for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) ResetServiceSettingWithContext(ctx aws.Context, input *ResetServiceSettingInput, opts ...request.Option) (*ResetServiceSettingOutput, error) { + req, out := c.ResetServiceSettingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResumeSession = "ResumeSession" + +// ResumeSessionRequest generates a "aws/request.Request" representing the +// client's request for the ResumeSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResumeSession for more information on using the ResumeSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResumeSessionRequest method. +// req, resp := client.ResumeSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResumeSession +func (c *SSM) ResumeSessionRequest(input *ResumeSessionInput) (req *request.Request, output *ResumeSessionOutput) { + op := &request.Operation{ + Name: opResumeSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResumeSessionInput{} + } + + output = &ResumeSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResumeSession API operation for Amazon Simple Systems Manager (SSM). +// +// Reconnects a session to an instance after it has been disconnected. Connections +// can be resumed for disconnected sessions, but not terminated sessions. 
+// +// This command is primarily for use by client machines to automatically reconnect +// during intermittent network issues. It is not intended for any other use. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation ResumeSession for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResumeSession +func (c *SSM) ResumeSession(input *ResumeSessionInput) (*ResumeSessionOutput, error) { + req, out := c.ResumeSessionRequest(input) + return out, req.Send() +} + +// ResumeSessionWithContext is the same as ResumeSession with the addition of +// the ability to pass a context and additional request options. +// +// See ResumeSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ResumeSessionWithContext(ctx aws.Context, input *ResumeSessionInput, opts ...request.Option) (*ResumeSessionOutput, error) { + req, out := c.ResumeSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opSendAutomationSignal = "SendAutomationSignal" + +// SendAutomationSignalRequest generates a "aws/request.Request" representing the +// client's request for the SendAutomationSignal operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendAutomationSignal for more information on using the SendAutomationSignal +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendAutomationSignalRequest method. +// req, resp := client.SendAutomationSignalRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/SendAutomationSignal +func (c *SSM) SendAutomationSignalRequest(input *SendAutomationSignalInput) (req *request.Request, output *SendAutomationSignalOutput) { + op := &request.Operation{ + Name: opSendAutomationSignal, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendAutomationSignalInput{} + } + + output = &SendAutomationSignalOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendAutomationSignal API operation for Amazon Simple Systems Manager (SSM). +// +// Sends a signal to an Automation execution to change the current behavior +// or status of the execution. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation SendAutomationSignal for usage and error information. +// +// Returned Error Types: +// * AutomationExecutionNotFoundException +// There is no automation execution information for the requested automation +// execution ID. +// +// * AutomationStepNotFoundException +// The specified step name and execution ID don't exist. Verify the information +// and try again. +// +// * InvalidAutomationSignalException +// The signal is not valid for the current Automation execution. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/SendAutomationSignal +func (c *SSM) SendAutomationSignal(input *SendAutomationSignalInput) (*SendAutomationSignalOutput, error) { + req, out := c.SendAutomationSignalRequest(input) + return out, req.Send() +} + +// SendAutomationSignalWithContext is the same as SendAutomationSignal with the addition of +// the ability to pass a context and additional request options. +// +// See SendAutomationSignal for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) SendAutomationSignalWithContext(ctx aws.Context, input *SendAutomationSignalInput, opts ...request.Option) (*SendAutomationSignalOutput, error) { + req, out := c.SendAutomationSignalRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opSendCommand = "SendCommand" + +// SendCommandRequest generates a "aws/request.Request" representing the +// client's request for the SendCommand operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendCommand for more information on using the SendCommand +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendCommandRequest method. +// req, resp := client.SendCommandRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/SendCommand +func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, output *SendCommandOutput) { + op := &request.Operation{ + Name: opSendCommand, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendCommandInput{} + } + + output = &SendCommandOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendCommand API operation for Amazon Simple Systems Manager (SSM). +// +// Runs commands on one or more managed instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation SendCommand for usage and error information. +// +// Returned Error Types: +// * DuplicateInstanceId +// You cannot specify an instance ID in more than one association. 
+// +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// * InvalidOutputFolder +// The S3 bucket does not exist. +// +// * InvalidParameters +// You must specify values for all required parameters in the Systems Manager +// document. You can only supply values to parameters defined in the Systems +// Manager document. +// +// * UnsupportedPlatformType +// The document does not support the platform type of the given instance ID(s). +// For example, you sent an document for a Windows instance to a Linux instance. +// +// * MaxDocumentSizeExceeded +// The size limit of a document is 64 KB. +// +// * InvalidRole +// The role name can't contain invalid characters. Also verify that you specified +// an IAM role for notifications that includes the required trust policy. For +// information about configuring the IAM role for Run Command notifications, +// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) +// in the AWS Systems Manager User Guide. +// +// * InvalidNotificationConfig +// One or more configuration items is not valid. Verify that a valid Amazon +// Resource Name (ARN) was provided for an Amazon SNS topic. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/SendCommand +func (c *SSM) SendCommand(input *SendCommandInput) (*SendCommandOutput, error) { + req, out := c.SendCommandRequest(input) + return out, req.Send() +} + +// SendCommandWithContext is the same as SendCommand with the addition of +// the ability to pass a context and additional request options. +// +// See SendCommand for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) SendCommandWithContext(ctx aws.Context, input *SendCommandInput, opts ...request.Option) (*SendCommandOutput, error) { + req, out := c.SendCommandRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartAssociationsOnce = "StartAssociationsOnce" + +// StartAssociationsOnceRequest generates a "aws/request.Request" representing the +// client's request for the StartAssociationsOnce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartAssociationsOnce for more information on using the StartAssociationsOnce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartAssociationsOnceRequest method. 
+// req, resp := client.StartAssociationsOnceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartAssociationsOnce +func (c *SSM) StartAssociationsOnceRequest(input *StartAssociationsOnceInput) (req *request.Request, output *StartAssociationsOnceOutput) { + op := &request.Operation{ + Name: opStartAssociationsOnce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartAssociationsOnceInput{} + } + + output = &StartAssociationsOnceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartAssociationsOnce API operation for Amazon Simple Systems Manager (SSM). +// +// Use this API action to run an association immediately and only one time. +// This action can be helpful when troubleshooting associations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation StartAssociationsOnce for usage and error information. +// +// Returned Error Types: +// * InvalidAssociation +// The association is not valid or does not exist. +// +// * AssociationDoesNotExist +// The specified association does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartAssociationsOnce +func (c *SSM) StartAssociationsOnce(input *StartAssociationsOnceInput) (*StartAssociationsOnceOutput, error) { + req, out := c.StartAssociationsOnceRequest(input) + return out, req.Send() +} + +// StartAssociationsOnceWithContext is the same as StartAssociationsOnce with the addition of +// the ability to pass a context and additional request options. 
+// +// See StartAssociationsOnce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) StartAssociationsOnceWithContext(ctx aws.Context, input *StartAssociationsOnceInput, opts ...request.Option) (*StartAssociationsOnceOutput, error) { + req, out := c.StartAssociationsOnceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartAutomationExecution = "StartAutomationExecution" + +// StartAutomationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartAutomationExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartAutomationExecution for more information on using the StartAutomationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartAutomationExecutionRequest method. 
+// req, resp := client.StartAutomationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartAutomationExecution +func (c *SSM) StartAutomationExecutionRequest(input *StartAutomationExecutionInput) (req *request.Request, output *StartAutomationExecutionOutput) { + op := &request.Operation{ + Name: opStartAutomationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartAutomationExecutionInput{} + } + + output = &StartAutomationExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartAutomationExecution API operation for Amazon Simple Systems Manager (SSM). +// +// Initiates execution of an Automation document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation StartAutomationExecution for usage and error information. +// +// Returned Error Types: +// * AutomationDefinitionNotFoundException +// An Automation document with the specified name could not be found. +// +// * InvalidAutomationExecutionParametersException +// The supplied parameters for invoking the specified Automation document are +// incorrect. For example, they may not match the set of parameters permitted +// for the specified Automation document. +// +// * AutomationExecutionLimitExceededException +// The number of simultaneously running Automation executions exceeded the allowable +// limit. +// +// * AutomationDefinitionVersionNotFoundException +// An Automation document with the specified name and version could not be found. 
+// +// * IdempotentParameterMismatch +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +// +// * InvalidTarget +// The target is not valid or does not exist. It might not be configured for +// Systems Manager or you might not have permission to perform the operation. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartAutomationExecution +func (c *SSM) StartAutomationExecution(input *StartAutomationExecutionInput) (*StartAutomationExecutionOutput, error) { + req, out := c.StartAutomationExecutionRequest(input) + return out, req.Send() +} + +// StartAutomationExecutionWithContext is the same as StartAutomationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StartAutomationExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) StartAutomationExecutionWithContext(ctx aws.Context, input *StartAutomationExecutionInput, opts ...request.Option) (*StartAutomationExecutionOutput, error) { + req, out := c.StartAutomationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartSession = "StartSession" + +// StartSessionRequest generates a "aws/request.Request" representing the +// client's request for the StartSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See StartSession for more information on using the StartSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartSessionRequest method. +// req, resp := client.StartSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartSession +func (c *SSM) StartSessionRequest(input *StartSessionInput) (req *request.Request, output *StartSessionOutput) { + op := &request.Operation{ + Name: opStartSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartSessionInput{} + } + + output = &StartSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartSession API operation for Amazon Simple Systems Manager (SSM). +// +// Initiates a connection to a target (for example, an instance) for a Session +// Manager session. Returns a URL and token that can be used to open a WebSocket +// connection for sending input and receiving outputs. +// +// AWS CLI usage: start-session is an interactive command that requires the +// Session Manager plugin to be installed on the client machine making the call. +// For information, see Install the Session Manager plugin for the AWS CLI (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) +// in the AWS Systems Manager User Guide. +// +// AWS Tools for PowerShell usage: Start-SSMSession is not currently supported +// by AWS Tools for PowerShell on Windows local machines. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation StartSession for usage and error information. +// +// Returned Error Types: +// * InvalidDocument +// The specified document does not exist. +// +// * TargetNotConnected +// The specified target instance for the session is not fully configured for +// use with Session Manager. For more information, see Getting started with +// Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) +// in the AWS Systems Manager User Guide. This error is also returned if you +// attempt to start a session on an instance that is located in a different +// account or Region +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StartSession +func (c *SSM) StartSession(input *StartSessionInput) (*StartSessionOutput, error) { + req, out := c.StartSessionRequest(input) + return out, req.Send() +} + +// StartSessionWithContext is the same as StartSession with the addition of +// the ability to pass a context and additional request options. +// +// See StartSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) StartSessionWithContext(ctx aws.Context, input *StartSessionInput, opts ...request.Option) (*StartSessionOutput, error) { + req, out := c.StartSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStopAutomationExecution = "StopAutomationExecution" + +// StopAutomationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StopAutomationExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopAutomationExecution for more information on using the StopAutomationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopAutomationExecutionRequest method. +// req, resp := client.StopAutomationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StopAutomationExecution +func (c *SSM) StopAutomationExecutionRequest(input *StopAutomationExecutionInput) (req *request.Request, output *StopAutomationExecutionOutput) { + op := &request.Operation{ + Name: opStopAutomationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopAutomationExecutionInput{} + } + + output = &StopAutomationExecutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopAutomationExecution API operation for Amazon Simple Systems Manager (SSM). +// +// Stop an Automation that is currently running. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation StopAutomationExecution for usage and error information. +// +// Returned Error Types: +// * AutomationExecutionNotFoundException +// There is no automation execution information for the requested automation +// execution ID. +// +// * InvalidAutomationStatusUpdateException +// The specified update status operation is not valid. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/StopAutomationExecution +func (c *SSM) StopAutomationExecution(input *StopAutomationExecutionInput) (*StopAutomationExecutionOutput, error) { + req, out := c.StopAutomationExecutionRequest(input) + return out, req.Send() +} + +// StopAutomationExecutionWithContext is the same as StopAutomationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StopAutomationExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) StopAutomationExecutionWithContext(ctx aws.Context, input *StopAutomationExecutionInput, opts ...request.Option) (*StopAutomationExecutionOutput, error) { + req, out := c.StopAutomationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTerminateSession = "TerminateSession" + +// TerminateSessionRequest generates a "aws/request.Request" representing the +// client's request for the TerminateSession operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TerminateSession for more information on using the TerminateSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TerminateSessionRequest method. +// req, resp := client.TerminateSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/TerminateSession +func (c *SSM) TerminateSessionRequest(input *TerminateSessionInput) (req *request.Request, output *TerminateSessionOutput) { + op := &request.Operation{ + Name: opTerminateSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateSessionInput{} + } + + output = &TerminateSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// TerminateSession API operation for Amazon Simple Systems Manager (SSM). +// +// Permanently ends a session and closes the data connection between the Session +// Manager client and SSM Agent on the instance. A terminated session cannot +// be resumed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation TerminateSession for usage and error information. 
+// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/TerminateSession +func (c *SSM) TerminateSession(input *TerminateSessionInput) (*TerminateSessionOutput, error) { + req, out := c.TerminateSessionRequest(input) + return out, req.Send() +} + +// TerminateSessionWithContext is the same as TerminateSession with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) TerminateSessionWithContext(ctx aws.Context, input *TerminateSessionInput, opts ...request.Option) (*TerminateSessionOutput, error) { + req, out := c.TerminateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAssociation = "UpdateAssociation" + +// UpdateAssociationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateAssociation for more information on using the UpdateAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAssociationRequest method. +// req, resp := client.UpdateAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateAssociation +func (c *SSM) UpdateAssociationRequest(input *UpdateAssociationInput) (req *request.Request, output *UpdateAssociationOutput) { + op := &request.Operation{ + Name: opUpdateAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssociationInput{} + } + + output = &UpdateAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAssociation API operation for Amazon Simple Systems Manager (SSM). +// +// Updates an association. You can update the association name and version, +// the document version, schedule, parameters, and Amazon S3 output. +// +// In order to call this API action, your IAM user account, group, or role must +// be configured with permission to call the DescribeAssociation API action. +// If you don't have permission to call DescribeAssociation, then you receive +// the following error: An error occurred (AccessDeniedException) when calling +// the UpdateAssociation operation: User: is not authorized to perform: +// ssm:DescribeAssociation on resource: +// +// When you update an association, the association immediately runs against +// the specified targets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateAssociation for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidSchedule +// The schedule is invalid. Verify your cron or rate expression and try again. +// +// * InvalidParameters +// You must specify values for all required parameters in the Systems Manager +// document. You can only supply values to parameters defined in the Systems +// Manager document. +// +// * InvalidOutputLocation +// The output location is not valid or does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// * AssociationDoesNotExist +// The specified association does not exist. +// +// * InvalidUpdate +// The update is not valid. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidTarget +// The target is not valid or does not exist. It might not be configured for +// Systems Manager or you might not have permission to perform the operation. +// +// * InvalidAssociationVersion +// The version you specified is not valid. Use ListAssociationVersions to view +// all versions of an association according to the association ID. Or, use the +// $LATEST parameter to view the latest version of the association. +// +// * AssociationVersionLimitExceeded +// You have reached the maximum number versions allowed for an association. +// Each association has a limit of 1,000 versions. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateAssociation +func (c *SSM) UpdateAssociation(input *UpdateAssociationInput) (*UpdateAssociationOutput, error) { + req, out := c.UpdateAssociationRequest(input) + return out, req.Send() +} + +// UpdateAssociationWithContext is the same as UpdateAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateAssociationWithContext(ctx aws.Context, input *UpdateAssociationInput, opts ...request.Option) (*UpdateAssociationOutput, error) { + req, out := c.UpdateAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAssociationStatus = "UpdateAssociationStatus" + +// UpdateAssociationStatusRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssociationStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAssociationStatus for more information on using the UpdateAssociationStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAssociationStatusRequest method. 
+// req, resp := client.UpdateAssociationStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateAssociationStatus +func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput) (req *request.Request, output *UpdateAssociationStatusOutput) { + op := &request.Operation{ + Name: opUpdateAssociationStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssociationStatusInput{} + } + + output = &UpdateAssociationStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAssociationStatus API operation for Amazon Simple Systems Manager (SSM). +// +// Updates the status of the Systems Manager document associated with the specified +// instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateAssociationStatus for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InvalidDocument +// The specified document does not exist. +// +// * AssociationDoesNotExist +// The specified association does not exist. +// +// * StatusUnchanged +// The updated status is the same as the current status. 
+// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateAssociationStatus +func (c *SSM) UpdateAssociationStatus(input *UpdateAssociationStatusInput) (*UpdateAssociationStatusOutput, error) { + req, out := c.UpdateAssociationStatusRequest(input) + return out, req.Send() +} + +// UpdateAssociationStatusWithContext is the same as UpdateAssociationStatus with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAssociationStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateAssociationStatusWithContext(ctx aws.Context, input *UpdateAssociationStatusInput, opts ...request.Option) (*UpdateAssociationStatusOutput, error) { + req, out := c.UpdateAssociationStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDocument = "UpdateDocument" + +// UpdateDocumentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDocument operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDocument for more information on using the UpdateDocument +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateDocumentRequest method. +// req, resp := client.UpdateDocumentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateDocument +func (c *SSM) UpdateDocumentRequest(input *UpdateDocumentInput) (req *request.Request, output *UpdateDocumentOutput) { + op := &request.Operation{ + Name: opUpdateDocument, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDocumentInput{} + } + + output = &UpdateDocumentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDocument API operation for Amazon Simple Systems Manager (SSM). +// +// Updates one or more values for an SSM document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateDocument for usage and error information. +// +// Returned Error Types: +// * MaxDocumentSizeExceeded +// The size limit of a document is 64 KB. +// +// * DocumentVersionLimitExceeded +// The document has too many versions. Delete one or more document versions +// and try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// * DuplicateDocumentContent +// The content of the association document matches another document. Change +// the content of the document and try again. +// +// * DuplicateDocumentVersionName +// The version name has already been used in this document. Specify a different +// version name, and then try again. +// +// * InvalidDocumentContent +// The content for the document is not valid. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. 
+// +// * InvalidDocumentSchemaVersion +// The version of the document schema is not supported. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentOperation +// You attempted to delete a document while it is still shared. You must stop +// sharing the document before you can delete it. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateDocument +func (c *SSM) UpdateDocument(input *UpdateDocumentInput) (*UpdateDocumentOutput, error) { + req, out := c.UpdateDocumentRequest(input) + return out, req.Send() +} + +// UpdateDocumentWithContext is the same as UpdateDocument with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDocument for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateDocumentWithContext(ctx aws.Context, input *UpdateDocumentInput, opts ...request.Option) (*UpdateDocumentOutput, error) { + req, out := c.UpdateDocumentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDocumentDefaultVersion = "UpdateDocumentDefaultVersion" + +// UpdateDocumentDefaultVersionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDocumentDefaultVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateDocumentDefaultVersion for more information on using the UpdateDocumentDefaultVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDocumentDefaultVersionRequest method. +// req, resp := client.UpdateDocumentDefaultVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateDocumentDefaultVersion +func (c *SSM) UpdateDocumentDefaultVersionRequest(input *UpdateDocumentDefaultVersionInput) (req *request.Request, output *UpdateDocumentDefaultVersionOutput) { + op := &request.Operation{ + Name: opUpdateDocumentDefaultVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDocumentDefaultVersionInput{} + } + + output = &UpdateDocumentDefaultVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDocumentDefaultVersion API operation for Amazon Simple Systems Manager (SSM). +// +// Set the default version of a document. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateDocumentDefaultVersion for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * InvalidDocument +// The specified document does not exist. +// +// * InvalidDocumentVersion +// The document version is not valid or does not exist. +// +// * InvalidDocumentSchemaVersion +// The version of the document schema is not supported. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateDocumentDefaultVersion +func (c *SSM) UpdateDocumentDefaultVersion(input *UpdateDocumentDefaultVersionInput) (*UpdateDocumentDefaultVersionOutput, error) { + req, out := c.UpdateDocumentDefaultVersionRequest(input) + return out, req.Send() +} + +// UpdateDocumentDefaultVersionWithContext is the same as UpdateDocumentDefaultVersion with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDocumentDefaultVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateDocumentDefaultVersionWithContext(ctx aws.Context, input *UpdateDocumentDefaultVersionInput, opts ...request.Option) (*UpdateDocumentDefaultVersionOutput, error) { + req, out := c.UpdateDocumentDefaultVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateMaintenanceWindow = "UpdateMaintenanceWindow" + +// UpdateMaintenanceWindowRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMaintenanceWindow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMaintenanceWindow for more information on using the UpdateMaintenanceWindow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateMaintenanceWindowRequest method. +// req, resp := client.UpdateMaintenanceWindowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindow +func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput) (req *request.Request, output *UpdateMaintenanceWindowOutput) { + op := &request.Operation{ + Name: opUpdateMaintenanceWindow, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMaintenanceWindowInput{} + } + + output = &UpdateMaintenanceWindowOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). +// +// Updates an existing maintenance window. Only specified parameters are modified. +// +// The value you specify for Duration determines the specific end time for the +// maintenance window based on the time it begins. No maintenance window tasks +// are permitted to start after the resulting endtime minus the number of hours +// you specify for Cutoff. For example, if the maintenance window starts at +// 3 PM, the duration is three hours, and the value you specify for Cutoff is +// one hour, no maintenance window tasks can start after 5 PM. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateMaintenanceWindow for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. 
+// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindow +func (c *SSM) UpdateMaintenanceWindow(input *UpdateMaintenanceWindowInput) (*UpdateMaintenanceWindowOutput, error) { + req, out := c.UpdateMaintenanceWindowRequest(input) + return out, req.Send() +} + +// UpdateMaintenanceWindowWithContext is the same as UpdateMaintenanceWindow with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMaintenanceWindow for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateMaintenanceWindowWithContext(ctx aws.Context, input *UpdateMaintenanceWindowInput, opts ...request.Option) (*UpdateMaintenanceWindowOutput, error) { + req, out := c.UpdateMaintenanceWindowRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateMaintenanceWindowTarget = "UpdateMaintenanceWindowTarget" + +// UpdateMaintenanceWindowTargetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMaintenanceWindowTarget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateMaintenanceWindowTarget for more information on using the UpdateMaintenanceWindowTarget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateMaintenanceWindowTargetRequest method. +// req, resp := client.UpdateMaintenanceWindowTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindowTarget +func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindowTargetInput) (req *request.Request, output *UpdateMaintenanceWindowTargetOutput) { + op := &request.Operation{ + Name: opUpdateMaintenanceWindowTarget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMaintenanceWindowTargetInput{} + } + + output = &UpdateMaintenanceWindowTargetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMaintenanceWindowTarget API operation for Amazon Simple Systems Manager (SSM). +// +// Modifies the target of an existing maintenance window. You can change the +// following: +// +// * Name +// +// * Description +// +// * Owner +// +// * IDs for an ID target +// +// * Tags for a Tag target +// +// * From any supported tag type to another. The three supported tag types +// are ID target, Tag target, and resource group. For more information, see +// Target. +// +// If a parameter is null, then the corresponding field is not modified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateMaintenanceWindowTarget for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindowTarget +func (c *SSM) UpdateMaintenanceWindowTarget(input *UpdateMaintenanceWindowTargetInput) (*UpdateMaintenanceWindowTargetOutput, error) { + req, out := c.UpdateMaintenanceWindowTargetRequest(input) + return out, req.Send() +} + +// UpdateMaintenanceWindowTargetWithContext is the same as UpdateMaintenanceWindowTarget with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMaintenanceWindowTarget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateMaintenanceWindowTargetWithContext(ctx aws.Context, input *UpdateMaintenanceWindowTargetInput, opts ...request.Option) (*UpdateMaintenanceWindowTargetOutput, error) { + req, out := c.UpdateMaintenanceWindowTargetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdateMaintenanceWindowTask = "UpdateMaintenanceWindowTask" + +// UpdateMaintenanceWindowTaskRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMaintenanceWindowTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMaintenanceWindowTask for more information on using the UpdateMaintenanceWindowTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateMaintenanceWindowTaskRequest method. +// req, resp := client.UpdateMaintenanceWindowTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindowTask +func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowTaskInput) (req *request.Request, output *UpdateMaintenanceWindowTaskOutput) { + op := &request.Operation{ + Name: opUpdateMaintenanceWindowTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateMaintenanceWindowTaskInput{} + } + + output = &UpdateMaintenanceWindowTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). +// +// Modifies a task assigned to a maintenance window. You can't change the task +// type, but you can change the following values: +// +// * TaskARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript +// to AWS-RunShellScript. 
+// +// * ServiceRoleArn +// +// * TaskInvocationParameters +// +// * Priority +// +// * MaxConcurrency +// +// * MaxErrors +// +// If the value for a parameter in UpdateMaintenanceWindowTask is null, then +// the corresponding field is not modified. If you set Replace to true, then +// all fields required by the RegisterTaskWithMaintenanceWindow action are required +// for this request. Optional fields that aren't specified are set to null. +// +// When you update a maintenance window task that has options specified in TaskInvocationParameters, +// you must provide again all the TaskInvocationParameters values that you want +// to retain. The values you do not specify again are removed. For example, +// suppose that when you registered a Run Command task, you specified TaskInvocationParameters +// values for Comment, NotificationConfig, and OutputS3BucketName. If you update +// the maintenance window task and specify only a different OutputS3BucketName +// value, the values for Comment and NotificationConfig are removed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateMaintenanceWindowTask for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateMaintenanceWindowTask +func (c *SSM) UpdateMaintenanceWindowTask(input *UpdateMaintenanceWindowTaskInput) (*UpdateMaintenanceWindowTaskOutput, error) { + req, out := c.UpdateMaintenanceWindowTaskRequest(input) + return out, req.Send() +} + +// UpdateMaintenanceWindowTaskWithContext is the same as UpdateMaintenanceWindowTask with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMaintenanceWindowTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateMaintenanceWindowTaskWithContext(ctx aws.Context, input *UpdateMaintenanceWindowTaskInput, opts ...request.Option) (*UpdateMaintenanceWindowTaskOutput, error) { + req, out := c.UpdateMaintenanceWindowTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateManagedInstanceRole = "UpdateManagedInstanceRole" + +// UpdateManagedInstanceRoleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateManagedInstanceRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateManagedInstanceRole for more information on using the UpdateManagedInstanceRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateManagedInstanceRoleRequest method. +// req, resp := client.UpdateManagedInstanceRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateManagedInstanceRole +func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleInput) (req *request.Request, output *UpdateManagedInstanceRoleOutput) { + op := &request.Operation{ + Name: opUpdateManagedInstanceRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateManagedInstanceRoleInput{} + } + + output = &UpdateManagedInstanceRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateManagedInstanceRole API operation for Amazon Simple Systems Manager (SSM). +// +// Changes the Amazon Identity and Access Management (IAM) role that is assigned +// to the on-premises instance or virtual machines (VM). IAM roles are first +// assigned to these hybrid instances during the activation process. For more +// information, see CreateActivation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateManagedInstanceRole for usage and error information. +// +// Returned Error Types: +// * InvalidInstanceId +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. 
Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateManagedInstanceRole +func (c *SSM) UpdateManagedInstanceRole(input *UpdateManagedInstanceRoleInput) (*UpdateManagedInstanceRoleOutput, error) { + req, out := c.UpdateManagedInstanceRoleRequest(input) + return out, req.Send() +} + +// UpdateManagedInstanceRoleWithContext is the same as UpdateManagedInstanceRole with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateManagedInstanceRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateManagedInstanceRoleWithContext(ctx aws.Context, input *UpdateManagedInstanceRoleInput, opts ...request.Option) (*UpdateManagedInstanceRoleOutput, error) { + req, out := c.UpdateManagedInstanceRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateOpsItem = "UpdateOpsItem" + +// UpdateOpsItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOpsItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateOpsItem for more information on using the UpdateOpsItem +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateOpsItemRequest method. +// req, resp := client.UpdateOpsItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateOpsItem +func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Request, output *UpdateOpsItemOutput) { + op := &request.Operation{ + Name: opUpdateOpsItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateOpsItemInput{} + } + + output = &UpdateOpsItemOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateOpsItem API operation for Amazon Simple Systems Manager (SSM). +// +// Edit or change an OpsItem. You must have permission in AWS Identity and Access +// Management (IAM) to update an OpsItem. For more information, see Getting +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// in the AWS Systems Manager User Guide. +// +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateOpsItem for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * OpsItemNotFoundException +// The specified OpsItem ID doesn't exist. Verify the ID and try again. +// +// * OpsItemAlreadyExistsException +// The OpsItem already exists. +// +// * OpsItemLimitExceededException +// The request caused OpsItems to exceed one or more quotas. For information +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// +// * OpsItemInvalidParameterException +// A specified parameter argument isn't valid. Verify the available arguments +// and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateOpsItem +func (c *SSM) UpdateOpsItem(input *UpdateOpsItemInput) (*UpdateOpsItemOutput, error) { + req, out := c.UpdateOpsItemRequest(input) + return out, req.Send() +} + +// UpdateOpsItemWithContext is the same as UpdateOpsItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateOpsItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateOpsItemWithContext(ctx aws.Context, input *UpdateOpsItemInput, opts ...request.Option) (*UpdateOpsItemOutput, error) { + req, out := c.UpdateOpsItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdatePatchBaseline = "UpdatePatchBaseline" + +// UpdatePatchBaselineRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePatchBaseline operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePatchBaseline for more information on using the UpdatePatchBaseline +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePatchBaselineRequest method. +// req, resp := client.UpdatePatchBaselineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdatePatchBaseline +func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req *request.Request, output *UpdatePatchBaselineOutput) { + op := &request.Operation{ + Name: opUpdatePatchBaseline, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePatchBaselineInput{} + } + + output = &UpdatePatchBaselineOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdatePatchBaseline API operation for Amazon Simple Systems Manager (SSM). +// +// Modifies an existing patch baseline. Fields not specified in the request +// are left unchanged. +// +// For information about valid key and value pairs in PatchFilters for each +// supported operating system type, see PatchFilter (http://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdatePatchBaseline for usage and error information. +// +// Returned Error Types: +// * DoesNotExistException +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdatePatchBaseline +func (c *SSM) UpdatePatchBaseline(input *UpdatePatchBaselineInput) (*UpdatePatchBaselineOutput, error) { + req, out := c.UpdatePatchBaselineRequest(input) + return out, req.Send() +} + +// UpdatePatchBaselineWithContext is the same as UpdatePatchBaseline with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePatchBaseline for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdatePatchBaselineWithContext(ctx aws.Context, input *UpdatePatchBaselineInput, opts ...request.Option) (*UpdatePatchBaselineOutput, error) { + req, out := c.UpdatePatchBaselineRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdateResourceDataSync = "UpdateResourceDataSync" + +// UpdateResourceDataSyncRequest generates a "aws/request.Request" representing the +// client's request for the UpdateResourceDataSync operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateResourceDataSync for more information on using the UpdateResourceDataSync +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateResourceDataSyncRequest method. +// req, resp := client.UpdateResourceDataSyncRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateResourceDataSync +func (c *SSM) UpdateResourceDataSyncRequest(input *UpdateResourceDataSyncInput) (req *request.Request, output *UpdateResourceDataSyncOutput) { + op := &request.Operation{ + Name: opUpdateResourceDataSync, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateResourceDataSyncInput{} + } + + output = &UpdateResourceDataSyncOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateResourceDataSync API operation for Amazon Simple Systems Manager (SSM). +// +// Update a resource data sync. After you create a resource data sync for a +// Region, you can't change the account options for that sync. 
For example, +// if you create a sync in the us-east-2 (Ohio) Region and you choose the Include +// only the current account option, you can't edit that sync later and choose +// the Include all accounts from my AWS Organizations configuration option. +// Instead, you must delete the first resource data sync, and create a new one. +// +// This API action only supports a resource data sync that was created with +// a SyncFromSource SyncType. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateResourceDataSync for usage and error information. +// +// Returned Error Types: +// * ResourceDataSyncNotFoundException +// The specified sync name was not found. +// +// * ResourceDataSyncInvalidConfigurationException +// The specified sync configuration is invalid. +// +// * ResourceDataSyncConflictException +// Another UpdateResourceDataSync request is being processed. Wait a few minutes +// and try again. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateResourceDataSync +func (c *SSM) UpdateResourceDataSync(input *UpdateResourceDataSyncInput) (*UpdateResourceDataSyncOutput, error) { + req, out := c.UpdateResourceDataSyncRequest(input) + return out, req.Send() +} + +// UpdateResourceDataSyncWithContext is the same as UpdateResourceDataSync with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateResourceDataSync for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateResourceDataSyncWithContext(ctx aws.Context, input *UpdateResourceDataSyncInput, opts ...request.Option) (*UpdateResourceDataSyncOutput, error) { + req, out := c.UpdateResourceDataSyncRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateServiceSetting = "UpdateServiceSetting" + +// UpdateServiceSettingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateServiceSetting operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateServiceSetting for more information on using the UpdateServiceSetting +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateServiceSettingRequest method. 
+// req, resp := client.UpdateServiceSettingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateServiceSetting +func (c *SSM) UpdateServiceSettingRequest(input *UpdateServiceSettingInput) (req *request.Request, output *UpdateServiceSettingOutput) { + op := &request.Operation{ + Name: opUpdateServiceSetting, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceSettingInput{} + } + + output = &UpdateServiceSettingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateServiceSetting API operation for Amazon Simple Systems Manager (SSM). +// +// ServiceSetting is an account-level setting for an AWS service. This setting +// defines how a user interacts with or uses a service or a feature of a service. +// For example, if an AWS service charges money to the account based on feature +// or service usage, then the AWS service team might create a default setting +// of "false". This means the user can't use this feature unless they change +// the setting to "true" and intentionally opt in for a paid feature. +// +// Services map a SettingId object to a setting value. AWS services teams define +// the default value for a SettingId. You can't create a new SettingId, but +// you can overwrite the default value if you have the ssm:UpdateServiceSetting +// permission for the setting. Use the GetServiceSetting API action to view +// the current value. Or, use the ResetServiceSetting to change the value back +// to the original value defined by the AWS service team. +// +// Update the service setting for the account. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s +// API operation UpdateServiceSetting for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// * ServiceSettingNotFound +// The specified service setting was not found. Either the service name or the +// setting has not been provisioned by the AWS service team. +// +// * TooManyUpdates +// There are concurrent updates for a resource that supports one update at a +// time. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateServiceSetting +func (c *SSM) UpdateServiceSetting(input *UpdateServiceSettingInput) (*UpdateServiceSettingOutput, error) { + req, out := c.UpdateServiceSettingRequest(input) + return out, req.Send() +} + +// UpdateServiceSettingWithContext is the same as UpdateServiceSetting with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateServiceSetting for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) UpdateServiceSettingWithContext(ctx aws.Context, input *UpdateServiceSettingInput, opts ...request.Option) (*UpdateServiceSettingOutput, error) { + req, out := c.UpdateServiceSettingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Information includes the AWS account ID where the current document is shared +// and the version shared with that account. 
+type AccountSharingInfo struct { + _ struct{} `type:"structure"` + + // The AWS account ID where the current document is shared. + AccountId *string `type:"string"` + + // The version of the current document shared with the account. + SharedDocumentVersion *string `type:"string"` +} + +// String returns the string representation +func (s AccountSharingInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountSharingInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountSharingInfo) SetAccountId(v string) *AccountSharingInfo { + s.AccountId = &v + return s +} + +// SetSharedDocumentVersion sets the SharedDocumentVersion field's value. +func (s *AccountSharingInfo) SetSharedDocumentVersion(v string) *AccountSharingInfo { + s.SharedDocumentVersion = &v + return s +} + +// An activation registers one or more on-premises servers or virtual machines +// (VMs) with AWS so that you can configure those servers or VMs using Run Command. +// A server or VM that has been registered with AWS is called a managed instance. +type Activation struct { + _ struct{} `type:"structure"` + + // The ID created by Systems Manager when you submitted the activation. + ActivationId *string `type:"string"` + + // The date the activation was created. + CreatedDate *time.Time `type:"timestamp"` + + // A name for the managed instance when it is created. + DefaultInstanceName *string `type:"string"` + + // A user defined description of the activation. + Description *string `type:"string"` + + // The date when this activation can no longer be used to register managed instances. + ExpirationDate *time.Time `type:"timestamp"` + + // Whether or not the activation is expired. + Expired *bool `type:"boolean"` + + // The Amazon Identity and Access Management (IAM) role to assign to the managed + // instance. 
+ IamRole *string `type:"string"` + + // The maximum number of managed instances that can be registered using this + // activation. + RegistrationLimit *int64 `min:"1" type:"integer"` + + // The number of managed instances already registered with this activation. + RegistrationsCount *int64 `min:"1" type:"integer"` + + // Tags assigned to the activation. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s Activation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Activation) GoString() string { + return s.String() +} + +// SetActivationId sets the ActivationId field's value. +func (s *Activation) SetActivationId(v string) *Activation { + s.ActivationId = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *Activation) SetCreatedDate(v time.Time) *Activation { + s.CreatedDate = &v + return s +} + +// SetDefaultInstanceName sets the DefaultInstanceName field's value. +func (s *Activation) SetDefaultInstanceName(v string) *Activation { + s.DefaultInstanceName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Activation) SetDescription(v string) *Activation { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *Activation) SetExpirationDate(v time.Time) *Activation { + s.ExpirationDate = &v + return s +} + +// SetExpired sets the Expired field's value. +func (s *Activation) SetExpired(v bool) *Activation { + s.Expired = &v + return s +} + +// SetIamRole sets the IamRole field's value. +func (s *Activation) SetIamRole(v string) *Activation { + s.IamRole = &v + return s +} + +// SetRegistrationLimit sets the RegistrationLimit field's value. +func (s *Activation) SetRegistrationLimit(v int64) *Activation { + s.RegistrationLimit = &v + return s +} + +// SetRegistrationsCount sets the RegistrationsCount field's value. 
+func (s *Activation) SetRegistrationsCount(v int64) *Activation { + s.RegistrationsCount = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Activation) SetTags(v []*Tag) *Activation { + s.Tags = v + return s +} + +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The resource ID you want to tag. + // + // Use the ID of the resource. Here are some examples: + // + // ManagedInstance: mi-012345abcde + // + // MaintenanceWindow: mw-012345abcde + // + // PatchBaseline: pb-012345abcde + // + // For the Document and Parameter values, use the name of the resource. + // + // The ManagedInstance type for this API action is only for on-premises managed + // instances. You must specify the name of the managed instance in the following + // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // Specifies the type of resource you are tagging. + // + // The ManagedInstance type for this API action is for on-premises managed instances. + // You must specify the name of the managed instance in the following format: + // mi-ID_number. For example, mi-1a2b3c4d5e6f. + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"` + + // One or more tags. The value parameter is required, but if you don't want + // the tag to have a value, specify the parameter with no value, and we set + // the value to an empty string. + // + // Do not enter personally identifiable information in this field. 
+ // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *AddTagsToResourceInput) SetResourceId(v string) *AddTagsToResourceInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *AddTagsToResourceInput) SetResourceType(v string) *AddTagsToResourceInput { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *AddTagsToResourceInput) SetTags(v []*Tag) *AddTagsToResourceInput { + s.Tags = v + return s +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + +// Error returned if an attempt is made to register a patch group with a patch +// baseline that is already registered with a different patch baseline. +type AlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { + return &AlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AlreadyExistsException) Code() string { + return "AlreadyExistsException" +} + +// Message returns the exception's message. +func (s *AlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AlreadyExistsException) OrigErr() error { + return nil +} + +func (s *AlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You must disassociate a document from all instances before you can delete +// it. +type AssociatedInstances struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AssociatedInstances) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatedInstances) GoString() string { + return s.String() +} + +func newErrorAssociatedInstances(v protocol.ResponseMetadata) error { + return &AssociatedInstances{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AssociatedInstances) Code() string { + return "AssociatedInstances" +} + +// Message returns the exception's message. +func (s *AssociatedInstances) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AssociatedInstances) OrigErr() error { + return nil +} + +func (s *AssociatedInstances) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AssociatedInstances) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AssociatedInstances) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes an association of a Systems Manager document and an instance. +type Association struct { + _ struct{} `type:"structure"` + + // The ID created by the system when you create an association. An association + // is a binding between a document and a set of targets with a schedule. + AssociationId *string `type:"string"` + + // The association name. 
+ AssociationName *string `type:"string"` + + // The association version. + AssociationVersion *string `type:"string"` + + // The version of the document used in the association. + DocumentVersion *string `type:"string"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The date on which the association was last run. + LastExecutionDate *time.Time `type:"timestamp"` + + // The name of the Systems Manager document. + Name *string `type:"string"` + + // Information about the association. + Overview *AssociationOverview `type:"structure"` + + // A cron expression that specifies a schedule when the association runs. + ScheduleExpression *string `min:"1" type:"string"` + + // The instances targeted by the request to create an association. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s Association) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Association) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *Association) SetAssociationId(v string) *Association { + s.AssociationId = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. +func (s *Association) SetAssociationName(v string) *Association { + s.AssociationName = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *Association) SetAssociationVersion(v string) *Association { + s.AssociationVersion = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *Association) SetDocumentVersion(v string) *Association { + s.DocumentVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *Association) SetInstanceId(v string) *Association { + s.InstanceId = &v + return s +} + +// SetLastExecutionDate sets the LastExecutionDate field's value. 
func (s *Association) SetLastExecutionDate(v time.Time) *Association {
	s.LastExecutionDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *Association) SetName(v string) *Association {
	s.Name = &v
	return s
}

// SetOverview sets the Overview field's value.
func (s *Association) SetOverview(v *AssociationOverview) *Association {
	s.Overview = v
	return s
}

// SetScheduleExpression sets the ScheduleExpression field's value.
func (s *Association) SetScheduleExpression(v string) *Association {
	s.ScheduleExpression = &v
	return s
}

// SetTargets sets the Targets field's value.
func (s *Association) SetTargets(v []*Target) *Association {
	s.Targets = v
	return s
}

// The specified association already exists.
type AssociationAlreadyExists struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s AssociationAlreadyExists) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociationAlreadyExists) GoString() string {
	return s.String()
}

// newErrorAssociationAlreadyExists wraps the response metadata in an
// AssociationAlreadyExists error value.
func newErrorAssociationAlreadyExists(v protocol.ResponseMetadata) error {
	return &AssociationAlreadyExists{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *AssociationAlreadyExists) Code() string {
	return "AssociationAlreadyExists"
}

// Message returns the exception's message, or the empty string when the
// service supplied none.
func (s *AssociationAlreadyExists) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *AssociationAlreadyExists) OrigErr() error {
	return nil
}

// Error satisfies the error interface, rendering as "Code: Message".
func (s *AssociationAlreadyExists) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
+func (s *AssociationAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AssociationAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes the parameters for a document. +type AssociationDescription struct { + _ struct{} `type:"structure"` + + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + + // The association ID. + AssociationId *string `type:"string"` + + // The association name. + AssociationName *string `type:"string"` + + // The association version. + AssociationVersion *string `type:"string"` + + // Specify the target for the association. This target is required for associations + // that use an Automation document and target resources by using rate controls. + AutomationTargetParameterName *string `min:"1" type:"string"` + + // The severity level that is assigned to the association. + ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"` + + // The date when the association was made. + Date *time.Time `type:"timestamp"` + + // The document version. + DocumentVersion *string `type:"string"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The date on which the association was last run. + LastExecutionDate *time.Time `type:"timestamp"` + + // The last date on which the association was successfully run. + LastSuccessfulExecutionDate *time.Time `type:"timestamp"` + + // The date when the association was last updated. + LastUpdateAssociationDate *time.Time `type:"timestamp"` + + // The maximum number of targets allowed to run the association at the same + // time. 
You can specify a number, for example 10, or a percentage of the target + // set, for example 10%. The default value is 100%, which means all targets + // run the association at the same time. + // + // If a new instance starts and attempts to run an association while Systems + // Manager is running MaxConcurrency associations, the association is allowed + // to run. During the next association interval, the new instance will process + // its association within the limit specified for MaxConcurrency. + MaxConcurrency *string `min:"1" type:"string"` + + // The number of errors that are allowed before the system stops sending requests + // to run the association on additional targets. You can specify either an absolute + // number of errors, for example 10, or a percentage of the target set, for + // example 10%. If you specify 3, for example, the system stops sending requests + // when the fourth error is received. If you specify 0, then the system stops + // sending requests after the first error is returned. If you run an association + // on 50 instances and set MaxError to 10%, then the system stops sending the + // request when the sixth error is received. + // + // Executions that are already running an association when MaxErrors is reached + // are allowed to complete, but some of these executions may fail as well. If + // you need to ensure that there won't be more than max-errors failed executions, + // set MaxConcurrency to 1 so that executions proceed one at a time. + MaxErrors *string `min:"1" type:"string"` + + // The name of the Systems Manager document. + Name *string `type:"string"` + + // An S3 bucket where you want to store the output details of the request. + OutputLocation *InstanceAssociationOutputLocation `type:"structure"` + + // Information about the association. + Overview *AssociationOverview `type:"structure"` + + // A description of the parameters for a document. 
+ Parameters map[string][]*string `type:"map"` + + // A cron expression that specifies a schedule when the association runs. + ScheduleExpression *string `min:"1" type:"string"` + + // The association status. + Status *AssociationStatus `type:"structure"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + + // The instances targeted by the request. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s AssociationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationDescription) GoString() string { + return s.String() +} + +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *AssociationDescription) SetApplyOnlyAtCronInterval(v bool) *AssociationDescription { + s.ApplyOnlyAtCronInterval = &v + return s +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociationDescription) SetAssociationId(v string) *AssociationDescription { + s.AssociationId = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. 
+func (s *AssociationDescription) SetAssociationName(v string) *AssociationDescription { + s.AssociationName = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *AssociationDescription) SetAssociationVersion(v string) *AssociationDescription { + s.AssociationVersion = &v + return s +} + +// SetAutomationTargetParameterName sets the AutomationTargetParameterName field's value. +func (s *AssociationDescription) SetAutomationTargetParameterName(v string) *AssociationDescription { + s.AutomationTargetParameterName = &v + return s +} + +// SetComplianceSeverity sets the ComplianceSeverity field's value. +func (s *AssociationDescription) SetComplianceSeverity(v string) *AssociationDescription { + s.ComplianceSeverity = &v + return s +} + +// SetDate sets the Date field's value. +func (s *AssociationDescription) SetDate(v time.Time) *AssociationDescription { + s.Date = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *AssociationDescription) SetDocumentVersion(v string) *AssociationDescription { + s.DocumentVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AssociationDescription) SetInstanceId(v string) *AssociationDescription { + s.InstanceId = &v + return s +} + +// SetLastExecutionDate sets the LastExecutionDate field's value. +func (s *AssociationDescription) SetLastExecutionDate(v time.Time) *AssociationDescription { + s.LastExecutionDate = &v + return s +} + +// SetLastSuccessfulExecutionDate sets the LastSuccessfulExecutionDate field's value. +func (s *AssociationDescription) SetLastSuccessfulExecutionDate(v time.Time) *AssociationDescription { + s.LastSuccessfulExecutionDate = &v + return s +} + +// SetLastUpdateAssociationDate sets the LastUpdateAssociationDate field's value. 
+func (s *AssociationDescription) SetLastUpdateAssociationDate(v time.Time) *AssociationDescription { + s.LastUpdateAssociationDate = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *AssociationDescription) SetMaxConcurrency(v string) *AssociationDescription { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *AssociationDescription) SetMaxErrors(v string) *AssociationDescription { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *AssociationDescription) SetName(v string) *AssociationDescription { + s.Name = &v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *AssociationDescription) SetOutputLocation(v *InstanceAssociationOutputLocation) *AssociationDescription { + s.OutputLocation = v + return s +} + +// SetOverview sets the Overview field's value. +func (s *AssociationDescription) SetOverview(v *AssociationOverview) *AssociationDescription { + s.Overview = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *AssociationDescription) SetParameters(v map[string][]*string) *AssociationDescription { + s.Parameters = v + return s +} + +// SetScheduleExpression sets the ScheduleExpression field's value. +func (s *AssociationDescription) SetScheduleExpression(v string) *AssociationDescription { + s.ScheduleExpression = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AssociationDescription) SetStatus(v *AssociationStatus) *AssociationDescription { + s.Status = v + return s +} + +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *AssociationDescription) SetSyncCompliance(v string) *AssociationDescription { + s.SyncCompliance = &v + return s +} + +// SetTargets sets the Targets field's value. 
+func (s *AssociationDescription) SetTargets(v []*Target) *AssociationDescription { + s.Targets = v + return s +} + +// The specified association does not exist. +type AssociationDoesNotExist struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AssociationDoesNotExist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationDoesNotExist) GoString() string { + return s.String() +} + +func newErrorAssociationDoesNotExist(v protocol.ResponseMetadata) error { + return &AssociationDoesNotExist{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AssociationDoesNotExist) Code() string { + return "AssociationDoesNotExist" +} + +// Message returns the exception's message. +func (s *AssociationDoesNotExist) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AssociationDoesNotExist) OrigErr() error { + return nil +} + +func (s *AssociationDoesNotExist) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AssociationDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AssociationDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID +} + +// Includes information about the specified association. +type AssociationExecution struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `type:"string"` + + // The association version. + AssociationVersion *string `type:"string"` + + // The time the execution started. 
+ CreatedTime *time.Time `type:"timestamp"` + + // Detailed status information about the execution. + DetailedStatus *string `type:"string"` + + // The execution ID for the association. + ExecutionId *string `type:"string"` + + // The date of the last execution. + LastExecutionDate *time.Time `type:"timestamp"` + + // An aggregate status of the resources in the execution based on the status + // type. + ResourceCountByStatus *string `type:"string"` + + // The status of the association execution. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AssociationExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationExecution) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociationExecution) SetAssociationId(v string) *AssociationExecution { + s.AssociationId = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *AssociationExecution) SetAssociationVersion(v string) *AssociationExecution { + s.AssociationVersion = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *AssociationExecution) SetCreatedTime(v time.Time) *AssociationExecution { + s.CreatedTime = &v + return s +} + +// SetDetailedStatus sets the DetailedStatus field's value. +func (s *AssociationExecution) SetDetailedStatus(v string) *AssociationExecution { + s.DetailedStatus = &v + return s +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *AssociationExecution) SetExecutionId(v string) *AssociationExecution { + s.ExecutionId = &v + return s +} + +// SetLastExecutionDate sets the LastExecutionDate field's value. +func (s *AssociationExecution) SetLastExecutionDate(v time.Time) *AssociationExecution { + s.LastExecutionDate = &v + return s +} + +// SetResourceCountByStatus sets the ResourceCountByStatus field's value. 
func (s *AssociationExecution) SetResourceCountByStatus(v string) *AssociationExecution {
	s.ResourceCountByStatus = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *AssociationExecution) SetStatus(v string) *AssociationExecution {
	s.Status = &v
	return s
}

// The specified execution ID does not exist. Verify the ID number and try again.
type AssociationExecutionDoesNotExist struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s AssociationExecutionDoesNotExist) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociationExecutionDoesNotExist) GoString() string {
	return s.String()
}

// newErrorAssociationExecutionDoesNotExist wraps the response metadata in an
// AssociationExecutionDoesNotExist error value.
func newErrorAssociationExecutionDoesNotExist(v protocol.ResponseMetadata) error {
	return &AssociationExecutionDoesNotExist{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *AssociationExecutionDoesNotExist) Code() string {
	return "AssociationExecutionDoesNotExist"
}

// Message returns the exception's message, or the empty string when the
// service supplied none.
func (s *AssociationExecutionDoesNotExist) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *AssociationExecutionDoesNotExist) OrigErr() error {
	return nil
}

// Error satisfies the error interface, rendering as "Code: Message".
func (s *AssociationExecutionDoesNotExist) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *AssociationExecutionDoesNotExist) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *AssociationExecutionDoesNotExist) RequestID() string {
	return s.RespMetadata.RequestID
}

// Filters used in the request.
+type AssociationExecutionFilter struct { + _ struct{} `type:"structure"` + + // The key value used in the request. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"AssociationExecutionFilterKey"` + + // The filter type specified in the request. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"AssociationFilterOperatorType"` + + // The value specified for the key. + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociationExecutionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationExecutionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociationExecutionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationExecutionFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AssociationExecutionFilter) SetKey(v string) *AssociationExecutionFilter { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *AssociationExecutionFilter) SetType(v string) *AssociationExecutionFilter { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AssociationExecutionFilter) SetValue(v string) *AssociationExecutionFilter { + s.Value = &v + return s +} + +// Includes information about the specified association execution. 
+type AssociationExecutionTarget struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `type:"string"` + + // The association version. + AssociationVersion *string `type:"string"` + + // Detailed information about the execution status. + DetailedStatus *string `type:"string"` + + // The execution ID. + ExecutionId *string `type:"string"` + + // The date of the last execution. + LastExecutionDate *time.Time `type:"timestamp"` + + // The location where the association details are saved. + OutputSource *OutputSource `type:"structure"` + + // The resource ID, for example, the instance ID where the association ran. + ResourceId *string `min:"1" type:"string"` + + // The resource type, for example, instance. + ResourceType *string `min:"1" type:"string"` + + // The association execution status. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AssociationExecutionTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationExecutionTarget) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociationExecutionTarget) SetAssociationId(v string) *AssociationExecutionTarget { + s.AssociationId = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *AssociationExecutionTarget) SetAssociationVersion(v string) *AssociationExecutionTarget { + s.AssociationVersion = &v + return s +} + +// SetDetailedStatus sets the DetailedStatus field's value. +func (s *AssociationExecutionTarget) SetDetailedStatus(v string) *AssociationExecutionTarget { + s.DetailedStatus = &v + return s +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *AssociationExecutionTarget) SetExecutionId(v string) *AssociationExecutionTarget { + s.ExecutionId = &v + return s +} + +// SetLastExecutionDate sets the LastExecutionDate field's value. 
+func (s *AssociationExecutionTarget) SetLastExecutionDate(v time.Time) *AssociationExecutionTarget { + s.LastExecutionDate = &v + return s +} + +// SetOutputSource sets the OutputSource field's value. +func (s *AssociationExecutionTarget) SetOutputSource(v *OutputSource) *AssociationExecutionTarget { + s.OutputSource = v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *AssociationExecutionTarget) SetResourceId(v string) *AssociationExecutionTarget { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *AssociationExecutionTarget) SetResourceType(v string) *AssociationExecutionTarget { + s.ResourceType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AssociationExecutionTarget) SetStatus(v string) *AssociationExecutionTarget { + s.Status = &v + return s +} + +// Filters for the association execution. +type AssociationExecutionTargetsFilter struct { + _ struct{} `type:"structure"` + + // The key value used in the request. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"AssociationExecutionTargetsFilterKey"` + + // The value specified for the key. + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociationExecutionTargetsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationExecutionTargetsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociationExecutionTargetsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationExecutionTargetsFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AssociationExecutionTargetsFilter) SetKey(v string) *AssociationExecutionTargetsFilter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AssociationExecutionTargetsFilter) SetValue(v string) *AssociationExecutionTargetsFilter { + s.Value = &v + return s +} + +// Describes a filter. +type AssociationFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // InstanceId has been deprecated. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true" enum:"AssociationFilterKey"` + + // The filter value. + // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssociationFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AssociationFilter) SetKey(v string) *AssociationFilter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AssociationFilter) SetValue(v string) *AssociationFilter { + s.Value = &v + return s +} + +// You can have at most 2,000 active associations. +type AssociationLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AssociationLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationLimitExceeded) GoString() string { + return s.String() +} + +func newErrorAssociationLimitExceeded(v protocol.ResponseMetadata) error { + return &AssociationLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AssociationLimitExceeded) Code() string { + return "AssociationLimitExceeded" +} + +// Message returns the exception's message. +func (s *AssociationLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
func (s *AssociationLimitExceeded) OrigErr() error {
	return nil
}

// Error satisfies the error interface, rendering as "Code: Message".
func (s *AssociationLimitExceeded) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *AssociationLimitExceeded) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *AssociationLimitExceeded) RequestID() string {
	return s.RespMetadata.RequestID
}

// Information about the association.
type AssociationOverview struct {
	_ struct{} `type:"structure"`

	// Returns the number of targets for the association status. For example, if
	// you created an association with two instances, and one of them was successful,
	// this would return the count of instances by status.
	AssociationStatusAggregatedCount map[string]*int64 `type:"map"`

	// A detailed status of the association.
	DetailedStatus *string `type:"string"`

	// The status of the association. Status can be: Pending, Success, or Failed.
	Status *string `type:"string"`
}

// String returns the string representation
func (s AssociationOverview) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociationOverview) GoString() string {
	return s.String()
}

// SetAssociationStatusAggregatedCount sets the AssociationStatusAggregatedCount field's value.
func (s *AssociationOverview) SetAssociationStatusAggregatedCount(v map[string]*int64) *AssociationOverview {
	s.AssociationStatusAggregatedCount = v
	return s
}

// SetDetailedStatus sets the DetailedStatus field's value.
func (s *AssociationOverview) SetDetailedStatus(v string) *AssociationOverview {
	s.DetailedStatus = &v
	return s
}

// SetStatus sets the Status field's value.
+func (s *AssociationOverview) SetStatus(v string) *AssociationOverview { + s.Status = &v + return s +} + +// Describes an association status. +type AssociationStatus struct { + _ struct{} `type:"structure"` + + // A user-defined string. + AdditionalInfo *string `type:"string"` + + // The date when the status changed. + // + // Date is a required field + Date *time.Time `type:"timestamp" required:"true"` + + // The reason for the status. + // + // Message is a required field + Message *string `min:"1" type:"string" required:"true"` + + // The status. + // + // Name is a required field + Name *string `type:"string" required:"true" enum:"AssociationStatusName"` +} + +// String returns the string representation +func (s AssociationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociationStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociationStatus"} + if s.Date == nil { + invalidParams.Add(request.NewErrParamRequired("Date")) + } + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Message != nil && len(*s.Message) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Message", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalInfo sets the AdditionalInfo field's value. +func (s *AssociationStatus) SetAdditionalInfo(v string) *AssociationStatus { + s.AdditionalInfo = &v + return s +} + +// SetDate sets the Date field's value. +func (s *AssociationStatus) SetDate(v time.Time) *AssociationStatus { + s.Date = &v + return s +} + +// SetMessage sets the Message field's value. 
+func (s *AssociationStatus) SetMessage(v string) *AssociationStatus { + s.Message = &v + return s +} + +// SetName sets the Name field's value. +func (s *AssociationStatus) SetName(v string) *AssociationStatus { + s.Name = &v + return s +} + +// Information about the association version. +type AssociationVersionInfo struct { + _ struct{} `type:"structure"` + + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + + // The ID created by the system when the association was created. + AssociationId *string `type:"string"` + + // The name specified for the association version when the association version + // was created. + AssociationName *string `type:"string"` + + // The association version. + AssociationVersion *string `type:"string"` + + // The severity level that is assigned to the association. + ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"` + + // The date the association version was created. + CreatedDate *time.Time `type:"timestamp"` + + // The version of a Systems Manager document used when the association version + // was created. + DocumentVersion *string `type:"string"` + + // The maximum number of targets allowed to run the association at the same + // time. You can specify a number, for example 10, or a percentage of the target + // set, for example 10%. The default value is 100%, which means all targets + // run the association at the same time. + // + // If a new instance starts and attempts to run an association while Systems + // Manager is running MaxConcurrency associations, the association is allowed + // to run. During the next association interval, the new instance will process + // its association within the limit specified for MaxConcurrency. 
+ MaxConcurrency *string `min:"1" type:"string"` + + // The number of errors that are allowed before the system stops sending requests + // to run the association on additional targets. You can specify either an absolute + // number of errors, for example 10, or a percentage of the target set, for + // example 10%. If you specify 3, for example, the system stops sending requests + // when the fourth error is received. If you specify 0, then the system stops + // sending requests after the first error is returned. If you run an association + // on 50 instances and set MaxError to 10%, then the system stops sending the + // request when the sixth error is received. + // + // Executions that are already running an association when MaxErrors is reached + // are allowed to complete, but some of these executions may fail as well. If + // you need to ensure that there won't be more than max-errors failed executions, + // set MaxConcurrency to 1 so that executions proceed one at a time. + MaxErrors *string `min:"1" type:"string"` + + // The name specified when the association was created. + Name *string `type:"string"` + + // The location in Amazon S3 specified for the association when the association + // version was created. + OutputLocation *InstanceAssociationOutputLocation `type:"structure"` + + // Parameters specified when the association version was created. + Parameters map[string][]*string `type:"map"` + + // The cron or rate schedule specified for the association when the association + // version was created. + ScheduleExpression *string `min:"1" type:"string"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. 
+ // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + + // The targets specified for the association when the association version was + // created. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s AssociationVersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationVersionInfo) GoString() string { + return s.String() +} + +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *AssociationVersionInfo) SetApplyOnlyAtCronInterval(v bool) *AssociationVersionInfo { + s.ApplyOnlyAtCronInterval = &v + return s +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AssociationVersionInfo) SetAssociationId(v string) *AssociationVersionInfo { + s.AssociationId = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. +func (s *AssociationVersionInfo) SetAssociationName(v string) *AssociationVersionInfo { + s.AssociationName = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *AssociationVersionInfo) SetAssociationVersion(v string) *AssociationVersionInfo { + s.AssociationVersion = &v + return s +} + +// SetComplianceSeverity sets the ComplianceSeverity field's value. +func (s *AssociationVersionInfo) SetComplianceSeverity(v string) *AssociationVersionInfo { + s.ComplianceSeverity = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. 
+func (s *AssociationVersionInfo) SetCreatedDate(v time.Time) *AssociationVersionInfo { + s.CreatedDate = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *AssociationVersionInfo) SetDocumentVersion(v string) *AssociationVersionInfo { + s.DocumentVersion = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *AssociationVersionInfo) SetMaxConcurrency(v string) *AssociationVersionInfo { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *AssociationVersionInfo) SetMaxErrors(v string) *AssociationVersionInfo { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *AssociationVersionInfo) SetName(v string) *AssociationVersionInfo { + s.Name = &v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *AssociationVersionInfo) SetOutputLocation(v *InstanceAssociationOutputLocation) *AssociationVersionInfo { + s.OutputLocation = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *AssociationVersionInfo) SetParameters(v map[string][]*string) *AssociationVersionInfo { + s.Parameters = v + return s +} + +// SetScheduleExpression sets the ScheduleExpression field's value. +func (s *AssociationVersionInfo) SetScheduleExpression(v string) *AssociationVersionInfo { + s.ScheduleExpression = &v + return s +} + +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *AssociationVersionInfo) SetSyncCompliance(v string) *AssociationVersionInfo { + s.SyncCompliance = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *AssociationVersionInfo) SetTargets(v []*Target) *AssociationVersionInfo { + s.Targets = v + return s +} + +// You have reached the maximum number versions allowed for an association. +// Each association has a limit of 1,000 versions. 
+type AssociationVersionLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AssociationVersionLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociationVersionLimitExceeded) GoString() string { + return s.String() +} + +func newErrorAssociationVersionLimitExceeded(v protocol.ResponseMetadata) error { + return &AssociationVersionLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AssociationVersionLimitExceeded) Code() string { + return "AssociationVersionLimitExceeded" +} + +// Message returns the exception's message. +func (s *AssociationVersionLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AssociationVersionLimitExceeded) OrigErr() error { + return nil +} + +func (s *AssociationVersionLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AssociationVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AssociationVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// A structure that includes attributes that describe a document attachment. +type AttachmentContent struct { + _ struct{} `type:"structure"` + + // The cryptographic hash value of the document content. + Hash *string `type:"string"` + + // The hash algorithm used to calculate the hash value. + HashType *string `type:"string" enum:"AttachmentHashType"` + + // The name of an attachment. 
+ Name *string `type:"string"` + + // The size of an attachment in bytes. + Size *int64 `type:"long"` + + // The URL location of the attachment content. + Url *string `type:"string"` +} + +// String returns the string representation +func (s AttachmentContent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachmentContent) GoString() string { + return s.String() +} + +// SetHash sets the Hash field's value. +func (s *AttachmentContent) SetHash(v string) *AttachmentContent { + s.Hash = &v + return s +} + +// SetHashType sets the HashType field's value. +func (s *AttachmentContent) SetHashType(v string) *AttachmentContent { + s.HashType = &v + return s +} + +// SetName sets the Name field's value. +func (s *AttachmentContent) SetName(v string) *AttachmentContent { + s.Name = &v + return s +} + +// SetSize sets the Size field's value. +func (s *AttachmentContent) SetSize(v int64) *AttachmentContent { + s.Size = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *AttachmentContent) SetUrl(v string) *AttachmentContent { + s.Url = &v + return s +} + +// An attribute of an attachment, such as the attachment name. +type AttachmentInformation struct { + _ struct{} `type:"structure"` + + // The name of the attachment. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AttachmentInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachmentInformation) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AttachmentInformation) SetName(v string) *AttachmentInformation { + s.Name = &v + return s +} + +// Identifying information about a document attachment, including the file name +// and a key-value pair that identifies the location of an attachment to a document. 
+type AttachmentsSource struct { + _ struct{} `type:"structure"` + + // The key of a key-value pair that identifies the location of an attachment + // to a document. + Key *string `type:"string" enum:"AttachmentsSourceKey"` + + // The name of the document attachment file. + Name *string `type:"string"` + + // The value of a key-value pair that identifies the location of an attachment + // to a document. The format for Value depends on the type of key you specify. + // + // * For the key SourceUrl, the value is an S3 bucket location. For example: + // "Values": [ "s3://doc-example-bucket/my-folder" ] + // + // * For the key S3FileUrl, the value is a file in an S3 bucket. For example: + // "Values": [ "s3://doc-example-bucket/my-folder/my-file.py" ] + // + // * For the key AttachmentReference, the value is constructed from the name + // of another SSM document in your account, a version number of that document, + // and a file attached to that document version that you want to reuse. For + // example: "Values": [ "MyOtherDocument/3/my-other-file.py" ] However, if + // the SSM document is shared with you from another account, the full SSM + // document ARN must be specified instead of the document name only. For + // example: "Values": [ "arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py" + // ] + Values []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s AttachmentsSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachmentsSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AttachmentsSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachmentsSource"} + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AttachmentsSource) SetKey(v string) *AttachmentsSource { + s.Key = &v + return s +} + +// SetName sets the Name field's value. +func (s *AttachmentsSource) SetName(v string) *AttachmentsSource { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *AttachmentsSource) SetValues(v []*string) *AttachmentsSource { + s.Values = v + return s +} + +// An Automation document with the specified name could not be found. +type AutomationDefinitionNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AutomationDefinitionNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomationDefinitionNotFoundException) GoString() string { + return s.String() +} + +func newErrorAutomationDefinitionNotFoundException(v protocol.ResponseMetadata) error { + return &AutomationDefinitionNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AutomationDefinitionNotFoundException) Code() string { + return "AutomationDefinitionNotFoundException" +} + +// Message returns the exception's message. +func (s *AutomationDefinitionNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *AutomationDefinitionNotFoundException) OrigErr() error { + return nil +} + +func (s *AutomationDefinitionNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AutomationDefinitionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AutomationDefinitionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An Automation document with the specified name and version could not be found. +type AutomationDefinitionVersionNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AutomationDefinitionVersionNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomationDefinitionVersionNotFoundException) GoString() string { + return s.String() +} + +func newErrorAutomationDefinitionVersionNotFoundException(v protocol.ResponseMetadata) error { + return &AutomationDefinitionVersionNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AutomationDefinitionVersionNotFoundException) Code() string { + return "AutomationDefinitionVersionNotFoundException" +} + +// Message returns the exception's message. +func (s *AutomationDefinitionVersionNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *AutomationDefinitionVersionNotFoundException) OrigErr() error { + return nil +} + +func (s *AutomationDefinitionVersionNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AutomationDefinitionVersionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AutomationDefinitionVersionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Detailed information about the current state of an individual Automation +// execution. +type AutomationExecution struct { + _ struct{} `type:"structure"` + + // The execution ID. + AutomationExecutionId *string `min:"36" type:"string"` + + // The execution status of the Automation. + AutomationExecutionStatus *string `type:"string" enum:"AutomationExecutionStatus"` + + // The action of the step that is currently running. + CurrentAction *string `type:"string"` + + // The name of the step that is currently running. + CurrentStepName *string `type:"string"` + + // The name of the Automation document used during the execution. + DocumentName *string `type:"string"` + + // The version of the document to use during execution. + DocumentVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the user who ran the automation. + ExecutedBy *string `type:"string"` + + // The time the execution finished. + ExecutionEndTime *time.Time `type:"timestamp"` + + // The time the execution started. + ExecutionStartTime *time.Time `type:"timestamp"` + + // A message describing why an execution has failed, if the status is set to + // Failed. + FailureMessage *string `type:"string"` + + // The MaxConcurrency value specified by the user when the execution started. 
+ MaxConcurrency *string `min:"1" type:"string"` + + // The MaxErrors value specified by the user when the execution started. + MaxErrors *string `min:"1" type:"string"` + + // The automation execution mode. + Mode *string `type:"string" enum:"ExecutionMode"` + + // The list of execution outputs as defined in the automation document. + Outputs map[string][]*string `min:"1" type:"map"` + + // The key-value map of execution parameters, which were supplied when calling + // StartAutomationExecution. + Parameters map[string][]*string `min:"1" type:"map"` + + // The AutomationExecutionId of the parent automation. + ParentAutomationExecutionId *string `min:"36" type:"string"` + + // An aggregate of step execution statuses displayed in the AWS Console for + // a multi-Region and multi-account Automation execution. + ProgressCounters *ProgressCounters `type:"structure"` + + // A list of resolved targets in the rate control execution. + ResolvedTargets *ResolvedTargets `type:"structure"` + + // A list of details about the current state of all steps that comprise an execution. + // An Automation document contains a list of steps that are run in order. + StepExecutions []*StepExecution `type:"list"` + + // A boolean value that indicates if the response contains the full list of + // the Automation step executions. If true, use the DescribeAutomationStepExecutions + // API action to get the full list of step executions. + StepExecutionsTruncated *bool `type:"boolean"` + + // The target of the execution. + Target *string `type:"string"` + + // The combination of AWS Regions and/or AWS accounts where you want to run + // the Automation. + TargetLocations []*TargetLocation `min:"1" type:"list"` + + // The specified key-value mapping of document parameters to target resources. + TargetMaps []map[string][]*string `type:"list"` + + // The parameter name. + TargetParameterName *string `min:"1" type:"string"` + + // The specified targets. 
+ Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s AutomationExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomationExecution) GoString() string { + return s.String() +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *AutomationExecution) SetAutomationExecutionId(v string) *AutomationExecution { + s.AutomationExecutionId = &v + return s +} + +// SetAutomationExecutionStatus sets the AutomationExecutionStatus field's value. +func (s *AutomationExecution) SetAutomationExecutionStatus(v string) *AutomationExecution { + s.AutomationExecutionStatus = &v + return s +} + +// SetCurrentAction sets the CurrentAction field's value. +func (s *AutomationExecution) SetCurrentAction(v string) *AutomationExecution { + s.CurrentAction = &v + return s +} + +// SetCurrentStepName sets the CurrentStepName field's value. +func (s *AutomationExecution) SetCurrentStepName(v string) *AutomationExecution { + s.CurrentStepName = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *AutomationExecution) SetDocumentName(v string) *AutomationExecution { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *AutomationExecution) SetDocumentVersion(v string) *AutomationExecution { + s.DocumentVersion = &v + return s +} + +// SetExecutedBy sets the ExecutedBy field's value. +func (s *AutomationExecution) SetExecutedBy(v string) *AutomationExecution { + s.ExecutedBy = &v + return s +} + +// SetExecutionEndTime sets the ExecutionEndTime field's value. +func (s *AutomationExecution) SetExecutionEndTime(v time.Time) *AutomationExecution { + s.ExecutionEndTime = &v + return s +} + +// SetExecutionStartTime sets the ExecutionStartTime field's value. 
+func (s *AutomationExecution) SetExecutionStartTime(v time.Time) *AutomationExecution { + s.ExecutionStartTime = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *AutomationExecution) SetFailureMessage(v string) *AutomationExecution { + s.FailureMessage = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *AutomationExecution) SetMaxConcurrency(v string) *AutomationExecution { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *AutomationExecution) SetMaxErrors(v string) *AutomationExecution { + s.MaxErrors = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *AutomationExecution) SetMode(v string) *AutomationExecution { + s.Mode = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *AutomationExecution) SetOutputs(v map[string][]*string) *AutomationExecution { + s.Outputs = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *AutomationExecution) SetParameters(v map[string][]*string) *AutomationExecution { + s.Parameters = v + return s +} + +// SetParentAutomationExecutionId sets the ParentAutomationExecutionId field's value. +func (s *AutomationExecution) SetParentAutomationExecutionId(v string) *AutomationExecution { + s.ParentAutomationExecutionId = &v + return s +} + +// SetProgressCounters sets the ProgressCounters field's value. +func (s *AutomationExecution) SetProgressCounters(v *ProgressCounters) *AutomationExecution { + s.ProgressCounters = v + return s +} + +// SetResolvedTargets sets the ResolvedTargets field's value. +func (s *AutomationExecution) SetResolvedTargets(v *ResolvedTargets) *AutomationExecution { + s.ResolvedTargets = v + return s +} + +// SetStepExecutions sets the StepExecutions field's value. 
+func (s *AutomationExecution) SetStepExecutions(v []*StepExecution) *AutomationExecution { + s.StepExecutions = v + return s +} + +// SetStepExecutionsTruncated sets the StepExecutionsTruncated field's value. +func (s *AutomationExecution) SetStepExecutionsTruncated(v bool) *AutomationExecution { + s.StepExecutionsTruncated = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *AutomationExecution) SetTarget(v string) *AutomationExecution { + s.Target = &v + return s +} + +// SetTargetLocations sets the TargetLocations field's value. +func (s *AutomationExecution) SetTargetLocations(v []*TargetLocation) *AutomationExecution { + s.TargetLocations = v + return s +} + +// SetTargetMaps sets the TargetMaps field's value. +func (s *AutomationExecution) SetTargetMaps(v []map[string][]*string) *AutomationExecution { + s.TargetMaps = v + return s +} + +// SetTargetParameterName sets the TargetParameterName field's value. +func (s *AutomationExecution) SetTargetParameterName(v string) *AutomationExecution { + s.TargetParameterName = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *AutomationExecution) SetTargets(v []*Target) *AutomationExecution { + s.Targets = v + return s +} + +// A filter used to match specific automation executions. This is used to limit +// the scope of Automation execution information returned. +type AutomationExecutionFilter struct { + _ struct{} `type:"structure"` + + // One or more keys to limit the results. Valid filter keys include the following: + // DocumentNamePrefix, ExecutionStatus, ExecutionId, ParentExecutionId, CurrentAction, + // StartTimeBefore, StartTimeAfter, TargetResourceGroup. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"AutomationExecutionFilterKey"` + + // The values used to limit the execution information associated with the filter's + // key. 
+ // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AutomationExecutionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomationExecutionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutomationExecutionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutomationExecutionFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *AutomationExecutionFilter) SetKey(v string) *AutomationExecutionFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *AutomationExecutionFilter) SetValues(v []*string) *AutomationExecutionFilter { + s.Values = v + return s +} + +// The number of simultaneously running Automation executions exceeded the allowable +// limit. 
+type AutomationExecutionLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AutomationExecutionLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomationExecutionLimitExceededException) GoString() string { + return s.String() +} + +func newErrorAutomationExecutionLimitExceededException(v protocol.ResponseMetadata) error { + return &AutomationExecutionLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AutomationExecutionLimitExceededException) Code() string { + return "AutomationExecutionLimitExceededException" +} + +// Message returns the exception's message. +func (s *AutomationExecutionLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AutomationExecutionLimitExceededException) OrigErr() error { + return nil +} + +func (s *AutomationExecutionLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AutomationExecutionLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AutomationExecutionLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Details about a specific Automation execution. +type AutomationExecutionMetadata struct { + _ struct{} `type:"structure"` + + // The execution ID. + AutomationExecutionId *string `min:"36" type:"string"` + + // The status of the execution. 
+ AutomationExecutionStatus *string `type:"string" enum:"AutomationExecutionStatus"` + + // Use this filter with DescribeAutomationExecutions. Specify either Local or + // CrossAccount. CrossAccount is an Automation that runs in multiple AWS Regions + // and accounts. For more information, see Running Automation workflows in multiple + // AWS Regions and accounts (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) + // in the AWS Systems Manager User Guide. + AutomationType *string `type:"string" enum:"AutomationType"` + + // The action of the step that is currently running. + CurrentAction *string `type:"string"` + + // The name of the step that is currently running. + CurrentStepName *string `type:"string"` + + // The name of the Automation document used during execution. + DocumentName *string `type:"string"` + + // The document version used during the execution. + DocumentVersion *string `type:"string"` + + // The IAM role ARN of the user who ran the Automation. + ExecutedBy *string `type:"string"` + + // The time the execution finished. This is not populated if the execution is + // still in progress. + ExecutionEndTime *time.Time `type:"timestamp"` + + // The time the execution started. + ExecutionStartTime *time.Time `type:"timestamp"` + + // The list of execution outputs as defined in the Automation document. + FailureMessage *string `type:"string"` + + // An S3 bucket where execution information is stored. + LogFile *string `type:"string"` + + // The MaxConcurrency value specified by the user when starting the Automation. + MaxConcurrency *string `min:"1" type:"string"` + + // The MaxErrors value specified by the user when starting the Automation. + MaxErrors *string `min:"1" type:"string"` + + // The Automation execution mode. + Mode *string `type:"string" enum:"ExecutionMode"` + + // The list of execution outputs as defined in the Automation document. 
+	Outputs map[string][]*string `min:"1" type:"map"`
+
+	// The ExecutionId of the parent Automation.
+	ParentAutomationExecutionId *string `min:"36" type:"string"`
+
+	// A list of targets that resolved during the execution.
+	ResolvedTargets *ResolvedTargets `type:"structure"`
+
+	// The target of the execution.
+	Target *string `type:"string"`
+
+	// The specified key-value mapping of document parameters to target resources.
+	TargetMaps []map[string][]*string `type:"list"`
+
+	// The name of the parameter used as the target resource for the rate-controlled
+	// execution.
+	TargetParameterName *string `min:"1" type:"string"`
+
+	// The targets defined by the user when starting the Automation.
+	Targets []*Target `type:"list"`
+}
+
+// String returns the string representation
+func (s AutomationExecutionMetadata) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutomationExecutionMetadata) GoString() string {
+	return s.String()
+}
+
+// SetAutomationExecutionId sets the AutomationExecutionId field's value.
+func (s *AutomationExecutionMetadata) SetAutomationExecutionId(v string) *AutomationExecutionMetadata {
+	s.AutomationExecutionId = &v
+	return s
+}
+
+// SetAutomationExecutionStatus sets the AutomationExecutionStatus field's value.
+func (s *AutomationExecutionMetadata) SetAutomationExecutionStatus(v string) *AutomationExecutionMetadata {
+	s.AutomationExecutionStatus = &v
+	return s
+}
+
+// SetAutomationType sets the AutomationType field's value.
+func (s *AutomationExecutionMetadata) SetAutomationType(v string) *AutomationExecutionMetadata {
+	s.AutomationType = &v
+	return s
+}
+
+// SetCurrentAction sets the CurrentAction field's value.
+func (s *AutomationExecutionMetadata) SetCurrentAction(v string) *AutomationExecutionMetadata {
+	s.CurrentAction = &v
+	return s
+}
+
+// SetCurrentStepName sets the CurrentStepName field's value.
+func (s *AutomationExecutionMetadata) SetCurrentStepName(v string) *AutomationExecutionMetadata { + s.CurrentStepName = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *AutomationExecutionMetadata) SetDocumentName(v string) *AutomationExecutionMetadata { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *AutomationExecutionMetadata) SetDocumentVersion(v string) *AutomationExecutionMetadata { + s.DocumentVersion = &v + return s +} + +// SetExecutedBy sets the ExecutedBy field's value. +func (s *AutomationExecutionMetadata) SetExecutedBy(v string) *AutomationExecutionMetadata { + s.ExecutedBy = &v + return s +} + +// SetExecutionEndTime sets the ExecutionEndTime field's value. +func (s *AutomationExecutionMetadata) SetExecutionEndTime(v time.Time) *AutomationExecutionMetadata { + s.ExecutionEndTime = &v + return s +} + +// SetExecutionStartTime sets the ExecutionStartTime field's value. +func (s *AutomationExecutionMetadata) SetExecutionStartTime(v time.Time) *AutomationExecutionMetadata { + s.ExecutionStartTime = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *AutomationExecutionMetadata) SetFailureMessage(v string) *AutomationExecutionMetadata { + s.FailureMessage = &v + return s +} + +// SetLogFile sets the LogFile field's value. +func (s *AutomationExecutionMetadata) SetLogFile(v string) *AutomationExecutionMetadata { + s.LogFile = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *AutomationExecutionMetadata) SetMaxConcurrency(v string) *AutomationExecutionMetadata { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *AutomationExecutionMetadata) SetMaxErrors(v string) *AutomationExecutionMetadata { + s.MaxErrors = &v + return s +} + +// SetMode sets the Mode field's value. 
+func (s *AutomationExecutionMetadata) SetMode(v string) *AutomationExecutionMetadata { + s.Mode = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *AutomationExecutionMetadata) SetOutputs(v map[string][]*string) *AutomationExecutionMetadata { + s.Outputs = v + return s +} + +// SetParentAutomationExecutionId sets the ParentAutomationExecutionId field's value. +func (s *AutomationExecutionMetadata) SetParentAutomationExecutionId(v string) *AutomationExecutionMetadata { + s.ParentAutomationExecutionId = &v + return s +} + +// SetResolvedTargets sets the ResolvedTargets field's value. +func (s *AutomationExecutionMetadata) SetResolvedTargets(v *ResolvedTargets) *AutomationExecutionMetadata { + s.ResolvedTargets = v + return s +} + +// SetTarget sets the Target field's value. +func (s *AutomationExecutionMetadata) SetTarget(v string) *AutomationExecutionMetadata { + s.Target = &v + return s +} + +// SetTargetMaps sets the TargetMaps field's value. +func (s *AutomationExecutionMetadata) SetTargetMaps(v []map[string][]*string) *AutomationExecutionMetadata { + s.TargetMaps = v + return s +} + +// SetTargetParameterName sets the TargetParameterName field's value. +func (s *AutomationExecutionMetadata) SetTargetParameterName(v string) *AutomationExecutionMetadata { + s.TargetParameterName = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *AutomationExecutionMetadata) SetTargets(v []*Target) *AutomationExecutionMetadata { + s.Targets = v + return s +} + +// There is no automation execution information for the requested automation +// execution ID. 
+type AutomationExecutionNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s AutomationExecutionNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutomationExecutionNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorAutomationExecutionNotFoundException(v protocol.ResponseMetadata) error {
+	return &AutomationExecutionNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *AutomationExecutionNotFoundException) Code() string {
+	return "AutomationExecutionNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *AutomationExecutionNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AutomationExecutionNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *AutomationExecutionNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AutomationExecutionNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AutomationExecutionNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The specified step name and execution ID don't exist. Verify the information
+// and try again.
+type AutomationStepNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s AutomationStepNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutomationStepNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorAutomationStepNotFoundException(v protocol.ResponseMetadata) error {
+	return &AutomationStepNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *AutomationStepNotFoundException) Code() string {
+	return "AutomationStepNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *AutomationStepNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AutomationStepNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *AutomationStepNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AutomationStepNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AutomationStepNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+type CancelCommandInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the command you want to cancel.
+	//
+	// CommandId is a required field
+	CommandId *string `min:"36" type:"string" required:"true"`
+
+	// (Optional) A list of instance IDs on which you want to cancel the command.
+	// If not provided, the command is canceled on every instance on which it was
+	// requested.
+ InstanceIds []*string `type:"list"` +} + +// String returns the string representation +func (s CancelCommandInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelCommandInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelCommandInput"} + if s.CommandId == nil { + invalidParams.Add(request.NewErrParamRequired("CommandId")) + } + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommandId sets the CommandId field's value. +func (s *CancelCommandInput) SetCommandId(v string) *CancelCommandInput { + s.CommandId = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *CancelCommandInput) SetInstanceIds(v []*string) *CancelCommandInput { + s.InstanceIds = v + return s +} + +// Whether or not the command was successfully canceled. There is no guarantee +// that a request can be canceled. +type CancelCommandOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelCommandOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelCommandOutput) GoString() string { + return s.String() +} + +type CancelMaintenanceWindowExecutionInput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window execution to stop. 
+ // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelMaintenanceWindowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMaintenanceWindowExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelMaintenanceWindowExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelMaintenanceWindowExecutionInput"} + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *CancelMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) *CancelMaintenanceWindowExecutionInput { + s.WindowExecutionId = &v + return s +} + +type CancelMaintenanceWindowExecutionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window execution that has been stopped. + WindowExecutionId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s CancelMaintenanceWindowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMaintenanceWindowExecutionOutput) GoString() string { + return s.String() +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. 
+func (s *CancelMaintenanceWindowExecutionOutput) SetWindowExecutionId(v string) *CancelMaintenanceWindowExecutionOutput { + s.WindowExecutionId = &v + return s +} + +// Configuration options for sending command output to CloudWatch Logs. +type CloudWatchOutputConfig struct { + _ struct{} `type:"structure"` + + // The name of the CloudWatch log group where you want to send command output. + // If you don't specify a group name, Systems Manager automatically creates + // a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName. + CloudWatchLogGroupName *string `min:"1" type:"string"` + + // Enables Systems Manager to send command output to CloudWatch Logs. + CloudWatchOutputEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s CloudWatchOutputConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchOutputConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchOutputConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudWatchOutputConfig"} + if s.CloudWatchLogGroupName != nil && len(*s.CloudWatchLogGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CloudWatchLogGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCloudWatchLogGroupName sets the CloudWatchLogGroupName field's value. +func (s *CloudWatchOutputConfig) SetCloudWatchLogGroupName(v string) *CloudWatchOutputConfig { + s.CloudWatchLogGroupName = &v + return s +} + +// SetCloudWatchOutputEnabled sets the CloudWatchOutputEnabled field's value. +func (s *CloudWatchOutputConfig) SetCloudWatchOutputEnabled(v bool) *CloudWatchOutputConfig { + s.CloudWatchOutputEnabled = &v + return s +} + +// Describes a command request. 
+type Command struct { + _ struct{} `type:"structure"` + + // CloudWatch Logs information where you want Systems Manager to send the command + // output. + CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"` + + // A unique identifier for this command. + CommandId *string `min:"36" type:"string"` + + // User-specified information about the command, such as a brief description + // of what the command should do. + Comment *string `type:"string"` + + // The number of targets for which the command invocation reached a terminal + // state. Terminal states include the following: Success, Failed, Execution + // Timed Out, Delivery Timed Out, Canceled, Terminated, or Undeliverable. + CompletedCount *int64 `type:"integer"` + + // The number of targets for which the status is Delivery Timed Out. + DeliveryTimedOutCount *int64 `type:"integer"` + + // The name of the document requested for execution. + DocumentName *string `type:"string"` + + // The SSM document version. + DocumentVersion *string `type:"string"` + + // The number of targets for which the status is Failed or Execution Timed Out. + ErrorCount *int64 `type:"integer"` + + // If this time is reached and the command has not already started running, + // it will not run. Calculated based on the ExpiresAfter user input provided + // as part of the SendCommand API. + ExpiresAfter *time.Time `type:"timestamp"` + + // The instance IDs against which this command was requested. + InstanceIds []*string `type:"list"` + + // The maximum number of instances that are allowed to run the command at the + // same time. You can specify a number of instances, such as 10, or a percentage + // of instances, such as 10%. The default value is 50. For more information + // about how to use MaxConcurrency, see Running commands using Systems Manager + // Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) + // in the AWS Systems Manager User Guide. 
+ MaxConcurrency *string `min:"1" type:"string"` + + // The maximum number of errors allowed before the system stops sending the + // command to additional targets. You can specify a number of errors, such as + // 10, or a percentage or errors, such as 10%. The default value is 0. For more + // information about how to use MaxErrors, see Running commands using Systems + // Manager Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) + // in the AWS Systems Manager User Guide. + MaxErrors *string `min:"1" type:"string"` + + // Configurations for sending notifications about command status changes. + NotificationConfig *NotificationConfig `type:"structure"` + + // The S3 bucket where the responses to the command executions should be stored. + // This was requested when issuing the command. + OutputS3BucketName *string `min:"3" type:"string"` + + // The S3 directory path inside the bucket where the responses to the command + // executions should be stored. This was requested when issuing the command. + OutputS3KeyPrefix *string `type:"string"` + + // (Deprecated) You can no longer specify this parameter. The system ignores + // it. Instead, Systems Manager automatically determines the Region of the S3 + // bucket. + OutputS3Region *string `min:"3" type:"string"` + + // The parameter values to be inserted in the document when running the command. + Parameters map[string][]*string `type:"map"` + + // The date and time the command was requested. + RequestedDateTime *time.Time `type:"timestamp"` + + // The IAM service role that Run Command uses to act on your behalf when sending + // notifications about command status changes. + ServiceRole *string `type:"string"` + + // The status of the command. + Status *string `type:"string" enum:"CommandStatus"` + + // A detailed status of the command execution. 
StatusDetails includes more information + // than Status because it includes states resulting from error and concurrency + // control parameters. StatusDetails can show different results than Status. + // For more information about these statuses, see Understanding command statuses + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // in the AWS Systems Manager User Guide. StatusDetails can be one of the following + // values: + // + // * Pending: The command has not been sent to any instances. + // + // * In Progress: The command has been sent to at least one instance but + // has not reached a final state on all instances. + // + // * Success: The command successfully ran on all invocations. This is a + // terminal state. + // + // * Delivery Timed Out: The value of MaxErrors or more command invocations + // shows a status of Delivery Timed Out. This is a terminal state. + // + // * Execution Timed Out: The value of MaxErrors or more command invocations + // shows a status of Execution Timed Out. This is a terminal state. + // + // * Failed: The value of MaxErrors or more command invocations shows a status + // of Failed. This is a terminal state. + // + // * Incomplete: The command was attempted on all instances and one or more + // invocations does not have a value of Success but not enough invocations + // failed for the status to be Failed. This is a terminal state. + // + // * Canceled: The command was terminated before it was completed. This is + // a terminal state. + // + // * Rate Exceeded: The number of instances targeted by the command exceeded + // the account limit for pending invocations. The system has canceled the + // command before running it on any instance. This is a terminal state. + StatusDetails *string `type:"string"` + + // The number of targets for the command. 
+ TargetCount *int64 `type:"integer"` + + // An array of search criteria that targets instances using a Key,Value combination + // that you specify. Targets is required if you don't provide one or more instance + // IDs in the call. + Targets []*Target `type:"list"` + + // The TimeoutSeconds value specified for a command. + TimeoutSeconds *int64 `min:"30" type:"integer"` +} + +// String returns the string representation +func (s Command) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Command) GoString() string { + return s.String() +} + +// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value. +func (s *Command) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *Command { + s.CloudWatchOutputConfig = v + return s +} + +// SetCommandId sets the CommandId field's value. +func (s *Command) SetCommandId(v string) *Command { + s.CommandId = &v + return s +} + +// SetComment sets the Comment field's value. +func (s *Command) SetComment(v string) *Command { + s.Comment = &v + return s +} + +// SetCompletedCount sets the CompletedCount field's value. +func (s *Command) SetCompletedCount(v int64) *Command { + s.CompletedCount = &v + return s +} + +// SetDeliveryTimedOutCount sets the DeliveryTimedOutCount field's value. +func (s *Command) SetDeliveryTimedOutCount(v int64) *Command { + s.DeliveryTimedOutCount = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *Command) SetDocumentName(v string) *Command { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *Command) SetDocumentVersion(v string) *Command { + s.DocumentVersion = &v + return s +} + +// SetErrorCount sets the ErrorCount field's value. +func (s *Command) SetErrorCount(v int64) *Command { + s.ErrorCount = &v + return s +} + +// SetExpiresAfter sets the ExpiresAfter field's value. 
+func (s *Command) SetExpiresAfter(v time.Time) *Command { + s.ExpiresAfter = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *Command) SetInstanceIds(v []*string) *Command { + s.InstanceIds = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *Command) SetMaxConcurrency(v string) *Command { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *Command) SetMaxErrors(v string) *Command { + s.MaxErrors = &v + return s +} + +// SetNotificationConfig sets the NotificationConfig field's value. +func (s *Command) SetNotificationConfig(v *NotificationConfig) *Command { + s.NotificationConfig = v + return s +} + +// SetOutputS3BucketName sets the OutputS3BucketName field's value. +func (s *Command) SetOutputS3BucketName(v string) *Command { + s.OutputS3BucketName = &v + return s +} + +// SetOutputS3KeyPrefix sets the OutputS3KeyPrefix field's value. +func (s *Command) SetOutputS3KeyPrefix(v string) *Command { + s.OutputS3KeyPrefix = &v + return s +} + +// SetOutputS3Region sets the OutputS3Region field's value. +func (s *Command) SetOutputS3Region(v string) *Command { + s.OutputS3Region = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *Command) SetParameters(v map[string][]*string) *Command { + s.Parameters = v + return s +} + +// SetRequestedDateTime sets the RequestedDateTime field's value. +func (s *Command) SetRequestedDateTime(v time.Time) *Command { + s.RequestedDateTime = &v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *Command) SetServiceRole(v string) *Command { + s.ServiceRole = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Command) SetStatus(v string) *Command { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. 
+func (s *Command) SetStatusDetails(v string) *Command { + s.StatusDetails = &v + return s +} + +// SetTargetCount sets the TargetCount field's value. +func (s *Command) SetTargetCount(v int64) *Command { + s.TargetCount = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *Command) SetTargets(v []*Target) *Command { + s.Targets = v + return s +} + +// SetTimeoutSeconds sets the TimeoutSeconds field's value. +func (s *Command) SetTimeoutSeconds(v int64) *Command { + s.TimeoutSeconds = &v + return s +} + +// Describes a command filter. +// +// An instance ID can't be specified when a command status is Pending because +// the command hasn't run on the instance yet. +type CommandFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true" enum:"CommandFilterKey"` + + // The filter value. Valid values for each filter key are as follows: + // + // * InvokedAfter: Specify a timestamp to limit your results. For example, + // specify 2018-07-07T00:00:00Z to see a list of command executions occurring + // July 7, 2018, and later. + // + // * InvokedBefore: Specify a timestamp to limit your results. For example, + // specify 2018-07-07T00:00:00Z to see a list of command executions from + // before July 7, 2018. + // + // * Status: Specify a valid command status to see a list of all command + // executions with that status. Status values you can specify include: Pending + // InProgress Success Cancelled Failed TimedOut Cancelling + // + // * DocumentName: Specify name of the SSM document for which you want to + // see command execution results. For example, specify AWS-RunPatchBaseline + // to see command executions that used this SSM document to perform security + // patching operations on instances. 
+ // + // * ExecutionStage: Specify one of the following values: Executing: Returns + // a list of command executions that are currently still running. Complete: + // Returns a list of command executions that have already completed. + // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CommandFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CommandFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CommandFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *CommandFilter) SetKey(v string) *CommandFilter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *CommandFilter) SetValue(v string) *CommandFilter { + s.Value = &v + return s +} + +// An invocation is copy of a command sent to a specific instance. A command +// can apply to one or more instances. A command invocation applies to one instance. +// For example, if a user runs SendCommand against three instances, then a command +// invocation is created for each requested instance ID. A command invocation +// returns status and detail information about a command you ran. +type CommandInvocation struct { + _ struct{} `type:"structure"` + + // CloudWatch Logs information where you want Systems Manager to send the command + // output. 
+ CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"` + + // The command against which this invocation was requested. + CommandId *string `min:"36" type:"string"` + + CommandPlugins []*CommandPlugin `type:"list"` + + // User-specified information about the command, such as a brief description + // of what the command should do. + Comment *string `type:"string"` + + // The document name that was requested for execution. + DocumentName *string `type:"string"` + + // The SSM document version. + DocumentVersion *string `type:"string"` + + // The instance ID in which this invocation was requested. + InstanceId *string `type:"string"` + + // The name of the invocation target. For EC2 instances this is the value for + // the aws:Name tag. For on-premises instances, this is the name of the instance. + InstanceName *string `type:"string"` + + // Configurations for sending notifications about command status changes on + // a per instance basis. + NotificationConfig *NotificationConfig `type:"structure"` + + // The time and date the request was sent to this instance. + RequestedDateTime *time.Time `type:"timestamp"` + + // The IAM service role that Run Command uses to act on your behalf when sending + // notifications about command status changes on a per instance basis. + ServiceRole *string `type:"string"` + + // The URL to the plugin's StdErr file in Amazon S3, if the S3 bucket was defined + // for the parent command. For an invocation, StandardErrorUrl is populated + // if there is just one plugin defined for the command, and the S3 bucket was + // defined for the command. + StandardErrorUrl *string `type:"string"` + + // The URL to the plugin's StdOut file in Amazon S3, if the S3 bucket was defined + // for the parent command. For an invocation, StandardOutputUrl is populated + // if there is just one plugin defined for the command, and the S3 bucket was + // defined for the command. 
+ StandardOutputUrl *string `type:"string"` + + // Whether or not the invocation succeeded, failed, or is pending. + Status *string `type:"string" enum:"CommandInvocationStatus"` + + // A detailed status of the command execution for each invocation (each instance + // targeted by the command). StatusDetails includes more information than Status + // because it includes states resulting from error and concurrency control parameters. + // StatusDetails can show different results than Status. For more information + // about these statuses, see Understanding command statuses (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // in the AWS Systems Manager User Guide. StatusDetails can be one of the following + // values: + // + // * Pending: The command has not been sent to the instance. + // + // * In Progress: The command has been sent to the instance but has not reached + // a terminal state. + // + // * Success: The execution of the command or plugin was successfully completed. + // This is a terminal state. + // + // * Delivery Timed Out: The command was not delivered to the instance before + // the delivery timeout expired. Delivery timeouts do not count against the + // parent command's MaxErrors limit, but they do contribute to whether the + // parent command status is Success or Incomplete. This is a terminal state. + // + // * Execution Timed Out: Command execution started on the instance, but + // the execution was not complete before the execution timeout expired. Execution + // timeouts count against the MaxErrors limit of the parent command. This + // is a terminal state. + // + // * Failed: The command was not successful on the instance. For a plugin, + // this indicates that the result code was not zero. For a command invocation, + // this indicates that the result code for one or more plugins was not zero. + // Invocation failures count against the MaxErrors limit of the parent command. + // This is a terminal state. 
+ // + // * Canceled: The command was terminated before it was completed. This is + // a terminal state. + // + // * Undeliverable: The command can't be delivered to the instance. The instance + // might not exist or might not be responding. Undeliverable invocations + // don't count against the parent command's MaxErrors limit and don't contribute + // to whether the parent command status is Success or Incomplete. This is + // a terminal state. + // + // * Terminated: The parent command exceeded its MaxErrors limit and subsequent + // command invocations were canceled by the system. This is a terminal state. + StatusDetails *string `type:"string"` + + // Gets the trace output sent by the agent. + TraceOutput *string `type:"string"` +} + +// String returns the string representation +func (s CommandInvocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandInvocation) GoString() string { + return s.String() +} + +// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value. +func (s *CommandInvocation) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *CommandInvocation { + s.CloudWatchOutputConfig = v + return s +} + +// SetCommandId sets the CommandId field's value. +func (s *CommandInvocation) SetCommandId(v string) *CommandInvocation { + s.CommandId = &v + return s +} + +// SetCommandPlugins sets the CommandPlugins field's value. +func (s *CommandInvocation) SetCommandPlugins(v []*CommandPlugin) *CommandInvocation { + s.CommandPlugins = v + return s +} + +// SetComment sets the Comment field's value. +func (s *CommandInvocation) SetComment(v string) *CommandInvocation { + s.Comment = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *CommandInvocation) SetDocumentName(v string) *CommandInvocation { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. 
+func (s *CommandInvocation) SetDocumentVersion(v string) *CommandInvocation { + s.DocumentVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *CommandInvocation) SetInstanceId(v string) *CommandInvocation { + s.InstanceId = &v + return s +} + +// SetInstanceName sets the InstanceName field's value. +func (s *CommandInvocation) SetInstanceName(v string) *CommandInvocation { + s.InstanceName = &v + return s +} + +// SetNotificationConfig sets the NotificationConfig field's value. +func (s *CommandInvocation) SetNotificationConfig(v *NotificationConfig) *CommandInvocation { + s.NotificationConfig = v + return s +} + +// SetRequestedDateTime sets the RequestedDateTime field's value. +func (s *CommandInvocation) SetRequestedDateTime(v time.Time) *CommandInvocation { + s.RequestedDateTime = &v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *CommandInvocation) SetServiceRole(v string) *CommandInvocation { + s.ServiceRole = &v + return s +} + +// SetStandardErrorUrl sets the StandardErrorUrl field's value. +func (s *CommandInvocation) SetStandardErrorUrl(v string) *CommandInvocation { + s.StandardErrorUrl = &v + return s +} + +// SetStandardOutputUrl sets the StandardOutputUrl field's value. +func (s *CommandInvocation) SetStandardOutputUrl(v string) *CommandInvocation { + s.StandardOutputUrl = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CommandInvocation) SetStatus(v string) *CommandInvocation { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *CommandInvocation) SetStatusDetails(v string) *CommandInvocation { + s.StatusDetails = &v + return s +} + +// SetTraceOutput sets the TraceOutput field's value. +func (s *CommandInvocation) SetTraceOutput(v string) *CommandInvocation { + s.TraceOutput = &v + return s +} + +// Describes plugin details. 
+type CommandPlugin struct { + _ struct{} `type:"structure"` + + // The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, + // aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, + // aws:runShellScript, or aws:updateSSMAgent. + Name *string `min:"4" type:"string"` + + // Output of the plugin execution. + Output *string `type:"string"` + + // The S3 bucket where the responses to the command executions should be stored. + // This was requested when issuing the command. For example, in the following + // response: + // + // doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript + // + // doc-example-bucket is the name of the S3 bucket; + // + // ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; + // + // i-02573cafcfEXAMPLE is the instance ID; + // + // awsrunShellScript is the name of the plugin. + OutputS3BucketName *string `min:"3" type:"string"` + + // The S3 directory path inside the bucket where the responses to the command + // executions should be stored. This was requested when issuing the command. + // For example, in the following response: + // + // doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript + // + // doc-example-bucket is the name of the S3 bucket; + // + // ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; + // + // i-02573cafcfEXAMPLE is the instance ID; + // + // awsrunShellScript is the name of the plugin. + OutputS3KeyPrefix *string `type:"string"` + + // (Deprecated) You can no longer specify this parameter. The system ignores + // it. Instead, Systems Manager automatically determines the S3 bucket region. + OutputS3Region *string `min:"3" type:"string"` + + // A numeric response code generated after running the plugin. + ResponseCode *int64 `type:"integer"` + + // The time the plugin stopped running. 
Could stop prematurely if, for example, + // a cancel command was sent. + ResponseFinishDateTime *time.Time `type:"timestamp"` + + // The time the plugin started running. + ResponseStartDateTime *time.Time `type:"timestamp"` + + // The URL for the complete text written by the plugin to stderr. If execution + // is not yet complete, then this string is empty. + StandardErrorUrl *string `type:"string"` + + // The URL for the complete text written by the plugin to stdout in Amazon S3. + // If the S3 bucket for the command was not specified, then this string is empty. + StandardOutputUrl *string `type:"string"` + + // The status of this plugin. You can run a document with multiple plugins. + Status *string `type:"string" enum:"CommandPluginStatus"` + + // A detailed status of the plugin execution. StatusDetails includes more information + // than Status because it includes states resulting from error and concurrency + // control parameters. StatusDetails can show different results than Status. + // For more information about these statuses, see Understanding command statuses + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // in the AWS Systems Manager User Guide. StatusDetails can be one of the following + // values: + // + // * Pending: The command has not been sent to the instance. + // + // * In Progress: The command has been sent to the instance but has not reached + // a terminal state. + // + // * Success: The execution of the command or plugin was successfully completed. + // This is a terminal state. + // + // * Delivery Timed Out: The command was not delivered to the instance before + // the delivery timeout expired. Delivery timeouts do not count against the + // parent command's MaxErrors limit, but they do contribute to whether the + // parent command status is Success or Incomplete. This is a terminal state. 
+ // + // * Execution Timed Out: Command execution started on the instance, but + // the execution was not complete before the execution timeout expired. Execution + // timeouts count against the MaxErrors limit of the parent command. This + // is a terminal state. + // + // * Failed: The command was not successful on the instance. For a plugin, + // this indicates that the result code was not zero. For a command invocation, + // this indicates that the result code for one or more plugins was not zero. + // Invocation failures count against the MaxErrors limit of the parent command. + // This is a terminal state. + // + // * Canceled: The command was terminated before it was completed. This is + // a terminal state. + // + // * Undeliverable: The command can't be delivered to the instance. The instance + // might not exist, or it might not be responding. Undeliverable invocations + // don't count against the parent command's MaxErrors limit, and they don't + // contribute to whether the parent command status is Success or Incomplete. + // This is a terminal state. + // + // * Terminated: The parent command exceeded its MaxErrors limit and subsequent + // command invocations were canceled by the system. This is a terminal state. + StatusDetails *string `type:"string"` +} + +// String returns the string representation +func (s CommandPlugin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommandPlugin) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *CommandPlugin) SetName(v string) *CommandPlugin { + s.Name = &v + return s +} + +// SetOutput sets the Output field's value. +func (s *CommandPlugin) SetOutput(v string) *CommandPlugin { + s.Output = &v + return s +} + +// SetOutputS3BucketName sets the OutputS3BucketName field's value. 
+func (s *CommandPlugin) SetOutputS3BucketName(v string) *CommandPlugin { + s.OutputS3BucketName = &v + return s +} + +// SetOutputS3KeyPrefix sets the OutputS3KeyPrefix field's value. +func (s *CommandPlugin) SetOutputS3KeyPrefix(v string) *CommandPlugin { + s.OutputS3KeyPrefix = &v + return s +} + +// SetOutputS3Region sets the OutputS3Region field's value. +func (s *CommandPlugin) SetOutputS3Region(v string) *CommandPlugin { + s.OutputS3Region = &v + return s +} + +// SetResponseCode sets the ResponseCode field's value. +func (s *CommandPlugin) SetResponseCode(v int64) *CommandPlugin { + s.ResponseCode = &v + return s +} + +// SetResponseFinishDateTime sets the ResponseFinishDateTime field's value. +func (s *CommandPlugin) SetResponseFinishDateTime(v time.Time) *CommandPlugin { + s.ResponseFinishDateTime = &v + return s +} + +// SetResponseStartDateTime sets the ResponseStartDateTime field's value. +func (s *CommandPlugin) SetResponseStartDateTime(v time.Time) *CommandPlugin { + s.ResponseStartDateTime = &v + return s +} + +// SetStandardErrorUrl sets the StandardErrorUrl field's value. +func (s *CommandPlugin) SetStandardErrorUrl(v string) *CommandPlugin { + s.StandardErrorUrl = &v + return s +} + +// SetStandardOutputUrl sets the StandardOutputUrl field's value. +func (s *CommandPlugin) SetStandardOutputUrl(v string) *CommandPlugin { + s.StandardOutputUrl = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CommandPlugin) SetStatus(v string) *CommandPlugin { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *CommandPlugin) SetStatusDetails(v string) *CommandPlugin { + s.StatusDetails = &v + return s +} + +// A summary of the call execution that includes an execution ID, the type of +// execution (for example, Command), and the date/time of the execution using +// a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'. 
+type ComplianceExecutionSummary struct { + _ struct{} `type:"structure"` + + // An ID created by the system when PutComplianceItems was called. For example, + // CommandID is a valid execution ID. You can use this ID in subsequent calls. + ExecutionId *string `type:"string"` + + // The time the execution ran as a datetime object that is saved in the following + // format: yyyy-MM-dd'T'HH:mm:ss'Z'. + // + // ExecutionTime is a required field + ExecutionTime *time.Time `type:"timestamp" required:"true"` + + // The type of execution. For example, Command is a valid execution type. + ExecutionType *string `type:"string"` +} + +// String returns the string representation +func (s ComplianceExecutionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceExecutionSummary) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComplianceExecutionSummary) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComplianceExecutionSummary"} + if s.ExecutionTime == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *ComplianceExecutionSummary) SetExecutionId(v string) *ComplianceExecutionSummary { + s.ExecutionId = &v + return s +} + +// SetExecutionTime sets the ExecutionTime field's value. +func (s *ComplianceExecutionSummary) SetExecutionTime(v time.Time) *ComplianceExecutionSummary { + s.ExecutionTime = &v + return s +} + +// SetExecutionType sets the ExecutionType field's value. +func (s *ComplianceExecutionSummary) SetExecutionType(v string) *ComplianceExecutionSummary { + s.ExecutionType = &v + return s +} + +// Information about the compliance as defined by the resource type. 
For example, +// for a patch resource type, Items includes information about the PatchSeverity, +// Classification, and so on. +type ComplianceItem struct { + _ struct{} `type:"structure"` + + // The compliance type. For example, Association (for a State Manager association), + // Patch, or Custom:string are all valid compliance types. + ComplianceType *string `min:"1" type:"string"` + + // A "Key": "Value" tag combination for the compliance item. + Details map[string]*string `type:"map"` + + // A summary for the compliance item. The summary includes an execution ID, + // the execution type (for example, command), and the execution time. + ExecutionSummary *ComplianceExecutionSummary `type:"structure"` + + // An ID for the compliance item. For example, if the compliance item is a Windows + // patch, the ID could be the number of the KB article; for example: KB4010320. + Id *string `type:"string"` + + // An ID for the resource. For a managed instance, this is the instance ID. + ResourceId *string `min:"1" type:"string"` + + // The type of resource. ManagedInstance is currently the only supported resource + // type. + ResourceType *string `min:"1" type:"string"` + + // The severity of the compliance status. Severity can be one of the following: + // Critical, High, Medium, Low, Informational, Unspecified. + Severity *string `type:"string" enum:"ComplianceSeverity"` + + // The status of the compliance item. An item is either COMPLIANT, NON_COMPLIANT, + // or an empty string (for Windows patches that aren't applicable). + Status *string `type:"string" enum:"ComplianceStatus"` + + // A title for the compliance item. For example, if the compliance item is a + // Windows patch, the title could be the title of the KB article for the patch; + // for example: Security Update for Active Directory Federation Services. 
+ Title *string `type:"string"` +} + +// String returns the string representation +func (s ComplianceItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceItem) GoString() string { + return s.String() +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *ComplianceItem) SetComplianceType(v string) *ComplianceItem { + s.ComplianceType = &v + return s +} + +// SetDetails sets the Details field's value. +func (s *ComplianceItem) SetDetails(v map[string]*string) *ComplianceItem { + s.Details = v + return s +} + +// SetExecutionSummary sets the ExecutionSummary field's value. +func (s *ComplianceItem) SetExecutionSummary(v *ComplianceExecutionSummary) *ComplianceItem { + s.ExecutionSummary = v + return s +} + +// SetId sets the Id field's value. +func (s *ComplianceItem) SetId(v string) *ComplianceItem { + s.Id = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ComplianceItem) SetResourceId(v string) *ComplianceItem { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ComplianceItem) SetResourceType(v string) *ComplianceItem { + s.ResourceType = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *ComplianceItem) SetSeverity(v string) *ComplianceItem { + s.Severity = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ComplianceItem) SetStatus(v string) *ComplianceItem { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *ComplianceItem) SetTitle(v string) *ComplianceItem { + s.Title = &v + return s +} + +// Information about a compliance item. +type ComplianceItemEntry struct { + _ struct{} `type:"structure"` + + // A "Key": "Value" tag combination for the compliance item. + Details map[string]*string `type:"map"` + + // The compliance item ID. 
For example, if the compliance item is a Windows + // patch, the ID could be the number of the KB article. + Id *string `type:"string"` + + // The severity of the compliance status. Severity can be one of the following: + // Critical, High, Medium, Low, Informational, Unspecified. + // + // Severity is a required field + Severity *string `type:"string" required:"true" enum:"ComplianceSeverity"` + + // The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ComplianceStatus"` + + // The title of the compliance item. For example, if the compliance item is + // a Windows patch, the title could be the title of the KB article for the patch; + // for example: Security Update for Active Directory Federation Services. + Title *string `type:"string"` +} + +// String returns the string representation +func (s ComplianceItemEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceItemEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComplianceItemEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComplianceItemEntry"} + if s.Severity == nil { + invalidParams.Add(request.NewErrParamRequired("Severity")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetails sets the Details field's value. +func (s *ComplianceItemEntry) SetDetails(v map[string]*string) *ComplianceItemEntry { + s.Details = v + return s +} + +// SetId sets the Id field's value. +func (s *ComplianceItemEntry) SetId(v string) *ComplianceItemEntry { + s.Id = &v + return s +} + +// SetSeverity sets the Severity field's value. 
+func (s *ComplianceItemEntry) SetSeverity(v string) *ComplianceItemEntry { + s.Severity = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ComplianceItemEntry) SetStatus(v string) *ComplianceItemEntry { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *ComplianceItemEntry) SetTitle(v string) *ComplianceItemEntry { + s.Title = &v + return s +} + +// One or more filters. Use a filter to return a more specific list of results. +type ComplianceStringFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `min:"1" type:"string"` + + // The type of comparison that should be performed for the value: Equal, NotEqual, + // BeginWith, LessThan, or GreaterThan. + Type *string `type:"string" enum:"ComplianceQueryOperatorType"` + + // The value for which to search. + Values []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s ComplianceStringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceStringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComplianceStringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComplianceStringFilter"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ComplianceStringFilter) SetKey(v string) *ComplianceStringFilter { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *ComplianceStringFilter) SetType(v string) *ComplianceStringFilter { + s.Type = &v + return s +} + +// SetValues sets the Values field's value. 
+func (s *ComplianceStringFilter) SetValues(v []*string) *ComplianceStringFilter { + s.Values = v + return s +} + +// A summary of compliance information by compliance type. +type ComplianceSummaryItem struct { + _ struct{} `type:"structure"` + + // The type of compliance item. For example, the compliance type can be Association, + // Patch, or Custom:string. + ComplianceType *string `min:"1" type:"string"` + + // A list of COMPLIANT items for the specified compliance type. + CompliantSummary *CompliantSummary `type:"structure"` + + // A list of NON_COMPLIANT items for the specified compliance type. + NonCompliantSummary *NonCompliantSummary `type:"structure"` +} + +// String returns the string representation +func (s ComplianceSummaryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComplianceSummaryItem) GoString() string { + return s.String() +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *ComplianceSummaryItem) SetComplianceType(v string) *ComplianceSummaryItem { + s.ComplianceType = &v + return s +} + +// SetCompliantSummary sets the CompliantSummary field's value. +func (s *ComplianceSummaryItem) SetCompliantSummary(v *CompliantSummary) *ComplianceSummaryItem { + s.CompliantSummary = v + return s +} + +// SetNonCompliantSummary sets the NonCompliantSummary field's value. +func (s *ComplianceSummaryItem) SetNonCompliantSummary(v *NonCompliantSummary) *ComplianceSummaryItem { + s.NonCompliantSummary = v + return s +} + +// You specified too many custom compliance types. You can specify a maximum +// of 10 different types. 
+type ComplianceTypeCountLimitExceededException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s ComplianceTypeCountLimitExceededException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ComplianceTypeCountLimitExceededException) GoString() string {
+	return s.String()
+}
+
+func newErrorComplianceTypeCountLimitExceededException(v protocol.ResponseMetadata) error {
+	return &ComplianceTypeCountLimitExceededException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ComplianceTypeCountLimitExceededException) Code() string {
+	return "ComplianceTypeCountLimitExceededException"
+}
+
+// Message returns the exception's message.
+func (s *ComplianceTypeCountLimitExceededException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ComplianceTypeCountLimitExceededException) OrigErr() error {
+	return nil
+}
+
+func (s *ComplianceTypeCountLimitExceededException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ComplianceTypeCountLimitExceededException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ComplianceTypeCountLimitExceededException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// A summary of resources that are compliant. The summary is organized according
+// to the resource count for each compliance type.
+type CompliantSummary struct {
+	_ struct{} `type:"structure"`
+
+	// The total number of resources that are compliant.
+	CompliantCount *int64 `type:"integer"`
+
+	// A summary of the compliance severity by compliance type.
+	SeveritySummary *SeveritySummary `type:"structure"`
+}
+
+// String returns the string representation
+func (s CompliantSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompliantSummary) GoString() string {
+	return s.String()
+}
+
+// SetCompliantCount sets the CompliantCount field's value.
+func (s *CompliantSummary) SetCompliantCount(v int64) *CompliantSummary {
+	s.CompliantCount = &v
+	return s
+}
+
+// SetSeveritySummary sets the SeveritySummary field's value.
+func (s *CompliantSummary) SetSeveritySummary(v *SeveritySummary) *CompliantSummary {
+	s.SeveritySummary = v
+	return s
+}
+
+type CreateActivationInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the registered, managed instance as it will appear in the Systems
+	// Manager console or when you use the AWS command line tools to list Systems
+	// Manager resources.
+	//
+	// Do not enter personally identifiable information in this field.
+	DefaultInstanceName *string `type:"string"`
+
+	// A user-defined description of the resource that you want to register with
+	// Systems Manager.
+	//
+	// Do not enter personally identifiable information in this field.
+	Description *string `type:"string"`
+
+	// The date by which this activation request should expire. The default value
+	// is 24 hours.
+	ExpirationDate *time.Time `type:"timestamp"`
+
+	// The AWS Identity and Access Management (IAM) role that you want to assign
+	// to the managed instance. This IAM role must provide AssumeRole permissions
+	// for the Systems Manager service principal ssm.amazonaws.com. For more information,
+	// see Create an IAM service role for a hybrid environment (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html)
+	// in the AWS Systems Manager User Guide.
+ // + // IamRole is a required field + IamRole *string `type:"string" required:"true"` + + // Specify the maximum number of managed instances you want to register. The + // default value is 1 instance. + RegistrationLimit *int64 `min:"1" type:"integer"` + + // Optional metadata that you assign to a resource. Tags enable you to categorize + // a resource in different ways, such as by purpose, owner, or environment. + // For example, you might want to tag an activation to identify which servers + // or virtual machines (VMs) in your on-premises environment you intend to activate. + // In this case, you could specify the following key name/value pairs: + // + // * Key=OS,Value=Windows + // + // * Key=Environment,Value=Production + // + // When you install SSM Agent on your on-premises servers and VMs, you specify + // an activation ID and code. When you specify the activation ID and code, tags + // assigned to the activation are automatically applied to the on-premises servers + // or VMs. + // + // You can't add tags to or delete tags from an existing activation. You can + // tag your on-premises servers and VMs after they connect to Systems Manager + // for the first time and are assigned a managed instance ID. This means they + // are listed in the AWS Systems Manager console with an ID that is prefixed + // with "mi-". For information about how to add tags to your managed instances, + // see AddTagsToResource. For information about how to remove tags from your + // managed instances, see RemoveTagsFromResource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateActivationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActivationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateActivationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateActivationInput"} + if s.IamRole == nil { + invalidParams.Add(request.NewErrParamRequired("IamRole")) + } + if s.RegistrationLimit != nil && *s.RegistrationLimit < 1 { + invalidParams.Add(request.NewErrParamMinValue("RegistrationLimit", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultInstanceName sets the DefaultInstanceName field's value. +func (s *CreateActivationInput) SetDefaultInstanceName(v string) *CreateActivationInput { + s.DefaultInstanceName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateActivationInput) SetDescription(v string) *CreateActivationInput { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *CreateActivationInput) SetExpirationDate(v time.Time) *CreateActivationInput { + s.ExpirationDate = &v + return s +} + +// SetIamRole sets the IamRole field's value. +func (s *CreateActivationInput) SetIamRole(v string) *CreateActivationInput { + s.IamRole = &v + return s +} + +// SetRegistrationLimit sets the RegistrationLimit field's value. +func (s *CreateActivationInput) SetRegistrationLimit(v int64) *CreateActivationInput { + s.RegistrationLimit = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateActivationInput) SetTags(v []*Tag) *CreateActivationInput { + s.Tags = v + return s +} + +type CreateActivationOutput struct { + _ struct{} `type:"structure"` + + // The code the system generates when it processes the activation. The activation + // code functions like a password to validate the activation ID. 
+ ActivationCode *string `min:"20" type:"string"` + + // The ID number generated by the system when it processed the activation. The + // activation ID functions like a user name. + ActivationId *string `type:"string"` +} + +// String returns the string representation +func (s CreateActivationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateActivationOutput) GoString() string { + return s.String() +} + +// SetActivationCode sets the ActivationCode field's value. +func (s *CreateActivationOutput) SetActivationCode(v string) *CreateActivationOutput { + s.ActivationCode = &v + return s +} + +// SetActivationId sets the ActivationId field's value. +func (s *CreateActivationOutput) SetActivationId(v string) *CreateActivationOutput { + s.ActivationId = &v + return s +} + +type CreateAssociationBatchInput struct { + _ struct{} `type:"structure"` + + // One or more associations. + // + // Entries is a required field + Entries []*CreateAssociationBatchRequestEntry `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateAssociationBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAssociationBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAssociationBatchInput"} + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil && len(s.Entries) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Entries", 1)) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntries sets the Entries field's value. +func (s *CreateAssociationBatchInput) SetEntries(v []*CreateAssociationBatchRequestEntry) *CreateAssociationBatchInput { + s.Entries = v + return s +} + +type CreateAssociationBatchOutput struct { + _ struct{} `type:"structure"` + + // Information about the associations that failed. + Failed []*FailedCreateAssociation `type:"list"` + + // Information about the associations that succeeded. + Successful []*AssociationDescription `type:"list"` +} + +// String returns the string representation +func (s CreateAssociationBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *CreateAssociationBatchOutput) SetFailed(v []*FailedCreateAssociation) *CreateAssociationBatchOutput { + s.Failed = v + return s +} + +// SetSuccessful sets the Successful field's value. +func (s *CreateAssociationBatchOutput) SetSuccessful(v []*AssociationDescription) *CreateAssociationBatchOutput { + s.Successful = v + return s +} + +// Describes the association of a Systems Manager SSM document and an instance. 
+type CreateAssociationBatchRequestEntry struct {
+	_ struct{} `type:"structure"`
+
+	// By default, when you create a new association, the system runs it immediately
+	// after it is created and then according to the schedule you specified. Specify
+	// this option if you don't want an association to run immediately after you
+	// create it.
+	ApplyOnlyAtCronInterval *bool `type:"boolean"`
+
+	// Specify a descriptive name for the association.
+	AssociationName *string `type:"string"`
+
+	// Specify the target for the association. This target is required for associations
+	// that use an Automation document and target resources by using rate controls.
+	AutomationTargetParameterName *string `min:"1" type:"string"`
+
+	// The severity level to assign to the association.
+	ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"`
+
+	// The document version.
+	DocumentVersion *string `type:"string"`
+
+	// The ID of the instance.
+	InstanceId *string `type:"string"`
+
+	// The maximum number of targets allowed to run the association at the same
+	// time. You can specify a number, for example 10, or a percentage of the target
+	// set, for example 10%. The default value is 100%, which means all targets
+	// run the association at the same time.
+	//
+	// If a new instance starts and attempts to run an association while Systems
+	// Manager is running MaxConcurrency associations, the association is allowed
+	// to run. During the next association interval, the new instance will process
+	// its association within the limit specified for MaxConcurrency.
+	MaxConcurrency *string `min:"1" type:"string"`
+
+	// The number of errors that are allowed before the system stops sending requests
+	// to run the association on additional targets. You can specify either an absolute
+	// number of errors, for example 10, or a percentage of the target set, for
+	// example 10%. 
If you specify 3, for example, the system stops sending requests + // when the fourth error is received. If you specify 0, then the system stops + // sending requests after the first error is returned. If you run an association + // on 50 instances and set MaxError to 10%, then the system stops sending the + // request when the sixth error is received. + // + // Executions that are already running an association when MaxErrors is reached + // are allowed to complete, but some of these executions may fail as well. If + // you need to ensure that there won't be more than max-errors failed executions, + // set MaxConcurrency to 1 so that executions proceed one at a time. + MaxErrors *string `min:"1" type:"string"` + + // The name of the SSM document that contains the configuration information + // for the instance. You can specify Command or Automation documents. + // + // You can specify AWS-predefined documents, documents you created, or a document + // that is shared with you from another account. + // + // For SSM documents that are shared with you from other AWS accounts, you must + // specify the complete SSM document ARN, in the following format: + // + // arn:aws:ssm:region:account-id:document/document-name + // + // For example: + // + // arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document + // + // For AWS-predefined documents and SSM documents you created in your account, + // you only need to specify the document name. For example, AWS-ApplyPatchBaseline + // or My-Document. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An S3 bucket where you want to store the results of this request. + OutputLocation *InstanceAssociationOutputLocation `type:"structure"` + + // A description of the parameters for a document. + Parameters map[string][]*string `type:"map"` + + // A cron expression that specifies a schedule when the association runs. 
+ ScheduleExpression *string `min:"1" type:"string"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + + // The instances targeted by the request. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s CreateAssociationBatchRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationBatchRequestEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAssociationBatchRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAssociationBatchRequestEntry"} + if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1)) + } + if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1)) + } + if s.MaxErrors != nil && len(*s.MaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ScheduleExpression != nil && len(*s.ScheduleExpression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduleExpression", 1)) + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *CreateAssociationBatchRequestEntry) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationBatchRequestEntry { + s.ApplyOnlyAtCronInterval = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. +func (s *CreateAssociationBatchRequestEntry) SetAssociationName(v string) *CreateAssociationBatchRequestEntry { + s.AssociationName = &v + return s +} + +// SetAutomationTargetParameterName sets the AutomationTargetParameterName field's value. 
+func (s *CreateAssociationBatchRequestEntry) SetAutomationTargetParameterName(v string) *CreateAssociationBatchRequestEntry { + s.AutomationTargetParameterName = &v + return s +} + +// SetComplianceSeverity sets the ComplianceSeverity field's value. +func (s *CreateAssociationBatchRequestEntry) SetComplianceSeverity(v string) *CreateAssociationBatchRequestEntry { + s.ComplianceSeverity = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *CreateAssociationBatchRequestEntry) SetDocumentVersion(v string) *CreateAssociationBatchRequestEntry { + s.DocumentVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *CreateAssociationBatchRequestEntry) SetInstanceId(v string) *CreateAssociationBatchRequestEntry { + s.InstanceId = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *CreateAssociationBatchRequestEntry) SetMaxConcurrency(v string) *CreateAssociationBatchRequestEntry { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *CreateAssociationBatchRequestEntry) SetMaxErrors(v string) *CreateAssociationBatchRequestEntry { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAssociationBatchRequestEntry) SetName(v string) *CreateAssociationBatchRequestEntry { + s.Name = &v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *CreateAssociationBatchRequestEntry) SetOutputLocation(v *InstanceAssociationOutputLocation) *CreateAssociationBatchRequestEntry { + s.OutputLocation = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateAssociationBatchRequestEntry) SetParameters(v map[string][]*string) *CreateAssociationBatchRequestEntry { + s.Parameters = v + return s +} + +// SetScheduleExpression sets the ScheduleExpression field's value. 
+func (s *CreateAssociationBatchRequestEntry) SetScheduleExpression(v string) *CreateAssociationBatchRequestEntry { + s.ScheduleExpression = &v + return s +} + +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *CreateAssociationBatchRequestEntry) SetSyncCompliance(v string) *CreateAssociationBatchRequestEntry { + s.SyncCompliance = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *CreateAssociationBatchRequestEntry) SetTargets(v []*Target) *CreateAssociationBatchRequestEntry { + s.Targets = v + return s +} + +type CreateAssociationInput struct { + _ struct{} `type:"structure"` + + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + + // Specify a descriptive name for the association. + AssociationName *string `type:"string"` + + // Specify the target for the association. This target is required for associations + // that use an Automation document and target resources by using rate controls. + AutomationTargetParameterName *string `min:"1" type:"string"` + + // The severity level to assign to the association. + ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"` + + // The document version you want to associate with the target(s). Can be a specific + // version or the default version. + DocumentVersion *string `type:"string"` + + // The instance ID. + // + // InstanceId has been deprecated. To specify an instance ID for an association, + // use the Targets parameter. Requests that include the parameter InstanceID + // with SSM documents that use schema version 2.0 or later will fail. 
In addition, + // if you use the parameter InstanceId, you cannot use the parameters AssociationName, + // DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. + // To use these parameters, you must use the Targets parameter. + InstanceId *string `type:"string"` + + // The maximum number of targets allowed to run the association at the same + // time. You can specify a number, for example 10, or a percentage of the target + // set, for example 10%. The default value is 100%, which means all targets + // run the association at the same time. + // + // If a new instance starts and attempts to run an association while Systems + // Manager is running MaxConcurrency associations, the association is allowed + // to run. During the next association interval, the new instance will process + // its association within the limit specified for MaxConcurrency. + MaxConcurrency *string `min:"1" type:"string"` + + // The number of errors that are allowed before the system stops sending requests + // to run the association on additional targets. You can specify either an absolute + // number of errors, for example 10, or a percentage of the target set, for + // example 10%. If you specify 3, for example, the system stops sending requests + // when the fourth error is received. If you specify 0, then the system stops + // sending requests after the first error is returned. If you run an association + // on 50 instances and set MaxError to 10%, then the system stops sending the + // request when the sixth error is received. + // + // Executions that are already running an association when MaxErrors is reached + // are allowed to complete, but some of these executions may fail as well. If + // you need to ensure that there won't be more than max-errors failed executions, + // set MaxConcurrency to 1 so that executions proceed one at a time. 
+ MaxErrors *string `min:"1" type:"string"` + + // The name of the SSM document that contains the configuration information + // for the instance. You can specify Command or Automation documents. + // + // You can specify AWS-predefined documents, documents you created, or a document + // that is shared with you from another account. + // + // For SSM documents that are shared with you from other AWS accounts, you must + // specify the complete SSM document ARN, in the following format: + // + // arn:partition:ssm:region:account-id:document/document-name + // + // For example: + // + // arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document + // + // For AWS-predefined documents and SSM documents you created in your account, + // you only need to specify the document name. For example, AWS-ApplyPatchBaseline + // or My-Document. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An S3 bucket where you want to store the output details of the request. + OutputLocation *InstanceAssociationOutputLocation `type:"structure"` + + // The parameters for the runtime configuration of the document. + Parameters map[string][]*string `type:"map"` + + // A cron expression when the association will be applied to the target(s). + ScheduleExpression *string `min:"1" type:"string"` + + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. 
+ // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + + // The targets for the association. You can target instances by using tags, + // AWS Resource Groups, all instances in an AWS account, or individual instance + // IDs. For more information about choosing targets for an association, see + // Using targets and rate controls with State Manager associations (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-state-manager-targets-and-rate-controls.html) + // in the AWS Systems Manager User Guide. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s CreateAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAssociationInput"} + if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1)) + } + if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1)) + } + if s.MaxErrors != nil && len(*s.MaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ScheduleExpression != nil && len(*s.ScheduleExpression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ScheduleExpression", 1)) + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == 
nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *CreateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationInput { + s.ApplyOnlyAtCronInterval = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. +func (s *CreateAssociationInput) SetAssociationName(v string) *CreateAssociationInput { + s.AssociationName = &v + return s +} + +// SetAutomationTargetParameterName sets the AutomationTargetParameterName field's value. +func (s *CreateAssociationInput) SetAutomationTargetParameterName(v string) *CreateAssociationInput { + s.AutomationTargetParameterName = &v + return s +} + +// SetComplianceSeverity sets the ComplianceSeverity field's value. +func (s *CreateAssociationInput) SetComplianceSeverity(v string) *CreateAssociationInput { + s.ComplianceSeverity = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *CreateAssociationInput) SetDocumentVersion(v string) *CreateAssociationInput { + s.DocumentVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *CreateAssociationInput) SetInstanceId(v string) *CreateAssociationInput { + s.InstanceId = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *CreateAssociationInput) SetMaxConcurrency(v string) *CreateAssociationInput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *CreateAssociationInput) SetMaxErrors(v string) *CreateAssociationInput { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *CreateAssociationInput) SetName(v string) *CreateAssociationInput { + s.Name = &v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *CreateAssociationInput) SetOutputLocation(v *InstanceAssociationOutputLocation) *CreateAssociationInput { + s.OutputLocation = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateAssociationInput) SetParameters(v map[string][]*string) *CreateAssociationInput { + s.Parameters = v + return s +} + +// SetScheduleExpression sets the ScheduleExpression field's value. +func (s *CreateAssociationInput) SetScheduleExpression(v string) *CreateAssociationInput { + s.ScheduleExpression = &v + return s +} + +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *CreateAssociationInput) SetSyncCompliance(v string) *CreateAssociationInput { + s.SyncCompliance = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *CreateAssociationInput) SetTargets(v []*Target) *CreateAssociationInput { + s.Targets = v + return s +} + +type CreateAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. + AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAssociationOutput) GoString() string { + return s.String() +} + +// SetAssociationDescription sets the AssociationDescription field's value. +func (s *CreateAssociationOutput) SetAssociationDescription(v *AssociationDescription) *CreateAssociationOutput { + s.AssociationDescription = v + return s +} + +type CreateDocumentInput struct { + _ struct{} `type:"structure"` + + // A list of key and value pairs that describe attachments to a version of a + // document. 
+ Attachments []*AttachmentsSource `type:"list"` + + // The content for the new SSM document in JSON or YAML format. We recommend + // storing the contents for your new document in an external JSON or YAML file + // and referencing the file in a command. + // + // For examples, see the following topics in the AWS Systems Manager User Guide. + // + // * Create an SSM document (AWS API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) + // + // * Create an SSM document (AWS CLI) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-cli.html) + // + // * Create an SSM document (API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) + // + // Content is a required field + Content *string `min:"1" type:"string" required:"true"` + + // Specify the document format for the request. The document format can be JSON, + // YAML, or TEXT. JSON is the default format. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The type of document to create. + DocumentType *string `type:"string" enum:"DocumentType"` + + // A name for the Systems Manager document. + // + // You can't use the following strings as document name prefixes. These are + // reserved by AWS for use as document name prefixes: + // + // * aws- + // + // * amazon + // + // * amzn + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A list of SSM documents required by a document. This parameter is used exclusively + // by AWS AppConfig. When a user creates an AppConfig configuration in an SSM + // document, the user must also specify a required document for validation purposes. + // In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema + // document for validation purposes. 
For more information, see AWS AppConfig + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/appconfig.html) + // in the AWS Systems Manager User Guide. + Requires []*DocumentRequires `min:"1" type:"list"` + + // Optional metadata that you assign to a resource. Tags enable you to categorize + // a resource in different ways, such as by purpose, owner, or environment. + // For example, you might want to tag an SSM document to identify the types + // of targets or the environment where it will run. In this case, you could + // specify the following key name/value pairs: + // + // * Key=OS,Value=Windows + // + // * Key=Environment,Value=Production + // + // To add tags to an existing SSM document, use the AddTagsToResource action. + Tags []*Tag `type:"list"` + + // Specify a target type to define the kinds of resources the document can run + // on. For example, to run a document on EC2 instances, specify the following + // value: /AWS::EC2::Instance. If you specify a value of '/' the document can + // run on all types of resources. If you don't specify a value, the document + // can't run on any resources. For a list of valid resource types, see AWS resource + // and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide. + TargetType *string `type:"string"` + + // An optional field specifying the version of the artifact you are creating + // with the document. For example, "Release 12, Update 6". This value is unique + // across all versions of a document, and cannot be changed. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s CreateDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDocumentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDocumentInput"} + if s.Content == nil { + invalidParams.Add(request.NewErrParamRequired("Content")) + } + if s.Content != nil && len(*s.Content) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Content", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Requires != nil && len(s.Requires) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Requires", 1)) + } + if s.Attachments != nil { + for i, v := range s.Attachments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attachments", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Requires != nil { + for i, v := range s.Requires { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Requires", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttachments sets the Attachments field's value. +func (s *CreateDocumentInput) SetAttachments(v []*AttachmentsSource) *CreateDocumentInput { + s.Attachments = v + return s +} + +// SetContent sets the Content field's value. +func (s *CreateDocumentInput) SetContent(v string) *CreateDocumentInput { + s.Content = &v + return s +} + +// SetDocumentFormat sets the DocumentFormat field's value. +func (s *CreateDocumentInput) SetDocumentFormat(v string) *CreateDocumentInput { + s.DocumentFormat = &v + return s +} + +// SetDocumentType sets the DocumentType field's value. 
+func (s *CreateDocumentInput) SetDocumentType(v string) *CreateDocumentInput { + s.DocumentType = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDocumentInput) SetName(v string) *CreateDocumentInput { + s.Name = &v + return s +} + +// SetRequires sets the Requires field's value. +func (s *CreateDocumentInput) SetRequires(v []*DocumentRequires) *CreateDocumentInput { + s.Requires = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDocumentInput) SetTags(v []*Tag) *CreateDocumentInput { + s.Tags = v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *CreateDocumentInput) SetTargetType(v string) *CreateDocumentInput { + s.TargetType = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *CreateDocumentInput) SetVersionName(v string) *CreateDocumentInput { + s.VersionName = &v + return s +} + +type CreateDocumentOutput struct { + _ struct{} `type:"structure"` + + // Information about the Systems Manager document. + DocumentDescription *DocumentDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDocumentOutput) GoString() string { + return s.String() +} + +// SetDocumentDescription sets the DocumentDescription field's value. +func (s *CreateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *CreateDocumentOutput { + s.DocumentDescription = v + return s +} + +type CreateMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // Enables a maintenance window task to run on managed instances, even if you + // have not registered those instances as targets. If enabled, then you must + // specify the unregistered instances (by instance ID) when you register a task + // with the maintenance window. 
+ // + // If you don't enable this option, then you must specify previously-registered + // targets when you register a task with the maintenance window. + // + // AllowUnassociatedTargets is a required field + AllowUnassociatedTargets *bool `type:"boolean" required:"true"` + + // User-provided idempotency token. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The number of hours before the end of the maintenance window that Systems + // Manager stops scheduling new tasks for execution. + // + // Cutoff is a required field + Cutoff *int64 `type:"integer" required:"true"` + + // An optional description for the maintenance window. We recommend specifying + // a description to help you organize your maintenance windows. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The duration of the maintenance window in hours. + // + // Duration is a required field + Duration *int64 `min:"1" type:"integer" required:"true"` + + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become inactive. EndDate allows you to set a date and time in the + // future when the maintenance window will no longer run. + EndDate *string `type:"string"` + + // The name of the maintenance window. + // + // Name is a required field + Name *string `min:"3" type:"string" required:"true"` + + // The schedule of the maintenance window in the form of a cron or rate expression. + // + // Schedule is a required field + Schedule *string `min:"1" type:"string" required:"true"` + + // The number of days to wait after the date and time specified by a CRON expression + // before running the maintenance window. + // + // For example, the following cron expression schedules a maintenance window + // to run on the third Tuesday of every month at 11:30 PM. + // + // cron(0 30 23 ? * TUE#3 *) + // + // If the schedule offset is 2, the maintenance window won't run until two days + // later. 
+ ScheduleOffset *int64 `min:"1" type:"integer"` + + // The time zone that the scheduled maintenance window executions are based + // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", + // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database + // (https://www.iana.org/time-zones) on the IANA website. + ScheduleTimezone *string `type:"string"` + + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become active. StartDate allows you to delay activation of the + // maintenance window until the specified future date. + StartDate *string `type:"string"` + + // Optional metadata that you assign to a resource. Tags enable you to categorize + // a resource in different ways, such as by purpose, owner, or environment. + // For example, you might want to tag a maintenance window to identify the type + // of tasks it will run, the types of targets, and the environment it will run + // in. In this case, you could specify the following key name/value pairs: + // + // * Key=TaskType,Value=AgentUpdate + // + // * Key=OS,Value=Windows + // + // * Key=Environment,Value=Production + // + // To add tags to an existing maintenance window, use the AddTagsToResource + // action. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMaintenanceWindowInput"} + if s.AllowUnassociatedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("AllowUnassociatedTargets")) + } + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.Cutoff == nil { + invalidParams.Add(request.NewErrParamRequired("Cutoff")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Duration == nil { + invalidParams.Add(request.NewErrParamRequired("Duration")) + } + if s.Duration != nil && *s.Duration < 1 { + invalidParams.Add(request.NewErrParamMinValue("Duration", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Schedule != nil && len(*s.Schedule) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Schedule", 1)) + } + if s.ScheduleOffset != nil && *s.ScheduleOffset < 1 { + invalidParams.Add(request.NewErrParamMinValue("ScheduleOffset", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowUnassociatedTargets sets the AllowUnassociatedTargets field's value. +func (s *CreateMaintenanceWindowInput) SetAllowUnassociatedTargets(v bool) *CreateMaintenanceWindowInput { + s.AllowUnassociatedTargets = &v + return s +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *CreateMaintenanceWindowInput) SetClientToken(v string) *CreateMaintenanceWindowInput { + s.ClientToken = &v + return s +} + +// SetCutoff sets the Cutoff field's value. +func (s *CreateMaintenanceWindowInput) SetCutoff(v int64) *CreateMaintenanceWindowInput { + s.Cutoff = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateMaintenanceWindowInput) SetDescription(v string) *CreateMaintenanceWindowInput { + s.Description = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *CreateMaintenanceWindowInput) SetDuration(v int64) *CreateMaintenanceWindowInput { + s.Duration = &v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *CreateMaintenanceWindowInput) SetEndDate(v string) *CreateMaintenanceWindowInput { + s.EndDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateMaintenanceWindowInput) SetName(v string) *CreateMaintenanceWindowInput { + s.Name = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *CreateMaintenanceWindowInput) SetSchedule(v string) *CreateMaintenanceWindowInput { + s.Schedule = &v + return s +} + +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *CreateMaintenanceWindowInput) SetScheduleOffset(v int64) *CreateMaintenanceWindowInput { + s.ScheduleOffset = &v + return s +} + +// SetScheduleTimezone sets the ScheduleTimezone field's value. +func (s *CreateMaintenanceWindowInput) SetScheduleTimezone(v string) *CreateMaintenanceWindowInput { + s.ScheduleTimezone = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *CreateMaintenanceWindowInput) SetStartDate(v string) *CreateMaintenanceWindowInput { + s.StartDate = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateMaintenanceWindowInput) SetTags(v []*Tag) *CreateMaintenanceWindowInput { + s.Tags = v + return s +} + +type CreateMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the created maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowId sets the WindowId field's value. +func (s *CreateMaintenanceWindowOutput) SetWindowId(v string) *CreateMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +type CreateOpsItemInput struct { + _ struct{} `type:"structure"` + + // Specify a category to assign to an OpsItem. + Category *string `min:"1" type:"string"` + + // Information about the OpsItem. + // + // Description is a required field + Description *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Operational data is custom data that provides useful reference details about + // the OpsItem. For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. + // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. 
Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItems. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. + RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // Specify a severity to assign to an OpsItem. + Severity *string `min:"1" type:"string"` + + // The origin of the OpsItem, such as Amazon EC2 or Systems Manager. + // + // The source name can't contain the following strings: aws, amazon, and amzn. + // + // Source is a required field + Source *string `min:"1" type:"string" required:"true"` + + // Optional metadata that you assign to a resource. You can restrict access + // to OpsItems by using an inline IAM policy that specifies tags. 
For more information, + // see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html#OpsCenter-getting-started-user-permissions) + // in the AWS Systems Manager User Guide. + // + // Tags use a key-value pair. For example: + // + // Key=Department,Value=Finance + // + // To add tags to an existing OpsItem, use the AddTagsToResource action. + Tags []*Tag `type:"list"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + // + // Title is a required field + Title *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOpsItemInput"} + if s.Category != nil && len(*s.Category) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Category", 1)) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + if s.Severity != nil && len(*s.Severity) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Severity", 1)) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Source != nil && len(*s.Source) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Source", 1)) + } + if s.Title == nil { + invalidParams.Add(request.NewErrParamRequired("Title")) + } + if s.Title != nil && len(*s.Title) < 1 { + 
invalidParams.Add(request.NewErrParamMinLen("Title", 1)) + } + if s.RelatedOpsItems != nil { + for i, v := range s.RelatedOpsItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedOpsItems", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCategory sets the Category field's value. +func (s *CreateOpsItemInput) SetCategory(v string) *CreateOpsItemInput { + s.Category = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateOpsItemInput) SetDescription(v string) *CreateOpsItemInput { + s.Description = &v + return s +} + +// SetNotifications sets the Notifications field's value. +func (s *CreateOpsItemInput) SetNotifications(v []*OpsItemNotification) *CreateOpsItemInput { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *CreateOpsItemInput) SetOperationalData(v map[string]*OpsItemDataValue) *CreateOpsItemInput { + s.OperationalData = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *CreateOpsItemInput) SetPriority(v int64) *CreateOpsItemInput { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *CreateOpsItemInput) SetRelatedOpsItems(v []*RelatedOpsItem) *CreateOpsItemInput { + s.RelatedOpsItems = v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *CreateOpsItemInput) SetSeverity(v string) *CreateOpsItemInput { + s.Severity = &v + return s +} + +// SetSource sets the Source field's value. 
+func (s *CreateOpsItemInput) SetSource(v string) *CreateOpsItemInput { + s.Source = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateOpsItemInput) SetTags(v []*Tag) *CreateOpsItemInput { + s.Tags = v + return s +} + +// SetTitle sets the Title field's value. +func (s *CreateOpsItemInput) SetTitle(v string) *CreateOpsItemInput { + s.Title = &v + return s +} + +type CreateOpsItemOutput struct { + _ struct{} `type:"structure"` + + // The ID of the OpsItem. + OpsItemId *string `type:"string"` +} + +// String returns the string representation +func (s CreateOpsItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpsItemOutput) GoString() string { + return s.String() +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *CreateOpsItemOutput) SetOpsItemId(v string) *CreateOpsItemOutput { + s.OpsItemId = &v + return s +} + +type CreatePatchBaselineInput struct { + _ struct{} `type:"structure"` + + // A set of rules used to include patches in the baseline. + ApprovalRules *PatchRuleGroup `type:"structure"` + + // A list of explicitly approved patches for the baseline. + // + // For information about accepted formats for lists of approved patches and + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // in the AWS Systems Manager User Guide. + ApprovedPatches []*string `type:"list"` + + // Defines the compliance level for approved patches. This means that if an + // approved patch is reported as missing, this is the severity of the compliance + // violation. The default value is UNSPECIFIED. 
+ ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + + // Indicates whether the list of approved patches includes non-security updates + // that should be applied to the instances. The default value is 'false'. Applies + // to Linux instances only. + ApprovedPatchesEnableNonSecurity *bool `type:"boolean"` + + // User-provided idempotency token. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // A description of the patch baseline. + Description *string `min:"1" type:"string"` + + // A set of global filters used to include patches in the baseline. + GlobalFilters *PatchFilterGroup `type:"structure"` + + // The name of the patch baseline. + // + // Name is a required field + Name *string `min:"3" type:"string" required:"true"` + + // Defines the operating system the patch baseline applies to. The Default value + // is WINDOWS. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // A list of explicitly rejected patches for the baseline. + // + // For information about accepted formats for lists of approved patches and + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // in the AWS Systems Manager User Guide. + RejectedPatches []*string `type:"list"` + + // The action for Patch Manager to take on patches included in the RejectedPackages + // list. + // + // * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is installed + // only if it is a dependency of another package. It is considered compliant + // with the patch baseline, and its status is reported as InstalledOther. + // This is the default action if no option is specified. + // + // * BLOCK: Packages in the RejectedPatches list, and packages that include + // them as dependencies, are not installed under any circumstances. 
If a + // package was installed before it was added to the Rejected patches list, + // it is considered non-compliant with the patch baseline, and its status + // is reported as InstalledRejected. + RejectedPatchesAction *string `type:"string" enum:"PatchAction"` + + // Information about the patches to use to update the instances, including target + // operating systems and source repositories. Applies to Linux instances only. + Sources []*PatchSource `type:"list"` + + // Optional metadata that you assign to a resource. Tags enable you to categorize + // a resource in different ways, such as by purpose, owner, or environment. + // For example, you might want to tag a patch baseline to identify the severity + // level of patches it specifies and the operating system family it applies + // to. In this case, you could specify the following key name/value pairs: + // + // * Key=PatchSeverity,Value=Critical + // + // * Key=OS,Value=Windows + // + // To add tags to an existing patch baseline, use the AddTagsToResource action. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreatePatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePatchBaselineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreatePatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePatchBaselineInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.ApprovalRules != nil { + if err := s.ApprovalRules.Validate(); err != nil { + invalidParams.AddNested("ApprovalRules", err.(request.ErrInvalidParams)) + } + } + if s.GlobalFilters != nil { + if err := s.GlobalFilters.Validate(); err != nil { + invalidParams.AddNested("GlobalFilters", err.(request.ErrInvalidParams)) + } + } + if s.Sources != nil { + for i, v := range s.Sources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sources", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApprovalRules sets the ApprovalRules field's value. +func (s *CreatePatchBaselineInput) SetApprovalRules(v *PatchRuleGroup) *CreatePatchBaselineInput { + s.ApprovalRules = v + return s +} + +// SetApprovedPatches sets the ApprovedPatches field's value. +func (s *CreatePatchBaselineInput) SetApprovedPatches(v []*string) *CreatePatchBaselineInput { + s.ApprovedPatches = v + return s +} + +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. 
+func (s *CreatePatchBaselineInput) SetApprovedPatchesComplianceLevel(v string) *CreatePatchBaselineInput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + +// SetApprovedPatchesEnableNonSecurity sets the ApprovedPatchesEnableNonSecurity field's value. +func (s *CreatePatchBaselineInput) SetApprovedPatchesEnableNonSecurity(v bool) *CreatePatchBaselineInput { + s.ApprovedPatchesEnableNonSecurity = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreatePatchBaselineInput) SetClientToken(v string) *CreatePatchBaselineInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreatePatchBaselineInput) SetDescription(v string) *CreatePatchBaselineInput { + s.Description = &v + return s +} + +// SetGlobalFilters sets the GlobalFilters field's value. +func (s *CreatePatchBaselineInput) SetGlobalFilters(v *PatchFilterGroup) *CreatePatchBaselineInput { + s.GlobalFilters = v + return s +} + +// SetName sets the Name field's value. +func (s *CreatePatchBaselineInput) SetName(v string) *CreatePatchBaselineInput { + s.Name = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *CreatePatchBaselineInput) SetOperatingSystem(v string) *CreatePatchBaselineInput { + s.OperatingSystem = &v + return s +} + +// SetRejectedPatches sets the RejectedPatches field's value. +func (s *CreatePatchBaselineInput) SetRejectedPatches(v []*string) *CreatePatchBaselineInput { + s.RejectedPatches = v + return s +} + +// SetRejectedPatchesAction sets the RejectedPatchesAction field's value. +func (s *CreatePatchBaselineInput) SetRejectedPatchesAction(v string) *CreatePatchBaselineInput { + s.RejectedPatchesAction = &v + return s +} + +// SetSources sets the Sources field's value. +func (s *CreatePatchBaselineInput) SetSources(v []*PatchSource) *CreatePatchBaselineInput { + s.Sources = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreatePatchBaselineInput) SetTags(v []*Tag) *CreatePatchBaselineInput { + s.Tags = v + return s +} + +type CreatePatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // The ID of the created patch baseline. + BaselineId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreatePatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePatchBaselineOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *CreatePatchBaselineOutput) SetBaselineId(v string) *CreatePatchBaselineOutput { + s.BaselineId = &v + return s +} + +type CreateResourceDataSyncInput struct { + _ struct{} `type:"structure"` + + // Amazon S3 configuration details for the sync. This parameter is required + // if the SyncType value is SyncToDestination. + S3Destination *ResourceDataSyncS3Destination `type:"structure"` + + // A name for the configuration. + // + // SyncName is a required field + SyncName *string `min:"1" type:"string" required:"true"` + + // Specify information about the data sources to synchronize. This parameter + // is required if the SyncType value is SyncFromSource. + SyncSource *ResourceDataSyncSource `type:"structure"` + + // Specify SyncToDestination to create a resource data sync that synchronizes + // data to an S3 bucket for Inventory. If you specify SyncToDestination, you + // must provide a value for S3Destination. Specify SyncFromSource to synchronize + // data from a single account and multiple Regions, or multiple AWS accounts + // and Regions, as listed in AWS Organizations for Explorer. If you specify + // SyncFromSource, you must provide a value for SyncSource. The default value + // is SyncToDestination. 
+ SyncType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateResourceDataSyncInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceDataSyncInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateResourceDataSyncInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateResourceDataSyncInput"} + if s.SyncName == nil { + invalidParams.Add(request.NewErrParamRequired("SyncName")) + } + if s.SyncName != nil && len(*s.SyncName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncName", 1)) + } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncType", 1)) + } + if s.S3Destination != nil { + if err := s.S3Destination.Validate(); err != nil { + invalidParams.AddNested("S3Destination", err.(request.ErrInvalidParams)) + } + } + if s.SyncSource != nil { + if err := s.SyncSource.Validate(); err != nil { + invalidParams.AddNested("SyncSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Destination sets the S3Destination field's value. +func (s *CreateResourceDataSyncInput) SetS3Destination(v *ResourceDataSyncS3Destination) *CreateResourceDataSyncInput { + s.S3Destination = v + return s +} + +// SetSyncName sets the SyncName field's value. +func (s *CreateResourceDataSyncInput) SetSyncName(v string) *CreateResourceDataSyncInput { + s.SyncName = &v + return s +} + +// SetSyncSource sets the SyncSource field's value. +func (s *CreateResourceDataSyncInput) SetSyncSource(v *ResourceDataSyncSource) *CreateResourceDataSyncInput { + s.SyncSource = v + return s +} + +// SetSyncType sets the SyncType field's value. 
+func (s *CreateResourceDataSyncInput) SetSyncType(v string) *CreateResourceDataSyncInput { + s.SyncType = &v + return s +} + +type CreateResourceDataSyncOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateResourceDataSyncOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResourceDataSyncOutput) GoString() string { + return s.String() +} + +// You have exceeded the limit for custom schemas. Delete one or more custom +// schemas and try again. +type CustomSchemaCountLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s CustomSchemaCountLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomSchemaCountLimitExceededException) GoString() string { + return s.String() +} + +func newErrorCustomSchemaCountLimitExceededException(v protocol.ResponseMetadata) error { + return &CustomSchemaCountLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *CustomSchemaCountLimitExceededException) Code() string { + return "CustomSchemaCountLimitExceededException" +} + +// Message returns the exception's message. +func (s *CustomSchemaCountLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *CustomSchemaCountLimitExceededException) OrigErr() error { + return nil +} + +func (s *CustomSchemaCountLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *CustomSchemaCountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *CustomSchemaCountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type DeleteActivationInput struct { + _ struct{} `type:"structure"` + + // The ID of the activation that you want to delete. + // + // ActivationId is a required field + ActivationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteActivationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteActivationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteActivationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteActivationInput"} + if s.ActivationId == nil { + invalidParams.Add(request.NewErrParamRequired("ActivationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActivationId sets the ActivationId field's value. +func (s *DeleteActivationInput) SetActivationId(v string) *DeleteActivationInput { + s.ActivationId = &v + return s +} + +type DeleteActivationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteActivationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteActivationOutput) GoString() string { + return s.String() +} + +type DeleteAssociationInput struct { + _ struct{} `type:"structure"` + + // The association ID that you want to delete. + AssociationId *string `type:"string"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The name of the Systems Manager document. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s DeleteAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationInput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DeleteAssociationInput) SetAssociationId(v string) *DeleteAssociationInput { + s.AssociationId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DeleteAssociationInput) SetInstanceId(v string) *DeleteAssociationInput { + s.InstanceId = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteAssociationInput) SetName(v string) *DeleteAssociationInput { + s.Name = &v + return s +} + +type DeleteAssociationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAssociationOutput) GoString() string { + return s.String() +} + +type DeleteDocumentInput struct { + _ struct{} `type:"structure"` + + // The version of the document that you want to delete. If not provided, all + // versions of the document are deleted. + DocumentVersion *string `type:"string"` + + // Some SSM document types require that you specify a Force flag before you + // can delete the document. For example, you must specify a Force flag to delete + // a document of type ApplicationConfigurationSchema. You can restrict access + // to the Force flag in an AWS Identity and Access Management (IAM) policy. + Force *bool `type:"boolean"` + + // The name of the document. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The version name of the document that you want to delete. If not provided, + // all versions of the document are deleted. 
+ VersionName *string `type:"string"` +} + +// String returns the string representation +func (s DeleteDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDocumentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDocumentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *DeleteDocumentInput) SetDocumentVersion(v string) *DeleteDocumentInput { + s.DocumentVersion = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteDocumentInput) SetForce(v bool) *DeleteDocumentInput { + s.Force = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteDocumentInput) SetName(v string) *DeleteDocumentInput { + s.Name = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *DeleteDocumentInput) SetVersionName(v string) *DeleteDocumentInput { + s.VersionName = &v + return s +} + +type DeleteDocumentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDocumentOutput) GoString() string { + return s.String() +} + +type DeleteInventoryInput struct { + _ struct{} `type:"structure"` + + // User-provided idempotency token. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Use this option to view a summary of the deletion request without deleting + // any data or the data type. This option is useful when you only want to understand + // what will be deleted. 
Once you validate that the data to be deleted is what + // you intend to delete, you can run the same command without specifying the + // DryRun option. + DryRun *bool `type:"boolean"` + + // Use the SchemaDeleteOption to delete a custom inventory type (schema). If + // you don't choose this option, the system only deletes existing inventory + // data associated with the custom inventory type. Choose one of the following + // options: + // + // DisableSchema: If you choose this option, the system ignores all inventory + // data for the specified version, and any earlier versions. To enable this + // schema again, you must call the PutInventory action for a version greater + // than the disabled version. + // + // DeleteSchema: This option deletes the specified custom type from the Inventory + // service. You can recreate the schema later, if you want. + SchemaDeleteOption *string `type:"string" enum:"InventorySchemaDeleteOption"` + + // The name of the custom inventory type for which you want to delete either + // all previously collected data or the inventory type itself. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInventoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInventoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInventoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInventoryInput"} + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *DeleteInventoryInput) SetClientToken(v string) *DeleteInventoryInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteInventoryInput) SetDryRun(v bool) *DeleteInventoryInput { + s.DryRun = &v + return s +} + +// SetSchemaDeleteOption sets the SchemaDeleteOption field's value. +func (s *DeleteInventoryInput) SetSchemaDeleteOption(v string) *DeleteInventoryInput { + s.SchemaDeleteOption = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *DeleteInventoryInput) SetTypeName(v string) *DeleteInventoryInput { + s.TypeName = &v + return s +} + +type DeleteInventoryOutput struct { + _ struct{} `type:"structure"` + + // Every DeleteInventory action is assigned a unique ID. This option returns + // a unique ID. You can use this ID to query the status of a delete operation. + // This option is useful for ensuring that a delete operation has completed + // before you begin other actions. + DeletionId *string `type:"string"` + + // A summary of the delete operation. For more information about this summary, + // see Deleting custom inventory (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete-summary) + // in the AWS Systems Manager User Guide. + DeletionSummary *InventoryDeletionSummary `type:"structure"` + + // The name of the inventory data type specified in the request. + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteInventoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInventoryOutput) GoString() string { + return s.String() +} + +// SetDeletionId sets the DeletionId field's value. +func (s *DeleteInventoryOutput) SetDeletionId(v string) *DeleteInventoryOutput { + s.DeletionId = &v + return s +} + +// SetDeletionSummary sets the DeletionSummary field's value. 
+func (s *DeleteInventoryOutput) SetDeletionSummary(v *InventoryDeletionSummary) *DeleteInventoryOutput { + s.DeletionSummary = v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *DeleteInventoryOutput) SetTypeName(v string) *DeleteInventoryOutput { + s.TypeName = &v + return s +} + +type DeleteMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window to delete. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMaintenanceWindowInput"} + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowId sets the WindowId field's value. +func (s *DeleteMaintenanceWindowInput) SetWindowId(v string) *DeleteMaintenanceWindowInput { + s.WindowId = &v + return s +} + +type DeleteMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s DeleteMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowId sets the WindowId field's value. 
+func (s *DeleteMaintenanceWindowOutput) SetWindowId(v string) *DeleteMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +type DeleteParameterInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteParameterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParameterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteParameterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteParameterInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteParameterInput) SetName(v string) *DeleteParameterInput { + s.Name = &v + return s +} + +type DeleteParameterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteParameterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParameterOutput) GoString() string { + return s.String() +} + +type DeleteParametersInput struct { + _ struct{} `type:"structure"` + + // The names of the parameters to delete. 
+ // + // Names is a required field + Names []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteParametersInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + if s.Names != nil && len(s.Names) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Names", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. +func (s *DeleteParametersInput) SetNames(v []*string) *DeleteParametersInput { + s.Names = v + return s +} + +type DeleteParametersOutput struct { + _ struct{} `type:"structure"` + + // The names of the deleted parameters. + DeletedParameters []*string `min:"1" type:"list"` + + // The names of parameters that weren't deleted because the parameters are not + // valid. + InvalidParameters []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DeleteParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteParametersOutput) GoString() string { + return s.String() +} + +// SetDeletedParameters sets the DeletedParameters field's value. +func (s *DeleteParametersOutput) SetDeletedParameters(v []*string) *DeleteParametersOutput { + s.DeletedParameters = v + return s +} + +// SetInvalidParameters sets the InvalidParameters field's value. 
+func (s *DeleteParametersOutput) SetInvalidParameters(v []*string) *DeleteParametersOutput { + s.InvalidParameters = v + return s +} + +type DeletePatchBaselineInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline to delete. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePatchBaselineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePatchBaselineInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. +func (s *DeletePatchBaselineInput) SetBaselineId(v string) *DeletePatchBaselineInput { + s.BaselineId = &v + return s +} + +type DeletePatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted patch baseline. + BaselineId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s DeletePatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePatchBaselineOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. 
+func (s *DeletePatchBaselineOutput) SetBaselineId(v string) *DeletePatchBaselineOutput { + s.BaselineId = &v + return s +} + +type DeleteResourceDataSyncInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration to delete. + // + // SyncName is a required field + SyncName *string `min:"1" type:"string" required:"true"` + + // Specify the type of resource data sync to delete. + SyncType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteResourceDataSyncInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceDataSyncInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteResourceDataSyncInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourceDataSyncInput"} + if s.SyncName == nil { + invalidParams.Add(request.NewErrParamRequired("SyncName")) + } + if s.SyncName != nil && len(*s.SyncName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncName", 1)) + } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSyncName sets the SyncName field's value. +func (s *DeleteResourceDataSyncInput) SetSyncName(v string) *DeleteResourceDataSyncInput { + s.SyncName = &v + return s +} + +// SetSyncType sets the SyncType field's value. 
+func (s *DeleteResourceDataSyncInput) SetSyncType(v string) *DeleteResourceDataSyncInput { + s.SyncType = &v + return s +} + +type DeleteResourceDataSyncOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteResourceDataSyncOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourceDataSyncOutput) GoString() string { + return s.String() +} + +type DeregisterManagedInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the managed instance when you registered it using the + // activation process. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterManagedInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterManagedInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterManagedInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterManagedInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *DeregisterManagedInstanceInput) SetInstanceId(v string) *DeregisterManagedInstanceInput { + s.InstanceId = &v + return s +} + +type DeregisterManagedInstanceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterManagedInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterManagedInstanceOutput) GoString() string { + return s.String() +} + +type DeregisterPatchBaselineForPatchGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline to deregister the patch group from. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` + + // The name of the patch group that should be deregistered from the patch baseline. + // + // PatchGroup is a required field + PatchGroup *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterPatchBaselineForPatchGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterPatchBaselineForPatchGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeregisterPatchBaselineForPatchGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterPatchBaselineForPatchGroupInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + if s.PatchGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchGroup")) + } + if s.PatchGroup != nil && len(*s.PatchGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PatchGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. +func (s *DeregisterPatchBaselineForPatchGroupInput) SetBaselineId(v string) *DeregisterPatchBaselineForPatchGroupInput { + s.BaselineId = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *DeregisterPatchBaselineForPatchGroupInput) SetPatchGroup(v string) *DeregisterPatchBaselineForPatchGroupInput { + s.PatchGroup = &v + return s +} + +type DeregisterPatchBaselineForPatchGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline the patch group was deregistered from. + BaselineId *string `min:"20" type:"string"` + + // The name of the patch group deregistered from the patch baseline. + PatchGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeregisterPatchBaselineForPatchGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterPatchBaselineForPatchGroupOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *DeregisterPatchBaselineForPatchGroupOutput) SetBaselineId(v string) *DeregisterPatchBaselineForPatchGroupOutput { + s.BaselineId = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. 
+func (s *DeregisterPatchBaselineForPatchGroupOutput) SetPatchGroup(v string) *DeregisterPatchBaselineForPatchGroupOutput { + s.PatchGroup = &v + return s +} + +type DeregisterTargetFromMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // The system checks if the target is being referenced by a task. If the target + // is being referenced, the system returns an error and does not deregister + // the target from the maintenance window. + Safe *bool `type:"boolean"` + + // The ID of the maintenance window the target should be removed from. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` + + // The ID of the target definition to remove. + // + // WindowTargetId is a required field + WindowTargetId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterTargetFromMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTargetFromMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterTargetFromMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterTargetFromMaintenanceWindowInput"} + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.WindowTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowTargetId")) + } + if s.WindowTargetId != nil && len(*s.WindowTargetId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowTargetId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSafe sets the Safe field's value. 
+func (s *DeregisterTargetFromMaintenanceWindowInput) SetSafe(v bool) *DeregisterTargetFromMaintenanceWindowInput { + s.Safe = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *DeregisterTargetFromMaintenanceWindowInput) SetWindowId(v string) *DeregisterTargetFromMaintenanceWindowInput { + s.WindowId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *DeregisterTargetFromMaintenanceWindowInput) SetWindowTargetId(v string) *DeregisterTargetFromMaintenanceWindowInput { + s.WindowTargetId = &v + return s +} + +type DeregisterTargetFromMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window the target was removed from. + WindowId *string `min:"20" type:"string"` + + // The ID of the removed target definition. + WindowTargetId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s DeregisterTargetFromMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTargetFromMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowId sets the WindowId field's value. +func (s *DeregisterTargetFromMaintenanceWindowOutput) SetWindowId(v string) *DeregisterTargetFromMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *DeregisterTargetFromMaintenanceWindowOutput) SetWindowTargetId(v string) *DeregisterTargetFromMaintenanceWindowOutput { + s.WindowTargetId = &v + return s +} + +type DeregisterTaskFromMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window the task should be removed from. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` + + // The ID of the task to remove from the maintenance window. 
+ // + // WindowTaskId is a required field + WindowTaskId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterTaskFromMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskFromMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterTaskFromMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterTaskFromMaintenanceWindowInput"} + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.WindowTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowTaskId")) + } + if s.WindowTaskId != nil && len(*s.WindowTaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowTaskId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowId sets the WindowId field's value. +func (s *DeregisterTaskFromMaintenanceWindowInput) SetWindowId(v string) *DeregisterTaskFromMaintenanceWindowInput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *DeregisterTaskFromMaintenanceWindowInput) SetWindowTaskId(v string) *DeregisterTaskFromMaintenanceWindowInput { + s.WindowTaskId = &v + return s +} + +type DeregisterTaskFromMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window the task was removed from. + WindowId *string `min:"20" type:"string"` + + // The ID of the task removed from the maintenance window. 
+ WindowTaskId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s DeregisterTaskFromMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTaskFromMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowId sets the WindowId field's value. +func (s *DeregisterTaskFromMaintenanceWindowOutput) SetWindowId(v string) *DeregisterTaskFromMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *DeregisterTaskFromMaintenanceWindowOutput) SetWindowTaskId(v string) *DeregisterTaskFromMaintenanceWindowOutput { + s.WindowTaskId = &v + return s +} + +// Filter for the DescribeActivation API. +type DescribeActivationsFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + FilterKey *string `type:"string" enum:"DescribeActivationsFilterKeys"` + + // The filter values. + FilterValues []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeActivationsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivationsFilter) GoString() string { + return s.String() +} + +// SetFilterKey sets the FilterKey field's value. +func (s *DescribeActivationsFilter) SetFilterKey(v string) *DescribeActivationsFilter { + s.FilterKey = &v + return s +} + +// SetFilterValues sets the FilterValues field's value. +func (s *DescribeActivationsFilter) SetFilterValues(v []*string) *DescribeActivationsFilter { + s.FilterValues = v + return s +} + +type DescribeActivationsInput struct { + _ struct{} `type:"structure"` + + // A filter to view information about your activations. + Filters []*DescribeActivationsFilter `type:"list"` + + // The maximum number of items to return for this call. 
The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeActivationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeActivationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeActivationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeActivationsInput) SetFilters(v []*DescribeActivationsFilter) *DescribeActivationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeActivationsInput) SetMaxResults(v int64) *DescribeActivationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeActivationsInput) SetNextToken(v string) *DescribeActivationsInput { + s.NextToken = &v + return s +} + +type DescribeActivationsOutput struct { + _ struct{} `type:"structure"` + + // A list of activations for your AWS account. + ActivationList []*Activation `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeActivationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActivationsOutput) GoString() string { + return s.String() +} + +// SetActivationList sets the ActivationList field's value. +func (s *DescribeActivationsOutput) SetActivationList(v []*Activation) *DescribeActivationsOutput { + s.ActivationList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeActivationsOutput) SetNextToken(v string) *DescribeActivationsOutput { + s.NextToken = &v + return s +} + +type DescribeAssociationExecutionTargetsInput struct { + _ struct{} `type:"structure"` + + // The association ID that includes the execution for which you want to view + // details. + // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` + + // The execution ID for which you want to view details. + // + // ExecutionId is a required field + ExecutionId *string `type:"string" required:"true"` + + // Filters for the request. You can specify the following filters and values. + // + // Status (EQUAL) + // + // ResourceId (EQUAL) + // + // ResourceType (EQUAL) + Filters []*AssociationExecutionTargetsFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAssociationExecutionTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationExecutionTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAssociationExecutionTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAssociationExecutionTargetsInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.ExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionId")) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DescribeAssociationExecutionTargetsInput) SetAssociationId(v string) *DescribeAssociationExecutionTargetsInput { + s.AssociationId = &v + return s +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *DescribeAssociationExecutionTargetsInput) SetExecutionId(v string) *DescribeAssociationExecutionTargetsInput { + s.ExecutionId = &v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeAssociationExecutionTargetsInput) SetFilters(v []*AssociationExecutionTargetsFilter) *DescribeAssociationExecutionTargetsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAssociationExecutionTargetsInput) SetMaxResults(v int64) *DescribeAssociationExecutionTargetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAssociationExecutionTargetsInput) SetNextToken(v string) *DescribeAssociationExecutionTargetsInput { + s.NextToken = &v + return s +} + +type DescribeAssociationExecutionTargetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the execution. + AssociationExecutionTargets []*AssociationExecutionTarget `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAssociationExecutionTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationExecutionTargetsOutput) GoString() string { + return s.String() +} + +// SetAssociationExecutionTargets sets the AssociationExecutionTargets field's value. +func (s *DescribeAssociationExecutionTargetsOutput) SetAssociationExecutionTargets(v []*AssociationExecutionTarget) *DescribeAssociationExecutionTargetsOutput { + s.AssociationExecutionTargets = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAssociationExecutionTargetsOutput) SetNextToken(v string) *DescribeAssociationExecutionTargetsOutput { + s.NextToken = &v + return s +} + +type DescribeAssociationExecutionsInput struct { + _ struct{} `type:"structure"` + + // The association ID for which you want to view execution history details. 
+ // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` + + // Filters for the request. You can specify the following filters and values. + // + // ExecutionId (EQUAL) + // + // Status (EQUAL) + // + // CreatedTime (EQUAL, GREATER_THAN, LESS_THAN) + Filters []*AssociationExecutionFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAssociationExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAssociationExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAssociationExecutionsInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. 
+func (s *DescribeAssociationExecutionsInput) SetAssociationId(v string) *DescribeAssociationExecutionsInput { + s.AssociationId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAssociationExecutionsInput) SetFilters(v []*AssociationExecutionFilter) *DescribeAssociationExecutionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAssociationExecutionsInput) SetMaxResults(v int64) *DescribeAssociationExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAssociationExecutionsInput) SetNextToken(v string) *DescribeAssociationExecutionsInput { + s.NextToken = &v + return s +} + +type DescribeAssociationExecutionsOutput struct { + _ struct{} `type:"structure"` + + // A list of the executions for the specified association ID. + AssociationExecutions []*AssociationExecution `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAssociationExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationExecutionsOutput) GoString() string { + return s.String() +} + +// SetAssociationExecutions sets the AssociationExecutions field's value. +func (s *DescribeAssociationExecutionsOutput) SetAssociationExecutions(v []*AssociationExecution) *DescribeAssociationExecutionsOutput { + s.AssociationExecutions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAssociationExecutionsOutput) SetNextToken(v string) *DescribeAssociationExecutionsOutput { + s.NextToken = &v + return s +} + +type DescribeAssociationInput struct { + _ struct{} `type:"structure"` + + // The association ID for which you want information. 
+ AssociationId *string `type:"string"` + + // Specify the association version to retrieve. To view the latest version, + // either specify $LATEST for this parameter, or omit this parameter. To view + // a list of all associations for an instance, use ListAssociations. To get + // a list of versions for a specific association, use ListAssociationVersions. + AssociationVersion *string `type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` + + // The name of the Systems Manager document. + Name *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationInput) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DescribeAssociationInput) SetAssociationId(v string) *DescribeAssociationInput { + s.AssociationId = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *DescribeAssociationInput) SetAssociationVersion(v string) *DescribeAssociationInput { + s.AssociationVersion = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeAssociationInput) SetInstanceId(v string) *DescribeAssociationInput { + s.InstanceId = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeAssociationInput) SetName(v string) *DescribeAssociationInput { + s.Name = &v + return s +} + +type DescribeAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the association. 
+ AssociationDescription *AssociationDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAssociationOutput) GoString() string { + return s.String() +} + +// SetAssociationDescription sets the AssociationDescription field's value. +func (s *DescribeAssociationOutput) SetAssociationDescription(v *AssociationDescription) *DescribeAssociationOutput { + s.AssociationDescription = v + return s +} + +type DescribeAutomationExecutionsInput struct { + _ struct{} `type:"structure"` + + // Filters used to limit the scope of executions that are requested. + Filters []*AutomationExecutionFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutomationExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutomationExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeAutomationExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAutomationExecutionsInput"} + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAutomationExecutionsInput) SetFilters(v []*AutomationExecutionFilter) *DescribeAutomationExecutionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAutomationExecutionsInput) SetMaxResults(v int64) *DescribeAutomationExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAutomationExecutionsInput) SetNextToken(v string) *DescribeAutomationExecutionsInput { + s.NextToken = &v + return s +} + +type DescribeAutomationExecutionsOutput struct { + _ struct{} `type:"structure"` + + // The list of details about each automation execution which has occurred which + // matches the filter specification, if any. + AutomationExecutionMetadataList []*AutomationExecutionMetadata `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAutomationExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutomationExecutionsOutput) GoString() string { + return s.String() +} + +// SetAutomationExecutionMetadataList sets the AutomationExecutionMetadataList field's value. +func (s *DescribeAutomationExecutionsOutput) SetAutomationExecutionMetadataList(v []*AutomationExecutionMetadata) *DescribeAutomationExecutionsOutput { + s.AutomationExecutionMetadataList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAutomationExecutionsOutput) SetNextToken(v string) *DescribeAutomationExecutionsOutput { + s.NextToken = &v + return s +} + +type DescribeAutomationStepExecutionsInput struct { + _ struct{} `type:"structure"` + + // The Automation execution ID for which you want step execution descriptions. + // + // AutomationExecutionId is a required field + AutomationExecutionId *string `min:"36" type:"string" required:"true"` + + // One or more filters to limit the number of step executions returned by the + // request. + Filters []*StepExecutionFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // A boolean that indicates whether to list step executions in reverse order + // by start time. The default value is false. 
+ ReverseOrder *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeAutomationStepExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutomationStepExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAutomationStepExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAutomationStepExecutionsInput"} + if s.AutomationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("AutomationExecutionId")) + } + if s.AutomationExecutionId != nil && len(*s.AutomationExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("AutomationExecutionId", 36)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *DescribeAutomationStepExecutionsInput) SetAutomationExecutionId(v string) *DescribeAutomationStepExecutionsInput { + s.AutomationExecutionId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAutomationStepExecutionsInput) SetFilters(v []*StepExecutionFilter) *DescribeAutomationStepExecutionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeAutomationStepExecutionsInput) SetMaxResults(v int64) *DescribeAutomationStepExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAutomationStepExecutionsInput) SetNextToken(v string) *DescribeAutomationStepExecutionsInput { + s.NextToken = &v + return s +} + +// SetReverseOrder sets the ReverseOrder field's value. +func (s *DescribeAutomationStepExecutionsInput) SetReverseOrder(v bool) *DescribeAutomationStepExecutionsInput { + s.ReverseOrder = &v + return s +} + +type DescribeAutomationStepExecutionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // A list of details about the current state of all steps that make up an execution. + StepExecutions []*StepExecution `type:"list"` +} + +// String returns the string representation +func (s DescribeAutomationStepExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAutomationStepExecutionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAutomationStepExecutionsOutput) SetNextToken(v string) *DescribeAutomationStepExecutionsOutput { + s.NextToken = &v + return s +} + +// SetStepExecutions sets the StepExecutions field's value. +func (s *DescribeAutomationStepExecutionsOutput) SetStepExecutions(v []*StepExecution) *DescribeAutomationStepExecutionsOutput { + s.StepExecutions = v + return s +} + +type DescribeAvailablePatchesInput struct { + _ struct{} `type:"structure"` + + // Filters used to scope down the returned patches. + Filters []*PatchOrchestratorFilter `type:"list"` + + // The maximum number of patches to return (per page). 
+ MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAvailablePatchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailablePatchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAvailablePatchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAvailablePatchesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeAvailablePatchesInput) SetFilters(v []*PatchOrchestratorFilter) *DescribeAvailablePatchesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAvailablePatchesInput) SetMaxResults(v int64) *DescribeAvailablePatchesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAvailablePatchesInput) SetNextToken(v string) *DescribeAvailablePatchesInput { + s.NextToken = &v + return s +} + +type DescribeAvailablePatchesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // An array of patches. Each entry in the array is a patch structure. 
+ Patches []*Patch `type:"list"` +} + +// String returns the string representation +func (s DescribeAvailablePatchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAvailablePatchesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAvailablePatchesOutput) SetNextToken(v string) *DescribeAvailablePatchesOutput { + s.NextToken = &v + return s +} + +// SetPatches sets the Patches field's value. +func (s *DescribeAvailablePatchesOutput) SetPatches(v []*Patch) *DescribeAvailablePatchesOutput { + s.Patches = v + return s +} + +type DescribeDocumentInput struct { + _ struct{} `type:"structure"` + + // The document version for which you want information. Can be a specific version + // or the default version. + DocumentVersion *string `type:"string"` + + // The name of the Systems Manager document. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An optional field specifying the version of the artifact associated with + // the document. For example, "Release 12, Update 6". This value is unique across + // all versions of a document, and cannot be changed. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDocumentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDocumentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentVersion sets the DocumentVersion field's value. 
+func (s *DescribeDocumentInput) SetDocumentVersion(v string) *DescribeDocumentInput { + s.DocumentVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeDocumentInput) SetName(v string) *DescribeDocumentInput { + s.Name = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *DescribeDocumentInput) SetVersionName(v string) *DescribeDocumentInput { + s.VersionName = &v + return s +} + +type DescribeDocumentOutput struct { + _ struct{} `type:"structure"` + + // Information about the Systems Manager document. + Document *DocumentDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentOutput) GoString() string { + return s.String() +} + +// SetDocument sets the Document field's value. +func (s *DescribeDocumentOutput) SetDocument(v *DocumentDescription) *DescribeDocumentOutput { + s.Document = v + return s +} + +type DescribeDocumentPermissionInput struct { + _ struct{} `type:"structure"` + + // The name of the document for which you are the owner. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The permission type for the document. The permission type can be Share. + // + // PermissionType is a required field + PermissionType *string `type:"string" required:"true" enum:"DocumentPermissionType"` +} + +// String returns the string representation +func (s DescribeDocumentPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDocumentPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDocumentPermissionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.PermissionType == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DescribeDocumentPermissionInput) SetName(v string) *DescribeDocumentPermissionInput { + s.Name = &v + return s +} + +// SetPermissionType sets the PermissionType field's value. +func (s *DescribeDocumentPermissionInput) SetPermissionType(v string) *DescribeDocumentPermissionInput { + s.PermissionType = &v + return s +} + +type DescribeDocumentPermissionOutput struct { + _ struct{} `type:"structure"` + + // The account IDs that have permission to use this document. The ID can be + // either an AWS account or All. + AccountIds []*string `type:"list"` + + // A list of AWS accounts where the current document is shared and the version + // shared with each account. + AccountSharingInfoList []*AccountSharingInfo `type:"list"` +} + +// String returns the string representation +func (s DescribeDocumentPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDocumentPermissionOutput) GoString() string { + return s.String() +} + +// SetAccountIds sets the AccountIds field's value. +func (s *DescribeDocumentPermissionOutput) SetAccountIds(v []*string) *DescribeDocumentPermissionOutput { + s.AccountIds = v + return s +} + +// SetAccountSharingInfoList sets the AccountSharingInfoList field's value. 
+func (s *DescribeDocumentPermissionOutput) SetAccountSharingInfoList(v []*AccountSharingInfo) *DescribeDocumentPermissionOutput { + s.AccountSharingInfoList = v + return s +} + +type DescribeEffectiveInstanceAssociationsInput struct { + _ struct{} `type:"structure"` + + // The instance ID for which you want to view all associations. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEffectiveInstanceAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEffectiveInstanceAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEffectiveInstanceAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEffectiveInstanceAssociationsInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeEffectiveInstanceAssociationsInput) SetInstanceId(v string) *DescribeEffectiveInstanceAssociationsInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeEffectiveInstanceAssociationsInput) SetMaxResults(v int64) *DescribeEffectiveInstanceAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEffectiveInstanceAssociationsInput) SetNextToken(v string) *DescribeEffectiveInstanceAssociationsInput { + s.NextToken = &v + return s +} + +type DescribeEffectiveInstanceAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The associations for the requested instance. + Associations []*InstanceAssociation `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEffectiveInstanceAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEffectiveInstanceAssociationsOutput) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. +func (s *DescribeEffectiveInstanceAssociationsOutput) SetAssociations(v []*InstanceAssociation) *DescribeEffectiveInstanceAssociationsOutput { + s.Associations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEffectiveInstanceAssociationsOutput) SetNextToken(v string) *DescribeEffectiveInstanceAssociationsOutput { + s.NextToken = &v + return s +} + +type DescribeEffectivePatchesForPatchBaselineInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline to retrieve the effective patches for. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` + + // The maximum number of patches to return (per page). + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEffectivePatchesForPatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEffectivePatchesForPatchBaselineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeEffectivePatchesForPatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEffectivePatchesForPatchBaselineInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. +func (s *DescribeEffectivePatchesForPatchBaselineInput) SetBaselineId(v string) *DescribeEffectivePatchesForPatchBaselineInput { + s.BaselineId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeEffectivePatchesForPatchBaselineInput) SetMaxResults(v int64) *DescribeEffectivePatchesForPatchBaselineInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEffectivePatchesForPatchBaselineInput) SetNextToken(v string) *DescribeEffectivePatchesForPatchBaselineInput { + s.NextToken = &v + return s +} + +type DescribeEffectivePatchesForPatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // An array of patches and patch status. + EffectivePatches []*EffectivePatch `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeEffectivePatchesForPatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEffectivePatchesForPatchBaselineOutput) GoString() string { + return s.String() +} + +// SetEffectivePatches sets the EffectivePatches field's value. +func (s *DescribeEffectivePatchesForPatchBaselineOutput) SetEffectivePatches(v []*EffectivePatch) *DescribeEffectivePatchesForPatchBaselineOutput { + s.EffectivePatches = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeEffectivePatchesForPatchBaselineOutput) SetNextToken(v string) *DescribeEffectivePatchesForPatchBaselineOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceAssociationsStatusInput struct { + _ struct{} `type:"structure"` + + // The instance IDs for which you want association status information. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceAssociationsStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAssociationsStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeInstanceAssociationsStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceAssociationsStatusInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeInstanceAssociationsStatusInput) SetInstanceId(v string) *DescribeInstanceAssociationsStatusInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceAssociationsStatusInput) SetMaxResults(v int64) *DescribeInstanceAssociationsStatusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceAssociationsStatusInput) SetNextToken(v string) *DescribeInstanceAssociationsStatusInput { + s.NextToken = &v + return s +} + +type DescribeInstanceAssociationsStatusOutput struct { + _ struct{} `type:"structure"` + + // Status information about the association. + InstanceAssociationStatusInfos []*InstanceAssociationStatusInfo `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceAssociationsStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceAssociationsStatusOutput) GoString() string { + return s.String() +} + +// SetInstanceAssociationStatusInfos sets the InstanceAssociationStatusInfos field's value. 
+func (s *DescribeInstanceAssociationsStatusOutput) SetInstanceAssociationStatusInfos(v []*InstanceAssociationStatusInfo) *DescribeInstanceAssociationsStatusOutput { + s.InstanceAssociationStatusInfos = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceAssociationsStatusOutput) SetNextToken(v string) *DescribeInstanceAssociationsStatusOutput { + s.NextToken = &v + return s +} + +type DescribeInstanceInformationInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of instances. + // You can filter based on tags applied to EC2 instances. Use this Filters data + // type instead of InstanceInformationFilterList, which is deprecated. + Filters []*InstanceInformationStringFilter `type:"list"` + + // This is a legacy method. We recommend that you don't use this method. Instead, + // use the Filters data type. Filters enables you to return instance information + // by filtering based on tags applied to managed instances. + // + // Attempting to use InstanceInformationFilterList and Filters leads to an exception + // error. + InstanceInformationFilterList []*InstanceInformationFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceInformationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceInformationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeInstanceInformationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceInformationInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InstanceInformationFilterList != nil { + for i, v := range s.InstanceInformationFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceInformationFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstanceInformationInput) SetFilters(v []*InstanceInformationStringFilter) *DescribeInstanceInformationInput { + s.Filters = v + return s +} + +// SetInstanceInformationFilterList sets the InstanceInformationFilterList field's value. +func (s *DescribeInstanceInformationInput) SetInstanceInformationFilterList(v []*InstanceInformationFilter) *DescribeInstanceInformationInput { + s.InstanceInformationFilterList = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstanceInformationInput) SetMaxResults(v int64) *DescribeInstanceInformationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceInformationInput) SetNextToken(v string) *DescribeInstanceInformationInput { + s.NextToken = &v + return s +} + +type DescribeInstanceInformationOutput struct { + _ struct{} `type:"structure"` + + // The instance information list. 
+ InstanceInformationList []*InstanceInformation `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceInformationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceInformationOutput) GoString() string { + return s.String() +} + +// SetInstanceInformationList sets the InstanceInformationList field's value. +func (s *DescribeInstanceInformationOutput) SetInstanceInformationList(v []*InstanceInformation) *DescribeInstanceInformationOutput { + s.InstanceInformationList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceInformationOutput) SetNextToken(v string) *DescribeInstanceInformationOutput { + s.NextToken = &v + return s +} + +type DescribeInstancePatchStatesForPatchGroupInput struct { + _ struct{} `type:"structure"` + + // Each entry in the array is a structure containing: + // + // Key (string between 1 and 200 characters) + // + // Values (array containing a single string) + // + // Type (string "Equal", "NotEqual", "LessThan", "GreaterThan") + Filters []*InstancePatchStateFilter `type:"list"` + + // The maximum number of patches to return (per page). + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The name of the patch group for which the patch state information should + // be retrieved. 
+ // + // PatchGroup is a required field + PatchGroup *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInstancePatchStatesForPatchGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchStatesForPatchGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstancePatchStatesForPatchGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstancePatchStatesForPatchGroupInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.PatchGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchGroup")) + } + if s.PatchGroup != nil && len(*s.PatchGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PatchGroup", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstancePatchStatesForPatchGroupInput) SetFilters(v []*InstancePatchStateFilter) *DescribeInstancePatchStatesForPatchGroupInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstancePatchStatesForPatchGroupInput) SetMaxResults(v int64) *DescribeInstancePatchStatesForPatchGroupInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeInstancePatchStatesForPatchGroupInput) SetNextToken(v string) *DescribeInstancePatchStatesForPatchGroupInput { + s.NextToken = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *DescribeInstancePatchStatesForPatchGroupInput) SetPatchGroup(v string) *DescribeInstancePatchStatesForPatchGroupInput { + s.PatchGroup = &v + return s +} + +type DescribeInstancePatchStatesForPatchGroupOutput struct { + _ struct{} `type:"structure"` + + // The high-level patch state for the requested instances. + InstancePatchStates []*InstancePatchState `min:"1" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancePatchStatesForPatchGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchStatesForPatchGroupOutput) GoString() string { + return s.String() +} + +// SetInstancePatchStates sets the InstancePatchStates field's value. +func (s *DescribeInstancePatchStatesForPatchGroupOutput) SetInstancePatchStates(v []*InstancePatchState) *DescribeInstancePatchStatesForPatchGroupOutput { + s.InstancePatchStates = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancePatchStatesForPatchGroupOutput) SetNextToken(v string) *DescribeInstancePatchStatesForPatchGroupOutput { + s.NextToken = &v + return s +} + +type DescribeInstancePatchStatesInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance whose patch state information should be retrieved. + // + // InstanceIds is a required field + InstanceIds []*string `type:"list" required:"true"` + + // The maximum number of instances to return (per page). + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. 
(You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancePatchStatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchStatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstancePatchStatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstancePatchStatesInput"} + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) + } + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeInstancePatchStatesInput) SetInstanceIds(v []*string) *DescribeInstancePatchStatesInput { + s.InstanceIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstancePatchStatesInput) SetMaxResults(v int64) *DescribeInstancePatchStatesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancePatchStatesInput) SetNextToken(v string) *DescribeInstancePatchStatesInput { + s.NextToken = &v + return s +} + +type DescribeInstancePatchStatesOutput struct { + _ struct{} `type:"structure"` + + // The high-level patch state for the requested instances. + InstancePatchStates []*InstancePatchState `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancePatchStatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchStatesOutput) GoString() string { + return s.String() +} + +// SetInstancePatchStates sets the InstancePatchStates field's value. +func (s *DescribeInstancePatchStatesOutput) SetInstancePatchStates(v []*InstancePatchState) *DescribeInstancePatchStatesOutput { + s.InstancePatchStates = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancePatchStatesOutput) SetNextToken(v string) *DescribeInstancePatchStatesOutput { + s.NextToken = &v + return s +} + +type DescribeInstancePatchesInput struct { + _ struct{} `type:"structure"` + + // An array of structures. Each entry in the array is a structure containing + // a Key, Value combination. Valid values for Key are Classification | KBId + // | Severity | State. + Filters []*PatchOrchestratorFilter `type:"list"` + + // The ID of the instance whose patch state information should be retrieved. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The maximum number of patches to return (per page). + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstancePatchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeInstancePatchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstancePatchesInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstancePatchesInput) SetFilters(v []*PatchOrchestratorFilter) *DescribeInstancePatchesInput { + s.Filters = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *DescribeInstancePatchesInput) SetInstanceId(v string) *DescribeInstancePatchesInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInstancePatchesInput) SetMaxResults(v int64) *DescribeInstancePatchesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancePatchesInput) SetNextToken(v string) *DescribeInstancePatchesInput { + s.NextToken = &v + return s +} + +type DescribeInstancePatchesOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` + + // Each entry in the array is a structure containing: + // + // Title (string) + // + // KBId (string) + // + // Classification (string) + // + // Severity (string) + // + // State (string, such as "INSTALLED" or "FAILED") + // + // InstalledTime (DateTime) + // + // InstalledBy (string) + Patches []*PatchComplianceData `type:"list"` +} + +// String returns the string representation +func (s DescribeInstancePatchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstancePatchesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstancePatchesOutput) SetNextToken(v string) *DescribeInstancePatchesOutput { + s.NextToken = &v + return s +} + +// SetPatches sets the Patches field's value. +func (s *DescribeInstancePatchesOutput) SetPatches(v []*PatchComplianceData) *DescribeInstancePatchesOutput { + s.Patches = v + return s +} + +type DescribeInventoryDeletionsInput struct { + _ struct{} `type:"structure"` + + // Specify the delete inventory ID for which you want information. This ID was + // returned by the DeleteInventory action. + DeletionId *string `type:"string"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInventoryDeletionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInventoryDeletionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeInventoryDeletionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInventoryDeletionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeletionId sets the DeletionId field's value. +func (s *DescribeInventoryDeletionsInput) SetDeletionId(v string) *DescribeInventoryDeletionsInput { + s.DeletionId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInventoryDeletionsInput) SetMaxResults(v int64) *DescribeInventoryDeletionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInventoryDeletionsInput) SetNextToken(v string) *DescribeInventoryDeletionsInput { + s.NextToken = &v + return s +} + +type DescribeInventoryDeletionsOutput struct { + _ struct{} `type:"structure"` + + // A list of status items for deleted inventory. + InventoryDeletions []*InventoryDeletionStatusItem `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInventoryDeletionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInventoryDeletionsOutput) GoString() string { + return s.String() +} + +// SetInventoryDeletions sets the InventoryDeletions field's value. +func (s *DescribeInventoryDeletionsOutput) SetInventoryDeletions(v []*InventoryDeletionStatusItem) *DescribeInventoryDeletionsOutput { + s.InventoryDeletions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeInventoryDeletionsOutput) SetNextToken(v string) *DescribeInventoryDeletionsOutput { + s.NextToken = &v + return s +} + +type DescribeMaintenanceWindowExecutionTaskInvocationsInput struct { + _ struct{} `type:"structure"` + + // Optional filters used to scope down the returned task invocations. The supported + // filter key is STATUS with the corresponding values PENDING, IN_PROGRESS, + // SUCCESS, FAILED, TIMED_OUT, CANCELLING, and CANCELLED. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The ID of the specific task in the maintenance window task that should be + // retrieved. + // + // TaskId is a required field + TaskId *string `min:"36" type:"string" required:"true"` + + // The ID of the maintenance window execution the task is part of. + // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionTaskInvocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionTaskInvocationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowExecutionTaskInvocationsInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 36)) + } + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowExecutionTaskInvocationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) SetMaxResults(v int64) *DescribeMaintenanceWindowExecutionTaskInvocationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionTaskInvocationsInput { + s.NextToken = &v + return s +} + +// SetTaskId sets the TaskId field's value. 
+func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) SetTaskId(v string) *DescribeMaintenanceWindowExecutionTaskInvocationsInput { + s.TaskId = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *DescribeMaintenanceWindowExecutionTaskInvocationsInput) SetWindowExecutionId(v string) *DescribeMaintenanceWindowExecutionTaskInvocationsInput { + s.WindowExecutionId = &v + return s +} + +type DescribeMaintenanceWindowExecutionTaskInvocationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // Information about the task invocation results per invocation. + WindowExecutionTaskInvocationIdentities []*MaintenanceWindowExecutionTaskInvocationIdentity `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionTaskInvocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionTaskInvocationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionTaskInvocationsOutput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionTaskInvocationsOutput { + s.NextToken = &v + return s +} + +// SetWindowExecutionTaskInvocationIdentities sets the WindowExecutionTaskInvocationIdentities field's value. 
+func (s *DescribeMaintenanceWindowExecutionTaskInvocationsOutput) SetWindowExecutionTaskInvocationIdentities(v []*MaintenanceWindowExecutionTaskInvocationIdentity) *DescribeMaintenanceWindowExecutionTaskInvocationsOutput { + s.WindowExecutionTaskInvocationIdentities = v + return s +} + +type DescribeMaintenanceWindowExecutionTasksInput struct { + _ struct{} `type:"structure"` + + // Optional filters used to scope down the returned tasks. The supported filter + // key is STATUS with the corresponding values PENDING, IN_PROGRESS, SUCCESS, + // FAILED, TIMED_OUT, CANCELLING, and CANCELLED. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The ID of the maintenance window execution whose task executions should be + // retrieved. + // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowExecutionTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowExecutionTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowExecutionTasksInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowExecutionTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowExecutionTasksInput) SetMaxResults(v int64) *DescribeMaintenanceWindowExecutionTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionTasksInput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionTasksInput { + s.NextToken = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *DescribeMaintenanceWindowExecutionTasksInput) SetWindowExecutionId(v string) *DescribeMaintenanceWindowExecutionTasksInput { + s.WindowExecutionId = &v + return s +} + +type DescribeMaintenanceWindowExecutionTasksOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` + + // Information about the task executions. + WindowExecutionTaskIdentities []*MaintenanceWindowExecutionTaskIdentity `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionTasksOutput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionTasksOutput { + s.NextToken = &v + return s +} + +// SetWindowExecutionTaskIdentities sets the WindowExecutionTaskIdentities field's value. +func (s *DescribeMaintenanceWindowExecutionTasksOutput) SetWindowExecutionTaskIdentities(v []*MaintenanceWindowExecutionTaskIdentity) *DescribeMaintenanceWindowExecutionTasksOutput { + s.WindowExecutionTaskIdentities = v + return s +} + +type DescribeMaintenanceWindowExecutionsInput struct { + _ struct{} `type:"structure"` + + // Each entry in the array is a structure containing: + // + // Key (string, between 1 and 128 characters) + // + // Values (array of strings, each string is between 1 and 256 characters) + // + // The supported Keys are ExecutedBefore and ExecutedAfter with the value being + // a date/time string such as 2016-11-04T05:00:00Z. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The ID of the maintenance window whose executions should be retrieved. 
+ // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMaintenanceWindowExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowExecutionsInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowExecutionsInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowExecutionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowExecutionsInput) SetMaxResults(v int64) *DescribeMaintenanceWindowExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionsInput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionsInput { + s.NextToken = &v + return s +} + +// SetWindowId sets the WindowId field's value. 
+func (s *DescribeMaintenanceWindowExecutionsInput) SetWindowId(v string) *DescribeMaintenanceWindowExecutionsInput { + s.WindowId = &v + return s +} + +type DescribeMaintenanceWindowExecutionsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // Information about the maintenance window executions. + WindowExecutions []*MaintenanceWindowExecution `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowExecutionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowExecutionsOutput) SetNextToken(v string) *DescribeMaintenanceWindowExecutionsOutput { + s.NextToken = &v + return s +} + +// SetWindowExecutions sets the WindowExecutions field's value. +func (s *DescribeMaintenanceWindowExecutionsOutput) SetWindowExecutions(v []*MaintenanceWindowExecution) *DescribeMaintenanceWindowExecutionsOutput { + s.WindowExecutions = v + return s +} + +type DescribeMaintenanceWindowScheduleInput struct { + _ struct{} `type:"structure"` + + // Filters used to limit the range of results. For example, you can limit maintenance + // window executions to only those scheduled before or after a certain date + // and time. + Filters []*PatchOrchestratorFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` + + // The type of resource you want to retrieve information about. For example, + // "INSTANCE". + ResourceType *string `type:"string" enum:"MaintenanceWindowResourceType"` + + // The instance ID or key/value pair to retrieve information about. + Targets []*Target `type:"list"` + + // The ID of the maintenance window to retrieve information about. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMaintenanceWindowScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowScheduleInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeMaintenanceWindowScheduleInput) SetFilters(v []*PatchOrchestratorFilter) *DescribeMaintenanceWindowScheduleInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowScheduleInput) SetMaxResults(v int64) *DescribeMaintenanceWindowScheduleInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowScheduleInput) SetNextToken(v string) *DescribeMaintenanceWindowScheduleInput { + s.NextToken = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *DescribeMaintenanceWindowScheduleInput) SetResourceType(v string) *DescribeMaintenanceWindowScheduleInput { + s.ResourceType = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *DescribeMaintenanceWindowScheduleInput) SetTargets(v []*Target) *DescribeMaintenanceWindowScheduleInput { + s.Targets = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *DescribeMaintenanceWindowScheduleInput) SetWindowId(v string) *DescribeMaintenanceWindowScheduleInput { + s.WindowId = &v + return s +} + +type DescribeMaintenanceWindowScheduleOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. (You use this token in the + // next call.) + NextToken *string `type:"string"` + + // Information about maintenance window executions scheduled for the specified + // time range. + ScheduledWindowExecutions []*ScheduledWindowExecution `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowScheduleOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeMaintenanceWindowScheduleOutput) SetNextToken(v string) *DescribeMaintenanceWindowScheduleOutput { + s.NextToken = &v + return s +} + +// SetScheduledWindowExecutions sets the ScheduledWindowExecutions field's value. +func (s *DescribeMaintenanceWindowScheduleOutput) SetScheduledWindowExecutions(v []*ScheduledWindowExecution) *DescribeMaintenanceWindowScheduleOutput { + s.ScheduledWindowExecutions = v + return s +} + +type DescribeMaintenanceWindowTargetsInput struct { + _ struct{} `type:"structure"` + + // Optional filters that can be used to narrow down the scope of the returned + // window targets. The supported filter keys are Type, WindowTargetId and OwnerInformation. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The ID of the maintenance window whose targets should be retrieved. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowTargetsInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowTargetsInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowTargetsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowTargetsInput) SetMaxResults(v int64) *DescribeMaintenanceWindowTargetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowTargetsInput) SetNextToken(v string) *DescribeMaintenanceWindowTargetsInput { + s.NextToken = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *DescribeMaintenanceWindowTargetsInput) SetWindowId(v string) *DescribeMaintenanceWindowTargetsInput { + s.WindowId = &v + return s +} + +type DescribeMaintenanceWindowTargetsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // Information about the targets in the maintenance window. 
+ Targets []*MaintenanceWindowTarget `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowTargetsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowTargetsOutput) SetNextToken(v string) *DescribeMaintenanceWindowTargetsOutput { + s.NextToken = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *DescribeMaintenanceWindowTargetsOutput) SetTargets(v []*MaintenanceWindowTarget) *DescribeMaintenanceWindowTargetsOutput { + s.Targets = v + return s +} + +type DescribeMaintenanceWindowTasksInput struct { + _ struct{} `type:"structure"` + + // Optional filters used to narrow down the scope of the returned tasks. The + // supported filter keys are WindowTaskId, TaskArn, Priority, and TaskType. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The ID of the maintenance window whose tasks should be retrieved. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowTasksInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowTasksInput) SetMaxResults(v int64) *DescribeMaintenanceWindowTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowTasksInput) SetNextToken(v string) *DescribeMaintenanceWindowTasksInput { + s.NextToken = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *DescribeMaintenanceWindowTasksInput) SetWindowId(v string) *DescribeMaintenanceWindowTasksInput { + s.WindowId = &v + return s +} + +type DescribeMaintenanceWindowTasksOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // Information about the tasks in the maintenance window. 
+ Tasks []*MaintenanceWindowTask `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowTasksOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowTasksOutput) SetNextToken(v string) *DescribeMaintenanceWindowTasksOutput { + s.NextToken = &v + return s +} + +// SetTasks sets the Tasks field's value. +func (s *DescribeMaintenanceWindowTasksOutput) SetTasks(v []*MaintenanceWindowTask) *DescribeMaintenanceWindowTasksOutput { + s.Tasks = v + return s +} + +type DescribeMaintenanceWindowsForTargetInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The type of resource you want to retrieve information about. For example, + // "INSTANCE". + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"MaintenanceWindowResourceType"` + + // The instance ID or key/value pair to retrieve information about. + // + // Targets is a required field + Targets []*Target `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowsForTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowsForTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowsForTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowsForTargetInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowsForTargetInput) SetMaxResults(v int64) *DescribeMaintenanceWindowsForTargetInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowsForTargetInput) SetNextToken(v string) *DescribeMaintenanceWindowsForTargetInput { + s.NextToken = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *DescribeMaintenanceWindowsForTargetInput) SetResourceType(v string) *DescribeMaintenanceWindowsForTargetInput { + s.ResourceType = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *DescribeMaintenanceWindowsForTargetInput) SetTargets(v []*Target) *DescribeMaintenanceWindowsForTargetInput { + s.Targets = v + return s +} + +type DescribeMaintenanceWindowsForTargetOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. (You use this token in the + // next call.) + NextToken *string `type:"string"` + + // Information about the maintenance window targets and tasks an instance is + // associated with. 
+ WindowIdentities []*MaintenanceWindowIdentityForTarget `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowsForTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowsForTargetOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowsForTargetOutput) SetNextToken(v string) *DescribeMaintenanceWindowsForTargetOutput { + s.NextToken = &v + return s +} + +// SetWindowIdentities sets the WindowIdentities field's value. +func (s *DescribeMaintenanceWindowsForTargetOutput) SetWindowIdentities(v []*MaintenanceWindowIdentityForTarget) *DescribeMaintenanceWindowsForTargetOutput { + s.WindowIdentities = v + return s +} + +type DescribeMaintenanceWindowsInput struct { + _ struct{} `type:"structure"` + + // Optional filters used to narrow down the scope of the returned maintenance + // windows. Supported filter keys are Name and Enabled. + Filters []*MaintenanceWindowFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"10" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeMaintenanceWindowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceWindowsInput"} + if s.MaxResults != nil && *s.MaxResults < 10 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 10)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMaintenanceWindowsInput) SetFilters(v []*MaintenanceWindowFilter) *DescribeMaintenanceWindowsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMaintenanceWindowsInput) SetMaxResults(v int64) *DescribeMaintenanceWindowsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMaintenanceWindowsInput) SetNextToken(v string) *DescribeMaintenanceWindowsInput { + s.NextToken = &v + return s +} + +type DescribeMaintenanceWindowsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // Information about the maintenance windows. + WindowIdentities []*MaintenanceWindowIdentity `type:"list"` +} + +// String returns the string representation +func (s DescribeMaintenanceWindowsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMaintenanceWindowsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeMaintenanceWindowsOutput) SetNextToken(v string) *DescribeMaintenanceWindowsOutput { + s.NextToken = &v + return s +} + +// SetWindowIdentities sets the WindowIdentities field's value. +func (s *DescribeMaintenanceWindowsOutput) SetWindowIdentities(v []*MaintenanceWindowIdentity) *DescribeMaintenanceWindowsOutput { + s.WindowIdentities = v + return s +} + +type DescribeOpsItemsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` + + // One or more filters to limit the response. + // + // * Key: CreatedTime Operations: GreaterThan, LessThan + // + // * Key: LastModifiedBy Operations: Contains, Equals + // + // * Key: LastModifiedTime Operations: GreaterThan, LessThan + // + // * Key: Priority Operations: Equals + // + // * Key: Source Operations: Contains, Equals + // + // * Key: Status Operations: Equals + // + // * Key: Title Operations: Contains + // + // * Key: OperationalData* Operations: Equals + // + // * Key: OperationalDataKey Operations: Equals + // + // * Key: OperationalDataValue Operations: Equals, Contains + // + // * Key: OpsItemId Operations: Equals + // + // * Key: ResourceId Operations: Contains + // + // * Key: AutomationId Operations: Equals + // + // *If you filter the response by using the OperationalData operator, specify + // a key-value pair by using the following JSON format: {"key":"key_name","value":"a_value"} + OpsItemFilters []*OpsItemFilter `type:"list"` +} + +// String returns the string representation +func (s DescribeOpsItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOpsItemsInput) GoString() string { + return 
s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOpsItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOpsItemsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.OpsItemFilters != nil { + for i, v := range s.OpsItemFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OpsItemFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeOpsItemsInput) SetMaxResults(v int64) *DescribeOpsItemsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeOpsItemsInput) SetNextToken(v string) *DescribeOpsItemsInput { + s.NextToken = &v + return s +} + +// SetOpsItemFilters sets the OpsItemFilters field's value. +func (s *DescribeOpsItemsInput) SetOpsItemFilters(v []*OpsItemFilter) *DescribeOpsItemsInput { + s.OpsItemFilters = v + return s +} + +type DescribeOpsItemsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` + + // A list of OpsItems. + OpsItemSummaries []*OpsItemSummary `type:"list"` +} + +// String returns the string representation +func (s DescribeOpsItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOpsItemsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeOpsItemsOutput) SetNextToken(v string) *DescribeOpsItemsOutput { + s.NextToken = &v + return s +} + +// SetOpsItemSummaries sets the OpsItemSummaries field's value. 
+func (s *DescribeOpsItemsOutput) SetOpsItemSummaries(v []*OpsItemSummary) *DescribeOpsItemsOutput { + s.OpsItemSummaries = v + return s +} + +type DescribeParametersInput struct { + _ struct{} `type:"structure"` + + // This data type is deprecated. Instead, use ParameterFilters. + Filters []*ParametersFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // Filters to limit the request results. + ParameterFilters []*ParameterStringFilter `type:"list"` +} + +// String returns the string representation +func (s DescribeParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeParametersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ParameterFilters != nil { + for i, v := range s.ParameterFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeParametersInput) SetFilters(v []*ParametersFilter) *DescribeParametersInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeParametersInput) SetMaxResults(v int64) *DescribeParametersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParametersInput) SetNextToken(v string) *DescribeParametersInput { + s.NextToken = &v + return s +} + +// SetParameterFilters sets the ParameterFilters field's value. +func (s *DescribeParametersInput) SetParameterFilters(v []*ParameterStringFilter) *DescribeParametersInput { + s.ParameterFilters = v + return s +} + +type DescribeParametersOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. + NextToken *string `type:"string"` + + // Parameters returned by the request. + Parameters []*ParameterMetadata `type:"list"` +} + +// String returns the string representation +func (s DescribeParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeParametersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeParametersOutput) SetNextToken(v string) *DescribeParametersOutput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *DescribeParametersOutput) SetParameters(v []*ParameterMetadata) *DescribeParametersOutput { + s.Parameters = v + return s +} + +type DescribePatchBaselinesInput struct { + _ struct{} `type:"structure"` + + // Each element in the array is a structure containing: + // + // Key: (string, "NAME_PREFIX" or "OWNER") + // + // Value: (array of strings, exactly 1 entry, between 1 and 255 characters) + Filters []*PatchOrchestratorFilter `type:"list"` + + // The maximum number of patch baselines to return (per page). 
+ MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribePatchBaselinesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchBaselinesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePatchBaselinesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePatchBaselinesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribePatchBaselinesInput) SetFilters(v []*PatchOrchestratorFilter) *DescribePatchBaselinesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePatchBaselinesInput) SetMaxResults(v int64) *DescribePatchBaselinesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchBaselinesInput) SetNextToken(v string) *DescribePatchBaselinesInput { + s.NextToken = &v + return s +} + +type DescribePatchBaselinesOutput struct { + _ struct{} `type:"structure"` + + // An array of PatchBaselineIdentity elements. + BaselineIdentities []*PatchBaselineIdentity `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribePatchBaselinesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchBaselinesOutput) GoString() string { + return s.String() +} + +// SetBaselineIdentities sets the BaselineIdentities field's value. +func (s *DescribePatchBaselinesOutput) SetBaselineIdentities(v []*PatchBaselineIdentity) *DescribePatchBaselinesOutput { + s.BaselineIdentities = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchBaselinesOutput) SetNextToken(v string) *DescribePatchBaselinesOutput { + s.NextToken = &v + return s +} + +type DescribePatchGroupStateInput struct { + _ struct{} `type:"structure"` + + // The name of the patch group whose patch snapshot should be retrieved. + // + // PatchGroup is a required field + PatchGroup *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribePatchGroupStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchGroupStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePatchGroupStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePatchGroupStateInput"} + if s.PatchGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchGroup")) + } + if s.PatchGroup != nil && len(*s.PatchGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PatchGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPatchGroup sets the PatchGroup field's value. 
+func (s *DescribePatchGroupStateInput) SetPatchGroup(v string) *DescribePatchGroupStateInput { + s.PatchGroup = &v + return s +} + +type DescribePatchGroupStateOutput struct { + _ struct{} `type:"structure"` + + // The number of instances in the patch group. + Instances *int64 `type:"integer"` + + // The number of instances with patches from the patch baseline that failed + // to install. + InstancesWithFailedPatches *int64 `type:"integer"` + + // The number of instances with patches installed that aren't defined in the + // patch baseline. + InstancesWithInstalledOtherPatches *int64 `type:"integer"` + + // The number of instances with installed patches. + InstancesWithInstalledPatches *int64 `type:"integer"` + + // The number of instances with patches installed by Patch Manager that have + // not been rebooted after the patch installation. The status of these instances + // is NON_COMPLIANT. + InstancesWithInstalledPendingRebootPatches *int64 `type:"integer"` + + // The number of instances with patches installed that are specified in a RejectedPatches + // list. Patches with a status of INSTALLED_REJECTED were typically installed + // before they were added to a RejectedPatches list. + // + // If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, + // the value of InstancesWithInstalledRejectedPatches will always be 0 (zero). + InstancesWithInstalledRejectedPatches *int64 `type:"integer"` + + // The number of instances with missing patches from the patch baseline. + InstancesWithMissingPatches *int64 `type:"integer"` + + // The number of instances with patches that aren't applicable. + InstancesWithNotApplicablePatches *int64 `type:"integer"` + + // The number of instances with NotApplicable patches beyond the supported limit, + // which are not reported by name to Systems Manager Inventory. 
+ InstancesWithUnreportedNotApplicablePatches *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribePatchGroupStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchGroupStateOutput) GoString() string { + return s.String() +} + +// SetInstances sets the Instances field's value. +func (s *DescribePatchGroupStateOutput) SetInstances(v int64) *DescribePatchGroupStateOutput { + s.Instances = &v + return s +} + +// SetInstancesWithFailedPatches sets the InstancesWithFailedPatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithFailedPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithFailedPatches = &v + return s +} + +// SetInstancesWithInstalledOtherPatches sets the InstancesWithInstalledOtherPatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithInstalledOtherPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithInstalledOtherPatches = &v + return s +} + +// SetInstancesWithInstalledPatches sets the InstancesWithInstalledPatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithInstalledPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithInstalledPatches = &v + return s +} + +// SetInstancesWithInstalledPendingRebootPatches sets the InstancesWithInstalledPendingRebootPatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithInstalledPendingRebootPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithInstalledPendingRebootPatches = &v + return s +} + +// SetInstancesWithInstalledRejectedPatches sets the InstancesWithInstalledRejectedPatches field's value. 
+func (s *DescribePatchGroupStateOutput) SetInstancesWithInstalledRejectedPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithInstalledRejectedPatches = &v + return s +} + +// SetInstancesWithMissingPatches sets the InstancesWithMissingPatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithMissingPatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithMissingPatches = &v + return s +} + +// SetInstancesWithNotApplicablePatches sets the InstancesWithNotApplicablePatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithNotApplicablePatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithNotApplicablePatches = &v + return s +} + +// SetInstancesWithUnreportedNotApplicablePatches sets the InstancesWithUnreportedNotApplicablePatches field's value. +func (s *DescribePatchGroupStateOutput) SetInstancesWithUnreportedNotApplicablePatches(v int64) *DescribePatchGroupStateOutput { + s.InstancesWithUnreportedNotApplicablePatches = &v + return s +} + +type DescribePatchGroupsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + // + // For DescribePatchGroups,valid filter keys include the following: + // + // * NAME_PREFIX: The name of the patch group. Wildcards (*) are accepted. + // + // * OPERATING_SYSTEM: The supported operating system type to return results + // for. For valid operating system values, see GetDefaultPatchBaselineRequest$OperatingSystem + // in CreatePatchBaseline. Examples: --filters Key=NAME_PREFIX,Values=MyPatchGroup* + // --filters Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2 + Filters []*PatchOrchestratorFilter `type:"list"` + + // The maximum number of patch groups to return (per page). + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribePatchGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePatchGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePatchGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribePatchGroupsInput) SetFilters(v []*PatchOrchestratorFilter) *DescribePatchGroupsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePatchGroupsInput) SetMaxResults(v int64) *DescribePatchGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchGroupsInput) SetNextToken(v string) *DescribePatchGroupsInput { + s.NextToken = &v + return s +} + +type DescribePatchGroupsOutput struct { + _ struct{} `type:"structure"` + + // Each entry in the array contains: + // + // PatchGroup: string (between 1 and 256 characters, Regex: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$) + // + // PatchBaselineIdentity: A PatchBaselineIdentity element. + Mappings []*PatchGroupPatchBaselineMapping `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribePatchGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchGroupsOutput) GoString() string { + return s.String() +} + +// SetMappings sets the Mappings field's value. +func (s *DescribePatchGroupsOutput) SetMappings(v []*PatchGroupPatchBaselineMapping) *DescribePatchGroupsOutput { + s.Mappings = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchGroupsOutput) SetNextToken(v string) *DescribePatchGroupsOutput { + s.NextToken = &v + return s +} + +type DescribePatchPropertiesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The operating system type for which to list patches. + // + // OperatingSystem is a required field + OperatingSystem *string `type:"string" required:"true" enum:"OperatingSystem"` + + // Indicates whether to list patches for the Windows operating system or for + // Microsoft applications. Not applicable for Linux operating systems. + PatchSet *string `type:"string" enum:"PatchSet"` + + // The patch property for which you want to view patch details. 
+ // + // Property is a required field + Property *string `type:"string" required:"true" enum:"PatchProperty"` +} + +// String returns the string representation +func (s DescribePatchPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchPropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePatchPropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribePatchPropertiesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.OperatingSystem == nil { + invalidParams.Add(request.NewErrParamRequired("OperatingSystem")) + } + if s.Property == nil { + invalidParams.Add(request.NewErrParamRequired("Property")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribePatchPropertiesInput) SetMaxResults(v int64) *DescribePatchPropertiesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchPropertiesInput) SetNextToken(v string) *DescribePatchPropertiesInput { + s.NextToken = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *DescribePatchPropertiesInput) SetOperatingSystem(v string) *DescribePatchPropertiesInput { + s.OperatingSystem = &v + return s +} + +// SetPatchSet sets the PatchSet field's value. +func (s *DescribePatchPropertiesInput) SetPatchSet(v string) *DescribePatchPropertiesInput { + s.PatchSet = &v + return s +} + +// SetProperty sets the Property field's value. 
+func (s *DescribePatchPropertiesInput) SetProperty(v string) *DescribePatchPropertiesInput { + s.Property = &v + return s +} + +type DescribePatchPropertiesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. (You use this token in the + // next call.) + NextToken *string `type:"string"` + + // A list of the properties for patches matching the filter request parameters. + Properties []map[string]*string `type:"list"` +} + +// String returns the string representation +func (s DescribePatchPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribePatchPropertiesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribePatchPropertiesOutput) SetNextToken(v string) *DescribePatchPropertiesOutput { + s.NextToken = &v + return s +} + +// SetProperties sets the Properties field's value. +func (s *DescribePatchPropertiesOutput) SetProperties(v []map[string]*string) *DescribePatchPropertiesOutput { + s.Properties = v + return s +} + +type DescribeSessionsInput struct { + _ struct{} `type:"structure"` + + // One or more filters to limit the type of sessions returned by the request. + Filters []*SessionFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The session status to retrieve a list of sessions for. For example, "Active". 
+ // + // State is a required field + State *string `type:"string" required:"true" enum:"SessionState"` +} + +// String returns the string representation +func (s DescribeSessionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSessionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSessionsInput"} + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.State == nil { + invalidParams.Add(request.NewErrParamRequired("State")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeSessionsInput) SetFilters(v []*SessionFilter) *DescribeSessionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSessionsInput) SetMaxResults(v int64) *DescribeSessionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSessionsInput) SetNextToken(v string) *DescribeSessionsInput { + s.NextToken = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeSessionsInput) SetState(v string) *DescribeSessionsInput { + s.State = &v + return s +} + +type DescribeSessionsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. 
(You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // A list of sessions meeting the request parameters. + Sessions []*Session `type:"list"` +} + +// String returns the string representation +func (s DescribeSessionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSessionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSessionsOutput) SetNextToken(v string) *DescribeSessionsOutput { + s.NextToken = &v + return s +} + +// SetSessions sets the Sessions field's value. +func (s *DescribeSessionsOutput) SetSessions(v []*Session) *DescribeSessionsOutput { + s.Sessions = v + return s +} + +// The specified document already exists. +type DocumentAlreadyExists struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DocumentAlreadyExists) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentAlreadyExists) GoString() string { + return s.String() +} + +func newErrorDocumentAlreadyExists(v protocol.ResponseMetadata) error { + return &DocumentAlreadyExists{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DocumentAlreadyExists) Code() string { + return "DocumentAlreadyExists" +} + +// Message returns the exception's message. +func (s *DocumentAlreadyExists) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *DocumentAlreadyExists) OrigErr() error { + return nil +} + +func (s *DocumentAlreadyExists) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DocumentAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DocumentAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID +} + +// A default version of a document. +type DocumentDefaultVersionDescription struct { + _ struct{} `type:"structure"` + + // The default version of the document. + DefaultVersion *string `type:"string"` + + // The default version of the artifact associated with the document. + DefaultVersionName *string `type:"string"` + + // The name of the document. + Name *string `type:"string"` +} + +// String returns the string representation +func (s DocumentDefaultVersionDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentDefaultVersionDescription) GoString() string { + return s.String() +} + +// SetDefaultVersion sets the DefaultVersion field's value. +func (s *DocumentDefaultVersionDescription) SetDefaultVersion(v string) *DocumentDefaultVersionDescription { + s.DefaultVersion = &v + return s +} + +// SetDefaultVersionName sets the DefaultVersionName field's value. +func (s *DocumentDefaultVersionDescription) SetDefaultVersionName(v string) *DocumentDefaultVersionDescription { + s.DefaultVersionName = &v + return s +} + +// SetName sets the Name field's value. +func (s *DocumentDefaultVersionDescription) SetName(v string) *DocumentDefaultVersionDescription { + s.Name = &v + return s +} + +// Describes a Systems Manager document. +type DocumentDescription struct { + _ struct{} `type:"structure"` + + // Details about the document attachments, including names, locations, sizes, + // and so on. 
+ AttachmentsInformation []*AttachmentInformation `type:"list"` + + // The date when the document was created. + CreatedDate *time.Time `type:"timestamp"` + + // The default version. + DefaultVersion *string `type:"string"` + + // A description of the document. + Description *string `type:"string"` + + // The document format, either JSON or YAML. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The type of document. + DocumentType *string `type:"string" enum:"DocumentType"` + + // The document version. + DocumentVersion *string `type:"string"` + + // The Sha256 or Sha1 hash created by the system when the document was created. + // + // Sha1 hashes have been deprecated. + Hash *string `type:"string"` + + // The hash type of the document. Valid values include Sha256 or Sha1. + // + // Sha1 hashes have been deprecated. + HashType *string `type:"string" enum:"DocumentHashType"` + + // The latest version of the document. + LatestVersion *string `type:"string"` + + // The name of the Systems Manager document. + Name *string `type:"string"` + + // The AWS user account that created the document. + Owner *string `type:"string"` + + // A description of the parameters for a document. + Parameters []*DocumentParameter `type:"list"` + + // The list of OS platforms compatible with this Systems Manager document. + PlatformTypes []*string `type:"list"` + + // A list of SSM documents required by a document. For example, an ApplicationConfiguration + // document requires an ApplicationConfigurationSchema document. + Requires []*DocumentRequires `min:"1" type:"list"` + + // The schema version. + SchemaVersion *string `type:"string"` + + // The SHA1 hash of the document, which you can use for verification. + Sha1 *string `type:"string"` + + // The status of the Systems Manager document. + Status *string `type:"string" enum:"DocumentStatus"` + + // A message returned by AWS Systems Manager that explains the Status value. 
+ // For example, a Failed status might be explained by the StatusInformation + // message, "The specified S3 bucket does not exist. Verify that the URL of + // the S3 bucket is correct." + StatusInformation *string `type:"string"` + + // The tags, or metadata, that have been applied to the document. + Tags []*Tag `type:"list"` + + // The target type which defines the kinds of resources the document can run + // on. For example, /AWS::EC2::Instance. For a list of valid resource types, + // see AWS resource and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide. + TargetType *string `type:"string"` + + // The version of the artifact associated with the document. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s DocumentDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentDescription) GoString() string { + return s.String() +} + +// SetAttachmentsInformation sets the AttachmentsInformation field's value. +func (s *DocumentDescription) SetAttachmentsInformation(v []*AttachmentInformation) *DocumentDescription { + s.AttachmentsInformation = v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *DocumentDescription) SetCreatedDate(v time.Time) *DocumentDescription { + s.CreatedDate = &v + return s +} + +// SetDefaultVersion sets the DefaultVersion field's value. +func (s *DocumentDescription) SetDefaultVersion(v string) *DocumentDescription { + s.DefaultVersion = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DocumentDescription) SetDescription(v string) *DocumentDescription { + s.Description = &v + return s +} + +// SetDocumentFormat sets the DocumentFormat field's value. 
+func (s *DocumentDescription) SetDocumentFormat(v string) *DocumentDescription { + s.DocumentFormat = &v + return s +} + +// SetDocumentType sets the DocumentType field's value. +func (s *DocumentDescription) SetDocumentType(v string) *DocumentDescription { + s.DocumentType = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *DocumentDescription) SetDocumentVersion(v string) *DocumentDescription { + s.DocumentVersion = &v + return s +} + +// SetHash sets the Hash field's value. +func (s *DocumentDescription) SetHash(v string) *DocumentDescription { + s.Hash = &v + return s +} + +// SetHashType sets the HashType field's value. +func (s *DocumentDescription) SetHashType(v string) *DocumentDescription { + s.HashType = &v + return s +} + +// SetLatestVersion sets the LatestVersion field's value. +func (s *DocumentDescription) SetLatestVersion(v string) *DocumentDescription { + s.LatestVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *DocumentDescription) SetName(v string) *DocumentDescription { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *DocumentDescription) SetOwner(v string) *DocumentDescription { + s.Owner = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *DocumentDescription) SetParameters(v []*DocumentParameter) *DocumentDescription { + s.Parameters = v + return s +} + +// SetPlatformTypes sets the PlatformTypes field's value. +func (s *DocumentDescription) SetPlatformTypes(v []*string) *DocumentDescription { + s.PlatformTypes = v + return s +} + +// SetRequires sets the Requires field's value. +func (s *DocumentDescription) SetRequires(v []*DocumentRequires) *DocumentDescription { + s.Requires = v + return s +} + +// SetSchemaVersion sets the SchemaVersion field's value. 
+func (s *DocumentDescription) SetSchemaVersion(v string) *DocumentDescription { + s.SchemaVersion = &v + return s +} + +// SetSha1 sets the Sha1 field's value. +func (s *DocumentDescription) SetSha1(v string) *DocumentDescription { + s.Sha1 = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DocumentDescription) SetStatus(v string) *DocumentDescription { + s.Status = &v + return s +} + +// SetStatusInformation sets the StatusInformation field's value. +func (s *DocumentDescription) SetStatusInformation(v string) *DocumentDescription { + s.StatusInformation = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DocumentDescription) SetTags(v []*Tag) *DocumentDescription { + s.Tags = v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *DocumentDescription) SetTargetType(v string) *DocumentDescription { + s.TargetType = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *DocumentDescription) SetVersionName(v string) *DocumentDescription { + s.VersionName = &v + return s +} + +// This data type is deprecated. Instead, use DocumentKeyValuesFilter. +type DocumentFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true" enum:"DocumentFilterKey"` + + // The value of the filter. + // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DocumentFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DocumentFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DocumentFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *DocumentFilter) SetKey(v string) *DocumentFilter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *DocumentFilter) SetValue(v string) *DocumentFilter { + s.Value = &v + return s +} + +// Describes the name of a Systems Manager document. +type DocumentIdentifier struct { + _ struct{} `type:"structure"` + + // The document format, either JSON or YAML. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The document type. + DocumentType *string `type:"string" enum:"DocumentType"` + + // The document version. + DocumentVersion *string `type:"string"` + + // The name of the Systems Manager document. + Name *string `type:"string"` + + // The AWS user account that created the document. + Owner *string `type:"string"` + + // The operating system platform. + PlatformTypes []*string `type:"list"` + + // A list of SSM documents required by a document. For example, an ApplicationConfiguration + // document requires an ApplicationConfigurationSchema document. + Requires []*DocumentRequires `min:"1" type:"list"` + + // The schema version. + SchemaVersion *string `type:"string"` + + // The tags, or metadata, that have been applied to the document. + Tags []*Tag `type:"list"` + + // The target type which defines the kinds of resources the document can run + // on. For example, /AWS::EC2::Instance. 
For a list of valid resource types, + // see AWS resource and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // in the AWS CloudFormation User Guide. + TargetType *string `type:"string"` + + // An optional field specifying the version of the artifact associated with + // the document. For example, "Release 12, Update 6". This value is unique across + // all versions of a document, and cannot be changed. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s DocumentIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentIdentifier) GoString() string { + return s.String() +} + +// SetDocumentFormat sets the DocumentFormat field's value. +func (s *DocumentIdentifier) SetDocumentFormat(v string) *DocumentIdentifier { + s.DocumentFormat = &v + return s +} + +// SetDocumentType sets the DocumentType field's value. +func (s *DocumentIdentifier) SetDocumentType(v string) *DocumentIdentifier { + s.DocumentType = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *DocumentIdentifier) SetDocumentVersion(v string) *DocumentIdentifier { + s.DocumentVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *DocumentIdentifier) SetName(v string) *DocumentIdentifier { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *DocumentIdentifier) SetOwner(v string) *DocumentIdentifier { + s.Owner = &v + return s +} + +// SetPlatformTypes sets the PlatformTypes field's value. +func (s *DocumentIdentifier) SetPlatformTypes(v []*string) *DocumentIdentifier { + s.PlatformTypes = v + return s +} + +// SetRequires sets the Requires field's value. 
+func (s *DocumentIdentifier) SetRequires(v []*DocumentRequires) *DocumentIdentifier { + s.Requires = v + return s +} + +// SetSchemaVersion sets the SchemaVersion field's value. +func (s *DocumentIdentifier) SetSchemaVersion(v string) *DocumentIdentifier { + s.SchemaVersion = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DocumentIdentifier) SetTags(v []*Tag) *DocumentIdentifier { + s.Tags = v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *DocumentIdentifier) SetTargetType(v string) *DocumentIdentifier { + s.TargetType = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *DocumentIdentifier) SetVersionName(v string) *DocumentIdentifier { + s.VersionName = &v + return s +} + +// One or more filters. Use a filter to return a more specific list of documents. +// +// For keys, you can specify one or more tags that have been applied to a document. +// +// You can also use AWS-provided keys, some of which have specific allowed values. +// These keys and their associated values are as follows: +// +// DocumentType +// +// * ApplicationConfiguration +// +// * ApplicationConfigurationSchema +// +// * Automation +// +// * ChangeCalendar +// +// * Command +// +// * DeploymentStrategy +// +// * Package +// +// * Policy +// +// * Session +// +// Owner +// +// Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self. +// +// * Amazon +// +// * Private +// +// * Public +// +// * Self +// +// * ThirdParty +// +// PlatformTypes +// +// * Linux +// +// * Windows +// +// Name is another AWS-provided key. If you use Name as a key, you can use a +// name prefix to return a list of documents. For example, in the AWS CLI, to +// return a list of all documents that begin with Te, run the following command: +// +// aws ssm list-documents --filters Key=Name,Values=Te +// +// You can also use the TargetType AWS-provided key. 
For a list of valid resource +// type values that can be used with this key, see AWS resource and property +// types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) +// in the AWS CloudFormation User Guide. +// +// If you specify more than two keys, only documents that are identified by +// all the tags are returned in the results. If you specify more than two values +// for a key, documents that are identified by any of the values are returned +// in the results. +// +// To specify a custom key and value pair, use the format Key=tag:tagName,Values=valueName. +// +// For example, if you created a key called region and are using the AWS CLI +// to call the list-documents command: +// +// aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self +type DocumentKeyValuesFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter key. + Key *string `min:"1" type:"string"` + + // The value for the filter key. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s DocumentKeyValuesFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentKeyValuesFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DocumentKeyValuesFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DocumentKeyValuesFilter"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *DocumentKeyValuesFilter) SetKey(v string) *DocumentKeyValuesFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. 
+func (s *DocumentKeyValuesFilter) SetValues(v []*string) *DocumentKeyValuesFilter { + s.Values = v + return s +} + +// You can have at most 500 active Systems Manager documents. +type DocumentLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DocumentLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentLimitExceeded) GoString() string { + return s.String() +} + +func newErrorDocumentLimitExceeded(v protocol.ResponseMetadata) error { + return &DocumentLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DocumentLimitExceeded) Code() string { + return "DocumentLimitExceeded" +} + +// Message returns the exception's message. +func (s *DocumentLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DocumentLimitExceeded) OrigErr() error { + return nil +} + +func (s *DocumentLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DocumentLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DocumentLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Parameters specified in a System Manager document that run on the server +// when the command is run. +type DocumentParameter struct { + _ struct{} `type:"structure"` + + // If specified, the default values for the parameters. Parameters without a + // default value are required. Parameters with a default value are optional. 
+ DefaultValue *string `type:"string"` + + // A description of what the parameter does, how to use it, the default value, + // and whether or not the parameter is optional. + Description *string `type:"string"` + + // The name of the parameter. + Name *string `type:"string"` + + // The type of parameter. The type can be either String or StringList. + Type *string `type:"string" enum:"DocumentParameterType"` +} + +// String returns the string representation +func (s DocumentParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentParameter) GoString() string { + return s.String() +} + +// SetDefaultValue sets the DefaultValue field's value. +func (s *DocumentParameter) SetDefaultValue(v string) *DocumentParameter { + s.DefaultValue = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DocumentParameter) SetDescription(v string) *DocumentParameter { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *DocumentParameter) SetName(v string) *DocumentParameter { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *DocumentParameter) SetType(v string) *DocumentParameter { + s.Type = &v + return s +} + +// The document cannot be shared with more AWS user accounts. You can share +// a document with a maximum of 20 accounts. You can publicly share up to five +// documents. If you need to increase this limit, contact AWS Support. 
+type DocumentPermissionLimit struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DocumentPermissionLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentPermissionLimit) GoString() string { + return s.String() +} + +func newErrorDocumentPermissionLimit(v protocol.ResponseMetadata) error { + return &DocumentPermissionLimit{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DocumentPermissionLimit) Code() string { + return "DocumentPermissionLimit" +} + +// Message returns the exception's message. +func (s *DocumentPermissionLimit) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DocumentPermissionLimit) OrigErr() error { + return nil +} + +func (s *DocumentPermissionLimit) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DocumentPermissionLimit) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DocumentPermissionLimit) RequestID() string { + return s.RespMetadata.RequestID +} + +// An SSM document required by the current document. +type DocumentRequires struct { + _ struct{} `type:"structure"` + + // The name of the required SSM document. The name can be an Amazon Resource + // Name (ARN). + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The document version required by the current document. 
+ Version *string `type:"string"` +} + +// String returns the string representation +func (s DocumentRequires) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentRequires) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DocumentRequires) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DocumentRequires"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DocumentRequires) SetName(v string) *DocumentRequires { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *DocumentRequires) SetVersion(v string) *DocumentRequires { + s.Version = &v + return s +} + +// Version information about the document. +type DocumentVersionInfo struct { + _ struct{} `type:"structure"` + + // The date the document was created. + CreatedDate *time.Time `type:"timestamp"` + + // The document format, either JSON or YAML. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The document version. + DocumentVersion *string `type:"string"` + + // An identifier for the default version of the document. + IsDefaultVersion *bool `type:"boolean"` + + // The document name. + Name *string `type:"string"` + + // The status of the Systems Manager document, such as Creating, Active, Failed, + // and Deleting. + Status *string `type:"string" enum:"DocumentStatus"` + + // A message returned by AWS Systems Manager that explains the Status value. + // For example, a Failed status might be explained by the StatusInformation + // message, "The specified S3 bucket does not exist. Verify that the URL of + // the S3 bucket is correct." 
+ StatusInformation *string `type:"string"` + + // The version of the artifact associated with the document. For example, "Release + // 12, Update 6". This value is unique across all versions of a document, and + // cannot be changed. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s DocumentVersionInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentVersionInfo) GoString() string { + return s.String() +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *DocumentVersionInfo) SetCreatedDate(v time.Time) *DocumentVersionInfo { + s.CreatedDate = &v + return s +} + +// SetDocumentFormat sets the DocumentFormat field's value. +func (s *DocumentVersionInfo) SetDocumentFormat(v string) *DocumentVersionInfo { + s.DocumentFormat = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *DocumentVersionInfo) SetDocumentVersion(v string) *DocumentVersionInfo { + s.DocumentVersion = &v + return s +} + +// SetIsDefaultVersion sets the IsDefaultVersion field's value. +func (s *DocumentVersionInfo) SetIsDefaultVersion(v bool) *DocumentVersionInfo { + s.IsDefaultVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *DocumentVersionInfo) SetName(v string) *DocumentVersionInfo { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DocumentVersionInfo) SetStatus(v string) *DocumentVersionInfo { + s.Status = &v + return s +} + +// SetStatusInformation sets the StatusInformation field's value. +func (s *DocumentVersionInfo) SetStatusInformation(v string) *DocumentVersionInfo { + s.StatusInformation = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *DocumentVersionInfo) SetVersionName(v string) *DocumentVersionInfo { + s.VersionName = &v + return s +} + +// The document has too many versions. 
Delete one or more document versions +// and try again. +type DocumentVersionLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DocumentVersionLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DocumentVersionLimitExceeded) GoString() string { + return s.String() +} + +func newErrorDocumentVersionLimitExceeded(v protocol.ResponseMetadata) error { + return &DocumentVersionLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DocumentVersionLimitExceeded) Code() string { + return "DocumentVersionLimitExceeded" +} + +// Message returns the exception's message. +func (s *DocumentVersionLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DocumentVersionLimitExceeded) OrigErr() error { + return nil +} + +func (s *DocumentVersionLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DocumentVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DocumentVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. +// +// For information about resource quotas in Systems Manager, see Systems Manager +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// in the AWS General Reference. 
+type DoesNotExistException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DoesNotExistException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoesNotExistException) GoString() string { + return s.String() +} + +func newErrorDoesNotExistException(v protocol.ResponseMetadata) error { + return &DoesNotExistException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DoesNotExistException) Code() string { + return "DoesNotExistException" +} + +// Message returns the exception's message. +func (s *DoesNotExistException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DoesNotExistException) OrigErr() error { + return nil +} + +func (s *DoesNotExistException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The content of the association document matches another document. Change +// the content of the document and try again. 
+type DuplicateDocumentContent struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DuplicateDocumentContent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DuplicateDocumentContent) GoString() string { + return s.String() +} + +func newErrorDuplicateDocumentContent(v protocol.ResponseMetadata) error { + return &DuplicateDocumentContent{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DuplicateDocumentContent) Code() string { + return "DuplicateDocumentContent" +} + +// Message returns the exception's message. +func (s *DuplicateDocumentContent) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateDocumentContent) OrigErr() error { + return nil +} + +func (s *DuplicateDocumentContent) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateDocumentContent) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateDocumentContent) RequestID() string { + return s.RespMetadata.RequestID +} + +// The version name has already been used in this document. Specify a different +// version name, and then try again. 
+type DuplicateDocumentVersionName struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DuplicateDocumentVersionName) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DuplicateDocumentVersionName) GoString() string { + return s.String() +} + +func newErrorDuplicateDocumentVersionName(v protocol.ResponseMetadata) error { + return &DuplicateDocumentVersionName{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DuplicateDocumentVersionName) Code() string { + return "DuplicateDocumentVersionName" +} + +// Message returns the exception's message. +func (s *DuplicateDocumentVersionName) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateDocumentVersionName) OrigErr() error { + return nil +} + +func (s *DuplicateDocumentVersionName) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateDocumentVersionName) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateDocumentVersionName) RequestID() string { + return s.RespMetadata.RequestID +} + +// You cannot specify an instance ID in more than one association. 
+type DuplicateInstanceId struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DuplicateInstanceId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DuplicateInstanceId) GoString() string { + return s.String() +} + +func newErrorDuplicateInstanceId(v protocol.ResponseMetadata) error { + return &DuplicateInstanceId{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DuplicateInstanceId) Code() string { + return "DuplicateInstanceId" +} + +// Message returns the exception's message. +func (s *DuplicateInstanceId) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateInstanceId) OrigErr() error { + return nil +} + +func (s *DuplicateInstanceId) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateInstanceId) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateInstanceId) RequestID() string { + return s.RespMetadata.RequestID +} + +// The EffectivePatch structure defines metadata about a patch along with the +// approval state of the patch in a particular patch baseline. The approval +// state includes information about whether the patch is currently approved, +// due to be approved by a rule, explicitly approved, or explicitly rejected +// and the date the patch was or will be approved. 
+type EffectivePatch struct { + _ struct{} `type:"structure"` + + // Provides metadata for a patch, including information such as the KB ID, severity, + // classification and a URL for where more information can be obtained about + // the patch. + Patch *Patch `type:"structure"` + + // The status of the patch in a patch baseline. This includes information about + // whether the patch is currently approved, due to be approved by a rule, explicitly + // approved, or explicitly rejected and the date the patch was or will be approved. + PatchStatus *PatchStatus `type:"structure"` +} + +// String returns the string representation +func (s EffectivePatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EffectivePatch) GoString() string { + return s.String() +} + +// SetPatch sets the Patch field's value. +func (s *EffectivePatch) SetPatch(v *Patch) *EffectivePatch { + s.Patch = v + return s +} + +// SetPatchStatus sets the PatchStatus field's value. +func (s *EffectivePatch) SetPatchStatus(v *PatchStatus) *EffectivePatch { + s.PatchStatus = v + return s +} + +// Describes a failed association. +type FailedCreateAssociation struct { + _ struct{} `type:"structure"` + + // The association. + Entry *CreateAssociationBatchRequestEntry `type:"structure"` + + // The source of the failure. + Fault *string `type:"string" enum:"Fault"` + + // A description of the failure. + Message *string `type:"string"` +} + +// String returns the string representation +func (s FailedCreateAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedCreateAssociation) GoString() string { + return s.String() +} + +// SetEntry sets the Entry field's value. +func (s *FailedCreateAssociation) SetEntry(v *CreateAssociationBatchRequestEntry) *FailedCreateAssociation { + s.Entry = v + return s +} + +// SetFault sets the Fault field's value. 
+func (s *FailedCreateAssociation) SetFault(v string) *FailedCreateAssociation { + s.Fault = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *FailedCreateAssociation) SetMessage(v string) *FailedCreateAssociation { + s.Message = &v + return s +} + +// Information about an Automation failure. +type FailureDetails struct { + _ struct{} `type:"structure"` + + // Detailed information about the Automation step failure. + Details map[string][]*string `min:"1" type:"map"` + + // The stage of the Automation execution when the failure occurred. The stages + // include the following: InputValidation, PreVerification, Invocation, PostVerification. + FailureStage *string `type:"string"` + + // The type of Automation failure. Failure types include the following: Action, + // Permission, Throttling, Verification, Internal. + FailureType *string `type:"string"` +} + +// String returns the string representation +func (s FailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailureDetails) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *FailureDetails) SetDetails(v map[string][]*string) *FailureDetails { + s.Details = v + return s +} + +// SetFailureStage sets the FailureStage field's value. +func (s *FailureDetails) SetFailureStage(v string) *FailureDetails { + s.FailureStage = &v + return s +} + +// SetFailureType sets the FailureType field's value. +func (s *FailureDetails) SetFailureType(v string) *FailureDetails { + s.FailureType = &v + return s +} + +// You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where +// the corresponding service is not available. 
+type FeatureNotAvailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s FeatureNotAvailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FeatureNotAvailableException) GoString() string { + return s.String() +} + +func newErrorFeatureNotAvailableException(v protocol.ResponseMetadata) error { + return &FeatureNotAvailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *FeatureNotAvailableException) Code() string { + return "FeatureNotAvailableException" +} + +// Message returns the exception's message. +func (s *FeatureNotAvailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *FeatureNotAvailableException) OrigErr() error { + return nil +} + +func (s *FeatureNotAvailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *FeatureNotAvailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *FeatureNotAvailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +type GetAutomationExecutionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for an existing automation execution to examine. The + // execution ID is returned by StartAutomationExecution when the execution of + // an Automation document is initiated. 
+ // + // AutomationExecutionId is a required field + AutomationExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAutomationExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAutomationExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAutomationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAutomationExecutionInput"} + if s.AutomationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("AutomationExecutionId")) + } + if s.AutomationExecutionId != nil && len(*s.AutomationExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("AutomationExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *GetAutomationExecutionInput) SetAutomationExecutionId(v string) *GetAutomationExecutionInput { + s.AutomationExecutionId = &v + return s +} + +type GetAutomationExecutionOutput struct { + _ struct{} `type:"structure"` + + // Detailed information about the current state of an automation execution. + AutomationExecution *AutomationExecution `type:"structure"` +} + +// String returns the string representation +func (s GetAutomationExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAutomationExecutionOutput) GoString() string { + return s.String() +} + +// SetAutomationExecution sets the AutomationExecution field's value. 
+func (s *GetAutomationExecutionOutput) SetAutomationExecution(v *AutomationExecution) *GetAutomationExecutionOutput { + s.AutomationExecution = v + return s +} + +type GetCalendarStateInput struct { + _ struct{} `type:"structure"` + + // (Optional) The specific time for which you want to get calendar state information, + // in ISO 8601 (https://en.wikipedia.org/wiki/ISO_8601) format. If you do not + // add AtTime, the current time is assumed. + AtTime *string `type:"string"` + + // The names or Amazon Resource Names (ARNs) of the Systems Manager documents + // that represent the calendar entries for which you want to get the state. + // + // CalendarNames is a required field + CalendarNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetCalendarStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCalendarStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCalendarStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCalendarStateInput"} + if s.CalendarNames == nil { + invalidParams.Add(request.NewErrParamRequired("CalendarNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAtTime sets the AtTime field's value. +func (s *GetCalendarStateInput) SetAtTime(v string) *GetCalendarStateInput { + s.AtTime = &v + return s +} + +// SetCalendarNames sets the CalendarNames field's value. +func (s *GetCalendarStateInput) SetCalendarNames(v []*string) *GetCalendarStateInput { + s.CalendarNames = v + return s +} + +type GetCalendarStateOutput struct { + _ struct{} `type:"structure"` + + // The time, as an ISO 8601 (https://en.wikipedia.org/wiki/ISO_8601) string, + // that you specified in your command. If you did not specify a time, GetCalendarState + // uses the current time. 
+ AtTime *string `type:"string"` + + // The time, as an ISO 8601 (https://en.wikipedia.org/wiki/ISO_8601) string, + // that the calendar state will change. If the current calendar state is OPEN, + // NextTransitionTime indicates when the calendar state changes to CLOSED, and + // vice-versa. + NextTransitionTime *string `type:"string"` + + // The state of the calendar. An OPEN calendar indicates that actions are allowed + // to proceed, and a CLOSED calendar indicates that actions are not allowed + // to proceed. + State *string `type:"string" enum:"CalendarState"` +} + +// String returns the string representation +func (s GetCalendarStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCalendarStateOutput) GoString() string { + return s.String() +} + +// SetAtTime sets the AtTime field's value. +func (s *GetCalendarStateOutput) SetAtTime(v string) *GetCalendarStateOutput { + s.AtTime = &v + return s +} + +// SetNextTransitionTime sets the NextTransitionTime field's value. +func (s *GetCalendarStateOutput) SetNextTransitionTime(v string) *GetCalendarStateOutput { + s.NextTransitionTime = &v + return s +} + +// SetState sets the State field's value. +func (s *GetCalendarStateOutput) SetState(v string) *GetCalendarStateOutput { + s.State = &v + return s +} + +type GetCommandInvocationInput struct { + _ struct{} `type:"structure"` + + // (Required) The parent command ID of the invocation plugin. + // + // CommandId is a required field + CommandId *string `min:"36" type:"string" required:"true"` + + // (Required) The ID of the managed instance targeted by the command. A managed + // instance can be an EC2 instance or an instance in your hybrid environment + // that is configured for Systems Manager. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // (Optional) The name of the plugin for which you want detailed results. 
If + // the document contains only one plugin, the name can be omitted and the details + // will be returned. + // + // Plugin names are also referred to as step names in Systems Manager documents. + PluginName *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s GetCommandInvocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommandInvocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCommandInvocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommandInvocationInput"} + if s.CommandId == nil { + invalidParams.Add(request.NewErrParamRequired("CommandId")) + } + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.PluginName != nil && len(*s.PluginName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("PluginName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommandId sets the CommandId field's value. +func (s *GetCommandInvocationInput) SetCommandId(v string) *GetCommandInvocationInput { + s.CommandId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetCommandInvocationInput) SetInstanceId(v string) *GetCommandInvocationInput { + s.InstanceId = &v + return s +} + +// SetPluginName sets the PluginName field's value. +func (s *GetCommandInvocationInput) SetPluginName(v string) *GetCommandInvocationInput { + s.PluginName = &v + return s +} + +type GetCommandInvocationOutput struct { + _ struct{} `type:"structure"` + + // CloudWatch Logs information where Systems Manager sent the command output. 
+ CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"` + + // The parent command ID of the invocation plugin. + CommandId *string `min:"36" type:"string"` + + // The comment text for the command. + Comment *string `type:"string"` + + // The name of the document that was run. For example, AWS-RunShellScript. + DocumentName *string `type:"string"` + + // The SSM document version used in the request. + DocumentVersion *string `type:"string"` + + // Duration since ExecutionStartDateTime. + ExecutionElapsedTime *string `type:"string"` + + // The date and time the plugin was finished running. Date and time are written + // in ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-7. + // The following sample AWS CLI command uses the InvokedAfter filter. + // + // aws ssm list-commands --filters key=InvokedAfter,value=2017-06-07T00:00:00Z + // + // If the plugin has not started to run, the string is empty. + ExecutionEndDateTime *string `type:"string"` + + // The date and time the plugin started running. Date and time are written in + // ISO 8601 format. For example, June 7, 2017 is represented as 2017-06-7. The + // following sample AWS CLI command uses the InvokedBefore filter. + // + // aws ssm list-commands --filters key=InvokedBefore,value=2017-06-07T00:00:00Z + // + // If the plugin has not started to run, the string is empty. + ExecutionStartDateTime *string `type:"string"` + + // The ID of the managed instance targeted by the command. A managed instance + // can be an EC2 instance or an instance in your hybrid environment that is + // configured for Systems Manager. + InstanceId *string `type:"string"` + + // The name of the plugin for which you want detailed results. For example, + // aws:RunShellScript is a plugin. + PluginName *string `min:"4" type:"string"` + + // The error level response code for the plugin script. 
If the response code + // is -1, then the command has not started running on the instance, or it was + // not received by the instance. + ResponseCode *int64 `type:"integer"` + + // The first 8,000 characters written by the plugin to stderr. If the command + // has not finished running, then this string is empty. + StandardErrorContent *string `type:"string"` + + // The URL for the complete text written by the plugin to stderr. If the command + // has not finished running, then this string is empty. + StandardErrorUrl *string `type:"string"` + + // The first 24,000 characters written by the plugin to stdout. If the command + // has not finished running, if ExecutionStatus is neither Succeeded nor Failed, + // then this string is empty. + StandardOutputContent *string `type:"string"` + + // The URL for the complete text written by the plugin to stdout in Amazon S3. + // If an S3 bucket was not specified, then this string is empty. + StandardOutputUrl *string `type:"string"` + + // The status of this invocation plugin. This status can be different than StatusDetails. + Status *string `type:"string" enum:"CommandInvocationStatus"` + + // A detailed status of the command execution for an invocation. StatusDetails + // includes more information than Status because it includes states resulting + // from error and concurrency control parameters. StatusDetails can show different + // results than Status. For more information about these statuses, see Understanding + // command statuses (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // in the AWS Systems Manager User Guide. StatusDetails can be one of the following + // values: + // + // * Pending: The command has not been sent to the instance. + // + // * In Progress: The command has been sent to the instance but has not reached + // a terminal state. + // + // * Delayed: The system attempted to send the command to the target, but + // the target was not available. 
The instance might not be available because + // of network issues, because the instance was stopped, or for similar reasons. + // The system will try to send the command again. + // + // * Success: The command or plugin ran successfully. This is a terminal + // state. + // + // * Delivery Timed Out: The command was not delivered to the instance before + // the delivery timeout expired. Delivery timeouts do not count against the + // parent command's MaxErrors limit, but they do contribute to whether the + // parent command status is Success or Incomplete. This is a terminal state. + // + // * Execution Timed Out: The command started to run on the instance, but + // the execution was not complete before the timeout expired. Execution timeouts + // count against the MaxErrors limit of the parent command. This is a terminal + // state. + // + // * Failed: The command wasn't run successfully on the instance. For a plugin, + // this indicates that the result code was not zero. For a command invocation, + // this indicates that the result code for one or more plugins was not zero. + // Invocation failures count against the MaxErrors limit of the parent command. + // This is a terminal state. + // + // * Canceled: The command was terminated before it was completed. This is + // a terminal state. + // + // * Undeliverable: The command can't be delivered to the instance. The instance + // might not exist or might not be responding. Undeliverable invocations + // don't count against the parent command's MaxErrors limit and don't contribute + // to whether the parent command status is Success or Incomplete. This is + // a terminal state. + // + // * Terminated: The parent command exceeded its MaxErrors limit and subsequent + // command invocations were canceled by the system. This is a terminal state. 
+ StatusDetails *string `type:"string"` +} + +// String returns the string representation +func (s GetCommandInvocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommandInvocationOutput) GoString() string { + return s.String() +} + +// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value. +func (s *GetCommandInvocationOutput) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *GetCommandInvocationOutput { + s.CloudWatchOutputConfig = v + return s +} + +// SetCommandId sets the CommandId field's value. +func (s *GetCommandInvocationOutput) SetCommandId(v string) *GetCommandInvocationOutput { + s.CommandId = &v + return s +} + +// SetComment sets the Comment field's value. +func (s *GetCommandInvocationOutput) SetComment(v string) *GetCommandInvocationOutput { + s.Comment = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *GetCommandInvocationOutput) SetDocumentName(v string) *GetCommandInvocationOutput { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *GetCommandInvocationOutput) SetDocumentVersion(v string) *GetCommandInvocationOutput { + s.DocumentVersion = &v + return s +} + +// SetExecutionElapsedTime sets the ExecutionElapsedTime field's value. +func (s *GetCommandInvocationOutput) SetExecutionElapsedTime(v string) *GetCommandInvocationOutput { + s.ExecutionElapsedTime = &v + return s +} + +// SetExecutionEndDateTime sets the ExecutionEndDateTime field's value. +func (s *GetCommandInvocationOutput) SetExecutionEndDateTime(v string) *GetCommandInvocationOutput { + s.ExecutionEndDateTime = &v + return s +} + +// SetExecutionStartDateTime sets the ExecutionStartDateTime field's value. 
+func (s *GetCommandInvocationOutput) SetExecutionStartDateTime(v string) *GetCommandInvocationOutput { + s.ExecutionStartDateTime = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetCommandInvocationOutput) SetInstanceId(v string) *GetCommandInvocationOutput { + s.InstanceId = &v + return s +} + +// SetPluginName sets the PluginName field's value. +func (s *GetCommandInvocationOutput) SetPluginName(v string) *GetCommandInvocationOutput { + s.PluginName = &v + return s +} + +// SetResponseCode sets the ResponseCode field's value. +func (s *GetCommandInvocationOutput) SetResponseCode(v int64) *GetCommandInvocationOutput { + s.ResponseCode = &v + return s +} + +// SetStandardErrorContent sets the StandardErrorContent field's value. +func (s *GetCommandInvocationOutput) SetStandardErrorContent(v string) *GetCommandInvocationOutput { + s.StandardErrorContent = &v + return s +} + +// SetStandardErrorUrl sets the StandardErrorUrl field's value. +func (s *GetCommandInvocationOutput) SetStandardErrorUrl(v string) *GetCommandInvocationOutput { + s.StandardErrorUrl = &v + return s +} + +// SetStandardOutputContent sets the StandardOutputContent field's value. +func (s *GetCommandInvocationOutput) SetStandardOutputContent(v string) *GetCommandInvocationOutput { + s.StandardOutputContent = &v + return s +} + +// SetStandardOutputUrl sets the StandardOutputUrl field's value. +func (s *GetCommandInvocationOutput) SetStandardOutputUrl(v string) *GetCommandInvocationOutput { + s.StandardOutputUrl = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetCommandInvocationOutput) SetStatus(v string) *GetCommandInvocationOutput { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. 
+func (s *GetCommandInvocationOutput) SetStatusDetails(v string) *GetCommandInvocationOutput { + s.StatusDetails = &v + return s +} + +type GetConnectionStatusInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + // + // Target is a required field + Target *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConnectionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConnectionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConnectionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConnectionStatusInput"} + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil && len(*s.Target) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Target", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTarget sets the Target field's value. +func (s *GetConnectionStatusInput) SetTarget(v string) *GetConnectionStatusInput { + s.Target = &v + return s +} + +type GetConnectionStatusOutput struct { + _ struct{} `type:"structure"` + + // The status of the connection to the instance. For example, 'Connected' or + // 'Not Connected'. + Status *string `type:"string" enum:"ConnectionStatus"` + + // The ID of the instance to check connection status. + Target *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetConnectionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConnectionStatusOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. 
+func (s *GetConnectionStatusOutput) SetStatus(v string) *GetConnectionStatusOutput { + s.Status = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *GetConnectionStatusOutput) SetTarget(v string) *GetConnectionStatusOutput { + s.Target = &v + return s +} + +type GetDefaultPatchBaselineInput struct { + _ struct{} `type:"structure"` + + // Returns the default patch baseline for the specified operating system. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` +} + +// String returns the string representation +func (s GetDefaultPatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultPatchBaselineInput) GoString() string { + return s.String() +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetDefaultPatchBaselineInput) SetOperatingSystem(v string) *GetDefaultPatchBaselineInput { + s.OperatingSystem = &v + return s +} + +type GetDefaultPatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // The ID of the default patch baseline. + BaselineId *string `min:"20" type:"string"` + + // The operating system for the returned patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` +} + +// String returns the string representation +func (s GetDefaultPatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultPatchBaselineOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *GetDefaultPatchBaselineOutput) SetBaselineId(v string) *GetDefaultPatchBaselineOutput { + s.BaselineId = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. 
+func (s *GetDefaultPatchBaselineOutput) SetOperatingSystem(v string) *GetDefaultPatchBaselineOutput { + s.OperatingSystem = &v + return s +} + +type GetDeployablePatchSnapshotForInstanceInput struct { + _ struct{} `type:"structure"` + + // The ID of the instance for which the appropriate patch snapshot should be + // retrieved. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The user-defined snapshot ID. + // + // SnapshotId is a required field + SnapshotId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeployablePatchSnapshotForInstanceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeployablePatchSnapshotForInstanceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDeployablePatchSnapshotForInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDeployablePatchSnapshotForInstanceInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.SnapshotId == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotId")) + } + if s.SnapshotId != nil && len(*s.SnapshotId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("SnapshotId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetDeployablePatchSnapshotForInstanceInput) SetInstanceId(v string) *GetDeployablePatchSnapshotForInstanceInput { + s.InstanceId = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. 
+func (s *GetDeployablePatchSnapshotForInstanceInput) SetSnapshotId(v string) *GetDeployablePatchSnapshotForInstanceInput { + s.SnapshotId = &v + return s +} + +type GetDeployablePatchSnapshotForInstanceOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // Returns the specific operating system (for example Windows Server 2012 or + // Amazon Linux 2015.09) on the instance for the specified patch snapshot. + Product *string `type:"string"` + + // A pre-signed Amazon S3 URL that can be used to download the patch snapshot. + SnapshotDownloadUrl *string `type:"string"` + + // The user-defined snapshot ID. + SnapshotId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GetDeployablePatchSnapshotForInstanceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDeployablePatchSnapshotForInstanceOutput) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GetDeployablePatchSnapshotForInstanceOutput) SetInstanceId(v string) *GetDeployablePatchSnapshotForInstanceOutput { + s.InstanceId = &v + return s +} + +// SetProduct sets the Product field's value. +func (s *GetDeployablePatchSnapshotForInstanceOutput) SetProduct(v string) *GetDeployablePatchSnapshotForInstanceOutput { + s.Product = &v + return s +} + +// SetSnapshotDownloadUrl sets the SnapshotDownloadUrl field's value. +func (s *GetDeployablePatchSnapshotForInstanceOutput) SetSnapshotDownloadUrl(v string) *GetDeployablePatchSnapshotForInstanceOutput { + s.SnapshotDownloadUrl = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. 
+func (s *GetDeployablePatchSnapshotForInstanceOutput) SetSnapshotId(v string) *GetDeployablePatchSnapshotForInstanceOutput { + s.SnapshotId = &v + return s +} + +type GetDocumentInput struct { + _ struct{} `type:"structure"` + + // Returns the document in the specified format. The document format can be + // either JSON or YAML. JSON is the default format. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The document version for which you want information. + DocumentVersion *string `type:"string"` + + // The name of the Systems Manager document. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An optional field specifying the version of the artifact associated with + // the document. For example, "Release 12, Update 6". This value is unique across + // all versions of a document and can't be changed. + VersionName *string `type:"string"` +} + +// String returns the string representation +func (s GetDocumentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDocumentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDocumentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDocumentInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentFormat sets the DocumentFormat field's value. +func (s *GetDocumentInput) SetDocumentFormat(v string) *GetDocumentInput { + s.DocumentFormat = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *GetDocumentInput) SetDocumentVersion(v string) *GetDocumentInput { + s.DocumentVersion = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *GetDocumentInput) SetName(v string) *GetDocumentInput { + s.Name = &v + return s +} + +// SetVersionName sets the VersionName field's value. +func (s *GetDocumentInput) SetVersionName(v string) *GetDocumentInput { + s.VersionName = &v + return s +} + +type GetDocumentOutput struct { + _ struct{} `type:"structure"` + + // A description of the document attachments, including names, locations, sizes, + // and so on. + AttachmentsContent []*AttachmentContent `type:"list"` + + // The contents of the Systems Manager document. + Content *string `min:"1" type:"string"` + + // The document format, either JSON or YAML. + DocumentFormat *string `type:"string" enum:"DocumentFormat"` + + // The document type. + DocumentType *string `type:"string" enum:"DocumentType"` + + // The document version. + DocumentVersion *string `type:"string"` + + // The name of the Systems Manager document. + Name *string `type:"string"` + + // A list of SSM documents required by a document. For example, an ApplicationConfiguration + // document requires an ApplicationConfigurationSchema document. + Requires []*DocumentRequires `min:"1" type:"list"` + + // The status of the Systems Manager document, such as Creating, Active, Updating, + // Failed, and Deleting. + Status *string `type:"string" enum:"DocumentStatus"` + + // A message returned by AWS Systems Manager that explains the Status value. + // For example, a Failed status might be explained by the StatusInformation + // message, "The specified S3 bucket does not exist. Verify that the URL of + // the S3 bucket is correct." + StatusInformation *string `type:"string"` + + // The version of the artifact associated with the document. For example, "Release + // 12, Update 6". This value is unique across all versions of a document, and + // cannot be changed. 
+ VersionName *string `type:"string"` +} + +// String returns the string representation +func (s GetDocumentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDocumentOutput) GoString() string { + return s.String() +} + +// SetAttachmentsContent sets the AttachmentsContent field's value. +func (s *GetDocumentOutput) SetAttachmentsContent(v []*AttachmentContent) *GetDocumentOutput { + s.AttachmentsContent = v + return s +} + +// SetContent sets the Content field's value. +func (s *GetDocumentOutput) SetContent(v string) *GetDocumentOutput { + s.Content = &v + return s +} + +// SetDocumentFormat sets the DocumentFormat field's value. +func (s *GetDocumentOutput) SetDocumentFormat(v string) *GetDocumentOutput { + s.DocumentFormat = &v + return s +} + +// SetDocumentType sets the DocumentType field's value. +func (s *GetDocumentOutput) SetDocumentType(v string) *GetDocumentOutput { + s.DocumentType = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *GetDocumentOutput) SetDocumentVersion(v string) *GetDocumentOutput { + s.DocumentVersion = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetDocumentOutput) SetName(v string) *GetDocumentOutput { + s.Name = &v + return s +} + +// SetRequires sets the Requires field's value. +func (s *GetDocumentOutput) SetRequires(v []*DocumentRequires) *GetDocumentOutput { + s.Requires = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetDocumentOutput) SetStatus(v string) *GetDocumentOutput { + s.Status = &v + return s +} + +// SetStatusInformation sets the StatusInformation field's value. +func (s *GetDocumentOutput) SetStatusInformation(v string) *GetDocumentOutput { + s.StatusInformation = &v + return s +} + +// SetVersionName sets the VersionName field's value. 
+func (s *GetDocumentOutput) SetVersionName(v string) *GetDocumentOutput { + s.VersionName = &v + return s +} + +type GetInventoryInput struct { + _ struct{} `type:"structure"` + + // Returns counts of inventory types based on one or more expressions. For example, + // if you aggregate by using an expression that uses the AWS:InstanceInformation.PlatformType + // type, you can see a count of how many Windows and Linux instances exist in + // your inventoried fleet. + Aggregators []*InventoryAggregator `min:"1" type:"list"` + + // One or more filters. Use a filter to return a more specific list of results. + Filters []*InventoryFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The list of inventory item types to return. + ResultAttributes []*ResultAttribute `min:"1" type:"list"` +} + +// String returns the string representation +func (s GetInventoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInventoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetInventoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInventoryInput"} + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResultAttributes != nil && len(s.ResultAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResultAttributes", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ResultAttributes != nil { + for i, v := range s.ResultAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResultAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregators sets the Aggregators field's value. +func (s *GetInventoryInput) SetAggregators(v []*InventoryAggregator) *GetInventoryInput { + s.Aggregators = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetInventoryInput) SetFilters(v []*InventoryFilter) *GetInventoryInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetInventoryInput) SetMaxResults(v int64) *GetInventoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetInventoryInput) SetNextToken(v string) *GetInventoryInput { + s.NextToken = &v + return s +} + +// SetResultAttributes sets the ResultAttributes field's value. +func (s *GetInventoryInput) SetResultAttributes(v []*ResultAttribute) *GetInventoryInput { + s.ResultAttributes = v + return s +} + +type GetInventoryOutput struct { + _ struct{} `type:"structure"` + + // Collection of inventory entities such as a collection of instance inventory. + Entities []*InventoryResultEntity `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetInventoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInventoryOutput) GoString() string { + return s.String() +} + +// SetEntities sets the Entities field's value. +func (s *GetInventoryOutput) SetEntities(v []*InventoryResultEntity) *GetInventoryOutput { + s.Entities = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetInventoryOutput) SetNextToken(v string) *GetInventoryOutput { + s.NextToken = &v + return s +} + +type GetInventorySchemaInput struct { + _ struct{} `type:"structure"` + + // Returns inventory schemas that support aggregation. For example, this call + // returns the AWS:InstanceInformation type, because it supports aggregation + // based on the PlatformName, PlatformType, and PlatformVersion attributes. + Aggregator *bool `type:"boolean"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"50" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) 
+ NextToken *string `type:"string"` + + // Returns the sub-type schema for a specified inventory type. + SubType *bool `type:"boolean"` + + // The type of inventory item to return. + TypeName *string `type:"string"` +} + +// String returns the string representation +func (s GetInventorySchemaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInventorySchemaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInventorySchemaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInventorySchemaInput"} + if s.MaxResults != nil && *s.MaxResults < 50 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregator sets the Aggregator field's value. +func (s *GetInventorySchemaInput) SetAggregator(v bool) *GetInventorySchemaInput { + s.Aggregator = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetInventorySchemaInput) SetMaxResults(v int64) *GetInventorySchemaInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetInventorySchemaInput) SetNextToken(v string) *GetInventorySchemaInput { + s.NextToken = &v + return s +} + +// SetSubType sets the SubType field's value. +func (s *GetInventorySchemaInput) SetSubType(v bool) *GetInventorySchemaInput { + s.SubType = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *GetInventorySchemaInput) SetTypeName(v string) *GetInventorySchemaInput { + s.TypeName = &v + return s +} + +type GetInventorySchemaOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` + + // Inventory schemas returned by the request. + Schemas []*InventoryItemSchema `type:"list"` +} + +// String returns the string representation +func (s GetInventorySchemaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInventorySchemaOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetInventorySchemaOutput) SetNextToken(v string) *GetInventorySchemaOutput { + s.NextToken = &v + return s +} + +// SetSchemas sets the Schemas field's value. +func (s *GetInventorySchemaOutput) SetSchemas(v []*InventoryItemSchema) *GetInventorySchemaOutput { + s.Schemas = v + return s +} + +type GetMaintenanceWindowExecutionInput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window execution that includes the task. + // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMaintenanceWindowExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMaintenanceWindowExecutionInput"} + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. 
+func (s *GetMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionInput { + s.WindowExecutionId = &v + return s +} + +type GetMaintenanceWindowExecutionOutput struct { + _ struct{} `type:"structure"` + + // The time the maintenance window finished running. + EndTime *time.Time `type:"timestamp"` + + // The time the maintenance window started running. + StartTime *time.Time `type:"timestamp"` + + // The status of the maintenance window execution. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the Status. Only available for certain status values. + StatusDetails *string `type:"string"` + + // The ID of the task executions from the maintenance window execution. + TaskIds []*string `type:"list"` + + // The ID of the maintenance window execution. + WindowExecutionId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionOutput) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *GetMaintenanceWindowExecutionOutput) SetEndTime(v time.Time) *GetMaintenanceWindowExecutionOutput { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMaintenanceWindowExecutionOutput) SetStartTime(v time.Time) *GetMaintenanceWindowExecutionOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetMaintenanceWindowExecutionOutput) SetStatus(v string) *GetMaintenanceWindowExecutionOutput { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. 
+func (s *GetMaintenanceWindowExecutionOutput) SetStatusDetails(v string) *GetMaintenanceWindowExecutionOutput { + s.StatusDetails = &v + return s +} + +// SetTaskIds sets the TaskIds field's value. +func (s *GetMaintenanceWindowExecutionOutput) SetTaskIds(v []*string) *GetMaintenanceWindowExecutionOutput { + s.TaskIds = v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *GetMaintenanceWindowExecutionOutput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionOutput { + s.WindowExecutionId = &v + return s +} + +type GetMaintenanceWindowExecutionTaskInput struct { + _ struct{} `type:"structure"` + + // The ID of the specific task execution in the maintenance window task that + // should be retrieved. + // + // TaskId is a required field + TaskId *string `min:"36" type:"string" required:"true"` + + // The ID of the maintenance window execution that includes the task. + // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetMaintenanceWindowExecutionTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMaintenanceWindowExecutionTaskInput"} + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 36)) + } + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTaskId sets the TaskId field's value. +func (s *GetMaintenanceWindowExecutionTaskInput) SetTaskId(v string) *GetMaintenanceWindowExecutionTaskInput { + s.TaskId = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *GetMaintenanceWindowExecutionTaskInput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionTaskInput { + s.WindowExecutionId = &v + return s +} + +type GetMaintenanceWindowExecutionTaskInvocationInput struct { + _ struct{} `type:"structure"` + + // The invocation ID to retrieve. + // + // InvocationId is a required field + InvocationId *string `min:"36" type:"string" required:"true"` + + // The ID of the specific task in the maintenance window task that should be + // retrieved. + // + // TaskId is a required field + TaskId *string `min:"36" type:"string" required:"true"` + + // The ID of the maintenance window execution for which the task is a part. 
+ // + // WindowExecutionId is a required field + WindowExecutionId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionTaskInvocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionTaskInvocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMaintenanceWindowExecutionTaskInvocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMaintenanceWindowExecutionTaskInvocationInput"} + if s.InvocationId == nil { + invalidParams.Add(request.NewErrParamRequired("InvocationId")) + } + if s.InvocationId != nil && len(*s.InvocationId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("InvocationId", 36)) + } + if s.TaskId == nil { + invalidParams.Add(request.NewErrParamRequired("TaskId")) + } + if s.TaskId != nil && len(*s.TaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("TaskId", 36)) + } + if s.WindowExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowExecutionId")) + } + if s.WindowExecutionId != nil && len(*s.WindowExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInvocationId sets the InvocationId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationInput) SetInvocationId(v string) *GetMaintenanceWindowExecutionTaskInvocationInput { + s.InvocationId = &v + return s +} + +// SetTaskId sets the TaskId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationInput) SetTaskId(v string) *GetMaintenanceWindowExecutionTaskInvocationInput { + s.TaskId = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. 
+func (s *GetMaintenanceWindowExecutionTaskInvocationInput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionTaskInvocationInput { + s.WindowExecutionId = &v + return s +} + +type GetMaintenanceWindowExecutionTaskInvocationOutput struct { + _ struct{} `type:"structure"` + + // The time that the task finished running on the target. + EndTime *time.Time `type:"timestamp"` + + // The execution ID. + ExecutionId *string `type:"string"` + + // The invocation ID. + InvocationId *string `min:"36" type:"string"` + + // User-provided value to be included in any CloudWatch events raised while + // running tasks for these targets in this maintenance window. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // The parameters used at the time that the task ran. + Parameters *string `type:"string" sensitive:"true"` + + // The time that the task started running on the target. + StartTime *time.Time `type:"timestamp"` + + // The task status for an invocation. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the status. Details are only available for certain + // status values. + StatusDetails *string `type:"string"` + + // The task execution ID. + TaskExecutionId *string `min:"36" type:"string"` + + // Retrieves the task type for a maintenance window. Task types include the + // following: LAMBDA, STEP_FUNCTIONS, AUTOMATION, RUN_COMMAND. + TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The maintenance window execution ID. + WindowExecutionId *string `min:"36" type:"string"` + + // The maintenance window target ID. 
+ WindowTargetId *string `type:"string"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionTaskInvocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionTaskInvocationOutput) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetEndTime(v time.Time) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.EndTime = &v + return s +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetExecutionId(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.ExecutionId = &v + return s +} + +// SetInvocationId sets the InvocationId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetInvocationId(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.InvocationId = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetOwnerInformation(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.OwnerInformation = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetParameters(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.Parameters = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetStartTime(v time.Time) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetStatus(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetStatusDetails(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.StatusDetails = &v + return s +} + +// SetTaskExecutionId sets the TaskExecutionId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetTaskExecutionId(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.TaskExecutionId = &v + return s +} + +// SetTaskType sets the TaskType field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetTaskType(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.TaskType = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.WindowExecutionId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetWindowTargetId(v string) *GetMaintenanceWindowExecutionTaskInvocationOutput { + s.WindowTargetId = &v + return s +} + +type GetMaintenanceWindowExecutionTaskOutput struct { + _ struct{} `type:"structure"` + + // The time the task execution completed. + EndTime *time.Time `type:"timestamp"` + + // The defined maximum number of task executions that could be run in parallel. + MaxConcurrency *string `min:"1" type:"string"` + + // The defined maximum number of task execution errors allowed before scheduling + // of the task execution would have been stopped. + MaxErrors *string `min:"1" type:"string"` + + // The priority of the task. + Priority *int64 `type:"integer"` + + // The role that was assumed when running the task. 
+ ServiceRole *string `type:"string"` + + // The time the task execution started. + StartTime *time.Time `type:"timestamp"` + + // The status of the task. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the Status. Only available for certain status values. + StatusDetails *string `type:"string"` + + // The ARN of the task that ran. + TaskArn *string `min:"1" type:"string"` + + // The ID of the specific task execution in the maintenance window task that + // was retrieved. + TaskExecutionId *string `min:"36" type:"string"` + + // The parameters passed to the task when it was run. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + // + // The map has the following format: + // + // Key: string, between 1 and 255 characters + // + // Value: an array of strings, each string is between 1 and 255 characters + TaskParameters []map[string]*MaintenanceWindowTaskParameterValueExpression `type:"list" sensitive:"true"` + + // The type of task that was run. + Type *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The ID of the maintenance window execution that includes the task. + WindowExecutionId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GetMaintenanceWindowExecutionTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowExecutionTaskOutput) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. 
+func (s *GetMaintenanceWindowExecutionTaskOutput) SetEndTime(v time.Time) *GetMaintenanceWindowExecutionTaskOutput { + s.EndTime = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetMaxConcurrency(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetMaxErrors(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.MaxErrors = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetPriority(v int64) *GetMaintenanceWindowExecutionTaskOutput { + s.Priority = &v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetServiceRole(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.ServiceRole = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetStartTime(v time.Time) *GetMaintenanceWindowExecutionTaskOutput { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetStatus(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetStatusDetails(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.StatusDetails = &v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetTaskArn(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.TaskArn = &v + return s +} + +// SetTaskExecutionId sets the TaskExecutionId field's value. 
+func (s *GetMaintenanceWindowExecutionTaskOutput) SetTaskExecutionId(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.TaskExecutionId = &v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetTaskParameters(v []map[string]*MaintenanceWindowTaskParameterValueExpression) *GetMaintenanceWindowExecutionTaskOutput { + s.TaskParameters = v + return s +} + +// SetType sets the Type field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetType(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.Type = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *GetMaintenanceWindowExecutionTaskOutput) SetWindowExecutionId(v string) *GetMaintenanceWindowExecutionTaskOutput { + s.WindowExecutionId = &v + return s +} + +type GetMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // The ID of the maintenance window for which you want to retrieve information. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMaintenanceWindowInput"} + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowId sets the WindowId field's value. 
+func (s *GetMaintenanceWindowInput) SetWindowId(v string) *GetMaintenanceWindowInput { + s.WindowId = &v + return s +} + +type GetMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // Whether targets must be registered with the maintenance window before tasks + // can be defined for those targets. + AllowUnassociatedTargets *bool `type:"boolean"` + + // The date the maintenance window was created. + CreatedDate *time.Time `type:"timestamp"` + + // The number of hours before the end of the maintenance window that Systems + // Manager stops scheduling new tasks for execution. + Cutoff *int64 `type:"integer"` + + // The description of the maintenance window. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The duration of the maintenance window in hours. + Duration *int64 `min:"1" type:"integer"` + + // Indicates whether the maintenance window is enabled. + Enabled *bool `type:"boolean"` + + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. The maintenance window will not run + // after this specified time. + EndDate *string `type:"string"` + + // The date the maintenance window was last modified. + ModifiedDate *time.Time `type:"timestamp"` + + // The name of the maintenance window. + Name *string `min:"3" type:"string"` + + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. + NextExecutionTime *string `type:"string"` + + // The schedule of the maintenance window in the form of a cron or rate expression. + Schedule *string `min:"1" type:"string"` + + // The number of days to wait to run a maintenance window after the scheduled + // CRON expression date and time. + ScheduleOffset *int64 `min:"1" type:"integer"` + + // The time zone that the scheduled maintenance window executions are based + // on, in Internet Assigned Numbers Authority (IANA) format. 
For example: "America/Los_Angeles", + // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database + // (https://www.iana.org/time-zones) on the IANA website. + ScheduleTimezone *string `type:"string"` + + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. The maintenance window will not run + // before this specified time. + StartDate *string `type:"string"` + + // The ID of the created maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s GetMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetAllowUnassociatedTargets sets the AllowUnassociatedTargets field's value. +func (s *GetMaintenanceWindowOutput) SetAllowUnassociatedTargets(v bool) *GetMaintenanceWindowOutput { + s.AllowUnassociatedTargets = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *GetMaintenanceWindowOutput) SetCreatedDate(v time.Time) *GetMaintenanceWindowOutput { + s.CreatedDate = &v + return s +} + +// SetCutoff sets the Cutoff field's value. +func (s *GetMaintenanceWindowOutput) SetCutoff(v int64) *GetMaintenanceWindowOutput { + s.Cutoff = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *GetMaintenanceWindowOutput) SetDescription(v string) *GetMaintenanceWindowOutput { + s.Description = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *GetMaintenanceWindowOutput) SetDuration(v int64) *GetMaintenanceWindowOutput { + s.Duration = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *GetMaintenanceWindowOutput) SetEnabled(v bool) *GetMaintenanceWindowOutput { + s.Enabled = &v + return s +} + +// SetEndDate sets the EndDate field's value. 
+func (s *GetMaintenanceWindowOutput) SetEndDate(v string) *GetMaintenanceWindowOutput { + s.EndDate = &v + return s +} + +// SetModifiedDate sets the ModifiedDate field's value. +func (s *GetMaintenanceWindowOutput) SetModifiedDate(v time.Time) *GetMaintenanceWindowOutput { + s.ModifiedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetMaintenanceWindowOutput) SetName(v string) *GetMaintenanceWindowOutput { + s.Name = &v + return s +} + +// SetNextExecutionTime sets the NextExecutionTime field's value. +func (s *GetMaintenanceWindowOutput) SetNextExecutionTime(v string) *GetMaintenanceWindowOutput { + s.NextExecutionTime = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *GetMaintenanceWindowOutput) SetSchedule(v string) *GetMaintenanceWindowOutput { + s.Schedule = &v + return s +} + +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *GetMaintenanceWindowOutput) SetScheduleOffset(v int64) *GetMaintenanceWindowOutput { + s.ScheduleOffset = &v + return s +} + +// SetScheduleTimezone sets the ScheduleTimezone field's value. +func (s *GetMaintenanceWindowOutput) SetScheduleTimezone(v string) *GetMaintenanceWindowOutput { + s.ScheduleTimezone = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *GetMaintenanceWindowOutput) SetStartDate(v string) *GetMaintenanceWindowOutput { + s.StartDate = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *GetMaintenanceWindowOutput) SetWindowId(v string) *GetMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +type GetMaintenanceWindowTaskInput struct { + _ struct{} `type:"structure"` + + // The maintenance window ID that includes the task to retrieve. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` + + // The maintenance window task ID to retrieve. 
+ // + // WindowTaskId is a required field + WindowTaskId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMaintenanceWindowTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMaintenanceWindowTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMaintenanceWindowTaskInput"} + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.WindowTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowTaskId")) + } + if s.WindowTaskId != nil && len(*s.WindowTaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowTaskId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWindowId sets the WindowId field's value. +func (s *GetMaintenanceWindowTaskInput) SetWindowId(v string) *GetMaintenanceWindowTaskInput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *GetMaintenanceWindowTaskInput) SetWindowTaskId(v string) *GetMaintenanceWindowTaskInput { + s.WindowTaskId = &v + return s +} + +type GetMaintenanceWindowTaskOutput struct { + _ struct{} `type:"structure"` + + // The retrieved task description. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The location in Amazon S3 where the task results are logged. + // + // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. 
For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + LoggingInfo *LoggingInfo `type:"structure"` + + // The maximum number of targets allowed to run this task in parallel. + MaxConcurrency *string `min:"1" type:"string"` + + // The maximum number of errors allowed before the task stops being scheduled. + MaxErrors *string `min:"1" type:"string"` + + // The retrieved task name. + Name *string `min:"3" type:"string"` + + // The priority of the task when it runs. The lower the number, the higher the + // priority. Tasks that have the same priority are scheduled in parallel. + Priority *int64 `type:"integer"` + + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. + ServiceRoleArn *string `type:"string"` + + // The targets where the task should run. + Targets []*Target `type:"list"` + + // The resource that the task used during execution. For RUN_COMMAND and AUTOMATION + // task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA + // tasks, the value is the function name/ARN. For STEP_FUNCTIONS tasks, the + // value is the state machine ARN. + TaskArn *string `min:"1" type:"string"` + + // The parameters to pass to the task when it runs. + TaskInvocationParameters *MaintenanceWindowTaskInvocationParameters `type:"structure"` + + // The parameters to pass to the task when it runs. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. 
+ TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` + + // The type of task to run. + TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The retrieved maintenance window ID. + WindowId *string `min:"20" type:"string"` + + // The retrieved maintenance window task ID. + WindowTaskId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GetMaintenanceWindowTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMaintenanceWindowTaskOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *GetMaintenanceWindowTaskOutput) SetDescription(v string) *GetMaintenanceWindowTaskOutput { + s.Description = &v + return s +} + +// SetLoggingInfo sets the LoggingInfo field's value. +func (s *GetMaintenanceWindowTaskOutput) SetLoggingInfo(v *LoggingInfo) *GetMaintenanceWindowTaskOutput { + s.LoggingInfo = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *GetMaintenanceWindowTaskOutput) SetMaxConcurrency(v string) *GetMaintenanceWindowTaskOutput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *GetMaintenanceWindowTaskOutput) SetMaxErrors(v string) *GetMaintenanceWindowTaskOutput { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetMaintenanceWindowTaskOutput) SetName(v string) *GetMaintenanceWindowTaskOutput { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *GetMaintenanceWindowTaskOutput) SetPriority(v int64) *GetMaintenanceWindowTaskOutput { + s.Priority = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. 
+func (s *GetMaintenanceWindowTaskOutput) SetServiceRoleArn(v string) *GetMaintenanceWindowTaskOutput { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *GetMaintenanceWindowTaskOutput) SetTargets(v []*Target) *GetMaintenanceWindowTaskOutput { + s.Targets = v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *GetMaintenanceWindowTaskOutput) SetTaskArn(v string) *GetMaintenanceWindowTaskOutput { + s.TaskArn = &v + return s +} + +// SetTaskInvocationParameters sets the TaskInvocationParameters field's value. +func (s *GetMaintenanceWindowTaskOutput) SetTaskInvocationParameters(v *MaintenanceWindowTaskInvocationParameters) *GetMaintenanceWindowTaskOutput { + s.TaskInvocationParameters = v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *GetMaintenanceWindowTaskOutput) SetTaskParameters(v map[string]*MaintenanceWindowTaskParameterValueExpression) *GetMaintenanceWindowTaskOutput { + s.TaskParameters = v + return s +} + +// SetTaskType sets the TaskType field's value. +func (s *GetMaintenanceWindowTaskOutput) SetTaskType(v string) *GetMaintenanceWindowTaskOutput { + s.TaskType = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *GetMaintenanceWindowTaskOutput) SetWindowId(v string) *GetMaintenanceWindowTaskOutput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *GetMaintenanceWindowTaskOutput) SetWindowTaskId(v string) *GetMaintenanceWindowTaskOutput { + s.WindowTaskId = &v + return s +} + +type GetOpsItemInput struct { + _ struct{} `type:"structure"` + + // The ID of the OpsItem that you want to get. 
+ // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpsItemInput"} + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *GetOpsItemInput) SetOpsItemId(v string) *GetOpsItemInput { + s.OpsItemId = &v + return s +} + +type GetOpsItemOutput struct { + _ struct{} `type:"structure"` + + // The OpsItem. + OpsItem *OpsItem `type:"structure"` +} + +// String returns the string representation +func (s GetOpsItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsItemOutput) GoString() string { + return s.String() +} + +// SetOpsItem sets the OpsItem field's value. +func (s *GetOpsItemOutput) SetOpsItem(v *OpsItem) *GetOpsItemOutput { + s.OpsItem = v + return s +} + +type GetOpsSummaryInput struct { + _ struct{} `type:"structure"` + + // Optional aggregators that return counts of OpsItems based on one or more + // expressions. + Aggregators []*OpsAggregator `min:"1" type:"list"` + + // Optional filters used to scope down the returned OpsItems. + Filters []*OpsFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. 
Use this token to get the next set of results. + NextToken *string `type:"string"` + + // The OpsItem data type to return. + ResultAttributes []*OpsResultAttribute `min:"1" type:"list"` + + // Specify the name of a resource data sync to get. + SyncName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpsSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsSummaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpsSummaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpsSummaryInput"} + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResultAttributes != nil && len(s.ResultAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResultAttributes", 1)) + } + if s.SyncName != nil && len(*s.SyncName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncName", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ResultAttributes != nil { + for i, v := range s.ResultAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResultAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregators sets the Aggregators field's value. +func (s *GetOpsSummaryInput) SetAggregators(v []*OpsAggregator) *GetOpsSummaryInput { + s.Aggregators = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetOpsSummaryInput) SetFilters(v []*OpsFilter) *GetOpsSummaryInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetOpsSummaryInput) SetMaxResults(v int64) *GetOpsSummaryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetOpsSummaryInput) SetNextToken(v string) *GetOpsSummaryInput { + s.NextToken = &v + return s +} + +// SetResultAttributes sets the ResultAttributes field's value. +func (s *GetOpsSummaryInput) SetResultAttributes(v []*OpsResultAttribute) *GetOpsSummaryInput { + s.ResultAttributes = v + return s +} + +// SetSyncName sets the SyncName field's value. +func (s *GetOpsSummaryInput) SetSyncName(v string) *GetOpsSummaryInput { + s.SyncName = &v + return s +} + +type GetOpsSummaryOutput struct { + _ struct{} `type:"structure"` + + // The list of aggregated and filtered OpsItems. + Entities []*OpsEntity `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetOpsSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpsSummaryOutput) GoString() string { + return s.String() +} + +// SetEntities sets the Entities field's value. +func (s *GetOpsSummaryOutput) SetEntities(v []*OpsEntity) *GetOpsSummaryOutput { + s.Entities = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetOpsSummaryOutput) SetNextToken(v string) *GetOpsSummaryOutput { + s.NextToken = &v + return s +} + +type GetParameterHistoryInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The name of the parameter for which you want to review history. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // Return decrypted values for secure string parameters. This flag is ignored + // for String and StringList parameter types. + WithDecryption *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetParameterHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParameterHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetParameterHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParameterHistoryInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetParameterHistoryInput) SetMaxResults(v int64) *GetParameterHistoryInput { + s.MaxResults = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *GetParameterHistoryInput) SetName(v string) *GetParameterHistoryInput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetParameterHistoryInput) SetNextToken(v string) *GetParameterHistoryInput { + s.NextToken = &v + return s +} + +// SetWithDecryption sets the WithDecryption field's value. +func (s *GetParameterHistoryInput) SetWithDecryption(v bool) *GetParameterHistoryInput { + s.WithDecryption = &v + return s +} + +type GetParameterHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // A list of parameters returned by the request. + Parameters []*ParameterHistory `type:"list"` +} + +// String returns the string representation +func (s GetParameterHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParameterHistoryOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetParameterHistoryOutput) SetNextToken(v string) *GetParameterHistoryOutput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *GetParameterHistoryOutput) SetParameters(v []*ParameterHistory) *GetParameterHistoryOutput { + s.Parameters = v + return s +} + +type GetParameterInput struct { + _ struct{} `type:"structure"` + + // The name of the parameter you want to query. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Return decrypted values for secure string parameters. This flag is ignored + // for String and StringList parameter types. 
+ WithDecryption *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetParameterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParameterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetParameterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParameterInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetParameterInput) SetName(v string) *GetParameterInput { + s.Name = &v + return s +} + +// SetWithDecryption sets the WithDecryption field's value. +func (s *GetParameterInput) SetWithDecryption(v bool) *GetParameterInput { + s.WithDecryption = &v + return s +} + +type GetParameterOutput struct { + _ struct{} `type:"structure"` + + // Information about a parameter. + Parameter *Parameter `type:"structure"` +} + +// String returns the string representation +func (s GetParameterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParameterOutput) GoString() string { + return s.String() +} + +// SetParameter sets the Parameter field's value. +func (s *GetParameterOutput) SetParameter(v *Parameter) *GetParameterOutput { + s.Parameter = v + return s +} + +type GetParametersByPathInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. 
Use this token to get the next set of results. + NextToken *string `type:"string"` + + // Filters to limit the request results. + // + // For GetParametersByPath, the following filter Key names are supported: Type, + // KeyId, Label, and DataType. + // + // The following Key values are not supported for GetParametersByPath: tag, + // Name, Path, and Tier. + ParameterFilters []*ParameterStringFilter `type:"list"` + + // The hierarchy for the parameter. Hierarchies start with a forward slash (/) + // and end with the parameter name. A parameter name hierarchy can have a maximum + // of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33 + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // Retrieve all parameters within a hierarchy. + // + // If a user has access to a path, then the user can access all levels of that + // path. For example, if a user has permission to access path /a, then the user + // can also access /a/b. Even if a user has explicitly been denied access in + // IAM for parameter /a/b, they can still call the GetParametersByPath API action + // recursively for /a and view /a/b. + Recursive *bool `type:"boolean"` + + // Retrieve all parameters in a hierarchy with their value decrypted. + WithDecryption *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetParametersByPathInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersByPathInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetParametersByPathInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParametersByPathInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.ParameterFilters != nil { + for i, v := range s.ParameterFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ParameterFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetParametersByPathInput) SetMaxResults(v int64) *GetParametersByPathInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetParametersByPathInput) SetNextToken(v string) *GetParametersByPathInput { + s.NextToken = &v + return s +} + +// SetParameterFilters sets the ParameterFilters field's value. +func (s *GetParametersByPathInput) SetParameterFilters(v []*ParameterStringFilter) *GetParametersByPathInput { + s.ParameterFilters = v + return s +} + +// SetPath sets the Path field's value. +func (s *GetParametersByPathInput) SetPath(v string) *GetParametersByPathInput { + s.Path = &v + return s +} + +// SetRecursive sets the Recursive field's value. +func (s *GetParametersByPathInput) SetRecursive(v bool) *GetParametersByPathInput { + s.Recursive = &v + return s +} + +// SetWithDecryption sets the WithDecryption field's value. +func (s *GetParametersByPathInput) SetWithDecryption(v bool) *GetParametersByPathInput { + s.WithDecryption = &v + return s +} + +type GetParametersByPathOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. 
Use this token to get the + // next set of results. + NextToken *string `type:"string"` + + // A list of parameters found in the specified hierarchy. + Parameters []*Parameter `type:"list"` +} + +// String returns the string representation +func (s GetParametersByPathOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersByPathOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetParametersByPathOutput) SetNextToken(v string) *GetParametersByPathOutput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *GetParametersByPathOutput) SetParameters(v []*Parameter) *GetParametersByPathOutput { + s.Parameters = v + return s +} + +type GetParametersInput struct { + _ struct{} `type:"structure"` + + // Names of the parameters for which you want to query information. + // + // Names is a required field + Names []*string `min:"1" type:"list" required:"true"` + + // Return decrypted secure string value. Return decrypted values for secure + // string parameters. This flag is ignored for String and StringList parameter + // types. + WithDecryption *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetParametersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParametersInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + if s.Names != nil && len(s.Names) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Names", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. +func (s *GetParametersInput) SetNames(v []*string) *GetParametersInput { + s.Names = v + return s +} + +// SetWithDecryption sets the WithDecryption field's value. +func (s *GetParametersInput) SetWithDecryption(v bool) *GetParametersInput { + s.WithDecryption = &v + return s +} + +type GetParametersOutput struct { + _ struct{} `type:"structure"` + + // A list of parameters that are not formatted correctly or do not run during + // an execution. + InvalidParameters []*string `min:"1" type:"list"` + + // A list of details for a parameter. + Parameters []*Parameter `type:"list"` +} + +// String returns the string representation +func (s GetParametersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersOutput) GoString() string { + return s.String() +} + +// SetInvalidParameters sets the InvalidParameters field's value. +func (s *GetParametersOutput) SetInvalidParameters(v []*string) *GetParametersOutput { + s.InvalidParameters = v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *GetParametersOutput) SetParameters(v []*Parameter) *GetParametersOutput { + s.Parameters = v + return s +} + +type GetPatchBaselineForPatchGroupInput struct { + _ struct{} `type:"structure"` + + // Returns he operating system rule specified for patch groups using the patch + // baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // The name of the patch group whose patch baseline should be retrieved. 
+ // + // PatchGroup is a required field + PatchGroup *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPatchBaselineForPatchGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPatchBaselineForPatchGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPatchBaselineForPatchGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPatchBaselineForPatchGroupInput"} + if s.PatchGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchGroup")) + } + if s.PatchGroup != nil && len(*s.PatchGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PatchGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetPatchBaselineForPatchGroupInput) SetOperatingSystem(v string) *GetPatchBaselineForPatchGroupInput { + s.OperatingSystem = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *GetPatchBaselineForPatchGroupInput) SetPatchGroup(v string) *GetPatchBaselineForPatchGroupInput { + s.PatchGroup = &v + return s +} + +type GetPatchBaselineForPatchGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline that should be used for the patch group. + BaselineId *string `min:"20" type:"string"` + + // The operating system rule specified for patch groups using the patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // The name of the patch group. 
+ PatchGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPatchBaselineForPatchGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPatchBaselineForPatchGroupOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *GetPatchBaselineForPatchGroupOutput) SetBaselineId(v string) *GetPatchBaselineForPatchGroupOutput { + s.BaselineId = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetPatchBaselineForPatchGroupOutput) SetOperatingSystem(v string) *GetPatchBaselineForPatchGroupOutput { + s.OperatingSystem = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *GetPatchBaselineForPatchGroupOutput) SetPatchGroup(v string) *GetPatchBaselineForPatchGroupOutput { + s.PatchGroup = &v + return s +} + +type GetPatchBaselineInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline to retrieve. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPatchBaselineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPatchBaselineInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. 
+func (s *GetPatchBaselineInput) SetBaselineId(v string) *GetPatchBaselineInput { + s.BaselineId = &v + return s +} + +type GetPatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // A set of rules used to include patches in the baseline. + ApprovalRules *PatchRuleGroup `type:"structure"` + + // A list of explicitly approved patches for the baseline. + ApprovedPatches []*string `type:"list"` + + // Returns the specified compliance severity level for approved patches in the + // patch baseline. + ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + + // Indicates whether the list of approved patches includes non-security updates + // that should be applied to the instances. The default value is 'false'. Applies + // to Linux instances only. + ApprovedPatchesEnableNonSecurity *bool `type:"boolean"` + + // The ID of the retrieved patch baseline. + BaselineId *string `min:"20" type:"string"` + + // The date the patch baseline was created. + CreatedDate *time.Time `type:"timestamp"` + + // A description of the patch baseline. + Description *string `min:"1" type:"string"` + + // A set of global filters used to exclude patches from the baseline. + GlobalFilters *PatchFilterGroup `type:"structure"` + + // The date the patch baseline was last modified. + ModifiedDate *time.Time `type:"timestamp"` + + // The name of the patch baseline. + Name *string `min:"3" type:"string"` + + // Returns the operating system specified for the patch baseline. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` + + // Patch groups included in the patch baseline. + PatchGroups []*string `type:"list"` + + // A list of explicitly rejected patches for the baseline. + RejectedPatches []*string `type:"list"` + + // The action specified to take on patches included in the RejectedPatches list. + // A patch can be allowed only if it is a dependency of another package, or + // blocked entirely along with packages that include it as a dependency. 
+ RejectedPatchesAction *string `type:"string" enum:"PatchAction"` + + // Information about the patches to use to update the instances, including target + // operating systems and source repositories. Applies to Linux instances only. + Sources []*PatchSource `type:"list"` +} + +// String returns the string representation +func (s GetPatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPatchBaselineOutput) GoString() string { + return s.String() +} + +// SetApprovalRules sets the ApprovalRules field's value. +func (s *GetPatchBaselineOutput) SetApprovalRules(v *PatchRuleGroup) *GetPatchBaselineOutput { + s.ApprovalRules = v + return s +} + +// SetApprovedPatches sets the ApprovedPatches field's value. +func (s *GetPatchBaselineOutput) SetApprovedPatches(v []*string) *GetPatchBaselineOutput { + s.ApprovedPatches = v + return s +} + +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *GetPatchBaselineOutput) SetApprovedPatchesComplianceLevel(v string) *GetPatchBaselineOutput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + +// SetApprovedPatchesEnableNonSecurity sets the ApprovedPatchesEnableNonSecurity field's value. +func (s *GetPatchBaselineOutput) SetApprovedPatchesEnableNonSecurity(v bool) *GetPatchBaselineOutput { + s.ApprovedPatchesEnableNonSecurity = &v + return s +} + +// SetBaselineId sets the BaselineId field's value. +func (s *GetPatchBaselineOutput) SetBaselineId(v string) *GetPatchBaselineOutput { + s.BaselineId = &v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *GetPatchBaselineOutput) SetCreatedDate(v time.Time) *GetPatchBaselineOutput { + s.CreatedDate = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *GetPatchBaselineOutput) SetDescription(v string) *GetPatchBaselineOutput { + s.Description = &v + return s +} + +// SetGlobalFilters sets the GlobalFilters field's value. +func (s *GetPatchBaselineOutput) SetGlobalFilters(v *PatchFilterGroup) *GetPatchBaselineOutput { + s.GlobalFilters = v + return s +} + +// SetModifiedDate sets the ModifiedDate field's value. +func (s *GetPatchBaselineOutput) SetModifiedDate(v time.Time) *GetPatchBaselineOutput { + s.ModifiedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetPatchBaselineOutput) SetName(v string) *GetPatchBaselineOutput { + s.Name = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *GetPatchBaselineOutput) SetOperatingSystem(v string) *GetPatchBaselineOutput { + s.OperatingSystem = &v + return s +} + +// SetPatchGroups sets the PatchGroups field's value. +func (s *GetPatchBaselineOutput) SetPatchGroups(v []*string) *GetPatchBaselineOutput { + s.PatchGroups = v + return s +} + +// SetRejectedPatches sets the RejectedPatches field's value. +func (s *GetPatchBaselineOutput) SetRejectedPatches(v []*string) *GetPatchBaselineOutput { + s.RejectedPatches = v + return s +} + +// SetRejectedPatchesAction sets the RejectedPatchesAction field's value. +func (s *GetPatchBaselineOutput) SetRejectedPatchesAction(v string) *GetPatchBaselineOutput { + s.RejectedPatchesAction = &v + return s +} + +// SetSources sets the Sources field's value. +func (s *GetPatchBaselineOutput) SetSources(v []*PatchSource) *GetPatchBaselineOutput { + s.Sources = v + return s +} + +// The request body of the GetServiceSetting API action. +type GetServiceSettingInput struct { + _ struct{} `type:"structure"` + + // The ID of the service setting to get. The setting ID can be /ssm/parameter-store/default-parameter-tier, + // /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier. 
+ // + // SettingId is a required field + SettingId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceSettingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceSettingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceSettingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceSettingInput"} + if s.SettingId == nil { + invalidParams.Add(request.NewErrParamRequired("SettingId")) + } + if s.SettingId != nil && len(*s.SettingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SettingId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSettingId sets the SettingId field's value. +func (s *GetServiceSettingInput) SetSettingId(v string) *GetServiceSettingInput { + s.SettingId = &v + return s +} + +// The query result body of the GetServiceSetting API action. +type GetServiceSettingOutput struct { + _ struct{} `type:"structure"` + + // The query result of the current service setting. + ServiceSetting *ServiceSetting `type:"structure"` +} + +// String returns the string representation +func (s GetServiceSettingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceSettingOutput) GoString() string { + return s.String() +} + +// SetServiceSetting sets the ServiceSetting field's value. +func (s *GetServiceSettingOutput) SetServiceSetting(v *ServiceSetting) *GetServiceSettingOutput { + s.ServiceSetting = v + return s +} + +// A hierarchy can have a maximum of 15 levels. 
For more information, see Requirements +// and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) +// in the AWS Systems Manager User Guide. +type HierarchyLevelLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A hierarchy can have a maximum of 15 levels. For more information, see Requirements + // and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // in the AWS Systems Manager User Guide. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s HierarchyLevelLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HierarchyLevelLimitExceededException) GoString() string { + return s.String() +} + +func newErrorHierarchyLevelLimitExceededException(v protocol.ResponseMetadata) error { + return &HierarchyLevelLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *HierarchyLevelLimitExceededException) Code() string { + return "HierarchyLevelLimitExceededException" +} + +// Message returns the exception's message. +func (s *HierarchyLevelLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *HierarchyLevelLimitExceededException) OrigErr() error { + return nil +} + +func (s *HierarchyLevelLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *HierarchyLevelLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *HierarchyLevelLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Parameter Store does not support changing a parameter type in a hierarchy. +// For example, you can't change a parameter from a String type to a SecureString +// type. You must create a new, unique parameter. +type HierarchyTypeMismatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Parameter Store does not support changing a parameter type in a hierarchy. + // For example, you can't change a parameter from a String type to a SecureString + // type. You must create a new, unique parameter. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s HierarchyTypeMismatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HierarchyTypeMismatchException) GoString() string { + return s.String() +} + +func newErrorHierarchyTypeMismatchException(v protocol.ResponseMetadata) error { + return &HierarchyTypeMismatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *HierarchyTypeMismatchException) Code() string { + return "HierarchyTypeMismatchException" +} + +// Message returns the exception's message. +func (s *HierarchyTypeMismatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *HierarchyTypeMismatchException) OrigErr() error { + return nil +} + +func (s *HierarchyTypeMismatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *HierarchyTypeMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *HierarchyTypeMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Error returned when an idempotent operation is retried and the parameters +// don't match the original call to the API with the same idempotency token. +type IdempotentParameterMismatch struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s IdempotentParameterMismatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdempotentParameterMismatch) GoString() string { + return s.String() +} + +func newErrorIdempotentParameterMismatch(v protocol.ResponseMetadata) error { + return &IdempotentParameterMismatch{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IdempotentParameterMismatch) Code() string { + return "IdempotentParameterMismatch" +} + +// Message returns the exception's message. +func (s *IdempotentParameterMismatch) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IdempotentParameterMismatch) OrigErr() error { + return nil +} + +func (s *IdempotentParameterMismatch) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IdempotentParameterMismatch) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *IdempotentParameterMismatch) RequestID() string { + return s.RespMetadata.RequestID +} + +// There is a conflict in the policies specified for this parameter. You can't, +// for example, specify two Expiration policies for a parameter. Review your +// policies, and try again. +type IncompatiblePolicyException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s IncompatiblePolicyException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncompatiblePolicyException) GoString() string { + return s.String() +} + +func newErrorIncompatiblePolicyException(v protocol.ResponseMetadata) error { + return &IncompatiblePolicyException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IncompatiblePolicyException) Code() string { + return "IncompatiblePolicyException" +} + +// Message returns the exception's message. +func (s *IncompatiblePolicyException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IncompatiblePolicyException) OrigErr() error { + return nil +} + +func (s *IncompatiblePolicyException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IncompatiblePolicyException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IncompatiblePolicyException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Status information about the aggregated associations. 
+type InstanceAggregatedAssociationOverview struct { + _ struct{} `type:"structure"` + + // Detailed status information about the aggregated associations. + DetailedStatus *string `type:"string"` + + // The number of associations for the instance(s). + InstanceAssociationStatusAggregatedCount map[string]*int64 `type:"map"` +} + +// String returns the string representation +func (s InstanceAggregatedAssociationOverview) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAggregatedAssociationOverview) GoString() string { + return s.String() +} + +// SetDetailedStatus sets the DetailedStatus field's value. +func (s *InstanceAggregatedAssociationOverview) SetDetailedStatus(v string) *InstanceAggregatedAssociationOverview { + s.DetailedStatus = &v + return s +} + +// SetInstanceAssociationStatusAggregatedCount sets the InstanceAssociationStatusAggregatedCount field's value. +func (s *InstanceAggregatedAssociationOverview) SetInstanceAssociationStatusAggregatedCount(v map[string]*int64) *InstanceAggregatedAssociationOverview { + s.InstanceAssociationStatusAggregatedCount = v + return s +} + +// One or more association documents on the instance. +type InstanceAssociation struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `type:"string"` + + // Version information for the association on the instance. + AssociationVersion *string `type:"string"` + + // The content of the association document for the instance(s). + Content *string `min:"1" type:"string"` + + // The instance ID. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s InstanceAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. 
+func (s *InstanceAssociation) SetAssociationId(v string) *InstanceAssociation { + s.AssociationId = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *InstanceAssociation) SetAssociationVersion(v string) *InstanceAssociation { + s.AssociationVersion = &v + return s +} + +// SetContent sets the Content field's value. +func (s *InstanceAssociation) SetContent(v string) *InstanceAssociation { + s.Content = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceAssociation) SetInstanceId(v string) *InstanceAssociation { + s.InstanceId = &v + return s +} + +// An S3 bucket where you want to store the results of this request. +type InstanceAssociationOutputLocation struct { + _ struct{} `type:"structure"` + + // An S3 bucket where you want to store the results of this request. + S3Location *S3OutputLocation `type:"structure"` +} + +// String returns the string representation +func (s InstanceAssociationOutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAssociationOutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceAssociationOutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceAssociationOutputLocation"} + if s.S3Location != nil { + if err := s.S3Location.Validate(); err != nil { + invalidParams.AddNested("S3Location", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Location sets the S3Location field's value. +func (s *InstanceAssociationOutputLocation) SetS3Location(v *S3OutputLocation) *InstanceAssociationOutputLocation { + s.S3Location = v + return s +} + +// The URL of S3 bucket where you want to store the results of this request. 
+type InstanceAssociationOutputUrl struct { + _ struct{} `type:"structure"` + + // The URL of S3 bucket where you want to store the results of this request. + S3OutputUrl *S3OutputUrl `type:"structure"` +} + +// String returns the string representation +func (s InstanceAssociationOutputUrl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAssociationOutputUrl) GoString() string { + return s.String() +} + +// SetS3OutputUrl sets the S3OutputUrl field's value. +func (s *InstanceAssociationOutputUrl) SetS3OutputUrl(v *S3OutputUrl) *InstanceAssociationOutputUrl { + s.S3OutputUrl = v + return s +} + +// Status information about the instance association. +type InstanceAssociationStatusInfo struct { + _ struct{} `type:"structure"` + + // The association ID. + AssociationId *string `type:"string"` + + // The name of the association applied to the instance. + AssociationName *string `type:"string"` + + // The version of the association applied to the instance. + AssociationVersion *string `type:"string"` + + // Detailed status information about the instance association. + DetailedStatus *string `type:"string"` + + // The association document versions. + DocumentVersion *string `type:"string"` + + // An error code returned by the request to create the association. + ErrorCode *string `type:"string"` + + // The date the instance association ran. + ExecutionDate *time.Time `type:"timestamp"` + + // Summary information about association execution. + ExecutionSummary *string `min:"1" type:"string"` + + // The instance ID where the association was created. + InstanceId *string `type:"string"` + + // The name of the association. + Name *string `type:"string"` + + // A URL for an S3 bucket where you want to store the results of this request. + OutputUrl *InstanceAssociationOutputUrl `type:"structure"` + + // Status information about the instance association. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s InstanceAssociationStatusInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceAssociationStatusInfo) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *InstanceAssociationStatusInfo) SetAssociationId(v string) *InstanceAssociationStatusInfo { + s.AssociationId = &v + return s +} + +// SetAssociationName sets the AssociationName field's value. +func (s *InstanceAssociationStatusInfo) SetAssociationName(v string) *InstanceAssociationStatusInfo { + s.AssociationName = &v + return s +} + +// SetAssociationVersion sets the AssociationVersion field's value. +func (s *InstanceAssociationStatusInfo) SetAssociationVersion(v string) *InstanceAssociationStatusInfo { + s.AssociationVersion = &v + return s +} + +// SetDetailedStatus sets the DetailedStatus field's value. +func (s *InstanceAssociationStatusInfo) SetDetailedStatus(v string) *InstanceAssociationStatusInfo { + s.DetailedStatus = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *InstanceAssociationStatusInfo) SetDocumentVersion(v string) *InstanceAssociationStatusInfo { + s.DocumentVersion = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *InstanceAssociationStatusInfo) SetErrorCode(v string) *InstanceAssociationStatusInfo { + s.ErrorCode = &v + return s +} + +// SetExecutionDate sets the ExecutionDate field's value. +func (s *InstanceAssociationStatusInfo) SetExecutionDate(v time.Time) *InstanceAssociationStatusInfo { + s.ExecutionDate = &v + return s +} + +// SetExecutionSummary sets the ExecutionSummary field's value. +func (s *InstanceAssociationStatusInfo) SetExecutionSummary(v string) *InstanceAssociationStatusInfo { + s.ExecutionSummary = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
func (s *InstanceAssociationStatusInfo) SetInstanceId(v string) *InstanceAssociationStatusInfo {
	s.InstanceId = &v
	return s
}

// SetName sets the Name field's value.
func (s *InstanceAssociationStatusInfo) SetName(v string) *InstanceAssociationStatusInfo {
	s.Name = &v
	return s
}

// SetOutputUrl sets the OutputUrl field's value.
func (s *InstanceAssociationStatusInfo) SetOutputUrl(v *InstanceAssociationOutputUrl) *InstanceAssociationStatusInfo {
	s.OutputUrl = v
	return s
}

// SetStatus sets the Status field's value.
func (s *InstanceAssociationStatusInfo) SetStatus(v string) *InstanceAssociationStatusInfo {
	s.Status = &v
	return s
}

// Describes a managed instance: the SSM Agent version it runs, its platform,
// its connection status, and summary information about its associations.
type InstanceInformation struct {
	_ struct{} `type:"structure"`

	// The activation ID created by Systems Manager when the server or VM was registered.
	ActivationId *string `type:"string"`

	// The version of SSM Agent running on your Linux instance.
	AgentVersion *string `type:"string"`

	// Information about the association.
	AssociationOverview *InstanceAggregatedAssociationOverview `type:"structure"`

	// The status of the association.
	AssociationStatus *string `type:"string"`

	// The fully qualified host name of the managed instance.
	ComputerName *string `min:"1" type:"string"`

	// The IP address of the managed instance.
	IPAddress *string `min:"1" type:"string"`

	// The Amazon Identity and Access Management (IAM) role assigned to the on-premises
	// Systems Manager managed instance. This call does not return the IAM role
	// for EC2 instances. To retrieve the IAM role for an EC2 instance, use the
	// Amazon EC2 DescribeInstances action. For information, see DescribeInstances
	// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
	// in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html)
	// in the AWS CLI Command Reference.
	IamRole *string `type:"string"`

	// The instance ID.
	InstanceId *string `type:"string"`

	// Indicates whether the latest version of SSM Agent is running on your Linux
	// Managed Instance. This field does not indicate whether or not the latest
	// version is installed on Windows managed instances, because some older versions
	// of Windows Server use the EC2Config service to process SSM requests.
	IsLatestVersion *bool `type:"boolean"`

	// The date the association was last run.
	LastAssociationExecutionDate *time.Time `type:"timestamp"`

	// The date and time when agent last pinged Systems Manager service.
	LastPingDateTime *time.Time `type:"timestamp"`

	// The last date the association was successfully run.
	LastSuccessfulAssociationExecutionDate *time.Time `type:"timestamp"`

	// The name assigned to an on-premises server or virtual machine (VM) when it
	// is activated as a Systems Manager managed instance. The name is specified
	// as the DefaultInstanceName property using the CreateActivation command. It
	// is applied to the managed instance by specifying the Activation Code and
	// Activation ID when you install SSM Agent on the instance, as explained in
	// Install SSM Agent for a hybrid environment (Linux) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html)
	// and Install SSM Agent for a hybrid environment (Windows) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html).
	// To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances
	// action. For information, see DescribeInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
	// in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html)
	// in the AWS CLI Command Reference.
	Name *string `type:"string"`

	// Connection status of SSM Agent.
	//
	// The status Inactive has been deprecated and is no longer in use.
	PingStatus *string `type:"string" enum:"PingStatus"`

	// The name of the operating system platform running on your instance.
	PlatformName *string `type:"string"`

	// The operating system platform type.
	PlatformType *string `type:"string" enum:"PlatformType"`

	// The version of the OS platform running on your instance.
	PlatformVersion *string `type:"string"`

	// The date the server or VM was registered with AWS as a managed instance.
	RegistrationDate *time.Time `type:"timestamp"`

	// The type of instance. Instances are either EC2 instances or managed instances.
	ResourceType *string `type:"string" enum:"ResourceType"`
}

// String returns the string representation
func (s InstanceInformation) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InstanceInformation) GoString() string {
	return s.String()
}

// SetActivationId sets the ActivationId field's value.
func (s *InstanceInformation) SetActivationId(v string) *InstanceInformation {
	s.ActivationId = &v
	return s
}

// SetAgentVersion sets the AgentVersion field's value.
func (s *InstanceInformation) SetAgentVersion(v string) *InstanceInformation {
	s.AgentVersion = &v
	return s
}

// SetAssociationOverview sets the AssociationOverview field's value.
func (s *InstanceInformation) SetAssociationOverview(v *InstanceAggregatedAssociationOverview) *InstanceInformation {
	s.AssociationOverview = v
	return s
}

// SetAssociationStatus sets the AssociationStatus field's value.
func (s *InstanceInformation) SetAssociationStatus(v string) *InstanceInformation {
	s.AssociationStatus = &v
	return s
}

// SetComputerName sets the ComputerName field's value.
func (s *InstanceInformation) SetComputerName(v string) *InstanceInformation {
	s.ComputerName = &v
	return s
}

// SetIPAddress sets the IPAddress field's value.
func (s *InstanceInformation) SetIPAddress(v string) *InstanceInformation {
	s.IPAddress = &v
	return s
}

// SetIamRole sets the IamRole field's value.
func (s *InstanceInformation) SetIamRole(v string) *InstanceInformation {
	s.IamRole = &v
	return s
}

// SetInstanceId sets the InstanceId field's value.
func (s *InstanceInformation) SetInstanceId(v string) *InstanceInformation {
	s.InstanceId = &v
	return s
}

// SetIsLatestVersion sets the IsLatestVersion field's value.
func (s *InstanceInformation) SetIsLatestVersion(v bool) *InstanceInformation {
	s.IsLatestVersion = &v
	return s
}

// SetLastAssociationExecutionDate sets the LastAssociationExecutionDate field's value.
func (s *InstanceInformation) SetLastAssociationExecutionDate(v time.Time) *InstanceInformation {
	s.LastAssociationExecutionDate = &v
	return s
}

// SetLastPingDateTime sets the LastPingDateTime field's value.
func (s *InstanceInformation) SetLastPingDateTime(v time.Time) *InstanceInformation {
	s.LastPingDateTime = &v
	return s
}

// SetLastSuccessfulAssociationExecutionDate sets the LastSuccessfulAssociationExecutionDate field's value.
func (s *InstanceInformation) SetLastSuccessfulAssociationExecutionDate(v time.Time) *InstanceInformation {
	s.LastSuccessfulAssociationExecutionDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *InstanceInformation) SetName(v string) *InstanceInformation {
	s.Name = &v
	return s
}

// SetPingStatus sets the PingStatus field's value.
func (s *InstanceInformation) SetPingStatus(v string) *InstanceInformation {
	s.PingStatus = &v
	return s
}

// SetPlatformName sets the PlatformName field's value.
func (s *InstanceInformation) SetPlatformName(v string) *InstanceInformation {
	s.PlatformName = &v
	return s
}

// SetPlatformType sets the PlatformType field's value.
func (s *InstanceInformation) SetPlatformType(v string) *InstanceInformation {
	s.PlatformType = &v
	return s
}

// SetPlatformVersion sets the PlatformVersion field's value.
func (s *InstanceInformation) SetPlatformVersion(v string) *InstanceInformation {
	s.PlatformVersion = &v
	return s
}

// SetRegistrationDate sets the RegistrationDate field's value.
func (s *InstanceInformation) SetRegistrationDate(v time.Time) *InstanceInformation {
	s.RegistrationDate = &v
	return s
}

// SetResourceType sets the ResourceType field's value.
func (s *InstanceInformation) SetResourceType(v string) *InstanceInformation {
	s.ResourceType = &v
	return s
}

// Describes a filter for a specific list of instances. You can filter instance
// information by using tags. You specify tags by using a key-value mapping.
//
// Use this action instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList
// method. The InstanceInformationFilterList method is a legacy method and does
// not support tags.
type InstanceInformationFilter struct {
	_ struct{} `type:"structure"`

	// The name of the filter.
	//
	// Key is a required field
	Key *string `locationName:"key" type:"string" required:"true" enum:"InstanceInformationFilterKey"`

	// The filter values.
	//
	// ValueSet is a required field
	ValueSet []*string `locationName:"valueSet" min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s InstanceInformationFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InstanceInformationFilter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InstanceInformationFilter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InstanceInformationFilter"}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.ValueSet == nil {
		invalidParams.Add(request.NewErrParamRequired("ValueSet"))
	}
	if s.ValueSet != nil && len(s.ValueSet) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ValueSet", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *InstanceInformationFilter) SetKey(v string) *InstanceInformationFilter {
	s.Key = &v
	return s
}

// SetValueSet sets the ValueSet field's value.
func (s *InstanceInformationFilter) SetValueSet(v []*string) *InstanceInformationFilter {
	s.ValueSet = v
	return s
}

// The filters to describe or get information about your managed instances.
type InstanceInformationStringFilter struct {
	_ struct{} `type:"structure"`

	// The filter key name to describe your instances. For example:
	//
	// "InstanceIds"|"AgentVersion"|"PingStatus"|"PlatformTypes"|"ActivationIds"|"IamRole"|"ResourceType"|"AssociationStatus"|"Tag
	// Key"
	//
	// Key is a required field
	Key *string `min:"1" type:"string" required:"true"`

	// The filter values.
	//
	// Values is a required field
	Values []*string `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s InstanceInformationStringFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InstanceInformationStringFilter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InstanceInformationStringFilter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InstanceInformationStringFilter"}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}
	if s.Values == nil {
		invalidParams.Add(request.NewErrParamRequired("Values"))
	}
	if s.Values != nil && len(s.Values) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Values", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *InstanceInformationStringFilter) SetKey(v string) *InstanceInformationStringFilter {
	s.Key = &v
	return s
}

// SetValues sets the Values field's value.
func (s *InstanceInformationStringFilter) SetValues(v []*string) *InstanceInformationStringFilter {
	s.Values = v
	return s
}

// Defines the high-level patch compliance state for a managed instance, providing
// information about the number of installed, missing, not applicable, and failed
// patches along with metadata about the operation when this information was
// gathered for the instance.
type InstancePatchState struct {
	_ struct{} `type:"structure"`

	// The ID of the patch baseline used to patch the instance.
	//
	// BaselineId is a required field
	BaselineId *string `min:"20" type:"string" required:"true"`

	// The number of patches from the patch baseline that were attempted to be installed
	// during the last patching operation, but failed to install.
	FailedCount *int64 `type:"integer"`

	// An https URL or an Amazon S3 path-style URL to a list of patches to be installed.
	// This patch installation list, which you maintain in an S3 bucket in YAML
	// format and specify in the SSM document AWS-RunPatchBaseline, overrides the
	// patches specified by the default patch baseline.
	//
	// For more information about the InstallOverrideList parameter, see About the
	// SSM document AWS-RunPatchBaseline (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html)
	// in the AWS Systems Manager User Guide.
	InstallOverrideList *string `min:"1" type:"string"`

	// The number of patches from the patch baseline that are installed on the instance.
	InstalledCount *int64 `type:"integer"`

	// The number of patches not specified in the patch baseline that are installed
	// on the instance.
	InstalledOtherCount *int64 `type:"integer"`

	// The number of patches installed by Patch Manager since the last time the
	// instance was rebooted.
	InstalledPendingRebootCount *int64 `type:"integer"`

	// The number of patches installed on an instance that are specified in a RejectedPatches
	// list. Patches with a status of InstalledRejected were typically installed
	// before they were added to a RejectedPatches list.
	//
	// If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction,
	// the value of InstalledRejectedCount will always be 0 (zero).
	InstalledRejectedCount *int64 `type:"integer"`

	// The ID of the managed instance the high-level patch compliance information
	// was collected for.
	//
	// InstanceId is a required field
	InstanceId *string `type:"string" required:"true"`

	// The time of the last attempt to patch the instance with NoReboot specified
	// as the reboot option.
	LastNoRebootInstallOperationTime *time.Time `type:"timestamp"`

	// The number of patches from the patch baseline that are applicable for the
	// instance but aren't currently installed.
	MissingCount *int64 `type:"integer"`

	// The number of patches from the patch baseline that aren't applicable for
	// the instance and therefore aren't installed on the instance. This number
	// may be truncated if the list of patch names is very large. The number of
	// patches beyond this limit are reported in UnreportedNotApplicableCount.
	NotApplicableCount *int64 `type:"integer"`

	// The type of patching operation that was performed: SCAN (assess patch compliance
	// state) or INSTALL (install missing patches).
	//
	// Operation is a required field
	Operation *string `type:"string" required:"true" enum:"PatchOperationType"`

	// The time the most recent patching operation completed on the instance.
	//
	// OperationEndTime is a required field
	OperationEndTime *time.Time `type:"timestamp" required:"true"`

	// The time the most recent patching operation was started on the instance.
	//
	// OperationStartTime is a required field
	OperationStartTime *time.Time `type:"timestamp" required:"true"`

	// Placeholder information. This field will always be empty in the current release
	// of the service.
	OwnerInformation *string `min:"1" type:"string" sensitive:"true"`

	// The name of the patch group the managed instance belongs to.
	//
	// PatchGroup is a required field
	PatchGroup *string `min:"1" type:"string" required:"true"`

	// Indicates the reboot option specified in the patch baseline.
	//
	// Reboot options apply to Install operations only. Reboots are not attempted
	// for Patch Manager Scan operations.
	//
	//    * RebootIfNeeded: Patch Manager tries to reboot the instance if it installed
	//    any patches, or if any patches are detected with a status of InstalledPendingReboot.
	//
	//    * NoReboot: Patch Manager attempts to install missing packages without
	//    trying to reboot the system. Patches installed with this option are assigned
	//    a status of InstalledPendingReboot. These patches might not be in effect
	//    until a reboot is performed.
	RebootOption *string `type:"string" enum:"RebootOption"`

	// The ID of the patch baseline snapshot used during the patching operation
	// when this compliance data was collected.
	SnapshotId *string `min:"36" type:"string"`

	// The number of patches beyond the supported limit of NotApplicableCount that
	// are not reported by name to Systems Manager Inventory.
	UnreportedNotApplicableCount *int64 `type:"integer"`
}

// String returns the string representation
func (s InstancePatchState) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InstancePatchState) GoString() string {
	return s.String()
}

// SetBaselineId sets the BaselineId field's value.
func (s *InstancePatchState) SetBaselineId(v string) *InstancePatchState {
	s.BaselineId = &v
	return s
}

// SetFailedCount sets the FailedCount field's value.
func (s *InstancePatchState) SetFailedCount(v int64) *InstancePatchState {
	s.FailedCount = &v
	return s
}

// SetInstallOverrideList sets the InstallOverrideList field's value.
func (s *InstancePatchState) SetInstallOverrideList(v string) *InstancePatchState {
	s.InstallOverrideList = &v
	return s
}

// SetInstalledCount sets the InstalledCount field's value.
func (s *InstancePatchState) SetInstalledCount(v int64) *InstancePatchState {
	s.InstalledCount = &v
	return s
}

// SetInstalledOtherCount sets the InstalledOtherCount field's value.
func (s *InstancePatchState) SetInstalledOtherCount(v int64) *InstancePatchState {
	s.InstalledOtherCount = &v
	return s
}

// SetInstalledPendingRebootCount sets the InstalledPendingRebootCount field's value.
func (s *InstancePatchState) SetInstalledPendingRebootCount(v int64) *InstancePatchState {
	s.InstalledPendingRebootCount = &v
	return s
}

// SetInstalledRejectedCount sets the InstalledRejectedCount field's value.
func (s *InstancePatchState) SetInstalledRejectedCount(v int64) *InstancePatchState {
	s.InstalledRejectedCount = &v
	return s
}

// SetInstanceId sets the InstanceId field's value.
func (s *InstancePatchState) SetInstanceId(v string) *InstancePatchState {
	s.InstanceId = &v
	return s
}

// SetLastNoRebootInstallOperationTime sets the LastNoRebootInstallOperationTime field's value.
func (s *InstancePatchState) SetLastNoRebootInstallOperationTime(v time.Time) *InstancePatchState {
	s.LastNoRebootInstallOperationTime = &v
	return s
}

// SetMissingCount sets the MissingCount field's value.
func (s *InstancePatchState) SetMissingCount(v int64) *InstancePatchState {
	s.MissingCount = &v
	return s
}

// SetNotApplicableCount sets the NotApplicableCount field's value.
func (s *InstancePatchState) SetNotApplicableCount(v int64) *InstancePatchState {
	s.NotApplicableCount = &v
	return s
}

// SetOperation sets the Operation field's value.
func (s *InstancePatchState) SetOperation(v string) *InstancePatchState {
	s.Operation = &v
	return s
}

// SetOperationEndTime sets the OperationEndTime field's value.
func (s *InstancePatchState) SetOperationEndTime(v time.Time) *InstancePatchState {
	s.OperationEndTime = &v
	return s
}

// SetOperationStartTime sets the OperationStartTime field's value.
func (s *InstancePatchState) SetOperationStartTime(v time.Time) *InstancePatchState {
	s.OperationStartTime = &v
	return s
}

// SetOwnerInformation sets the OwnerInformation field's value.
func (s *InstancePatchState) SetOwnerInformation(v string) *InstancePatchState {
	s.OwnerInformation = &v
	return s
}

// SetPatchGroup sets the PatchGroup field's value.
func (s *InstancePatchState) SetPatchGroup(v string) *InstancePatchState {
	s.PatchGroup = &v
	return s
}

// SetRebootOption sets the RebootOption field's value.
func (s *InstancePatchState) SetRebootOption(v string) *InstancePatchState {
	s.RebootOption = &v
	return s
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *InstancePatchState) SetSnapshotId(v string) *InstancePatchState {
	s.SnapshotId = &v
	return s
}

// SetUnreportedNotApplicableCount sets the UnreportedNotApplicableCount field's value.
func (s *InstancePatchState) SetUnreportedNotApplicableCount(v int64) *InstancePatchState {
	s.UnreportedNotApplicableCount = &v
	return s
}

// Defines a filter used in DescribeInstancePatchStatesForPatchGroup to scope
// down the information returned by the API.
type InstancePatchStateFilter struct {
	_ struct{} `type:"structure"`

	// The key for the filter. Supported values are FailedCount, InstalledCount,
	// InstalledOtherCount, MissingCount and NotApplicableCount.
	//
	// Key is a required field
	Key *string `min:"1" type:"string" required:"true"`

	// The type of comparison that should be performed for the value: Equal, NotEqual,
	// LessThan or GreaterThan.
	//
	// Type is a required field
	Type *string `type:"string" required:"true" enum:"InstancePatchStateOperatorType"`

	// The value for the filter, must be an integer greater than or equal to 0.
	//
	// Values is a required field
	Values []*string `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s InstancePatchStateFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InstancePatchStateFilter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InstancePatchStateFilter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InstancePatchStateFilter"}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}
	if s.Type == nil {
		invalidParams.Add(request.NewErrParamRequired("Type"))
	}
	if s.Values == nil {
		invalidParams.Add(request.NewErrParamRequired("Values"))
	}
	if s.Values != nil && len(s.Values) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Values", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetKey sets the Key field's value.
func (s *InstancePatchStateFilter) SetKey(v string) *InstancePatchStateFilter {
	s.Key = &v
	return s
}

// SetType sets the Type field's value.
func (s *InstancePatchStateFilter) SetType(v string) *InstancePatchStateFilter {
	s.Type = &v
	return s
}

// SetValues sets the Values field's value.
func (s *InstancePatchStateFilter) SetValues(v []*string) *InstancePatchStateFilter {
	s.Values = v
	return s
}

// An error occurred on the server side.
type InternalServerError struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The error message returned by the service.
	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s InternalServerError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InternalServerError) GoString() string {
	return s.String()
}

func newErrorInternalServerError(v protocol.ResponseMetadata) error {
	return &InternalServerError{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InternalServerError) Code() string {
	return "InternalServerError"
}

// Message returns the exception's message.
func (s *InternalServerError) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InternalServerError) OrigErr() error {
	return nil
}

func (s *InternalServerError) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalServerError) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InternalServerError) RequestID() string {
	return s.RespMetadata.RequestID
}

// The activation is not valid. The activation might have been deleted, or the
// ActivationId and the ActivationCode do not match.
type InvalidActivation struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The error message returned by the service.
	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s InvalidActivation) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidActivation) GoString() string {
	return s.String()
}

func newErrorInvalidActivation(v protocol.ResponseMetadata) error {
	return &InvalidActivation{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidActivation) Code() string {
	return "InvalidActivation"
}

// Message returns the exception's message.
func (s *InvalidActivation) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidActivation) OrigErr() error {
	return nil
}

func (s *InvalidActivation) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidActivation) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidActivation) RequestID() string {
	return s.RespMetadata.RequestID
}

// The activation ID is not valid. Verify that you entered the correct ActivationId
// or ActivationCode and try again.
type InvalidActivationId struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The error message returned by the service.
	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s InvalidActivationId) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidActivationId) GoString() string {
	return s.String()
}

func newErrorInvalidActivationId(v protocol.ResponseMetadata) error {
	return &InvalidActivationId{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidActivationId) Code() string {
	return "InvalidActivationId"
}

// Message returns the exception's message.
func (s *InvalidActivationId) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidActivationId) OrigErr() error {
	return nil
}

func (s *InvalidActivationId) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidActivationId) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidActivationId) RequestID() string {
	return s.RespMetadata.RequestID
}

// The specified aggregator is not valid for inventory groups. Verify that the
// aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation.
type InvalidAggregatorException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The error message returned by the service.
	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s InvalidAggregatorException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidAggregatorException) GoString() string {
	return s.String()
}

func newErrorInvalidAggregatorException(v protocol.ResponseMetadata) error {
	return &InvalidAggregatorException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidAggregatorException) Code() string {
	return "InvalidAggregatorException"
}

// Message returns the exception's message.
func (s *InvalidAggregatorException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidAggregatorException) OrigErr() error {
	return nil
}

func (s *InvalidAggregatorException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidAggregatorException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidAggregatorException) RequestID() string {
	return s.RespMetadata.RequestID
}

// The request does not meet the regular expression requirement.
type InvalidAllowedPatternException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The request does not meet the regular expression requirement.
	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidAllowedPatternException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidAllowedPatternException) GoString() string {
	return s.String()
}

func newErrorInvalidAllowedPatternException(v protocol.ResponseMetadata) error {
	return &InvalidAllowedPatternException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidAllowedPatternException) Code() string {
	return "InvalidAllowedPatternException"
}

// Message returns the exception's message.
func (s *InvalidAllowedPatternException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidAllowedPatternException) OrigErr() error {
	return nil
}

func (s *InvalidAllowedPatternException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidAllowedPatternException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidAllowedPatternException) RequestID() string {
	return s.RespMetadata.RequestID
}

// The association is not valid or does not exist.
type InvalidAssociation struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// The error message returned by the service.
	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s InvalidAssociation) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidAssociation) GoString() string {
	return s.String()
}

func newErrorInvalidAssociation(v protocol.ResponseMetadata) error {
	return &InvalidAssociation{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidAssociation) Code() string {
	return "InvalidAssociation"
}

// Message returns the exception's message.
func (s *InvalidAssociation) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidAssociation) OrigErr() error {
	return nil
}

func (s *InvalidAssociation) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidAssociation) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InvalidAssociation) RequestID() string {
	return s.RespMetadata.RequestID
}

// The version you specified is not valid. Use ListAssociationVersions to view
// all versions of an association according to the association ID. Or, use the
// $LATEST parameter to view the latest version of the association.
+type InvalidAssociationVersion struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidAssociationVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidAssociationVersion) GoString() string { + return s.String() +} + +func newErrorInvalidAssociationVersion(v protocol.ResponseMetadata) error { + return &InvalidAssociationVersion{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidAssociationVersion) Code() string { + return "InvalidAssociationVersion" +} + +// Message returns the exception's message. +func (s *InvalidAssociationVersion) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidAssociationVersion) OrigErr() error { + return nil +} + +func (s *InvalidAssociationVersion) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidAssociationVersion) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidAssociationVersion) RequestID() string { + return s.RespMetadata.RequestID +} + +// The supplied parameters for invoking the specified Automation document are +// incorrect. For example, they may not match the set of parameters permitted +// for the specified Automation document. 
+type InvalidAutomationExecutionParametersException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidAutomationExecutionParametersException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidAutomationExecutionParametersException) GoString() string { + return s.String() +} + +func newErrorInvalidAutomationExecutionParametersException(v protocol.ResponseMetadata) error { + return &InvalidAutomationExecutionParametersException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidAutomationExecutionParametersException) Code() string { + return "InvalidAutomationExecutionParametersException" +} + +// Message returns the exception's message. +func (s *InvalidAutomationExecutionParametersException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidAutomationExecutionParametersException) OrigErr() error { + return nil +} + +func (s *InvalidAutomationExecutionParametersException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidAutomationExecutionParametersException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidAutomationExecutionParametersException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The signal is not valid for the current Automation execution. 
+type InvalidAutomationSignalException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidAutomationSignalException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidAutomationSignalException) GoString() string { + return s.String() +} + +func newErrorInvalidAutomationSignalException(v protocol.ResponseMetadata) error { + return &InvalidAutomationSignalException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidAutomationSignalException) Code() string { + return "InvalidAutomationSignalException" +} + +// Message returns the exception's message. +func (s *InvalidAutomationSignalException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidAutomationSignalException) OrigErr() error { + return nil +} + +func (s *InvalidAutomationSignalException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidAutomationSignalException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidAutomationSignalException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified update status operation is not valid. 
+type InvalidAutomationStatusUpdateException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidAutomationStatusUpdateException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidAutomationStatusUpdateException) GoString() string { + return s.String() +} + +func newErrorInvalidAutomationStatusUpdateException(v protocol.ResponseMetadata) error { + return &InvalidAutomationStatusUpdateException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidAutomationStatusUpdateException) Code() string { + return "InvalidAutomationStatusUpdateException" +} + +// Message returns the exception's message. +func (s *InvalidAutomationStatusUpdateException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidAutomationStatusUpdateException) OrigErr() error { + return nil +} + +func (s *InvalidAutomationStatusUpdateException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidAutomationStatusUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidAutomationStatusUpdateException) RequestID() string { + return s.RespMetadata.RequestID +} + +type InvalidCommandId struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidCommandId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidCommandId) GoString() string { + return s.String() +} + +func newErrorInvalidCommandId(v protocol.ResponseMetadata) error { + return &InvalidCommandId{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidCommandId) Code() string { + return "InvalidCommandId" +} + +// Message returns the exception's message. +func (s *InvalidCommandId) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidCommandId) OrigErr() error { + return nil +} + +func (s *InvalidCommandId) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidCommandId) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidCommandId) RequestID() string { + return s.RespMetadata.RequestID +} + +// One or more of the parameters specified for the delete operation is not valid. +// Verify all parameters and try again. 
+type InvalidDeleteInventoryParametersException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDeleteInventoryParametersException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDeleteInventoryParametersException) GoString() string { + return s.String() +} + +func newErrorInvalidDeleteInventoryParametersException(v protocol.ResponseMetadata) error { + return &InvalidDeleteInventoryParametersException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDeleteInventoryParametersException) Code() string { + return "InvalidDeleteInventoryParametersException" +} + +// Message returns the exception's message. +func (s *InvalidDeleteInventoryParametersException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDeleteInventoryParametersException) OrigErr() error { + return nil +} + +func (s *InvalidDeleteInventoryParametersException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDeleteInventoryParametersException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDeleteInventoryParametersException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The ID specified for the delete operation does not exist or is not valid. +// Verify the ID and try again. 
+type InvalidDeletionIdException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDeletionIdException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDeletionIdException) GoString() string { + return s.String() +} + +func newErrorInvalidDeletionIdException(v protocol.ResponseMetadata) error { + return &InvalidDeletionIdException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDeletionIdException) Code() string { + return "InvalidDeletionIdException" +} + +// Message returns the exception's message. +func (s *InvalidDeletionIdException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDeletionIdException) OrigErr() error { + return nil +} + +func (s *InvalidDeletionIdException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDeletionIdException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDeletionIdException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified document does not exist. +type InvalidDocument struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The document does not exist or the document is not available to the user. + // This exception can be issued by CreateAssociation, CreateAssociationBatch, + // DeleteAssociation, DeleteDocument, DescribeAssociation, DescribeDocument, + // GetDocument, SendCommand, or UpdateAssociationStatus. 
+ Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocument) GoString() string { + return s.String() +} + +func newErrorInvalidDocument(v protocol.ResponseMetadata) error { + return &InvalidDocument{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDocument) Code() string { + return "InvalidDocument" +} + +// Message returns the exception's message. +func (s *InvalidDocument) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDocument) OrigErr() error { + return nil +} + +func (s *InvalidDocument) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDocument) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocument) RequestID() string { + return s.RespMetadata.RequestID +} + +// The content for the document is not valid. +type InvalidDocumentContent struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A description of the validation error. + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocumentContent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocumentContent) GoString() string { + return s.String() +} + +func newErrorInvalidDocumentContent(v protocol.ResponseMetadata) error { + return &InvalidDocumentContent{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidDocumentContent) Code() string { + return "InvalidDocumentContent" +} + +// Message returns the exception's message. +func (s *InvalidDocumentContent) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDocumentContent) OrigErr() error { + return nil +} + +func (s *InvalidDocumentContent) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDocumentContent) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocumentContent) RequestID() string { + return s.RespMetadata.RequestID +} + +// You attempted to delete a document while it is still shared. You must stop +// sharing the document before you can delete it. +type InvalidDocumentOperation struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocumentOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocumentOperation) GoString() string { + return s.String() +} + +func newErrorInvalidDocumentOperation(v protocol.ResponseMetadata) error { + return &InvalidDocumentOperation{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDocumentOperation) Code() string { + return "InvalidDocumentOperation" +} + +// Message returns the exception's message. +func (s *InvalidDocumentOperation) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidDocumentOperation) OrigErr() error { + return nil +} + +func (s *InvalidDocumentOperation) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDocumentOperation) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocumentOperation) RequestID() string { + return s.RespMetadata.RequestID +} + +// The version of the document schema is not supported. +type InvalidDocumentSchemaVersion struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocumentSchemaVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocumentSchemaVersion) GoString() string { + return s.String() +} + +func newErrorInvalidDocumentSchemaVersion(v protocol.ResponseMetadata) error { + return &InvalidDocumentSchemaVersion{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDocumentSchemaVersion) Code() string { + return "InvalidDocumentSchemaVersion" +} + +// Message returns the exception's message. +func (s *InvalidDocumentSchemaVersion) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDocumentSchemaVersion) OrigErr() error { + return nil +} + +func (s *InvalidDocumentSchemaVersion) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidDocumentSchemaVersion) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocumentSchemaVersion) RequestID() string { + return s.RespMetadata.RequestID +} + +// The document type is not valid. Valid document types are described in the +// DocumentType property. +type InvalidDocumentType struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocumentType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocumentType) GoString() string { + return s.String() +} + +func newErrorInvalidDocumentType(v protocol.ResponseMetadata) error { + return &InvalidDocumentType{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDocumentType) Code() string { + return "InvalidDocumentType" +} + +// Message returns the exception's message. +func (s *InvalidDocumentType) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDocumentType) OrigErr() error { + return nil +} + +func (s *InvalidDocumentType) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDocumentType) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocumentType) RequestID() string { + return s.RespMetadata.RequestID +} + +// The document version is not valid or does not exist. 
+type InvalidDocumentVersion struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidDocumentVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidDocumentVersion) GoString() string { + return s.String() +} + +func newErrorInvalidDocumentVersion(v protocol.ResponseMetadata) error { + return &InvalidDocumentVersion{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidDocumentVersion) Code() string { + return "InvalidDocumentVersion" +} + +// Message returns the exception's message. +func (s *InvalidDocumentVersion) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidDocumentVersion) OrigErr() error { + return nil +} + +func (s *InvalidDocumentVersion) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidDocumentVersion) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidDocumentVersion) RequestID() string { + return s.RespMetadata.RequestID +} + +// The filter name is not valid. Verify the you entered the correct name and +// try again. 
+type InvalidFilter struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilter) GoString() string { + return s.String() +} + +func newErrorInvalidFilter(v protocol.ResponseMetadata) error { + return &InvalidFilter{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidFilter) Code() string { + return "InvalidFilter" +} + +// Message returns the exception's message. +func (s *InvalidFilter) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidFilter) OrigErr() error { + return nil +} + +func (s *InvalidFilter) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidFilter) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidFilter) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified key is not valid. +type InvalidFilterKey struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidFilterKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilterKey) GoString() string { + return s.String() +} + +func newErrorInvalidFilterKey(v protocol.ResponseMetadata) error { + return &InvalidFilterKey{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *InvalidFilterKey) Code() string { + return "InvalidFilterKey" +} + +// Message returns the exception's message. +func (s *InvalidFilterKey) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidFilterKey) OrigErr() error { + return nil +} + +func (s *InvalidFilterKey) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidFilterKey) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidFilterKey) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified filter option is not valid. Valid options are Equals and BeginsWith. +// For Path filter, valid options are Recursive and OneLevel. +type InvalidFilterOption struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The specified filter option is not valid. Valid options are Equals and BeginsWith. + // For Path filter, valid options are Recursive and OneLevel. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidFilterOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilterOption) GoString() string { + return s.String() +} + +func newErrorInvalidFilterOption(v protocol.ResponseMetadata) error { + return &InvalidFilterOption{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidFilterOption) Code() string { + return "InvalidFilterOption" +} + +// Message returns the exception's message. 
+func (s *InvalidFilterOption) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidFilterOption) OrigErr() error { + return nil +} + +func (s *InvalidFilterOption) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidFilterOption) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidFilterOption) RequestID() string { + return s.RespMetadata.RequestID +} + +// The filter value is not valid. Verify the value and try again. +type InvalidFilterValue struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidFilterValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidFilterValue) GoString() string { + return s.String() +} + +func newErrorInvalidFilterValue(v protocol.ResponseMetadata) error { + return &InvalidFilterValue{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidFilterValue) Code() string { + return "InvalidFilterValue" +} + +// Message returns the exception's message. +func (s *InvalidFilterValue) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidFilterValue) OrigErr() error { + return nil +} + +func (s *InvalidFilterValue) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidFilterValue) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidFilterValue) RequestID() string { + return s.RespMetadata.RequestID +} + +// The following problems can cause this exception: +// +// You do not have permission to access the instance. +// +// SSM Agent is not running. Verify that SSM Agent is running. +// +// SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. +// +// The instance is not in valid state. Valid states are: Running, Pending, Stopped, +// Stopping. Invalid states are: Shutting-down and Terminated. +type InvalidInstanceId struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidInstanceId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidInstanceId) GoString() string { + return s.String() +} + +func newErrorInvalidInstanceId(v protocol.ResponseMetadata) error { + return &InvalidInstanceId{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidInstanceId) Code() string { + return "InvalidInstanceId" +} + +// Message returns the exception's message. +func (s *InvalidInstanceId) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidInstanceId) OrigErr() error { + return nil +} + +func (s *InvalidInstanceId) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidInstanceId) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidInstanceId) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified filter value is not valid. +type InvalidInstanceInformationFilterValue struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidInstanceInformationFilterValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidInstanceInformationFilterValue) GoString() string { + return s.String() +} + +func newErrorInvalidInstanceInformationFilterValue(v protocol.ResponseMetadata) error { + return &InvalidInstanceInformationFilterValue{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidInstanceInformationFilterValue) Code() string { + return "InvalidInstanceInformationFilterValue" +} + +// Message returns the exception's message. +func (s *InvalidInstanceInformationFilterValue) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidInstanceInformationFilterValue) OrigErr() error { + return nil +} + +func (s *InvalidInstanceInformationFilterValue) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidInstanceInformationFilterValue) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidInstanceInformationFilterValue) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified inventory group is not valid. 
+type InvalidInventoryGroupException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidInventoryGroupException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidInventoryGroupException) GoString() string { + return s.String() +} + +func newErrorInvalidInventoryGroupException(v protocol.ResponseMetadata) error { + return &InvalidInventoryGroupException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidInventoryGroupException) Code() string { + return "InvalidInventoryGroupException" +} + +// Message returns the exception's message. +func (s *InvalidInventoryGroupException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidInventoryGroupException) OrigErr() error { + return nil +} + +func (s *InvalidInventoryGroupException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidInventoryGroupException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidInventoryGroupException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You specified invalid keys or values in the Context attribute for InventoryItem. +// Verify the keys and values, and try again. 
+type InvalidInventoryItemContextException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidInventoryItemContextException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidInventoryItemContextException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidInventoryItemContextException(v protocol.ResponseMetadata) error {
+	return &InvalidInventoryItemContextException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidInventoryItemContextException) Code() string {
+	return "InvalidInventoryItemContextException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidInventoryItemContextException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidInventoryItemContextException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidInventoryItemContextException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidInventoryItemContextException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidInventoryItemContextException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The request is not valid.
+type InvalidInventoryRequestException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidInventoryRequestException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidInventoryRequestException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidInventoryRequestException(v protocol.ResponseMetadata) error {
+	return &InvalidInventoryRequestException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidInventoryRequestException) Code() string {
+	return "InvalidInventoryRequestException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidInventoryRequestException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidInventoryRequestException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidInventoryRequestException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidInventoryRequestException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidInventoryRequestException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// One or more content items is not valid.
+type InvalidItemContentException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+
+	TypeName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidItemContentException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidItemContentException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidItemContentException(v protocol.ResponseMetadata) error {
+	return &InvalidItemContentException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidItemContentException) Code() string {
+	return "InvalidItemContentException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidItemContentException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidItemContentException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidItemContentException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidItemContentException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidItemContentException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The query key ID is not valid.
+type InvalidKeyId struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidKeyId) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidKeyId) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidKeyId(v protocol.ResponseMetadata) error {
+	return &InvalidKeyId{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidKeyId) Code() string {
+	return "InvalidKeyId"
+}
+
+// Message returns the exception's message.
+func (s *InvalidKeyId) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidKeyId) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidKeyId) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidKeyId) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidKeyId) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The specified token is not valid.
+type InvalidNextToken struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidNextToken) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidNextToken) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidNextToken(v protocol.ResponseMetadata) error {
+	return &InvalidNextToken{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidNextToken) Code() string {
+	return "InvalidNextToken"
+}
+
+// Message returns the exception's message.
+func (s *InvalidNextToken) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidNextToken) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidNextToken) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidNextToken) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidNextToken) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// One or more configuration items is not valid. Verify that a valid Amazon
+// Resource Name (ARN) was provided for an Amazon SNS topic.
+type InvalidNotificationConfig struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidNotificationConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidNotificationConfig) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidNotificationConfig(v protocol.ResponseMetadata) error {
+	return &InvalidNotificationConfig{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidNotificationConfig) Code() string {
+	return "InvalidNotificationConfig"
+}
+
+// Message returns the exception's message.
+func (s *InvalidNotificationConfig) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidNotificationConfig) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidNotificationConfig) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidNotificationConfig) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidNotificationConfig) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The delete inventory option specified is not valid. Verify the option and
+// try again.
+type InvalidOptionException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidOptionException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidOptionException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidOptionException(v protocol.ResponseMetadata) error {
+	return &InvalidOptionException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidOptionException) Code() string {
+	return "InvalidOptionException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidOptionException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidOptionException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidOptionException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidOptionException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidOptionException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The S3 bucket does not exist.
+type InvalidOutputFolder struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidOutputFolder) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidOutputFolder) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidOutputFolder(v protocol.ResponseMetadata) error {
+	return &InvalidOutputFolder{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidOutputFolder) Code() string {
+	return "InvalidOutputFolder"
+}
+
+// Message returns the exception's message.
+func (s *InvalidOutputFolder) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidOutputFolder) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidOutputFolder) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidOutputFolder) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidOutputFolder) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The output location is not valid or does not exist.
+type InvalidOutputLocation struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidOutputLocation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidOutputLocation) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidOutputLocation(v protocol.ResponseMetadata) error {
+	return &InvalidOutputLocation{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidOutputLocation) Code() string {
+	return "InvalidOutputLocation"
+}
+
+// Message returns the exception's message.
+func (s *InvalidOutputLocation) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidOutputLocation) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidOutputLocation) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidOutputLocation) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidOutputLocation) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// You must specify values for all required parameters in the Systems Manager
+// document. You can only supply values to parameters defined in the Systems
+// Manager document.
+type InvalidParameters struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidParameters) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidParameters) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidParameters(v protocol.ResponseMetadata) error {
+	return &InvalidParameters{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidParameters) Code() string {
+	return "InvalidParameters"
+}
+
+// Message returns the exception's message.
+func (s *InvalidParameters) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidParameters) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidParameters) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidParameters) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidParameters) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The permission type is not supported. Share is the only supported permission
+// type.
+type InvalidPermissionType struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidPermissionType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidPermissionType) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidPermissionType(v protocol.ResponseMetadata) error {
+	return &InvalidPermissionType{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidPermissionType) Code() string {
+	return "InvalidPermissionType"
+}
+
+// Message returns the exception's message.
+func (s *InvalidPermissionType) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidPermissionType) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidPermissionType) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidPermissionType) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidPermissionType) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The plugin name is not valid.
+type InvalidPluginName struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidPluginName) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidPluginName) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidPluginName(v protocol.ResponseMetadata) error {
+	return &InvalidPluginName{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidPluginName) Code() string {
+	return "InvalidPluginName"
+}
+
+// Message returns the exception's message.
+func (s *InvalidPluginName) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidPluginName) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidPluginName) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidPluginName) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidPluginName) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// A policy attribute or its value is invalid.
+type InvalidPolicyAttributeException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidPolicyAttributeException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidPolicyAttributeException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidPolicyAttributeException(v protocol.ResponseMetadata) error {
+	return &InvalidPolicyAttributeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidPolicyAttributeException) Code() string {
+	return "InvalidPolicyAttributeException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidPolicyAttributeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidPolicyAttributeException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidPolicyAttributeException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidPolicyAttributeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidPolicyAttributeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The policy type is not supported. Parameter Store supports the following
+// policy types: Expiration, ExpirationNotification, and NoChangeNotification.
+type InvalidPolicyTypeException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidPolicyTypeException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidPolicyTypeException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidPolicyTypeException(v protocol.ResponseMetadata) error {
+	return &InvalidPolicyTypeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidPolicyTypeException) Code() string {
+	return "InvalidPolicyTypeException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidPolicyTypeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidPolicyTypeException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidPolicyTypeException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidPolicyTypeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidPolicyTypeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The resource ID is not valid. Verify that you entered the correct ID and
+// try again.
+type InvalidResourceId struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidResourceId) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidResourceId) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidResourceId(v protocol.ResponseMetadata) error {
+	return &InvalidResourceId{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidResourceId) Code() string {
+	return "InvalidResourceId"
+}
+
+// Message returns the exception's message.
+func (s *InvalidResourceId) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidResourceId) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidResourceId) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidResourceId) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidResourceId) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The resource type is not valid. For example, if you are attempting to tag
+// an instance, the instance must be a registered, managed instance.
+type InvalidResourceType struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidResourceType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidResourceType) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidResourceType(v protocol.ResponseMetadata) error {
+	return &InvalidResourceType{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidResourceType) Code() string {
+	return "InvalidResourceType"
+}
+
+// Message returns the exception's message.
+func (s *InvalidResourceType) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidResourceType) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidResourceType) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidResourceType) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidResourceType) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The specified inventory item result attribute is not valid.
+type InvalidResultAttributeException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidResultAttributeException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidResultAttributeException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidResultAttributeException(v protocol.ResponseMetadata) error {
+	return &InvalidResultAttributeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidResultAttributeException) Code() string {
+	return "InvalidResultAttributeException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidResultAttributeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidResultAttributeException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidResultAttributeException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidResultAttributeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidResultAttributeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The role name can't contain invalid characters. Also verify that you specified
+// an IAM role for notifications that includes the required trust policy. For
+// information about configuring the IAM role for Run Command notifications,
+// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html)
+// in the AWS Systems Manager User Guide.
+type InvalidRole struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidRole) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidRole) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidRole(v protocol.ResponseMetadata) error {
+	return &InvalidRole{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidRole) Code() string {
+	return "InvalidRole"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRole) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRole) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidRole) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRole) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRole) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The schedule is invalid. Verify your cron or rate expression and try again.
+type InvalidSchedule struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidSchedule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidSchedule) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidSchedule(v protocol.ResponseMetadata) error {
+	return &InvalidSchedule{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidSchedule) Code() string {
+	return "InvalidSchedule"
+}
+
+// Message returns the exception's message.
+func (s *InvalidSchedule) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidSchedule) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidSchedule) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidSchedule) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidSchedule) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The target is not valid or does not exist. It might not be configured for
+// Systems Manager or you might not have permission to perform the operation.
+type InvalidTarget struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidTarget) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidTarget) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidTarget(v protocol.ResponseMetadata) error {
+	return &InvalidTarget{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidTarget) Code() string {
+	return "InvalidTarget"
+}
+
+// Message returns the exception's message.
+func (s *InvalidTarget) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidTarget) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidTarget) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidTarget) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidTarget) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The parameter type name is not valid.
+type InvalidTypeNameException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s InvalidTypeNameException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InvalidTypeNameException) GoString() string {
+	return s.String()
+}
+
+func newErrorInvalidTypeNameException(v protocol.ResponseMetadata) error {
+	return &InvalidTypeNameException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *InvalidTypeNameException) Code() string {
+	return "InvalidTypeNameException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidTypeNameException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidTypeNameException) OrigErr() error {
+	return nil
+}
+
+func (s *InvalidTypeNameException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidTypeNameException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidTypeNameException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// The update is not valid.
+type InvalidUpdate struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s InvalidUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidUpdate) GoString() string { + return s.String() +} + +func newErrorInvalidUpdate(v protocol.ResponseMetadata) error { + return &InvalidUpdate{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidUpdate) Code() string { + return "InvalidUpdate" +} + +// Message returns the exception's message. +func (s *InvalidUpdate) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidUpdate) OrigErr() error { + return nil +} + +func (s *InvalidUpdate) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidUpdate) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidUpdate) RequestID() string { + return s.RespMetadata.RequestID +} + +// Specifies the inventory type and attribute for the aggregation execution. +type InventoryAggregator struct { + _ struct{} `type:"structure"` + + // Nested aggregators to further refine aggregation for an inventory type. + Aggregators []*InventoryAggregator `min:"1" type:"list"` + + // The inventory type and attribute name for aggregation. + Expression *string `min:"1" type:"string"` + + // A user-defined set of one or more filters on which to aggregate inventory + // data. Groups return a count of resources that match and don't match the specified + // criteria. 
+ Groups []*InventoryGroup `min:"1" type:"list"` +} + +// String returns the string representation +func (s InventoryAggregator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryAggregator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryAggregator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryAggregator"} + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.Expression != nil && len(*s.Expression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Expression", 1)) + } + if s.Groups != nil && len(s.Groups) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Groups", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Groups != nil { + for i, v := range s.Groups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Groups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregators sets the Aggregators field's value. +func (s *InventoryAggregator) SetAggregators(v []*InventoryAggregator) *InventoryAggregator { + s.Aggregators = v + return s +} + +// SetExpression sets the Expression field's value. +func (s *InventoryAggregator) SetExpression(v string) *InventoryAggregator { + s.Expression = &v + return s +} + +// SetGroups sets the Groups field's value. +func (s *InventoryAggregator) SetGroups(v []*InventoryGroup) *InventoryAggregator { + s.Groups = v + return s +} + +// Status information returned by the DeleteInventory action. 
+type InventoryDeletionStatusItem struct {
+	_ struct{} `type:"structure"`
+
+	// The deletion ID returned by the DeleteInventory action.
+	DeletionId *string `type:"string"`
+
+	// The UTC timestamp when the delete operation started.
+	DeletionStartTime *time.Time `type:"timestamp"`
+
+	// Information about the delete operation. For more information about this summary,
+	// see Understanding the delete inventory summary (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete)
+	// in the AWS Systems Manager User Guide.
+	DeletionSummary *InventoryDeletionSummary `type:"structure"`
+
+	// The status of the operation. Possible values are InProgress and Complete.
+	LastStatus *string `type:"string" enum:"InventoryDeletionStatus"`
+
+	// Information about the status.
+	LastStatusMessage *string `type:"string"`
+
+	// The UTC timestamp of when the last status report was sent.
+	LastStatusUpdateTime *time.Time `type:"timestamp"`
+
+	// The name of the inventory data type.
+	TypeName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryDeletionStatusItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryDeletionStatusItem) GoString() string {
+	return s.String()
+}
+
+// SetDeletionId sets the DeletionId field's value.
+func (s *InventoryDeletionStatusItem) SetDeletionId(v string) *InventoryDeletionStatusItem {
+	s.DeletionId = &v
+	return s
+}
+
+// SetDeletionStartTime sets the DeletionStartTime field's value.
+func (s *InventoryDeletionStatusItem) SetDeletionStartTime(v time.Time) *InventoryDeletionStatusItem {
+	s.DeletionStartTime = &v
+	return s
+}
+
+// SetDeletionSummary sets the DeletionSummary field's value.
+func (s *InventoryDeletionStatusItem) SetDeletionSummary(v *InventoryDeletionSummary) *InventoryDeletionStatusItem { + s.DeletionSummary = v + return s +} + +// SetLastStatus sets the LastStatus field's value. +func (s *InventoryDeletionStatusItem) SetLastStatus(v string) *InventoryDeletionStatusItem { + s.LastStatus = &v + return s +} + +// SetLastStatusMessage sets the LastStatusMessage field's value. +func (s *InventoryDeletionStatusItem) SetLastStatusMessage(v string) *InventoryDeletionStatusItem { + s.LastStatusMessage = &v + return s +} + +// SetLastStatusUpdateTime sets the LastStatusUpdateTime field's value. +func (s *InventoryDeletionStatusItem) SetLastStatusUpdateTime(v time.Time) *InventoryDeletionStatusItem { + s.LastStatusUpdateTime = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *InventoryDeletionStatusItem) SetTypeName(v string) *InventoryDeletionStatusItem { + s.TypeName = &v + return s +} + +// Information about the delete operation. +type InventoryDeletionSummary struct { + _ struct{} `type:"structure"` + + // Remaining number of items to delete. + RemainingCount *int64 `type:"integer"` + + // A list of counts and versions for deleted items. + SummaryItems []*InventoryDeletionSummaryItem `type:"list"` + + // The total number of items to delete. This count does not change during the + // delete operation. + TotalCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s InventoryDeletionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDeletionSummary) GoString() string { + return s.String() +} + +// SetRemainingCount sets the RemainingCount field's value. +func (s *InventoryDeletionSummary) SetRemainingCount(v int64) *InventoryDeletionSummary { + s.RemainingCount = &v + return s +} + +// SetSummaryItems sets the SummaryItems field's value. 
+func (s *InventoryDeletionSummary) SetSummaryItems(v []*InventoryDeletionSummaryItem) *InventoryDeletionSummary { + s.SummaryItems = v + return s +} + +// SetTotalCount sets the TotalCount field's value. +func (s *InventoryDeletionSummary) SetTotalCount(v int64) *InventoryDeletionSummary { + s.TotalCount = &v + return s +} + +// Either a count, remaining count, or a version number in a delete inventory +// summary. +type InventoryDeletionSummaryItem struct { + _ struct{} `type:"structure"` + + // A count of the number of deleted items. + Count *int64 `type:"integer"` + + // The remaining number of items to delete. + RemainingCount *int64 `type:"integer"` + + // The inventory type version. + Version *string `type:"string"` +} + +// String returns the string representation +func (s InventoryDeletionSummaryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDeletionSummaryItem) GoString() string { + return s.String() +} + +// SetCount sets the Count field's value. +func (s *InventoryDeletionSummaryItem) SetCount(v int64) *InventoryDeletionSummaryItem { + s.Count = &v + return s +} + +// SetRemainingCount sets the RemainingCount field's value. +func (s *InventoryDeletionSummaryItem) SetRemainingCount(v int64) *InventoryDeletionSummaryItem { + s.RemainingCount = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *InventoryDeletionSummaryItem) SetVersion(v string) *InventoryDeletionSummaryItem { + s.Version = &v + return s +} + +// One or more filters. Use a filter to return a more specific list of results. +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The type of filter. + // + // The Exists filter must be used with aggregators. 
For more information, see + // Aggregating inventory data (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-aggregate.html) + // in the AWS Systems Manager User Guide. + Type *string `type:"string" enum:"InventoryQueryOperatorType"` + + // Inventory filter values. Example: inventory filter where instance IDs are + // specified as values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, + // i-1a2b3c4d5e6,Type=Equal + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *InventoryFilter) SetKey(v string) *InventoryFilter { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *InventoryFilter) SetType(v string) *InventoryFilter { + s.Type = &v + return s +} + +// SetValues sets the Values field's value. +func (s *InventoryFilter) SetValues(v []*string) *InventoryFilter { + s.Values = v + return s +} + +// A user-defined set of one or more filters on which to aggregate inventory +// data. 
Groups return a count of resources that match and don't match the specified +// criteria. +type InventoryGroup struct { + _ struct{} `type:"structure"` + + // Filters define the criteria for the group. The matchingCount field displays + // the number of resources that match the criteria. The notMatchingCount field + // displays the number of resources that don't match the criteria. + // + // Filters is a required field + Filters []*InventoryFilter `min:"1" type:"list" required:"true"` + + // The name of the group. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryGroup) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryGroup"} + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *InventoryGroup) SetFilters(v []*InventoryFilter) *InventoryGroup { + s.Filters = v + return s +} + +// SetName sets the Name field's value. 
+func (s *InventoryGroup) SetName(v string) *InventoryGroup { + s.Name = &v + return s +} + +// Information collected from managed instances based on your inventory policy +// document +type InventoryItem struct { + _ struct{} `type:"structure"` + + // The time the inventory information was collected. + // + // CaptureTime is a required field + CaptureTime *string `type:"string" required:"true"` + + // The inventory data of the inventory type. + Content []map[string]*string `type:"list"` + + // MD5 hash of the inventory item type contents. The content hash is used to + // determine whether to update inventory information. The PutInventory API does + // not update the inventory item type contents if the MD5 hash has not changed + // since last update. + ContentHash *string `type:"string"` + + // A map of associated properties for a specified inventory type. For example, + // with this attribute, you can specify the ExecutionId, ExecutionType, ComplianceType + // properties of the AWS:ComplianceItem type. + Context map[string]*string `type:"map"` + + // The schema version for the inventory item. + // + // SchemaVersion is a required field + SchemaVersion *string `type:"string" required:"true"` + + // The name of the inventory type. Default inventory item type names start with + // AWS. Custom inventory type names will start with Custom. Default inventory + // item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, + // AWS:Network, and AWS:WindowsUpdate. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventoryItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryItem"} + if s.CaptureTime == nil { + invalidParams.Add(request.NewErrParamRequired("CaptureTime")) + } + if s.SchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaVersion")) + } + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCaptureTime sets the CaptureTime field's value. +func (s *InventoryItem) SetCaptureTime(v string) *InventoryItem { + s.CaptureTime = &v + return s +} + +// SetContent sets the Content field's value. +func (s *InventoryItem) SetContent(v []map[string]*string) *InventoryItem { + s.Content = v + return s +} + +// SetContentHash sets the ContentHash field's value. +func (s *InventoryItem) SetContentHash(v string) *InventoryItem { + s.ContentHash = &v + return s +} + +// SetContext sets the Context field's value. +func (s *InventoryItem) SetContext(v map[string]*string) *InventoryItem { + s.Context = v + return s +} + +// SetSchemaVersion sets the SchemaVersion field's value. +func (s *InventoryItem) SetSchemaVersion(v string) *InventoryItem { + s.SchemaVersion = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *InventoryItem) SetTypeName(v string) *InventoryItem { + s.TypeName = &v + return s +} + +// Attributes are the entries within the inventory item content. It contains +// name and value. +type InventoryItemAttribute struct { + _ struct{} `type:"structure"` + + // The data type of the inventory item attribute. + // + // DataType is a required field + DataType *string `type:"string" required:"true" enum:"InventoryAttributeDataType"` + + // Name of the inventory item attribute. 
+ // + // Name is a required field + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryItemAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryItemAttribute) GoString() string { + return s.String() +} + +// SetDataType sets the DataType field's value. +func (s *InventoryItemAttribute) SetDataType(v string) *InventoryItemAttribute { + s.DataType = &v + return s +} + +// SetName sets the Name field's value. +func (s *InventoryItemAttribute) SetName(v string) *InventoryItemAttribute { + s.Name = &v + return s +} + +// The inventory item schema definition. Users can use this to compose inventory +// query filters. +type InventoryItemSchema struct { + _ struct{} `type:"structure"` + + // The schema attributes for inventory. This contains data type and attribute + // name. + // + // Attributes is a required field + Attributes []*InventoryItemAttribute `min:"1" type:"list" required:"true"` + + // The alias name of the inventory type. The alias name is used for display + // purposes. + DisplayName *string `type:"string"` + + // The name of the inventory type. Default inventory item type names start with + // AWS. Custom inventory type names will start with Custom. Default inventory + // item types include the following: AWS:AWSComponent, AWS:Application, AWS:InstanceInformation, + // AWS:Network, and AWS:WindowsUpdate. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` + + // The schema version for the inventory item. + Version *string `type:"string"` +} + +// String returns the string representation +func (s InventoryItemSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryItemSchema) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. 
+func (s *InventoryItemSchema) SetAttributes(v []*InventoryItemAttribute) *InventoryItemSchema { + s.Attributes = v + return s +} + +// SetDisplayName sets the DisplayName field's value. +func (s *InventoryItemSchema) SetDisplayName(v string) *InventoryItemSchema { + s.DisplayName = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *InventoryItemSchema) SetTypeName(v string) *InventoryItemSchema { + s.TypeName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *InventoryItemSchema) SetVersion(v string) *InventoryItemSchema { + s.Version = &v + return s +} + +// Inventory query results. +type InventoryResultEntity struct { + _ struct{} `type:"structure"` + + // The data section in the inventory result entity JSON. + Data map[string]*InventoryResultItem `type:"map"` + + // ID of the inventory result entity. For example, for managed instance inventory + // the result will be the managed instance ID. For EC2 instance inventory, the + // result will be the instance ID. + Id *string `type:"string"` +} + +// String returns the string representation +func (s InventoryResultEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryResultEntity) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. +func (s *InventoryResultEntity) SetData(v map[string]*InventoryResultItem) *InventoryResultEntity { + s.Data = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryResultEntity) SetId(v string) *InventoryResultEntity { + s.Id = &v + return s +} + +// The inventory result item. +type InventoryResultItem struct { + _ struct{} `type:"structure"` + + // The time inventory item data was captured. + CaptureTime *string `type:"string"` + + // Contains all the inventory data of the item type. Results include attribute + // names and values. 
+	//
+	// Content is a required field
+	Content []map[string]*string `type:"list" required:"true"`
+
+	// MD5 hash of the inventory item type contents. The content hash is used to
+	// determine whether to update inventory information. The PutInventory API does
+	// not update the inventory item type contents if the MD5 hash has not changed
+	// since last update.
+	ContentHash *string `type:"string"`
+
+	// The schema version for the inventory result item.
+	//
+	// SchemaVersion is a required field
+	SchemaVersion *string `type:"string" required:"true"`
+
+	// The name of the inventory result item type.
+	//
+	// TypeName is a required field
+	TypeName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryResultItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryResultItem) GoString() string {
+	return s.String()
+}
+
+// SetCaptureTime sets the CaptureTime field's value.
+func (s *InventoryResultItem) SetCaptureTime(v string) *InventoryResultItem {
+	s.CaptureTime = &v
+	return s
+}
+
+// SetContent sets the Content field's value.
+func (s *InventoryResultItem) SetContent(v []map[string]*string) *InventoryResultItem {
+	s.Content = v
+	return s
+}
+
+// SetContentHash sets the ContentHash field's value.
+func (s *InventoryResultItem) SetContentHash(v string) *InventoryResultItem {
+	s.ContentHash = &v
+	return s
+}
+
+// SetSchemaVersion sets the SchemaVersion field's value.
+func (s *InventoryResultItem) SetSchemaVersion(v string) *InventoryResultItem {
+	s.SchemaVersion = &v
+	return s
+}
+
+// SetTypeName sets the TypeName field's value.
+func (s *InventoryResultItem) SetTypeName(v string) *InventoryResultItem {
+	s.TypeName = &v
+	return s
+}
+
+// The command ID and instance ID you specified did not match any invocations.
+// Verify the command ID and the instance ID and try again.
+type InvocationDoesNotExist struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvocationDoesNotExist) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvocationDoesNotExist) GoString() string { + return s.String() +} + +func newErrorInvocationDoesNotExist(v protocol.ResponseMetadata) error { + return &InvocationDoesNotExist{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvocationDoesNotExist) Code() string { + return "InvocationDoesNotExist" +} + +// Message returns the exception's message. +func (s *InvocationDoesNotExist) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvocationDoesNotExist) OrigErr() error { + return nil +} + +func (s *InvocationDoesNotExist) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvocationDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvocationDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID +} + +// The inventory item has invalid content. 
+type ItemContentMismatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ItemContentMismatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemContentMismatchException) GoString() string { + return s.String() +} + +func newErrorItemContentMismatchException(v protocol.ResponseMetadata) error { + return &ItemContentMismatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ItemContentMismatchException) Code() string { + return "ItemContentMismatchException" +} + +// Message returns the exception's message. +func (s *ItemContentMismatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ItemContentMismatchException) OrigErr() error { + return nil +} + +func (s *ItemContentMismatchException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ItemContentMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ItemContentMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The inventory item size has exceeded the size limit. 
+type ItemSizeLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ItemSizeLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemSizeLimitExceededException) GoString() string { + return s.String() +} + +func newErrorItemSizeLimitExceededException(v protocol.ResponseMetadata) error { + return &ItemSizeLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ItemSizeLimitExceededException) Code() string { + return "ItemSizeLimitExceededException" +} + +// Message returns the exception's message. +func (s *ItemSizeLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ItemSizeLimitExceededException) OrigErr() error { + return nil +} + +func (s *ItemSizeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ItemSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ItemSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type LabelParameterVersionInput struct { + _ struct{} `type:"structure"` + + // One or more labels to attach to the specified parameter version. + // + // Labels is a required field + Labels []*string `min:"1" type:"list" required:"true"` + + // The parameter name on which you want to attach one or more labels. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The specific version of the parameter on which you want to attach one or + // more labels. If no version is specified, the system attaches the label to + // the latest version. + ParameterVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s LabelParameterVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LabelParameterVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LabelParameterVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LabelParameterVersionInput"} + if s.Labels == nil { + invalidParams.Add(request.NewErrParamRequired("Labels")) + } + if s.Labels != nil && len(s.Labels) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Labels", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLabels sets the Labels field's value. +func (s *LabelParameterVersionInput) SetLabels(v []*string) *LabelParameterVersionInput { + s.Labels = v + return s +} + +// SetName sets the Name field's value. +func (s *LabelParameterVersionInput) SetName(v string) *LabelParameterVersionInput { + s.Name = &v + return s +} + +// SetParameterVersion sets the ParameterVersion field's value. +func (s *LabelParameterVersionInput) SetParameterVersion(v int64) *LabelParameterVersionInput { + s.ParameterVersion = &v + return s +} + +type LabelParameterVersionOutput struct { + _ struct{} `type:"structure"` + + // The label does not meet the requirements. 
For information about parameter + // label requirements, see Labeling parameters (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html) + // in the AWS Systems Manager User Guide. + InvalidLabels []*string `min:"1" type:"list"` + + // The version of the parameter that has been labeled. + ParameterVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s LabelParameterVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LabelParameterVersionOutput) GoString() string { + return s.String() +} + +// SetInvalidLabels sets the InvalidLabels field's value. +func (s *LabelParameterVersionOutput) SetInvalidLabels(v []*string) *LabelParameterVersionOutput { + s.InvalidLabels = v + return s +} + +// SetParameterVersion sets the ParameterVersion field's value. +func (s *LabelParameterVersionOutput) SetParameterVersion(v int64) *LabelParameterVersionOutput { + s.ParameterVersion = &v + return s +} + +type ListAssociationVersionsInput struct { + _ struct{} `type:"structure"` + + // The association ID for which you want to view all versions. + // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAssociationVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssociationVersionsInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *ListAssociationVersionsInput) SetAssociationId(v string) *ListAssociationVersionsInput { + s.AssociationId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAssociationVersionsInput) SetMaxResults(v int64) *ListAssociationVersionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociationVersionsInput) SetNextToken(v string) *ListAssociationVersionsInput { + s.NextToken = &v + return s +} + +type ListAssociationVersionsOutput struct { + _ struct{} `type:"structure"` + + // Information about all versions of the association for the specified association + // ID. + AssociationVersions []*AssociationVersionInfo `min:"1" type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationVersionsOutput) GoString() string { + return s.String() +} + +// SetAssociationVersions sets the AssociationVersions field's value. +func (s *ListAssociationVersionsOutput) SetAssociationVersions(v []*AssociationVersionInfo) *ListAssociationVersionsOutput { + s.AssociationVersions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListAssociationVersionsOutput) SetNextToken(v string) *ListAssociationVersionsOutput { + s.NextToken = &v + return s +} + +type ListAssociationsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + // + // Filtering associations using the InstanceID attribute only returns legacy + // associations created using the InstanceID attribute. Associations targeting + // the instance that are part of the Target Attributes ResourceGroup or Tags + // are not returned. + AssociationFilterList []*AssociationFilter `min:"1" type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAssociationsInput"} + if s.AssociationFilterList != nil && len(s.AssociationFilterList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssociationFilterList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.AssociationFilterList != nil { + for i, v := range s.AssociationFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AssociationFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationFilterList sets the AssociationFilterList field's value. +func (s *ListAssociationsInput) SetAssociationFilterList(v []*AssociationFilter) *ListAssociationsInput { + s.AssociationFilterList = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAssociationsInput) SetMaxResults(v int64) *ListAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociationsInput) SetNextToken(v string) *ListAssociationsInput { + s.NextToken = &v + return s +} + +type ListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The associations. + Associations []*Association `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAssociationsOutput) GoString() string { + return s.String() +} + +// SetAssociations sets the Associations field's value. 
+func (s *ListAssociationsOutput) SetAssociations(v []*Association) *ListAssociationsOutput { + s.Associations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAssociationsOutput) SetNextToken(v string) *ListAssociationsOutput { + s.NextToken = &v + return s +} + +type ListCommandInvocationsInput struct { + _ struct{} `type:"structure"` + + // (Optional) The invocations for a specific command ID. + CommandId *string `min:"36" type:"string"` + + // (Optional) If set this returns the response of the command executions and + // any command output. By default this is set to False. + Details *bool `type:"boolean"` + + // (Optional) One or more filters. Use a filter to return a more specific list + // of results. + Filters []*CommandFilter `min:"1" type:"list"` + + // (Optional) The command execution details for a specific instance ID. + InstanceId *string `type:"string"` + + // (Optional) The maximum number of items to return for this call. The call + // also returns a token that you can specify in a subsequent call to get the + // next set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandInvocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandInvocationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListCommandInvocationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCommandInvocationsInput"} + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommandId sets the CommandId field's value. +func (s *ListCommandInvocationsInput) SetCommandId(v string) *ListCommandInvocationsInput { + s.CommandId = &v + return s +} + +// SetDetails sets the Details field's value. +func (s *ListCommandInvocationsInput) SetDetails(v bool) *ListCommandInvocationsInput { + s.Details = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListCommandInvocationsInput) SetFilters(v []*CommandFilter) *ListCommandInvocationsInput { + s.Filters = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ListCommandInvocationsInput) SetInstanceId(v string) *ListCommandInvocationsInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCommandInvocationsInput) SetMaxResults(v int64) *ListCommandInvocationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCommandInvocationsInput) SetNextToken(v string) *ListCommandInvocationsInput { + s.NextToken = &v + return s +} + +type ListCommandInvocationsOutput struct { + _ struct{} `type:"structure"` + + // (Optional) A list of all invocations. 
+ CommandInvocations []*CommandInvocation `type:"list"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandInvocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandInvocationsOutput) GoString() string { + return s.String() +} + +// SetCommandInvocations sets the CommandInvocations field's value. +func (s *ListCommandInvocationsOutput) SetCommandInvocations(v []*CommandInvocation) *ListCommandInvocationsOutput { + s.CommandInvocations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCommandInvocationsOutput) SetNextToken(v string) *ListCommandInvocationsOutput { + s.NextToken = &v + return s +} + +type ListCommandsInput struct { + _ struct{} `type:"structure"` + + // (Optional) If provided, lists only the specified command. + CommandId *string `min:"36" type:"string"` + + // (Optional) One or more filters. Use a filter to return a more specific list + // of results. + Filters []*CommandFilter `min:"1" type:"list"` + + // (Optional) Lists commands issued against this instance ID. + // + // You can't specify an instance ID in the same command that you specify Status + // = Pending. This is because the command has not reached the instance yet. + InstanceId *string `type:"string"` + + // (Optional) The maximum number of items to return for this call. The call + // also returns a token that you can specify in a subsequent call to get the + // next set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCommandsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCommandsInput"} + if s.CommandId != nil && len(*s.CommandId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("CommandId", 36)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommandId sets the CommandId field's value. +func (s *ListCommandsInput) SetCommandId(v string) *ListCommandsInput { + s.CommandId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListCommandsInput) SetFilters(v []*CommandFilter) *ListCommandsInput { + s.Filters = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ListCommandsInput) SetInstanceId(v string) *ListCommandsInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCommandsInput) SetMaxResults(v int64) *ListCommandsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListCommandsInput) SetNextToken(v string) *ListCommandsInput { + s.NextToken = &v + return s +} + +type ListCommandsOutput struct { + _ struct{} `type:"structure"` + + // (Optional) The list of commands requested by the user. + Commands []*Command `type:"list"` + + // (Optional) The token for the next set of items to return. (You received this + // token from a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCommandsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCommandsOutput) GoString() string { + return s.String() +} + +// SetCommands sets the Commands field's value. +func (s *ListCommandsOutput) SetCommands(v []*Command) *ListCommandsOutput { + s.Commands = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCommandsOutput) SetNextToken(v string) *ListCommandsOutput { + s.NextToken = &v + return s +} + +type ListComplianceItemsInput struct { + _ struct{} `type:"structure"` + + // One or more compliance filters. Use a filter to return a more specific list + // of results. + Filters []*ComplianceStringFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` + + // The ID for the resources from which to get compliance information. Currently, + // you can only specify one resource ID. + ResourceIds []*string `min:"1" type:"list"` + + // The type of resource from which to get compliance information. Currently, + // the only supported resource type is ManagedInstance. 
+ ResourceTypes []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListComplianceItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComplianceItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListComplianceItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListComplianceItemsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResourceIds != nil && len(s.ResourceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceIds", 1)) + } + if s.ResourceTypes != nil && len(s.ResourceTypes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceTypes", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListComplianceItemsInput) SetFilters(v []*ComplianceStringFilter) *ListComplianceItemsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListComplianceItemsInput) SetMaxResults(v int64) *ListComplianceItemsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComplianceItemsInput) SetNextToken(v string) *ListComplianceItemsInput { + s.NextToken = &v + return s +} + +// SetResourceIds sets the ResourceIds field's value. +func (s *ListComplianceItemsInput) SetResourceIds(v []*string) *ListComplianceItemsInput { + s.ResourceIds = v + return s +} + +// SetResourceTypes sets the ResourceTypes field's value. 
+func (s *ListComplianceItemsInput) SetResourceTypes(v []*string) *ListComplianceItemsInput { + s.ResourceTypes = v + return s +} + +type ListComplianceItemsOutput struct { + _ struct{} `type:"structure"` + + // A list of compliance information for the specified resource ID. + ComplianceItems []*ComplianceItem `type:"list"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListComplianceItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComplianceItemsOutput) GoString() string { + return s.String() +} + +// SetComplianceItems sets the ComplianceItems field's value. +func (s *ListComplianceItemsOutput) SetComplianceItems(v []*ComplianceItem) *ListComplianceItemsOutput { + s.ComplianceItems = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComplianceItemsOutput) SetNextToken(v string) *ListComplianceItemsOutput { + s.NextToken = &v + return s +} + +type ListComplianceSummariesInput struct { + _ struct{} `type:"structure"` + + // One or more compliance or inventory filters. Use a filter to return a more + // specific list of results. + Filters []*ComplianceStringFilter `type:"list"` + + // The maximum number of items to return for this call. Currently, you can specify + // null or 50. The call also returns a token that you can specify in a subsequent + // call to get the next set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListComplianceSummariesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComplianceSummariesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListComplianceSummariesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListComplianceSummariesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListComplianceSummariesInput) SetFilters(v []*ComplianceStringFilter) *ListComplianceSummariesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListComplianceSummariesInput) SetMaxResults(v int64) *ListComplianceSummariesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComplianceSummariesInput) SetNextToken(v string) *ListComplianceSummariesInput { + s.NextToken = &v + return s +} + +type ListComplianceSummariesOutput struct { + _ struct{} `type:"structure"` + + // A list of compliant and non-compliant summary counts based on compliance + // types. For example, this call returns State Manager associations, patches, + // or custom compliance types according to the filter criteria that you specified. + ComplianceSummaryItems []*ComplianceSummaryItem `type:"list"` + + // The token for the next set of items to return. 
Use this token to get the + // next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListComplianceSummariesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListComplianceSummariesOutput) GoString() string { + return s.String() +} + +// SetComplianceSummaryItems sets the ComplianceSummaryItems field's value. +func (s *ListComplianceSummariesOutput) SetComplianceSummaryItems(v []*ComplianceSummaryItem) *ListComplianceSummariesOutput { + s.ComplianceSummaryItems = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListComplianceSummariesOutput) SetNextToken(v string) *ListComplianceSummariesOutput { + s.NextToken = &v + return s +} + +type ListDocumentVersionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The name of the document. You can specify an Amazon Resource Name (ARN). + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListDocumentVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDocumentVersionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDocumentVersionsInput) SetMaxResults(v int64) *ListDocumentVersionsInput { + s.MaxResults = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListDocumentVersionsInput) SetName(v string) *ListDocumentVersionsInput { + s.Name = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDocumentVersionsInput) SetNextToken(v string) *ListDocumentVersionsInput { + s.NextToken = &v + return s +} + +type ListDocumentVersionsOutput struct { + _ struct{} `type:"structure"` + + // The document versions. + DocumentVersions []*DocumentVersionInfo `min:"1" type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentVersionsOutput) GoString() string { + return s.String() +} + +// SetDocumentVersions sets the DocumentVersions field's value. +func (s *ListDocumentVersionsOutput) SetDocumentVersions(v []*DocumentVersionInfo) *ListDocumentVersionsOutput { + s.DocumentVersions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListDocumentVersionsOutput) SetNextToken(v string) *ListDocumentVersionsOutput { + s.NextToken = &v + return s +} + +type ListDocumentsInput struct { + _ struct{} `type:"structure"` + + // This data type is deprecated. Instead, use Filters. + DocumentFilterList []*DocumentFilter `min:"1" type:"list"` + + // One or more DocumentKeyValuesFilter objects. Use a filter to return a more + // specific list of results. For keys, you can specify one or more key-value + // pair tags that have been applied to a document. Other valid keys include + // Owner, Name, PlatformTypes, DocumentType, and TargetType. For example, to + // return documents you own use Key=Owner,Values=Self. To specify a custom key-value + // pair, use the format Key=tag:tagName,Values=valueName. + Filters []*DocumentKeyValuesFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListDocumentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDocumentsInput"} + if s.DocumentFilterList != nil && len(s.DocumentFilterList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DocumentFilterList", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.DocumentFilterList != nil { + for i, v := range s.DocumentFilterList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DocumentFilterList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentFilterList sets the DocumentFilterList field's value. +func (s *ListDocumentsInput) SetDocumentFilterList(v []*DocumentFilter) *ListDocumentsInput { + s.DocumentFilterList = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListDocumentsInput) SetFilters(v []*DocumentKeyValuesFilter) *ListDocumentsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDocumentsInput) SetMaxResults(v int64) *ListDocumentsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDocumentsInput) SetNextToken(v string) *ListDocumentsInput { + s.NextToken = &v + return s +} + +type ListDocumentsOutput struct { + _ struct{} `type:"structure"` + + // The names of the Systems Manager documents. + DocumentIdentifiers []*DocumentIdentifier `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDocumentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDocumentsOutput) GoString() string { + return s.String() +} + +// SetDocumentIdentifiers sets the DocumentIdentifiers field's value. +func (s *ListDocumentsOutput) SetDocumentIdentifiers(v []*DocumentIdentifier) *ListDocumentsOutput { + s.DocumentIdentifiers = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDocumentsOutput) SetNextToken(v string) *ListDocumentsOutput { + s.NextToken = &v + return s +} + +type ListInventoryEntriesInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + Filters []*InventoryFilter `min:"1" type:"list"` + + // The instance ID for which you want inventory information. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` + + // The type of inventory item for which you want information. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListInventoryEntriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInventoryEntriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListInventoryEntriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInventoryEntriesInput"} + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListInventoryEntriesInput) SetFilters(v []*InventoryFilter) *ListInventoryEntriesInput { + s.Filters = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ListInventoryEntriesInput) SetInstanceId(v string) *ListInventoryEntriesInput { + s.InstanceId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListInventoryEntriesInput) SetMaxResults(v int64) *ListInventoryEntriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInventoryEntriesInput) SetNextToken(v string) *ListInventoryEntriesInput { + s.NextToken = &v + return s +} + +// SetTypeName sets the TypeName field's value. 
+func (s *ListInventoryEntriesInput) SetTypeName(v string) *ListInventoryEntriesInput { + s.TypeName = &v + return s +} + +type ListInventoryEntriesOutput struct { + _ struct{} `type:"structure"` + + // The time that inventory information was collected for the instance(s). + CaptureTime *string `type:"string"` + + // A list of inventory items on the instance(s). + Entries []map[string]*string `type:"list"` + + // The instance ID targeted by the request to query inventory information. + InstanceId *string `type:"string"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` + + // The inventory schema version used by the instance(s). + SchemaVersion *string `type:"string"` + + // The type of inventory item returned by the request. + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInventoryEntriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInventoryEntriesOutput) GoString() string { + return s.String() +} + +// SetCaptureTime sets the CaptureTime field's value. +func (s *ListInventoryEntriesOutput) SetCaptureTime(v string) *ListInventoryEntriesOutput { + s.CaptureTime = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *ListInventoryEntriesOutput) SetEntries(v []map[string]*string) *ListInventoryEntriesOutput { + s.Entries = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ListInventoryEntriesOutput) SetInstanceId(v string) *ListInventoryEntriesOutput { + s.InstanceId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListInventoryEntriesOutput) SetNextToken(v string) *ListInventoryEntriesOutput { + s.NextToken = &v + return s +} + +// SetSchemaVersion sets the SchemaVersion field's value. 
+func (s *ListInventoryEntriesOutput) SetSchemaVersion(v string) *ListInventoryEntriesOutput { + s.SchemaVersion = &v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *ListInventoryEntriesOutput) SetTypeName(v string) *ListInventoryEntriesOutput { + s.TypeName = &v + return s +} + +type ListResourceComplianceSummariesInput struct { + _ struct{} `type:"structure"` + + // One or more filters. Use a filter to return a more specific list of results. + Filters []*ComplianceStringFilter `type:"list"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListResourceComplianceSummariesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceComplianceSummariesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceComplianceSummariesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceComplianceSummariesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. 
+func (s *ListResourceComplianceSummariesInput) SetFilters(v []*ComplianceStringFilter) *ListResourceComplianceSummariesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResourceComplianceSummariesInput) SetMaxResults(v int64) *ListResourceComplianceSummariesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourceComplianceSummariesInput) SetNextToken(v string) *ListResourceComplianceSummariesInput { + s.NextToken = &v + return s +} + +type ListResourceComplianceSummariesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` + + // A summary count for specified or targeted managed instances. Summary count + // includes information about compliant and non-compliant State Manager associations, + // patch status, or custom items according to the filter criteria that you specify. + ResourceComplianceSummaryItems []*ResourceComplianceSummaryItem `type:"list"` +} + +// String returns the string representation +func (s ListResourceComplianceSummariesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceComplianceSummariesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourceComplianceSummariesOutput) SetNextToken(v string) *ListResourceComplianceSummariesOutput { + s.NextToken = &v + return s +} + +// SetResourceComplianceSummaryItems sets the ResourceComplianceSummaryItems field's value. 
+func (s *ListResourceComplianceSummariesOutput) SetResourceComplianceSummaryItems(v []*ResourceComplianceSummaryItem) *ListResourceComplianceSummariesOutput { + s.ResourceComplianceSummaryItems = v + return s +} + +type ListResourceDataSyncInput struct { + _ struct{} `type:"structure"` + + // The maximum number of items to return for this call. The call also returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"1" type:"integer"` + + // A token to start the list. Use this token to get the next set of results. + NextToken *string `type:"string"` + + // View a list of resource data syncs according to the sync type. Specify SyncToDestination + // to view resource data syncs that synchronize data to an Amazon S3 bucket. + // Specify SyncFromSource to view resource data syncs from AWS Organizations + // or from multiple AWS Regions. + SyncType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListResourceDataSyncInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceDataSyncInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceDataSyncInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceDataSyncInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SyncType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResourceDataSyncInput) SetMaxResults(v int64) *ListResourceDataSyncInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListResourceDataSyncInput) SetNextToken(v string) *ListResourceDataSyncInput { + s.NextToken = &v + return s +} + +// SetSyncType sets the SyncType field's value. +func (s *ListResourceDataSyncInput) SetSyncType(v string) *ListResourceDataSyncInput { + s.SyncType = &v + return s +} + +type ListResourceDataSyncOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. Use this token to get the + // next set of results. + NextToken *string `type:"string"` + + // A list of your current Resource Data Sync configurations and their statuses. + ResourceDataSyncItems []*ResourceDataSyncItem `type:"list"` +} + +// String returns the string representation +func (s ListResourceDataSyncOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceDataSyncOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourceDataSyncOutput) SetNextToken(v string) *ListResourceDataSyncOutput { + s.NextToken = &v + return s +} + +// SetResourceDataSyncItems sets the ResourceDataSyncItems field's value. +func (s *ListResourceDataSyncOutput) SetResourceDataSyncItems(v []*ResourceDataSyncItem) *ListResourceDataSyncOutput { + s.ResourceDataSyncItems = v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The resource ID for which you want to see a list of tags. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // Returns a list of tags for a specific resource type. 
+ // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *ListTagsForResourceInput) SetResourceId(v string) *ListTagsForResourceInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ListTagsForResourceInput) SetResourceType(v string) *ListTagsForResourceInput { + s.ResourceType = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A list of tags. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTagList sets the TagList field's value. +func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOutput { + s.TagList = v + return s +} + +// Information about an S3 bucket to write instance-level logs to. +// +// LoggingInfo has been deprecated. 
To specify an S3 bucket to contain logs, +// instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +type LoggingInfo struct { + _ struct{} `type:"structure"` + + // The name of an S3 bucket where execution logs are stored . + // + // S3BucketName is a required field + S3BucketName *string `min:"3" type:"string" required:"true"` + + // (Optional) The S3 bucket subfolder. + S3KeyPrefix *string `type:"string"` + + // The Region where the S3 bucket is located. + // + // S3Region is a required field + S3Region *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingInfo"} + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + if s.S3BucketName != nil && len(*s.S3BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3BucketName", 3)) + } + if s.S3Region == nil { + invalidParams.Add(request.NewErrParamRequired("S3Region")) + } + if s.S3Region != nil && len(*s.S3Region) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3Region", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *LoggingInfo) SetS3BucketName(v string) *LoggingInfo { + s.S3BucketName = &v + return s +} + +// SetS3KeyPrefix sets the S3KeyPrefix field's value. 
+func (s *LoggingInfo) SetS3KeyPrefix(v string) *LoggingInfo { + s.S3KeyPrefix = &v + return s +} + +// SetS3Region sets the S3Region field's value. +func (s *LoggingInfo) SetS3Region(v string) *LoggingInfo { + s.S3Region = &v + return s +} + +// The parameters for an AUTOMATION task type. +type MaintenanceWindowAutomationParameters struct { + _ struct{} `type:"structure"` + + // The version of an Automation document to use during task execution. + DocumentVersion *string `type:"string"` + + // The parameters for the AUTOMATION task. + // + // For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow + // and UpdateMaintenanceWindowTask. + // + // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + // + // For AUTOMATION task types, Systems Manager ignores any values specified for + // these parameters. + Parameters map[string][]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s MaintenanceWindowAutomationParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowAutomationParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MaintenanceWindowAutomationParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowAutomationParameters"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *MaintenanceWindowAutomationParameters) SetDocumentVersion(v string) *MaintenanceWindowAutomationParameters { + s.DocumentVersion = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *MaintenanceWindowAutomationParameters) SetParameters(v map[string][]*string) *MaintenanceWindowAutomationParameters { + s.Parameters = v + return s +} + +// Describes the information about an execution of a maintenance window. +type MaintenanceWindowExecution struct { + _ struct{} `type:"structure"` + + // The time the execution finished. + EndTime *time.Time `type:"timestamp"` + + // The time the execution started. + StartTime *time.Time `type:"timestamp"` + + // The status of the execution. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the Status. Only available for certain status values. + StatusDetails *string `type:"string"` + + // The ID of the maintenance window execution. + WindowExecutionId *string `min:"36" type:"string"` + + // The ID of the maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowExecution) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. 
+func (s *MaintenanceWindowExecution) SetEndTime(v time.Time) *MaintenanceWindowExecution { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *MaintenanceWindowExecution) SetStartTime(v time.Time) *MaintenanceWindowExecution { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MaintenanceWindowExecution) SetStatus(v string) *MaintenanceWindowExecution { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *MaintenanceWindowExecution) SetStatusDetails(v string) *MaintenanceWindowExecution { + s.StatusDetails = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *MaintenanceWindowExecution) SetWindowExecutionId(v string) *MaintenanceWindowExecution { + s.WindowExecutionId = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *MaintenanceWindowExecution) SetWindowId(v string) *MaintenanceWindowExecution { + s.WindowId = &v + return s +} + +// Information about a task execution performed as part of a maintenance window +// execution. +type MaintenanceWindowExecutionTaskIdentity struct { + _ struct{} `type:"structure"` + + // The time the task execution finished. + EndTime *time.Time `type:"timestamp"` + + // The time the task execution started. + StartTime *time.Time `type:"timestamp"` + + // The status of the task execution. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the status of the task execution. Only available for + // certain status values. + StatusDetails *string `type:"string"` + + // The ARN of the task that ran. + TaskArn *string `min:"1" type:"string"` + + // The ID of the specific task execution in the maintenance window execution. + TaskExecutionId *string `min:"36" type:"string"` + + // The type of task that ran. 
+ TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The ID of the maintenance window execution that ran the task. + WindowExecutionId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowExecutionTaskIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowExecutionTaskIdentity) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetEndTime(v time.Time) *MaintenanceWindowExecutionTaskIdentity { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetStartTime(v time.Time) *MaintenanceWindowExecutionTaskIdentity { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetStatus(v string) *MaintenanceWindowExecutionTaskIdentity { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetStatusDetails(v string) *MaintenanceWindowExecutionTaskIdentity { + s.StatusDetails = &v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetTaskArn(v string) *MaintenanceWindowExecutionTaskIdentity { + s.TaskArn = &v + return s +} + +// SetTaskExecutionId sets the TaskExecutionId field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetTaskExecutionId(v string) *MaintenanceWindowExecutionTaskIdentity { + s.TaskExecutionId = &v + return s +} + +// SetTaskType sets the TaskType field's value. +func (s *MaintenanceWindowExecutionTaskIdentity) SetTaskType(v string) *MaintenanceWindowExecutionTaskIdentity { + s.TaskType = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. 
+func (s *MaintenanceWindowExecutionTaskIdentity) SetWindowExecutionId(v string) *MaintenanceWindowExecutionTaskIdentity { + s.WindowExecutionId = &v + return s +} + +// Describes the information about a task invocation for a particular target +// as part of a task execution performed as part of a maintenance window execution. +type MaintenanceWindowExecutionTaskInvocationIdentity struct { + _ struct{} `type:"structure"` + + // The time the invocation finished. + EndTime *time.Time `type:"timestamp"` + + // The ID of the action performed in the service that actually handled the task + // invocation. If the task type is RUN_COMMAND, this value is the command ID. + ExecutionId *string `type:"string"` + + // The ID of the task invocation. + InvocationId *string `min:"36" type:"string"` + + // User-provided value that was specified when the target was registered with + // the maintenance window. This was also included in any CloudWatch events raised + // during the task invocation. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // The parameters that were provided for the invocation when it was run. + Parameters *string `type:"string" sensitive:"true"` + + // The time the invocation started. + StartTime *time.Time `type:"timestamp"` + + // The status of the task invocation. + Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` + + // The details explaining the status of the task invocation. Only available + // for certain Status values. + StatusDetails *string `type:"string"` + + // The ID of the specific task execution in the maintenance window execution. + TaskExecutionId *string `min:"36" type:"string"` + + // The task type. + TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The ID of the maintenance window execution that ran the task. + WindowExecutionId *string `min:"36" type:"string"` + + // The ID of the target definition in this maintenance window the invocation + // was performed for. 
+ WindowTargetId *string `type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowExecutionTaskInvocationIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowExecutionTaskInvocationIdentity) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetEndTime(v time.Time) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.EndTime = &v + return s +} + +// SetExecutionId sets the ExecutionId field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetExecutionId(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.ExecutionId = &v + return s +} + +// SetInvocationId sets the InvocationId field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetInvocationId(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.InvocationId = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetOwnerInformation(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.OwnerInformation = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetParameters(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.Parameters = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetStartTime(v time.Time) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetStatus(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.Status = &v + return s +} + +// SetStatusDetails sets the StatusDetails field's value. 
+func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetStatusDetails(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.StatusDetails = &v + return s +} + +// SetTaskExecutionId sets the TaskExecutionId field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetTaskExecutionId(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.TaskExecutionId = &v + return s +} + +// SetTaskType sets the TaskType field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetTaskType(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.TaskType = &v + return s +} + +// SetWindowExecutionId sets the WindowExecutionId field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetWindowExecutionId(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.WindowExecutionId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *MaintenanceWindowExecutionTaskInvocationIdentity) SetWindowTargetId(v string) *MaintenanceWindowExecutionTaskInvocationIdentity { + s.WindowTargetId = &v + return s +} + +// Filter used in the request. Supported filter keys are Name and Enabled. +type MaintenanceWindowFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + Key *string `min:"1" type:"string"` + + // The filter values. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s MaintenanceWindowFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MaintenanceWindowFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowFilter"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *MaintenanceWindowFilter) SetKey(v string) *MaintenanceWindowFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *MaintenanceWindowFilter) SetValues(v []*string) *MaintenanceWindowFilter { + s.Values = v + return s +} + +// Information about the maintenance window. +type MaintenanceWindowIdentity struct { + _ struct{} `type:"structure"` + + // The number of hours before the end of the maintenance window that Systems + // Manager stops scheduling new tasks for execution. + Cutoff *int64 `type:"integer"` + + // A description of the maintenance window. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The duration of the maintenance window in hours. + Duration *int64 `min:"1" type:"integer"` + + // Indicates whether the maintenance window is enabled. + Enabled *bool `type:"boolean"` + + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. + EndDate *string `type:"string"` + + // The name of the maintenance window. + Name *string `min:"3" type:"string"` + + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. + NextExecutionTime *string `type:"string"` + + // The schedule of the maintenance window in the form of a cron or rate expression. + Schedule *string `min:"1" type:"string"` + + // The number of days to wait to run a maintenance window after the scheduled + // CRON expression date and time. 
+ ScheduleOffset *int64 `min:"1" type:"integer"` + + // The time zone that the scheduled maintenance window executions are based + // on, in Internet Assigned Numbers Authority (IANA) format. + ScheduleTimezone *string `type:"string"` + + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. + StartDate *string `type:"string"` + + // The ID of the maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowIdentity) GoString() string { + return s.String() +} + +// SetCutoff sets the Cutoff field's value. +func (s *MaintenanceWindowIdentity) SetCutoff(v int64) *MaintenanceWindowIdentity { + s.Cutoff = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *MaintenanceWindowIdentity) SetDescription(v string) *MaintenanceWindowIdentity { + s.Description = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *MaintenanceWindowIdentity) SetDuration(v int64) *MaintenanceWindowIdentity { + s.Duration = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *MaintenanceWindowIdentity) SetEnabled(v bool) *MaintenanceWindowIdentity { + s.Enabled = &v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *MaintenanceWindowIdentity) SetEndDate(v string) *MaintenanceWindowIdentity { + s.EndDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *MaintenanceWindowIdentity) SetName(v string) *MaintenanceWindowIdentity { + s.Name = &v + return s +} + +// SetNextExecutionTime sets the NextExecutionTime field's value. 
+func (s *MaintenanceWindowIdentity) SetNextExecutionTime(v string) *MaintenanceWindowIdentity { + s.NextExecutionTime = &v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *MaintenanceWindowIdentity) SetSchedule(v string) *MaintenanceWindowIdentity { + s.Schedule = &v + return s +} + +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *MaintenanceWindowIdentity) SetScheduleOffset(v int64) *MaintenanceWindowIdentity { + s.ScheduleOffset = &v + return s +} + +// SetScheduleTimezone sets the ScheduleTimezone field's value. +func (s *MaintenanceWindowIdentity) SetScheduleTimezone(v string) *MaintenanceWindowIdentity { + s.ScheduleTimezone = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *MaintenanceWindowIdentity) SetStartDate(v string) *MaintenanceWindowIdentity { + s.StartDate = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *MaintenanceWindowIdentity) SetWindowId(v string) *MaintenanceWindowIdentity { + s.WindowId = &v + return s +} + +// The maintenance window to which the specified target belongs. +type MaintenanceWindowIdentityForTarget struct { + _ struct{} `type:"structure"` + + // The name of the maintenance window. + Name *string `min:"3" type:"string"` + + // The ID of the maintenance window. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowIdentityForTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowIdentityForTarget) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MaintenanceWindowIdentityForTarget) SetName(v string) *MaintenanceWindowIdentityForTarget { + s.Name = &v + return s +} + +// SetWindowId sets the WindowId field's value. 
+func (s *MaintenanceWindowIdentityForTarget) SetWindowId(v string) *MaintenanceWindowIdentityForTarget { + s.WindowId = &v + return s +} + +// The parameters for a LAMBDA task type. +// +// For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow +// and UpdateMaintenanceWindowTask. +// +// LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, +// instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// TaskParameters has been deprecated. To specify parameters to pass to a task +// when it runs, instead use the Parameters option in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// For Lambda tasks, Systems Manager ignores any values specified for TaskParameters +// and LoggingInfo. +type MaintenanceWindowLambdaParameters struct { + _ struct{} `type:"structure"` + + // Pass client-specific information to the Lambda function that you are invoking. + // You can then process the client information in your Lambda function as you + // choose through the context variable. + ClientContext *string `min:"1" type:"string"` + + // JSON to provide to your Lambda function as input. + // + // Payload is automatically base64 encoded/decoded by the SDK. + Payload []byte `type:"blob" sensitive:"true"` + + // (Optional) Specify a Lambda function version or alias name. If you specify + // a function version, the action uses the qualified function ARN to invoke + // a specific Lambda function. If you specify an alias name, the action uses + // the alias ARN to invoke the Lambda function version to which the alias points. 
+ Qualifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowLambdaParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowLambdaParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MaintenanceWindowLambdaParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowLambdaParameters"} + if s.ClientContext != nil && len(*s.ClientContext) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientContext", 1)) + } + if s.Qualifier != nil && len(*s.Qualifier) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Qualifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientContext sets the ClientContext field's value. +func (s *MaintenanceWindowLambdaParameters) SetClientContext(v string) *MaintenanceWindowLambdaParameters { + s.ClientContext = &v + return s +} + +// SetPayload sets the Payload field's value. +func (s *MaintenanceWindowLambdaParameters) SetPayload(v []byte) *MaintenanceWindowLambdaParameters { + s.Payload = v + return s +} + +// SetQualifier sets the Qualifier field's value. +func (s *MaintenanceWindowLambdaParameters) SetQualifier(v string) *MaintenanceWindowLambdaParameters { + s.Qualifier = &v + return s +} + +// The parameters for a RUN_COMMAND task type. +// +// For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow +// and UpdateMaintenanceWindowTask. +// +// LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, +// instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters +// structure. 
For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// TaskParameters has been deprecated. To specify parameters to pass to a task +// when it runs, instead use the Parameters option in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// For Run Command tasks, Systems Manager uses specified values for TaskParameters +// and LoggingInfo only if no values are specified for TaskInvocationParameters. +type MaintenanceWindowRunCommandParameters struct { + _ struct{} `type:"structure"` + + // Configuration options for sending command output to CloudWatch Logs. + CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"` + + // Information about the commands to run. + Comment *string `type:"string"` + + // The SHA-256 or SHA-1 hash created by the system when the document was created. + // SHA-1 hashes have been deprecated. + DocumentHash *string `type:"string"` + + // SHA-256 or SHA-1. SHA-1 hashes have been deprecated. + DocumentHashType *string `type:"string" enum:"DocumentHashType"` + + // The SSM document version to use in the request. You can specify $DEFAULT, + // $LATEST, or a specific version number. If you run commands by using the AWS + // CLI, then you must escape the first two options by using a backslash. If + // you specify a version number, then you don't need to use the backslash. For + // example: + // + // --document-version "\$DEFAULT" + // + // --document-version "\$LATEST" + // + // --document-version "3" + DocumentVersion *string `type:"string"` + + // Configurations for sending notifications about command status changes on + // a per-instance basis. + NotificationConfig *NotificationConfig `type:"structure"` + + // The name of the S3 bucket. 
+ OutputS3BucketName *string `min:"3" type:"string"` + + // The S3 bucket subfolder. + OutputS3KeyPrefix *string `type:"string"` + + // The parameters for the RUN_COMMAND task execution. + Parameters map[string][]*string `type:"map"` + + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. + ServiceRoleArn *string `type:"string"` + + // If this time is reached and the command has not already started running, + // it doesn't run. + TimeoutSeconds *int64 `min:"30" type:"integer"` +} + +// String returns the string representation +func (s MaintenanceWindowRunCommandParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowRunCommandParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MaintenanceWindowRunCommandParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowRunCommandParameters"} + if s.OutputS3BucketName != nil && len(*s.OutputS3BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("OutputS3BucketName", 3)) + } + if s.TimeoutSeconds != nil && *s.TimeoutSeconds < 30 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutSeconds", 30)) + } + if s.CloudWatchOutputConfig != nil { + if err := s.CloudWatchOutputConfig.Validate(); err != nil { + invalidParams.AddNested("CloudWatchOutputConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value. +func (s *MaintenanceWindowRunCommandParameters) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *MaintenanceWindowRunCommandParameters { + s.CloudWatchOutputConfig = v + return s +} + +// SetComment sets the Comment field's value. 
+func (s *MaintenanceWindowRunCommandParameters) SetComment(v string) *MaintenanceWindowRunCommandParameters { + s.Comment = &v + return s +} + +// SetDocumentHash sets the DocumentHash field's value. +func (s *MaintenanceWindowRunCommandParameters) SetDocumentHash(v string) *MaintenanceWindowRunCommandParameters { + s.DocumentHash = &v + return s +} + +// SetDocumentHashType sets the DocumentHashType field's value. +func (s *MaintenanceWindowRunCommandParameters) SetDocumentHashType(v string) *MaintenanceWindowRunCommandParameters { + s.DocumentHashType = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *MaintenanceWindowRunCommandParameters) SetDocumentVersion(v string) *MaintenanceWindowRunCommandParameters { + s.DocumentVersion = &v + return s +} + +// SetNotificationConfig sets the NotificationConfig field's value. +func (s *MaintenanceWindowRunCommandParameters) SetNotificationConfig(v *NotificationConfig) *MaintenanceWindowRunCommandParameters { + s.NotificationConfig = v + return s +} + +// SetOutputS3BucketName sets the OutputS3BucketName field's value. +func (s *MaintenanceWindowRunCommandParameters) SetOutputS3BucketName(v string) *MaintenanceWindowRunCommandParameters { + s.OutputS3BucketName = &v + return s +} + +// SetOutputS3KeyPrefix sets the OutputS3KeyPrefix field's value. +func (s *MaintenanceWindowRunCommandParameters) SetOutputS3KeyPrefix(v string) *MaintenanceWindowRunCommandParameters { + s.OutputS3KeyPrefix = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *MaintenanceWindowRunCommandParameters) SetParameters(v map[string][]*string) *MaintenanceWindowRunCommandParameters { + s.Parameters = v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. 
+func (s *MaintenanceWindowRunCommandParameters) SetServiceRoleArn(v string) *MaintenanceWindowRunCommandParameters { + s.ServiceRoleArn = &v + return s +} + +// SetTimeoutSeconds sets the TimeoutSeconds field's value. +func (s *MaintenanceWindowRunCommandParameters) SetTimeoutSeconds(v int64) *MaintenanceWindowRunCommandParameters { + s.TimeoutSeconds = &v + return s +} + +// The parameters for a STEP_FUNCTIONS task. +// +// For information about specifying and updating task parameters, see RegisterTaskWithMaintenanceWindow +// and UpdateMaintenanceWindowTask. +// +// LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, +// instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// TaskParameters has been deprecated. To specify parameters to pass to a task +// when it runs, instead use the Parameters option in the TaskInvocationParameters +// structure. For information about how Systems Manager handles these options +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. +// +// For Step Functions tasks, Systems Manager ignores any values specified for +// TaskParameters and LoggingInfo. +type MaintenanceWindowStepFunctionsParameters struct { + _ struct{} `type:"structure"` + + // The inputs for the STEP_FUNCTIONS task. + Input *string `type:"string" sensitive:"true"` + + // The name of the STEP_FUNCTIONS task. 
+ Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowStepFunctionsParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowStepFunctionsParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MaintenanceWindowStepFunctionsParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowStepFunctionsParameters"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInput sets the Input field's value. +func (s *MaintenanceWindowStepFunctionsParameters) SetInput(v string) *MaintenanceWindowStepFunctionsParameters { + s.Input = &v + return s +} + +// SetName sets the Name field's value. +func (s *MaintenanceWindowStepFunctionsParameters) SetName(v string) *MaintenanceWindowStepFunctionsParameters { + s.Name = &v + return s +} + +// The target registered with the maintenance window. +type MaintenanceWindowTarget struct { + _ struct{} `type:"structure"` + + // A description for the target. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The name for the maintenance window target. + Name *string `min:"3" type:"string"` + + // A user-provided value that will be included in any CloudWatch events that + // are raised while running tasks for these targets in this maintenance window. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // The type of target that is being registered with the maintenance window. + ResourceType *string `type:"string" enum:"MaintenanceWindowResourceType"` + + // The targets, either instances or tags. 
+ // + // Specify instances using the following format: + // + // Key=instanceids,Values=, + // + // Tags are specified using the following format: + // + // Key=,Values=. + Targets []*Target `type:"list"` + + // The ID of the maintenance window to register the target with. + WindowId *string `min:"20" type:"string"` + + // The ID of the target. + WindowTargetId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowTarget) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *MaintenanceWindowTarget) SetDescription(v string) *MaintenanceWindowTarget { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *MaintenanceWindowTarget) SetName(v string) *MaintenanceWindowTarget { + s.Name = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *MaintenanceWindowTarget) SetOwnerInformation(v string) *MaintenanceWindowTarget { + s.OwnerInformation = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *MaintenanceWindowTarget) SetResourceType(v string) *MaintenanceWindowTarget { + s.ResourceType = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *MaintenanceWindowTarget) SetTargets(v []*Target) *MaintenanceWindowTarget { + s.Targets = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *MaintenanceWindowTarget) SetWindowId(v string) *MaintenanceWindowTarget { + s.WindowId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *MaintenanceWindowTarget) SetWindowTargetId(v string) *MaintenanceWindowTarget { + s.WindowTargetId = &v + return s +} + +// Information about a task defined for a maintenance window. 
+type MaintenanceWindowTask struct { + _ struct{} `type:"structure"` + + // A description of the task. + Description *string `min:"1" type:"string" sensitive:"true"` + + // Information about an S3 bucket to write task-level logs to. + // + // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + LoggingInfo *LoggingInfo `type:"structure"` + + // The maximum number of targets this task can be run for, in parallel. + MaxConcurrency *string `min:"1" type:"string"` + + // The maximum number of errors allowed before this task stops being scheduled. + MaxErrors *string `min:"1" type:"string"` + + // The task name. + Name *string `min:"3" type:"string"` + + // The priority of the task in the maintenance window. The lower the number, + // the higher the priority. Tasks that have the same priority are scheduled + // in parallel. + Priority *int64 `type:"integer"` + + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. + ServiceRoleArn *string `type:"string"` + + // The targets (either instances or tags). Instances are specified using Key=instanceids,Values=,. + // Tags are specified using Key=,Values=. + Targets []*Target `type:"list"` + + // The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION + // task types, TaskArn is the Systems Manager document name or ARN. For LAMBDA + // tasks, it's the function name or ARN. For STEP_FUNCTIONS tasks, it's the + // state machine ARN. + TaskArn *string `min:"1" type:"string"` + + // The parameters that should be passed to the task when it is run. + // + // TaskParameters has been deprecated. 
To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` + + // The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, + // LAMBDA, or STEP_FUNCTIONS. + Type *string `type:"string" enum:"MaintenanceWindowTaskType"` + + // The ID of the maintenance window where the task is registered. + WindowId *string `min:"20" type:"string"` + + // The task ID. + WindowTaskId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s MaintenanceWindowTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *MaintenanceWindowTask) SetDescription(v string) *MaintenanceWindowTask { + s.Description = &v + return s +} + +// SetLoggingInfo sets the LoggingInfo field's value. +func (s *MaintenanceWindowTask) SetLoggingInfo(v *LoggingInfo) *MaintenanceWindowTask { + s.LoggingInfo = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *MaintenanceWindowTask) SetMaxConcurrency(v string) *MaintenanceWindowTask { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *MaintenanceWindowTask) SetMaxErrors(v string) *MaintenanceWindowTask { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *MaintenanceWindowTask) SetName(v string) *MaintenanceWindowTask { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. 
+func (s *MaintenanceWindowTask) SetPriority(v int64) *MaintenanceWindowTask { + s.Priority = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. +func (s *MaintenanceWindowTask) SetServiceRoleArn(v string) *MaintenanceWindowTask { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *MaintenanceWindowTask) SetTargets(v []*Target) *MaintenanceWindowTask { + s.Targets = v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *MaintenanceWindowTask) SetTaskArn(v string) *MaintenanceWindowTask { + s.TaskArn = &v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *MaintenanceWindowTask) SetTaskParameters(v map[string]*MaintenanceWindowTaskParameterValueExpression) *MaintenanceWindowTask { + s.TaskParameters = v + return s +} + +// SetType sets the Type field's value. +func (s *MaintenanceWindowTask) SetType(v string) *MaintenanceWindowTask { + s.Type = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *MaintenanceWindowTask) SetWindowId(v string) *MaintenanceWindowTask { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *MaintenanceWindowTask) SetWindowTaskId(v string) *MaintenanceWindowTask { + s.WindowTaskId = &v + return s +} + +// The parameters for task execution. +type MaintenanceWindowTaskInvocationParameters struct { + _ struct{} `type:"structure"` + + // The parameters for an AUTOMATION task type. + Automation *MaintenanceWindowAutomationParameters `type:"structure"` + + // The parameters for a LAMBDA task type. + Lambda *MaintenanceWindowLambdaParameters `type:"structure"` + + // The parameters for a RUN_COMMAND task type. + RunCommand *MaintenanceWindowRunCommandParameters `type:"structure"` + + // The parameters for a STEP_FUNCTIONS task type. 
+ StepFunctions *MaintenanceWindowStepFunctionsParameters `type:"structure"` +} + +// String returns the string representation +func (s MaintenanceWindowTaskInvocationParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowTaskInvocationParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MaintenanceWindowTaskInvocationParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MaintenanceWindowTaskInvocationParameters"} + if s.Automation != nil { + if err := s.Automation.Validate(); err != nil { + invalidParams.AddNested("Automation", err.(request.ErrInvalidParams)) + } + } + if s.Lambda != nil { + if err := s.Lambda.Validate(); err != nil { + invalidParams.AddNested("Lambda", err.(request.ErrInvalidParams)) + } + } + if s.RunCommand != nil { + if err := s.RunCommand.Validate(); err != nil { + invalidParams.AddNested("RunCommand", err.(request.ErrInvalidParams)) + } + } + if s.StepFunctions != nil { + if err := s.StepFunctions.Validate(); err != nil { + invalidParams.AddNested("StepFunctions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomation sets the Automation field's value. +func (s *MaintenanceWindowTaskInvocationParameters) SetAutomation(v *MaintenanceWindowAutomationParameters) *MaintenanceWindowTaskInvocationParameters { + s.Automation = v + return s +} + +// SetLambda sets the Lambda field's value. +func (s *MaintenanceWindowTaskInvocationParameters) SetLambda(v *MaintenanceWindowLambdaParameters) *MaintenanceWindowTaskInvocationParameters { + s.Lambda = v + return s +} + +// SetRunCommand sets the RunCommand field's value. 
+func (s *MaintenanceWindowTaskInvocationParameters) SetRunCommand(v *MaintenanceWindowRunCommandParameters) *MaintenanceWindowTaskInvocationParameters { + s.RunCommand = v + return s +} + +// SetStepFunctions sets the StepFunctions field's value. +func (s *MaintenanceWindowTaskInvocationParameters) SetStepFunctions(v *MaintenanceWindowStepFunctionsParameters) *MaintenanceWindowTaskInvocationParameters { + s.StepFunctions = v + return s +} + +// Defines the values for a task parameter. +type MaintenanceWindowTaskParameterValueExpression struct { + _ struct{} `type:"structure" sensitive:"true"` + + // This field contains an array of 0 or more strings, each 1 to 255 characters + // in length. + Values []*string `type:"list" sensitive:"true"` +} + +// String returns the string representation +func (s MaintenanceWindowTaskParameterValueExpression) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaintenanceWindowTaskParameterValueExpression) GoString() string { + return s.String() +} + +// SetValues sets the Values field's value. +func (s *MaintenanceWindowTaskParameterValueExpression) SetValues(v []*string) *MaintenanceWindowTaskParameterValueExpression { + s.Values = v + return s +} + +// The size limit of a document is 64 KB. +type MaxDocumentSizeExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s MaxDocumentSizeExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MaxDocumentSizeExceeded) GoString() string { + return s.String() +} + +func newErrorMaxDocumentSizeExceeded(v protocol.ResponseMetadata) error { + return &MaxDocumentSizeExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *MaxDocumentSizeExceeded) Code() string { + return "MaxDocumentSizeExceeded" +} + +// Message returns the exception's message. +func (s *MaxDocumentSizeExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MaxDocumentSizeExceeded) OrigErr() error { + return nil +} + +func (s *MaxDocumentSizeExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MaxDocumentSizeExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MaxDocumentSizeExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +type ModifyDocumentPermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS user accounts that should have access to the document. The account + // IDs can either be a group of account IDs or All. + AccountIdsToAdd []*string `type:"list"` + + // The AWS user accounts that should no longer have access to the document. + // The AWS user account can either be a group of account IDs or All. This action + // has a higher priority than AccountIdsToAdd. If you specify an account ID + // to add and the same ID to remove, the system removes access to the document. + AccountIdsToRemove []*string `type:"list"` + + // The name of the document that you want to share. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The permission type for the document. The permission type can be Share. + // + // PermissionType is a required field + PermissionType *string `type:"string" required:"true" enum:"DocumentPermissionType"` + + // (Optional) The version of the document to share. If it's not specified, the + // system choose the Default version to share. 
+ SharedDocumentVersion *string `type:"string"` +} + +// String returns the string representation +func (s ModifyDocumentPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDocumentPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDocumentPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDocumentPermissionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.PermissionType == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIdsToAdd sets the AccountIdsToAdd field's value. +func (s *ModifyDocumentPermissionInput) SetAccountIdsToAdd(v []*string) *ModifyDocumentPermissionInput { + s.AccountIdsToAdd = v + return s +} + +// SetAccountIdsToRemove sets the AccountIdsToRemove field's value. +func (s *ModifyDocumentPermissionInput) SetAccountIdsToRemove(v []*string) *ModifyDocumentPermissionInput { + s.AccountIdsToRemove = v + return s +} + +// SetName sets the Name field's value. +func (s *ModifyDocumentPermissionInput) SetName(v string) *ModifyDocumentPermissionInput { + s.Name = &v + return s +} + +// SetPermissionType sets the PermissionType field's value. +func (s *ModifyDocumentPermissionInput) SetPermissionType(v string) *ModifyDocumentPermissionInput { + s.PermissionType = &v + return s +} + +// SetSharedDocumentVersion sets the SharedDocumentVersion field's value. 
+func (s *ModifyDocumentPermissionInput) SetSharedDocumentVersion(v string) *ModifyDocumentPermissionInput { + s.SharedDocumentVersion = &v + return s +} + +type ModifyDocumentPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyDocumentPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDocumentPermissionOutput) GoString() string { + return s.String() +} + +// A summary of resources that are not compliant. The summary is organized according +// to resource type. +type NonCompliantSummary struct { + _ struct{} `type:"structure"` + + // The total number of compliance items that are not compliant. + NonCompliantCount *int64 `type:"integer"` + + // A summary of the non-compliance severity by compliance type + SeveritySummary *SeveritySummary `type:"structure"` +} + +// String returns the string representation +func (s NonCompliantSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NonCompliantSummary) GoString() string { + return s.String() +} + +// SetNonCompliantCount sets the NonCompliantCount field's value. +func (s *NonCompliantSummary) SetNonCompliantCount(v int64) *NonCompliantSummary { + s.NonCompliantCount = &v + return s +} + +// SetSeveritySummary sets the SeveritySummary field's value. +func (s *NonCompliantSummary) SetSeveritySummary(v *SeveritySummary) *NonCompliantSummary { + s.SeveritySummary = v + return s +} + +// Configurations for sending notifications. +type NotificationConfig struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) for an Amazon Simple Notification Service (Amazon + // SNS) topic. Run Command pushes notifications about command status changes + // to this topic. + NotificationArn *string `type:"string"` + + // The different events for which you can receive notifications. 
These events + // include the following: All (events), InProgress, Success, TimedOut, Cancelled, + // Failed. To learn more about these events, see Monitoring Systems Manager + // status changes using Amazon SNS notifications (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html) + // in the AWS Systems Manager User Guide. + NotificationEvents []*string `type:"list"` + + // Command: Receive notification when the status of a command changes. Invocation: + // For commands sent to multiple instances, receive notification on a per-instance + // basis when the status of a command changes. + NotificationType *string `type:"string" enum:"NotificationType"` +} + +// String returns the string representation +func (s NotificationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfig) GoString() string { + return s.String() +} + +// SetNotificationArn sets the NotificationArn field's value. +func (s *NotificationConfig) SetNotificationArn(v string) *NotificationConfig { + s.NotificationArn = &v + return s +} + +// SetNotificationEvents sets the NotificationEvents field's value. +func (s *NotificationConfig) SetNotificationEvents(v []*string) *NotificationConfig { + s.NotificationEvents = v + return s +} + +// SetNotificationType sets the NotificationType field's value. +func (s *NotificationConfig) SetNotificationType(v string) *NotificationConfig { + s.NotificationType = &v + return s +} + +// One or more aggregators for viewing counts of OpsItems using different dimensions +// such as Source, CreatedTime, or Source and CreatedTime, to name a few. +type OpsAggregator struct { + _ struct{} `type:"structure"` + + // Either a Range or Count aggregator for limiting an OpsItem summary. + AggregatorType *string `min:"1" type:"string"` + + // A nested aggregator for viewing counts of OpsItems. 
+ Aggregators []*OpsAggregator `min:"1" type:"list"` + + // The name of an OpsItem attribute on which to limit the count of OpsItems. + AttributeName *string `min:"1" type:"string"` + + // The aggregator filters. + Filters []*OpsFilter `min:"1" type:"list"` + + // The data type name to use for viewing counts of OpsItems. + TypeName *string `min:"1" type:"string"` + + // The aggregator value. + Values map[string]*string `type:"map"` +} + +// String returns the string representation +func (s OpsAggregator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsAggregator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsAggregator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsAggregator"} + if s.AggregatorType != nil && len(*s.AggregatorType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AggregatorType", 1)) + } + if s.Aggregators != nil && len(s.Aggregators) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.Filters != nil && len(s.Filters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + if s.Aggregators != nil { + for i, v := range s.Aggregators { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Aggregators", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + 
return invalidParams + } + return nil +} + +// SetAggregatorType sets the AggregatorType field's value. +func (s *OpsAggregator) SetAggregatorType(v string) *OpsAggregator { + s.AggregatorType = &v + return s +} + +// SetAggregators sets the Aggregators field's value. +func (s *OpsAggregator) SetAggregators(v []*OpsAggregator) *OpsAggregator { + s.Aggregators = v + return s +} + +// SetAttributeName sets the AttributeName field's value. +func (s *OpsAggregator) SetAttributeName(v string) *OpsAggregator { + s.AttributeName = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *OpsAggregator) SetFilters(v []*OpsFilter) *OpsAggregator { + s.Filters = v + return s +} + +// SetTypeName sets the TypeName field's value. +func (s *OpsAggregator) SetTypeName(v string) *OpsAggregator { + s.TypeName = &v + return s +} + +// SetValues sets the Values field's value. +func (s *OpsAggregator) SetValues(v map[string]*string) *OpsAggregator { + s.Values = v + return s +} + +// The result of the query. +type OpsEntity struct { + _ struct{} `type:"structure"` + + // The data returned by the query. + Data map[string]*OpsEntityItem `type:"map"` + + // The query ID. + Id *string `type:"string"` +} + +// String returns the string representation +func (s OpsEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsEntity) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. +func (s *OpsEntity) SetData(v map[string]*OpsEntityItem) *OpsEntity { + s.Data = v + return s +} + +// SetId sets the Id field's value. +func (s *OpsEntity) SetId(v string) *OpsEntity { + s.Id = &v + return s +} + +// The OpsItem summaries result item. +type OpsEntityItem struct { + _ struct{} `type:"structure"` + + // The time OpsItem data was captured. + CaptureTime *string `type:"string"` + + // The detailed data content for an OpsItem summaries result item. 
+ Content []map[string]*string `type:"list"` +} + +// String returns the string representation +func (s OpsEntityItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsEntityItem) GoString() string { + return s.String() +} + +// SetCaptureTime sets the CaptureTime field's value. +func (s *OpsEntityItem) SetCaptureTime(v string) *OpsEntityItem { + s.CaptureTime = &v + return s +} + +// SetContent sets the Content field's value. +func (s *OpsEntityItem) SetContent(v []map[string]*string) *OpsEntityItem { + s.Content = v + return s +} + +// A filter for viewing OpsItem summaries. +type OpsFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The type of filter. + Type *string `type:"string" enum:"OpsFilterOperatorType"` + + // The filter value. + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s OpsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. 
+func (s *OpsFilter) SetKey(v string) *OpsFilter { + s.Key = &v + return s +} + +// SetType sets the Type field's value. +func (s *OpsFilter) SetType(v string) *OpsFilter { + s.Type = &v + return s +} + +// SetValues sets the Values field's value. +func (s *OpsFilter) SetValues(v []*string) *OpsFilter { + s.Values = v + return s +} + +// Operations engineers and IT professionals use OpsCenter to view, investigate, +// and remediate operational issues impacting the performance and health of +// their AWS resources. For more information, see AWS Systems Manager OpsCenter +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// in the AWS Systems Manager User Guide. +type OpsItem struct { + _ struct{} `type:"structure"` + + // An OpsItem category. Category options include: Availability, Cost, Performance, + // Recovery, Security. + Category *string `min:"1" type:"string"` + + // The ARN of the AWS account that created the OpsItem. + CreatedBy *string `type:"string"` + + // The date and time the OpsItem was created. + CreatedTime *time.Time `type:"timestamp"` + + // The OpsItem description. + Description *string `min:"1" type:"string"` + + // The ARN of the AWS account that last updated the OpsItem. + LastModifiedBy *string `type:"string"` + + // The date and time the OpsItem was last updated. + LastModifiedTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Operational data is custom data that provides useful reference details about + // the OpsItem. For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. 
+ // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // The ID of the OpsItem. + OpsItemId *string `type:"string"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItem. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. + RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // The severity of the OpsItem. Severity options range from 1 to 4. + Severity *string `min:"1" type:"string"` + + // The origin of the OpsItem, such as Amazon EC2 or Systems Manager. The impacted + // resource is a subset of source. + Source *string `min:"1" type:"string"` + + // The OpsItem status. Status can be Open, In Progress, or Resolved. 
For more + // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) + // in the AWS Systems Manager User Guide. + Status *string `type:"string" enum:"OpsItemStatus"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + Title *string `min:"1" type:"string"` + + // The version of this OpsItem. Each time the OpsItem is edited the version + // number increments by one. + Version *string `type:"string"` +} + +// String returns the string representation +func (s OpsItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItem) GoString() string { + return s.String() +} + +// SetCategory sets the Category field's value. +func (s *OpsItem) SetCategory(v string) *OpsItem { + s.Category = &v + return s +} + +// SetCreatedBy sets the CreatedBy field's value. +func (s *OpsItem) SetCreatedBy(v string) *OpsItem { + s.CreatedBy = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *OpsItem) SetCreatedTime(v time.Time) *OpsItem { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *OpsItem) SetDescription(v string) *OpsItem { + s.Description = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *OpsItem) SetLastModifiedBy(v string) *OpsItem { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OpsItem) SetLastModifiedTime(v time.Time) *OpsItem { + s.LastModifiedTime = &v + return s +} + +// SetNotifications sets the Notifications field's value. +func (s *OpsItem) SetNotifications(v []*OpsItemNotification) *OpsItem { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. 
+func (s *OpsItem) SetOperationalData(v map[string]*OpsItemDataValue) *OpsItem { + s.OperationalData = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *OpsItem) SetOpsItemId(v string) *OpsItem { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *OpsItem) SetPriority(v int64) *OpsItem { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *OpsItem) SetRelatedOpsItems(v []*RelatedOpsItem) *OpsItem { + s.RelatedOpsItems = v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *OpsItem) SetSeverity(v string) *OpsItem { + s.Severity = &v + return s +} + +// SetSource sets the Source field's value. +func (s *OpsItem) SetSource(v string) *OpsItem { + s.Source = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *OpsItem) SetStatus(v string) *OpsItem { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *OpsItem) SetTitle(v string) *OpsItem { + s.Title = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *OpsItem) SetVersion(v string) *OpsItem { + s.Version = &v + return s +} + +// The OpsItem already exists. +type OpsItemAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + OpsItemId *string `type:"string"` +} + +// String returns the string representation +func (s OpsItemAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorOpsItemAlreadyExistsException(v protocol.ResponseMetadata) error { + return &OpsItemAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *OpsItemAlreadyExistsException) Code() string { + return "OpsItemAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *OpsItemAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OpsItemAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *OpsItemAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OpsItemAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OpsItemAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object that defines the value of the key and its type in the OperationalData +// map. +type OpsItemDataValue struct { + _ struct{} `type:"structure"` + + // The type of key-value pair. Valid types include SearchableString and String. + Type *string `type:"string" enum:"OpsItemDataType"` + + // The value of the OperationalData key. + Value *string `type:"string"` +} + +// String returns the string representation +func (s OpsItemDataValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemDataValue) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *OpsItemDataValue) SetType(v string) *OpsItemDataValue { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *OpsItemDataValue) SetValue(v string) *OpsItemDataValue { + s.Value = &v + return s +} + +// Describes an OpsItem filter. +type OpsItemFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. 
+ // + // Key is a required field + Key *string `type:"string" required:"true" enum:"OpsItemFilterKey"` + + // The operator used by the filter call. + // + // Operator is a required field + Operator *string `type:"string" required:"true" enum:"OpsItemFilterOperator"` + + // The filter value. + // + // Values is a required field + Values []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s OpsItemFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsItemFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsItemFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Operator == nil { + invalidParams.Add(request.NewErrParamRequired("Operator")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *OpsItemFilter) SetKey(v string) *OpsItemFilter { + s.Key = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *OpsItemFilter) SetOperator(v string) *OpsItemFilter { + s.Operator = &v + return s +} + +// SetValues sets the Values field's value. +func (s *OpsItemFilter) SetValues(v []*string) *OpsItemFilter { + s.Values = v + return s +} + +// A specified parameter argument isn't valid. Verify the available arguments +// and try again. 
+type OpsItemInvalidParameterException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + ParameterNames []*string `type:"list"` +} + +// String returns the string representation +func (s OpsItemInvalidParameterException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemInvalidParameterException) GoString() string { + return s.String() +} + +func newErrorOpsItemInvalidParameterException(v protocol.ResponseMetadata) error { + return &OpsItemInvalidParameterException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OpsItemInvalidParameterException) Code() string { + return "OpsItemInvalidParameterException" +} + +// Message returns the exception's message. +func (s *OpsItemInvalidParameterException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OpsItemInvalidParameterException) OrigErr() error { + return nil +} + +func (s *OpsItemInvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OpsItemInvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OpsItemInvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request caused OpsItems to exceed one or more quotas. For information +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). 
+type OpsItemLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Limit *int64 `type:"integer"` + + LimitType *string `type:"string"` + + Message_ *string `locationName:"Message" type:"string"` + + ResourceTypes []*string `type:"list"` +} + +// String returns the string representation +func (s OpsItemLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemLimitExceededException) GoString() string { + return s.String() +} + +func newErrorOpsItemLimitExceededException(v protocol.ResponseMetadata) error { + return &OpsItemLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OpsItemLimitExceededException) Code() string { + return "OpsItemLimitExceededException" +} + +// Message returns the exception's message. +func (s *OpsItemLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OpsItemLimitExceededException) OrigErr() error { + return nil +} + +func (s *OpsItemLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OpsItemLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OpsItemLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified OpsItem ID doesn't exist. Verify the ID and try again. 
+type OpsItemNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s OpsItemNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemNotFoundException) GoString() string { + return s.String() +} + +func newErrorOpsItemNotFoundException(v protocol.ResponseMetadata) error { + return &OpsItemNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OpsItemNotFoundException) Code() string { + return "OpsItemNotFoundException" +} + +// Message returns the exception's message. +func (s *OpsItemNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OpsItemNotFoundException) OrigErr() error { + return nil +} + +func (s *OpsItemNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OpsItemNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OpsItemNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A notification about the OpsItem. +type OpsItemNotification struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. 
+ Arn *string `type:"string"` +} + +// String returns the string representation +func (s OpsItemNotification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemNotification) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *OpsItemNotification) SetArn(v string) *OpsItemNotification { + s.Arn = &v + return s +} + +// A count of OpsItems. +type OpsItemSummary struct { + _ struct{} `type:"structure"` + + // A list of OpsItems by category. + Category *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM entity that created the OpsItem. + CreatedBy *string `type:"string"` + + // The date and time the OpsItem was created. + CreatedTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the IAM entity that created the OpsItem. + LastModifiedBy *string `type:"string"` + + // The date and time the OpsItem was last updated. + LastModifiedTime *time.Time `type:"timestamp"` + + // Operational data is custom data that provides useful reference details about + // the OpsItem. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // The ID of the OpsItem. + OpsItemId *string `type:"string"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // A list of OpsItems by severity. + Severity *string `min:"1" type:"string"` + + // The impacted AWS resource. + Source *string `min:"1" type:"string"` + + // The OpsItem status. Status can be Open, In Progress, or Resolved. + Status *string `type:"string" enum:"OpsItemStatus"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. 
+ Title *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s OpsItemSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsItemSummary) GoString() string { + return s.String() +} + +// SetCategory sets the Category field's value. +func (s *OpsItemSummary) SetCategory(v string) *OpsItemSummary { + s.Category = &v + return s +} + +// SetCreatedBy sets the CreatedBy field's value. +func (s *OpsItemSummary) SetCreatedBy(v string) *OpsItemSummary { + s.CreatedBy = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *OpsItemSummary) SetCreatedTime(v time.Time) *OpsItemSummary { + s.CreatedTime = &v + return s +} + +// SetLastModifiedBy sets the LastModifiedBy field's value. +func (s *OpsItemSummary) SetLastModifiedBy(v string) *OpsItemSummary { + s.LastModifiedBy = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OpsItemSummary) SetLastModifiedTime(v time.Time) *OpsItemSummary { + s.LastModifiedTime = &v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *OpsItemSummary) SetOperationalData(v map[string]*OpsItemDataValue) *OpsItemSummary { + s.OperationalData = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *OpsItemSummary) SetOpsItemId(v string) *OpsItemSummary { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *OpsItemSummary) SetPriority(v int64) *OpsItemSummary { + s.Priority = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *OpsItemSummary) SetSeverity(v string) *OpsItemSummary { + s.Severity = &v + return s +} + +// SetSource sets the Source field's value. +func (s *OpsItemSummary) SetSource(v string) *OpsItemSummary { + s.Source = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *OpsItemSummary) SetStatus(v string) *OpsItemSummary { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *OpsItemSummary) SetTitle(v string) *OpsItemSummary { + s.Title = &v + return s +} + +// The OpsItem data type to return. +type OpsResultAttribute struct { + _ struct{} `type:"structure"` + + // Name of the data type. Valid value: AWS:OpsItem, AWS:EC2InstanceInformation, + // AWS:OpsItemTrendline, or AWS:ComplianceSummary. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s OpsResultAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpsResultAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsResultAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpsResultAttribute"} + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTypeName sets the TypeName field's value. +func (s *OpsResultAttribute) SetTypeName(v string) *OpsResultAttribute { + s.TypeName = &v + return s +} + +// Information about the source where the association execution details are +// stored. +type OutputSource struct { + _ struct{} `type:"structure"` + + // The ID of the output source, for example the URL of an S3 bucket. + OutputSourceId *string `min:"36" type:"string"` + + // The type of source where the association execution details are stored, for + // example, Amazon S3. 
+ OutputSourceType *string `type:"string"` +} + +// String returns the string representation +func (s OutputSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSource) GoString() string { + return s.String() +} + +// SetOutputSourceId sets the OutputSourceId field's value. +func (s *OutputSource) SetOutputSourceId(v string) *OutputSource { + s.OutputSourceId = &v + return s +} + +// SetOutputSourceType sets the OutputSourceType field's value. +func (s *OutputSource) SetOutputSourceType(v string) *OutputSource { + s.OutputSourceType = &v + return s +} + +// An Systems Manager parameter in Parameter Store. +type Parameter struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the parameter. + ARN *string `type:"string"` + + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. + DataType *string `type:"string"` + + // Date the parameter was last changed or updated and the parameter version + // was created. + LastModifiedDate *time.Time `type:"timestamp"` + + // The name of the parameter. + Name *string `min:"1" type:"string"` + + // Either the version number or the label used to retrieve the parameter value. + // Specify selectors by using one of the following formats: + // + // parameter_name:version + // + // parameter_name:label + Selector *string `type:"string"` + + // Applies to parameters that reference information in other AWS services. SourceResult + // is the raw result or response from the source. + SourceResult *string `type:"string"` + + // The type of parameter. Valid values include the following: String, StringList, + // and SecureString. + Type *string `type:"string" enum:"ParameterType"` + + // The parameter value. + Value *string `type:"string"` + + // The parameter version. 
+ Version *int64 `type:"long"` +} + +// String returns the string representation +func (s Parameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameter) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *Parameter) SetARN(v string) *Parameter { + s.ARN = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *Parameter) SetDataType(v string) *Parameter { + s.DataType = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *Parameter) SetLastModifiedDate(v time.Time) *Parameter { + s.LastModifiedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Parameter) SetName(v string) *Parameter { + s.Name = &v + return s +} + +// SetSelector sets the Selector field's value. +func (s *Parameter) SetSelector(v string) *Parameter { + s.Selector = &v + return s +} + +// SetSourceResult sets the SourceResult field's value. +func (s *Parameter) SetSourceResult(v string) *Parameter { + s.SourceResult = &v + return s +} + +// SetType sets the Type field's value. +func (s *Parameter) SetType(v string) *Parameter { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Parameter) SetValue(v string) *Parameter { + s.Value = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Parameter) SetVersion(v int64) *Parameter { + s.Version = &v + return s +} + +// The parameter already exists. You can't create duplicate parameters. 
+type ParameterAlreadyExists struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterAlreadyExists) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterAlreadyExists) GoString() string { + return s.String() +} + +func newErrorParameterAlreadyExists(v protocol.ResponseMetadata) error { + return &ParameterAlreadyExists{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterAlreadyExists) Code() string { + return "ParameterAlreadyExists" +} + +// Message returns the exception's message. +func (s *ParameterAlreadyExists) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ParameterAlreadyExists) OrigErr() error { + return nil +} + +func (s *ParameterAlreadyExists) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about parameter usage. +type ParameterHistory struct { + _ struct{} `type:"structure"` + + // Parameter names can include the following letters and symbols. + // + // a-zA-Z0-9_.- + AllowedPattern *string `type:"string"` + + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. + DataType *string `type:"string"` + + // Information about the parameter. + Description *string `type:"string"` + + // The ID of the query key used for this parameter. 
+ KeyId *string `min:"1" type:"string"` + + // Labels assigned to the parameter version. + Labels []*string `min:"1" type:"list"` + + // Date the parameter was last changed or updated. + LastModifiedDate *time.Time `type:"timestamp"` + + // Amazon Resource Name (ARN) of the AWS user who last changed the parameter. + LastModifiedUser *string `type:"string"` + + // The name of the parameter. + Name *string `min:"1" type:"string"` + + // Information about the policies assigned to a parameter. + // + // Assigning parameter policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) + // in the AWS Systems Manager User Guide. + Policies []*ParameterInlinePolicy `type:"list"` + + // The parameter tier. + Tier *string `type:"string" enum:"ParameterTier"` + + // The type of parameter used. + Type *string `type:"string" enum:"ParameterType"` + + // The parameter value. + Value *string `type:"string"` + + // The parameter version. + Version *int64 `type:"long"` +} + +// String returns the string representation +func (s ParameterHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterHistory) GoString() string { + return s.String() +} + +// SetAllowedPattern sets the AllowedPattern field's value. +func (s *ParameterHistory) SetAllowedPattern(v string) *ParameterHistory { + s.AllowedPattern = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *ParameterHistory) SetDataType(v string) *ParameterHistory { + s.DataType = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ParameterHistory) SetDescription(v string) *ParameterHistory { + s.Description = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *ParameterHistory) SetKeyId(v string) *ParameterHistory { + s.KeyId = &v + return s +} + +// SetLabels sets the Labels field's value. 
+func (s *ParameterHistory) SetLabels(v []*string) *ParameterHistory { + s.Labels = v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *ParameterHistory) SetLastModifiedDate(v time.Time) *ParameterHistory { + s.LastModifiedDate = &v + return s +} + +// SetLastModifiedUser sets the LastModifiedUser field's value. +func (s *ParameterHistory) SetLastModifiedUser(v string) *ParameterHistory { + s.LastModifiedUser = &v + return s +} + +// SetName sets the Name field's value. +func (s *ParameterHistory) SetName(v string) *ParameterHistory { + s.Name = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *ParameterHistory) SetPolicies(v []*ParameterInlinePolicy) *ParameterHistory { + s.Policies = v + return s +} + +// SetTier sets the Tier field's value. +func (s *ParameterHistory) SetTier(v string) *ParameterHistory { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *ParameterHistory) SetType(v string) *ParameterHistory { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ParameterHistory) SetValue(v string) *ParameterHistory { + s.Value = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *ParameterHistory) SetVersion(v int64) *ParameterHistory { + s.Version = &v + return s +} + +// One or more policies assigned to a parameter. +type ParameterInlinePolicy struct { + _ struct{} `type:"structure"` + + // The status of the policy. Policies report the following statuses: Pending + // (the policy has not been enforced or applied yet), Finished (the policy was + // applied), Failed (the policy was not applied), or InProgress (the policy + // is being applied now). + PolicyStatus *string `type:"string"` + + // The JSON text of the policy. + PolicyText *string `type:"string"` + + // The type of policy. Parameter Store supports the following policy types: + // Expiration, ExpirationNotification, and NoChangeNotification. 
+ PolicyType *string `type:"string"` +} + +// String returns the string representation +func (s ParameterInlinePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterInlinePolicy) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *ParameterInlinePolicy) SetPolicyStatus(v string) *ParameterInlinePolicy { + s.PolicyStatus = &v + return s +} + +// SetPolicyText sets the PolicyText field's value. +func (s *ParameterInlinePolicy) SetPolicyText(v string) *ParameterInlinePolicy { + s.PolicyText = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *ParameterInlinePolicy) SetPolicyType(v string) *ParameterInlinePolicy { + s.PolicyType = &v + return s +} + +// You have exceeded the number of parameters for this AWS account. Delete one +// or more parameters and try again. +type ParameterLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterLimitExceeded) GoString() string { + return s.String() +} + +func newErrorParameterLimitExceeded(v protocol.ResponseMetadata) error { + return &ParameterLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterLimitExceeded) Code() string { + return "ParameterLimitExceeded" +} + +// Message returns the exception's message. +func (s *ParameterLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ParameterLimitExceeded) OrigErr() error { + return nil +} + +func (s *ParameterLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Parameter Store retains the 100 most recently created versions of a parameter. +// After this number of versions has been created, Parameter Store deletes the +// oldest version when a new one is created. However, if the oldest version +// has a label attached to it, Parameter Store will not delete the version and +// instead presents this error message: +// +// An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter +// operation: You attempted to create a new version of parameter-name by calling +// the PutParameter API with the overwrite flag. Version version-number, the +// oldest version, can't be deleted because it has a label associated with it. +// Move the label to another version of the parameter, and try again. +// +// This safeguard is to prevent parameter versions with mission critical labels +// assigned to them from being deleted. To continue creating new parameters, +// first move the label from the oldest version of the parameter to a newer +// one for use in your operations. For information about moving parameter labels, +// see Move a parameter label (console) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-console-move) +// or Move a parameter label (CLI) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-cli-move) +// in the AWS Systems Manager User Guide. 
+type ParameterMaxVersionLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterMaxVersionLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterMaxVersionLimitExceeded) GoString() string { + return s.String() +} + +func newErrorParameterMaxVersionLimitExceeded(v protocol.ResponseMetadata) error { + return &ParameterMaxVersionLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterMaxVersionLimitExceeded) Code() string { + return "ParameterMaxVersionLimitExceeded" +} + +// Message returns the exception's message. +func (s *ParameterMaxVersionLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ParameterMaxVersionLimitExceeded) OrigErr() error { + return nil +} + +func (s *ParameterMaxVersionLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterMaxVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterMaxVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Metadata includes information like the ARN of the last user and the date/time +// the parameter was last used. +type ParameterMetadata struct { + _ struct{} `type:"structure"` + + // A parameter name can include only the following letters and symbols. + // + // a-zA-Z0-9_.- + AllowedPattern *string `type:"string"` + + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. 
+ DataType *string `type:"string"` + + // Description of the parameter actions. + Description *string `type:"string"` + + // The ID of the query key used for this parameter. + KeyId *string `min:"1" type:"string"` + + // Date the parameter was last changed or updated. + LastModifiedDate *time.Time `type:"timestamp"` + + // Amazon Resource Name (ARN) of the AWS user who last changed the parameter. + LastModifiedUser *string `type:"string"` + + // The parameter name. + Name *string `min:"1" type:"string"` + + // A list of policies associated with a parameter. + Policies []*ParameterInlinePolicy `type:"list"` + + // The parameter tier. + Tier *string `type:"string" enum:"ParameterTier"` + + // The type of parameter. Valid parameter types include the following: String, + // StringList, and SecureString. + Type *string `type:"string" enum:"ParameterType"` + + // The parameter version. + Version *int64 `type:"long"` +} + +// String returns the string representation +func (s ParameterMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterMetadata) GoString() string { + return s.String() +} + +// SetAllowedPattern sets the AllowedPattern field's value. +func (s *ParameterMetadata) SetAllowedPattern(v string) *ParameterMetadata { + s.AllowedPattern = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *ParameterMetadata) SetDataType(v string) *ParameterMetadata { + s.DataType = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ParameterMetadata) SetDescription(v string) *ParameterMetadata { + s.Description = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *ParameterMetadata) SetKeyId(v string) *ParameterMetadata { + s.KeyId = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. 
+func (s *ParameterMetadata) SetLastModifiedDate(v time.Time) *ParameterMetadata { + s.LastModifiedDate = &v + return s +} + +// SetLastModifiedUser sets the LastModifiedUser field's value. +func (s *ParameterMetadata) SetLastModifiedUser(v string) *ParameterMetadata { + s.LastModifiedUser = &v + return s +} + +// SetName sets the Name field's value. +func (s *ParameterMetadata) SetName(v string) *ParameterMetadata { + s.Name = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *ParameterMetadata) SetPolicies(v []*ParameterInlinePolicy) *ParameterMetadata { + s.Policies = v + return s +} + +// SetTier sets the Tier field's value. +func (s *ParameterMetadata) SetTier(v string) *ParameterMetadata { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *ParameterMetadata) SetType(v string) *ParameterMetadata { + s.Type = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *ParameterMetadata) SetVersion(v int64) *ParameterMetadata { + s.Version = &v + return s +} + +// The parameter could not be found. Verify the name and try again. +type ParameterNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterNotFound) GoString() string { + return s.String() +} + +func newErrorParameterNotFound(v protocol.ResponseMetadata) error { + return &ParameterNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterNotFound) Code() string { + return "ParameterNotFound" +} + +// Message returns the exception's message. 
+func (s *ParameterNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ParameterNotFound) OrigErr() error { + return nil +} + +func (s *ParameterNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// The parameter name is not valid. +type ParameterPatternMismatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The parameter name is not valid. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterPatternMismatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterPatternMismatchException) GoString() string { + return s.String() +} + +func newErrorParameterPatternMismatchException(v protocol.ResponseMetadata) error { + return &ParameterPatternMismatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterPatternMismatchException) Code() string { + return "ParameterPatternMismatchException" +} + +// Message returns the exception's message. +func (s *ParameterPatternMismatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ParameterPatternMismatchException) OrigErr() error { + return nil +} + +func (s *ParameterPatternMismatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterPatternMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterPatternMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// One or more filters. Use a filter to return a more specific list of results. +type ParameterStringFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath + // API actions. However, not all of the pattern values listed for Key can be + // used with both actions. + // + // For DescribeActions, all of the listed patterns are valid, with the exception + // of Label. + // + // For GetParametersByPath, the following patterns listed for Key are not valid: + // tag, Name, Path, and Tier. + // + // For examples of CLI commands demonstrating valid parameter filter constructions, + // see Searching for Systems Manager parameters (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-search.html) + // in the AWS Systems Manager User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // For all filters used with DescribeParameters, valid options include Equals + // and BeginsWith. The Name filter additionally supports the Contains option. + // (Exception: For filters using the key Path, valid options include Recursive + // and OneLevel.) + // + // For filters used with GetParametersByPath, valid options include Equals and + // BeginsWith. (Exception: For filters using Label as the Key name, the only + // valid option is Equals.) 
+ Option *string `min:"1" type:"string"` + + // The value you want to search for. + Values []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s ParameterStringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterStringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterStringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterStringFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Option != nil && len(*s.Option) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Option", 1)) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ParameterStringFilter) SetKey(v string) *ParameterStringFilter { + s.Key = &v + return s +} + +// SetOption sets the Option field's value. +func (s *ParameterStringFilter) SetOption(v string) *ParameterStringFilter { + s.Option = &v + return s +} + +// SetValues sets the Values field's value. +func (s *ParameterStringFilter) SetValues(v []*string) *ParameterStringFilter { + s.Values = v + return s +} + +// A parameter version can have a maximum of ten labels. 
+type ParameterVersionLabelLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterVersionLabelLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterVersionLabelLimitExceeded) GoString() string { + return s.String() +} + +func newErrorParameterVersionLabelLimitExceeded(v protocol.ResponseMetadata) error { + return &ParameterVersionLabelLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterVersionLabelLimitExceeded) Code() string { + return "ParameterVersionLabelLimitExceeded" +} + +// Message returns the exception's message. +func (s *ParameterVersionLabelLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ParameterVersionLabelLimitExceeded) OrigErr() error { + return nil +} + +func (s *ParameterVersionLabelLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterVersionLabelLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterVersionLabelLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified parameter version was not found. Verify the parameter name +// and version, and try again. 
+type ParameterVersionNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ParameterVersionNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterVersionNotFound) GoString() string { + return s.String() +} + +func newErrorParameterVersionNotFound(v protocol.ResponseMetadata) error { + return &ParameterVersionNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ParameterVersionNotFound) Code() string { + return "ParameterVersionNotFound" +} + +// Message returns the exception's message. +func (s *ParameterVersionNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ParameterVersionNotFound) OrigErr() error { + return nil +} + +func (s *ParameterVersionNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ParameterVersionNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ParameterVersionNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// This data type is deprecated. Instead, use ParameterStringFilter. +type ParametersFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"ParametersFilterKey"` + + // The filter values. 
+ // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ParametersFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParametersFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParametersFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParametersFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ParametersFilter) SetKey(v string) *ParametersFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *ParametersFilter) SetValues(v []*string) *ParametersFilter { + s.Values = v + return s +} + +// Represents metadata about a patch. +type Patch struct { + _ struct{} `type:"structure"` + + // The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to Linux-based + // instances only. + AdvisoryIds []*string `type:"list"` + + // The architecture of the patch. For example, in example-pkg-0.710.10-2.7.abcd.x86_64, + // the architecture is indicated by x86_64. Applies to Linux-based instances + // only. + Arch *string `type:"string"` + + // The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based + // instances only. + BugzillaIds []*string `type:"list"` + + // The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example, + // CVE-1999-0067. Applies to Linux-based instances only. + CVEIds []*string `type:"list"` + + // The classification of the patch. 
For example, SecurityUpdates, Updates, or + // CriticalUpdates. + Classification *string `type:"string"` + + // The URL where more information can be obtained about the patch. + ContentUrl *string `type:"string"` + + // The description of the patch. + Description *string `type:"string"` + + // The epoch of the patch. For example in pkg-example-EE-20180914-2.2.amzn1.noarch, + // the epoch value is 20180914-2. Applies to Linux-based instances only. + Epoch *int64 `type:"integer"` + + // The ID of the patch. Applies to Windows patches only. + // + // This ID is not the same as the Microsoft Knowledge Base ID. + Id *string `min:"1" type:"string"` + + // The Microsoft Knowledge Base ID of the patch. Applies to Windows patches + // only. + KbNumber *string `type:"string"` + + // The language of the patch if it's language-specific. + Language *string `type:"string"` + + // The ID of the Microsoft Security Response Center (MSRC) bulletin the patch + // is related to. For example, MS14-045. Applies to Windows patches only. + MsrcNumber *string `type:"string"` + + // The severity of the patch, such as Critical, Important, or Moderate. Applies + // to Windows patches only. + MsrcSeverity *string `type:"string"` + + // The name of the patch. Applies to Linux-based instances only. + Name *string `type:"string"` + + // The specific product the patch is applicable for. For example, WindowsServer2016 + // or AmazonLinux2018.03. + Product *string `type:"string"` + + // The product family the patch is applicable for. For example, Windows or Amazon + // Linux 2. + ProductFamily *string `type:"string"` + + // The particular release of a patch. For example, in pkg-example-EE-20180914-2.2.amzn1.noarch, + // the release is 2.amaz1. Applies to Linux-based instances only. + Release *string `type:"string"` + + // The date the patch was released. 
+ ReleaseDate *time.Time `type:"timestamp"` + + // The source patch repository for the operating system and version, such as + // trusty-security for Ubuntu Server 14.04 LTE and focal-security for Ubuntu + // Server 20.04 LTE. Applies to Linux-based instances only. + Repository *string `type:"string"` + + // The severity level of the patch. For example, CRITICAL or MODERATE. + Severity *string `type:"string"` + + // The title of the patch. + Title *string `type:"string"` + + // The name of the vendor providing the patch. + Vendor *string `type:"string"` + + // The version number of the patch. For example, in example-pkg-1.710.10-2.7.abcd.x86_64, + // the version number is indicated by -1. Applies to Linux-based instances only. + Version *string `type:"string"` +} + +// String returns the string representation +func (s Patch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Patch) GoString() string { + return s.String() +} + +// SetAdvisoryIds sets the AdvisoryIds field's value. +func (s *Patch) SetAdvisoryIds(v []*string) *Patch { + s.AdvisoryIds = v + return s +} + +// SetArch sets the Arch field's value. +func (s *Patch) SetArch(v string) *Patch { + s.Arch = &v + return s +} + +// SetBugzillaIds sets the BugzillaIds field's value. +func (s *Patch) SetBugzillaIds(v []*string) *Patch { + s.BugzillaIds = v + return s +} + +// SetCVEIds sets the CVEIds field's value. +func (s *Patch) SetCVEIds(v []*string) *Patch { + s.CVEIds = v + return s +} + +// SetClassification sets the Classification field's value. +func (s *Patch) SetClassification(v string) *Patch { + s.Classification = &v + return s +} + +// SetContentUrl sets the ContentUrl field's value. +func (s *Patch) SetContentUrl(v string) *Patch { + s.ContentUrl = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *Patch) SetDescription(v string) *Patch { + s.Description = &v + return s +} + +// SetEpoch sets the Epoch field's value. +func (s *Patch) SetEpoch(v int64) *Patch { + s.Epoch = &v + return s +} + +// SetId sets the Id field's value. +func (s *Patch) SetId(v string) *Patch { + s.Id = &v + return s +} + +// SetKbNumber sets the KbNumber field's value. +func (s *Patch) SetKbNumber(v string) *Patch { + s.KbNumber = &v + return s +} + +// SetLanguage sets the Language field's value. +func (s *Patch) SetLanguage(v string) *Patch { + s.Language = &v + return s +} + +// SetMsrcNumber sets the MsrcNumber field's value. +func (s *Patch) SetMsrcNumber(v string) *Patch { + s.MsrcNumber = &v + return s +} + +// SetMsrcSeverity sets the MsrcSeverity field's value. +func (s *Patch) SetMsrcSeverity(v string) *Patch { + s.MsrcSeverity = &v + return s +} + +// SetName sets the Name field's value. +func (s *Patch) SetName(v string) *Patch { + s.Name = &v + return s +} + +// SetProduct sets the Product field's value. +func (s *Patch) SetProduct(v string) *Patch { + s.Product = &v + return s +} + +// SetProductFamily sets the ProductFamily field's value. +func (s *Patch) SetProductFamily(v string) *Patch { + s.ProductFamily = &v + return s +} + +// SetRelease sets the Release field's value. +func (s *Patch) SetRelease(v string) *Patch { + s.Release = &v + return s +} + +// SetReleaseDate sets the ReleaseDate field's value. +func (s *Patch) SetReleaseDate(v time.Time) *Patch { + s.ReleaseDate = &v + return s +} + +// SetRepository sets the Repository field's value. +func (s *Patch) SetRepository(v string) *Patch { + s.Repository = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *Patch) SetSeverity(v string) *Patch { + s.Severity = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *Patch) SetTitle(v string) *Patch { + s.Title = &v + return s +} + +// SetVendor sets the Vendor field's value. 
+func (s *Patch) SetVendor(v string) *Patch { + s.Vendor = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Patch) SetVersion(v string) *Patch { + s.Version = &v + return s +} + +// Defines the basic information about a patch baseline. +type PatchBaselineIdentity struct { + _ struct{} `type:"structure"` + + // The description of the patch baseline. + BaselineDescription *string `min:"1" type:"string"` + + // The ID of the patch baseline. + BaselineId *string `min:"20" type:"string"` + + // The name of the patch baseline. + BaselineName *string `min:"3" type:"string"` + + // Whether this is the default baseline. Note that Systems Manager supports + // creating multiple default patch baselines. For example, you can create a + // default patch baseline for each operating system. + DefaultBaseline *bool `type:"boolean"` + + // Defines the operating system the patch baseline applies to. The Default value + // is WINDOWS. + OperatingSystem *string `type:"string" enum:"OperatingSystem"` +} + +// String returns the string representation +func (s PatchBaselineIdentity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchBaselineIdentity) GoString() string { + return s.String() +} + +// SetBaselineDescription sets the BaselineDescription field's value. +func (s *PatchBaselineIdentity) SetBaselineDescription(v string) *PatchBaselineIdentity { + s.BaselineDescription = &v + return s +} + +// SetBaselineId sets the BaselineId field's value. +func (s *PatchBaselineIdentity) SetBaselineId(v string) *PatchBaselineIdentity { + s.BaselineId = &v + return s +} + +// SetBaselineName sets the BaselineName field's value. +func (s *PatchBaselineIdentity) SetBaselineName(v string) *PatchBaselineIdentity { + s.BaselineName = &v + return s +} + +// SetDefaultBaseline sets the DefaultBaseline field's value. 
+func (s *PatchBaselineIdentity) SetDefaultBaseline(v bool) *PatchBaselineIdentity { + s.DefaultBaseline = &v + return s +} + +// SetOperatingSystem sets the OperatingSystem field's value. +func (s *PatchBaselineIdentity) SetOperatingSystem(v string) *PatchBaselineIdentity { + s.OperatingSystem = &v + return s +} + +// Information about the state of a patch on a particular instance as it relates +// to the patch baseline used to patch the instance. +type PatchComplianceData struct { + _ struct{} `type:"structure"` + + // The IDs of one or more Common Vulnerabilities and Exposure (CVE) issues that + // are resolved by the patch. + CVEIds *string `type:"string"` + + // The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates). + // + // Classification is a required field + Classification *string `type:"string" required:"true"` + + // The date/time the patch was installed on the instance. Note that not all + // operating systems provide this level of information. + // + // InstalledTime is a required field + InstalledTime *time.Time `type:"timestamp" required:"true"` + + // The operating system-specific ID of the patch. + // + // KBId is a required field + KBId *string `type:"string" required:"true"` + + // The severity of the patch (for example, Critical, Important, Moderate). + // + // Severity is a required field + Severity *string `type:"string" required:"true"` + + // The state of the patch on the instance, such as INSTALLED or FAILED. + // + // For descriptions of each patch state, see About patch compliance (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-compliance-about.html#sysman-compliance-monitor-patch) + // in the AWS Systems Manager User Guide. + // + // State is a required field + State *string `type:"string" required:"true" enum:"PatchComplianceDataState"` + + // The title of the patch. 
+ // + // Title is a required field + Title *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PatchComplianceData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchComplianceData) GoString() string { + return s.String() +} + +// SetCVEIds sets the CVEIds field's value. +func (s *PatchComplianceData) SetCVEIds(v string) *PatchComplianceData { + s.CVEIds = &v + return s +} + +// SetClassification sets the Classification field's value. +func (s *PatchComplianceData) SetClassification(v string) *PatchComplianceData { + s.Classification = &v + return s +} + +// SetInstalledTime sets the InstalledTime field's value. +func (s *PatchComplianceData) SetInstalledTime(v time.Time) *PatchComplianceData { + s.InstalledTime = &v + return s +} + +// SetKBId sets the KBId field's value. +func (s *PatchComplianceData) SetKBId(v string) *PatchComplianceData { + s.KBId = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *PatchComplianceData) SetSeverity(v string) *PatchComplianceData { + s.Severity = &v + return s +} + +// SetState sets the State field's value. +func (s *PatchComplianceData) SetState(v string) *PatchComplianceData { + s.State = &v + return s +} + +// SetTitle sets the Title field's value. +func (s *PatchComplianceData) SetTitle(v string) *PatchComplianceData { + s.Title = &v + return s +} + +// Defines which patches should be included in a patch baseline. +// +// A patch filter consists of a key and a set of values. The filter key is a +// patch property. For example, the available filter keys for WINDOWS are PATCH_SET, +// PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, and MSRC_SEVERITY. The filter values +// define a matching criterion for the patch property indicated by the key. 
+// For example, if the filter key is PRODUCT and the filter values are ["Office +// 2013", "Office 2016"], then the filter accepts all patches where product +// name is either "Office 2013" or "Office 2016". The filter values can be exact +// values for the patch property given as a key, or a wildcard (*), which matches +// all values. +// +// You can view lists of valid values for the patch properties by running the +// DescribePatchProperties command. For information about which patch properties +// can be used with each major operating system, see DescribePatchProperties. +type PatchFilter struct { + _ struct{} `type:"structure"` + + // The key for the filter. + // + // Run the DescribePatchProperties command to view lists of valid keys for each + // operating system type. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"PatchFilterKey"` + + // The value for the filter key. + // + // Run the DescribePatchProperties command to view lists of valid values for + // each key based on operating system type. + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PatchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. 
+func (s *PatchFilter) SetKey(v string) *PatchFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *PatchFilter) SetValues(v []*string) *PatchFilter { + s.Values = v + return s +} + +// A set of patch filters, typically used for approval rules. +type PatchFilterGroup struct { + _ struct{} `type:"structure"` + + // The set of patch filters that make up the group. + // + // PatchFilters is a required field + PatchFilters []*PatchFilter `type:"list" required:"true"` +} + +// String returns the string representation +func (s PatchFilterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchFilterGroup) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchFilterGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchFilterGroup"} + if s.PatchFilters == nil { + invalidParams.Add(request.NewErrParamRequired("PatchFilters")) + } + if s.PatchFilters != nil { + for i, v := range s.PatchFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PatchFilters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPatchFilters sets the PatchFilters field's value. +func (s *PatchFilterGroup) SetPatchFilters(v []*PatchFilter) *PatchFilterGroup { + s.PatchFilters = v + return s +} + +// The mapping between a patch group and the patch baseline the patch group +// is registered with. +type PatchGroupPatchBaselineMapping struct { + _ struct{} `type:"structure"` + + // The patch baseline the patch group is registered with. + BaselineIdentity *PatchBaselineIdentity `type:"structure"` + + // The name of the patch group registered with the patch baseline. 
+ PatchGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PatchGroupPatchBaselineMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchGroupPatchBaselineMapping) GoString() string { + return s.String() +} + +// SetBaselineIdentity sets the BaselineIdentity field's value. +func (s *PatchGroupPatchBaselineMapping) SetBaselineIdentity(v *PatchBaselineIdentity) *PatchGroupPatchBaselineMapping { + s.BaselineIdentity = v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *PatchGroupPatchBaselineMapping) SetPatchGroup(v string) *PatchGroupPatchBaselineMapping { + s.PatchGroup = &v + return s +} + +// Defines a filter used in Patch Manager APIs. +type PatchOrchestratorFilter struct { + _ struct{} `type:"structure"` + + // The key for the filter. + Key *string `min:"1" type:"string"` + + // The value for the filter. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s PatchOrchestratorFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchOrchestratorFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchOrchestratorFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchOrchestratorFilter"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *PatchOrchestratorFilter) SetKey(v string) *PatchOrchestratorFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. 
+func (s *PatchOrchestratorFilter) SetValues(v []*string) *PatchOrchestratorFilter { + s.Values = v + return s +} + +// Defines an approval rule for a patch baseline. +type PatchRule struct { + _ struct{} `type:"structure"` + + // The number of days after the release date of each patch matched by the rule + // that the patch is marked as approved in the patch baseline. For example, + // a value of 7 means that patches are approved seven days after they are released. + // Not supported on Ubuntu Server. + ApproveAfterDays *int64 `type:"integer"` + + // The cutoff date for auto approval of released patches. Any patches released + // on or before this date are installed automatically. Not supported on Ubuntu + // Server. + // + // Enter dates in the format YYYY-MM-DD. For example, 2020-12-31. + ApproveUntilDate *string `min:"1" type:"string"` + + // A compliance severity level for all approved patches in a patch baseline. + ComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + + // For instances identified by the approval rule filters, enables a patch baseline + // to apply non-security updates available in the specified repository. The + // default value is 'false'. Applies to Linux instances only. + EnableNonSecurity *bool `type:"boolean"` + + // The patch filter group that defines the criteria for the rule. + // + // PatchFilterGroup is a required field + PatchFilterGroup *PatchFilterGroup `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PatchRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PatchRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchRule"} + if s.ApproveUntilDate != nil && len(*s.ApproveUntilDate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApproveUntilDate", 1)) + } + if s.PatchFilterGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchFilterGroup")) + } + if s.PatchFilterGroup != nil { + if err := s.PatchFilterGroup.Validate(); err != nil { + invalidParams.AddNested("PatchFilterGroup", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApproveAfterDays sets the ApproveAfterDays field's value. +func (s *PatchRule) SetApproveAfterDays(v int64) *PatchRule { + s.ApproveAfterDays = &v + return s +} + +// SetApproveUntilDate sets the ApproveUntilDate field's value. +func (s *PatchRule) SetApproveUntilDate(v string) *PatchRule { + s.ApproveUntilDate = &v + return s +} + +// SetComplianceLevel sets the ComplianceLevel field's value. +func (s *PatchRule) SetComplianceLevel(v string) *PatchRule { + s.ComplianceLevel = &v + return s +} + +// SetEnableNonSecurity sets the EnableNonSecurity field's value. +func (s *PatchRule) SetEnableNonSecurity(v bool) *PatchRule { + s.EnableNonSecurity = &v + return s +} + +// SetPatchFilterGroup sets the PatchFilterGroup field's value. +func (s *PatchRule) SetPatchFilterGroup(v *PatchFilterGroup) *PatchRule { + s.PatchFilterGroup = v + return s +} + +// A set of rules defining the approval rules for a patch baseline. +type PatchRuleGroup struct { + _ struct{} `type:"structure"` + + // The rules that make up the rule group. 
+ // + // PatchRules is a required field + PatchRules []*PatchRule `type:"list" required:"true"` +} + +// String returns the string representation +func (s PatchRuleGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchRuleGroup) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchRuleGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchRuleGroup"} + if s.PatchRules == nil { + invalidParams.Add(request.NewErrParamRequired("PatchRules")) + } + if s.PatchRules != nil { + for i, v := range s.PatchRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PatchRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPatchRules sets the PatchRules field's value. +func (s *PatchRuleGroup) SetPatchRules(v []*PatchRule) *PatchRuleGroup { + s.PatchRules = v + return s +} + +// Information about the patches to use to update the instances, including target +// operating systems and source repository. Applies to Linux instances only. +type PatchSource struct { + _ struct{} `type:"structure"` + + // The value of the yum repo configuration. For example: + // + // [main] + // + // cachedir=/var/cache/yum/$basesearch$releasever + // + // keepcache=0 + // + // debuglevel=2 + // + // Configuration is a required field + Configuration *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The name specified to identify the patch source. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The specific operating system versions a patch repository applies to, such + // as "Ubuntu16.04", "AmazonLinux2016.09", "RedhatEnterpriseLinux7.2" or "Suse12.7". 
+ // For lists of supported product values, see PatchFilter. + // + // Products is a required field + Products []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PatchSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchSource"} + if s.Configuration == nil { + invalidParams.Add(request.NewErrParamRequired("Configuration")) + } + if s.Configuration != nil && len(*s.Configuration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Configuration", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Products == nil { + invalidParams.Add(request.NewErrParamRequired("Products")) + } + if s.Products != nil && len(s.Products) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Products", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfiguration sets the Configuration field's value. +func (s *PatchSource) SetConfiguration(v string) *PatchSource { + s.Configuration = &v + return s +} + +// SetName sets the Name field's value. +func (s *PatchSource) SetName(v string) *PatchSource { + s.Name = &v + return s +} + +// SetProducts sets the Products field's value. +func (s *PatchSource) SetProducts(v []*string) *PatchSource { + s.Products = v + return s +} + +// Information about the approval status of a patch. +type PatchStatus struct { + _ struct{} `type:"structure"` + + // The date the patch was approved (or will be approved if the status is PENDING_APPROVAL). + ApprovalDate *time.Time `type:"timestamp"` + + // The compliance severity level for a patch. 
+ ComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` + + // The approval status of a patch (APPROVED, PENDING_APPROVAL, EXPLICIT_APPROVED, + // EXPLICIT_REJECTED). + DeploymentStatus *string `type:"string" enum:"PatchDeploymentStatus"` +} + +// String returns the string representation +func (s PatchStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchStatus) GoString() string { + return s.String() +} + +// SetApprovalDate sets the ApprovalDate field's value. +func (s *PatchStatus) SetApprovalDate(v time.Time) *PatchStatus { + s.ApprovalDate = &v + return s +} + +// SetComplianceLevel sets the ComplianceLevel field's value. +func (s *PatchStatus) SetComplianceLevel(v string) *PatchStatus { + s.ComplianceLevel = &v + return s +} + +// SetDeploymentStatus sets the DeploymentStatus field's value. +func (s *PatchStatus) SetDeploymentStatus(v string) *PatchStatus { + s.DeploymentStatus = &v + return s +} + +// You specified more than the maximum number of allowed policies for the parameter. +// The maximum is 10. +type PoliciesLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s PoliciesLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PoliciesLimitExceededException) GoString() string { + return s.String() +} + +func newErrorPoliciesLimitExceededException(v protocol.ResponseMetadata) error { + return &PoliciesLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PoliciesLimitExceededException) Code() string { + return "PoliciesLimitExceededException" +} + +// Message returns the exception's message. 
+func (s *PoliciesLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PoliciesLimitExceededException) OrigErr() error { + return nil +} + +func (s *PoliciesLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PoliciesLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *PoliciesLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An aggregate of step execution statuses displayed in the AWS Console for +// a multi-Region and multi-account Automation execution. +type ProgressCounters struct { + _ struct{} `type:"structure"` + + // The total number of steps that the system cancelled in all specified AWS + // Regions and accounts for the current Automation execution. + CancelledSteps *int64 `type:"integer"` + + // The total number of steps that failed to run in all specified AWS Regions + // and accounts for the current Automation execution. + FailedSteps *int64 `type:"integer"` + + // The total number of steps that successfully completed in all specified AWS + // Regions and accounts for the current Automation execution. + SuccessSteps *int64 `type:"integer"` + + // The total number of steps that timed out in all specified AWS Regions and + // accounts for the current Automation execution. + TimedOutSteps *int64 `type:"integer"` + + // The total number of steps run in all specified AWS Regions and accounts for + // the current Automation execution. 
+ TotalSteps *int64 `type:"integer"` +} + +// String returns the string representation +func (s ProgressCounters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProgressCounters) GoString() string { + return s.String() +} + +// SetCancelledSteps sets the CancelledSteps field's value. +func (s *ProgressCounters) SetCancelledSteps(v int64) *ProgressCounters { + s.CancelledSteps = &v + return s +} + +// SetFailedSteps sets the FailedSteps field's value. +func (s *ProgressCounters) SetFailedSteps(v int64) *ProgressCounters { + s.FailedSteps = &v + return s +} + +// SetSuccessSteps sets the SuccessSteps field's value. +func (s *ProgressCounters) SetSuccessSteps(v int64) *ProgressCounters { + s.SuccessSteps = &v + return s +} + +// SetTimedOutSteps sets the TimedOutSteps field's value. +func (s *ProgressCounters) SetTimedOutSteps(v int64) *ProgressCounters { + s.TimedOutSteps = &v + return s +} + +// SetTotalSteps sets the TotalSteps field's value. +func (s *ProgressCounters) SetTotalSteps(v int64) *ProgressCounters { + s.TotalSteps = &v + return s +} + +type PutComplianceItemsInput struct { + _ struct{} `type:"structure"` + + // Specify the compliance type. For example, specify Association (for a State + // Manager association), Patch, or Custom:string. + // + // ComplianceType is a required field + ComplianceType *string `min:"1" type:"string" required:"true"` + + // A summary of the call execution that includes an execution ID, the type of + // execution (for example, Command), and the date/time of the execution using + // a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'. + // + // ExecutionSummary is a required field + ExecutionSummary *ComplianceExecutionSummary `type:"structure" required:"true"` + + // MD5 or SHA-256 content hash. The content hash is used to determine if existing + // information should be overwritten or ignored. 
If the content hashes match, + // the request to put compliance information is ignored. + ItemContentHash *string `type:"string"` + + // Information about the compliance as defined by the resource type. For example, + // for a patch compliance type, Items includes information about the PatchSeverity, + // Classification, and so on. + // + // Items is a required field + Items []*ComplianceItemEntry `type:"list" required:"true"` + + // Specify an ID for this resource. For a managed instance, this is the instance + // ID. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // Specify the type of resource. ManagedInstance is currently the only supported + // resource type. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` + + // The mode for uploading compliance items. You can specify COMPLETE or PARTIAL. + // In COMPLETE mode, the system overwrites all existing compliance information + // for the resource. You must provide a full list of compliance items each time + // you send the request. + // + // In PARTIAL mode, the system overwrites compliance information for a specific + // association. The association must be configured with SyncCompliance set to + // MANUAL. By default, all requests use COMPLETE mode. + // + // This attribute is only valid for association compliance. + UploadType *string `type:"string" enum:"ComplianceUploadType"` +} + +// String returns the string representation +func (s PutComplianceItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutComplianceItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutComplianceItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutComplianceItemsInput"} + if s.ComplianceType == nil { + invalidParams.Add(request.NewErrParamRequired("ComplianceType")) + } + if s.ComplianceType != nil && len(*s.ComplianceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ComplianceType", 1)) + } + if s.ExecutionSummary == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionSummary")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) + } + if s.ExecutionSummary != nil { + if err := s.ExecutionSummary.Validate(); err != nil { + invalidParams.AddNested("ExecutionSummary", err.(request.ErrInvalidParams)) + } + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *PutComplianceItemsInput) SetComplianceType(v string) *PutComplianceItemsInput { + s.ComplianceType = &v + return s +} + +// SetExecutionSummary sets the ExecutionSummary field's value. +func (s *PutComplianceItemsInput) SetExecutionSummary(v *ComplianceExecutionSummary) *PutComplianceItemsInput { + s.ExecutionSummary = v + return s +} + +// SetItemContentHash sets the ItemContentHash field's value. 
+func (s *PutComplianceItemsInput) SetItemContentHash(v string) *PutComplianceItemsInput { + s.ItemContentHash = &v + return s +} + +// SetItems sets the Items field's value. +func (s *PutComplianceItemsInput) SetItems(v []*ComplianceItemEntry) *PutComplianceItemsInput { + s.Items = v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *PutComplianceItemsInput) SetResourceId(v string) *PutComplianceItemsInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *PutComplianceItemsInput) SetResourceType(v string) *PutComplianceItemsInput { + s.ResourceType = &v + return s +} + +// SetUploadType sets the UploadType field's value. +func (s *PutComplianceItemsInput) SetUploadType(v string) *PutComplianceItemsInput { + s.UploadType = &v + return s +} + +type PutComplianceItemsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutComplianceItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutComplianceItemsOutput) GoString() string { + return s.String() +} + +type PutInventoryInput struct { + _ struct{} `type:"structure"` + + // An instance ID where you want to add or update inventory items. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` + + // The inventory items that you want to add or update on instances. + // + // Items is a required field + Items []*InventoryItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PutInventoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutInventoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutInventoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutInventoryInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil && len(s.Items) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Items", 1)) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceId sets the InstanceId field's value. +func (s *PutInventoryInput) SetInstanceId(v string) *PutInventoryInput { + s.InstanceId = &v + return s +} + +// SetItems sets the Items field's value. +func (s *PutInventoryInput) SetItems(v []*InventoryItem) *PutInventoryInput { + s.Items = v + return s +} + +type PutInventoryOutput struct { + _ struct{} `type:"structure"` + + // Information about the request. + Message *string `type:"string"` +} + +// String returns the string representation +func (s PutInventoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutInventoryOutput) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *PutInventoryOutput) SetMessage(v string) *PutInventoryOutput { + s.Message = &v + return s +} + +type PutParameterInput struct { + _ struct{} `type:"structure"` + + // A regular expression used to validate the parameter value. For example, for + // String types with values restricted to numbers, you can specify the following: + // AllowedPattern=^\d+$ + AllowedPattern *string `type:"string"` + + // The data type for a String parameter. Supported data types include plain + // text and Amazon Machine Image IDs. 
+ // + // The following data type values are supported. + // + // * text + // + // * aws:ec2:image + // + // When you create a String parameter and specify aws:ec2:image, Systems Manager + // validates the parameter value is in the required format, such as ami-12345abcdeEXAMPLE, + // and that the specified AMI is available in your AWS account. For more information, + // see Native parameter support for Amazon Machine Image IDs (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html) + // in the AWS Systems Manager User Guide. + DataType *string `type:"string"` + + // Information about the parameter that you want to add to the system. Optional + // but recommended. + // + // Do not enter personally identifiable information in this field. + Description *string `type:"string"` + + // The KMS Key ID that you want to use to encrypt a parameter. Either the default + // AWS Key Management Service (AWS KMS) key automatically assigned to your AWS + // account or a custom key. Required for parameters that use the SecureString + // data type. + // + // If you don't specify a key ID, the system uses the default key associated + // with your AWS account. + // + // * To use your default AWS KMS key, choose the SecureString data type, + // and do not specify the Key ID when you create the parameter. The system + // automatically populates Key ID with your default KMS key. + // + // * To use a custom KMS key, choose the SecureString data type with the + // Key ID parameter. + KeyId *string `min:"1" type:"string"` + + // The fully qualified name of the parameter that you want to add to the system. + // The fully qualified name includes the complete hierarchy of the parameter + // path and name. For parameters in a hierarchy, you must include a leading + // forward slash character (/) when you create or reference a parameter. 
For + // example: /Dev/DBServer/MySQL/db-string13 + // + // Naming Constraints: + // + // * Parameter names are case sensitive. + // + // * A parameter name must be unique within an AWS Region + // + // * A parameter name can't be prefixed with "aws" or "ssm" (case-insensitive). + // + // * Parameter names can include only the following symbols and letters: + // a-zA-Z0-9_.-/ + // + // * A parameter name can't include spaces. + // + // * Parameter hierarchies are limited to a maximum depth of fifteen levels. + // + // For additional information about valid values for parameter names, see About + // requirements and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // in the AWS Systems Manager User Guide. + // + // The maximum length constraint listed below includes capacity for additional + // system attributes that are not part of the name. The maximum length for a + // parameter name, including the full length of the parameter ARN, is 1011 characters. + // For example, the length of the following parameter name is 65 characters, + // not 20 characters: + // + // arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Overwrite an existing parameter. If not specified, will default to "false". + Overwrite *bool `type:"boolean"` + + // One or more policies to apply to a parameter. This action takes a JSON array. + // Parameter Store supports the following policy types: + // + // Expiration: This policy deletes the parameter after it expires. When you + // create the policy, you specify the expiration date. You can update the expiration + // date and time by updating the policy. Updating the parameter does not affect + // the expiration date and time. When the expiration time is reached, Parameter + // Store deletes the parameter. 
+ // + // ExpirationNotification: This policy triggers an event in Amazon CloudWatch + // Events that notifies you about the expiration. By using this policy, you + // can receive notification before or after the expiration time is reached, + // in units of days or hours. + // + // NoChangeNotification: This policy triggers a CloudWatch event if a parameter + // has not been modified for a specified period of time. This policy type is + // useful when, for example, a secret needs to be changed within a period of + // time, but it has not been changed. + // + // All existing policies are preserved until you send new policies or an empty + // policy. For more information about parameter policies, see Assigning parameter + // policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html). + Policies *string `min:"1" type:"string"` + + // Optional metadata that you assign to a resource. Tags enable you to categorize + // a resource in different ways, such as by purpose, owner, or environment. + // For example, you might want to tag a Systems Manager parameter to identify + // the type of resource to which it applies, the environment, or the type of + // configuration data referenced by the parameter. In this case, you could specify + // the following key name/value pairs: + // + // * Key=Resource,Value=S3bucket + // + // * Key=OS,Value=Windows + // + // * Key=ParameterType,Value=LicenseKey + // + // To add tags to an existing Systems Manager parameter, use the AddTagsToResource + // action. + Tags []*Tag `type:"list"` + + // The parameter tier to assign to a parameter. + // + // Parameter Store offers a standard tier and an advanced tier for parameters. + // Standard parameters have a content size limit of 4 KB and can't be configured + // to use parameter policies. You can create a maximum of 10,000 standard parameters + // for each Region in an AWS account. Standard parameters are offered at no + // additional cost. 
+ // + // Advanced parameters have a content size limit of 8 KB and can be configured + // to use parameter policies. You can create a maximum of 100,000 advanced parameters + // for each Region in an AWS account. Advanced parameters incur a charge. For + // more information, see Standard and advanced parameter tiers (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html) + // in the AWS Systems Manager User Guide. + // + // You can change a standard parameter to an advanced parameter any time. But + // you can't revert an advanced parameter to a standard parameter. Reverting + // an advanced parameter to a standard parameter would result in data loss because + // the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting + // would also remove any policies attached to the parameter. Lastly, advanced + // parameters use a different form of encryption than standard parameters. + // + // If you no longer need an advanced parameter, or if you no longer want to + // incur charges for an advanced parameter, you must delete it and recreate + // it as a new standard parameter. + // + // Using the Default Tier Configuration + // + // In PutParameter requests, you can specify the tier to create the parameter + // in. Whenever you specify a tier in the request, Parameter Store creates or + // updates the parameter according to that request. However, if you do not specify + // a tier in a request, Parameter Store assigns the tier based on the current + // Parameter Store default tier configuration. + // + // The default tier when you begin using Parameter Store is the standard-parameter + // tier. If you use the advanced-parameter tier, you can specify one of the + // following as the default: + // + // * Advanced: With this option, Parameter Store evaluates all requests as + // advanced parameters. 
+ // + // * Intelligent-Tiering: With this option, Parameter Store evaluates each + // request to determine if the parameter is standard or advanced. If the + // request doesn't include any options that require an advanced parameter, + // the parameter is created in the standard-parameter tier. If one or more + // options requiring an advanced parameter are included in the request, Parameter + // Store create a parameter in the advanced-parameter tier. This approach + // helps control your parameter-related costs by always creating standard + // parameters unless an advanced parameter is necessary. + // + // Options that require an advanced parameter include the following: + // + // * The content size of the parameter is more than 4 KB. + // + // * The parameter uses a parameter policy. + // + // * More than 10,000 parameters already exist in your AWS account in the + // current Region. + // + // For more information about configuring the default tier option, see Specifying + // a default parameter tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html) + // in the AWS Systems Manager User Guide. + Tier *string `type:"string" enum:"ParameterTier"` + + // The type of parameter that you want to add to the system. + // + // SecureString is not currently supported for AWS CloudFormation templates. + // + // Items in a StringList must be separated by a comma (,). You can't use other + // punctuation or special character to escape items in the list. If you have + // a parameter value that requires a comma, then use the String data type. + // + // Specifying a parameter type is not required when updating a parameter. You + // must specify a parameter type when creating a parameter. + Type *string `type:"string" enum:"ParameterType"` + + // The parameter value that you want to add to the system. Standard parameters + // have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. 
+ // + // Parameters can't be referenced or nested in the values of other parameters. + // You can't include {{}} or {{ssm:parameter-name}} in a parameter value. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutParameterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutParameterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutParameterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutParameterInput"} + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Policies != nil && len(*s.Policies) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policies", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedPattern sets the AllowedPattern field's value. +func (s *PutParameterInput) SetAllowedPattern(v string) *PutParameterInput { + s.AllowedPattern = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *PutParameterInput) SetDataType(v string) *PutParameterInput { + s.DataType = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *PutParameterInput) SetDescription(v string) *PutParameterInput { + s.Description = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *PutParameterInput) SetKeyId(v string) *PutParameterInput { + s.KeyId = &v + return s +} + +// SetName sets the Name field's value. +func (s *PutParameterInput) SetName(v string) *PutParameterInput { + s.Name = &v + return s +} + +// SetOverwrite sets the Overwrite field's value. +func (s *PutParameterInput) SetOverwrite(v bool) *PutParameterInput { + s.Overwrite = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *PutParameterInput) SetPolicies(v string) *PutParameterInput { + s.Policies = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutParameterInput) SetTags(v []*Tag) *PutParameterInput { + s.Tags = v + return s +} + +// SetTier sets the Tier field's value. +func (s *PutParameterInput) SetTier(v string) *PutParameterInput { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *PutParameterInput) SetType(v string) *PutParameterInput { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *PutParameterInput) SetValue(v string) *PutParameterInput { + s.Value = &v + return s +} + +type PutParameterOutput struct { + _ struct{} `type:"structure"` + + // The tier assigned to the parameter. + Tier *string `type:"string" enum:"ParameterTier"` + + // The new version number of a parameter. If you edit a parameter value, Parameter + // Store automatically creates a new version and assigns this new version a + // unique ID. You can reference a parameter version ID in API actions or in + // Systems Manager documents (SSM documents). By default, if you don't specify + // a specific version, the system returns the latest parameter value when a + // parameter is called. 
+ Version *int64 `type:"long"` +} + +// String returns the string representation +func (s PutParameterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutParameterOutput) GoString() string { + return s.String() +} + +// SetTier sets the Tier field's value. +func (s *PutParameterOutput) SetTier(v string) *PutParameterOutput { + s.Tier = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *PutParameterOutput) SetVersion(v int64) *PutParameterOutput { + s.Version = &v + return s +} + +type RegisterDefaultPatchBaselineInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline that should be the default patch baseline. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDefaultPatchBaselineInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDefaultPatchBaselineInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterDefaultPatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterDefaultPatchBaselineInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. +func (s *RegisterDefaultPatchBaselineInput) SetBaselineId(v string) *RegisterDefaultPatchBaselineInput { + s.BaselineId = &v + return s +} + +type RegisterDefaultPatchBaselineOutput struct { + _ struct{} `type:"structure"` + + // The ID of the default patch baseline. 
+ BaselineId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s RegisterDefaultPatchBaselineOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDefaultPatchBaselineOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *RegisterDefaultPatchBaselineOutput) SetBaselineId(v string) *RegisterDefaultPatchBaselineOutput { + s.BaselineId = &v + return s +} + +type RegisterPatchBaselineForPatchGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline to register the patch group with. + // + // BaselineId is a required field + BaselineId *string `min:"20" type:"string" required:"true"` + + // The name of the patch group that should be registered with the patch baseline. + // + // PatchGroup is a required field + PatchGroup *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterPatchBaselineForPatchGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterPatchBaselineForPatchGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterPatchBaselineForPatchGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterPatchBaselineForPatchGroupInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + if s.PatchGroup == nil { + invalidParams.Add(request.NewErrParamRequired("PatchGroup")) + } + if s.PatchGroup != nil && len(*s.PatchGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PatchGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaselineId sets the BaselineId field's value. +func (s *RegisterPatchBaselineForPatchGroupInput) SetBaselineId(v string) *RegisterPatchBaselineForPatchGroupInput { + s.BaselineId = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. +func (s *RegisterPatchBaselineForPatchGroupInput) SetPatchGroup(v string) *RegisterPatchBaselineForPatchGroupInput { + s.PatchGroup = &v + return s +} + +type RegisterPatchBaselineForPatchGroupOutput struct { + _ struct{} `type:"structure"` + + // The ID of the patch baseline the patch group was registered with. + BaselineId *string `min:"20" type:"string"` + + // The name of the patch group registered with the patch baseline. + PatchGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RegisterPatchBaselineForPatchGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterPatchBaselineForPatchGroupOutput) GoString() string { + return s.String() +} + +// SetBaselineId sets the BaselineId field's value. +func (s *RegisterPatchBaselineForPatchGroupOutput) SetBaselineId(v string) *RegisterPatchBaselineForPatchGroupOutput { + s.BaselineId = &v + return s +} + +// SetPatchGroup sets the PatchGroup field's value. 
+func (s *RegisterPatchBaselineForPatchGroupOutput) SetPatchGroup(v string) *RegisterPatchBaselineForPatchGroupOutput { + s.PatchGroup = &v + return s +} + +type RegisterTargetWithMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // User-provided idempotency token. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // An optional description for the target. + Description *string `min:"1" type:"string" sensitive:"true"` + + // An optional name for the target. + Name *string `min:"3" type:"string"` + + // User-provided value that will be included in any CloudWatch events raised + // while running tasks for these targets in this maintenance window. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // The type of target being registered with the maintenance window. + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"MaintenanceWindowResourceType"` + + // The targets to register with the maintenance window. In other words, the + // instances to run commands on when the maintenance window runs. + // + // You can specify targets using instance IDs, resource group names, or tags + // that have been applied to instances. 
+ // + // Example 1: Specify instance IDs + // + // Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 + // + // Example 2: Use tag key-pairs applied to instances + // + // Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 + // + // Example 3: Use tag-keys applied to instances + // + // Key=tag-key,Values=my-tag-key-1,my-tag-key-2 + // + // Example 4: Use resource group names + // + // Key=resource-groups:Name,Values=resource-group-name + // + // Example 5: Use filters for resource group types + // + // Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 + // + // For Key=resource-groups:ResourceTypeFilters, specify resource types in the + // following format + // + // Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC + // + // For more information about these examples formats, including the best use + // case for each one, see Examples: Register targets with a maintenance window + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html) + // in the AWS Systems Manager User Guide. + // + // Targets is a required field + Targets []*Target `type:"list" required:"true"` + + // The ID of the maintenance window the target should be registered with. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterTargetWithMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTargetWithMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterTargetWithMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterTargetWithMaintenanceWindowInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.OwnerInformation != nil && len(*s.OwnerInformation) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OwnerInformation", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetClientToken(v string) *RegisterTargetWithMaintenanceWindowInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetDescription(v string) *RegisterTargetWithMaintenanceWindowInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *RegisterTargetWithMaintenanceWindowInput) SetName(v string) *RegisterTargetWithMaintenanceWindowInput { + s.Name = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetOwnerInformation(v string) *RegisterTargetWithMaintenanceWindowInput { + s.OwnerInformation = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetResourceType(v string) *RegisterTargetWithMaintenanceWindowInput { + s.ResourceType = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetTargets(v []*Target) *RegisterTargetWithMaintenanceWindowInput { + s.Targets = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *RegisterTargetWithMaintenanceWindowInput) SetWindowId(v string) *RegisterTargetWithMaintenanceWindowInput { + s.WindowId = &v + return s +} + +type RegisterTargetWithMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the target definition in this maintenance window. + WindowTargetId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s RegisterTargetWithMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTargetWithMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *RegisterTargetWithMaintenanceWindowOutput) SetWindowTargetId(v string) *RegisterTargetWithMaintenanceWindowOutput { + s.WindowTargetId = &v + return s +} + +type RegisterTaskWithMaintenanceWindowInput struct { + _ struct{} `type:"structure"` + + // User-provided idempotency token. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // An optional description for the task. 
+ Description *string `min:"1" type:"string" sensitive:"true"` + + // A structure containing information about an S3 bucket to write instance-level + // logs to. + // + // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + LoggingInfo *LoggingInfo `type:"structure"` + + // The maximum number of targets this task can be run for in parallel. + // + // MaxConcurrency is a required field + MaxConcurrency *string `min:"1" type:"string" required:"true"` + + // The maximum number of errors allowed before this task stops being scheduled. + // + // MaxErrors is a required field + MaxErrors *string `min:"1" type:"string" required:"true"` + + // An optional name for the task. + Name *string `min:"3" type:"string"` + + // The priority of the task in the maintenance window, the lower the number + // the higher the priority. Tasks in a maintenance window are scheduled in priority + // order with tasks that have the same priority scheduled in parallel. + Priority *int64 `type:"integer"` + + // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. 
+ // + // For more information, see the following topics in the in the AWS Systems + // Manager User Guide: + // + // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I use a service-linked role or a custom service role to run maintenance + // window tasks? (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) + ServiceRoleArn *string `type:"string"` + + // The targets (either instances or maintenance window targets). + // + // Specify instances using the following format: + // + // Key=InstanceIds,Values=, + // + // Specify maintenance window targets using the following format: + // + // Key=WindowTargetIds;,Values=, + // + // Targets is a required field + Targets []*Target `type:"list" required:"true"` + + // The ARN of the task to run. + // + // TaskArn is a required field + TaskArn *string `min:"1" type:"string" required:"true"` + + // The parameters that the task should use during execution. Populate only the + // fields that match the task type. All other fields should be empty. + TaskInvocationParameters *MaintenanceWindowTaskInvocationParameters `type:"structure"` + + // The parameters that should be passed to the task when it is run. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` + + // The type of task being registered. 
+ // + // TaskType is a required field + TaskType *string `type:"string" required:"true" enum:"MaintenanceWindowTaskType"` + + // The ID of the maintenance window the task should be added to. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterTaskWithMaintenanceWindowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskWithMaintenanceWindowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterTaskWithMaintenanceWindowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterTaskWithMaintenanceWindowInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.MaxConcurrency == nil { + invalidParams.Add(request.NewErrParamRequired("MaxConcurrency")) + } + if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1)) + } + if s.MaxErrors == nil { + invalidParams.Add(request.NewErrParamRequired("MaxErrors")) + } + if s.MaxErrors != nil && len(*s.MaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1)) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.TaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("TaskArn")) + } + if s.TaskArn != nil && len(*s.TaskArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskArn", 1)) + } + if s.TaskType == nil { + invalidParams.Add(request.NewErrParamRequired("TaskType")) + } + 
if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.LoggingInfo != nil { + if err := s.LoggingInfo.Validate(); err != nil { + invalidParams.AddNested("LoggingInfo", err.(request.ErrInvalidParams)) + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TaskInvocationParameters != nil { + if err := s.TaskInvocationParameters.Validate(); err != nil { + invalidParams.AddNested("TaskInvocationParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetClientToken(v string) *RegisterTaskWithMaintenanceWindowInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetDescription(v string) *RegisterTaskWithMaintenanceWindowInput { + s.Description = &v + return s +} + +// SetLoggingInfo sets the LoggingInfo field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetLoggingInfo(v *LoggingInfo) *RegisterTaskWithMaintenanceWindowInput { + s.LoggingInfo = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetMaxConcurrency(v string) *RegisterTaskWithMaintenanceWindowInput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetMaxErrors(v string) *RegisterTaskWithMaintenanceWindowInput { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *RegisterTaskWithMaintenanceWindowInput) SetName(v string) *RegisterTaskWithMaintenanceWindowInput { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetPriority(v int64) *RegisterTaskWithMaintenanceWindowInput { + s.Priority = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetServiceRoleArn(v string) *RegisterTaskWithMaintenanceWindowInput { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetTargets(v []*Target) *RegisterTaskWithMaintenanceWindowInput { + s.Targets = v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetTaskArn(v string) *RegisterTaskWithMaintenanceWindowInput { + s.TaskArn = &v + return s +} + +// SetTaskInvocationParameters sets the TaskInvocationParameters field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetTaskInvocationParameters(v *MaintenanceWindowTaskInvocationParameters) *RegisterTaskWithMaintenanceWindowInput { + s.TaskInvocationParameters = v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetTaskParameters(v map[string]*MaintenanceWindowTaskParameterValueExpression) *RegisterTaskWithMaintenanceWindowInput { + s.TaskParameters = v + return s +} + +// SetTaskType sets the TaskType field's value. +func (s *RegisterTaskWithMaintenanceWindowInput) SetTaskType(v string) *RegisterTaskWithMaintenanceWindowInput { + s.TaskType = &v + return s +} + +// SetWindowId sets the WindowId field's value. 
+func (s *RegisterTaskWithMaintenanceWindowInput) SetWindowId(v string) *RegisterTaskWithMaintenanceWindowInput { + s.WindowId = &v + return s +} + +type RegisterTaskWithMaintenanceWindowOutput struct { + _ struct{} `type:"structure"` + + // The ID of the task in the maintenance window. + WindowTaskId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s RegisterTaskWithMaintenanceWindowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTaskWithMaintenanceWindowOutput) GoString() string { + return s.String() +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *RegisterTaskWithMaintenanceWindowOutput) SetWindowTaskId(v string) *RegisterTaskWithMaintenanceWindowOutput { + s.WindowTaskId = &v + return s +} + +// An OpsItems that shares something in common with the current OpsItem. For +// example, related OpsItems can include OpsItems with similar error messages, +// impacted resources, or statuses for the impacted resource. +type RelatedOpsItem struct { + _ struct{} `type:"structure"` + + // The ID of an OpsItem related to the current OpsItem. + // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RelatedOpsItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RelatedOpsItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RelatedOpsItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RelatedOpsItem"} + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpsItemId sets the OpsItemId field's value. 
+func (s *RelatedOpsItem) SetOpsItemId(v string) *RelatedOpsItem { + s.OpsItemId = &v + return s +} + +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the resource from which you want to remove tags. For example: + // + // ManagedInstance: mi-012345abcde + // + // MaintenanceWindow: mw-012345abcde + // + // PatchBaseline: pb-012345abcde + // + // For the Document and Parameter values, use the name of the resource. + // + // The ManagedInstance type for this API action is only for on-premises managed + // instances. Specify the name of the managed instance in the following format: + // mi-ID_number. For example, mi-1a2b3c4d5e6f. + // + // ResourceId is a required field + ResourceId *string `type:"string" required:"true"` + + // The type of resource from which you want to remove a tag. + // + // The ManagedInstance type for this API action is only for on-premises managed + // instances. Specify the name of the managed instance in the following format: + // mi-ID_number. For example, mi-1a2b3c4d5e6f. + // + // ResourceType is a required field + ResourceType *string `type:"string" required:"true" enum:"ResourceTypeForTagging"` + + // Tag keys that you want to remove from the specified resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *RemoveTagsFromResourceInput) SetResourceId(v string) *RemoveTagsFromResourceInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *RemoveTagsFromResourceInput) SetResourceType(v string) *RemoveTagsFromResourceInput { + s.ResourceType = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *RemoveTagsFromResourceInput) SetTagKeys(v []*string) *RemoveTagsFromResourceInput { + s.TagKeys = v + return s +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + +// The request body of the ResetServiceSetting API action. +type ResetServiceSettingInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the service setting to reset. The setting + // ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, + // or /ssm/managed-instance/activation-tier. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. 
+ // + // SettingId is a required field + SettingId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetServiceSettingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetServiceSettingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetServiceSettingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetServiceSettingInput"} + if s.SettingId == nil { + invalidParams.Add(request.NewErrParamRequired("SettingId")) + } + if s.SettingId != nil && len(*s.SettingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SettingId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSettingId sets the SettingId field's value. +func (s *ResetServiceSettingInput) SetSettingId(v string) *ResetServiceSettingInput { + s.SettingId = &v + return s +} + +// The result body of the ResetServiceSetting API action. +type ResetServiceSettingOutput struct { + _ struct{} `type:"structure"` + + // The current, effective service setting after calling the ResetServiceSetting + // API action. + ServiceSetting *ServiceSetting `type:"structure"` +} + +// String returns the string representation +func (s ResetServiceSettingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetServiceSettingOutput) GoString() string { + return s.String() +} + +// SetServiceSetting sets the ServiceSetting field's value. +func (s *ResetServiceSettingOutput) SetServiceSetting(v *ServiceSetting) *ResetServiceSettingOutput { + s.ServiceSetting = v + return s +} + +// Information about targets that resolved during the Automation execution. 
+type ResolvedTargets struct { + _ struct{} `type:"structure"` + + // A list of parameter values sent to targets that resolved during the Automation + // execution. + ParameterValues []*string `type:"list"` + + // A boolean value indicating whether the resolved target list is truncated. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ResolvedTargets) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolvedTargets) GoString() string { + return s.String() +} + +// SetParameterValues sets the ParameterValues field's value. +func (s *ResolvedTargets) SetParameterValues(v []*string) *ResolvedTargets { + s.ParameterValues = v + return s +} + +// SetTruncated sets the Truncated field's value. +func (s *ResolvedTargets) SetTruncated(v bool) *ResolvedTargets { + s.Truncated = &v + return s +} + +// Compliance summary information for a specific resource. +type ResourceComplianceSummaryItem struct { + _ struct{} `type:"structure"` + + // The compliance type. + ComplianceType *string `min:"1" type:"string"` + + // A list of items that are compliant for the resource. + CompliantSummary *CompliantSummary `type:"structure"` + + // Information about the execution. + ExecutionSummary *ComplianceExecutionSummary `type:"structure"` + + // A list of items that aren't compliant for the resource. + NonCompliantSummary *NonCompliantSummary `type:"structure"` + + // The highest severity item found for the resource. The resource is compliant + // for this item. + OverallSeverity *string `type:"string" enum:"ComplianceSeverity"` + + // The resource ID. + ResourceId *string `min:"1" type:"string"` + + // The resource type. + ResourceType *string `min:"1" type:"string"` + + // The compliance status for the resource. 
+ Status *string `type:"string" enum:"ComplianceStatus"` +} + +// String returns the string representation +func (s ResourceComplianceSummaryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceComplianceSummaryItem) GoString() string { + return s.String() +} + +// SetComplianceType sets the ComplianceType field's value. +func (s *ResourceComplianceSummaryItem) SetComplianceType(v string) *ResourceComplianceSummaryItem { + s.ComplianceType = &v + return s +} + +// SetCompliantSummary sets the CompliantSummary field's value. +func (s *ResourceComplianceSummaryItem) SetCompliantSummary(v *CompliantSummary) *ResourceComplianceSummaryItem { + s.CompliantSummary = v + return s +} + +// SetExecutionSummary sets the ExecutionSummary field's value. +func (s *ResourceComplianceSummaryItem) SetExecutionSummary(v *ComplianceExecutionSummary) *ResourceComplianceSummaryItem { + s.ExecutionSummary = v + return s +} + +// SetNonCompliantSummary sets the NonCompliantSummary field's value. +func (s *ResourceComplianceSummaryItem) SetNonCompliantSummary(v *NonCompliantSummary) *ResourceComplianceSummaryItem { + s.NonCompliantSummary = v + return s +} + +// SetOverallSeverity sets the OverallSeverity field's value. +func (s *ResourceComplianceSummaryItem) SetOverallSeverity(v string) *ResourceComplianceSummaryItem { + s.OverallSeverity = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ResourceComplianceSummaryItem) SetResourceId(v string) *ResourceComplianceSummaryItem { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ResourceComplianceSummaryItem) SetResourceType(v string) *ResourceComplianceSummaryItem { + s.ResourceType = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *ResourceComplianceSummaryItem) SetStatus(v string) *ResourceComplianceSummaryItem { + s.Status = &v + return s +} + +// A sync configuration with the same name already exists. +type ResourceDataSyncAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + SyncName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncAlreadyExistsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncAlreadyExistsException) GoString() string { + return s.String() +} + +func newErrorResourceDataSyncAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ResourceDataSyncAlreadyExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDataSyncAlreadyExistsException) Code() string { + return "ResourceDataSyncAlreadyExistsException" +} + +// Message returns the exception's message. +func (s *ResourceDataSyncAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDataSyncAlreadyExistsException) OrigErr() error { + return nil +} + +func (s *ResourceDataSyncAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDataSyncAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDataSyncAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about the AwsOrganizationsSource resource data sync source. 
A +// sync source of this type can synchronize data from AWS Organizations or, +// if an AWS Organization is not present, from multiple AWS Regions. +type ResourceDataSyncAwsOrganizationsSource struct { + _ struct{} `type:"structure"` + + // If an AWS Organization is present, this is either OrganizationalUnits or + // EntireOrganization. For OrganizationalUnits, the data is aggregated from + // a set of organization units. For EntireOrganization, the data is aggregated + // from the entire AWS Organization. + // + // OrganizationSourceType is a required field + OrganizationSourceType *string `min:"1" type:"string" required:"true"` + + // The AWS Organizations organization units included in the sync. + OrganizationalUnits []*ResourceDataSyncOrganizationalUnit `min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceDataSyncAwsOrganizationsSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncAwsOrganizationsSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ResourceDataSyncAwsOrganizationsSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncAwsOrganizationsSource"} + if s.OrganizationSourceType == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationSourceType")) + } + if s.OrganizationSourceType != nil && len(*s.OrganizationSourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationSourceType", 1)) + } + if s.OrganizationalUnits != nil && len(s.OrganizationalUnits) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnits", 1)) + } + if s.OrganizationalUnits != nil { + for i, v := range s.OrganizationalUnits { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OrganizationalUnits", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOrganizationSourceType sets the OrganizationSourceType field's value. +func (s *ResourceDataSyncAwsOrganizationsSource) SetOrganizationSourceType(v string) *ResourceDataSyncAwsOrganizationsSource { + s.OrganizationSourceType = &v + return s +} + +// SetOrganizationalUnits sets the OrganizationalUnits field's value. +func (s *ResourceDataSyncAwsOrganizationsSource) SetOrganizationalUnits(v []*ResourceDataSyncOrganizationalUnit) *ResourceDataSyncAwsOrganizationsSource { + s.OrganizationalUnits = v + return s +} + +// Another UpdateResourceDataSync request is being processed. Wait a few minutes +// and try again. 
+type ResourceDataSyncConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncConflictException) GoString() string { + return s.String() +} + +func newErrorResourceDataSyncConflictException(v protocol.ResponseMetadata) error { + return &ResourceDataSyncConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDataSyncConflictException) Code() string { + return "ResourceDataSyncConflictException" +} + +// Message returns the exception's message. +func (s *ResourceDataSyncConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDataSyncConflictException) OrigErr() error { + return nil +} + +func (s *ResourceDataSyncConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDataSyncConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDataSyncConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You have exceeded the allowed maximum sync configurations. 
+type ResourceDataSyncCountExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncCountExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncCountExceededException) GoString() string { + return s.String() +} + +func newErrorResourceDataSyncCountExceededException(v protocol.ResponseMetadata) error { + return &ResourceDataSyncCountExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDataSyncCountExceededException) Code() string { + return "ResourceDataSyncCountExceededException" +} + +// Message returns the exception's message. +func (s *ResourceDataSyncCountExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDataSyncCountExceededException) OrigErr() error { + return nil +} + +func (s *ResourceDataSyncCountExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDataSyncCountExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDataSyncCountExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Synchronize Systems Manager Inventory data from multiple AWS accounts defined +// in AWS Organizations to a centralized S3 bucket. Data is synchronized to +// individual key prefixes in the central bucket. Each key prefix represents +// a different AWS account ID. 
+type ResourceDataSyncDestinationDataSharing struct { + _ struct{} `type:"structure"` + + // The sharing data type. Only Organization is supported. + DestinationDataSharingType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncDestinationDataSharing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncDestinationDataSharing) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncDestinationDataSharing) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncDestinationDataSharing"} + if s.DestinationDataSharingType != nil && len(*s.DestinationDataSharingType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationDataSharingType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationDataSharingType sets the DestinationDataSharingType field's value. +func (s *ResourceDataSyncDestinationDataSharing) SetDestinationDataSharingType(v string) *ResourceDataSyncDestinationDataSharing { + s.DestinationDataSharingType = &v + return s +} + +// The specified sync configuration is invalid. 
+type ResourceDataSyncInvalidConfigurationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncInvalidConfigurationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncInvalidConfigurationException) GoString() string { + return s.String() +} + +func newErrorResourceDataSyncInvalidConfigurationException(v protocol.ResponseMetadata) error { + return &ResourceDataSyncInvalidConfigurationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDataSyncInvalidConfigurationException) Code() string { + return "ResourceDataSyncInvalidConfigurationException" +} + +// Message returns the exception's message. +func (s *ResourceDataSyncInvalidConfigurationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDataSyncInvalidConfigurationException) OrigErr() error { + return nil +} + +func (s *ResourceDataSyncInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDataSyncInvalidConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDataSyncInvalidConfigurationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about a Resource Data Sync configuration, including its current +// status and last successful sync. +type ResourceDataSyncItem struct { + _ struct{} `type:"structure"` + + // The status reported by the last sync. 
+ LastStatus *string `type:"string" enum:"LastResourceDataSyncStatus"` + + // The last time the sync operations returned a status of SUCCESSFUL (UTC). + LastSuccessfulSyncTime *time.Time `type:"timestamp"` + + // The status message details reported by the last sync. + LastSyncStatusMessage *string `type:"string"` + + // The last time the configuration attempted to sync (UTC). + LastSyncTime *time.Time `type:"timestamp"` + + // Configuration information for the target S3 bucket. + S3Destination *ResourceDataSyncS3Destination `type:"structure"` + + // The date and time the configuration was created (UTC). + SyncCreatedTime *time.Time `type:"timestamp"` + + // The date and time the resource data sync was changed. + SyncLastModifiedTime *time.Time `type:"timestamp"` + + // The name of the Resource Data Sync. + SyncName *string `min:"1" type:"string"` + + // Information about the source where the data was synchronized. + SyncSource *ResourceDataSyncSourceWithState `type:"structure"` + + // The type of resource data sync. If SyncType is SyncToDestination, then the + // resource data sync synchronizes data to an S3 bucket. If the SyncType is + // SyncFromSource then the resource data sync synchronizes data from AWS Organizations + // or from multiple AWS Regions. + SyncType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncItem) GoString() string { + return s.String() +} + +// SetLastStatus sets the LastStatus field's value. +func (s *ResourceDataSyncItem) SetLastStatus(v string) *ResourceDataSyncItem { + s.LastStatus = &v + return s +} + +// SetLastSuccessfulSyncTime sets the LastSuccessfulSyncTime field's value. 
+func (s *ResourceDataSyncItem) SetLastSuccessfulSyncTime(v time.Time) *ResourceDataSyncItem { + s.LastSuccessfulSyncTime = &v + return s +} + +// SetLastSyncStatusMessage sets the LastSyncStatusMessage field's value. +func (s *ResourceDataSyncItem) SetLastSyncStatusMessage(v string) *ResourceDataSyncItem { + s.LastSyncStatusMessage = &v + return s +} + +// SetLastSyncTime sets the LastSyncTime field's value. +func (s *ResourceDataSyncItem) SetLastSyncTime(v time.Time) *ResourceDataSyncItem { + s.LastSyncTime = &v + return s +} + +// SetS3Destination sets the S3Destination field's value. +func (s *ResourceDataSyncItem) SetS3Destination(v *ResourceDataSyncS3Destination) *ResourceDataSyncItem { + s.S3Destination = v + return s +} + +// SetSyncCreatedTime sets the SyncCreatedTime field's value. +func (s *ResourceDataSyncItem) SetSyncCreatedTime(v time.Time) *ResourceDataSyncItem { + s.SyncCreatedTime = &v + return s +} + +// SetSyncLastModifiedTime sets the SyncLastModifiedTime field's value. +func (s *ResourceDataSyncItem) SetSyncLastModifiedTime(v time.Time) *ResourceDataSyncItem { + s.SyncLastModifiedTime = &v + return s +} + +// SetSyncName sets the SyncName field's value. +func (s *ResourceDataSyncItem) SetSyncName(v string) *ResourceDataSyncItem { + s.SyncName = &v + return s +} + +// SetSyncSource sets the SyncSource field's value. +func (s *ResourceDataSyncItem) SetSyncSource(v *ResourceDataSyncSourceWithState) *ResourceDataSyncItem { + s.SyncSource = v + return s +} + +// SetSyncType sets the SyncType field's value. +func (s *ResourceDataSyncItem) SetSyncType(v string) *ResourceDataSyncItem { + s.SyncType = &v + return s +} + +// The specified sync name was not found. 
+type ResourceDataSyncNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + SyncName *string `min:"1" type:"string"` + + SyncType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceDataSyncNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceDataSyncNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceDataSyncNotFoundException) Code() string { + return "ResourceDataSyncNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceDataSyncNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceDataSyncNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceDataSyncNotFoundException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceDataSyncNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceDataSyncNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The AWS Organizations organizational unit data source for the sync. +type ResourceDataSyncOrganizationalUnit struct { + _ struct{} `type:"structure"` + + // The AWS Organization unit ID data source for the sync. 
+ OrganizationalUnitId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncOrganizationalUnit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncOrganizationalUnit) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncOrganizationalUnit) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncOrganizationalUnit"} + if s.OrganizationalUnitId != nil && len(*s.OrganizationalUnitId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnitId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOrganizationalUnitId sets the OrganizationalUnitId field's value. +func (s *ResourceDataSyncOrganizationalUnit) SetOrganizationalUnitId(v string) *ResourceDataSyncOrganizationalUnit { + s.OrganizationalUnitId = &v + return s +} + +// Information about the target S3 bucket for the Resource Data Sync. +type ResourceDataSyncS3Destination struct { + _ struct{} `type:"structure"` + + // The ARN of an encryption key for a destination in Amazon S3. Must belong + // to the same Region as the destination S3 bucket. + AWSKMSKeyARN *string `min:"1" type:"string"` + + // The name of the S3 bucket where the aggregated data is stored. + // + // BucketName is a required field + BucketName *string `min:"1" type:"string" required:"true"` + + // Enables destination data sharing. By default, this field is null. + DestinationDataSharing *ResourceDataSyncDestinationDataSharing `type:"structure"` + + // An Amazon S3 prefix for the bucket. + Prefix *string `min:"1" type:"string"` + + // The AWS Region with the S3 bucket targeted by the Resource Data Sync. + // + // Region is a required field + Region *string `min:"1" type:"string" required:"true"` + + // A supported sync format. 
The following format is currently supported: JsonSerDe + // + // SyncFormat is a required field + SyncFormat *string `type:"string" required:"true" enum:"ResourceDataSyncS3Format"` +} + +// String returns the string representation +func (s ResourceDataSyncS3Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncS3Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncS3Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncS3Destination"} + if s.AWSKMSKeyARN != nil && len(*s.AWSKMSKeyARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AWSKMSKeyARN", 1)) + } + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 1)) + } + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + } + if s.Region == nil { + invalidParams.Add(request.NewErrParamRequired("Region")) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + if s.SyncFormat == nil { + invalidParams.Add(request.NewErrParamRequired("SyncFormat")) + } + if s.DestinationDataSharing != nil { + if err := s.DestinationDataSharing.Validate(); err != nil { + invalidParams.AddNested("DestinationDataSharing", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAWSKMSKeyARN sets the AWSKMSKeyARN field's value. +func (s *ResourceDataSyncS3Destination) SetAWSKMSKeyARN(v string) *ResourceDataSyncS3Destination { + s.AWSKMSKeyARN = &v + return s +} + +// SetBucketName sets the BucketName field's value. 
+func (s *ResourceDataSyncS3Destination) SetBucketName(v string) *ResourceDataSyncS3Destination { + s.BucketName = &v + return s +} + +// SetDestinationDataSharing sets the DestinationDataSharing field's value. +func (s *ResourceDataSyncS3Destination) SetDestinationDataSharing(v *ResourceDataSyncDestinationDataSharing) *ResourceDataSyncS3Destination { + s.DestinationDataSharing = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ResourceDataSyncS3Destination) SetPrefix(v string) *ResourceDataSyncS3Destination { + s.Prefix = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *ResourceDataSyncS3Destination) SetRegion(v string) *ResourceDataSyncS3Destination { + s.Region = &v + return s +} + +// SetSyncFormat sets the SyncFormat field's value. +func (s *ResourceDataSyncS3Destination) SetSyncFormat(v string) *ResourceDataSyncS3Destination { + s.SyncFormat = &v + return s +} + +// Information about the source of the data included in the resource data sync. +type ResourceDataSyncSource struct { + _ struct{} `type:"structure"` + + // Information about the AwsOrganizationsSource resource data sync source. A + // sync source of this type can synchronize data from AWS Organizations. + AwsOrganizationsSource *ResourceDataSyncAwsOrganizationsSource `type:"structure"` + + // Whether to automatically synchronize and aggregate data from new AWS Regions + // when those Regions come online. + IncludeFutureRegions *bool `type:"boolean"` + + // The SyncSource AWS Regions included in the resource data sync. + // + // SourceRegions is a required field + SourceRegions []*string `type:"list" required:"true"` + + // The type of data source for the resource data sync. SourceType is either + // AwsOrganizations (if an organization is present in AWS Organizations) or + // singleAccountMultiRegions. 
+ // + // SourceType is a required field + SourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceDataSyncSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncSource"} + if s.SourceRegions == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegions")) + } + if s.SourceType == nil { + invalidParams.Add(request.NewErrParamRequired("SourceType")) + } + if s.SourceType != nil && len(*s.SourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceType", 1)) + } + if s.AwsOrganizationsSource != nil { + if err := s.AwsOrganizationsSource.Validate(); err != nil { + invalidParams.AddNested("AwsOrganizationsSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsOrganizationsSource sets the AwsOrganizationsSource field's value. +func (s *ResourceDataSyncSource) SetAwsOrganizationsSource(v *ResourceDataSyncAwsOrganizationsSource) *ResourceDataSyncSource { + s.AwsOrganizationsSource = v + return s +} + +// SetIncludeFutureRegions sets the IncludeFutureRegions field's value. +func (s *ResourceDataSyncSource) SetIncludeFutureRegions(v bool) *ResourceDataSyncSource { + s.IncludeFutureRegions = &v + return s +} + +// SetSourceRegions sets the SourceRegions field's value. +func (s *ResourceDataSyncSource) SetSourceRegions(v []*string) *ResourceDataSyncSource { + s.SourceRegions = v + return s +} + +// SetSourceType sets the SourceType field's value. 
+func (s *ResourceDataSyncSource) SetSourceType(v string) *ResourceDataSyncSource { + s.SourceType = &v + return s +} + +// The data type name for including resource data sync state. There are four +// sync states: +// +// OrganizationNotExists (Your organization doesn't exist) +// +// NoPermissions (The system can't locate the service-linked role. This role +// is automatically created when a user creates a resource data sync in Explorer.) +// +// InvalidOrganizationalUnit (You specified or selected an invalid unit in the +// resource data sync configuration.) +// +// TrustedAccessDisabled (You disabled Systems Manager access in the organization +// in AWS Organizations.) +type ResourceDataSyncSourceWithState struct { + _ struct{} `type:"structure"` + + // The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource + // type. + AwsOrganizationsSource *ResourceDataSyncAwsOrganizationsSource `type:"structure"` + + // Whether to automatically synchronize and aggregate data from new AWS Regions + // when those Regions come online. + IncludeFutureRegions *bool `type:"boolean"` + + // The SyncSource AWS Regions included in the resource data sync. + SourceRegions []*string `type:"list"` + + // The type of data source for the resource data sync. SourceType is either + // AwsOrganizations (if an organization is present in AWS Organizations) or + // singleAccountMultiRegions. + SourceType *string `min:"1" type:"string"` + + // The data type name for including resource data sync state. There are four + // sync states: + // + // OrganizationNotExists: Your organization doesn't exist. + // + // NoPermissions: The system can't locate the service-linked role. This role + // is automatically created when a user creates a resource data sync in Explorer. + // + // InvalidOrganizationalUnit: You specified or selected an invalid unit in the + // resource data sync configuration. 
+ // + // TrustedAccessDisabled: You disabled Systems Manager access in the organization + // in AWS Organizations. + State *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncSourceWithState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncSourceWithState) GoString() string { + return s.String() +} + +// SetAwsOrganizationsSource sets the AwsOrganizationsSource field's value. +func (s *ResourceDataSyncSourceWithState) SetAwsOrganizationsSource(v *ResourceDataSyncAwsOrganizationsSource) *ResourceDataSyncSourceWithState { + s.AwsOrganizationsSource = v + return s +} + +// SetIncludeFutureRegions sets the IncludeFutureRegions field's value. +func (s *ResourceDataSyncSourceWithState) SetIncludeFutureRegions(v bool) *ResourceDataSyncSourceWithState { + s.IncludeFutureRegions = &v + return s +} + +// SetSourceRegions sets the SourceRegions field's value. +func (s *ResourceDataSyncSourceWithState) SetSourceRegions(v []*string) *ResourceDataSyncSourceWithState { + s.SourceRegions = v + return s +} + +// SetSourceType sets the SourceType field's value. +func (s *ResourceDataSyncSourceWithState) SetSourceType(v string) *ResourceDataSyncSourceWithState { + s.SourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *ResourceDataSyncSourceWithState) SetState(v string) *ResourceDataSyncSourceWithState { + s.State = &v + return s +} + +// Error returned if an attempt is made to delete a patch baseline that is registered +// for a patch group. 
+type ResourceInUseException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// Message_ holds the error text returned by the service; callers read it
+	// through the Message() accessor below.
+	Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation
+func (s ResourceInUseException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResourceInUseException) GoString() string {
+	return s.String()
+}
+
+// newErrorResourceInUseException constructs the typed API error from the
+// response metadata attached to the service reply.
+func newErrorResourceInUseException(v protocol.ResponseMetadata) error {
+	return &ResourceInUseException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceInUseException) Code() string {
+	return "ResourceInUseException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceInUseException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceInUseException) OrigErr() error {
+	return nil
+}
+
+// Error formats the exception as "Code: Message".
+func (s *ResourceInUseException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceInUseException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceInUseException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Error returned when the caller has exceeded the default resource quotas.
+// For example, too many maintenance windows or patch baselines have been created.
+//
+// For information about resource quotas in Systems Manager, see Systems Manager
+// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm)
+// in the AWS General Reference.
+type ResourceLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceLimitExceededException) GoString() string { + return s.String() +} + +func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { + return &ResourceLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceLimitExceededException) Code() string { + return "ResourceLimitExceededException" +} + +// Message returns the exception's message. +func (s *ResourceLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceLimitExceededException) OrigErr() error { + return nil +} + +func (s *ResourceLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The inventory item result attribute. +type ResultAttribute struct { + _ struct{} `type:"structure"` + + // Name of the inventory item type. Valid value: AWS:InstanceInformation. Default + // Value: AWS:InstanceInformation. 
+ // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResultAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResultAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResultAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResultAttribute"} + if s.TypeName == nil { + invalidParams.Add(request.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TypeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTypeName sets the TypeName field's value. +func (s *ResultAttribute) SetTypeName(v string) *ResultAttribute { + s.TypeName = &v + return s +} + +type ResumeSessionInput struct { + _ struct{} `type:"structure"` + + // The ID of the disconnected session to resume. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResumeSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResumeSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResumeSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. 
+func (s *ResumeSessionInput) SetSessionId(v string) *ResumeSessionInput { + s.SessionId = &v + return s +} + +type ResumeSessionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the session. + SessionId *string `min:"1" type:"string"` + + // A URL back to SSM Agent on the instance that the Session Manager client uses + // to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output). + // + // region represents the Region identifier for an AWS Region supported by AWS + // Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list + // of supported region values, see the Region column in Systems Manager service + // endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) + // in the AWS General Reference. + // + // session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE. + StreamUrl *string `type:"string"` + + // An encrypted token value containing session and caller information. Used + // to authenticate the connection to the instance. + TokenValue *string `type:"string"` +} + +// String returns the string representation +func (s ResumeSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeSessionOutput) GoString() string { + return s.String() +} + +// SetSessionId sets the SessionId field's value. +func (s *ResumeSessionOutput) SetSessionId(v string) *ResumeSessionOutput { + s.SessionId = &v + return s +} + +// SetStreamUrl sets the StreamUrl field's value. +func (s *ResumeSessionOutput) SetStreamUrl(v string) *ResumeSessionOutput { + s.StreamUrl = &v + return s +} + +// SetTokenValue sets the TokenValue field's value. +func (s *ResumeSessionOutput) SetTokenValue(v string) *ResumeSessionOutput { + s.TokenValue = &v + return s +} + +// An S3 bucket where you want to store the results of this request. 
+type S3OutputLocation struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the S3 bucket.
+	OutputS3BucketName *string `min:"3" type:"string"`
+
+	// The S3 bucket subfolder.
+	OutputS3KeyPrefix *string `type:"string"`
+
+	// (Deprecated) You can no longer specify this parameter. The system ignores
+	// it. Instead, Systems Manager automatically determines the Region of the S3
+	// bucket.
+	OutputS3Region *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s S3OutputLocation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s S3OutputLocation) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// The length checks mirror the min:"3" constraints declared in the struct
+// tags above; nil (unset) fields are considered valid because both fields
+// are optional.
+func (s *S3OutputLocation) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "S3OutputLocation"}
+	if s.OutputS3BucketName != nil && len(*s.OutputS3BucketName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("OutputS3BucketName", 3))
+	}
+	if s.OutputS3Region != nil && len(*s.OutputS3Region) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("OutputS3Region", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetOutputS3BucketName sets the OutputS3BucketName field's value.
+func (s *S3OutputLocation) SetOutputS3BucketName(v string) *S3OutputLocation {
+	s.OutputS3BucketName = &v
+	return s
+}
+
+// SetOutputS3KeyPrefix sets the OutputS3KeyPrefix field's value.
+func (s *S3OutputLocation) SetOutputS3KeyPrefix(v string) *S3OutputLocation {
+	s.OutputS3KeyPrefix = &v
+	return s
+}
+
+// SetOutputS3Region sets the OutputS3Region field's value.
+func (s *S3OutputLocation) SetOutputS3Region(v string) *S3OutputLocation {
+	s.OutputS3Region = &v
+	return s
+}
+
+// A URL for the S3 bucket where you want to store the results of this request.
+type S3OutputUrl struct { + _ struct{} `type:"structure"` + + // A URL for an S3 bucket where you want to store the results of this request. + OutputUrl *string `type:"string"` +} + +// String returns the string representation +func (s S3OutputUrl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3OutputUrl) GoString() string { + return s.String() +} + +// SetOutputUrl sets the OutputUrl field's value. +func (s *S3OutputUrl) SetOutputUrl(v string) *S3OutputUrl { + s.OutputUrl = &v + return s +} + +// Information about a scheduled execution for a maintenance window. +type ScheduledWindowExecution struct { + _ struct{} `type:"structure"` + + // The time, in ISO-8601 Extended format, that the maintenance window is scheduled + // to be run. + ExecutionTime *string `type:"string"` + + // The name of the maintenance window to be run. + Name *string `min:"3" type:"string"` + + // The ID of the maintenance window to be run. + WindowId *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s ScheduledWindowExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledWindowExecution) GoString() string { + return s.String() +} + +// SetExecutionTime sets the ExecutionTime field's value. +func (s *ScheduledWindowExecution) SetExecutionTime(v string) *ScheduledWindowExecution { + s.ExecutionTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *ScheduledWindowExecution) SetName(v string) *ScheduledWindowExecution { + s.Name = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *ScheduledWindowExecution) SetWindowId(v string) *ScheduledWindowExecution { + s.WindowId = &v + return s +} + +type SendAutomationSignalInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for an existing Automation execution that you want + // to send the signal to. 
+ // + // AutomationExecutionId is a required field + AutomationExecutionId *string `min:"36" type:"string" required:"true"` + + // The data sent with the signal. The data schema depends on the type of signal + // used in the request. + // + // For Approve and Reject signal types, the payload is an optional comment that + // you can send with the signal type. For example: + // + // Comment="Looks good" + // + // For StartStep and Resume signal types, you must send the name of the Automation + // step to start or resume as the payload. For example: + // + // StepName="step1" + // + // For the StopStep signal type, you must send the step execution ID as the + // payload. For example: + // + // StepExecutionId="97fff367-fc5a-4299-aed8-0123456789ab" + Payload map[string][]*string `min:"1" type:"map"` + + // The type of signal to send to an Automation execution. + // + // SignalType is a required field + SignalType *string `type:"string" required:"true" enum:"SignalType"` +} + +// String returns the string representation +func (s SendAutomationSignalInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendAutomationSignalInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SendAutomationSignalInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendAutomationSignalInput"} + if s.AutomationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("AutomationExecutionId")) + } + if s.AutomationExecutionId != nil && len(*s.AutomationExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("AutomationExecutionId", 36)) + } + if s.Payload != nil && len(s.Payload) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Payload", 1)) + } + if s.SignalType == nil { + invalidParams.Add(request.NewErrParamRequired("SignalType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *SendAutomationSignalInput) SetAutomationExecutionId(v string) *SendAutomationSignalInput { + s.AutomationExecutionId = &v + return s +} + +// SetPayload sets the Payload field's value. +func (s *SendAutomationSignalInput) SetPayload(v map[string][]*string) *SendAutomationSignalInput { + s.Payload = v + return s +} + +// SetSignalType sets the SignalType field's value. +func (s *SendAutomationSignalInput) SetSignalType(v string) *SendAutomationSignalInput { + s.SignalType = &v + return s +} + +type SendAutomationSignalOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendAutomationSignalOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendAutomationSignalOutput) GoString() string { + return s.String() +} + +type SendCommandInput struct { + _ struct{} `type:"structure"` + + // Enables Systems Manager to send Run Command output to Amazon CloudWatch Logs. + CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"` + + // User-specified information about the command, such as a brief description + // of what the command should do. 
+ Comment *string `type:"string"` + + // The Sha256 or Sha1 hash created by the system when the document was created. + // + // Sha1 hashes have been deprecated. + DocumentHash *string `type:"string"` + + // Sha256 or Sha1. + // + // Sha1 hashes have been deprecated. + DocumentHashType *string `type:"string" enum:"DocumentHashType"` + + // Required. The name of the Systems Manager document to run. This can be a + // public document or a custom document. + // + // DocumentName is a required field + DocumentName *string `type:"string" required:"true"` + + // The SSM document version to use in the request. You can specify $DEFAULT, + // $LATEST, or a specific version number. If you run commands by using the AWS + // CLI, then you must escape the first two options by using a backslash. If + // you specify a version number, then you don't need to use the backslash. For + // example: + // + // --document-version "\$DEFAULT" + // + // --document-version "\$LATEST" + // + // --document-version "3" + DocumentVersion *string `type:"string"` + + // The IDs of the instances where the command should run. Specifying instance + // IDs is most useful when you are targeting a limited number of instances, + // though you can specify up to 50 IDs. + // + // To target a larger number of instances, or if you prefer not to list individual + // instance IDs, we recommend using the Targets option instead. Using Targets, + // which accepts tag key-value pairs to identify the instances to send commands + // to, you can a send command to tens, hundreds, or thousands of instances at + // once. + // + // For more information about how to use targets, see Using targets and rate + // controls to send commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // in the AWS Systems Manager User Guide. 
+ InstanceIds []*string `type:"list"` + + // (Optional) The maximum number of instances that are allowed to run the command + // at the same time. You can specify a number such as 10 or a percentage such + // as 10%. The default value is 50. For more information about how to use MaxConcurrency, + // see Using concurrency controls (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-velocity) + // in the AWS Systems Manager User Guide. + MaxConcurrency *string `min:"1" type:"string"` + + // The maximum number of errors allowed without the command failing. When the + // command fails one more time beyond the value of MaxErrors, the systems stops + // sending the command to additional targets. You can specify a number like + // 10 or a percentage like 10%. The default value is 0. For more information + // about how to use MaxErrors, see Using error controls (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-maxerrors) + // in the AWS Systems Manager User Guide. + MaxErrors *string `min:"1" type:"string"` + + // Configurations for sending notifications. + NotificationConfig *NotificationConfig `type:"structure"` + + // The name of the S3 bucket where command execution responses should be stored. + OutputS3BucketName *string `min:"3" type:"string"` + + // The directory structure within the S3 bucket where the responses should be + // stored. + OutputS3KeyPrefix *string `type:"string"` + + // (Deprecated) You can no longer specify this parameter. The system ignores + // it. Instead, Systems Manager automatically determines the Region of the S3 + // bucket. + OutputS3Region *string `min:"3" type:"string"` + + // The required and optional parameters specified in the document being run. 
+ Parameters map[string][]*string `type:"map"` + + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for Run Command commands. + ServiceRoleArn *string `type:"string"` + + // An array of search criteria that targets instances using a Key,Value combination + // that you specify. Specifying targets is most useful when you want to send + // a command to a large number of instances at once. Using Targets, which accepts + // tag key-value pairs to identify instances, you can send a command to tens, + // hundreds, or thousands of instances at once. + // + // To send a command to a smaller number of instances, you can use the InstanceIds + // option instead. + // + // For more information about how to use targets, see Sending commands to a + // fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // in the AWS Systems Manager User Guide. + Targets []*Target `type:"list"` + + // If this time is reached and the command has not already started running, + // it will not run. + TimeoutSeconds *int64 `min:"30" type:"integer"` +} + +// String returns the string representation +func (s SendCommandInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCommandInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SendCommandInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SendCommandInput"}
+	if s.DocumentName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DocumentName"))
+	}
+	// The scalar checks below mirror the min:"..." constraints declared in the
+	// SendCommandInput struct tags; nil (unset) fields pass because they are
+	// optional.
+	if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1))
+	}
+	if s.MaxErrors != nil && len(*s.MaxErrors) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1))
+	}
+	if s.OutputS3BucketName != nil && len(*s.OutputS3BucketName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("OutputS3BucketName", 3))
+	}
+	if s.OutputS3Region != nil && len(*s.OutputS3Region) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("OutputS3Region", 3))
+	}
+	if s.TimeoutSeconds != nil && *s.TimeoutSeconds < 30 {
+		invalidParams.Add(request.NewErrParamMinValue("TimeoutSeconds", 30))
+	}
+	// Nested structures validate themselves; their failures are attached under
+	// a prefixed key so callers can tell which member was at fault.
+	if s.CloudWatchOutputConfig != nil {
+		if err := s.CloudWatchOutputConfig.Validate(); err != nil {
+			invalidParams.AddNested("CloudWatchOutputConfig", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Targets != nil {
+		for i, v := range s.Targets {
+			if v == nil {
+				// Skip nil entries so a sparse Targets slice does not panic here.
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value.
+func (s *SendCommandInput) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *SendCommandInput {
+	s.CloudWatchOutputConfig = v
+	return s
+}
+
+// SetComment sets the Comment field's value.
+func (s *SendCommandInput) SetComment(v string) *SendCommandInput {
+	s.Comment = &v
+	return s
+}
+
+// SetDocumentHash sets the DocumentHash field's value.
+func (s *SendCommandInput) SetDocumentHash(v string) *SendCommandInput {
+	s.DocumentHash = &v
+	return s
+}
+
+// SetDocumentHashType sets the DocumentHashType field's value.
+func (s *SendCommandInput) SetDocumentHashType(v string) *SendCommandInput { + s.DocumentHashType = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *SendCommandInput) SetDocumentName(v string) *SendCommandInput { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *SendCommandInput) SetDocumentVersion(v string) *SendCommandInput { + s.DocumentVersion = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *SendCommandInput) SetInstanceIds(v []*string) *SendCommandInput { + s.InstanceIds = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *SendCommandInput) SetMaxConcurrency(v string) *SendCommandInput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *SendCommandInput) SetMaxErrors(v string) *SendCommandInput { + s.MaxErrors = &v + return s +} + +// SetNotificationConfig sets the NotificationConfig field's value. +func (s *SendCommandInput) SetNotificationConfig(v *NotificationConfig) *SendCommandInput { + s.NotificationConfig = v + return s +} + +// SetOutputS3BucketName sets the OutputS3BucketName field's value. +func (s *SendCommandInput) SetOutputS3BucketName(v string) *SendCommandInput { + s.OutputS3BucketName = &v + return s +} + +// SetOutputS3KeyPrefix sets the OutputS3KeyPrefix field's value. +func (s *SendCommandInput) SetOutputS3KeyPrefix(v string) *SendCommandInput { + s.OutputS3KeyPrefix = &v + return s +} + +// SetOutputS3Region sets the OutputS3Region field's value. +func (s *SendCommandInput) SetOutputS3Region(v string) *SendCommandInput { + s.OutputS3Region = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *SendCommandInput) SetParameters(v map[string][]*string) *SendCommandInput { + s.Parameters = v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. 
+func (s *SendCommandInput) SetServiceRoleArn(v string) *SendCommandInput { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *SendCommandInput) SetTargets(v []*Target) *SendCommandInput { + s.Targets = v + return s +} + +// SetTimeoutSeconds sets the TimeoutSeconds field's value. +func (s *SendCommandInput) SetTimeoutSeconds(v int64) *SendCommandInput { + s.TimeoutSeconds = &v + return s +} + +type SendCommandOutput struct { + _ struct{} `type:"structure"` + + // The request as it was received by Systems Manager. Also provides the command + // ID which can be used future references to this request. + Command *Command `type:"structure"` +} + +// String returns the string representation +func (s SendCommandOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendCommandOutput) GoString() string { + return s.String() +} + +// SetCommand sets the Command field's value. +func (s *SendCommandOutput) SetCommand(v *Command) *SendCommandOutput { + s.Command = v + return s +} + +// The service setting data structure. +// +// ServiceSetting is an account-level setting for an AWS service. This setting +// defines how a user interacts with or uses a service or a feature of a service. +// For example, if an AWS service charges money to the account based on feature +// or service usage, then the AWS service team might create a default setting +// of "false". This means the user can't use this feature unless they change +// the setting to "true" and intentionally opt in for a paid feature. +// +// Services map a SettingId object to a setting value. AWS services teams define +// the default value for a SettingId. You can't create a new SettingId, but +// you can overwrite the default value if you have the ssm:UpdateServiceSetting +// permission for the setting. Use the UpdateServiceSetting API action to change +// the default setting. 
Or, use the ResetServiceSetting to change the value +// back to the original value defined by the AWS service team. +type ServiceSetting struct { + _ struct{} `type:"structure"` + + // The ARN of the service setting. + ARN *string `type:"string"` + + // The last time the service setting was modified. + LastModifiedDate *time.Time `type:"timestamp"` + + // The ARN of the last modified user. This field is populated only if the setting + // value was overwritten. + LastModifiedUser *string `type:"string"` + + // The ID of the service setting. + SettingId *string `min:"1" type:"string"` + + // The value of the service setting. + SettingValue *string `min:"1" type:"string"` + + // The status of the service setting. The value can be Default, Customized or + // PendingUpdate. + // + // * Default: The current setting uses a default value provisioned by the + // AWS service team. + // + // * Customized: The current setting use a custom value specified by the + // customer. + // + // * PendingUpdate: The current setting uses a default or custom value, but + // a setting change request is pending approval. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ServiceSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceSetting) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *ServiceSetting) SetARN(v string) *ServiceSetting { + s.ARN = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *ServiceSetting) SetLastModifiedDate(v time.Time) *ServiceSetting { + s.LastModifiedDate = &v + return s +} + +// SetLastModifiedUser sets the LastModifiedUser field's value. +func (s *ServiceSetting) SetLastModifiedUser(v string) *ServiceSetting { + s.LastModifiedUser = &v + return s +} + +// SetSettingId sets the SettingId field's value. 
+func (s *ServiceSetting) SetSettingId(v string) *ServiceSetting { + s.SettingId = &v + return s +} + +// SetSettingValue sets the SettingValue field's value. +func (s *ServiceSetting) SetSettingValue(v string) *ServiceSetting { + s.SettingValue = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ServiceSetting) SetStatus(v string) *ServiceSetting { + s.Status = &v + return s +} + +// The specified service setting was not found. Either the service name or the +// setting has not been provisioned by the AWS service team. +type ServiceSettingNotFound struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ServiceSettingNotFound) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceSettingNotFound) GoString() string { + return s.String() +} + +func newErrorServiceSettingNotFound(v protocol.ResponseMetadata) error { + return &ServiceSettingNotFound{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServiceSettingNotFound) Code() string { + return "ServiceSettingNotFound" +} + +// Message returns the exception's message. +func (s *ServiceSettingNotFound) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceSettingNotFound) OrigErr() error { + return nil +} + +func (s *ServiceSettingNotFound) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceSettingNotFound) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ServiceSettingNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about a Session Manager connection to an instance. +type Session struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + Details *string `min:"1" type:"string"` + + // The name of the Session Manager SSM document used to define the parameters + // and plugin settings for the session. For example, SSM-SessionManagerRunShell. + DocumentName *string `type:"string"` + + // The date and time, in ISO-8601 Extended format, when the session was terminated. + EndDate *time.Time `type:"timestamp"` + + // Reserved for future use. + OutputUrl *SessionManagerOutputUrl `type:"structure"` + + // The ID of the AWS user account that started the session. + Owner *string `min:"1" type:"string"` + + // The ID of the session. + SessionId *string `min:"1" type:"string"` + + // The date and time, in ISO-8601 Extended format, when the session began. + StartDate *time.Time `type:"timestamp"` + + // The status of the session. For example, "Connected" or "Terminated". + Status *string `type:"string" enum:"SessionStatus"` + + // The instance that the Session Manager session connected to. + Target *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Session) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Session) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *Session) SetDetails(v string) *Session { + s.Details = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *Session) SetDocumentName(v string) *Session { + s.DocumentName = &v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *Session) SetEndDate(v time.Time) *Session { + s.EndDate = &v + return s +} + +// SetOutputUrl sets the OutputUrl field's value. 
+func (s *Session) SetOutputUrl(v *SessionManagerOutputUrl) *Session { + s.OutputUrl = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Session) SetOwner(v string) *Session { + s.Owner = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *Session) SetSessionId(v string) *Session { + s.SessionId = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *Session) SetStartDate(v time.Time) *Session { + s.StartDate = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Session) SetStatus(v string) *Session { + s.Status = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *Session) SetTarget(v string) *Session { + s.Target = &v + return s +} + +// Describes a filter for Session Manager information. +type SessionFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter. + // + // Key is a required field + Key *string `locationName:"key" type:"string" required:"true" enum:"SessionFilterKey"` + + // The filter value. Valid values for each filter key are as follows: + // + // * InvokedAfter: Specify a timestamp to limit your results. For example, + // specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, + // and later. + // + // * InvokedBefore: Specify a timestamp to limit your results. For example, + // specify 2018-08-29T00:00:00Z to see sessions that started before August + // 29, 2018. + // + // * Target: Specify an instance to which session connections have been made. + // + // * Owner: Specify an AWS user account to see a list of sessions started + // by that user. + // + // * Status: Specify a valid session status to see a list of all sessions + // with that status. Status values you can specify include: Connected Connecting + // Disconnected Terminated Terminating Failed + // + // * SessionId: Specify a session ID to return details about the session. 
+ // + // Value is a required field + Value *string `locationName:"value" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SessionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SessionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SessionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SessionFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *SessionFilter) SetKey(v string) *SessionFilter { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *SessionFilter) SetValue(v string) *SessionFilter { + s.Value = &v + return s +} + +// Reserved for future use. +type SessionManagerOutputUrl struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + CloudWatchOutputUrl *string `min:"1" type:"string"` + + // Reserved for future use. + S3OutputUrl *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SessionManagerOutputUrl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SessionManagerOutputUrl) GoString() string { + return s.String() +} + +// SetCloudWatchOutputUrl sets the CloudWatchOutputUrl field's value. +func (s *SessionManagerOutputUrl) SetCloudWatchOutputUrl(v string) *SessionManagerOutputUrl { + s.CloudWatchOutputUrl = &v + return s +} + +// SetS3OutputUrl sets the S3OutputUrl field's value. 
+func (s *SessionManagerOutputUrl) SetS3OutputUrl(v string) *SessionManagerOutputUrl { + s.S3OutputUrl = &v + return s +} + +// The number of managed instances found for each patch severity level defined +// in the request filter. +type SeveritySummary struct { + _ struct{} `type:"structure"` + + // The total number of resources or compliance items that have a severity level + // of critical. Critical severity is determined by the organization that published + // the compliance items. + CriticalCount *int64 `type:"integer"` + + // The total number of resources or compliance items that have a severity level + // of high. High severity is determined by the organization that published the + // compliance items. + HighCount *int64 `type:"integer"` + + // The total number of resources or compliance items that have a severity level + // of informational. Informational severity is determined by the organization + // that published the compliance items. + InformationalCount *int64 `type:"integer"` + + // The total number of resources or compliance items that have a severity level + // of low. Low severity is determined by the organization that published the + // compliance items. + LowCount *int64 `type:"integer"` + + // The total number of resources or compliance items that have a severity level + // of medium. Medium severity is determined by the organization that published + // the compliance items. + MediumCount *int64 `type:"integer"` + + // The total number of resources or compliance items that have a severity level + // of unspecified. Unspecified severity is determined by the organization that + // published the compliance items. 
+ UnspecifiedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s SeveritySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SeveritySummary) GoString() string { + return s.String() +} + +// SetCriticalCount sets the CriticalCount field's value. +func (s *SeveritySummary) SetCriticalCount(v int64) *SeveritySummary { + s.CriticalCount = &v + return s +} + +// SetHighCount sets the HighCount field's value. +func (s *SeveritySummary) SetHighCount(v int64) *SeveritySummary { + s.HighCount = &v + return s +} + +// SetInformationalCount sets the InformationalCount field's value. +func (s *SeveritySummary) SetInformationalCount(v int64) *SeveritySummary { + s.InformationalCount = &v + return s +} + +// SetLowCount sets the LowCount field's value. +func (s *SeveritySummary) SetLowCount(v int64) *SeveritySummary { + s.LowCount = &v + return s +} + +// SetMediumCount sets the MediumCount field's value. +func (s *SeveritySummary) SetMediumCount(v int64) *SeveritySummary { + s.MediumCount = &v + return s +} + +// SetUnspecifiedCount sets the UnspecifiedCount field's value. +func (s *SeveritySummary) SetUnspecifiedCount(v int64) *SeveritySummary { + s.UnspecifiedCount = &v + return s +} + +type StartAssociationsOnceInput struct { + _ struct{} `type:"structure"` + + // The association IDs that you want to run immediately and only one time. + // + // AssociationIds is a required field + AssociationIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartAssociationsOnceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAssociationsOnceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartAssociationsOnceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartAssociationsOnceInput"} + if s.AssociationIds == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationIds")) + } + if s.AssociationIds != nil && len(s.AssociationIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssociationIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationIds sets the AssociationIds field's value. +func (s *StartAssociationsOnceInput) SetAssociationIds(v []*string) *StartAssociationsOnceInput { + s.AssociationIds = v + return s +} + +type StartAssociationsOnceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartAssociationsOnceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAssociationsOnceOutput) GoString() string { + return s.String() +} + +type StartAutomationExecutionInput struct { + _ struct{} `type:"structure"` + + // User-provided idempotency token. The token must be unique, is case insensitive, + // enforces the UUID format, and can't be reused. + ClientToken *string `min:"36" type:"string"` + + // The name of the Automation document to use for this execution. + // + // DocumentName is a required field + DocumentName *string `type:"string" required:"true"` + + // The version of the Automation document to use for this execution. + DocumentVersion *string `type:"string"` + + // The maximum number of targets allowed to run this task in parallel. You can + // specify a number, such as 10, or a percentage, such as 10%. The default value + // is 10. + MaxConcurrency *string `min:"1" type:"string"` + + // The number of errors that are allowed before the system stops running the + // automation on additional targets. 
You can specify either an absolute number + // of errors, for example 10, or a percentage of the target set, for example + // 10%. If you specify 3, for example, the system stops running the automation + // when the fourth error is received. If you specify 0, then the system stops + // running the automation on additional targets after the first error result + // is returned. If you run an automation on 50 resources and set max-errors + // to 10%, then the system stops running the automation on additional targets + // when the sixth error is received. + // + // Executions that are already running an automation when max-errors is reached + // are allowed to complete, but some of these executions may fail as well. If + // you need to ensure that there won't be more than max-errors failed executions, + // set max-concurrency to 1 so the executions proceed one at a time. + MaxErrors *string `min:"1" type:"string"` + + // The execution mode of the automation. Valid modes include the following: + // Auto and Interactive. The default mode is Auto. + Mode *string `type:"string" enum:"ExecutionMode"` + + // A key-value map of execution parameters, which match the declared parameters + // in the Automation document. + Parameters map[string][]*string `min:"1" type:"map"` + + // Optional metadata that you assign to a resource. You can specify a maximum + // of five tags for an automation. Tags enable you to categorize a resource + // in different ways, such as by purpose, owner, or environment. For example, + // you might want to tag an automation to identify an environment or operating + // system. In this case, you could specify the following key name/value pairs: + // + // * Key=environment,Value=test + // + // * Key=OS,Value=Windows + // + // To add tags to an existing patch baseline, use the AddTagsToResource action. + Tags []*Tag `type:"list"` + + // A location is a combination of AWS Regions and/or AWS accounts where you + // want to run the Automation. 
Use this action to start an Automation in multiple + // Regions and multiple accounts. For more information, see Running Automation + // workflows in multiple AWS Regions and accounts (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) + // in the AWS Systems Manager User Guide. + TargetLocations []*TargetLocation `min:"1" type:"list"` + + // A key-value mapping of document parameters to target resources. Both Targets + // and TargetMaps cannot be specified together. + TargetMaps []map[string][]*string `type:"list"` + + // The name of the parameter used as the target resource for the rate-controlled + // execution. Required if you specify targets. + TargetParameterName *string `min:"1" type:"string"` + + // A key-value mapping to target resources. Required if you specify TargetParameterName. + Targets []*Target `type:"list"` +} + +// String returns the string representation +func (s StartAutomationExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAutomationExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartAutomationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartAutomationExecutionInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 36)) + } + if s.DocumentName == nil { + invalidParams.Add(request.NewErrParamRequired("DocumentName")) + } + if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1)) + } + if s.MaxErrors != nil && len(*s.MaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1)) + } + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.TargetLocations != nil && len(s.TargetLocations) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetLocations", 1)) + } + if s.TargetParameterName != nil && len(*s.TargetParameterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetParameterName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TargetLocations != nil { + for i, v := range s.TargetLocations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetLocations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. 
+func (s *StartAutomationExecutionInput) SetClientToken(v string) *StartAutomationExecutionInput { + s.ClientToken = &v + return s +} + +// SetDocumentName sets the DocumentName field's value. +func (s *StartAutomationExecutionInput) SetDocumentName(v string) *StartAutomationExecutionInput { + s.DocumentName = &v + return s +} + +// SetDocumentVersion sets the DocumentVersion field's value. +func (s *StartAutomationExecutionInput) SetDocumentVersion(v string) *StartAutomationExecutionInput { + s.DocumentVersion = &v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *StartAutomationExecutionInput) SetMaxConcurrency(v string) *StartAutomationExecutionInput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *StartAutomationExecutionInput) SetMaxErrors(v string) *StartAutomationExecutionInput { + s.MaxErrors = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *StartAutomationExecutionInput) SetMode(v string) *StartAutomationExecutionInput { + s.Mode = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *StartAutomationExecutionInput) SetParameters(v map[string][]*string) *StartAutomationExecutionInput { + s.Parameters = v + return s +} + +// SetTags sets the Tags field's value. +func (s *StartAutomationExecutionInput) SetTags(v []*Tag) *StartAutomationExecutionInput { + s.Tags = v + return s +} + +// SetTargetLocations sets the TargetLocations field's value. +func (s *StartAutomationExecutionInput) SetTargetLocations(v []*TargetLocation) *StartAutomationExecutionInput { + s.TargetLocations = v + return s +} + +// SetTargetMaps sets the TargetMaps field's value. +func (s *StartAutomationExecutionInput) SetTargetMaps(v []map[string][]*string) *StartAutomationExecutionInput { + s.TargetMaps = v + return s +} + +// SetTargetParameterName sets the TargetParameterName field's value. 
+func (s *StartAutomationExecutionInput) SetTargetParameterName(v string) *StartAutomationExecutionInput { + s.TargetParameterName = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *StartAutomationExecutionInput) SetTargets(v []*Target) *StartAutomationExecutionInput { + s.Targets = v + return s +} + +type StartAutomationExecutionOutput struct { + _ struct{} `type:"structure"` + + // The unique ID of a newly scheduled automation execution. + AutomationExecutionId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s StartAutomationExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartAutomationExecutionOutput) GoString() string { + return s.String() +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *StartAutomationExecutionOutput) SetAutomationExecutionId(v string) *StartAutomationExecutionOutput { + s.AutomationExecutionId = &v + return s +} + +type StartSessionInput struct { + _ struct{} `type:"structure"` + + // The name of the SSM document to define the parameters and plugin settings + // for the session. For example, SSM-SessionManagerRunShell. You can call the + // GetDocument API to verify the document exists before attempting to start + // a session. If no document name is provided, a shell to the instance is launched + // by default. + DocumentName *string `type:"string"` + + // Reserved for future use. + Parameters map[string][]*string `type:"map"` + + // The instance to connect to for the session. 
+ // + // Target is a required field + Target *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartSessionInput"} + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil && len(*s.Target) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Target", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentName sets the DocumentName field's value. +func (s *StartSessionInput) SetDocumentName(v string) *StartSessionInput { + s.DocumentName = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *StartSessionInput) SetParameters(v map[string][]*string) *StartSessionInput { + s.Parameters = v + return s +} + +// SetTarget sets the Target field's value. +func (s *StartSessionInput) SetTarget(v string) *StartSessionInput { + s.Target = &v + return s +} + +type StartSessionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the session. + SessionId *string `min:"1" type:"string"` + + // A URL back to SSM Agent on the instance that the Session Manager client uses + // to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output) + // + // region represents the Region identifier for an AWS Region supported by AWS + // Systems Manager, such as us-east-2 for the US East (Ohio) Region. 
For a list + // of supported region values, see the Region column in Systems Manager service + // endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) + // in the AWS General Reference. + // + // session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE. + StreamUrl *string `type:"string"` + + // An encrypted token value containing session and caller information. Used + // to authenticate the connection to the instance. + TokenValue *string `type:"string"` +} + +// String returns the string representation +func (s StartSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartSessionOutput) GoString() string { + return s.String() +} + +// SetSessionId sets the SessionId field's value. +func (s *StartSessionOutput) SetSessionId(v string) *StartSessionOutput { + s.SessionId = &v + return s +} + +// SetStreamUrl sets the StreamUrl field's value. +func (s *StartSessionOutput) SetStreamUrl(v string) *StartSessionOutput { + s.StreamUrl = &v + return s +} + +// SetTokenValue sets the TokenValue field's value. +func (s *StartSessionOutput) SetTokenValue(v string) *StartSessionOutput { + s.TokenValue = &v + return s +} + +// The updated status is the same as the current status. +type StatusUnchanged struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s StatusUnchanged) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusUnchanged) GoString() string { + return s.String() +} + +func newErrorStatusUnchanged(v protocol.ResponseMetadata) error { + return &StatusUnchanged{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *StatusUnchanged) Code() string { + return "StatusUnchanged" +} + +// Message returns the exception's message. +func (s *StatusUnchanged) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *StatusUnchanged) OrigErr() error { + return nil +} + +func (s *StatusUnchanged) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *StatusUnchanged) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *StatusUnchanged) RequestID() string { + return s.RespMetadata.RequestID +} + +// Detailed information about the execution state of an Automation step. +type StepExecution struct { + _ struct{} `type:"structure"` + + // The action this step performs. The action determines the behavior of the + // step. + Action *string `type:"string"` + + // If a step has finished execution, this contains the time the execution ended. + // If the step has not yet concluded, this field is not populated. + ExecutionEndTime *time.Time `type:"timestamp"` + + // If a step has begun execution, this contains the time the step started. If + // the step is in Pending status, this field is not populated. + ExecutionStartTime *time.Time `type:"timestamp"` + + // Information about the Automation failure. + FailureDetails *FailureDetails `type:"structure"` + + // If a step failed, this message explains why the execution failed. + FailureMessage *string `type:"string"` + + // Fully-resolved values passed into the step before execution. + Inputs map[string]*string `type:"map"` + + // The flag which can be used to help decide whether the failure of current + // step leads to the Automation failure. 
+ IsCritical *bool `type:"boolean"` + + // The flag which can be used to end automation no matter whether the step succeeds + // or fails. + IsEnd *bool `type:"boolean"` + + // The maximum number of tries to run the action of the step. The default value + // is 1. + MaxAttempts *int64 `type:"integer"` + + // The next step after the step succeeds. + NextStep *string `type:"string"` + + // The action to take if the step fails. The default value is Abort. + OnFailure *string `type:"string"` + + // Returned values from the execution of the step. + Outputs map[string][]*string `min:"1" type:"map"` + + // A user-specified list of parameters to override when running a step. + OverriddenParameters map[string][]*string `min:"1" type:"map"` + + // A message associated with the response code for an execution. + Response *string `type:"string"` + + // The response code returned by the execution of the step. + ResponseCode *string `type:"string"` + + // The unique ID of a step execution. + StepExecutionId *string `type:"string"` + + // The name of this execution step. + StepName *string `type:"string"` + + // The execution status for this step. + StepStatus *string `type:"string" enum:"AutomationExecutionStatus"` + + // The combination of AWS Regions and accounts targeted by the current Automation + // execution. + TargetLocation *TargetLocation `type:"structure"` + + // The targets for the step execution. + Targets []*Target `type:"list"` + + // The timeout seconds of the step. + TimeoutSeconds *int64 `type:"long"` + + // Strategies used when step fails, we support Continue and Abort. Abort will + // fail the automation when the step fails. Continue will ignore the failure + // of current step and allow automation to run the next step. With conditional + // branching, we add step:stepName to support the automation to go to another + // specific step. 
+ ValidNextSteps []*string `type:"list"` +} + +// String returns the string representation +func (s StepExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepExecution) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *StepExecution) SetAction(v string) *StepExecution { + s.Action = &v + return s +} + +// SetExecutionEndTime sets the ExecutionEndTime field's value. +func (s *StepExecution) SetExecutionEndTime(v time.Time) *StepExecution { + s.ExecutionEndTime = &v + return s +} + +// SetExecutionStartTime sets the ExecutionStartTime field's value. +func (s *StepExecution) SetExecutionStartTime(v time.Time) *StepExecution { + s.ExecutionStartTime = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *StepExecution) SetFailureDetails(v *FailureDetails) *StepExecution { + s.FailureDetails = v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *StepExecution) SetFailureMessage(v string) *StepExecution { + s.FailureMessage = &v + return s +} + +// SetInputs sets the Inputs field's value. +func (s *StepExecution) SetInputs(v map[string]*string) *StepExecution { + s.Inputs = v + return s +} + +// SetIsCritical sets the IsCritical field's value. +func (s *StepExecution) SetIsCritical(v bool) *StepExecution { + s.IsCritical = &v + return s +} + +// SetIsEnd sets the IsEnd field's value. +func (s *StepExecution) SetIsEnd(v bool) *StepExecution { + s.IsEnd = &v + return s +} + +// SetMaxAttempts sets the MaxAttempts field's value. +func (s *StepExecution) SetMaxAttempts(v int64) *StepExecution { + s.MaxAttempts = &v + return s +} + +// SetNextStep sets the NextStep field's value. +func (s *StepExecution) SetNextStep(v string) *StepExecution { + s.NextStep = &v + return s +} + +// SetOnFailure sets the OnFailure field's value. 
+func (s *StepExecution) SetOnFailure(v string) *StepExecution { + s.OnFailure = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *StepExecution) SetOutputs(v map[string][]*string) *StepExecution { + s.Outputs = v + return s +} + +// SetOverriddenParameters sets the OverriddenParameters field's value. +func (s *StepExecution) SetOverriddenParameters(v map[string][]*string) *StepExecution { + s.OverriddenParameters = v + return s +} + +// SetResponse sets the Response field's value. +func (s *StepExecution) SetResponse(v string) *StepExecution { + s.Response = &v + return s +} + +// SetResponseCode sets the ResponseCode field's value. +func (s *StepExecution) SetResponseCode(v string) *StepExecution { + s.ResponseCode = &v + return s +} + +// SetStepExecutionId sets the StepExecutionId field's value. +func (s *StepExecution) SetStepExecutionId(v string) *StepExecution { + s.StepExecutionId = &v + return s +} + +// SetStepName sets the StepName field's value. +func (s *StepExecution) SetStepName(v string) *StepExecution { + s.StepName = &v + return s +} + +// SetStepStatus sets the StepStatus field's value. +func (s *StepExecution) SetStepStatus(v string) *StepExecution { + s.StepStatus = &v + return s +} + +// SetTargetLocation sets the TargetLocation field's value. +func (s *StepExecution) SetTargetLocation(v *TargetLocation) *StepExecution { + s.TargetLocation = v + return s +} + +// SetTargets sets the Targets field's value. +func (s *StepExecution) SetTargets(v []*Target) *StepExecution { + s.Targets = v + return s +} + +// SetTimeoutSeconds sets the TimeoutSeconds field's value. +func (s *StepExecution) SetTimeoutSeconds(v int64) *StepExecution { + s.TimeoutSeconds = &v + return s +} + +// SetValidNextSteps sets the ValidNextSteps field's value. 
+func (s *StepExecution) SetValidNextSteps(v []*string) *StepExecution { + s.ValidNextSteps = v + return s +} + +// A filter to limit the amount of step execution information returned by the +// call. +type StepExecutionFilter struct { + _ struct{} `type:"structure"` + + // One or more keys to limit the results. Valid filter keys include the following: + // StepName, Action, StepExecutionId, StepExecutionStatus, StartTimeBefore, + // StartTimeAfter. + // + // Key is a required field + Key *string `type:"string" required:"true" enum:"StepExecutionFilterKey"` + + // The values of the filter key. + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StepExecutionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepExecutionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepExecutionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepExecutionFilter"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *StepExecutionFilter) SetKey(v string) *StepExecutionFilter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *StepExecutionFilter) SetValues(v []*string) *StepExecutionFilter { + s.Values = v + return s +} + +type StopAutomationExecutionInput struct { + _ struct{} `type:"structure"` + + // The execution ID of the Automation to stop. 
+ // + // AutomationExecutionId is a required field + AutomationExecutionId *string `min:"36" type:"string" required:"true"` + + // The stop request type. Valid types include the following: Cancel and Complete. + // The default type is Cancel. + Type *string `type:"string" enum:"StopType"` +} + +// String returns the string representation +func (s StopAutomationExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAutomationExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopAutomationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopAutomationExecutionInput"} + if s.AutomationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("AutomationExecutionId")) + } + if s.AutomationExecutionId != nil && len(*s.AutomationExecutionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("AutomationExecutionId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomationExecutionId sets the AutomationExecutionId field's value. +func (s *StopAutomationExecutionInput) SetAutomationExecutionId(v string) *StopAutomationExecutionInput { + s.AutomationExecutionId = &v + return s +} + +// SetType sets the Type field's value. +func (s *StopAutomationExecutionInput) SetType(v string) *StopAutomationExecutionInput { + s.Type = &v + return s +} + +type StopAutomationExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopAutomationExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAutomationExecutionOutput) GoString() string { + return s.String() +} + +// The sub-type count exceeded the limit for the inventory type. 
+type SubTypeCountLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s SubTypeCountLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubTypeCountLimitExceededException) GoString() string { + return s.String() +} + +func newErrorSubTypeCountLimitExceededException(v protocol.ResponseMetadata) error { + return &SubTypeCountLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SubTypeCountLimitExceededException) Code() string { + return "SubTypeCountLimitExceededException" +} + +// Message returns the exception's message. +func (s *SubTypeCountLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SubTypeCountLimitExceededException) OrigErr() error { + return nil +} + +func (s *SubTypeCountLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SubTypeCountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *SubTypeCountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Metadata that you assign to your AWS resources. Tags enable you to categorize +// your resources in different ways, for example, by purpose, owner, or environment. +// In Systems Manager, you can apply tags to documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. 
+type Tag struct { + _ struct{} `type:"structure"` + + // The name of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// An array of search criteria that targets instances using a Key,Value combination +// that you specify. +// +// Supported formats include the following. 
+// +// * Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 +// +// * Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 +// +// * Key=tag-key,Values=my-tag-key-1,my-tag-key-2 +// +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=resource-group-name +// +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 +// +// * Automation targets only: Key=ResourceGroup;Values=resource-group-name +// +// For example: +// +// * Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE +// +// * Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3 +// +// * Key=tag-key,Values=Name,Instance-Type,CostCenter +// +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup +// This example demonstrates how to target all resources in the resource +// group ProductionResourceGroup in your maintenance window. +// +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC +// This example demonstrates how to target only EC2 instances and VPCs in +// your maintenance window. +// +// * Automation targets only: Key=ResourceGroup,Values=MyResourceGroup +// +// * State Manager association targets only: Key=InstanceIds,Values=* This +// example demonstrates how to target all managed instances in the AWS Region +// where the association was created. +// +// For more information about how to send commands that target instances using +// Key,Value parameters, see Targeting multiple instances (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) +// in the AWS Systems Manager User Guide. +type Target struct { + _ struct{} `type:"structure"` + + // User-defined criteria for sending commands that target instances that meet + // the criteria. 
+ Key *string `min:"1" type:"string"` + + // User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, + // you could specify value:WebServer to run a command on instances that include + // EC2 tags of ServerRole,WebServer. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s Target) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Target) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Target) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Target"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Target) SetKey(v string) *Target { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *Target) SetValues(v []*string) *Target { + s.Values = v + return s +} + +// You specified the Safe option for the DeregisterTargetFromMaintenanceWindow +// operation, but the target is still referenced in a task. +type TargetInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s TargetInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetInUseException) GoString() string { + return s.String() +} + +func newErrorTargetInUseException(v protocol.ResponseMetadata) error { + return &TargetInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TargetInUseException) Code() string { + return "TargetInUseException" +} + +// Message returns the exception's message. 
+func (s *TargetInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TargetInUseException) OrigErr() error { + return nil +} + +func (s *TargetInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TargetInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TargetInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The combination of AWS Regions and accounts targeted by the current Automation +// execution. +type TargetLocation struct { + _ struct{} `type:"structure"` + + // The AWS accounts targeted by the current Automation execution. + Accounts []*string `min:"1" type:"list"` + + // The Automation execution role used by the currently running Automation. + ExecutionRoleName *string `min:"1" type:"string"` + + // The AWS Regions targeted by the current Automation execution. + Regions []*string `min:"1" type:"list"` + + // The maximum number of AWS accounts and AWS regions allowed to run the Automation + // concurrently + TargetLocationMaxConcurrency *string `min:"1" type:"string"` + + // The maximum number of errors allowed before the system stops queueing additional + // Automation executions for the currently running Automation. + TargetLocationMaxErrors *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TargetLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TargetLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetLocation"} + if s.Accounts != nil && len(s.Accounts) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Accounts", 1)) + } + if s.ExecutionRoleName != nil && len(*s.ExecutionRoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleName", 1)) + } + if s.Regions != nil && len(s.Regions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regions", 1)) + } + if s.TargetLocationMaxConcurrency != nil && len(*s.TargetLocationMaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetLocationMaxConcurrency", 1)) + } + if s.TargetLocationMaxErrors != nil && len(*s.TargetLocationMaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetLocationMaxErrors", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccounts sets the Accounts field's value. +func (s *TargetLocation) SetAccounts(v []*string) *TargetLocation { + s.Accounts = v + return s +} + +// SetExecutionRoleName sets the ExecutionRoleName field's value. +func (s *TargetLocation) SetExecutionRoleName(v string) *TargetLocation { + s.ExecutionRoleName = &v + return s +} + +// SetRegions sets the Regions field's value. +func (s *TargetLocation) SetRegions(v []*string) *TargetLocation { + s.Regions = v + return s +} + +// SetTargetLocationMaxConcurrency sets the TargetLocationMaxConcurrency field's value. +func (s *TargetLocation) SetTargetLocationMaxConcurrency(v string) *TargetLocation { + s.TargetLocationMaxConcurrency = &v + return s +} + +// SetTargetLocationMaxErrors sets the TargetLocationMaxErrors field's value. +func (s *TargetLocation) SetTargetLocationMaxErrors(v string) *TargetLocation { + s.TargetLocationMaxErrors = &v + return s +} + +// The specified target instance for the session is not fully configured for +// use with Session Manager. 
For more information, see Getting started with +// Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) +// in the AWS Systems Manager User Guide. This error is also returned if you +// attempt to start a session on an instance that is located in a different +// account or Region +type TargetNotConnected struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s TargetNotConnected) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetNotConnected) GoString() string { + return s.String() +} + +func newErrorTargetNotConnected(v protocol.ResponseMetadata) error { + return &TargetNotConnected{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TargetNotConnected) Code() string { + return "TargetNotConnected" +} + +// Message returns the exception's message. +func (s *TargetNotConnected) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TargetNotConnected) OrigErr() error { + return nil +} + +func (s *TargetNotConnected) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TargetNotConnected) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TargetNotConnected) RequestID() string { + return s.RespMetadata.RequestID +} + +type TerminateSessionInput struct { + _ struct{} `type:"structure"` + + // The ID of the session to terminate. 
+ // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *TerminateSessionInput) SetSessionId(v string) *TerminateSessionInput { + s.SessionId = &v + return s +} + +type TerminateSessionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the session that has been terminated. + SessionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TerminateSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateSessionOutput) GoString() string { + return s.String() +} + +// SetSessionId sets the SessionId field's value. +func (s *TerminateSessionOutput) SetSessionId(v string) *TerminateSessionOutput { + s.SessionId = &v + return s +} + +// The Targets parameter includes too many tags. Remove one or more tags and +// try the command again. 
+type TooManyTagsError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyTagsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyTagsError) GoString() string { + return s.String() +} + +func newErrorTooManyTagsError(v protocol.ResponseMetadata) error { + return &TooManyTagsError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsError) Code() string { + return "TooManyTagsError" +} + +// Message returns the exception's message. +func (s *TooManyTagsError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsError) OrigErr() error { + return nil +} + +func (s *TooManyTagsError) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsError) RequestID() string { + return s.RespMetadata.RequestID +} + +// There are concurrent updates for a resource that supports one update at a +// time. 
+type TooManyUpdates struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s TooManyUpdates) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyUpdates) GoString() string { + return s.String() +} + +func newErrorTooManyUpdates(v protocol.ResponseMetadata) error { + return &TooManyUpdates{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyUpdates) Code() string { + return "TooManyUpdates" +} + +// Message returns the exception's message. +func (s *TooManyUpdates) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyUpdates) OrigErr() error { + return nil +} + +func (s *TooManyUpdates) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyUpdates) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyUpdates) RequestID() string { + return s.RespMetadata.RequestID +} + +// The size of inventory data has exceeded the total size limit for the resource. 
+type TotalSizeLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s TotalSizeLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TotalSizeLimitExceededException) GoString() string { + return s.String() +} + +func newErrorTotalSizeLimitExceededException(v protocol.ResponseMetadata) error { + return &TotalSizeLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TotalSizeLimitExceededException) Code() string { + return "TotalSizeLimitExceededException" +} + +// Message returns the exception's message. +func (s *TotalSizeLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TotalSizeLimitExceededException) OrigErr() error { + return nil +} + +func (s *TotalSizeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TotalSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TotalSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The calendar entry contained in the specified Systems Manager document is +// not supported. 
+type UnsupportedCalendarException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedCalendarException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedCalendarException) GoString() string { + return s.String() +} + +func newErrorUnsupportedCalendarException(v protocol.ResponseMetadata) error { + return &UnsupportedCalendarException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedCalendarException) Code() string { + return "UnsupportedCalendarException" +} + +// Message returns the exception's message. +func (s *UnsupportedCalendarException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedCalendarException) OrigErr() error { + return nil +} + +func (s *UnsupportedCalendarException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedCalendarException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedCalendarException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Microsoft application patching is only available on EC2 instances and advanced +// instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable advanced instances. For more information, see Using the advanced-instances +// tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// in the AWS Systems Manager User Guide. 
+type UnsupportedFeatureRequiredException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedFeatureRequiredException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedFeatureRequiredException) GoString() string { + return s.String() +} + +func newErrorUnsupportedFeatureRequiredException(v protocol.ResponseMetadata) error { + return &UnsupportedFeatureRequiredException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedFeatureRequiredException) Code() string { + return "UnsupportedFeatureRequiredException" +} + +// Message returns the exception's message. +func (s *UnsupportedFeatureRequiredException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedFeatureRequiredException) OrigErr() error { + return nil +} + +func (s *UnsupportedFeatureRequiredException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedFeatureRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedFeatureRequiredException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The Context attribute that you specified for the InventoryItem is not allowed +// for this inventory type. You can only use the Context attribute with inventory +// types like AWS:ComplianceItem. 
+type UnsupportedInventoryItemContextException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + TypeName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UnsupportedInventoryItemContextException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedInventoryItemContextException) GoString() string { + return s.String() +} + +func newErrorUnsupportedInventoryItemContextException(v protocol.ResponseMetadata) error { + return &UnsupportedInventoryItemContextException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedInventoryItemContextException) Code() string { + return "UnsupportedInventoryItemContextException" +} + +// Message returns the exception's message. +func (s *UnsupportedInventoryItemContextException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedInventoryItemContextException) OrigErr() error { + return nil +} + +func (s *UnsupportedInventoryItemContextException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedInventoryItemContextException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedInventoryItemContextException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Inventory item type schema version has to match supported versions in the +// service. Check output of GetInventorySchema to see the available schema version +// for each type. 
+type UnsupportedInventorySchemaVersionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedInventorySchemaVersionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedInventorySchemaVersionException) GoString() string { + return s.String() +} + +func newErrorUnsupportedInventorySchemaVersionException(v protocol.ResponseMetadata) error { + return &UnsupportedInventorySchemaVersionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedInventorySchemaVersionException) Code() string { + return "UnsupportedInventorySchemaVersionException" +} + +// Message returns the exception's message. +func (s *UnsupportedInventorySchemaVersionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedInventorySchemaVersionException) OrigErr() error { + return nil +} + +func (s *UnsupportedInventorySchemaVersionException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedInventorySchemaVersionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedInventorySchemaVersionException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operating systems you specified is not supported, or the operation is +// not supported for the operating system. 
+type UnsupportedOperatingSystem struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedOperatingSystem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedOperatingSystem) GoString() string { + return s.String() +} + +func newErrorUnsupportedOperatingSystem(v protocol.ResponseMetadata) error { + return &UnsupportedOperatingSystem{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedOperatingSystem) Code() string { + return "UnsupportedOperatingSystem" +} + +// Message returns the exception's message. +func (s *UnsupportedOperatingSystem) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedOperatingSystem) OrigErr() error { + return nil +} + +func (s *UnsupportedOperatingSystem) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedOperatingSystem) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedOperatingSystem) RequestID() string { + return s.RespMetadata.RequestID +} + +// The parameter type is not supported. 
+type UnsupportedParameterType struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedParameterType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedParameterType) GoString() string { + return s.String() +} + +func newErrorUnsupportedParameterType(v protocol.ResponseMetadata) error { + return &UnsupportedParameterType{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedParameterType) Code() string { + return "UnsupportedParameterType" +} + +// Message returns the exception's message. +func (s *UnsupportedParameterType) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedParameterType) OrigErr() error { + return nil +} + +func (s *UnsupportedParameterType) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedParameterType) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedParameterType) RequestID() string { + return s.RespMetadata.RequestID +} + +// The document does not support the platform type of the given instance ID(s). +// For example, you sent an document for a Windows instance to a Linux instance. 
type UnsupportedPlatformType struct {
	_ struct{} `type:"structure"`
	// Response metadata for the failed request; backs StatusCode() and RequestID().
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"Message" type:"string"`
}

// String returns the string representation
func (s UnsupportedPlatformType) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UnsupportedPlatformType) GoString() string {
	return s.String()
}

// newErrorUnsupportedPlatformType constructs the exception from response metadata.
func newErrorUnsupportedPlatformType(v protocol.ResponseMetadata) error {
	return &UnsupportedPlatformType{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *UnsupportedPlatformType) Code() string {
	return "UnsupportedPlatformType"
}

// Message returns the exception's message.
func (s *UnsupportedPlatformType) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *UnsupportedPlatformType) OrigErr() error {
	return nil
}

// Error satisfies the error interface, formatted as "<code>: <message>".
func (s *UnsupportedPlatformType) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *UnsupportedPlatformType) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *UnsupportedPlatformType) RequestID() string {
	return s.RespMetadata.RequestID
}

// UpdateAssociationInput is the request payload for the UpdateAssociation API.
type UpdateAssociationInput struct {
	_ struct{} `type:"structure"`

	// By default, when you update an association, the system runs it immediately
	// after it is updated and then according to the schedule you specified. Specify
	// this option if you don't want an association to run immediately after you
	// update it.
	//
	// Also, if you specified this option when you created the association, you
	// can reset it. To do so, specify the no-apply-only-at-cron-interval parameter
	// when you update the association from the command line. This parameter forces
	// the association to run immediately after updating it and according to the
	// interval specified.
	ApplyOnlyAtCronInterval *bool `type:"boolean"`

	// The ID of the association you want to update.
	//
	// AssociationId is a required field
	AssociationId *string `type:"string" required:"true"`

	// The name of the association that you want to update.
	AssociationName *string `type:"string"`

	// This parameter is provided for concurrency control purposes. You must specify
	// the latest association version in the service. If you want to ensure that
	// this request succeeds, either specify $LATEST, or omit this parameter.
	AssociationVersion *string `type:"string"`

	// Specify the target for the association. This target is required for associations
	// that use an Automation document and target resources by using rate controls.
	AutomationTargetParameterName *string `min:"1" type:"string"`

	// The severity level to assign to the association.
	ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"`

	// The document version you want to update for the association.
	DocumentVersion *string `type:"string"`

	// The maximum number of targets allowed to run the association at the same
	// time. You can specify a number, for example 10, or a percentage of the target
	// set, for example 10%. The default value is 100%, which means all targets
	// run the association at the same time.
	//
	// If a new instance starts and attempts to run an association while Systems
	// Manager is running MaxConcurrency associations, the association is allowed
	// to run. During the next association interval, the new instance will process
	// its association within the limit specified for MaxConcurrency.
	MaxConcurrency *string `min:"1" type:"string"`

	// The number of errors that are allowed before the system stops sending requests
	// to run the association on additional targets. You can specify either an absolute
	// number of errors, for example 10, or a percentage of the target set, for
	// example 10%. If you specify 3, for example, the system stops sending requests
	// when the fourth error is received. If you specify 0, then the system stops
	// sending requests after the first error is returned. If you run an association
	// on 50 instances and set MaxErrors to 10%, then the system stops sending the
	// request when the sixth error is received.
	//
	// Executions that are already running an association when MaxErrors is reached
	// are allowed to complete, but some of these executions may fail as well. If
	// you need to ensure that there won't be more than max-errors failed executions,
	// set MaxConcurrency to 1 so that executions proceed one at a time.
	MaxErrors *string `min:"1" type:"string"`

	// The name of the SSM document that contains the configuration information
	// for the instance. You can specify Command or Automation documents.
	//
	// You can specify AWS-predefined documents, documents you created, or a document
	// that is shared with you from another account.
	//
	// For SSM documents that are shared with you from other AWS accounts, you must
	// specify the complete SSM document ARN, in the following format:
	//
	// arn:aws:ssm:region:account-id:document/document-name
	//
	// For example:
	//
	// arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document
	//
	// For AWS-predefined documents and SSM documents you created in your account,
	// you only need to specify the document name. For example, AWS-ApplyPatchBaseline
	// or My-Document.
	Name *string `type:"string"`

	// An S3 bucket where you want to store the results of this request.
	OutputLocation *InstanceAssociationOutputLocation `type:"structure"`

	// The parameters you want to update for the association. If you create a parameter
	// using Parameter Store, you can reference the parameter using {{ssm:parameter-name}}
	Parameters map[string][]*string `type:"map"`

	// The cron expression used to schedule the association that you want to update.
	ScheduleExpression *string `min:"1" type:"string"`

	// The mode for generating association compliance. You can specify AUTO or MANUAL.
	// In AUTO mode, the system uses the status of the association execution to
	// determine the compliance status. If the association execution runs successfully,
	// then the association is COMPLIANT. If the association execution doesn't run
	// successfully, the association is NON-COMPLIANT.
	//
	// In MANUAL mode, you must specify the AssociationId as a parameter for the
	// PutComplianceItems API action. In this case, compliance data is not managed
	// by State Manager. It is managed by your direct call to the PutComplianceItems
	// API action.
	//
	// By default, all associations use AUTO mode.
	SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"`

	// The targets of the association.
	Targets []*Target `type:"list"`
}

// String returns the string representation
func (s UpdateAssociationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateAssociationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateAssociationInput"}
	if s.AssociationId == nil {
		invalidParams.Add(request.NewErrParamRequired("AssociationId"))
	}
	if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1))
	}
	if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1))
	}
	if s.MaxErrors != nil && len(*s.MaxErrors) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1))
	}
	if s.ScheduleExpression != nil && len(*s.ScheduleExpression) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ScheduleExpression", 1))
	}
	// Nested structures are validated recursively and their errors attached
	// under the parent field name.
	if s.OutputLocation != nil {
		if err := s.OutputLocation.Validate(); err != nil {
			invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
		}
	}
	if s.Targets != nil {
		for i, v := range s.Targets {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value.
func (s *UpdateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *UpdateAssociationInput {
	s.ApplyOnlyAtCronInterval = &v
	return s
}

// SetAssociationId sets the AssociationId field's value.
func (s *UpdateAssociationInput) SetAssociationId(v string) *UpdateAssociationInput {
	s.AssociationId = &v
	return s
}

// SetAssociationName sets the AssociationName field's value.
func (s *UpdateAssociationInput) SetAssociationName(v string) *UpdateAssociationInput {
	s.AssociationName = &v
	return s
}

// SetAssociationVersion sets the AssociationVersion field's value.
func (s *UpdateAssociationInput) SetAssociationVersion(v string) *UpdateAssociationInput {
	s.AssociationVersion = &v
	return s
}

// SetAutomationTargetParameterName sets the AutomationTargetParameterName field's value.
func (s *UpdateAssociationInput) SetAutomationTargetParameterName(v string) *UpdateAssociationInput {
	s.AutomationTargetParameterName = &v
	return s
}

// SetComplianceSeverity sets the ComplianceSeverity field's value.
func (s *UpdateAssociationInput) SetComplianceSeverity(v string) *UpdateAssociationInput {
	s.ComplianceSeverity = &v
	return s
}

// SetDocumentVersion sets the DocumentVersion field's value.
func (s *UpdateAssociationInput) SetDocumentVersion(v string) *UpdateAssociationInput {
	s.DocumentVersion = &v
	return s
}

// SetMaxConcurrency sets the MaxConcurrency field's value.
func (s *UpdateAssociationInput) SetMaxConcurrency(v string) *UpdateAssociationInput {
	s.MaxConcurrency = &v
	return s
}

// SetMaxErrors sets the MaxErrors field's value.
func (s *UpdateAssociationInput) SetMaxErrors(v string) *UpdateAssociationInput {
	s.MaxErrors = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateAssociationInput) SetName(v string) *UpdateAssociationInput {
	s.Name = &v
	return s
}

// SetOutputLocation sets the OutputLocation field's value.
func (s *UpdateAssociationInput) SetOutputLocation(v *InstanceAssociationOutputLocation) *UpdateAssociationInput {
	s.OutputLocation = v
	return s
}

// SetParameters sets the Parameters field's value.
func (s *UpdateAssociationInput) SetParameters(v map[string][]*string) *UpdateAssociationInput {
	s.Parameters = v
	return s
}

// SetScheduleExpression sets the ScheduleExpression field's value.
func (s *UpdateAssociationInput) SetScheduleExpression(v string) *UpdateAssociationInput {
	s.ScheduleExpression = &v
	return s
}

// SetSyncCompliance sets the SyncCompliance field's value.
func (s *UpdateAssociationInput) SetSyncCompliance(v string) *UpdateAssociationInput {
	s.SyncCompliance = &v
	return s
}

// SetTargets sets the Targets field's value.
func (s *UpdateAssociationInput) SetTargets(v []*Target) *UpdateAssociationInput {
	s.Targets = v
	return s
}

// UpdateAssociationOutput is the response payload for the UpdateAssociation API.
type UpdateAssociationOutput struct {
	_ struct{} `type:"structure"`

	// The description of the association that was updated.
	AssociationDescription *AssociationDescription `type:"structure"`
}

// String returns the string representation
func (s UpdateAssociationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationOutput) GoString() string {
	return s.String()
}

// SetAssociationDescription sets the AssociationDescription field's value.
func (s *UpdateAssociationOutput) SetAssociationDescription(v *AssociationDescription) *UpdateAssociationOutput {
	s.AssociationDescription = v
	return s
}

// UpdateAssociationStatusInput is the request payload for the UpdateAssociationStatus API.
type UpdateAssociationStatusInput struct {
	_ struct{} `type:"structure"`

	// The association status.
	//
	// AssociationStatus is a required field
	AssociationStatus *AssociationStatus `type:"structure" required:"true"`

	// The ID of the instance.
	//
	// InstanceId is a required field
	InstanceId *string `type:"string" required:"true"`

	// The name of the Systems Manager document.
	//
	// Name is a required field
	Name *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateAssociationStatusInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationStatusInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateAssociationStatusInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateAssociationStatusInput"}
	if s.AssociationStatus == nil {
		invalidParams.Add(request.NewErrParamRequired("AssociationStatus"))
	}
	if s.InstanceId == nil {
		invalidParams.Add(request.NewErrParamRequired("InstanceId"))
	}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	// The required AssociationStatus structure is also validated recursively.
	if s.AssociationStatus != nil {
		if err := s.AssociationStatus.Validate(); err != nil {
			invalidParams.AddNested("AssociationStatus", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAssociationStatus sets the AssociationStatus field's value.
func (s *UpdateAssociationStatusInput) SetAssociationStatus(v *AssociationStatus) *UpdateAssociationStatusInput {
	s.AssociationStatus = v
	return s
}

// SetInstanceId sets the InstanceId field's value.
func (s *UpdateAssociationStatusInput) SetInstanceId(v string) *UpdateAssociationStatusInput {
	s.InstanceId = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateAssociationStatusInput) SetName(v string) *UpdateAssociationStatusInput {
	s.Name = &v
	return s
}

// UpdateAssociationStatusOutput is the response payload for the UpdateAssociationStatus API.
type UpdateAssociationStatusOutput struct {
	_ struct{} `type:"structure"`

	// Information about the association.
	AssociationDescription *AssociationDescription `type:"structure"`
}

// String returns the string representation
func (s UpdateAssociationStatusOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateAssociationStatusOutput) GoString() string {
	return s.String()
}

// SetAssociationDescription sets the AssociationDescription field's value.
func (s *UpdateAssociationStatusOutput) SetAssociationDescription(v *AssociationDescription) *UpdateAssociationStatusOutput {
	s.AssociationDescription = v
	return s
}

// UpdateDocumentDefaultVersionInput is the request payload for the
// UpdateDocumentDefaultVersion API.
type UpdateDocumentDefaultVersionInput struct {
	_ struct{} `type:"structure"`

	// The version of a custom document that you want to set as the default version.
	//
	// DocumentVersion is a required field
	DocumentVersion *string `type:"string" required:"true"`

	// The name of a custom document that you want to set as the default version.
	//
	// Name is a required field
	Name *string `type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateDocumentDefaultVersionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDocumentDefaultVersionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateDocumentDefaultVersionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateDocumentDefaultVersionInput"}
	if s.DocumentVersion == nil {
		invalidParams.Add(request.NewErrParamRequired("DocumentVersion"))
	}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDocumentVersion sets the DocumentVersion field's value.
func (s *UpdateDocumentDefaultVersionInput) SetDocumentVersion(v string) *UpdateDocumentDefaultVersionInput {
	s.DocumentVersion = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateDocumentDefaultVersionInput) SetName(v string) *UpdateDocumentDefaultVersionInput {
	s.Name = &v
	return s
}

// UpdateDocumentDefaultVersionOutput is the response payload for the
// UpdateDocumentDefaultVersion API.
type UpdateDocumentDefaultVersionOutput struct {
	_ struct{} `type:"structure"`

	// The description of a custom document that you want to set as the default
	// version.
	Description *DocumentDefaultVersionDescription `type:"structure"`
}

// String returns the string representation
func (s UpdateDocumentDefaultVersionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDocumentDefaultVersionOutput) GoString() string {
	return s.String()
}

// SetDescription sets the Description field's value.
func (s *UpdateDocumentDefaultVersionOutput) SetDescription(v *DocumentDefaultVersionDescription) *UpdateDocumentDefaultVersionOutput {
	s.Description = v
	return s
}

// UpdateDocumentInput is the request payload for the UpdateDocument API.
type UpdateDocumentInput struct {
	_ struct{} `type:"structure"`

	// A list of key and value pairs that describe attachments to a version of a
	// document.
	Attachments []*AttachmentsSource `type:"list"`

	// A valid JSON or YAML string.
	//
	// Content is a required field
	Content *string `min:"1" type:"string" required:"true"`

	// Specify the document format for the new document version. Systems Manager
	// supports JSON and YAML documents. JSON is the default format.
	DocumentFormat *string `type:"string" enum:"DocumentFormat"`

	// (Required) The latest version of the document that you want to update. The
	// latest document version can be specified using the $LATEST variable or by
	// the version number. Updating a previous version of a document is not supported.
	DocumentVersion *string `type:"string"`

	// The name of the document that you want to update.
	//
	// Name is a required field
	Name *string `type:"string" required:"true"`

	// Specify a new target type for the document.
	TargetType *string `type:"string"`

	// An optional field specifying the version of the artifact you are updating
	// with the document. For example, "Release 12, Update 6". This value is unique
	// across all versions of a document, and cannot be changed.
	VersionName *string `type:"string"`
}

// String returns the string representation
func (s UpdateDocumentInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDocumentInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateDocumentInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateDocumentInput"}
	if s.Content == nil {
		invalidParams.Add(request.NewErrParamRequired("Content"))
	}
	if s.Content != nil && len(*s.Content) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Content", 1))
	}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	// Each non-nil attachment source is validated recursively.
	if s.Attachments != nil {
		for i, v := range s.Attachments {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attachments", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttachments sets the Attachments field's value.
func (s *UpdateDocumentInput) SetAttachments(v []*AttachmentsSource) *UpdateDocumentInput {
	s.Attachments = v
	return s
}

// SetContent sets the Content field's value.
func (s *UpdateDocumentInput) SetContent(v string) *UpdateDocumentInput {
	s.Content = &v
	return s
}

// SetDocumentFormat sets the DocumentFormat field's value.
func (s *UpdateDocumentInput) SetDocumentFormat(v string) *UpdateDocumentInput {
	s.DocumentFormat = &v
	return s
}

// SetDocumentVersion sets the DocumentVersion field's value.
func (s *UpdateDocumentInput) SetDocumentVersion(v string) *UpdateDocumentInput {
	s.DocumentVersion = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateDocumentInput) SetName(v string) *UpdateDocumentInput {
	s.Name = &v
	return s
}

// SetTargetType sets the TargetType field's value.
func (s *UpdateDocumentInput) SetTargetType(v string) *UpdateDocumentInput {
	s.TargetType = &v
	return s
}

// SetVersionName sets the VersionName field's value.
func (s *UpdateDocumentInput) SetVersionName(v string) *UpdateDocumentInput {
	s.VersionName = &v
	return s
}

// UpdateDocumentOutput is the response payload for the UpdateDocument API.
type UpdateDocumentOutput struct {
	_ struct{} `type:"structure"`

	// A description of the document that was updated.
	DocumentDescription *DocumentDescription `type:"structure"`
}

// String returns the string representation
func (s UpdateDocumentOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateDocumentOutput) GoString() string {
	return s.String()
}

// SetDocumentDescription sets the DocumentDescription field's value.
func (s *UpdateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *UpdateDocumentOutput {
	s.DocumentDescription = v
	return s
}

// UpdateMaintenanceWindowInput is the request payload for the UpdateMaintenanceWindow API.
type UpdateMaintenanceWindowInput struct {
	_ struct{} `type:"structure"`

	// Whether targets must be registered with the maintenance window before tasks
	// can be defined for those targets.
	AllowUnassociatedTargets *bool `type:"boolean"`

	// The number of hours before the end of the maintenance window that Systems
	// Manager stops scheduling new tasks for execution.
	Cutoff *int64 `type:"integer"`

	// An optional description for the update request.
	Description *string `min:"1" type:"string" sensitive:"true"`

	// The duration of the maintenance window in hours.
	Duration *int64 `min:"1" type:"integer"`

	// Whether the maintenance window is enabled.
	Enabled *bool `type:"boolean"`

	// The date and time, in ISO-8601 Extended format, for when you want the maintenance
	// window to become inactive. EndDate allows you to set a date and time in the
	// future when the maintenance window will no longer run.
	EndDate *string `type:"string"`

	// The name of the maintenance window.
	Name *string `min:"3" type:"string"`

	// If True, then all fields that are required by the CreateMaintenanceWindow
	// action are also required for this API request. Optional fields that are not
	// specified are set to null.
	Replace *bool `type:"boolean"`

	// The schedule of the maintenance window in the form of a cron or rate expression.
	Schedule *string `min:"1" type:"string"`

	// The number of days to wait after the date and time specified by a CRON expression
	// before running the maintenance window.
	//
	// For example, the following cron expression schedules a maintenance window
	// to run the third Tuesday of every month at 11:30 PM.
	//
	// cron(0 30 23 ? * TUE#3 *)
	//
	// If the schedule offset is 2, the maintenance window won't run until two days
	// later.
	ScheduleOffset *int64 `min:"1" type:"integer"`

	// The time zone that the scheduled maintenance window executions are based
	// on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles",
	// "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database
	// (https://www.iana.org/time-zones) on the IANA website.
	ScheduleTimezone *string `type:"string"`

	// The date and time, in ISO-8601 Extended format, for when you want the maintenance
	// window to become active. StartDate allows you to delay activation of the
	// maintenance window until the specified future date.
	StartDate *string `type:"string"`

	// The ID of the maintenance window to update.
	//
	// WindowId is a required field
	WindowId *string `min:"20" type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateMaintenanceWindowInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateMaintenanceWindowInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateMaintenanceWindowInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateMaintenanceWindowInput"}
	if s.Description != nil && len(*s.Description) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Description", 1))
	}
	if s.Duration != nil && *s.Duration < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Duration", 1))
	}
	if s.Name != nil && len(*s.Name) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 3))
	}
	if s.Schedule != nil && len(*s.Schedule) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Schedule", 1))
	}
	if s.ScheduleOffset != nil && *s.ScheduleOffset < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ScheduleOffset", 1))
	}
	if s.WindowId == nil {
		invalidParams.Add(request.NewErrParamRequired("WindowId"))
	}
	if s.WindowId != nil && len(*s.WindowId) < 20 {
		invalidParams.Add(request.NewErrParamMinLen("WindowId", 20))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAllowUnassociatedTargets sets the AllowUnassociatedTargets field's value.
func (s *UpdateMaintenanceWindowInput) SetAllowUnassociatedTargets(v bool) *UpdateMaintenanceWindowInput {
	s.AllowUnassociatedTargets = &v
	return s
}

// SetCutoff sets the Cutoff field's value.
func (s *UpdateMaintenanceWindowInput) SetCutoff(v int64) *UpdateMaintenanceWindowInput {
	s.Cutoff = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdateMaintenanceWindowInput) SetDescription(v string) *UpdateMaintenanceWindowInput {
	s.Description = &v
	return s
}

// SetDuration sets the Duration field's value.
func (s *UpdateMaintenanceWindowInput) SetDuration(v int64) *UpdateMaintenanceWindowInput {
	s.Duration = &v
	return s
}

// SetEnabled sets the Enabled field's value.
func (s *UpdateMaintenanceWindowInput) SetEnabled(v bool) *UpdateMaintenanceWindowInput {
	s.Enabled = &v
	return s
}

// SetEndDate sets the EndDate field's value.
func (s *UpdateMaintenanceWindowInput) SetEndDate(v string) *UpdateMaintenanceWindowInput {
	s.EndDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateMaintenanceWindowInput) SetName(v string) *UpdateMaintenanceWindowInput {
	s.Name = &v
	return s
}

// SetReplace sets the Replace field's value.
func (s *UpdateMaintenanceWindowInput) SetReplace(v bool) *UpdateMaintenanceWindowInput {
	s.Replace = &v
	return s
}

// SetSchedule sets the Schedule field's value.
func (s *UpdateMaintenanceWindowInput) SetSchedule(v string) *UpdateMaintenanceWindowInput {
	s.Schedule = &v
	return s
}

// SetScheduleOffset sets the ScheduleOffset field's value.
func (s *UpdateMaintenanceWindowInput) SetScheduleOffset(v int64) *UpdateMaintenanceWindowInput {
	s.ScheduleOffset = &v
	return s
}

// SetScheduleTimezone sets the ScheduleTimezone field's value.
func (s *UpdateMaintenanceWindowInput) SetScheduleTimezone(v string) *UpdateMaintenanceWindowInput {
	s.ScheduleTimezone = &v
	return s
}

// SetStartDate sets the StartDate field's value.
func (s *UpdateMaintenanceWindowInput) SetStartDate(v string) *UpdateMaintenanceWindowInput {
	s.StartDate = &v
	return s
}

// SetWindowId sets the WindowId field's value.
func (s *UpdateMaintenanceWindowInput) SetWindowId(v string) *UpdateMaintenanceWindowInput {
	s.WindowId = &v
	return s
}

// UpdateMaintenanceWindowOutput is the response payload for the UpdateMaintenanceWindow API.
type UpdateMaintenanceWindowOutput struct {
	_ struct{} `type:"structure"`

	// Whether targets must be registered with the maintenance window before tasks
	// can be defined for those targets.
	AllowUnassociatedTargets *bool `type:"boolean"`

	// The number of hours before the end of the maintenance window that Systems
	// Manager stops scheduling new tasks for execution.
	Cutoff *int64 `type:"integer"`

	// An optional description of the update.
	Description *string `min:"1" type:"string" sensitive:"true"`

	// The duration of the maintenance window in hours.
	Duration *int64 `min:"1" type:"integer"`

	// Whether the maintenance window is enabled.
	Enabled *bool `type:"boolean"`

	// The date and time, in ISO-8601 Extended format, for when the maintenance
	// window is scheduled to become inactive. The maintenance window will not run
	// after this specified time.
	EndDate *string `type:"string"`

	// The name of the maintenance window.
	Name *string `min:"3" type:"string"`

	// The schedule of the maintenance window in the form of a cron or rate expression.
	Schedule *string `min:"1" type:"string"`

	// The number of days to wait to run a maintenance window after the scheduled
	// CRON expression date and time.
	ScheduleOffset *int64 `min:"1" type:"integer"`

	// The time zone that the scheduled maintenance window executions are based
	// on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles",
	// "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database
	// (https://www.iana.org/time-zones) on the IANA website.
	ScheduleTimezone *string `type:"string"`

	// The date and time, in ISO-8601 Extended format, for when the maintenance
	// window is scheduled to become active. The maintenance window will not run
	// before this specified time.
	StartDate *string `type:"string"`

	// The ID of the created maintenance window.
	WindowId *string `min:"20" type:"string"`
}

// String returns the string representation
func (s UpdateMaintenanceWindowOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateMaintenanceWindowOutput) GoString() string {
	return s.String()
}

// SetAllowUnassociatedTargets sets the AllowUnassociatedTargets field's value.
func (s *UpdateMaintenanceWindowOutput) SetAllowUnassociatedTargets(v bool) *UpdateMaintenanceWindowOutput {
	s.AllowUnassociatedTargets = &v
	return s
}

// SetCutoff sets the Cutoff field's value.
func (s *UpdateMaintenanceWindowOutput) SetCutoff(v int64) *UpdateMaintenanceWindowOutput {
	s.Cutoff = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdateMaintenanceWindowOutput) SetDescription(v string) *UpdateMaintenanceWindowOutput {
	s.Description = &v
	return s
}

// SetDuration sets the Duration field's value.
func (s *UpdateMaintenanceWindowOutput) SetDuration(v int64) *UpdateMaintenanceWindowOutput {
	s.Duration = &v
	return s
}

// SetEnabled sets the Enabled field's value.
func (s *UpdateMaintenanceWindowOutput) SetEnabled(v bool) *UpdateMaintenanceWindowOutput {
	s.Enabled = &v
	return s
}

// SetEndDate sets the EndDate field's value.
func (s *UpdateMaintenanceWindowOutput) SetEndDate(v string) *UpdateMaintenanceWindowOutput {
	s.EndDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateMaintenanceWindowOutput) SetName(v string) *UpdateMaintenanceWindowOutput {
	s.Name = &v
	return s
}

// SetSchedule sets the Schedule field's value.
func (s *UpdateMaintenanceWindowOutput) SetSchedule(v string) *UpdateMaintenanceWindowOutput {
	s.Schedule = &v
	return s
}

// SetScheduleOffset sets the ScheduleOffset field's value.
+func (s *UpdateMaintenanceWindowOutput) SetScheduleOffset(v int64) *UpdateMaintenanceWindowOutput { + s.ScheduleOffset = &v + return s +} + +// SetScheduleTimezone sets the ScheduleTimezone field's value. +func (s *UpdateMaintenanceWindowOutput) SetScheduleTimezone(v string) *UpdateMaintenanceWindowOutput { + s.ScheduleTimezone = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *UpdateMaintenanceWindowOutput) SetStartDate(v string) *UpdateMaintenanceWindowOutput { + s.StartDate = &v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *UpdateMaintenanceWindowOutput) SetWindowId(v string) *UpdateMaintenanceWindowOutput { + s.WindowId = &v + return s +} + +type UpdateMaintenanceWindowTargetInput struct { + _ struct{} `type:"structure"` + + // An optional description for the update. + Description *string `min:"1" type:"string" sensitive:"true"` + + // A name for the update. + Name *string `min:"3" type:"string"` + + // User-provided value that will be included in any CloudWatch events raised + // while running tasks for these targets in this maintenance window. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow + // action are also required for this API request. Optional fields that are not + // specified are set to null. + Replace *bool `type:"boolean"` + + // The targets to add or replace. + Targets []*Target `type:"list"` + + // The maintenance window ID with which to modify the target. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` + + // The target ID to modify. 
+ // + // WindowTargetId is a required field + WindowTargetId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMaintenanceWindowTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceWindowTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMaintenanceWindowTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMaintenanceWindowTargetInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.OwnerInformation != nil && len(*s.OwnerInformation) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OwnerInformation", 1)) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.WindowTargetId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowTargetId")) + } + if s.WindowTargetId != nil && len(*s.WindowTargetId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowTargetId", 36)) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetDescription(v string) *UpdateMaintenanceWindowTargetInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *UpdateMaintenanceWindowTargetInput) SetName(v string) *UpdateMaintenanceWindowTargetInput { + s.Name = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetOwnerInformation(v string) *UpdateMaintenanceWindowTargetInput { + s.OwnerInformation = &v + return s +} + +// SetReplace sets the Replace field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetReplace(v bool) *UpdateMaintenanceWindowTargetInput { + s.Replace = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetTargets(v []*Target) *UpdateMaintenanceWindowTargetInput { + s.Targets = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetWindowId(v string) *UpdateMaintenanceWindowTargetInput { + s.WindowId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *UpdateMaintenanceWindowTargetInput) SetWindowTargetId(v string) *UpdateMaintenanceWindowTargetInput { + s.WindowTargetId = &v + return s +} + +type UpdateMaintenanceWindowTargetOutput struct { + _ struct{} `type:"structure"` + + // The updated description. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The updated name. + Name *string `min:"3" type:"string"` + + // The updated owner. + OwnerInformation *string `min:"1" type:"string" sensitive:"true"` + + // The updated targets. + Targets []*Target `type:"list"` + + // The maintenance window ID specified in the update request. + WindowId *string `min:"20" type:"string"` + + // The target ID specified in the update request. 
+ WindowTargetId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s UpdateMaintenanceWindowTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceWindowTargetOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetDescription(v string) *UpdateMaintenanceWindowTargetOutput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetName(v string) *UpdateMaintenanceWindowTargetOutput { + s.Name = &v + return s +} + +// SetOwnerInformation sets the OwnerInformation field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetOwnerInformation(v string) *UpdateMaintenanceWindowTargetOutput { + s.OwnerInformation = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetTargets(v []*Target) *UpdateMaintenanceWindowTargetOutput { + s.Targets = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetWindowId(v string) *UpdateMaintenanceWindowTargetOutput { + s.WindowId = &v + return s +} + +// SetWindowTargetId sets the WindowTargetId field's value. +func (s *UpdateMaintenanceWindowTargetOutput) SetWindowTargetId(v string) *UpdateMaintenanceWindowTargetOutput { + s.WindowTargetId = &v + return s +} + +type UpdateMaintenanceWindowTaskInput struct { + _ struct{} `type:"structure"` + + // The new task description to specify. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The new logging location in Amazon S3 to specify. + // + // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. 
For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + LoggingInfo *LoggingInfo `type:"structure"` + + // The new MaxConcurrency value you want to specify. MaxConcurrency is the number + // of targets that are allowed to run this task in parallel. + MaxConcurrency *string `min:"1" type:"string"` + + // The new MaxErrors value to specify. MaxErrors is the maximum number of errors + // that are allowed before the task stops being scheduled. + MaxErrors *string `min:"1" type:"string"` + + // The new task name to specify. + Name *string `min:"3" type:"string"` + + // The new task priority to specify. The lower the number, the higher the priority. + // Tasks that have the same priority are scheduled in parallel. + Priority *int64 `type:"integer"` + + // If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow + // action are also required for this API request. Optional fields that are not + // specified are set to null. + Replace *bool `type:"boolean"` + + // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. + // + // For more information, see the following topics in the in the AWS Systems + // Manager User Guide: + // + // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I use a service-linked role or a custom service role to run maintenance + // window tasks? 
(https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) + ServiceRoleArn *string `type:"string"` + + // The targets (either instances or tags) to modify. Instances are specified + // using Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified + // using Key=tag_name,Values=tag_value. + Targets []*Target `type:"list"` + + // The task ARN to modify. + TaskArn *string `min:"1" type:"string"` + + // The parameters that the task should use during execution. Populate only the + // fields that match the task type. All other fields should be empty. + // + // When you update a maintenance window task that has options specified in TaskInvocationParameters, + // you must provide again all the TaskInvocationParameters values that you want + // to retain. The values you do not specify again are removed. For example, + // suppose that when you registered a Run Command task, you specified TaskInvocationParameters + // values for Comment, NotificationConfig, and OutputS3BucketName. If you update + // the maintenance window task and specify only a different OutputS3BucketName + // value, the values for Comment and NotificationConfig are removed. + TaskInvocationParameters *MaintenanceWindowTaskInvocationParameters `type:"structure"` + + // The parameters to modify. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. 
+ // + // The map has the following format: + // + // Key: string, between 1 and 255 characters + // + // Value: an array of strings, each string is between 1 and 255 characters + TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` + + // The maintenance window ID that contains the task to modify. + // + // WindowId is a required field + WindowId *string `min:"20" type:"string" required:"true"` + + // The task ID to modify. + // + // WindowTaskId is a required field + WindowTaskId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMaintenanceWindowTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceWindowTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMaintenanceWindowTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMaintenanceWindowTaskInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1)) + } + if s.MaxErrors != nil && len(*s.MaxErrors) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MaxErrors", 1)) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.TaskArn != nil && len(*s.TaskArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TaskArn", 1)) + } + if s.WindowId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowId")) + } + if s.WindowId != nil && len(*s.WindowId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WindowId", 20)) + } + if s.WindowTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("WindowTaskId")) + } + if 
s.WindowTaskId != nil && len(*s.WindowTaskId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("WindowTaskId", 36)) + } + if s.LoggingInfo != nil { + if err := s.LoggingInfo.Validate(); err != nil { + invalidParams.AddNested("LoggingInfo", err.(request.ErrInvalidParams)) + } + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TaskInvocationParameters != nil { + if err := s.TaskInvocationParameters.Validate(); err != nil { + invalidParams.AddNested("TaskInvocationParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetDescription(v string) *UpdateMaintenanceWindowTaskInput { + s.Description = &v + return s +} + +// SetLoggingInfo sets the LoggingInfo field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetLoggingInfo(v *LoggingInfo) *UpdateMaintenanceWindowTaskInput { + s.LoggingInfo = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetMaxConcurrency(v string) *UpdateMaintenanceWindowTaskInput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetMaxErrors(v string) *UpdateMaintenanceWindowTaskInput { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetName(v string) *UpdateMaintenanceWindowTaskInput { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetPriority(v int64) *UpdateMaintenanceWindowTaskInput { + s.Priority = &v + return s +} + +// SetReplace sets the Replace field's value. 
+func (s *UpdateMaintenanceWindowTaskInput) SetReplace(v bool) *UpdateMaintenanceWindowTaskInput { + s.Replace = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetServiceRoleArn(v string) *UpdateMaintenanceWindowTaskInput { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetTargets(v []*Target) *UpdateMaintenanceWindowTaskInput { + s.Targets = v + return s +} + +// SetTaskArn sets the TaskArn field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetTaskArn(v string) *UpdateMaintenanceWindowTaskInput { + s.TaskArn = &v + return s +} + +// SetTaskInvocationParameters sets the TaskInvocationParameters field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetTaskInvocationParameters(v *MaintenanceWindowTaskInvocationParameters) *UpdateMaintenanceWindowTaskInput { + s.TaskInvocationParameters = v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetTaskParameters(v map[string]*MaintenanceWindowTaskParameterValueExpression) *UpdateMaintenanceWindowTaskInput { + s.TaskParameters = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetWindowId(v string) *UpdateMaintenanceWindowTaskInput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *UpdateMaintenanceWindowTaskInput) SetWindowTaskId(v string) *UpdateMaintenanceWindowTaskInput { + s.WindowTaskId = &v + return s +} + +type UpdateMaintenanceWindowTaskOutput struct { + _ struct{} `type:"structure"` + + // The updated task description. + Description *string `min:"1" type:"string" sensitive:"true"` + + // The updated logging information in Amazon S3. + // + // LoggingInfo has been deprecated. 
To specify an S3 bucket to contain logs, + // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + LoggingInfo *LoggingInfo `type:"structure"` + + // The updated MaxConcurrency value. + MaxConcurrency *string `min:"1" type:"string"` + + // The updated MaxErrors value. + MaxErrors *string `min:"1" type:"string"` + + // The updated task name. + Name *string `min:"3" type:"string"` + + // The updated priority value. + Priority *int64 `type:"integer"` + + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. + ServiceRoleArn *string `type:"string"` + + // The updated target values. + Targets []*Target `type:"list"` + + // The updated task ARN value. + TaskArn *string `min:"1" type:"string"` + + // The updated parameter values. + TaskInvocationParameters *MaintenanceWindowTaskInvocationParameters `type:"structure"` + + // The updated parameter values. + // + // TaskParameters has been deprecated. To specify parameters to pass to a task + // when it runs, instead use the Parameters option in the TaskInvocationParameters + // structure. For information about how Systems Manager handles these options + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. + TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` + + // The ID of the maintenance window that was updated. + WindowId *string `min:"20" type:"string"` + + // The task ID of the maintenance window that was updated. 
+ WindowTaskId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s UpdateMaintenanceWindowTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMaintenanceWindowTaskOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetDescription(v string) *UpdateMaintenanceWindowTaskOutput { + s.Description = &v + return s +} + +// SetLoggingInfo sets the LoggingInfo field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetLoggingInfo(v *LoggingInfo) *UpdateMaintenanceWindowTaskOutput { + s.LoggingInfo = v + return s +} + +// SetMaxConcurrency sets the MaxConcurrency field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetMaxConcurrency(v string) *UpdateMaintenanceWindowTaskOutput { + s.MaxConcurrency = &v + return s +} + +// SetMaxErrors sets the MaxErrors field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetMaxErrors(v string) *UpdateMaintenanceWindowTaskOutput { + s.MaxErrors = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetName(v string) *UpdateMaintenanceWindowTaskOutput { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetPriority(v int64) *UpdateMaintenanceWindowTaskOutput { + s.Priority = &v + return s +} + +// SetServiceRoleArn sets the ServiceRoleArn field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetServiceRoleArn(v string) *UpdateMaintenanceWindowTaskOutput { + s.ServiceRoleArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetTargets(v []*Target) *UpdateMaintenanceWindowTaskOutput { + s.Targets = v + return s +} + +// SetTaskArn sets the TaskArn field's value. 
+func (s *UpdateMaintenanceWindowTaskOutput) SetTaskArn(v string) *UpdateMaintenanceWindowTaskOutput { + s.TaskArn = &v + return s +} + +// SetTaskInvocationParameters sets the TaskInvocationParameters field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetTaskInvocationParameters(v *MaintenanceWindowTaskInvocationParameters) *UpdateMaintenanceWindowTaskOutput { + s.TaskInvocationParameters = v + return s +} + +// SetTaskParameters sets the TaskParameters field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetTaskParameters(v map[string]*MaintenanceWindowTaskParameterValueExpression) *UpdateMaintenanceWindowTaskOutput { + s.TaskParameters = v + return s +} + +// SetWindowId sets the WindowId field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetWindowId(v string) *UpdateMaintenanceWindowTaskOutput { + s.WindowId = &v + return s +} + +// SetWindowTaskId sets the WindowTaskId field's value. +func (s *UpdateMaintenanceWindowTaskOutput) SetWindowTaskId(v string) *UpdateMaintenanceWindowTaskOutput { + s.WindowTaskId = &v + return s +} + +type UpdateManagedInstanceRoleInput struct { + _ struct{} `type:"structure"` + + // The IAM role you want to assign or change. + // + // IamRole is a required field + IamRole *string `type:"string" required:"true"` + + // The ID of the managed instance where you want to update the role. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateManagedInstanceRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateManagedInstanceRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateManagedInstanceRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateManagedInstanceRoleInput"} + if s.IamRole == nil { + invalidParams.Add(request.NewErrParamRequired("IamRole")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIamRole sets the IamRole field's value. +func (s *UpdateManagedInstanceRoleInput) SetIamRole(v string) *UpdateManagedInstanceRoleInput { + s.IamRole = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *UpdateManagedInstanceRoleInput) SetInstanceId(v string) *UpdateManagedInstanceRoleInput { + s.InstanceId = &v + return s +} + +type UpdateManagedInstanceRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateManagedInstanceRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateManagedInstanceRoleOutput) GoString() string { + return s.String() +} + +type UpdateOpsItemInput struct { + _ struct{} `type:"structure"` + + // Specify a new category for an OpsItem. + Category *string `min:"1" type:"string"` + + // Update the information about the OpsItem. Provide enough information so that + // users reading this OpsItem for the first time understand the issue. + Description *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of an SNS topic where notifications are sent + // when this OpsItem is edited or changed. + Notifications []*OpsItemNotification `type:"list"` + + // Add new keys or edit existing key-value pairs of the OperationalData map + // in the OpsItem object. + // + // Operational data is custom data that provides useful reference details about + // the OpsItem. 
For example, you can specify log files, error strings, license + // keys, troubleshooting tips, or other relevant data. You enter operational + // data as key-value pairs. The key has a maximum length of 128 characters. + // The value has a maximum size of 20 KB. + // + // Operational data keys can't begin with the following: amazon, aws, amzn, + // ssm, /amazon, /aws, /amzn, /ssm. + // + // You can choose to make the data searchable by other users in the account + // or you can restrict search access. Searchable data means that all users with + // access to the OpsItem Overview page (as provided by the DescribeOpsItems + // API action) can view and search on the specified data. Operational data that + // is not searchable is only viewable by users who have access to the OpsItem + // (as provided by the GetOpsItem API action). + // + // Use the /aws/resources key in OperationalData to specify a related resource + // in the request. Use the /aws/automations key in OperationalData to associate + // an Automation runbook with the OpsItem. To view AWS CLI example commands + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // in the AWS Systems Manager User Guide. + OperationalData map[string]*OpsItemDataValue `type:"map"` + + // Keys that you want to remove from the OperationalData map. + OperationalDataToDelete []*string `type:"list"` + + // The ID of the OpsItem. + // + // OpsItemId is a required field + OpsItemId *string `type:"string" required:"true"` + + // The importance of this OpsItem in relation to other OpsItems in the system. + Priority *int64 `min:"1" type:"integer"` + + // One or more OpsItems that share something in common with the current OpsItems. + // For example, related OpsItems can include OpsItems with similar error messages, + // impacted resources, or statuses for the impacted resource. 
+ RelatedOpsItems []*RelatedOpsItem `type:"list"` + + // Specify a new severity for an OpsItem. + Severity *string `min:"1" type:"string"` + + // The OpsItem status. Status can be Open, In Progress, or Resolved. For more + // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems.html#OpsCenter-working-with-OpsItems-editing-details) + // in the AWS Systems Manager User Guide. + Status *string `type:"string" enum:"OpsItemStatus"` + + // A short heading that describes the nature of the OpsItem and the impacted + // resource. + Title *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateOpsItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpsItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateOpsItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOpsItemInput"} + if s.Category != nil && len(*s.Category) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Category", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.OpsItemId == nil { + invalidParams.Add(request.NewErrParamRequired("OpsItemId")) + } + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + if s.Severity != nil && len(*s.Severity) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Severity", 1)) + } + if s.Title != nil && len(*s.Title) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Title", 1)) + } + if s.RelatedOpsItems != nil { + for i, v := range s.RelatedOpsItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedOpsItems", i), 
err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCategory sets the Category field's value. +func (s *UpdateOpsItemInput) SetCategory(v string) *UpdateOpsItemInput { + s.Category = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateOpsItemInput) SetDescription(v string) *UpdateOpsItemInput { + s.Description = &v + return s +} + +// SetNotifications sets the Notifications field's value. +func (s *UpdateOpsItemInput) SetNotifications(v []*OpsItemNotification) *UpdateOpsItemInput { + s.Notifications = v + return s +} + +// SetOperationalData sets the OperationalData field's value. +func (s *UpdateOpsItemInput) SetOperationalData(v map[string]*OpsItemDataValue) *UpdateOpsItemInput { + s.OperationalData = v + return s +} + +// SetOperationalDataToDelete sets the OperationalDataToDelete field's value. +func (s *UpdateOpsItemInput) SetOperationalDataToDelete(v []*string) *UpdateOpsItemInput { + s.OperationalDataToDelete = v + return s +} + +// SetOpsItemId sets the OpsItemId field's value. +func (s *UpdateOpsItemInput) SetOpsItemId(v string) *UpdateOpsItemInput { + s.OpsItemId = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *UpdateOpsItemInput) SetPriority(v int64) *UpdateOpsItemInput { + s.Priority = &v + return s +} + +// SetRelatedOpsItems sets the RelatedOpsItems field's value. +func (s *UpdateOpsItemInput) SetRelatedOpsItems(v []*RelatedOpsItem) *UpdateOpsItemInput { + s.RelatedOpsItems = v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *UpdateOpsItemInput) SetSeverity(v string) *UpdateOpsItemInput { + s.Severity = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateOpsItemInput) SetStatus(v string) *UpdateOpsItemInput { + s.Status = &v + return s +} + +// SetTitle sets the Title field's value. 
func (s *UpdateOpsItemInput) SetTitle(v string) *UpdateOpsItemInput {
	s.Title = &v
	return s
}

// UpdateOpsItemOutput is the response to an UpdateOpsItem request; it carries
// no fields.
type UpdateOpsItemOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UpdateOpsItemOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateOpsItemOutput) GoString() string {
	return s.String()
}

// UpdatePatchBaselineInput is the request for the UpdatePatchBaseline operation.
type UpdatePatchBaselineInput struct {
	_ struct{} `type:"structure"`

	// A set of rules used to include patches in the baseline.
	ApprovalRules *PatchRuleGroup `type:"structure"`

	// A list of explicitly approved patches for the baseline.
	//
	// For information about accepted formats for lists of approved patches and
	// rejected patches, see About package name formats for approved and rejected
	// patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html)
	// in the AWS Systems Manager User Guide.
	ApprovedPatches []*string `type:"list"`

	// Assigns a new compliance severity level to an existing patch baseline.
	ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"`

	// Indicates whether the list of approved patches includes non-security updates
	// that should be applied to the instances. The default value is 'false'. Applies
	// to Linux instances only.
	ApprovedPatchesEnableNonSecurity *bool `type:"boolean"`

	// The ID of the patch baseline to update.
	//
	// BaselineId is a required field
	BaselineId *string `min:"20" type:"string" required:"true"`

	// A description of the patch baseline.
	Description *string `min:"1" type:"string"`

	// A set of global filters used to include patches in the baseline.
	GlobalFilters *PatchFilterGroup `type:"structure"`

	// The name of the patch baseline.
	Name *string `min:"3" type:"string"`

	// A list of explicitly rejected patches for the baseline.
	//
	// For information about accepted formats for lists of approved patches and
	// rejected patches, see About package name formats for approved and rejected
	// patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html)
	// in the AWS Systems Manager User Guide.
	RejectedPatches []*string `type:"list"`

	// The action for Patch Manager to take on patches included in the RejectedPackages
	// list.
	//
	//    * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is installed
	//    only if it is a dependency of another package. It is considered compliant
	//    with the patch baseline, and its status is reported as InstalledOther.
	//    This is the default action if no option is specified.
	//
	//    * BLOCK: Packages in the RejectedPatches list, and packages that include
	//    them as dependencies, are not installed under any circumstances. If a
	//    package was installed before it was added to the Rejected patches list,
	//    it is considered non-compliant with the patch baseline, and its status
	//    is reported as InstalledRejected.
	RejectedPatchesAction *string `type:"string" enum:"PatchAction"`

	// If True, then all fields that are required by the CreatePatchBaseline action
	// are also required for this API request. Optional fields that are not specified
	// are set to null.
	Replace *bool `type:"boolean"`

	// Information about the patches to use to update the instances, including target
	// operating systems and source repositories. Applies to Linux instances only.
	Sources []*PatchSource `type:"list"`
}

// String returns the string representation
func (s UpdatePatchBaselineInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePatchBaselineInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdatePatchBaselineInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePatchBaselineInput"} + if s.BaselineId == nil { + invalidParams.Add(request.NewErrParamRequired("BaselineId")) + } + if s.BaselineId != nil && len(*s.BaselineId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("BaselineId", 20)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.ApprovalRules != nil { + if err := s.ApprovalRules.Validate(); err != nil { + invalidParams.AddNested("ApprovalRules", err.(request.ErrInvalidParams)) + } + } + if s.GlobalFilters != nil { + if err := s.GlobalFilters.Validate(); err != nil { + invalidParams.AddNested("GlobalFilters", err.(request.ErrInvalidParams)) + } + } + if s.Sources != nil { + for i, v := range s.Sources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sources", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApprovalRules sets the ApprovalRules field's value. +func (s *UpdatePatchBaselineInput) SetApprovalRules(v *PatchRuleGroup) *UpdatePatchBaselineInput { + s.ApprovalRules = v + return s +} + +// SetApprovedPatches sets the ApprovedPatches field's value. +func (s *UpdatePatchBaselineInput) SetApprovedPatches(v []*string) *UpdatePatchBaselineInput { + s.ApprovedPatches = v + return s +} + +// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value. +func (s *UpdatePatchBaselineInput) SetApprovedPatchesComplianceLevel(v string) *UpdatePatchBaselineInput { + s.ApprovedPatchesComplianceLevel = &v + return s +} + +// SetApprovedPatchesEnableNonSecurity sets the ApprovedPatchesEnableNonSecurity field's value. 
func (s *UpdatePatchBaselineInput) SetApprovedPatchesEnableNonSecurity(v bool) *UpdatePatchBaselineInput {
	s.ApprovedPatchesEnableNonSecurity = &v
	return s
}

// SetBaselineId sets the BaselineId field's value.
func (s *UpdatePatchBaselineInput) SetBaselineId(v string) *UpdatePatchBaselineInput {
	s.BaselineId = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdatePatchBaselineInput) SetDescription(v string) *UpdatePatchBaselineInput {
	s.Description = &v
	return s
}

// SetGlobalFilters sets the GlobalFilters field's value.
func (s *UpdatePatchBaselineInput) SetGlobalFilters(v *PatchFilterGroup) *UpdatePatchBaselineInput {
	s.GlobalFilters = v
	return s
}

// SetName sets the Name field's value.
func (s *UpdatePatchBaselineInput) SetName(v string) *UpdatePatchBaselineInput {
	s.Name = &v
	return s
}

// SetRejectedPatches sets the RejectedPatches field's value.
func (s *UpdatePatchBaselineInput) SetRejectedPatches(v []*string) *UpdatePatchBaselineInput {
	s.RejectedPatches = v
	return s
}

// SetRejectedPatchesAction sets the RejectedPatchesAction field's value.
func (s *UpdatePatchBaselineInput) SetRejectedPatchesAction(v string) *UpdatePatchBaselineInput {
	s.RejectedPatchesAction = &v
	return s
}

// SetReplace sets the Replace field's value.
func (s *UpdatePatchBaselineInput) SetReplace(v bool) *UpdatePatchBaselineInput {
	s.Replace = &v
	return s
}

// SetSources sets the Sources field's value.
func (s *UpdatePatchBaselineInput) SetSources(v []*PatchSource) *UpdatePatchBaselineInput {
	s.Sources = v
	return s
}

// UpdatePatchBaselineOutput is the response from the UpdatePatchBaseline operation.
type UpdatePatchBaselineOutput struct {
	_ struct{} `type:"structure"`

	// A set of rules used to include patches in the baseline.
	ApprovalRules *PatchRuleGroup `type:"structure"`

	// A list of explicitly approved patches for the baseline.
	ApprovedPatches []*string `type:"list"`

	// The compliance severity level assigned to the patch baseline after the update
	// completed.
	ApprovedPatchesComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"`

	// Indicates whether the list of approved patches includes non-security updates
	// that should be applied to the instances. The default value is 'false'. Applies
	// to Linux instances only.
	ApprovedPatchesEnableNonSecurity *bool `type:"boolean"`

	// The ID of the patch baseline that was updated. (NOTE(review): the upstream
	// service model describes this as "the deleted patch baseline", which is a
	// copy/paste error from DeletePatchBaseline.)
	BaselineId *string `min:"20" type:"string"`

	// The date when the patch baseline was created.
	CreatedDate *time.Time `type:"timestamp"`

	// A description of the patch baseline.
	Description *string `min:"1" type:"string"`

	// A set of global filters used to exclude patches from the baseline.
	GlobalFilters *PatchFilterGroup `type:"structure"`

	// The date when the patch baseline was last modified.
	ModifiedDate *time.Time `type:"timestamp"`

	// The name of the patch baseline.
	Name *string `min:"3" type:"string"`

	// The operating system rule used by the updated patch baseline.
	OperatingSystem *string `type:"string" enum:"OperatingSystem"`

	// A list of explicitly rejected patches for the baseline.
	RejectedPatches []*string `type:"list"`

	// The action specified to take on patches included in the RejectedPatches list.
	// A patch can be allowed only if it is a dependency of another package, or
	// blocked entirely along with packages that include it as a dependency.
	RejectedPatchesAction *string `type:"string" enum:"PatchAction"`

	// Information about the patches to use to update the instances, including target
	// operating systems and source repositories. Applies to Linux instances only.
	Sources []*PatchSource `type:"list"`
}

// String returns the string representation
func (s UpdatePatchBaselineOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePatchBaselineOutput) GoString() string {
	return s.String()
}

// SetApprovalRules sets the ApprovalRules field's value.
func (s *UpdatePatchBaselineOutput) SetApprovalRules(v *PatchRuleGroup) *UpdatePatchBaselineOutput {
	s.ApprovalRules = v
	return s
}

// SetApprovedPatches sets the ApprovedPatches field's value.
func (s *UpdatePatchBaselineOutput) SetApprovedPatches(v []*string) *UpdatePatchBaselineOutput {
	s.ApprovedPatches = v
	return s
}

// SetApprovedPatchesComplianceLevel sets the ApprovedPatchesComplianceLevel field's value.
func (s *UpdatePatchBaselineOutput) SetApprovedPatchesComplianceLevel(v string) *UpdatePatchBaselineOutput {
	s.ApprovedPatchesComplianceLevel = &v
	return s
}

// SetApprovedPatchesEnableNonSecurity sets the ApprovedPatchesEnableNonSecurity field's value.
func (s *UpdatePatchBaselineOutput) SetApprovedPatchesEnableNonSecurity(v bool) *UpdatePatchBaselineOutput {
	s.ApprovedPatchesEnableNonSecurity = &v
	return s
}

// SetBaselineId sets the BaselineId field's value.
func (s *UpdatePatchBaselineOutput) SetBaselineId(v string) *UpdatePatchBaselineOutput {
	s.BaselineId = &v
	return s
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *UpdatePatchBaselineOutput) SetCreatedDate(v time.Time) *UpdatePatchBaselineOutput {
	s.CreatedDate = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdatePatchBaselineOutput) SetDescription(v string) *UpdatePatchBaselineOutput {
	s.Description = &v
	return s
}

// SetGlobalFilters sets the GlobalFilters field's value.
func (s *UpdatePatchBaselineOutput) SetGlobalFilters(v *PatchFilterGroup) *UpdatePatchBaselineOutput {
	s.GlobalFilters = v
	return s
}

// SetModifiedDate sets the ModifiedDate field's value.
func (s *UpdatePatchBaselineOutput) SetModifiedDate(v time.Time) *UpdatePatchBaselineOutput {
	s.ModifiedDate = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdatePatchBaselineOutput) SetName(v string) *UpdatePatchBaselineOutput {
	s.Name = &v
	return s
}

// SetOperatingSystem sets the OperatingSystem field's value.
func (s *UpdatePatchBaselineOutput) SetOperatingSystem(v string) *UpdatePatchBaselineOutput {
	s.OperatingSystem = &v
	return s
}

// SetRejectedPatches sets the RejectedPatches field's value.
func (s *UpdatePatchBaselineOutput) SetRejectedPatches(v []*string) *UpdatePatchBaselineOutput {
	s.RejectedPatches = v
	return s
}

// SetRejectedPatchesAction sets the RejectedPatchesAction field's value.
func (s *UpdatePatchBaselineOutput) SetRejectedPatchesAction(v string) *UpdatePatchBaselineOutput {
	s.RejectedPatchesAction = &v
	return s
}

// SetSources sets the Sources field's value.
func (s *UpdatePatchBaselineOutput) SetSources(v []*PatchSource) *UpdatePatchBaselineOutput {
	s.Sources = v
	return s
}

// UpdateResourceDataSyncInput is the request for the UpdateResourceDataSync operation.
type UpdateResourceDataSyncInput struct {
	_ struct{} `type:"structure"`

	// The name of the resource data sync you want to update.
	//
	// SyncName is a required field
	SyncName *string `min:"1" type:"string" required:"true"`

	// Specify information about the data sources to synchronize.
	//
	// SyncSource is a required field
	SyncSource *ResourceDataSyncSource `type:"structure" required:"true"`

	// The type of resource data sync. The supported SyncType is SyncFromSource.
	//
	// SyncType is a required field
	SyncType *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateResourceDataSyncInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateResourceDataSyncInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateResourceDataSyncInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateResourceDataSyncInput"}
	if s.SyncName == nil {
		invalidParams.Add(request.NewErrParamRequired("SyncName"))
	}
	if s.SyncName != nil && len(*s.SyncName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SyncName", 1))
	}
	if s.SyncSource == nil {
		invalidParams.Add(request.NewErrParamRequired("SyncSource"))
	}
	if s.SyncType == nil {
		invalidParams.Add(request.NewErrParamRequired("SyncType"))
	}
	if s.SyncType != nil && len(*s.SyncType) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SyncType", 1))
	}
	// SyncSource validates itself; its failures are folded in under the field name.
	if s.SyncSource != nil {
		if err := s.SyncSource.Validate(); err != nil {
			invalidParams.AddNested("SyncSource", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSyncName sets the SyncName field's value.
func (s *UpdateResourceDataSyncInput) SetSyncName(v string) *UpdateResourceDataSyncInput {
	s.SyncName = &v
	return s
}

// SetSyncSource sets the SyncSource field's value.
func (s *UpdateResourceDataSyncInput) SetSyncSource(v *ResourceDataSyncSource) *UpdateResourceDataSyncInput {
	s.SyncSource = v
	return s
}

// SetSyncType sets the SyncType field's value.
func (s *UpdateResourceDataSyncInput) SetSyncType(v string) *UpdateResourceDataSyncInput {
	s.SyncType = &v
	return s
}

// UpdateResourceDataSyncOutput is the response to an UpdateResourceDataSync
// request; it carries no fields.
type UpdateResourceDataSyncOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UpdateResourceDataSyncOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateResourceDataSyncOutput) GoString() string {
	return s.String()
}

// The request body of the UpdateServiceSetting API action.
type UpdateServiceSettingInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the service setting to update. For example,
	// arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled.
	// The setting ID can be one of the following.
	//
	// (NOTE(review): the upstream service model says "to reset", which looks like
	// a copy/paste from ResetServiceSetting; this is the Update input.)
	//
	//    * /ssm/parameter-store/default-parameter-tier
	//
	//    * /ssm/parameter-store/high-throughput-enabled
	//
	//    * /ssm/managed-instance/activation-tier
	//
	// SettingId is a required field
	SettingId *string `min:"1" type:"string" required:"true"`

	// The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier
	// setting ID, the setting value can be one of the following.
	//
	//    * Standard
	//
	//    * Advanced
	//
	//    * Intelligent-Tiering
	//
	// For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier
	// setting IDs, the setting value can be true or false.
	//
	// SettingValue is a required field
	SettingValue *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s UpdateServiceSettingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateServiceSettingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateServiceSettingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServiceSettingInput"} + if s.SettingId == nil { + invalidParams.Add(request.NewErrParamRequired("SettingId")) + } + if s.SettingId != nil && len(*s.SettingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SettingId", 1)) + } + if s.SettingValue == nil { + invalidParams.Add(request.NewErrParamRequired("SettingValue")) + } + if s.SettingValue != nil && len(*s.SettingValue) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SettingValue", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSettingId sets the SettingId field's value. +func (s *UpdateServiceSettingInput) SetSettingId(v string) *UpdateServiceSettingInput { + s.SettingId = &v + return s +} + +// SetSettingValue sets the SettingValue field's value. +func (s *UpdateServiceSettingInput) SetSettingValue(v string) *UpdateServiceSettingInput { + s.SettingValue = &v + return s +} + +// The result body of the UpdateServiceSetting API action. 
// UpdateServiceSettingOutput is the result body of the UpdateServiceSetting
// API action; it carries no fields.
type UpdateServiceSettingOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UpdateServiceSettingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateServiceSettingOutput) GoString() string {
	return s.String()
}

// Enum values for AssociationComplianceSeverity.
const (
	// AssociationComplianceSeverityCritical is a AssociationComplianceSeverity enum value
	AssociationComplianceSeverityCritical = "CRITICAL"

	// AssociationComplianceSeverityHigh is a AssociationComplianceSeverity enum value
	AssociationComplianceSeverityHigh = "HIGH"

	// AssociationComplianceSeverityMedium is a AssociationComplianceSeverity enum value
	AssociationComplianceSeverityMedium = "MEDIUM"

	// AssociationComplianceSeverityLow is a AssociationComplianceSeverity enum value
	AssociationComplianceSeverityLow = "LOW"

	// AssociationComplianceSeverityUnspecified is a AssociationComplianceSeverity enum value
	AssociationComplianceSeverityUnspecified = "UNSPECIFIED"
)

// AssociationComplianceSeverity_Values returns all elements of the AssociationComplianceSeverity enum
func AssociationComplianceSeverity_Values() []string {
	return []string{
		AssociationComplianceSeverityCritical,
		AssociationComplianceSeverityHigh,
		AssociationComplianceSeverityMedium,
		AssociationComplianceSeverityLow,
		AssociationComplianceSeverityUnspecified,
	}
}

// Enum values for AssociationExecutionFilterKey.
const (
	// AssociationExecutionFilterKeyExecutionId is a AssociationExecutionFilterKey enum value
	AssociationExecutionFilterKeyExecutionId = "ExecutionId"

	// AssociationExecutionFilterKeyStatus is a AssociationExecutionFilterKey enum value
	AssociationExecutionFilterKeyStatus = "Status"

	// AssociationExecutionFilterKeyCreatedTime is a AssociationExecutionFilterKey enum value
	AssociationExecutionFilterKeyCreatedTime = "CreatedTime"
)

// AssociationExecutionFilterKey_Values returns all elements of the AssociationExecutionFilterKey enum
+func AssociationExecutionFilterKey_Values() []string { + return []string{ + AssociationExecutionFilterKeyExecutionId, + AssociationExecutionFilterKeyStatus, + AssociationExecutionFilterKeyCreatedTime, + } +} + +const ( + // AssociationExecutionTargetsFilterKeyStatus is a AssociationExecutionTargetsFilterKey enum value + AssociationExecutionTargetsFilterKeyStatus = "Status" + + // AssociationExecutionTargetsFilterKeyResourceId is a AssociationExecutionTargetsFilterKey enum value + AssociationExecutionTargetsFilterKeyResourceId = "ResourceId" + + // AssociationExecutionTargetsFilterKeyResourceType is a AssociationExecutionTargetsFilterKey enum value + AssociationExecutionTargetsFilterKeyResourceType = "ResourceType" +) + +// AssociationExecutionTargetsFilterKey_Values returns all elements of the AssociationExecutionTargetsFilterKey enum +func AssociationExecutionTargetsFilterKey_Values() []string { + return []string{ + AssociationExecutionTargetsFilterKeyStatus, + AssociationExecutionTargetsFilterKeyResourceId, + AssociationExecutionTargetsFilterKeyResourceType, + } +} + +const ( + // AssociationFilterKeyInstanceId is a AssociationFilterKey enum value + AssociationFilterKeyInstanceId = "InstanceId" + + // AssociationFilterKeyName is a AssociationFilterKey enum value + AssociationFilterKeyName = "Name" + + // AssociationFilterKeyAssociationId is a AssociationFilterKey enum value + AssociationFilterKeyAssociationId = "AssociationId" + + // AssociationFilterKeyAssociationStatusName is a AssociationFilterKey enum value + AssociationFilterKeyAssociationStatusName = "AssociationStatusName" + + // AssociationFilterKeyLastExecutedBefore is a AssociationFilterKey enum value + AssociationFilterKeyLastExecutedBefore = "LastExecutedBefore" + + // AssociationFilterKeyLastExecutedAfter is a AssociationFilterKey enum value + AssociationFilterKeyLastExecutedAfter = "LastExecutedAfter" + + // AssociationFilterKeyAssociationName is a AssociationFilterKey enum value + 
AssociationFilterKeyAssociationName = "AssociationName" + + // AssociationFilterKeyResourceGroupName is a AssociationFilterKey enum value + AssociationFilterKeyResourceGroupName = "ResourceGroupName" +) + +// AssociationFilterKey_Values returns all elements of the AssociationFilterKey enum +func AssociationFilterKey_Values() []string { + return []string{ + AssociationFilterKeyInstanceId, + AssociationFilterKeyName, + AssociationFilterKeyAssociationId, + AssociationFilterKeyAssociationStatusName, + AssociationFilterKeyLastExecutedBefore, + AssociationFilterKeyLastExecutedAfter, + AssociationFilterKeyAssociationName, + AssociationFilterKeyResourceGroupName, + } +} + +const ( + // AssociationFilterOperatorTypeEqual is a AssociationFilterOperatorType enum value + AssociationFilterOperatorTypeEqual = "EQUAL" + + // AssociationFilterOperatorTypeLessThan is a AssociationFilterOperatorType enum value + AssociationFilterOperatorTypeLessThan = "LESS_THAN" + + // AssociationFilterOperatorTypeGreaterThan is a AssociationFilterOperatorType enum value + AssociationFilterOperatorTypeGreaterThan = "GREATER_THAN" +) + +// AssociationFilterOperatorType_Values returns all elements of the AssociationFilterOperatorType enum +func AssociationFilterOperatorType_Values() []string { + return []string{ + AssociationFilterOperatorTypeEqual, + AssociationFilterOperatorTypeLessThan, + AssociationFilterOperatorTypeGreaterThan, + } +} + +const ( + // AssociationStatusNamePending is a AssociationStatusName enum value + AssociationStatusNamePending = "Pending" + + // AssociationStatusNameSuccess is a AssociationStatusName enum value + AssociationStatusNameSuccess = "Success" + + // AssociationStatusNameFailed is a AssociationStatusName enum value + AssociationStatusNameFailed = "Failed" +) + +// AssociationStatusName_Values returns all elements of the AssociationStatusName enum +func AssociationStatusName_Values() []string { + return []string{ + AssociationStatusNamePending, + 
AssociationStatusNameSuccess, + AssociationStatusNameFailed, + } +} + +const ( + // AssociationSyncComplianceAuto is a AssociationSyncCompliance enum value + AssociationSyncComplianceAuto = "AUTO" + + // AssociationSyncComplianceManual is a AssociationSyncCompliance enum value + AssociationSyncComplianceManual = "MANUAL" +) + +// AssociationSyncCompliance_Values returns all elements of the AssociationSyncCompliance enum +func AssociationSyncCompliance_Values() []string { + return []string{ + AssociationSyncComplianceAuto, + AssociationSyncComplianceManual, + } +} + +const ( + // AttachmentHashTypeSha256 is a AttachmentHashType enum value + AttachmentHashTypeSha256 = "Sha256" +) + +// AttachmentHashType_Values returns all elements of the AttachmentHashType enum +func AttachmentHashType_Values() []string { + return []string{ + AttachmentHashTypeSha256, + } +} + +const ( + // AttachmentsSourceKeySourceUrl is a AttachmentsSourceKey enum value + AttachmentsSourceKeySourceUrl = "SourceUrl" + + // AttachmentsSourceKeyS3fileUrl is a AttachmentsSourceKey enum value + AttachmentsSourceKeyS3fileUrl = "S3FileUrl" + + // AttachmentsSourceKeyAttachmentReference is a AttachmentsSourceKey enum value + AttachmentsSourceKeyAttachmentReference = "AttachmentReference" +) + +// AttachmentsSourceKey_Values returns all elements of the AttachmentsSourceKey enum +func AttachmentsSourceKey_Values() []string { + return []string{ + AttachmentsSourceKeySourceUrl, + AttachmentsSourceKeyS3fileUrl, + AttachmentsSourceKeyAttachmentReference, + } +} + +const ( + // AutomationExecutionFilterKeyDocumentNamePrefix is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyDocumentNamePrefix = "DocumentNamePrefix" + + // AutomationExecutionFilterKeyExecutionStatus is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyExecutionStatus = "ExecutionStatus" + + // AutomationExecutionFilterKeyExecutionId is a AutomationExecutionFilterKey enum value + 
AutomationExecutionFilterKeyExecutionId = "ExecutionId" + + // AutomationExecutionFilterKeyParentExecutionId is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyParentExecutionId = "ParentExecutionId" + + // AutomationExecutionFilterKeyCurrentAction is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyCurrentAction = "CurrentAction" + + // AutomationExecutionFilterKeyStartTimeBefore is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyStartTimeBefore = "StartTimeBefore" + + // AutomationExecutionFilterKeyStartTimeAfter is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyStartTimeAfter = "StartTimeAfter" + + // AutomationExecutionFilterKeyAutomationType is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyAutomationType = "AutomationType" + + // AutomationExecutionFilterKeyTagKey is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyTagKey = "TagKey" + + // AutomationExecutionFilterKeyTargetResourceGroup is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyTargetResourceGroup = "TargetResourceGroup" +) + +// AutomationExecutionFilterKey_Values returns all elements of the AutomationExecutionFilterKey enum +func AutomationExecutionFilterKey_Values() []string { + return []string{ + AutomationExecutionFilterKeyDocumentNamePrefix, + AutomationExecutionFilterKeyExecutionStatus, + AutomationExecutionFilterKeyExecutionId, + AutomationExecutionFilterKeyParentExecutionId, + AutomationExecutionFilterKeyCurrentAction, + AutomationExecutionFilterKeyStartTimeBefore, + AutomationExecutionFilterKeyStartTimeAfter, + AutomationExecutionFilterKeyAutomationType, + AutomationExecutionFilterKeyTagKey, + AutomationExecutionFilterKeyTargetResourceGroup, + } +} + +const ( + // AutomationExecutionStatusPending is a AutomationExecutionStatus enum value + AutomationExecutionStatusPending = "Pending" + + // AutomationExecutionStatusInProgress 
is a AutomationExecutionStatus enum value + AutomationExecutionStatusInProgress = "InProgress" + + // AutomationExecutionStatusWaiting is a AutomationExecutionStatus enum value + AutomationExecutionStatusWaiting = "Waiting" + + // AutomationExecutionStatusSuccess is a AutomationExecutionStatus enum value + AutomationExecutionStatusSuccess = "Success" + + // AutomationExecutionStatusTimedOut is a AutomationExecutionStatus enum value + AutomationExecutionStatusTimedOut = "TimedOut" + + // AutomationExecutionStatusCancelling is a AutomationExecutionStatus enum value + AutomationExecutionStatusCancelling = "Cancelling" + + // AutomationExecutionStatusCancelled is a AutomationExecutionStatus enum value + AutomationExecutionStatusCancelled = "Cancelled" + + // AutomationExecutionStatusFailed is a AutomationExecutionStatus enum value + AutomationExecutionStatusFailed = "Failed" +) + +// AutomationExecutionStatus_Values returns all elements of the AutomationExecutionStatus enum +func AutomationExecutionStatus_Values() []string { + return []string{ + AutomationExecutionStatusPending, + AutomationExecutionStatusInProgress, + AutomationExecutionStatusWaiting, + AutomationExecutionStatusSuccess, + AutomationExecutionStatusTimedOut, + AutomationExecutionStatusCancelling, + AutomationExecutionStatusCancelled, + AutomationExecutionStatusFailed, + } +} + +const ( + // AutomationTypeCrossAccount is a AutomationType enum value + AutomationTypeCrossAccount = "CrossAccount" + + // AutomationTypeLocal is a AutomationType enum value + AutomationTypeLocal = "Local" +) + +// AutomationType_Values returns all elements of the AutomationType enum +func AutomationType_Values() []string { + return []string{ + AutomationTypeCrossAccount, + AutomationTypeLocal, + } +} + +const ( + // CalendarStateOpen is a CalendarState enum value + CalendarStateOpen = "OPEN" + + // CalendarStateClosed is a CalendarState enum value + CalendarStateClosed = "CLOSED" +) + +// CalendarState_Values returns all 
elements of the CalendarState enum +func CalendarState_Values() []string { + return []string{ + CalendarStateOpen, + CalendarStateClosed, + } +} + +const ( + // CommandFilterKeyInvokedAfter is a CommandFilterKey enum value + CommandFilterKeyInvokedAfter = "InvokedAfter" + + // CommandFilterKeyInvokedBefore is a CommandFilterKey enum value + CommandFilterKeyInvokedBefore = "InvokedBefore" + + // CommandFilterKeyStatus is a CommandFilterKey enum value + CommandFilterKeyStatus = "Status" + + // CommandFilterKeyExecutionStage is a CommandFilterKey enum value + CommandFilterKeyExecutionStage = "ExecutionStage" + + // CommandFilterKeyDocumentName is a CommandFilterKey enum value + CommandFilterKeyDocumentName = "DocumentName" +) + +// CommandFilterKey_Values returns all elements of the CommandFilterKey enum +func CommandFilterKey_Values() []string { + return []string{ + CommandFilterKeyInvokedAfter, + CommandFilterKeyInvokedBefore, + CommandFilterKeyStatus, + CommandFilterKeyExecutionStage, + CommandFilterKeyDocumentName, + } +} + +const ( + // CommandInvocationStatusPending is a CommandInvocationStatus enum value + CommandInvocationStatusPending = "Pending" + + // CommandInvocationStatusInProgress is a CommandInvocationStatus enum value + CommandInvocationStatusInProgress = "InProgress" + + // CommandInvocationStatusDelayed is a CommandInvocationStatus enum value + CommandInvocationStatusDelayed = "Delayed" + + // CommandInvocationStatusSuccess is a CommandInvocationStatus enum value + CommandInvocationStatusSuccess = "Success" + + // CommandInvocationStatusCancelled is a CommandInvocationStatus enum value + CommandInvocationStatusCancelled = "Cancelled" + + // CommandInvocationStatusTimedOut is a CommandInvocationStatus enum value + CommandInvocationStatusTimedOut = "TimedOut" + + // CommandInvocationStatusFailed is a CommandInvocationStatus enum value + CommandInvocationStatusFailed = "Failed" + + // CommandInvocationStatusCancelling is a CommandInvocationStatus enum 
value + CommandInvocationStatusCancelling = "Cancelling" +) + +// CommandInvocationStatus_Values returns all elements of the CommandInvocationStatus enum +func CommandInvocationStatus_Values() []string { + return []string{ + CommandInvocationStatusPending, + CommandInvocationStatusInProgress, + CommandInvocationStatusDelayed, + CommandInvocationStatusSuccess, + CommandInvocationStatusCancelled, + CommandInvocationStatusTimedOut, + CommandInvocationStatusFailed, + CommandInvocationStatusCancelling, + } +} + +const ( + // CommandPluginStatusPending is a CommandPluginStatus enum value + CommandPluginStatusPending = "Pending" + + // CommandPluginStatusInProgress is a CommandPluginStatus enum value + CommandPluginStatusInProgress = "InProgress" + + // CommandPluginStatusSuccess is a CommandPluginStatus enum value + CommandPluginStatusSuccess = "Success" + + // CommandPluginStatusTimedOut is a CommandPluginStatus enum value + CommandPluginStatusTimedOut = "TimedOut" + + // CommandPluginStatusCancelled is a CommandPluginStatus enum value + CommandPluginStatusCancelled = "Cancelled" + + // CommandPluginStatusFailed is a CommandPluginStatus enum value + CommandPluginStatusFailed = "Failed" +) + +// CommandPluginStatus_Values returns all elements of the CommandPluginStatus enum +func CommandPluginStatus_Values() []string { + return []string{ + CommandPluginStatusPending, + CommandPluginStatusInProgress, + CommandPluginStatusSuccess, + CommandPluginStatusTimedOut, + CommandPluginStatusCancelled, + CommandPluginStatusFailed, + } +} + +const ( + // CommandStatusPending is a CommandStatus enum value + CommandStatusPending = "Pending" + + // CommandStatusInProgress is a CommandStatus enum value + CommandStatusInProgress = "InProgress" + + // CommandStatusSuccess is a CommandStatus enum value + CommandStatusSuccess = "Success" + + // CommandStatusCancelled is a CommandStatus enum value + CommandStatusCancelled = "Cancelled" + + // CommandStatusFailed is a CommandStatus enum value 
+ CommandStatusFailed = "Failed" + + // CommandStatusTimedOut is a CommandStatus enum value + CommandStatusTimedOut = "TimedOut" + + // CommandStatusCancelling is a CommandStatus enum value + CommandStatusCancelling = "Cancelling" +) + +// CommandStatus_Values returns all elements of the CommandStatus enum +func CommandStatus_Values() []string { + return []string{ + CommandStatusPending, + CommandStatusInProgress, + CommandStatusSuccess, + CommandStatusCancelled, + CommandStatusFailed, + CommandStatusTimedOut, + CommandStatusCancelling, + } +} + +const ( + // ComplianceQueryOperatorTypeEqual is a ComplianceQueryOperatorType enum value + ComplianceQueryOperatorTypeEqual = "EQUAL" + + // ComplianceQueryOperatorTypeNotEqual is a ComplianceQueryOperatorType enum value + ComplianceQueryOperatorTypeNotEqual = "NOT_EQUAL" + + // ComplianceQueryOperatorTypeBeginWith is a ComplianceQueryOperatorType enum value + ComplianceQueryOperatorTypeBeginWith = "BEGIN_WITH" + + // ComplianceQueryOperatorTypeLessThan is a ComplianceQueryOperatorType enum value + ComplianceQueryOperatorTypeLessThan = "LESS_THAN" + + // ComplianceQueryOperatorTypeGreaterThan is a ComplianceQueryOperatorType enum value + ComplianceQueryOperatorTypeGreaterThan = "GREATER_THAN" +) + +// ComplianceQueryOperatorType_Values returns all elements of the ComplianceQueryOperatorType enum +func ComplianceQueryOperatorType_Values() []string { + return []string{ + ComplianceQueryOperatorTypeEqual, + ComplianceQueryOperatorTypeNotEqual, + ComplianceQueryOperatorTypeBeginWith, + ComplianceQueryOperatorTypeLessThan, + ComplianceQueryOperatorTypeGreaterThan, + } +} + +const ( + // ComplianceSeverityCritical is a ComplianceSeverity enum value + ComplianceSeverityCritical = "CRITICAL" + + // ComplianceSeverityHigh is a ComplianceSeverity enum value + ComplianceSeverityHigh = "HIGH" + + // ComplianceSeverityMedium is a ComplianceSeverity enum value + ComplianceSeverityMedium = "MEDIUM" + + // ComplianceSeverityLow is a 
ComplianceSeverity enum value + ComplianceSeverityLow = "LOW" + + // ComplianceSeverityInformational is a ComplianceSeverity enum value + ComplianceSeverityInformational = "INFORMATIONAL" + + // ComplianceSeverityUnspecified is a ComplianceSeverity enum value + ComplianceSeverityUnspecified = "UNSPECIFIED" +) + +// ComplianceSeverity_Values returns all elements of the ComplianceSeverity enum +func ComplianceSeverity_Values() []string { + return []string{ + ComplianceSeverityCritical, + ComplianceSeverityHigh, + ComplianceSeverityMedium, + ComplianceSeverityLow, + ComplianceSeverityInformational, + ComplianceSeverityUnspecified, + } +} + +const ( + // ComplianceStatusCompliant is a ComplianceStatus enum value + ComplianceStatusCompliant = "COMPLIANT" + + // ComplianceStatusNonCompliant is a ComplianceStatus enum value + ComplianceStatusNonCompliant = "NON_COMPLIANT" +) + +// ComplianceStatus_Values returns all elements of the ComplianceStatus enum +func ComplianceStatus_Values() []string { + return []string{ + ComplianceStatusCompliant, + ComplianceStatusNonCompliant, + } +} + +const ( + // ComplianceUploadTypeComplete is a ComplianceUploadType enum value + ComplianceUploadTypeComplete = "COMPLETE" + + // ComplianceUploadTypePartial is a ComplianceUploadType enum value + ComplianceUploadTypePartial = "PARTIAL" +) + +// ComplianceUploadType_Values returns all elements of the ComplianceUploadType enum +func ComplianceUploadType_Values() []string { + return []string{ + ComplianceUploadTypeComplete, + ComplianceUploadTypePartial, + } +} + +const ( + // ConnectionStatusConnected is a ConnectionStatus enum value + ConnectionStatusConnected = "Connected" + + // ConnectionStatusNotConnected is a ConnectionStatus enum value + ConnectionStatusNotConnected = "NotConnected" +) + +// ConnectionStatus_Values returns all elements of the ConnectionStatus enum +func ConnectionStatus_Values() []string { + return []string{ + ConnectionStatusConnected, + ConnectionStatusNotConnected, + 
} +} + +const ( + // DescribeActivationsFilterKeysActivationIds is a DescribeActivationsFilterKeys enum value + DescribeActivationsFilterKeysActivationIds = "ActivationIds" + + // DescribeActivationsFilterKeysDefaultInstanceName is a DescribeActivationsFilterKeys enum value + DescribeActivationsFilterKeysDefaultInstanceName = "DefaultInstanceName" + + // DescribeActivationsFilterKeysIamRole is a DescribeActivationsFilterKeys enum value + DescribeActivationsFilterKeysIamRole = "IamRole" +) + +// DescribeActivationsFilterKeys_Values returns all elements of the DescribeActivationsFilterKeys enum +func DescribeActivationsFilterKeys_Values() []string { + return []string{ + DescribeActivationsFilterKeysActivationIds, + DescribeActivationsFilterKeysDefaultInstanceName, + DescribeActivationsFilterKeysIamRole, + } +} + +const ( + // DocumentFilterKeyName is a DocumentFilterKey enum value + DocumentFilterKeyName = "Name" + + // DocumentFilterKeyOwner is a DocumentFilterKey enum value + DocumentFilterKeyOwner = "Owner" + + // DocumentFilterKeyPlatformTypes is a DocumentFilterKey enum value + DocumentFilterKeyPlatformTypes = "PlatformTypes" + + // DocumentFilterKeyDocumentType is a DocumentFilterKey enum value + DocumentFilterKeyDocumentType = "DocumentType" +) + +// DocumentFilterKey_Values returns all elements of the DocumentFilterKey enum +func DocumentFilterKey_Values() []string { + return []string{ + DocumentFilterKeyName, + DocumentFilterKeyOwner, + DocumentFilterKeyPlatformTypes, + DocumentFilterKeyDocumentType, + } +} + +const ( + // DocumentFormatYaml is a DocumentFormat enum value + DocumentFormatYaml = "YAML" + + // DocumentFormatJson is a DocumentFormat enum value + DocumentFormatJson = "JSON" + + // DocumentFormatText is a DocumentFormat enum value + DocumentFormatText = "TEXT" +) + +// DocumentFormat_Values returns all elements of the DocumentFormat enum +func DocumentFormat_Values() []string { + return []string{ + DocumentFormatYaml, + DocumentFormatJson, + 
DocumentFormatText, + } +} + +const ( + // DocumentHashTypeSha256 is a DocumentHashType enum value + DocumentHashTypeSha256 = "Sha256" + + // DocumentHashTypeSha1 is a DocumentHashType enum value + DocumentHashTypeSha1 = "Sha1" +) + +// DocumentHashType_Values returns all elements of the DocumentHashType enum +func DocumentHashType_Values() []string { + return []string{ + DocumentHashTypeSha256, + DocumentHashTypeSha1, + } +} + +const ( + // DocumentParameterTypeString is a DocumentParameterType enum value + DocumentParameterTypeString = "String" + + // DocumentParameterTypeStringList is a DocumentParameterType enum value + DocumentParameterTypeStringList = "StringList" +) + +// DocumentParameterType_Values returns all elements of the DocumentParameterType enum +func DocumentParameterType_Values() []string { + return []string{ + DocumentParameterTypeString, + DocumentParameterTypeStringList, + } +} + +const ( + // DocumentPermissionTypeShare is a DocumentPermissionType enum value + DocumentPermissionTypeShare = "Share" +) + +// DocumentPermissionType_Values returns all elements of the DocumentPermissionType enum +func DocumentPermissionType_Values() []string { + return []string{ + DocumentPermissionTypeShare, + } +} + +// The status of a document. 
+const ( + // DocumentStatusCreating is a DocumentStatus enum value + DocumentStatusCreating = "Creating" + + // DocumentStatusActive is a DocumentStatus enum value + DocumentStatusActive = "Active" + + // DocumentStatusUpdating is a DocumentStatus enum value + DocumentStatusUpdating = "Updating" + + // DocumentStatusDeleting is a DocumentStatus enum value + DocumentStatusDeleting = "Deleting" + + // DocumentStatusFailed is a DocumentStatus enum value + DocumentStatusFailed = "Failed" +) + +// DocumentStatus_Values returns all elements of the DocumentStatus enum +func DocumentStatus_Values() []string { + return []string{ + DocumentStatusCreating, + DocumentStatusActive, + DocumentStatusUpdating, + DocumentStatusDeleting, + DocumentStatusFailed, + } +} + +const ( + // DocumentTypeCommand is a DocumentType enum value + DocumentTypeCommand = "Command" + + // DocumentTypePolicy is a DocumentType enum value + DocumentTypePolicy = "Policy" + + // DocumentTypeAutomation is a DocumentType enum value + DocumentTypeAutomation = "Automation" + + // DocumentTypeSession is a DocumentType enum value + DocumentTypeSession = "Session" + + // DocumentTypePackage is a DocumentType enum value + DocumentTypePackage = "Package" + + // DocumentTypeApplicationConfiguration is a DocumentType enum value + DocumentTypeApplicationConfiguration = "ApplicationConfiguration" + + // DocumentTypeApplicationConfigurationSchema is a DocumentType enum value + DocumentTypeApplicationConfigurationSchema = "ApplicationConfigurationSchema" + + // DocumentTypeDeploymentStrategy is a DocumentType enum value + DocumentTypeDeploymentStrategy = "DeploymentStrategy" + + // DocumentTypeChangeCalendar is a DocumentType enum value + DocumentTypeChangeCalendar = "ChangeCalendar" +) + +// DocumentType_Values returns all elements of the DocumentType enum +func DocumentType_Values() []string { + return []string{ + DocumentTypeCommand, + DocumentTypePolicy, + DocumentTypeAutomation, + DocumentTypeSession, + 
DocumentTypePackage, + DocumentTypeApplicationConfiguration, + DocumentTypeApplicationConfigurationSchema, + DocumentTypeDeploymentStrategy, + DocumentTypeChangeCalendar, + } +} + +const ( + // ExecutionModeAuto is a ExecutionMode enum value + ExecutionModeAuto = "Auto" + + // ExecutionModeInteractive is a ExecutionMode enum value + ExecutionModeInteractive = "Interactive" +) + +// ExecutionMode_Values returns all elements of the ExecutionMode enum +func ExecutionMode_Values() []string { + return []string{ + ExecutionModeAuto, + ExecutionModeInteractive, + } +} + +const ( + // FaultClient is a Fault enum value + FaultClient = "Client" + + // FaultServer is a Fault enum value + FaultServer = "Server" + + // FaultUnknown is a Fault enum value + FaultUnknown = "Unknown" +) + +// Fault_Values returns all elements of the Fault enum +func Fault_Values() []string { + return []string{ + FaultClient, + FaultServer, + FaultUnknown, + } +} + +const ( + // InstanceInformationFilterKeyInstanceIds is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyInstanceIds = "InstanceIds" + + // InstanceInformationFilterKeyAgentVersion is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyAgentVersion = "AgentVersion" + + // InstanceInformationFilterKeyPingStatus is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyPingStatus = "PingStatus" + + // InstanceInformationFilterKeyPlatformTypes is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyPlatformTypes = "PlatformTypes" + + // InstanceInformationFilterKeyActivationIds is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyActivationIds = "ActivationIds" + + // InstanceInformationFilterKeyIamRole is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyIamRole = "IamRole" + + // InstanceInformationFilterKeyResourceType is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyResourceType = 
"ResourceType" + + // InstanceInformationFilterKeyAssociationStatus is a InstanceInformationFilterKey enum value + InstanceInformationFilterKeyAssociationStatus = "AssociationStatus" +) + +// InstanceInformationFilterKey_Values returns all elements of the InstanceInformationFilterKey enum +func InstanceInformationFilterKey_Values() []string { + return []string{ + InstanceInformationFilterKeyInstanceIds, + InstanceInformationFilterKeyAgentVersion, + InstanceInformationFilterKeyPingStatus, + InstanceInformationFilterKeyPlatformTypes, + InstanceInformationFilterKeyActivationIds, + InstanceInformationFilterKeyIamRole, + InstanceInformationFilterKeyResourceType, + InstanceInformationFilterKeyAssociationStatus, + } +} + +const ( + // InstancePatchStateOperatorTypeEqual is a InstancePatchStateOperatorType enum value + InstancePatchStateOperatorTypeEqual = "Equal" + + // InstancePatchStateOperatorTypeNotEqual is a InstancePatchStateOperatorType enum value + InstancePatchStateOperatorTypeNotEqual = "NotEqual" + + // InstancePatchStateOperatorTypeLessThan is a InstancePatchStateOperatorType enum value + InstancePatchStateOperatorTypeLessThan = "LessThan" + + // InstancePatchStateOperatorTypeGreaterThan is a InstancePatchStateOperatorType enum value + InstancePatchStateOperatorTypeGreaterThan = "GreaterThan" +) + +// InstancePatchStateOperatorType_Values returns all elements of the InstancePatchStateOperatorType enum +func InstancePatchStateOperatorType_Values() []string { + return []string{ + InstancePatchStateOperatorTypeEqual, + InstancePatchStateOperatorTypeNotEqual, + InstancePatchStateOperatorTypeLessThan, + InstancePatchStateOperatorTypeGreaterThan, + } +} + +const ( + // InventoryAttributeDataTypeString is a InventoryAttributeDataType enum value + InventoryAttributeDataTypeString = "string" + + // InventoryAttributeDataTypeNumber is a InventoryAttributeDataType enum value + InventoryAttributeDataTypeNumber = "number" +) + +// InventoryAttributeDataType_Values returns 
all elements of the InventoryAttributeDataType enum +func InventoryAttributeDataType_Values() []string { + return []string{ + InventoryAttributeDataTypeString, + InventoryAttributeDataTypeNumber, + } +} + +const ( + // InventoryDeletionStatusInProgress is a InventoryDeletionStatus enum value + InventoryDeletionStatusInProgress = "InProgress" + + // InventoryDeletionStatusComplete is a InventoryDeletionStatus enum value + InventoryDeletionStatusComplete = "Complete" +) + +// InventoryDeletionStatus_Values returns all elements of the InventoryDeletionStatus enum +func InventoryDeletionStatus_Values() []string { + return []string{ + InventoryDeletionStatusInProgress, + InventoryDeletionStatusComplete, + } +} + +const ( + // InventoryQueryOperatorTypeEqual is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeEqual = "Equal" + + // InventoryQueryOperatorTypeNotEqual is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeNotEqual = "NotEqual" + + // InventoryQueryOperatorTypeBeginWith is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeBeginWith = "BeginWith" + + // InventoryQueryOperatorTypeLessThan is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeLessThan = "LessThan" + + // InventoryQueryOperatorTypeGreaterThan is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeGreaterThan = "GreaterThan" + + // InventoryQueryOperatorTypeExists is a InventoryQueryOperatorType enum value + InventoryQueryOperatorTypeExists = "Exists" +) + +// InventoryQueryOperatorType_Values returns all elements of the InventoryQueryOperatorType enum +func InventoryQueryOperatorType_Values() []string { + return []string{ + InventoryQueryOperatorTypeEqual, + InventoryQueryOperatorTypeNotEqual, + InventoryQueryOperatorTypeBeginWith, + InventoryQueryOperatorTypeLessThan, + InventoryQueryOperatorTypeGreaterThan, + InventoryQueryOperatorTypeExists, + } +} + +const ( + // InventorySchemaDeleteOptionDisableSchema is 
a InventorySchemaDeleteOption enum value + InventorySchemaDeleteOptionDisableSchema = "DisableSchema" + + // InventorySchemaDeleteOptionDeleteSchema is a InventorySchemaDeleteOption enum value + InventorySchemaDeleteOptionDeleteSchema = "DeleteSchema" +) + +// InventorySchemaDeleteOption_Values returns all elements of the InventorySchemaDeleteOption enum +func InventorySchemaDeleteOption_Values() []string { + return []string{ + InventorySchemaDeleteOptionDisableSchema, + InventorySchemaDeleteOptionDeleteSchema, + } +} + +const ( + // LastResourceDataSyncStatusSuccessful is a LastResourceDataSyncStatus enum value + LastResourceDataSyncStatusSuccessful = "Successful" + + // LastResourceDataSyncStatusFailed is a LastResourceDataSyncStatus enum value + LastResourceDataSyncStatusFailed = "Failed" + + // LastResourceDataSyncStatusInProgress is a LastResourceDataSyncStatus enum value + LastResourceDataSyncStatusInProgress = "InProgress" +) + +// LastResourceDataSyncStatus_Values returns all elements of the LastResourceDataSyncStatus enum +func LastResourceDataSyncStatus_Values() []string { + return []string{ + LastResourceDataSyncStatusSuccessful, + LastResourceDataSyncStatusFailed, + LastResourceDataSyncStatusInProgress, + } +} + +const ( + // MaintenanceWindowExecutionStatusPending is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusPending = "PENDING" + + // MaintenanceWindowExecutionStatusInProgress is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusInProgress = "IN_PROGRESS" + + // MaintenanceWindowExecutionStatusSuccess is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusSuccess = "SUCCESS" + + // MaintenanceWindowExecutionStatusFailed is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusFailed = "FAILED" + + // MaintenanceWindowExecutionStatusTimedOut is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusTimedOut = 
"TIMED_OUT" + + // MaintenanceWindowExecutionStatusCancelling is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusCancelling = "CANCELLING" + + // MaintenanceWindowExecutionStatusCancelled is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusCancelled = "CANCELLED" + + // MaintenanceWindowExecutionStatusSkippedOverlapping is a MaintenanceWindowExecutionStatus enum value + MaintenanceWindowExecutionStatusSkippedOverlapping = "SKIPPED_OVERLAPPING" +) + +// MaintenanceWindowExecutionStatus_Values returns all elements of the MaintenanceWindowExecutionStatus enum +func MaintenanceWindowExecutionStatus_Values() []string { + return []string{ + MaintenanceWindowExecutionStatusPending, + MaintenanceWindowExecutionStatusInProgress, + MaintenanceWindowExecutionStatusSuccess, + MaintenanceWindowExecutionStatusFailed, + MaintenanceWindowExecutionStatusTimedOut, + MaintenanceWindowExecutionStatusCancelling, + MaintenanceWindowExecutionStatusCancelled, + MaintenanceWindowExecutionStatusSkippedOverlapping, + } +} + +const ( + // MaintenanceWindowResourceTypeInstance is a MaintenanceWindowResourceType enum value + MaintenanceWindowResourceTypeInstance = "INSTANCE" + + // MaintenanceWindowResourceTypeResourceGroup is a MaintenanceWindowResourceType enum value + MaintenanceWindowResourceTypeResourceGroup = "RESOURCE_GROUP" +) + +// MaintenanceWindowResourceType_Values returns all elements of the MaintenanceWindowResourceType enum +func MaintenanceWindowResourceType_Values() []string { + return []string{ + MaintenanceWindowResourceTypeInstance, + MaintenanceWindowResourceTypeResourceGroup, + } +} + +const ( + // MaintenanceWindowTaskTypeRunCommand is a MaintenanceWindowTaskType enum value + MaintenanceWindowTaskTypeRunCommand = "RUN_COMMAND" + + // MaintenanceWindowTaskTypeAutomation is a MaintenanceWindowTaskType enum value + MaintenanceWindowTaskTypeAutomation = "AUTOMATION" + + // MaintenanceWindowTaskTypeStepFunctions is 
a MaintenanceWindowTaskType enum value + MaintenanceWindowTaskTypeStepFunctions = "STEP_FUNCTIONS" + + // MaintenanceWindowTaskTypeLambda is a MaintenanceWindowTaskType enum value + MaintenanceWindowTaskTypeLambda = "LAMBDA" +) + +// MaintenanceWindowTaskType_Values returns all elements of the MaintenanceWindowTaskType enum +func MaintenanceWindowTaskType_Values() []string { + return []string{ + MaintenanceWindowTaskTypeRunCommand, + MaintenanceWindowTaskTypeAutomation, + MaintenanceWindowTaskTypeStepFunctions, + MaintenanceWindowTaskTypeLambda, + } +} + +const ( + // NotificationEventAll is a NotificationEvent enum value + NotificationEventAll = "All" + + // NotificationEventInProgress is a NotificationEvent enum value + NotificationEventInProgress = "InProgress" + + // NotificationEventSuccess is a NotificationEvent enum value + NotificationEventSuccess = "Success" + + // NotificationEventTimedOut is a NotificationEvent enum value + NotificationEventTimedOut = "TimedOut" + + // NotificationEventCancelled is a NotificationEvent enum value + NotificationEventCancelled = "Cancelled" + + // NotificationEventFailed is a NotificationEvent enum value + NotificationEventFailed = "Failed" +) + +// NotificationEvent_Values returns all elements of the NotificationEvent enum +func NotificationEvent_Values() []string { + return []string{ + NotificationEventAll, + NotificationEventInProgress, + NotificationEventSuccess, + NotificationEventTimedOut, + NotificationEventCancelled, + NotificationEventFailed, + } +} + +const ( + // NotificationTypeCommand is a NotificationType enum value + NotificationTypeCommand = "Command" + + // NotificationTypeInvocation is a NotificationType enum value + NotificationTypeInvocation = "Invocation" +) + +// NotificationType_Values returns all elements of the NotificationType enum +func NotificationType_Values() []string { + return []string{ + NotificationTypeCommand, + NotificationTypeInvocation, + } +} + +const ( + // OperatingSystemWindows is a 
OperatingSystem enum value + OperatingSystemWindows = "WINDOWS" + + // OperatingSystemAmazonLinux is a OperatingSystem enum value + OperatingSystemAmazonLinux = "AMAZON_LINUX" + + // OperatingSystemAmazonLinux2 is a OperatingSystem enum value + OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2" + + // OperatingSystemUbuntu is a OperatingSystem enum value + OperatingSystemUbuntu = "UBUNTU" + + // OperatingSystemRedhatEnterpriseLinux is a OperatingSystem enum value + OperatingSystemRedhatEnterpriseLinux = "REDHAT_ENTERPRISE_LINUX" + + // OperatingSystemSuse is a OperatingSystem enum value + OperatingSystemSuse = "SUSE" + + // OperatingSystemCentos is a OperatingSystem enum value + OperatingSystemCentos = "CENTOS" + + // OperatingSystemOracleLinux is a OperatingSystem enum value + OperatingSystemOracleLinux = "ORACLE_LINUX" + + // OperatingSystemDebian is a OperatingSystem enum value + OperatingSystemDebian = "DEBIAN" +) + +// OperatingSystem_Values returns all elements of the OperatingSystem enum +func OperatingSystem_Values() []string { + return []string{ + OperatingSystemWindows, + OperatingSystemAmazonLinux, + OperatingSystemAmazonLinux2, + OperatingSystemUbuntu, + OperatingSystemRedhatEnterpriseLinux, + OperatingSystemSuse, + OperatingSystemCentos, + OperatingSystemOracleLinux, + OperatingSystemDebian, + } +} + +const ( + // OpsFilterOperatorTypeEqual is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeEqual = "Equal" + + // OpsFilterOperatorTypeNotEqual is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeNotEqual = "NotEqual" + + // OpsFilterOperatorTypeBeginWith is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeBeginWith = "BeginWith" + + // OpsFilterOperatorTypeLessThan is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeLessThan = "LessThan" + + // OpsFilterOperatorTypeGreaterThan is a OpsFilterOperatorType enum value + OpsFilterOperatorTypeGreaterThan = "GreaterThan" + + // OpsFilterOperatorTypeExists is a 
OpsFilterOperatorType enum value + OpsFilterOperatorTypeExists = "Exists" +) + +// OpsFilterOperatorType_Values returns all elements of the OpsFilterOperatorType enum +func OpsFilterOperatorType_Values() []string { + return []string{ + OpsFilterOperatorTypeEqual, + OpsFilterOperatorTypeNotEqual, + OpsFilterOperatorTypeBeginWith, + OpsFilterOperatorTypeLessThan, + OpsFilterOperatorTypeGreaterThan, + OpsFilterOperatorTypeExists, + } +} + +const ( + // OpsItemDataTypeSearchableString is a OpsItemDataType enum value + OpsItemDataTypeSearchableString = "SearchableString" + + // OpsItemDataTypeString is a OpsItemDataType enum value + OpsItemDataTypeString = "String" +) + +// OpsItemDataType_Values returns all elements of the OpsItemDataType enum +func OpsItemDataType_Values() []string { + return []string{ + OpsItemDataTypeSearchableString, + OpsItemDataTypeString, + } +} + +const ( + // OpsItemFilterKeyStatus is a OpsItemFilterKey enum value + OpsItemFilterKeyStatus = "Status" + + // OpsItemFilterKeyCreatedBy is a OpsItemFilterKey enum value + OpsItemFilterKeyCreatedBy = "CreatedBy" + + // OpsItemFilterKeySource is a OpsItemFilterKey enum value + OpsItemFilterKeySource = "Source" + + // OpsItemFilterKeyPriority is a OpsItemFilterKey enum value + OpsItemFilterKeyPriority = "Priority" + + // OpsItemFilterKeyTitle is a OpsItemFilterKey enum value + OpsItemFilterKeyTitle = "Title" + + // OpsItemFilterKeyOpsItemId is a OpsItemFilterKey enum value + OpsItemFilterKeyOpsItemId = "OpsItemId" + + // OpsItemFilterKeyCreatedTime is a OpsItemFilterKey enum value + OpsItemFilterKeyCreatedTime = "CreatedTime" + + // OpsItemFilterKeyLastModifiedTime is a OpsItemFilterKey enum value + OpsItemFilterKeyLastModifiedTime = "LastModifiedTime" + + // OpsItemFilterKeyOperationalData is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalData = "OperationalData" + + // OpsItemFilterKeyOperationalDataKey is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalDataKey = 
"OperationalDataKey" + + // OpsItemFilterKeyOperationalDataValue is a OpsItemFilterKey enum value + OpsItemFilterKeyOperationalDataValue = "OperationalDataValue" + + // OpsItemFilterKeyResourceId is a OpsItemFilterKey enum value + OpsItemFilterKeyResourceId = "ResourceId" + + // OpsItemFilterKeyAutomationId is a OpsItemFilterKey enum value + OpsItemFilterKeyAutomationId = "AutomationId" + + // OpsItemFilterKeyCategory is a OpsItemFilterKey enum value + OpsItemFilterKeyCategory = "Category" + + // OpsItemFilterKeySeverity is a OpsItemFilterKey enum value + OpsItemFilterKeySeverity = "Severity" +) + +// OpsItemFilterKey_Values returns all elements of the OpsItemFilterKey enum +func OpsItemFilterKey_Values() []string { + return []string{ + OpsItemFilterKeyStatus, + OpsItemFilterKeyCreatedBy, + OpsItemFilterKeySource, + OpsItemFilterKeyPriority, + OpsItemFilterKeyTitle, + OpsItemFilterKeyOpsItemId, + OpsItemFilterKeyCreatedTime, + OpsItemFilterKeyLastModifiedTime, + OpsItemFilterKeyOperationalData, + OpsItemFilterKeyOperationalDataKey, + OpsItemFilterKeyOperationalDataValue, + OpsItemFilterKeyResourceId, + OpsItemFilterKeyAutomationId, + OpsItemFilterKeyCategory, + OpsItemFilterKeySeverity, + } +} + +const ( + // OpsItemFilterOperatorEqual is a OpsItemFilterOperator enum value + OpsItemFilterOperatorEqual = "Equal" + + // OpsItemFilterOperatorContains is a OpsItemFilterOperator enum value + OpsItemFilterOperatorContains = "Contains" + + // OpsItemFilterOperatorGreaterThan is a OpsItemFilterOperator enum value + OpsItemFilterOperatorGreaterThan = "GreaterThan" + + // OpsItemFilterOperatorLessThan is a OpsItemFilterOperator enum value + OpsItemFilterOperatorLessThan = "LessThan" +) + +// OpsItemFilterOperator_Values returns all elements of the OpsItemFilterOperator enum +func OpsItemFilterOperator_Values() []string { + return []string{ + OpsItemFilterOperatorEqual, + OpsItemFilterOperatorContains, + OpsItemFilterOperatorGreaterThan, + OpsItemFilterOperatorLessThan, + } 
+} + +const ( + // OpsItemStatusOpen is a OpsItemStatus enum value + OpsItemStatusOpen = "Open" + + // OpsItemStatusInProgress is a OpsItemStatus enum value + OpsItemStatusInProgress = "InProgress" + + // OpsItemStatusResolved is a OpsItemStatus enum value + OpsItemStatusResolved = "Resolved" +) + +// OpsItemStatus_Values returns all elements of the OpsItemStatus enum +func OpsItemStatus_Values() []string { + return []string{ + OpsItemStatusOpen, + OpsItemStatusInProgress, + OpsItemStatusResolved, + } +} + +const ( + // ParameterTierStandard is a ParameterTier enum value + ParameterTierStandard = "Standard" + + // ParameterTierAdvanced is a ParameterTier enum value + ParameterTierAdvanced = "Advanced" + + // ParameterTierIntelligentTiering is a ParameterTier enum value + ParameterTierIntelligentTiering = "Intelligent-Tiering" +) + +// ParameterTier_Values returns all elements of the ParameterTier enum +func ParameterTier_Values() []string { + return []string{ + ParameterTierStandard, + ParameterTierAdvanced, + ParameterTierIntelligentTiering, + } +} + +const ( + // ParameterTypeString is a ParameterType enum value + ParameterTypeString = "String" + + // ParameterTypeStringList is a ParameterType enum value + ParameterTypeStringList = "StringList" + + // ParameterTypeSecureString is a ParameterType enum value + ParameterTypeSecureString = "SecureString" +) + +// ParameterType_Values returns all elements of the ParameterType enum +func ParameterType_Values() []string { + return []string{ + ParameterTypeString, + ParameterTypeStringList, + ParameterTypeSecureString, + } +} + +const ( + // ParametersFilterKeyName is a ParametersFilterKey enum value + ParametersFilterKeyName = "Name" + + // ParametersFilterKeyType is a ParametersFilterKey enum value + ParametersFilterKeyType = "Type" + + // ParametersFilterKeyKeyId is a ParametersFilterKey enum value + ParametersFilterKeyKeyId = "KeyId" +) + +// ParametersFilterKey_Values returns all elements of the ParametersFilterKey 
enum +func ParametersFilterKey_Values() []string { + return []string{ + ParametersFilterKeyName, + ParametersFilterKeyType, + ParametersFilterKeyKeyId, + } +} + +const ( + // PatchActionAllowAsDependency is a PatchAction enum value + PatchActionAllowAsDependency = "ALLOW_AS_DEPENDENCY" + + // PatchActionBlock is a PatchAction enum value + PatchActionBlock = "BLOCK" +) + +// PatchAction_Values returns all elements of the PatchAction enum +func PatchAction_Values() []string { + return []string{ + PatchActionAllowAsDependency, + PatchActionBlock, + } +} + +const ( + // PatchComplianceDataStateInstalled is a PatchComplianceDataState enum value + PatchComplianceDataStateInstalled = "INSTALLED" + + // PatchComplianceDataStateInstalledOther is a PatchComplianceDataState enum value + PatchComplianceDataStateInstalledOther = "INSTALLED_OTHER" + + // PatchComplianceDataStateInstalledPendingReboot is a PatchComplianceDataState enum value + PatchComplianceDataStateInstalledPendingReboot = "INSTALLED_PENDING_REBOOT" + + // PatchComplianceDataStateInstalledRejected is a PatchComplianceDataState enum value + PatchComplianceDataStateInstalledRejected = "INSTALLED_REJECTED" + + // PatchComplianceDataStateMissing is a PatchComplianceDataState enum value + PatchComplianceDataStateMissing = "MISSING" + + // PatchComplianceDataStateNotApplicable is a PatchComplianceDataState enum value + PatchComplianceDataStateNotApplicable = "NOT_APPLICABLE" + + // PatchComplianceDataStateFailed is a PatchComplianceDataState enum value + PatchComplianceDataStateFailed = "FAILED" +) + +// PatchComplianceDataState_Values returns all elements of the PatchComplianceDataState enum +func PatchComplianceDataState_Values() []string { + return []string{ + PatchComplianceDataStateInstalled, + PatchComplianceDataStateInstalledOther, + PatchComplianceDataStateInstalledPendingReboot, + PatchComplianceDataStateInstalledRejected, + PatchComplianceDataStateMissing, + PatchComplianceDataStateNotApplicable, + 
PatchComplianceDataStateFailed, + } +} + +const ( + // PatchComplianceLevelCritical is a PatchComplianceLevel enum value + PatchComplianceLevelCritical = "CRITICAL" + + // PatchComplianceLevelHigh is a PatchComplianceLevel enum value + PatchComplianceLevelHigh = "HIGH" + + // PatchComplianceLevelMedium is a PatchComplianceLevel enum value + PatchComplianceLevelMedium = "MEDIUM" + + // PatchComplianceLevelLow is a PatchComplianceLevel enum value + PatchComplianceLevelLow = "LOW" + + // PatchComplianceLevelInformational is a PatchComplianceLevel enum value + PatchComplianceLevelInformational = "INFORMATIONAL" + + // PatchComplianceLevelUnspecified is a PatchComplianceLevel enum value + PatchComplianceLevelUnspecified = "UNSPECIFIED" +) + +// PatchComplianceLevel_Values returns all elements of the PatchComplianceLevel enum +func PatchComplianceLevel_Values() []string { + return []string{ + PatchComplianceLevelCritical, + PatchComplianceLevelHigh, + PatchComplianceLevelMedium, + PatchComplianceLevelLow, + PatchComplianceLevelInformational, + PatchComplianceLevelUnspecified, + } +} + +const ( + // PatchDeploymentStatusApproved is a PatchDeploymentStatus enum value + PatchDeploymentStatusApproved = "APPROVED" + + // PatchDeploymentStatusPendingApproval is a PatchDeploymentStatus enum value + PatchDeploymentStatusPendingApproval = "PENDING_APPROVAL" + + // PatchDeploymentStatusExplicitApproved is a PatchDeploymentStatus enum value + PatchDeploymentStatusExplicitApproved = "EXPLICIT_APPROVED" + + // PatchDeploymentStatusExplicitRejected is a PatchDeploymentStatus enum value + PatchDeploymentStatusExplicitRejected = "EXPLICIT_REJECTED" +) + +// PatchDeploymentStatus_Values returns all elements of the PatchDeploymentStatus enum +func PatchDeploymentStatus_Values() []string { + return []string{ + PatchDeploymentStatusApproved, + PatchDeploymentStatusPendingApproval, + PatchDeploymentStatusExplicitApproved, + PatchDeploymentStatusExplicitRejected, + } +} + +const ( + // 
PatchFilterKeyArch is a PatchFilterKey enum value + PatchFilterKeyArch = "ARCH" + + // PatchFilterKeyAdvisoryId is a PatchFilterKey enum value + PatchFilterKeyAdvisoryId = "ADVISORY_ID" + + // PatchFilterKeyBugzillaId is a PatchFilterKey enum value + PatchFilterKeyBugzillaId = "BUGZILLA_ID" + + // PatchFilterKeyPatchSet is a PatchFilterKey enum value + PatchFilterKeyPatchSet = "PATCH_SET" + + // PatchFilterKeyProduct is a PatchFilterKey enum value + PatchFilterKeyProduct = "PRODUCT" + + // PatchFilterKeyProductFamily is a PatchFilterKey enum value + PatchFilterKeyProductFamily = "PRODUCT_FAMILY" + + // PatchFilterKeyClassification is a PatchFilterKey enum value + PatchFilterKeyClassification = "CLASSIFICATION" + + // PatchFilterKeyCveId is a PatchFilterKey enum value + PatchFilterKeyCveId = "CVE_ID" + + // PatchFilterKeyEpoch is a PatchFilterKey enum value + PatchFilterKeyEpoch = "EPOCH" + + // PatchFilterKeyMsrcSeverity is a PatchFilterKey enum value + PatchFilterKeyMsrcSeverity = "MSRC_SEVERITY" + + // PatchFilterKeyName is a PatchFilterKey enum value + PatchFilterKeyName = "NAME" + + // PatchFilterKeyPatchId is a PatchFilterKey enum value + PatchFilterKeyPatchId = "PATCH_ID" + + // PatchFilterKeySection is a PatchFilterKey enum value + PatchFilterKeySection = "SECTION" + + // PatchFilterKeyPriority is a PatchFilterKey enum value + PatchFilterKeyPriority = "PRIORITY" + + // PatchFilterKeyRepository is a PatchFilterKey enum value + PatchFilterKeyRepository = "REPOSITORY" + + // PatchFilterKeyRelease is a PatchFilterKey enum value + PatchFilterKeyRelease = "RELEASE" + + // PatchFilterKeySeverity is a PatchFilterKey enum value + PatchFilterKeySeverity = "SEVERITY" + + // PatchFilterKeySecurity is a PatchFilterKey enum value + PatchFilterKeySecurity = "SECURITY" + + // PatchFilterKeyVersion is a PatchFilterKey enum value + PatchFilterKeyVersion = "VERSION" +) + +// PatchFilterKey_Values returns all elements of the PatchFilterKey enum +func PatchFilterKey_Values() 
[]string { + return []string{ + PatchFilterKeyArch, + PatchFilterKeyAdvisoryId, + PatchFilterKeyBugzillaId, + PatchFilterKeyPatchSet, + PatchFilterKeyProduct, + PatchFilterKeyProductFamily, + PatchFilterKeyClassification, + PatchFilterKeyCveId, + PatchFilterKeyEpoch, + PatchFilterKeyMsrcSeverity, + PatchFilterKeyName, + PatchFilterKeyPatchId, + PatchFilterKeySection, + PatchFilterKeyPriority, + PatchFilterKeyRepository, + PatchFilterKeyRelease, + PatchFilterKeySeverity, + PatchFilterKeySecurity, + PatchFilterKeyVersion, + } +} + +const ( + // PatchOperationTypeScan is a PatchOperationType enum value + PatchOperationTypeScan = "Scan" + + // PatchOperationTypeInstall is a PatchOperationType enum value + PatchOperationTypeInstall = "Install" +) + +// PatchOperationType_Values returns all elements of the PatchOperationType enum +func PatchOperationType_Values() []string { + return []string{ + PatchOperationTypeScan, + PatchOperationTypeInstall, + } +} + +const ( + // PatchPropertyProduct is a PatchProperty enum value + PatchPropertyProduct = "PRODUCT" + + // PatchPropertyProductFamily is a PatchProperty enum value + PatchPropertyProductFamily = "PRODUCT_FAMILY" + + // PatchPropertyClassification is a PatchProperty enum value + PatchPropertyClassification = "CLASSIFICATION" + + // PatchPropertyMsrcSeverity is a PatchProperty enum value + PatchPropertyMsrcSeverity = "MSRC_SEVERITY" + + // PatchPropertyPriority is a PatchProperty enum value + PatchPropertyPriority = "PRIORITY" + + // PatchPropertySeverity is a PatchProperty enum value + PatchPropertySeverity = "SEVERITY" +) + +// PatchProperty_Values returns all elements of the PatchProperty enum +func PatchProperty_Values() []string { + return []string{ + PatchPropertyProduct, + PatchPropertyProductFamily, + PatchPropertyClassification, + PatchPropertyMsrcSeverity, + PatchPropertyPriority, + PatchPropertySeverity, + } +} + +const ( + // PatchSetOs is a PatchSet enum value + PatchSetOs = "OS" + + // PatchSetApplication is 
a PatchSet enum value + PatchSetApplication = "APPLICATION" +) + +// PatchSet_Values returns all elements of the PatchSet enum +func PatchSet_Values() []string { + return []string{ + PatchSetOs, + PatchSetApplication, + } +} + +const ( + // PingStatusOnline is a PingStatus enum value + PingStatusOnline = "Online" + + // PingStatusConnectionLost is a PingStatus enum value + PingStatusConnectionLost = "ConnectionLost" + + // PingStatusInactive is a PingStatus enum value + PingStatusInactive = "Inactive" +) + +// PingStatus_Values returns all elements of the PingStatus enum +func PingStatus_Values() []string { + return []string{ + PingStatusOnline, + PingStatusConnectionLost, + PingStatusInactive, + } +} + +const ( + // PlatformTypeWindows is a PlatformType enum value + PlatformTypeWindows = "Windows" + + // PlatformTypeLinux is a PlatformType enum value + PlatformTypeLinux = "Linux" +) + +// PlatformType_Values returns all elements of the PlatformType enum +func PlatformType_Values() []string { + return []string{ + PlatformTypeWindows, + PlatformTypeLinux, + } +} + +const ( + // RebootOptionRebootIfNeeded is a RebootOption enum value + RebootOptionRebootIfNeeded = "RebootIfNeeded" + + // RebootOptionNoReboot is a RebootOption enum value + RebootOptionNoReboot = "NoReboot" +) + +// RebootOption_Values returns all elements of the RebootOption enum +func RebootOption_Values() []string { + return []string{ + RebootOptionRebootIfNeeded, + RebootOptionNoReboot, + } +} + +const ( + // ResourceDataSyncS3FormatJsonSerDe is a ResourceDataSyncS3Format enum value + ResourceDataSyncS3FormatJsonSerDe = "JsonSerDe" +) + +// ResourceDataSyncS3Format_Values returns all elements of the ResourceDataSyncS3Format enum +func ResourceDataSyncS3Format_Values() []string { + return []string{ + ResourceDataSyncS3FormatJsonSerDe, + } +} + +const ( + // ResourceTypeManagedInstance is a ResourceType enum value + ResourceTypeManagedInstance = "ManagedInstance" + + // ResourceTypeDocument is a 
ResourceType enum value + ResourceTypeDocument = "Document" + + // ResourceTypeEc2instance is a ResourceType enum value + ResourceTypeEc2instance = "EC2Instance" +) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeManagedInstance, + ResourceTypeDocument, + ResourceTypeEc2instance, + } +} + +const ( + // ResourceTypeForTaggingDocument is a ResourceTypeForTagging enum value + ResourceTypeForTaggingDocument = "Document" + + // ResourceTypeForTaggingManagedInstance is a ResourceTypeForTagging enum value + ResourceTypeForTaggingManagedInstance = "ManagedInstance" + + // ResourceTypeForTaggingMaintenanceWindow is a ResourceTypeForTagging enum value + ResourceTypeForTaggingMaintenanceWindow = "MaintenanceWindow" + + // ResourceTypeForTaggingParameter is a ResourceTypeForTagging enum value + ResourceTypeForTaggingParameter = "Parameter" + + // ResourceTypeForTaggingPatchBaseline is a ResourceTypeForTagging enum value + ResourceTypeForTaggingPatchBaseline = "PatchBaseline" + + // ResourceTypeForTaggingOpsItem is a ResourceTypeForTagging enum value + ResourceTypeForTaggingOpsItem = "OpsItem" +) + +// ResourceTypeForTagging_Values returns all elements of the ResourceTypeForTagging enum +func ResourceTypeForTagging_Values() []string { + return []string{ + ResourceTypeForTaggingDocument, + ResourceTypeForTaggingManagedInstance, + ResourceTypeForTaggingMaintenanceWindow, + ResourceTypeForTaggingParameter, + ResourceTypeForTaggingPatchBaseline, + ResourceTypeForTaggingOpsItem, + } +} + +const ( + // SessionFilterKeyInvokedAfter is a SessionFilterKey enum value + SessionFilterKeyInvokedAfter = "InvokedAfter" + + // SessionFilterKeyInvokedBefore is a SessionFilterKey enum value + SessionFilterKeyInvokedBefore = "InvokedBefore" + + // SessionFilterKeyTarget is a SessionFilterKey enum value + SessionFilterKeyTarget = "Target" + + // SessionFilterKeyOwner is a SessionFilterKey enum value + 
SessionFilterKeyOwner = "Owner" + + // SessionFilterKeyStatus is a SessionFilterKey enum value + SessionFilterKeyStatus = "Status" + + // SessionFilterKeySessionId is a SessionFilterKey enum value + SessionFilterKeySessionId = "SessionId" +) + +// SessionFilterKey_Values returns all elements of the SessionFilterKey enum +func SessionFilterKey_Values() []string { + return []string{ + SessionFilterKeyInvokedAfter, + SessionFilterKeyInvokedBefore, + SessionFilterKeyTarget, + SessionFilterKeyOwner, + SessionFilterKeyStatus, + SessionFilterKeySessionId, + } +} + +const ( + // SessionStateActive is a SessionState enum value + SessionStateActive = "Active" + + // SessionStateHistory is a SessionState enum value + SessionStateHistory = "History" +) + +// SessionState_Values returns all elements of the SessionState enum +func SessionState_Values() []string { + return []string{ + SessionStateActive, + SessionStateHistory, + } +} + +const ( + // SessionStatusConnected is a SessionStatus enum value + SessionStatusConnected = "Connected" + + // SessionStatusConnecting is a SessionStatus enum value + SessionStatusConnecting = "Connecting" + + // SessionStatusDisconnected is a SessionStatus enum value + SessionStatusDisconnected = "Disconnected" + + // SessionStatusTerminated is a SessionStatus enum value + SessionStatusTerminated = "Terminated" + + // SessionStatusTerminating is a SessionStatus enum value + SessionStatusTerminating = "Terminating" + + // SessionStatusFailed is a SessionStatus enum value + SessionStatusFailed = "Failed" +) + +// SessionStatus_Values returns all elements of the SessionStatus enum +func SessionStatus_Values() []string { + return []string{ + SessionStatusConnected, + SessionStatusConnecting, + SessionStatusDisconnected, + SessionStatusTerminated, + SessionStatusTerminating, + SessionStatusFailed, + } +} + +const ( + // SignalTypeApprove is a SignalType enum value + SignalTypeApprove = "Approve" + + // SignalTypeReject is a SignalType enum value + 
SignalTypeReject = "Reject" + + // SignalTypeStartStep is a SignalType enum value + SignalTypeStartStep = "StartStep" + + // SignalTypeStopStep is a SignalType enum value + SignalTypeStopStep = "StopStep" + + // SignalTypeResume is a SignalType enum value + SignalTypeResume = "Resume" +) + +// SignalType_Values returns all elements of the SignalType enum +func SignalType_Values() []string { + return []string{ + SignalTypeApprove, + SignalTypeReject, + SignalTypeStartStep, + SignalTypeStopStep, + SignalTypeResume, + } +} + +const ( + // StepExecutionFilterKeyStartTimeBefore is a StepExecutionFilterKey enum value + StepExecutionFilterKeyStartTimeBefore = "StartTimeBefore" + + // StepExecutionFilterKeyStartTimeAfter is a StepExecutionFilterKey enum value + StepExecutionFilterKeyStartTimeAfter = "StartTimeAfter" + + // StepExecutionFilterKeyStepExecutionStatus is a StepExecutionFilterKey enum value + StepExecutionFilterKeyStepExecutionStatus = "StepExecutionStatus" + + // StepExecutionFilterKeyStepExecutionId is a StepExecutionFilterKey enum value + StepExecutionFilterKeyStepExecutionId = "StepExecutionId" + + // StepExecutionFilterKeyStepName is a StepExecutionFilterKey enum value + StepExecutionFilterKeyStepName = "StepName" + + // StepExecutionFilterKeyAction is a StepExecutionFilterKey enum value + StepExecutionFilterKeyAction = "Action" +) + +// StepExecutionFilterKey_Values returns all elements of the StepExecutionFilterKey enum +func StepExecutionFilterKey_Values() []string { + return []string{ + StepExecutionFilterKeyStartTimeBefore, + StepExecutionFilterKeyStartTimeAfter, + StepExecutionFilterKeyStepExecutionStatus, + StepExecutionFilterKeyStepExecutionId, + StepExecutionFilterKeyStepName, + StepExecutionFilterKeyAction, + } +} + +const ( + // StopTypeComplete is a StopType enum value + StopTypeComplete = "Complete" + + // StopTypeCancel is a StopType enum value + StopTypeCancel = "Cancel" +) + +// StopType_Values returns all elements of the StopType enum 
+func StopType_Values() []string { + return []string{ + StopTypeComplete, + StopTypeCancel, + } +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go new file mode 100644 index 00000000000..2fe2457ca9b --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go @@ -0,0 +1,46 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssm provides the client and types for making API +// requests to Amazon Simple Systems Manager (SSM). +// +// AWS Systems Manager is a collection of capabilities that helps you automate +// management tasks such as collecting system inventory, applying operating +// system (OS) patches, automating the creation of Amazon Machine Images (AMIs), +// and configuring operating systems (OSs) and applications at scale. Systems +// Manager lets you remotely and securely manage the configuration of your managed +// instances. A managed instance is any Amazon Elastic Compute Cloud instance +// (EC2 instance), or any on-premises server or virtual machine (VM) in your +// hybrid environment that has been configured for Systems Manager. +// +// This reference is intended to be used with the AWS Systems Manager User Guide +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/). +// +// To get started, verify prerequisites and configure managed instances. For +// more information, see Setting up AWS Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) +// in the AWS Systems Manager User Guide. +// +// For information about other API actions you can perform on EC2 instances, +// see the Amazon EC2 API Reference (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/). +// For information about how to use a Query API, see Making API requests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html). 
+// +// See https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06 for more information on this service. +// +// See ssm package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssm/ +// +// Using the Client +// +// To contact Amazon Simple Systems Manager (SSM) with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Systems Manager (SSM) client SSM for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssm/#New +package ssm diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go new file mode 100644 index 00000000000..74b773c776e --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -0,0 +1,915 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssm + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAlreadyExistsException for service response error code + // "AlreadyExistsException". + // + // Error returned if an attempt is made to register a patch group with a patch + // baseline that is already registered with a different patch baseline. + ErrCodeAlreadyExistsException = "AlreadyExistsException" + + // ErrCodeAssociatedInstances for service response error code + // "AssociatedInstances". + // + // You must disassociate a document from all instances before you can delete + // it. 
+ ErrCodeAssociatedInstances = "AssociatedInstances" + + // ErrCodeAssociationAlreadyExists for service response error code + // "AssociationAlreadyExists". + // + // The specified association already exists. + ErrCodeAssociationAlreadyExists = "AssociationAlreadyExists" + + // ErrCodeAssociationDoesNotExist for service response error code + // "AssociationDoesNotExist". + // + // The specified association does not exist. + ErrCodeAssociationDoesNotExist = "AssociationDoesNotExist" + + // ErrCodeAssociationExecutionDoesNotExist for service response error code + // "AssociationExecutionDoesNotExist". + // + // The specified execution ID does not exist. Verify the ID number and try again. + ErrCodeAssociationExecutionDoesNotExist = "AssociationExecutionDoesNotExist" + + // ErrCodeAssociationLimitExceeded for service response error code + // "AssociationLimitExceeded". + // + // You can have at most 2,000 active associations. + ErrCodeAssociationLimitExceeded = "AssociationLimitExceeded" + + // ErrCodeAssociationVersionLimitExceeded for service response error code + // "AssociationVersionLimitExceeded". + // + // You have reached the maximum number versions allowed for an association. + // Each association has a limit of 1,000 versions. + ErrCodeAssociationVersionLimitExceeded = "AssociationVersionLimitExceeded" + + // ErrCodeAutomationDefinitionNotFoundException for service response error code + // "AutomationDefinitionNotFoundException". + // + // An Automation document with the specified name could not be found. + ErrCodeAutomationDefinitionNotFoundException = "AutomationDefinitionNotFoundException" + + // ErrCodeAutomationDefinitionVersionNotFoundException for service response error code + // "AutomationDefinitionVersionNotFoundException". + // + // An Automation document with the specified name and version could not be found. 
+ ErrCodeAutomationDefinitionVersionNotFoundException = "AutomationDefinitionVersionNotFoundException" + + // ErrCodeAutomationExecutionLimitExceededException for service response error code + // "AutomationExecutionLimitExceededException". + // + // The number of simultaneously running Automation executions exceeded the allowable + // limit. + ErrCodeAutomationExecutionLimitExceededException = "AutomationExecutionLimitExceededException" + + // ErrCodeAutomationExecutionNotFoundException for service response error code + // "AutomationExecutionNotFoundException". + // + // There is no automation execution information for the requested automation + // execution ID. + ErrCodeAutomationExecutionNotFoundException = "AutomationExecutionNotFoundException" + + // ErrCodeAutomationStepNotFoundException for service response error code + // "AutomationStepNotFoundException". + // + // The specified step name and execution ID don't exist. Verify the information + // and try again. + ErrCodeAutomationStepNotFoundException = "AutomationStepNotFoundException" + + // ErrCodeComplianceTypeCountLimitExceededException for service response error code + // "ComplianceTypeCountLimitExceededException". + // + // You specified too many custom compliance types. You can specify a maximum + // of 10 different types. + ErrCodeComplianceTypeCountLimitExceededException = "ComplianceTypeCountLimitExceededException" + + // ErrCodeCustomSchemaCountLimitExceededException for service response error code + // "CustomSchemaCountLimitExceededException". + // + // You have exceeded the limit for custom schemas. Delete one or more custom + // schemas and try again. + ErrCodeCustomSchemaCountLimitExceededException = "CustomSchemaCountLimitExceededException" + + // ErrCodeDocumentAlreadyExists for service response error code + // "DocumentAlreadyExists". + // + // The specified document already exists. 
+ ErrCodeDocumentAlreadyExists = "DocumentAlreadyExists" + + // ErrCodeDocumentLimitExceeded for service response error code + // "DocumentLimitExceeded". + // + // You can have at most 500 active Systems Manager documents. + ErrCodeDocumentLimitExceeded = "DocumentLimitExceeded" + + // ErrCodeDocumentPermissionLimit for service response error code + // "DocumentPermissionLimit". + // + // The document cannot be shared with more AWS user accounts. You can share + // a document with a maximum of 20 accounts. You can publicly share up to five + // documents. If you need to increase this limit, contact AWS Support. + ErrCodeDocumentPermissionLimit = "DocumentPermissionLimit" + + // ErrCodeDocumentVersionLimitExceeded for service response error code + // "DocumentVersionLimitExceeded". + // + // The document has too many versions. Delete one or more document versions + // and try again. + ErrCodeDocumentVersionLimitExceeded = "DocumentVersionLimitExceeded" + + // ErrCodeDoesNotExistException for service response error code + // "DoesNotExistException". + // + // Error returned when the ID specified for a resource, such as a maintenance + // window or Patch baseline, doesn't exist. + // + // For information about resource quotas in Systems Manager, see Systems Manager + // service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) + // in the AWS General Reference. + ErrCodeDoesNotExistException = "DoesNotExistException" + + // ErrCodeDuplicateDocumentContent for service response error code + // "DuplicateDocumentContent". + // + // The content of the association document matches another document. Change + // the content of the document and try again. + ErrCodeDuplicateDocumentContent = "DuplicateDocumentContent" + + // ErrCodeDuplicateDocumentVersionName for service response error code + // "DuplicateDocumentVersionName". + // + // The version name has already been used in this document. Specify a different + // version name, and then try again. 
+ ErrCodeDuplicateDocumentVersionName = "DuplicateDocumentVersionName" + + // ErrCodeDuplicateInstanceId for service response error code + // "DuplicateInstanceId". + // + // You cannot specify an instance ID in more than one association. + ErrCodeDuplicateInstanceId = "DuplicateInstanceId" + + // ErrCodeFeatureNotAvailableException for service response error code + // "FeatureNotAvailableException". + // + // You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where + // the corresponding service is not available. + ErrCodeFeatureNotAvailableException = "FeatureNotAvailableException" + + // ErrCodeHierarchyLevelLimitExceededException for service response error code + // "HierarchyLevelLimitExceededException". + // + // A hierarchy can have a maximum of 15 levels. For more information, see Requirements + // and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // in the AWS Systems Manager User Guide. + ErrCodeHierarchyLevelLimitExceededException = "HierarchyLevelLimitExceededException" + + // ErrCodeHierarchyTypeMismatchException for service response error code + // "HierarchyTypeMismatchException". + // + // Parameter Store does not support changing a parameter type in a hierarchy. + // For example, you can't change a parameter from a String type to a SecureString + // type. You must create a new, unique parameter. + ErrCodeHierarchyTypeMismatchException = "HierarchyTypeMismatchException" + + // ErrCodeIdempotentParameterMismatch for service response error code + // "IdempotentParameterMismatch". + // + // Error returned when an idempotent operation is retried and the parameters + // don't match the original call to the API with the same idempotency token. + ErrCodeIdempotentParameterMismatch = "IdempotentParameterMismatch" + + // ErrCodeIncompatiblePolicyException for service response error code + // "IncompatiblePolicyException". 
+ // + // There is a conflict in the policies specified for this parameter. You can't, + // for example, specify two Expiration policies for a parameter. Review your + // policies, and try again. + ErrCodeIncompatiblePolicyException = "IncompatiblePolicyException" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // An error occurred on the server side. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidActivation for service response error code + // "InvalidActivation". + // + // The activation is not valid. The activation might have been deleted, or the + // ActivationId and the ActivationCode do not match. + ErrCodeInvalidActivation = "InvalidActivation" + + // ErrCodeInvalidActivationId for service response error code + // "InvalidActivationId". + // + // The activation ID is not valid. Verify the you entered the correct ActivationId + // or ActivationCode and try again. + ErrCodeInvalidActivationId = "InvalidActivationId" + + // ErrCodeInvalidAggregatorException for service response error code + // "InvalidAggregatorException". + // + // The specified aggregator is not valid for inventory groups. Verify that the + // aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation. + ErrCodeInvalidAggregatorException = "InvalidAggregatorException" + + // ErrCodeInvalidAllowedPatternException for service response error code + // "InvalidAllowedPatternException". + // + // The request does not meet the regular expression requirement. + ErrCodeInvalidAllowedPatternException = "InvalidAllowedPatternException" + + // ErrCodeInvalidAssociation for service response error code + // "InvalidAssociation". + // + // The association is not valid or does not exist. + ErrCodeInvalidAssociation = "InvalidAssociation" + + // ErrCodeInvalidAssociationVersion for service response error code + // "InvalidAssociationVersion". + // + // The version you specified is not valid. 
Use ListAssociationVersions to view + // all versions of an association according to the association ID. Or, use the + // $LATEST parameter to view the latest version of the association. + ErrCodeInvalidAssociationVersion = "InvalidAssociationVersion" + + // ErrCodeInvalidAutomationExecutionParametersException for service response error code + // "InvalidAutomationExecutionParametersException". + // + // The supplied parameters for invoking the specified Automation document are + // incorrect. For example, they may not match the set of parameters permitted + // for the specified Automation document. + ErrCodeInvalidAutomationExecutionParametersException = "InvalidAutomationExecutionParametersException" + + // ErrCodeInvalidAutomationSignalException for service response error code + // "InvalidAutomationSignalException". + // + // The signal is not valid for the current Automation execution. + ErrCodeInvalidAutomationSignalException = "InvalidAutomationSignalException" + + // ErrCodeInvalidAutomationStatusUpdateException for service response error code + // "InvalidAutomationStatusUpdateException". + // + // The specified update status operation is not valid. + ErrCodeInvalidAutomationStatusUpdateException = "InvalidAutomationStatusUpdateException" + + // ErrCodeInvalidCommandId for service response error code + // "InvalidCommandId". + ErrCodeInvalidCommandId = "InvalidCommandId" + + // ErrCodeInvalidDeleteInventoryParametersException for service response error code + // "InvalidDeleteInventoryParametersException". + // + // One or more of the parameters specified for the delete operation is not valid. + // Verify all parameters and try again. + ErrCodeInvalidDeleteInventoryParametersException = "InvalidDeleteInventoryParametersException" + + // ErrCodeInvalidDeletionIdException for service response error code + // "InvalidDeletionIdException". + // + // The ID specified for the delete operation does not exist or is not valid. + // Verify the ID and try again. 
+ ErrCodeInvalidDeletionIdException = "InvalidDeletionIdException" + + // ErrCodeInvalidDocument for service response error code + // "InvalidDocument". + // + // The specified document does not exist. + ErrCodeInvalidDocument = "InvalidDocument" + + // ErrCodeInvalidDocumentContent for service response error code + // "InvalidDocumentContent". + // + // The content for the document is not valid. + ErrCodeInvalidDocumentContent = "InvalidDocumentContent" + + // ErrCodeInvalidDocumentOperation for service response error code + // "InvalidDocumentOperation". + // + // You attempted to delete a document while it is still shared. You must stop + // sharing the document before you can delete it. + ErrCodeInvalidDocumentOperation = "InvalidDocumentOperation" + + // ErrCodeInvalidDocumentSchemaVersion for service response error code + // "InvalidDocumentSchemaVersion". + // + // The version of the document schema is not supported. + ErrCodeInvalidDocumentSchemaVersion = "InvalidDocumentSchemaVersion" + + // ErrCodeInvalidDocumentType for service response error code + // "InvalidDocumentType". + // + // The document type is not valid. Valid document types are described in the + // DocumentType property. + ErrCodeInvalidDocumentType = "InvalidDocumentType" + + // ErrCodeInvalidDocumentVersion for service response error code + // "InvalidDocumentVersion". + // + // The document version is not valid or does not exist. + ErrCodeInvalidDocumentVersion = "InvalidDocumentVersion" + + // ErrCodeInvalidFilter for service response error code + // "InvalidFilter". + // + // The filter name is not valid. Verify the you entered the correct name and + // try again. + ErrCodeInvalidFilter = "InvalidFilter" + + // ErrCodeInvalidFilterKey for service response error code + // "InvalidFilterKey". + // + // The specified key is not valid. + ErrCodeInvalidFilterKey = "InvalidFilterKey" + + // ErrCodeInvalidFilterOption for service response error code + // "InvalidFilterOption". 
+ // + // The specified filter option is not valid. Valid options are Equals and BeginsWith. + // For Path filter, valid options are Recursive and OneLevel. + ErrCodeInvalidFilterOption = "InvalidFilterOption" + + // ErrCodeInvalidFilterValue for service response error code + // "InvalidFilterValue". + // + // The filter value is not valid. Verify the value and try again. + ErrCodeInvalidFilterValue = "InvalidFilterValue" + + // ErrCodeInvalidInstanceId for service response error code + // "InvalidInstanceId". + // + // The following problems can cause this exception: + // + // You do not have permission to access the instance. + // + // SSM Agent is not running. Verify that SSM Agent is running. + // + // SSM Agent is not registered with the SSM endpoint. Try reinstalling SSM Agent. + // + // The instance is not in valid state. Valid states are: Running, Pending, Stopped, + // Stopping. Invalid states are: Shutting-down and Terminated. + ErrCodeInvalidInstanceId = "InvalidInstanceId" + + // ErrCodeInvalidInstanceInformationFilterValue for service response error code + // "InvalidInstanceInformationFilterValue". + // + // The specified filter value is not valid. + ErrCodeInvalidInstanceInformationFilterValue = "InvalidInstanceInformationFilterValue" + + // ErrCodeInvalidInventoryGroupException for service response error code + // "InvalidInventoryGroupException". + // + // The specified inventory group is not valid. + ErrCodeInvalidInventoryGroupException = "InvalidInventoryGroupException" + + // ErrCodeInvalidInventoryItemContextException for service response error code + // "InvalidInventoryItemContextException". + // + // You specified invalid keys or values in the Context attribute for InventoryItem. + // Verify the keys and values, and try again. + ErrCodeInvalidInventoryItemContextException = "InvalidInventoryItemContextException" + + // ErrCodeInvalidInventoryRequestException for service response error code + // "InvalidInventoryRequestException". 
+ // + // The request is not valid. + ErrCodeInvalidInventoryRequestException = "InvalidInventoryRequestException" + + // ErrCodeInvalidItemContentException for service response error code + // "InvalidItemContentException". + // + // One or more content items is not valid. + ErrCodeInvalidItemContentException = "InvalidItemContentException" + + // ErrCodeInvalidKeyId for service response error code + // "InvalidKeyId". + // + // The query key ID is not valid. + ErrCodeInvalidKeyId = "InvalidKeyId" + + // ErrCodeInvalidNextToken for service response error code + // "InvalidNextToken". + // + // The specified token is not valid. + ErrCodeInvalidNextToken = "InvalidNextToken" + + // ErrCodeInvalidNotificationConfig for service response error code + // "InvalidNotificationConfig". + // + // One or more configuration items is not valid. Verify that a valid Amazon + // Resource Name (ARN) was provided for an Amazon SNS topic. + ErrCodeInvalidNotificationConfig = "InvalidNotificationConfig" + + // ErrCodeInvalidOptionException for service response error code + // "InvalidOptionException". + // + // The delete inventory option specified is not valid. Verify the option and + // try again. + ErrCodeInvalidOptionException = "InvalidOptionException" + + // ErrCodeInvalidOutputFolder for service response error code + // "InvalidOutputFolder". + // + // The S3 bucket does not exist. + ErrCodeInvalidOutputFolder = "InvalidOutputFolder" + + // ErrCodeInvalidOutputLocation for service response error code + // "InvalidOutputLocation". + // + // The output location is not valid or does not exist. + ErrCodeInvalidOutputLocation = "InvalidOutputLocation" + + // ErrCodeInvalidParameters for service response error code + // "InvalidParameters". + // + // You must specify values for all required parameters in the Systems Manager + // document. You can only supply values to parameters defined in the Systems + // Manager document. 
+ ErrCodeInvalidParameters = "InvalidParameters" + + // ErrCodeInvalidPermissionType for service response error code + // "InvalidPermissionType". + // + // The permission type is not supported. Share is the only supported permission + // type. + ErrCodeInvalidPermissionType = "InvalidPermissionType" + + // ErrCodeInvalidPluginName for service response error code + // "InvalidPluginName". + // + // The plugin name is not valid. + ErrCodeInvalidPluginName = "InvalidPluginName" + + // ErrCodeInvalidPolicyAttributeException for service response error code + // "InvalidPolicyAttributeException". + // + // A policy attribute or its value is invalid. + ErrCodeInvalidPolicyAttributeException = "InvalidPolicyAttributeException" + + // ErrCodeInvalidPolicyTypeException for service response error code + // "InvalidPolicyTypeException". + // + // The policy type is not supported. Parameter Store supports the following + // policy types: Expiration, ExpirationNotification, and NoChangeNotification. + ErrCodeInvalidPolicyTypeException = "InvalidPolicyTypeException" + + // ErrCodeInvalidResourceId for service response error code + // "InvalidResourceId". + // + // The resource ID is not valid. Verify that you entered the correct ID and + // try again. + ErrCodeInvalidResourceId = "InvalidResourceId" + + // ErrCodeInvalidResourceType for service response error code + // "InvalidResourceType". + // + // The resource type is not valid. For example, if you are attempting to tag + // an instance, the instance must be a registered, managed instance. + ErrCodeInvalidResourceType = "InvalidResourceType" + + // ErrCodeInvalidResultAttributeException for service response error code + // "InvalidResultAttributeException". + // + // The specified inventory item result attribute is not valid. + ErrCodeInvalidResultAttributeException = "InvalidResultAttributeException" + + // ErrCodeInvalidRole for service response error code + // "InvalidRole". 
+ // + // The role name can't contain invalid characters. Also verify that you specified + // an IAM role for notifications that includes the required trust policy. For + // information about configuring the IAM role for Run Command notifications, + // see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) + // in the AWS Systems Manager User Guide. + ErrCodeInvalidRole = "InvalidRole" + + // ErrCodeInvalidSchedule for service response error code + // "InvalidSchedule". + // + // The schedule is invalid. Verify your cron or rate expression and try again. + ErrCodeInvalidSchedule = "InvalidSchedule" + + // ErrCodeInvalidTarget for service response error code + // "InvalidTarget". + // + // The target is not valid or does not exist. It might not be configured for + // Systems Manager or you might not have permission to perform the operation. + ErrCodeInvalidTarget = "InvalidTarget" + + // ErrCodeInvalidTypeNameException for service response error code + // "InvalidTypeNameException". + // + // The parameter type name is not valid. + ErrCodeInvalidTypeNameException = "InvalidTypeNameException" + + // ErrCodeInvalidUpdate for service response error code + // "InvalidUpdate". + // + // The update is not valid. + ErrCodeInvalidUpdate = "InvalidUpdate" + + // ErrCodeInvocationDoesNotExist for service response error code + // "InvocationDoesNotExist". + // + // The command ID and instance ID you specified did not match any invocations. + // Verify the command ID and the instance ID and try again. + ErrCodeInvocationDoesNotExist = "InvocationDoesNotExist" + + // ErrCodeItemContentMismatchException for service response error code + // "ItemContentMismatchException". + // + // The inventory item has invalid content. 
+ ErrCodeItemContentMismatchException = "ItemContentMismatchException" + + // ErrCodeItemSizeLimitExceededException for service response error code + // "ItemSizeLimitExceededException". + // + // The inventory item size has exceeded the size limit. + ErrCodeItemSizeLimitExceededException = "ItemSizeLimitExceededException" + + // ErrCodeMaxDocumentSizeExceeded for service response error code + // "MaxDocumentSizeExceeded". + // + // The size limit of a document is 64 KB. + ErrCodeMaxDocumentSizeExceeded = "MaxDocumentSizeExceeded" + + // ErrCodeOpsItemAlreadyExistsException for service response error code + // "OpsItemAlreadyExistsException". + // + // The OpsItem already exists. + ErrCodeOpsItemAlreadyExistsException = "OpsItemAlreadyExistsException" + + // ErrCodeOpsItemInvalidParameterException for service response error code + // "OpsItemInvalidParameterException". + // + // A specified parameter argument isn't valid. Verify the available arguments + // and try again. + ErrCodeOpsItemInvalidParameterException = "OpsItemInvalidParameterException" + + // ErrCodeOpsItemLimitExceededException for service response error code + // "OpsItemLimitExceededException". + // + // The request caused OpsItems to exceed one or more quotas. For information + // about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). + ErrCodeOpsItemLimitExceededException = "OpsItemLimitExceededException" + + // ErrCodeOpsItemNotFoundException for service response error code + // "OpsItemNotFoundException". + // + // The specified OpsItem ID doesn't exist. Verify the ID and try again. + ErrCodeOpsItemNotFoundException = "OpsItemNotFoundException" + + // ErrCodeParameterAlreadyExists for service response error code + // "ParameterAlreadyExists". + // + // The parameter already exists. You can't create duplicate parameters. 
+ ErrCodeParameterAlreadyExists = "ParameterAlreadyExists" + + // ErrCodeParameterLimitExceeded for service response error code + // "ParameterLimitExceeded". + // + // You have exceeded the number of parameters for this AWS account. Delete one + // or more parameters and try again. + ErrCodeParameterLimitExceeded = "ParameterLimitExceeded" + + // ErrCodeParameterMaxVersionLimitExceeded for service response error code + // "ParameterMaxVersionLimitExceeded". + // + // Parameter Store retains the 100 most recently created versions of a parameter. + // After this number of versions has been created, Parameter Store deletes the + // oldest version when a new one is created. However, if the oldest version + // has a label attached to it, Parameter Store will not delete the version and + // instead presents this error message: + // + // An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter + // operation: You attempted to create a new version of parameter-name by calling + // the PutParameter API with the overwrite flag. Version version-number, the + // oldest version, can't be deleted because it has a label associated with it. + // Move the label to another version of the parameter, and try again. + // + // This safeguard is to prevent parameter versions with mission critical labels + // assigned to them from being deleted. To continue creating new parameters, + // first move the label from the oldest version of the parameter to a newer + // one for use in your operations. For information about moving parameter labels, + // see Move a parameter label (console) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-console-move) + // or Move a parameter label (CLI) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html#sysman-paramstore-labels-cli-move) + // in the AWS Systems Manager User Guide. 
+ ErrCodeParameterMaxVersionLimitExceeded = "ParameterMaxVersionLimitExceeded" + + // ErrCodeParameterNotFound for service response error code + // "ParameterNotFound". + // + // The parameter could not be found. Verify the name and try again. + ErrCodeParameterNotFound = "ParameterNotFound" + + // ErrCodeParameterPatternMismatchException for service response error code + // "ParameterPatternMismatchException". + // + // The parameter name is not valid. + ErrCodeParameterPatternMismatchException = "ParameterPatternMismatchException" + + // ErrCodeParameterVersionLabelLimitExceeded for service response error code + // "ParameterVersionLabelLimitExceeded". + // + // A parameter version can have a maximum of ten labels. + ErrCodeParameterVersionLabelLimitExceeded = "ParameterVersionLabelLimitExceeded" + + // ErrCodeParameterVersionNotFound for service response error code + // "ParameterVersionNotFound". + // + // The specified parameter version was not found. Verify the parameter name + // and version, and try again. + ErrCodeParameterVersionNotFound = "ParameterVersionNotFound" + + // ErrCodePoliciesLimitExceededException for service response error code + // "PoliciesLimitExceededException". + // + // You specified more than the maximum number of allowed policies for the parameter. + // The maximum is 10. + ErrCodePoliciesLimitExceededException = "PoliciesLimitExceededException" + + // ErrCodeResourceDataSyncAlreadyExistsException for service response error code + // "ResourceDataSyncAlreadyExistsException". + // + // A sync configuration with the same name already exists. + ErrCodeResourceDataSyncAlreadyExistsException = "ResourceDataSyncAlreadyExistsException" + + // ErrCodeResourceDataSyncConflictException for service response error code + // "ResourceDataSyncConflictException". + // + // Another UpdateResourceDataSync request is being processed. Wait a few minutes + // and try again. 
+ ErrCodeResourceDataSyncConflictException = "ResourceDataSyncConflictException" + + // ErrCodeResourceDataSyncCountExceededException for service response error code + // "ResourceDataSyncCountExceededException". + // + // You have exceeded the allowed maximum sync configurations. + ErrCodeResourceDataSyncCountExceededException = "ResourceDataSyncCountExceededException" + + // ErrCodeResourceDataSyncInvalidConfigurationException for service response error code + // "ResourceDataSyncInvalidConfigurationException". + // + // The specified sync configuration is invalid. + ErrCodeResourceDataSyncInvalidConfigurationException = "ResourceDataSyncInvalidConfigurationException" + + // ErrCodeResourceDataSyncNotFoundException for service response error code + // "ResourceDataSyncNotFoundException". + // + // The specified sync name was not found. + ErrCodeResourceDataSyncNotFoundException = "ResourceDataSyncNotFoundException" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // Error returned if an attempt is made to delete a patch baseline that is registered + // for a patch group. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceLimitExceededException for service response error code + // "ResourceLimitExceededException". + // + // Error returned when the caller has exceeded the default resource quotas. + // For example, too many maintenance windows or patch baselines have been created. + // + // For information about resource quotas in Systems Manager, see Systems Manager + // service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) + // in the AWS General Reference. + ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" + + // ErrCodeServiceSettingNotFound for service response error code + // "ServiceSettingNotFound". + // + // The specified service setting was not found. 
Either the service name or the + // setting has not been provisioned by the AWS service team. + ErrCodeServiceSettingNotFound = "ServiceSettingNotFound" + + // ErrCodeStatusUnchanged for service response error code + // "StatusUnchanged". + // + // The updated status is the same as the current status. + ErrCodeStatusUnchanged = "StatusUnchanged" + + // ErrCodeSubTypeCountLimitExceededException for service response error code + // "SubTypeCountLimitExceededException". + // + // The sub-type count exceeded the limit for the inventory type. + ErrCodeSubTypeCountLimitExceededException = "SubTypeCountLimitExceededException" + + // ErrCodeTargetInUseException for service response error code + // "TargetInUseException". + // + // You specified the Safe option for the DeregisterTargetFromMaintenanceWindow + // operation, but the target is still referenced in a task. + ErrCodeTargetInUseException = "TargetInUseException" + + // ErrCodeTargetNotConnected for service response error code + // "TargetNotConnected". + // + // The specified target instance for the session is not fully configured for + // use with Session Manager. For more information, see Getting started with + // Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) + // in the AWS Systems Manager User Guide. This error is also returned if you + // attempt to start a session on an instance that is located in a different + // account or Region + ErrCodeTargetNotConnected = "TargetNotConnected" + + // ErrCodeTooManyTagsError for service response error code + // "TooManyTagsError". + // + // The Targets parameter includes too many tags. Remove one or more tags and + // try the command again. + ErrCodeTooManyTagsError = "TooManyTagsError" + + // ErrCodeTooManyUpdates for service response error code + // "TooManyUpdates". + // + // There are concurrent updates for a resource that supports one update at a + // time. 
+ ErrCodeTooManyUpdates = "TooManyUpdates" + + // ErrCodeTotalSizeLimitExceededException for service response error code + // "TotalSizeLimitExceededException". + // + // The size of inventory data has exceeded the total size limit for the resource. + ErrCodeTotalSizeLimitExceededException = "TotalSizeLimitExceededException" + + // ErrCodeUnsupportedCalendarException for service response error code + // "UnsupportedCalendarException". + // + // The calendar entry contained in the specified Systems Manager document is + // not supported. + ErrCodeUnsupportedCalendarException = "UnsupportedCalendarException" + + // ErrCodeUnsupportedFeatureRequiredException for service response error code + // "UnsupportedFeatureRequiredException". + // + // Microsoft application patching is only available on EC2 instances and advanced + // instances. To patch Microsoft applications on on-premises servers and VMs, + // you must enable advanced instances. For more information, see Using the advanced-instances + // tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) + // in the AWS Systems Manager User Guide. + ErrCodeUnsupportedFeatureRequiredException = "UnsupportedFeatureRequiredException" + + // ErrCodeUnsupportedInventoryItemContextException for service response error code + // "UnsupportedInventoryItemContextException". + // + // The Context attribute that you specified for the InventoryItem is not allowed + // for this inventory type. You can only use the Context attribute with inventory + // types like AWS:ComplianceItem. + ErrCodeUnsupportedInventoryItemContextException = "UnsupportedInventoryItemContextException" + + // ErrCodeUnsupportedInventorySchemaVersionException for service response error code + // "UnsupportedInventorySchemaVersionException". + // + // Inventory item type schema version has to match supported versions in the + // service. 
Check output of GetInventorySchema to see the available schema version + // for each type. + ErrCodeUnsupportedInventorySchemaVersionException = "UnsupportedInventorySchemaVersionException" + + // ErrCodeUnsupportedOperatingSystem for service response error code + // "UnsupportedOperatingSystem". + // + // The operating systems you specified is not supported, or the operation is + // not supported for the operating system. + ErrCodeUnsupportedOperatingSystem = "UnsupportedOperatingSystem" + + // ErrCodeUnsupportedParameterType for service response error code + // "UnsupportedParameterType". + // + // The parameter type is not supported. + ErrCodeUnsupportedParameterType = "UnsupportedParameterType" + + // ErrCodeUnsupportedPlatformType for service response error code + // "UnsupportedPlatformType". + // + // The document does not support the platform type of the given instance ID(s). + // For example, you sent an document for a Windows instance to a Linux instance. + ErrCodeUnsupportedPlatformType = "UnsupportedPlatformType" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AlreadyExistsException": newErrorAlreadyExistsException, + "AssociatedInstances": newErrorAssociatedInstances, + "AssociationAlreadyExists": newErrorAssociationAlreadyExists, + "AssociationDoesNotExist": newErrorAssociationDoesNotExist, + "AssociationExecutionDoesNotExist": newErrorAssociationExecutionDoesNotExist, + "AssociationLimitExceeded": newErrorAssociationLimitExceeded, + "AssociationVersionLimitExceeded": newErrorAssociationVersionLimitExceeded, + "AutomationDefinitionNotFoundException": newErrorAutomationDefinitionNotFoundException, + "AutomationDefinitionVersionNotFoundException": newErrorAutomationDefinitionVersionNotFoundException, + "AutomationExecutionLimitExceededException": newErrorAutomationExecutionLimitExceededException, + "AutomationExecutionNotFoundException": newErrorAutomationExecutionNotFoundException, + "AutomationStepNotFoundException": 
newErrorAutomationStepNotFoundException, + "ComplianceTypeCountLimitExceededException": newErrorComplianceTypeCountLimitExceededException, + "CustomSchemaCountLimitExceededException": newErrorCustomSchemaCountLimitExceededException, + "DocumentAlreadyExists": newErrorDocumentAlreadyExists, + "DocumentLimitExceeded": newErrorDocumentLimitExceeded, + "DocumentPermissionLimit": newErrorDocumentPermissionLimit, + "DocumentVersionLimitExceeded": newErrorDocumentVersionLimitExceeded, + "DoesNotExistException": newErrorDoesNotExistException, + "DuplicateDocumentContent": newErrorDuplicateDocumentContent, + "DuplicateDocumentVersionName": newErrorDuplicateDocumentVersionName, + "DuplicateInstanceId": newErrorDuplicateInstanceId, + "FeatureNotAvailableException": newErrorFeatureNotAvailableException, + "HierarchyLevelLimitExceededException": newErrorHierarchyLevelLimitExceededException, + "HierarchyTypeMismatchException": newErrorHierarchyTypeMismatchException, + "IdempotentParameterMismatch": newErrorIdempotentParameterMismatch, + "IncompatiblePolicyException": newErrorIncompatiblePolicyException, + "InternalServerError": newErrorInternalServerError, + "InvalidActivation": newErrorInvalidActivation, + "InvalidActivationId": newErrorInvalidActivationId, + "InvalidAggregatorException": newErrorInvalidAggregatorException, + "InvalidAllowedPatternException": newErrorInvalidAllowedPatternException, + "InvalidAssociation": newErrorInvalidAssociation, + "InvalidAssociationVersion": newErrorInvalidAssociationVersion, + "InvalidAutomationExecutionParametersException": newErrorInvalidAutomationExecutionParametersException, + "InvalidAutomationSignalException": newErrorInvalidAutomationSignalException, + "InvalidAutomationStatusUpdateException": newErrorInvalidAutomationStatusUpdateException, + "InvalidCommandId": newErrorInvalidCommandId, + "InvalidDeleteInventoryParametersException": newErrorInvalidDeleteInventoryParametersException, + "InvalidDeletionIdException": 
newErrorInvalidDeletionIdException, + "InvalidDocument": newErrorInvalidDocument, + "InvalidDocumentContent": newErrorInvalidDocumentContent, + "InvalidDocumentOperation": newErrorInvalidDocumentOperation, + "InvalidDocumentSchemaVersion": newErrorInvalidDocumentSchemaVersion, + "InvalidDocumentType": newErrorInvalidDocumentType, + "InvalidDocumentVersion": newErrorInvalidDocumentVersion, + "InvalidFilter": newErrorInvalidFilter, + "InvalidFilterKey": newErrorInvalidFilterKey, + "InvalidFilterOption": newErrorInvalidFilterOption, + "InvalidFilterValue": newErrorInvalidFilterValue, + "InvalidInstanceId": newErrorInvalidInstanceId, + "InvalidInstanceInformationFilterValue": newErrorInvalidInstanceInformationFilterValue, + "InvalidInventoryGroupException": newErrorInvalidInventoryGroupException, + "InvalidInventoryItemContextException": newErrorInvalidInventoryItemContextException, + "InvalidInventoryRequestException": newErrorInvalidInventoryRequestException, + "InvalidItemContentException": newErrorInvalidItemContentException, + "InvalidKeyId": newErrorInvalidKeyId, + "InvalidNextToken": newErrorInvalidNextToken, + "InvalidNotificationConfig": newErrorInvalidNotificationConfig, + "InvalidOptionException": newErrorInvalidOptionException, + "InvalidOutputFolder": newErrorInvalidOutputFolder, + "InvalidOutputLocation": newErrorInvalidOutputLocation, + "InvalidParameters": newErrorInvalidParameters, + "InvalidPermissionType": newErrorInvalidPermissionType, + "InvalidPluginName": newErrorInvalidPluginName, + "InvalidPolicyAttributeException": newErrorInvalidPolicyAttributeException, + "InvalidPolicyTypeException": newErrorInvalidPolicyTypeException, + "InvalidResourceId": newErrorInvalidResourceId, + "InvalidResourceType": newErrorInvalidResourceType, + "InvalidResultAttributeException": newErrorInvalidResultAttributeException, + "InvalidRole": newErrorInvalidRole, + "InvalidSchedule": newErrorInvalidSchedule, + "InvalidTarget": newErrorInvalidTarget, + 
"InvalidTypeNameException": newErrorInvalidTypeNameException, + "InvalidUpdate": newErrorInvalidUpdate, + "InvocationDoesNotExist": newErrorInvocationDoesNotExist, + "ItemContentMismatchException": newErrorItemContentMismatchException, + "ItemSizeLimitExceededException": newErrorItemSizeLimitExceededException, + "MaxDocumentSizeExceeded": newErrorMaxDocumentSizeExceeded, + "OpsItemAlreadyExistsException": newErrorOpsItemAlreadyExistsException, + "OpsItemInvalidParameterException": newErrorOpsItemInvalidParameterException, + "OpsItemLimitExceededException": newErrorOpsItemLimitExceededException, + "OpsItemNotFoundException": newErrorOpsItemNotFoundException, + "ParameterAlreadyExists": newErrorParameterAlreadyExists, + "ParameterLimitExceeded": newErrorParameterLimitExceeded, + "ParameterMaxVersionLimitExceeded": newErrorParameterMaxVersionLimitExceeded, + "ParameterNotFound": newErrorParameterNotFound, + "ParameterPatternMismatchException": newErrorParameterPatternMismatchException, + "ParameterVersionLabelLimitExceeded": newErrorParameterVersionLabelLimitExceeded, + "ParameterVersionNotFound": newErrorParameterVersionNotFound, + "PoliciesLimitExceededException": newErrorPoliciesLimitExceededException, + "ResourceDataSyncAlreadyExistsException": newErrorResourceDataSyncAlreadyExistsException, + "ResourceDataSyncConflictException": newErrorResourceDataSyncConflictException, + "ResourceDataSyncCountExceededException": newErrorResourceDataSyncCountExceededException, + "ResourceDataSyncInvalidConfigurationException": newErrorResourceDataSyncInvalidConfigurationException, + "ResourceDataSyncNotFoundException": newErrorResourceDataSyncNotFoundException, + "ResourceInUseException": newErrorResourceInUseException, + "ResourceLimitExceededException": newErrorResourceLimitExceededException, + "ServiceSettingNotFound": newErrorServiceSettingNotFound, + "StatusUnchanged": newErrorStatusUnchanged, + "SubTypeCountLimitExceededException": 
newErrorSubTypeCountLimitExceededException, + "TargetInUseException": newErrorTargetInUseException, + "TargetNotConnected": newErrorTargetNotConnected, + "TooManyTagsError": newErrorTooManyTagsError, + "TooManyUpdates": newErrorTooManyUpdates, + "TotalSizeLimitExceededException": newErrorTotalSizeLimitExceededException, + "UnsupportedCalendarException": newErrorUnsupportedCalendarException, + "UnsupportedFeatureRequiredException": newErrorUnsupportedFeatureRequiredException, + "UnsupportedInventoryItemContextException": newErrorUnsupportedInventoryItemContextException, + "UnsupportedInventorySchemaVersionException": newErrorUnsupportedInventorySchemaVersionException, + "UnsupportedOperatingSystem": newErrorUnsupportedOperatingSystem, + "UnsupportedParameterType": newErrorUnsupportedParameterType, + "UnsupportedPlatformType": newErrorUnsupportedPlatformType, +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go new file mode 100644 index 00000000000..9d097020936 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go @@ -0,0 +1,103 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssm + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// SSM provides the API operation methods for making requests to +// Amazon Simple Systems Manager (SSM). See this package's package overview docs +// for details on the service. +// +// SSM methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type SSM struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "ssm" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "SSM" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SSM client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a SSM client from just a session. +// svc := ssm.New(mySession) +// +// // Create a SSM client with additional configuration +// svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSM { + svc := &SSM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2014-11-06", + JSONVersion: "1.1", + TargetPrefix: "AmazonSSM", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSM operation and runs any +// custom request initialization. +func (c *SSM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go new file mode 100644 index 00000000000..4bc0d1401d9 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go @@ -0,0 +1,91 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilCommandExecuted uses the Amazon SSM API operation +// GetCommandInvocation to wait for a condition to be met before returning. 
+// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SSM) WaitUntilCommandExecuted(input *GetCommandInvocationInput) error { + return c.WaitUntilCommandExecutedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilCommandExecutedWithContext is an extended version of WaitUntilCommandExecuted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) WaitUntilCommandExecutedWithContext(ctx aws.Context, input *GetCommandInvocationInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilCommandExecuted", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Pending", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "InProgress", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Delayed", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Success", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Cancelled", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "TimedOut", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Failed", + }, + { + State: request.FailureWaiterState, + Matcher: 
request.PathWaiterMatch, Argument: "Status", + Expected: "Cancelling", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetCommandInvocationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetCommandInvocationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 00000000000..bfc4372f9fd --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,3119 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. 
+// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. 
+// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. 
Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. 
For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. 
The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. 
+// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. 
Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. 
For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. 
+// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. 
For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. 
Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service API operations. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. 
For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. 
The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. 
+// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. 
+// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. 
+// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. +// +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the operation +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. 
+// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. 
+// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCallerIdentityRequest method. 
+// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetFederationTokenRequest method. 
+// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. 
In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. 
+// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. 
For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. 
For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. +// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. 
If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Session Duration +// +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// Permissions +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. 
+// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. 
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. 
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. 
Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. 
This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. 
+ // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + 
if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. 
+type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. 
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. 
Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. 
For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. 
+ // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. 
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. 
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. 
To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. 
You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. 
Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. 
+ // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. 
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. 
+ // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. 
The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. 
The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. 
This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. 
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. 
If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. 
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
+ Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. 
For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000000..cb1debbaa45 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,32 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// AWS Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for AWS Identity and Access Management (IAM) users or for users +// that you authenticate (federated users). This guide provides descriptions +// of the STS API. For more information about using this service, see Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. 
+// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000000..a233f542ef2 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,82 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. 
+ // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. 
+ // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 00000000000..d34a6855331 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. +// +// STS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 00000000000..e2e1d6efe55 --- /dev/null +++ b/agent/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. 
+package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI provides an interface to enable mocking the +// sts.STS service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } +// +// func main() { +// sess := session.New() +// svc := sts.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. 
+type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + 
GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CODE_OF_CONDUCT.md b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..5b627cfa60b --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CONTRIBUTING.md b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CONTRIBUTING.md new file mode 100644 index 00000000000..5d7d5fadca2 --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check [existing open](https://github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/issues), or [recently closed](https://github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *master* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/labels/help%20wanted) issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](https://github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. + +We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 
diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/LICENSE b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/LICENSE new file mode 100644 index 00000000000..9e30e05ab6d --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/LICENSE @@ -0,0 +1,14 @@ +Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/Makefile b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/Makefile new file mode 100644 index 00000000000..022a1ae5c1d --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/Makefile @@ -0,0 +1,16 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. 
A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +.PHONY: test +test: + go test -timeout=120s -v -cover ./... diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/README.md b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/README.md new file mode 100644 index 00000000000..419ad569023 --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/README.md @@ -0,0 +1,45 @@ +## Go Config Generator For Fluentd And Fluentbit + +A Go Library for programmatically generating Fluentd and Fluent Bit Configuration. + +For example usage, see the [unit test file](generator_test.go). + +The library exports the following interface: + +``` +type FluentConfig interface { + // AddInput adds an input/source directive + AddInput(name string, tag string, options map[string]string) FluentConfig + + // AddIncludeFilter adds a regex filter that only allows logs which match the regex + AddIncludeFilter(regex string, key string, tag string) FluentConfig + + // AddExcludeFilter adds a regex filter that only allows logs which do not match the regex + AddExcludeFilter(regex string, key string, tag string) FluentConfig + + // AddFieldToRecord adds a key value pair to log records + AddFieldToRecord(key string, value string, tag string) FluentConfig + + // AddExternalConfig adds an @INCLUDE directive to reference an external config file + AddExternalConfig(filePath string, position IncludePosition) FluentConfig + + // AddOutput adds an output/log destination + AddOutput(name string, tag string, options map[string]string) FluentConfig + + // WriteFluentdConfig outputs the config in 
Fluentd syntax + WriteFluentdConfig(wr io.Writer) error + + // WriteFluentBitConfig outputs the config in Fluent Bit syntax + WriteFluentBitConfig(wr io.Writer) error +} +``` + +The library uses Fluent Bit terminology in the interface; `AddInput` adds a 'source' in Fluentd syntax, and the `name` argument is used as the plugin `@type`. + +## License Summary + +This sample code is made available under the MIT-0 license. See the LICENSE file. + +#### Security disclosures + +If you think you’ve found a potential security issue, please do not post it in the Issues. Instead, please follow the instructions [here](https://aws.amazon.com/security/vulnerability-reporting/) or email AWS security directly at [aws-security@amazon.com](mailto:aws-security@amazon.com). diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluent-bit-template.go b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluent-bit-template.go new file mode 100644 index 00000000000..447aa60b296 --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluent-bit-template.go @@ -0,0 +1,65 @@ +// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package generator + +var fluentBitConfigTemplate = `{{- range .IncludeConfigHeadOfFile -}} +@INCLUDE {{ . 
}} +{{ end -}} +{{- range .Inputs }} +[INPUT] + Name {{ .Name }} + {{- if .Tag }} + Tag {{ .Tag }} + {{- end }} + {{- range $key, $value := .Options }} + {{ $key }} {{ $value }} + {{- end }} +{{ end -}} +{{- range .IncludeConfigAfterInputs }} +@INCLUDE {{ . }} +{{ end -}} +{{- range .IncludeFilters }} +[FILTER] + Name grep + Match {{ .Tag }} + Regex {{ .Key }} {{ .Regex }} +{{ end -}} +{{- range .ExcludeFilters }} +[FILTER] + Name grep + Match {{ .Tag }} + Exclude {{ .Key }} {{ .Regex }} +{{ end -}} +{{- range $tag, $modifier := .ModifyRecords }} +[FILTER] + Name record_modifier + Match {{ $tag }} + {{- range $key, $value := $modifier.NewFields }} + Record {{ $key }} {{ $value }} + {{- end }} +{{ end -}} +{{- range .IncludeConfigAfterFilters }} +@INCLUDE {{ . }} +{{ end -}} +{{- range .Outputs }} +[OUTPUT] + Name {{ .Name }} + Match {{ .Tag }} + {{- range $key, $value := .Options }} + {{ $key }} {{ $value }} + {{- end }} +{{ end -}} +{{- range .IncludeConfigEndOfFile }} +@INCLUDE {{ . }} +{{ end -}}` diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluentd-template.go b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluentd-template.go new file mode 100644 index 00000000000..b0cffddee22 --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/fluentd-template.go @@ -0,0 +1,74 @@ +// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package generator + +var fluentDConfigTemplate = `{{- range .IncludeConfigHeadOfFile -}} +@include {{ . }} +{{ end -}} +{{- range .Inputs }} + + @type {{ .Name }} + {{- if .Tag }} + tag {{ .Tag }} + {{- end }} + {{- range $key, $value := .Options }} + {{ $key }} {{ $value }} + {{- end }} + +{{ end -}} +{{- range .IncludeConfigAfterInputs }} +@include {{ . }} +{{ end -}} +{{- range .IncludeFilters }} + + @type grep + + key {{ .Key }} + pattern {{ .Regex }} + + +{{ end -}} +{{- range .ExcludeFilters }} + + @type grep + + key {{ .Key }} + pattern {{ .Regex }} + + +{{ end -}} +{{- range $tag, $modifier := .ModifyRecords }} + + @type record_transformer + + {{- range $key, $value := $modifier.NewFields }} + {{ $key }} {{ $value }} + {{- end }} + + +{{ end -}} +{{- range .IncludeConfigAfterFilters }} +@include {{ . }} +{{ end -}} +{{- range .Outputs }} + + @type {{ .Name }} + {{- range $key, $value := .Options }} + {{ $key }} {{ $value }} + {{- end }} + +{{ end -}} +{{- range .IncludeConfigEndOfFile }} +@include {{ . }} +{{ end -}}` diff --git a/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/generator.go b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/generator.go new file mode 100644 index 00000000000..18bc2a4ec22 --- /dev/null +++ b/agent/vendor/github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit/generator.go @@ -0,0 +1,178 @@ +// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package generator + +import ( + "io" + "text/template" +) + +// IncludePosition is a type to represent the position of an @INCLUDE directive +// to include external config +type IncludePosition int + +// This library assumes that the config is structured as follows: +// 1. Sources/inputs +// 2. Filters +// 3. Outputs +const ( + // HeadOfFile: The external config is included as the first directive in the config file + HeadOfFile IncludePosition = iota + // AfterInputs: The external config is included after the input/source directives + AfterInputs + // AfterFilters: The external config is included after the filter directives + AfterFilters + // EndOfFile: The external config is included at the end of the file + EndOfFile +) + +// FluentConfig is the interface for generating fluentd and fluentbit config +type FluentConfig interface { + AddInput(name string, tag string, options map[string]string) FluentConfig + AddIncludeFilter(regex string, key string, tag string) FluentConfig + AddExcludeFilter(regex string, key string, tag string) FluentConfig + AddFieldToRecord(key string, value string, tag string) FluentConfig + AddExternalConfig(filePath string, position IncludePosition) FluentConfig + AddOutput(name string, tag string, options map[string]string) FluentConfig + WriteFluentdConfig(wr io.Writer) error + WriteFluentBitConfig(wr io.Writer) error +} + +// New Creates a new Fluent Config generater +func New() FluentConfig { + return &FluentConfigGenerator{ + ModifyRecords: make(map[string]RecordModifier), + } +} + +// FluentConfigGenerator implements FluentConfig +// It and all its fields must be public, so that the template library can +// use them when +type FluentConfigGenerator struct { + Inputs []LogPipe + ModifyRecords map[string]RecordModifier + IncludeFilters []RegexFilter + ExcludeFilters []RegexFilter + IncludeConfigHeadOfFile []string + 
IncludeConfigAfterInputs []string + IncludeConfigAfterFilters []string + IncludeConfigEndOfFile []string + Outputs []LogPipe +} + +// LogPipe can represent an input or an output plugin +type LogPipe struct { + Name string + Tag string + Options map[string]string +} + +// RecordModifier tracks new fields added to log records +type RecordModifier struct { + NewFields map[string]string +} + +// RegexFilter represents a filter for logs +type RegexFilter struct { + Regex string + Key string + Tag string +} + +// AddInput add an input/source directive +func (config *FluentConfigGenerator) AddInput(name string, tag string, options map[string]string) FluentConfig { + config.Inputs = append(config.Inputs, LogPipe{ + Name: name, + Tag: tag, + Options: options, + }) + return config +} + +// AddIncludeFilter adds a regex filter that only allows logs which match the regex +func (config *FluentConfigGenerator) AddIncludeFilter(regex string, key string, tag string) FluentConfig { + config.IncludeFilters = append(config.IncludeFilters, RegexFilter{ + Regex: regex, + Key: key, + Tag: tag, + }) + return config +} + +// AddExcludeFilter adds a regex filter that only allows logs which do not match the regex +func (config *FluentConfigGenerator) AddExcludeFilter(regex string, key string, tag string) FluentConfig { + config.ExcludeFilters = append(config.ExcludeFilters, RegexFilter{ + Regex: regex, + Key: key, + Tag: tag, + }) + return config +} + +// AddFieldToRecord adds a key value pair to log records +func (config *FluentConfigGenerator) AddFieldToRecord(key string, value string, tag string) FluentConfig { + _, ok := config.ModifyRecords[tag] + if !ok { + config.ModifyRecords[tag] = RecordModifier{ + NewFields: make(map[string]string), + } + } + config.ModifyRecords[tag].NewFields[key] = value + + return config +} + +// AddExternalConfig adds an @INCLUDE directive to reference an external config file +func (config *FluentConfigGenerator) AddExternalConfig(filePath string, position 
IncludePosition) FluentConfig { + switch position { + case HeadOfFile: + config.IncludeConfigHeadOfFile = append(config.IncludeConfigHeadOfFile, filePath) + case AfterInputs: + config.IncludeConfigAfterInputs = append(config.IncludeConfigAfterInputs, filePath) + case AfterFilters: + config.IncludeConfigAfterFilters = append(config.IncludeConfigAfterFilters, filePath) + case EndOfFile: + config.IncludeConfigEndOfFile = append(config.IncludeConfigEndOfFile, filePath) + } + + return config +} + +// AddOutput adds an output/log destination +func (config *FluentConfigGenerator) AddOutput(name string, tag string, options map[string]string) FluentConfig { + config.Outputs = append(config.Outputs, LogPipe{ + Name: name, + Tag: tag, + Options: options, + }) + return config +} + +// WriteFluentdConfig outputs the config in Fluentd syntax +func (config *FluentConfigGenerator) WriteFluentdConfig(wr io.Writer) error { + tmpl, err := template.New("fluent.conf").Parse(fluentDConfigTemplate) + if err != nil { + return err + } + return tmpl.Execute(wr, config) +} + +// WriteFluentBitConfig outputs the config in Fluent Bit syntax +func (config *FluentConfigGenerator) WriteFluentBitConfig(wr io.Writer) error { + tmpl, err := template.New("fluent-bit.conf").Parse(fluentBitConfigTemplate) + if err != nil { + return err + } + return tmpl.Execute(wr, config) +} diff --git a/agent/vendor/github.com/beorn7/perks/LICENSE b/agent/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000000..339177be663 --- /dev/null +++ b/agent/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to 
whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 00000000000..1602287d7ce --- /dev/null +++ b/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 
+3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 
+12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 
+3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 
+11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/agent/vendor/github.com/beorn7/perks/quantile/stream.go b/agent/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000000..d7d14f8eb63 --- /dev/null +++ b/agent/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). 
+// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. 
+func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. 
+ var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/agent/vendor/github.com/cihub/seelog/LICENSE.txt b/agent/vendor/github.com/cihub/seelog/LICENSE.txt new file mode 100644 index 00000000000..8c706814148 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/LICENSE.txt @@ -0,0 +1,24 @@ +Copyright (c) 2012, Cloud Instruments Co., Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Cloud Instruments Co., Ltd. nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/agent/vendor/github.com/cihub/seelog/README.markdown b/agent/vendor/github.com/cihub/seelog/README.markdown new file mode 100644 index 00000000000..7dd1ab3532a --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/README.markdown @@ -0,0 +1,116 @@ +Seelog +======= + +Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages. +It is natively written in the [Go](http://golang.org/) programming language. 
+ +[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest) + +Features +------------------ + +* Xml configuring to be able to change logger parameters without recompilation +* Changing configurations on the fly without app restart +* Possibility to set different log configurations for different project files and functions +* Adjustable message formatting +* Simultaneous log output to multiple streams +* Choosing logger priority strategy to minimize performance hit +* Different output writers + * Console writer + * File writer + * Buffered writer (Chunk writer) + * Rolling log writer (Logging with rotation) + * SMTP writer + * Others... (See [Wiki](https://github.com/cihub/seelog/wiki)) +* Log message wrappers (JSON, XML, etc.) +* Global variables and functions for easy usage in standalone apps +* Functions for flexible usage in libraries + +Quick-start +----------- + +```go +package main + +import log "github.com/cihub/seelog" + +func main() { + defer log.Flush() + log.Info("Hello from Seelog!") +} +``` + +Installation +------------ + +If you don't have the Go development environment installed, visit the +[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. 
Once you're ready, execute the following command: + +``` +go get -u github.com/cihub/seelog +``` + +*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get') + +Documentation +--------------- + +Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki + +Examples +--------------- + +Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples) + +Issues +--------------- + +Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues + +Changelog +--------------- +* **v2.6** : Config using code and custom formatters + * Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported). + * Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters) + * Bugfixes and internal improvements. +* **v2.5** : Interaction with other systems. Part 2: custom receivers + * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers) + * Added 'LoggerFromCustomReceiver' + * Added 'LoggerFromWriterWithMinLevelAndFormat' + * Added 'LoggerFromCustomReceiver' + * Added 'LoggerFromParamConfigAs...' +* **v2.4** : Interaction with other systems. Part 1: wrapping seelog + * Added configurable caller stack skip logic + * Added 'SetAdditionalStackDepth' to 'LoggerInterface' +* **v2.3** : Rethinking 'rolling' receiver + * Reimplemented 'rolling' receiver + * Added 'Max rolls' feature for 'rolling' receiver with type='date' + * Fixed 'rolling' receiver issue: renaming on Windows +* **v2.2** : go1.0 compatibility point [go1.0 tag] + * Fixed internal bugs + * Added 'ANSI n [;k]' format identifier: %EscN + * Made current release go1 compatible +* **v2.1** : Some new features + * Rolling receiver archiving option. 
+ * Added format identifier: %Line + * Smtp: added paths to PEM files directories + * Added format identifier: %FuncShort + * Warn, Error and Critical methods now return an error +* **v2.0** : Second major release. BREAKING CHANGES. + * Support of binaries with stripped symbols + * Added log strategy: adaptive + * Critical message now forces Flush() + * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast + * Added receiver: conn (network connection writer) + * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle + * Bug fixes +* **v1.0** : Initial release. Features: + * Xml config + * Changing configurations on the fly without app restart + * Contraints and exceptions + * Formatting + * Log strategies: sync, async loop, async timer + * Receivers: buffered, console, file, rolling, smtp + + + diff --git a/agent/vendor/github.com/cihub/seelog/archive/archive.go b/agent/vendor/github.com/cihub/seelog/archive/archive.go new file mode 100644 index 00000000000..923036f259e --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/archive/archive.go @@ -0,0 +1,198 @@ +package archive + +import ( + "archive/tar" + "archive/zip" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/cihub/seelog/archive/gzip" +) + +// Reader is the interface for reading files from an archive. +type Reader interface { + NextFile() (name string, err error) + io.Reader +} + +// ReadCloser is the interface that groups Reader with the Close method. +type ReadCloser interface { + Reader + io.Closer +} + +// Writer is the interface for writing files to an archived format. +type Writer interface { + NextFile(name string, fi os.FileInfo) error + io.Writer +} + +// WriteCloser is the interface that groups Writer with the Close method. 
+type WriteCloser interface { + Writer + io.Closer +} + +type nopCloser struct{ Reader } + +func (nopCloser) Close() error { return nil } + +// NopCloser returns a ReadCloser with a no-op Close method wrapping the +// provided Reader r. +func NopCloser(r Reader) ReadCloser { + return nopCloser{r} +} + +// Copy copies from src to dest until either EOF is reached on src or an error +// occurs. +// +// When the archive format of src matches that of dst, Copy streams the files +// directly into dst. Otherwise, copy buffers the contents to disk to compute +// headers before writing to dst. +func Copy(dst Writer, src Reader) error { + switch src := src.(type) { + case tarReader: + if dst, ok := dst.(tarWriter); ok { + return copyTar(dst, src) + } + case zipReader: + if dst, ok := dst.(zipWriter); ok { + return copyZip(dst, src) + } + // Switch on concrete type because gzip has no special methods + case *gzip.Reader: + if dst, ok := dst.(*gzip.Writer); ok { + _, err := io.Copy(dst, src) + return err + } + } + + return copyBuffer(dst, src) +} + +func copyBuffer(dst Writer, src Reader) (err error) { + const defaultFileMode = 0666 + + buf, err := ioutil.TempFile("", "archive_copy_buffer") + if err != nil { + return err + } + defer os.Remove(buf.Name()) // Do not care about failure removing temp + defer buf.Close() // Do not care about failure closing temp + for { + // Handle the next file + name, err := src.NextFile() + switch err { + case io.EOF: // Done copying + return nil + default: // Failed to write: bail out + return err + case nil: // Proceed below + } + + // Buffer the file + if _, err := io.Copy(buf, src); err != nil { + return fmt.Errorf("buffer to disk: %v", err) + } + + // Seek to the start of the file for full file copy + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + + // Set desired file permissions + if err := os.Chmod(buf.Name(), defaultFileMode); err != nil { + return err + } + fi, err := buf.Stat() + if err != nil { + return err + 
} + + // Write the buffered file + if err := dst.NextFile(name, fi); err != nil { + return err + } + if _, err := io.Copy(dst, buf); err != nil { + return fmt.Errorf("copy to dst: %v", err) + } + if err := buf.Truncate(0); err != nil { + return err + } + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + } +} + +type tarReader interface { + Next() (*tar.Header, error) + io.Reader +} + +type tarWriter interface { + WriteHeader(hdr *tar.Header) error + io.Writer +} + +type zipReader interface { + Files() []*zip.File +} + +type zipWriter interface { + CreateHeader(fh *zip.FileHeader) (io.Writer, error) +} + +func copyTar(w tarWriter, r tarReader) error { + for { + hdr, err := r.Next() + switch err { + case io.EOF: + return nil + default: // Handle error + return err + case nil: // Proceed below + } + + info := hdr.FileInfo() + // Skip directories + if info.IsDir() { + continue + } + if err := w.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(w, r); err != nil { + return err + } + } +} + +func copyZip(zw zipWriter, r zipReader) error { + for _, f := range r.Files() { + if err := copyZipFile(zw, f); err != nil { + return err + } + } + return nil +} + +func copyZipFile(zw zipWriter, f *zip.File) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() // Read-only + + hdr := f.FileHeader + hdr.SetModTime(time.Now()) + w, err := zw.CreateHeader(&hdr) + if err != nil { + return err + } + _, err = io.Copy(w, rc) + return err +} diff --git a/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go b/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go new file mode 100644 index 00000000000..ea121018a90 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go @@ -0,0 +1,64 @@ +// Package gzip implements reading and writing of gzip format compressed files. +// See the compress/gzip package for more details. 
+package gzip + +import ( + "compress/gzip" + "fmt" + "io" + "os" +) + +// Reader is an io.Reader that can be read to retrieve uncompressed data from a +// gzip-format compressed file. +type Reader struct { + gzip.Reader + name string + isEOF bool +} + +// NewReader creates a new Reader reading the given reader. +func NewReader(r io.Reader, name string) (*Reader, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &Reader{ + Reader: *gr, + name: name, + }, nil +} + +// NextFile returns the file name. Calls subsequent to the first call will +// return EOF. +func (r *Reader) NextFile() (name string, err error) { + if r.isEOF { + return "", io.EOF + } + + r.isEOF = true + return r.name, nil +} + +// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w. +type Writer struct { + gzip.Writer + name string + noMoreFiles bool +} + +// NextFile never returns a next file, and should not be called more than once. +func (w *Writer) NextFile(name string, _ os.FileInfo) error { + if w.noMoreFiles { + return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name) + } + w.noMoreFiles = true + w.name = name + return nil +} + +// NewWriter returns a new Writer. Writes to the returned writer are compressed +// and written to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *gzip.NewWriter(w)} +} diff --git a/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go b/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go new file mode 100644 index 00000000000..8dd87f57347 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go @@ -0,0 +1,72 @@ +package tar + +import ( + "archive/tar" + "io" + "os" +) + +// Reader provides sequential access to the contents of a tar archive. +type Reader struct { + tar.Reader +} + +// NewReader creates a new Reader reading from r. 
+func NewReader(r io.Reader) *Reader { + return &Reader{Reader: *tar.NewReader(r)} +} + +// NextFile advances to the next file in the tar archive. +func (r *Reader) NextFile() (name string, err error) { + hdr, err := r.Next() + if err != nil { + return "", err + } + return hdr.Name, nil +} + +// Writer provides sequential writing of a tar archive in POSIX.1 format. +type Writer struct { + tar.Writer + closers []io.Closer +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *tar.NewWriter(w)} +} + +// NewWriteMultiCloser creates a new Writer writing to w that also closes all +// closers in order on close. +func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer { + return &Writer{ + Writer: *tar.NewWriter(w), + closers: closers, + } +} + +// NextFile computes and writes a header and prepares to accept the file's +// contents. +func (w *Writer) NextFile(name string, fi os.FileInfo) error { + if name == "" { + name = fi.Name() + } + hdr, err := tar.FileInfoHeader(fi, name) + if err != nil { + return err + } + hdr.Name = name + return w.WriteHeader(hdr) +} + +// Close closes the tar archive and all other closers, flushing any unwritten +// data to the underlying writer. +func (w *Writer) Close() error { + err := w.Writer.Close() + for _, c := range w.closers { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + return err +} diff --git a/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go b/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go new file mode 100644 index 00000000000..4210b03b9fb --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go @@ -0,0 +1,89 @@ +package zip + +import ( + "archive/zip" + "io" + "os" +) + +// Reader provides sequential access to the contents of a zip archive. 
+type Reader struct { + zip.Reader + unread []*zip.File + rc io.ReadCloser +} + +// NewReader returns a new Reader reading from r, which is assumed to have the +// given size in bytes. +func NewReader(r io.ReaderAt, size int64) (*Reader, error) { + zr, err := zip.NewReader(r, size) + if err != nil { + return nil, err + } + return &Reader{Reader: *zr}, nil +} + +// NextFile advances to the next file in the zip archive. +func (r *Reader) NextFile() (name string, err error) { + // Initialize unread + if r.unread == nil { + r.unread = r.Files()[:] + } + + // Close previous file + if r.rc != nil { + r.rc.Close() // Read-only + } + + if len(r.unread) == 0 { + return "", io.EOF + } + + // Open and return next unread + f := r.unread[0] + name, r.unread = f.Name, r.unread[1:] + r.rc, err = f.Open() + if err != nil { + return "", err + } + return name, nil +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.rc.Read(p) +} + +// Files returns the full list of files in the zip archive. +func (r *Reader) Files() []*zip.File { + return r.File +} + +// Writer provides sequential writing of a zip archive.1 format. +type Writer struct { + zip.Writer + w io.Writer +} + +// NewWriter returns a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *zip.NewWriter(w)} +} + +// NextFile computes and writes a header and prepares to accept the file's +// contents. 
+func (w *Writer) NextFile(name string, fi os.FileInfo) error { + if name == "" { + name = fi.Name() + } + hdr, err := zip.FileInfoHeader(fi) + if err != nil { + return err + } + hdr.Name = name + w.w, err = w.CreateHeader(hdr) + return err +} + +func (w *Writer) Write(p []byte) (n int, err error) { + return w.w.Write(p) +} diff --git a/agent/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go b/agent/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go new file mode 100644 index 00000000000..0c640caeb45 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/behavior_adaptivelogger.go @@ -0,0 +1,129 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "math" + "time" +) + +var ( + adaptiveLoggerMaxInterval = time.Minute + adaptiveLoggerMaxCriticalMsgCount = uint32(1000) +) + +// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like +// an async timer logger, but its interval depends on the current message count +// in the queue. +// +// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c: +// I = m + (C - Min(c, C)) / C * (M - m) +type asyncAdaptiveLogger struct { + asyncLogger + minInterval time.Duration + criticalMsgCount uint32 + maxInterval time.Duration +} + +// NewAsyncLoopLogger creates a new asynchronous adaptive logger +func NewAsyncAdaptiveLogger( + config *logConfig, + minInterval time.Duration, + maxInterval time.Duration, + criticalMsgCount uint32) (*asyncAdaptiveLogger, error) { + + if minInterval <= 0 { + return nil, errors.New("async adaptive logger min interval should be > 0") + } + + if maxInterval > adaptiveLoggerMaxInterval { + return nil, fmt.Errorf("async adaptive logger max interval should be <= %s", + adaptiveLoggerMaxInterval) + } + + if criticalMsgCount <= 0 { + return nil, errors.New("async adaptive logger critical msg count should be > 0") + } + + if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount { + return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s", + adaptiveLoggerMaxInterval) + } + + asnAdaptiveLogger := 
new(asyncAdaptiveLogger) + + asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config) + asnAdaptiveLogger.minInterval = minInterval + asnAdaptiveLogger.maxInterval = maxInterval + asnAdaptiveLogger.criticalMsgCount = criticalMsgCount + + go asnAdaptiveLogger.processQueue() + + return asnAdaptiveLogger, nil +} + +func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) { + asnAdaptiveLogger.queueHasElements.L.Lock() + defer asnAdaptiveLogger.queueHasElements.L.Unlock() + + for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() { + asnAdaptiveLogger.queueHasElements.Wait() + } + + if asnAdaptiveLogger.Closed() { + return true, asnAdaptiveLogger.msgQueue.Len() + } + + asnAdaptiveLogger.processQueueElement() + return false, asnAdaptiveLogger.msgQueue.Len() - 1 +} + +// I = m + (C - Min(c, C)) / C * (M - m) => +// I = m + cDiff * mDiff, +// cDiff = (C - Min(c, C)) / C) +// mDiff = (M - m) +func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration { + critCountF := float64(asnAdaptiveLogger.criticalMsgCount) + cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF + mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval) + + return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff) +} + +func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() { + for !asnAdaptiveLogger.Closed() { + closed, itemCount := asnAdaptiveLogger.processItem() + + if closed { + break + } + + interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount) + + <-time.After(interval) + } +} diff --git a/agent/vendor/github.com/cihub/seelog/behavior_asynclogger.go b/agent/vendor/github.com/cihub/seelog/behavior_asynclogger.go new file mode 100644 index 00000000000..75231067b9c --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/behavior_asynclogger.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "container/list" + "fmt" + "sync" +) + +// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush. 
+const ( + MaxQueueSize = 10000 +) + +type msgQueueItem struct { + level LogLevel + context LogContextInterface + message fmt.Stringer +} + +// asyncLogger represents common data for all asynchronous loggers +type asyncLogger struct { + commonLogger + msgQueue *list.List + queueHasElements *sync.Cond +} + +// newAsyncLogger creates a new asynchronous logger +func newAsyncLogger(config *logConfig) *asyncLogger { + asnLogger := new(asyncLogger) + + asnLogger.msgQueue = list.New() + asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex)) + + asnLogger.commonLogger = *newCommonLogger(config, asnLogger) + + return asnLogger +} + +func (asnLogger *asyncLogger) innerLog( + level LogLevel, + context LogContextInterface, + message fmt.Stringer) { + + asnLogger.addMsgToQueue(level, context, message) +} + +func (asnLogger *asyncLogger) Close() { + asnLogger.m.Lock() + defer asnLogger.m.Unlock() + + if !asnLogger.Closed() { + asnLogger.flushQueue(true) + asnLogger.config.RootDispatcher.Flush() + + if err := asnLogger.config.RootDispatcher.Close(); err != nil { + reportInternalError(err) + } + + asnLogger.closedM.Lock() + asnLogger.closed = true + asnLogger.closedM.Unlock() + asnLogger.queueHasElements.Broadcast() + } +} + +func (asnLogger *asyncLogger) Flush() { + asnLogger.m.Lock() + defer asnLogger.m.Unlock() + + if !asnLogger.Closed() { + asnLogger.flushQueue(true) + asnLogger.config.RootDispatcher.Flush() + } +} + +func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) { + if lockNeeded { + asnLogger.queueHasElements.L.Lock() + defer asnLogger.queueHasElements.L.Unlock() + } + + for asnLogger.msgQueue.Len() > 0 { + asnLogger.processQueueElement() + } +} + +func (asnLogger *asyncLogger) processQueueElement() { + if asnLogger.msgQueue.Len() > 0 { + backElement := asnLogger.msgQueue.Front() + msg, _ := backElement.Value.(msgQueueItem) + asnLogger.processLogMsg(msg.level, msg.message, msg.context) + asnLogger.msgQueue.Remove(backElement) + } +} + +func (asnLogger 
*asyncLogger) addMsgToQueue( + level LogLevel, + context LogContextInterface, + message fmt.Stringer) { + + if !asnLogger.Closed() { + asnLogger.queueHasElements.L.Lock() + defer asnLogger.queueHasElements.L.Unlock() + + if asnLogger.msgQueue.Len() >= MaxQueueSize { + fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize) + asnLogger.flushQueue(false) + } + + queueItem := msgQueueItem{level, context, message} + + asnLogger.msgQueue.PushBack(queueItem) + asnLogger.queueHasElements.Broadcast() + } else { + err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message) + reportInternalError(err) + } +} diff --git a/agent/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go b/agent/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go new file mode 100644 index 00000000000..972467b3fc9 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/behavior_asynclooplogger.go @@ -0,0 +1,69 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +// asyncLoopLogger represents asynchronous logger which processes the log queue in +// a 'for' loop +type asyncLoopLogger struct { + asyncLogger +} + +// NewAsyncLoopLogger creates a new asynchronous loop logger +func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger { + + asnLoopLogger := new(asyncLoopLogger) + + asnLoopLogger.asyncLogger = *newAsyncLogger(config) + + go asnLoopLogger.processQueue() + + return asnLoopLogger +} + +func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) { + asnLoopLogger.queueHasElements.L.Lock() + defer asnLoopLogger.queueHasElements.L.Unlock() + + for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() { + asnLoopLogger.queueHasElements.Wait() + } + + if asnLoopLogger.Closed() { + return true + } + + asnLoopLogger.processQueueElement() + return false +} + +func (asnLoopLogger *asyncLoopLogger) processQueue() { + for !asnLoopLogger.Closed() { + closed := asnLoopLogger.processItem() + + if closed { + break + } + } +} diff --git a/agent/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go b/agent/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go new file mode 100644 index 00000000000..8118f205009 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go @@ -0,0 +1,82 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import ( + "errors" + "time" +) + +// asyncTimerLogger represents asynchronous logger which processes the log queue each +// 'duration' nanoseconds +type asyncTimerLogger struct { + asyncLogger + interval time.Duration +} + +// NewAsyncLoopLogger creates a new asynchronous loop logger +func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) { + + if interval <= 0 { + return nil, errors.New("async logger interval should be > 0") + } + + asnTimerLogger := new(asyncTimerLogger) + + asnTimerLogger.asyncLogger = *newAsyncLogger(config) + asnTimerLogger.interval = interval + + go asnTimerLogger.processQueue() + + return asnTimerLogger, nil +} + +func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) { + asnTimerLogger.queueHasElements.L.Lock() + defer asnTimerLogger.queueHasElements.L.Unlock() + + for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() { + asnTimerLogger.queueHasElements.Wait() + } + + if asnTimerLogger.Closed() { + return true + } + + asnTimerLogger.processQueueElement() + return false +} + +func (asnTimerLogger *asyncTimerLogger) processQueue() { + for !asnTimerLogger.Closed() { + closed := asnTimerLogger.processItem() + + if closed { + break + } + + <-time.After(asnTimerLogger.interval) + } +} diff --git a/agent/vendor/github.com/cihub/seelog/behavior_synclogger.go b/agent/vendor/github.com/cihub/seelog/behavior_synclogger.go new file mode 100644 index 00000000000..5a022ebc622 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/behavior_synclogger.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" +) + +// syncLogger performs logging in the same goroutine where 'Trace/Debug/...' 
+// func was called +type syncLogger struct { + commonLogger +} + +// NewSyncLogger creates a new synchronous logger +func NewSyncLogger(config *logConfig) *syncLogger { + syncLogger := new(syncLogger) + + syncLogger.commonLogger = *newCommonLogger(config, syncLogger) + + return syncLogger +} + +func (syncLogger *syncLogger) innerLog( + level LogLevel, + context LogContextInterface, + message fmt.Stringer) { + + syncLogger.processLogMsg(level, message, context) +} + +func (syncLogger *syncLogger) Close() { + syncLogger.m.Lock() + defer syncLogger.m.Unlock() + + if !syncLogger.Closed() { + if err := syncLogger.config.RootDispatcher.Close(); err != nil { + reportInternalError(err) + } + syncLogger.closedM.Lock() + syncLogger.closed = true + syncLogger.closedM.Unlock() + } +} + +func (syncLogger *syncLogger) Flush() { + syncLogger.m.Lock() + defer syncLogger.m.Unlock() + + if !syncLogger.Closed() { + syncLogger.config.RootDispatcher.Flush() + } +} diff --git a/agent/vendor/github.com/cihub/seelog/cfg_config.go b/agent/vendor/github.com/cihub/seelog/cfg_config.go new file mode 100644 index 00000000000..76554fca0c5 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/cfg_config.go @@ -0,0 +1,212 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "os" +) + +// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml. +func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) { + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + conf, err := configFromReader(file) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml. +func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) { + conf, err := configFromReader(bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml. +func LoggerFromConfigAsString(data string) (LoggerInterface, error) { + return LoggerFromConfigAsBytes([]byte(data)) +} + +// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options. +// See 'CfgParseParams' comments. 
+func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) { + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + conf, err := configFromReaderWithConfig(file, parserParams) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options. +// See 'CfgParseParams' comments. +func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) { + conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options. +// See 'CfgParseParams' comments. +func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) { + return LoggerFromParamConfigAsBytes([]byte(data), parserParams) +} + +// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) +func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) { + return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat) +} + +// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the +// receiver with minimal level = minLevel and with specified format. +// +// All messages with level more or equal to minLevel will be written to output and +// formatted using the default seelog format. 
+// +// Can be called for usage with non-Seelog systems +func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) { + constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl) + if err != nil { + return nil, err + } + formatter, err := NewFormatter(format) + if err != nil { + return nil, err + } + dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output}) + if err != nil { + return nil, err + } + + conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node. +// It should contain valid seelog xml, except for root node name. +func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) { + conf, err := configFromXMLDecoder(xmlParser, rootNode) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the +// receiver. +// +// All messages will be sent to the specified custom receiver without additional +// formatting ('%Msg' format is used). +// +// Check CustomReceiver, RegisterReceiver for additional info. +// +// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated +// by the config parser while parsing config. So, if you are not planning to use the +// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and +// loading from config, just leave AfterParse implementation empty. +// +// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized +// instance that implements CustomReceiver. So, fill it with data and perform any initialization +// logic before calling this func and it won't be lost. 
+// +// So: +// * RegisterReceiver takes value just to get the reflect.Type from it and then +// instantiate it as many times as config is reloaded. +// +// * LoggerFromCustomReceiver takes value and uses it without modification and +// reinstantiation, directy passing it to the dispatcher tree. +func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) { + constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl) + if err != nil { + return nil, err + } + + output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{}) + if err != nil { + return nil, err + } + dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output}) + if err != nil { + return nil, err + } + + conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) + if err != nil { + return nil, err + } + + return createLoggerFromFullConfig(conf) +} + +func CloneLogger(logger LoggerInterface) (LoggerInterface, error) { + switch logger := logger.(type) { + default: + return nil, fmt.Errorf("unexpected type %T", logger) + case *asyncAdaptiveLogger: + clone, err := NewAsyncAdaptiveLogger(logger.commonLogger.config, logger.minInterval, logger.maxInterval, logger.criticalMsgCount) + if err != nil { + return nil, err + } + return clone, nil + case *asyncLoopLogger: + return NewAsyncLoopLogger(logger.commonLogger.config), nil + case *asyncTimerLogger: + clone, err := NewAsyncTimerLogger(logger.commonLogger.config, logger.interval) + if err != nil { + return nil, err + } + return clone, nil + case *syncLogger: + return NewSyncLogger(logger.commonLogger.config), nil + } +} diff --git a/agent/vendor/github.com/cihub/seelog/cfg_errors.go b/agent/vendor/github.com/cihub/seelog/cfg_errors.go new file mode 100644 index 00000000000..c1fb4d10118 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/cfg_errors.go @@ -0,0 +1,61 @@ +// Copyright (c) 2012 - 
Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import ( + "errors" +) + +var ( + errNodeMustHaveChildren = errors.New("node must have children") + errNodeCannotHaveChildren = errors.New("node cannot have children") +) + +type unexpectedChildElementError struct { + baseError +} + +func newUnexpectedChildElementError(msg string) *unexpectedChildElementError { + custmsg := "Unexpected child element: " + msg + return &unexpectedChildElementError{baseError{message: custmsg}} +} + +type missingArgumentError struct { + baseError +} + +func newMissingArgumentError(nodeName, attrName string) *missingArgumentError { + custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute" + return &missingArgumentError{baseError{message: custmsg}} +} + +type unexpectedAttributeError struct { + baseError +} + +func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError { + custmsg := nodeName + " has unexpected attribute: " + attr + return &unexpectedAttributeError{baseError{message: custmsg}} +} diff --git a/agent/vendor/github.com/cihub/seelog/cfg_logconfig.go b/agent/vendor/github.com/cihub/seelog/cfg_logconfig.go new file mode 100644 index 00000000000..6ba6f9a9485 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/cfg_logconfig.go @@ -0,0 +1,141 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" +) + +type loggerTypeFromString uint8 + +const ( + syncloggerTypeFromString = iota + asyncLooploggerTypeFromString + asyncTimerloggerTypeFromString + adaptiveLoggerTypeFromString + defaultloggerTypeFromString = asyncLooploggerTypeFromString +) + +const ( + syncloggerTypeFromStringStr = "sync" + asyncloggerTypeFromStringStr = "asyncloop" + asyncTimerloggerTypeFromStringStr = "asynctimer" + adaptiveLoggerTypeFromStringStr = "adaptive" +) + +// asyncTimerLoggerData represents specific data for async timer logger +type asyncTimerLoggerData struct { + AsyncInterval uint32 +} + +// adaptiveLoggerData represents specific data for adaptive timer logger +type adaptiveLoggerData struct { + MinInterval uint32 + MaxInterval uint32 + CriticalMsgCount uint32 +} + +var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{ + syncloggerTypeFromString: syncloggerTypeFromStringStr, + asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr, + asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr, + adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr, +} + +// getLoggerTypeFromString parses a string and returns a 
corresponding logger type, if successful. +func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) { + for logType, logTypeStr := range loggerTypeToStringRepresentations { + if logTypeStr == logTypeString { + return logType, true + } + } + + return 0, false +} + +// logConfig stores logging configuration. Contains messages dispatcher, allowed log level rules +// (general constraints and exceptions) +type logConfig struct { + Constraints logLevelConstraints // General log level rules (>min and ' element. It takes the 'name' attribute + // of the element and tries to find a match in two places: + // 1) CfgParseParams.CustomReceiverProducers map + // 2) Global type map, filled by RegisterReceiver + // + // If a match is found in the CustomReceiverProducers map, parser calls the corresponding producer func + // passing the init args to it. The func takes exactly the same args as CustomReceiver.AfterParse. + // The producer func must return a correct receiver or an error. If case of error, seelog will behave + // in the same way as with any other config error. + // + // You may use this param to set custom producers in case you need to pass some context when instantiating + // a custom receiver or if you frequently change custom receivers with different parameters or in any other + // situation where package-level registering (RegisterReceiver) is not an option for you. 
+ CustomReceiverProducers map[string]CustomReceiverProducer +} + +func (cfg *CfgParseParams) String() string { + return fmt.Sprintf("CfgParams: {custom_recs=%d}", len(cfg.CustomReceiverProducers)) +} + +type elementMapEntry struct { + constructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) +} + +var elementMap map[string]elementMapEntry +var predefinedFormats map[string]*formatter + +func init() { + elementMap = map[string]elementMapEntry{ + fileWriterID: {createfileWriter}, + splitterDispatcherID: {createSplitter}, + customReceiverID: {createCustomReceiver}, + filterDispatcherID: {createFilter}, + consoleWriterID: {createConsoleWriter}, + rollingfileWriterID: {createRollingFileWriter}, + bufferedWriterID: {createbufferedWriter}, + smtpWriterID: {createSMTPWriter}, + connWriterID: {createconnWriter}, + } + + err := fillPredefinedFormats() + if err != nil { + panic(fmt.Sprintf("Seelog couldn't start: predefined formats creation failed. Error: %s", err.Error())) + } +} + +func fillPredefinedFormats() error { + predefinedFormatsWithoutPrefix := map[string]string{ + "xml-debug": `%Lev%Msg%RelFile%Func%Line`, + "xml-debug-short": `%Ns%l%Msg

%RelFile

%Func`, + "xml": `%Lev%Msg`, + "xml-short": `%Ns%l%Msg`, + + "json-debug": `{"time":%Ns,"lev":"%Lev","msg":"%Msg","path":"%RelFile","func":"%Func","line":"%Line"}`, + "json-debug-short": `{"t":%Ns,"l":"%Lev","m":"%Msg","p":"%RelFile","f":"%Func"}`, + "json": `{"time":%Ns,"lev":"%Lev","msg":"%Msg"}`, + "json-short": `{"t":%Ns,"l":"%Lev","m":"%Msg"}`, + + "debug": `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`, + "debug-short": `[%LEVEL] %Date %Time %Msg%n`, + "fast": `%Ns %l %Msg%n`, + } + + predefinedFormats = make(map[string]*formatter) + + for formatKey, format := range predefinedFormatsWithoutPrefix { + formatter, err := NewFormatter(format) + if err != nil { + return err + } + + predefinedFormats[predefinedPrefix+formatKey] = formatter + } + + return nil +} + +// configFromXMLDecoder parses data from a given XML decoder. +// Returns parsed config which can be used to create logger in case no errors occured. +// Returns error if format is incorrect or anything happened. +func configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*configForParsing, error) { + return configFromXMLDecoderWithConfig(xmlParser, rootNode, nil) +} + +// configFromXMLDecoderWithConfig parses data from a given XML decoder. +// Returns parsed config which can be used to create logger in case no errors occured. +// Returns error if format is incorrect or anything happened. +func configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*configForParsing, error) { + _, ok := rootNode.(xml.StartElement) + if !ok { + return nil, errors.New("rootNode must be XML startElement") + } + + config, err := unmarshalNode(xmlParser, rootNode) + if err != nil { + return nil, err + } + if config == nil { + return nil, errors.New("xml has no content") + } + + return configFromXMLNodeWithConfig(config, cfg) +} + +// configFromReader parses data from a given reader. +// Returns parsed config which can be used to create logger in case no errors occured. 
+// Returns error if format is incorrect or anything happened. +func configFromReader(reader io.Reader) (*configForParsing, error) { + return configFromReaderWithConfig(reader, nil) +} + +// configFromReaderWithConfig parses data from a given reader. +// Returns parsed config which can be used to create logger in case no errors occured. +// Returns error if format is incorrect or anything happened. +func configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*configForParsing, error) { + config, err := unmarshalConfig(reader) + if err != nil { + return nil, err + } + + if config.name != seelogConfigID { + return nil, errors.New("root xml tag must be '" + seelogConfigID + "'") + } + + return configFromXMLNodeWithConfig(config, cfg) +} + +func configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*configForParsing, error) { + err := checkUnexpectedAttribute( + config, + minLevelID, + maxLevelID, + levelsID, + loggerTypeFromStringAttr, + asyncLoggerIntervalAttr, + adaptLoggerMinIntervalAttr, + adaptLoggerMaxIntervalAttr, + adaptLoggerCriticalMsgCountAttr, + ) + if err != nil { + return nil, err + } + + err = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID)) + if err != nil { + return nil, err + } + + constraints, err := getConstraints(config) + if err != nil { + return nil, err + } + + exceptions, err := getExceptions(config) + if err != nil { + return nil, err + } + err = checkDistinctExceptions(exceptions) + if err != nil { + return nil, err + } + + formats, err := getFormats(config) + if err != nil { + return nil, err + } + + dispatcher, err := getOutputsTree(config, formats, cfg) + if err != nil { + // If we open several files, but then fail to parse the config, we should close + // those files before reporting that config is invalid. 
+ if dispatcher != nil { + dispatcher.Close() + } + + return nil, err + } + + loggerType, logData, err := getloggerTypeFromStringData(config) + if err != nil { + return nil, err + } + + return newFullLoggerConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg) +} + +func getConstraints(node *xmlNode) (logLevelConstraints, error) { + minLevelStr, isMinLevel := node.attributes[minLevelID] + maxLevelStr, isMaxLevel := node.attributes[maxLevelID] + levelsStr, isLevels := node.attributes[levelsID] + + if isLevels && (isMinLevel && isMaxLevel) { + return nil, errors.New("for level declaration use '" + levelsID + "'' OR '" + minLevelID + + "', '" + maxLevelID + "'") + } + + offString := LogLevel(Off).String() + + if (isLevels && strings.TrimSpace(levelsStr) == offString) || + (isMinLevel && !isMaxLevel && minLevelStr == offString) { + + return NewOffConstraints() + } + + if isLevels { + levels, err := parseLevels(levelsStr) + if err != nil { + return nil, err + } + return NewListConstraints(levels) + } + + var minLevel = LogLevel(TraceLvl) + if isMinLevel { + found := true + minLevel, found = LogLevelFromString(minLevelStr) + if !found { + return nil, errors.New("declared " + minLevelID + " not found: " + minLevelStr) + } + } + + var maxLevel = LogLevel(CriticalLvl) + if isMaxLevel { + found := true + maxLevel, found = LogLevelFromString(maxLevelStr) + if !found { + return nil, errors.New("declared " + maxLevelID + " not found: " + maxLevelStr) + } + } + + return NewMinMaxConstraints(minLevel, maxLevel) +} + +func parseLevels(str string) ([]LogLevel, error) { + levelsStrArr := strings.Split(strings.Replace(str, " ", "", -1), ",") + var levels []LogLevel + for _, levelStr := range levelsStrArr { + level, found := LogLevelFromString(levelStr) + if !found { + return nil, errors.New("declared level not found: " + levelStr) + } + + levels = append(levels, level) + } + + return levels, nil +} + +func getExceptions(config *xmlNode) ([]*LogLevelException, error) { 
+ var exceptions []*LogLevelException + + var exceptionsNode *xmlNode + for _, child := range config.children { + if child.name == exceptionsID { + exceptionsNode = child + break + } + } + + if exceptionsNode == nil { + return exceptions, nil + } + + err := checkUnexpectedAttribute(exceptionsNode) + if err != nil { + return nil, err + } + + err = checkExpectedElements(exceptionsNode, multipleMandatoryElements("exception")) + if err != nil { + return nil, err + } + + for _, exceptionNode := range exceptionsNode.children { + if exceptionNode.name != exceptionID { + return nil, errors.New("incorrect nested element in exceptions section: " + exceptionNode.name) + } + + err := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID) + if err != nil { + return nil, err + } + + constraints, err := getConstraints(exceptionNode) + if err != nil { + return nil, errors.New("incorrect " + exceptionsID + " node: " + err.Error()) + } + + funcPattern, isFuncPattern := exceptionNode.attributes[funcPatternID] + filePattern, isFilePattern := exceptionNode.attributes[filePatternID] + if !isFuncPattern { + funcPattern = "*" + } + if !isFilePattern { + filePattern = "*" + } + + exception, err := NewLogLevelException(funcPattern, filePattern, constraints) + if err != nil { + return nil, errors.New("incorrect exception node: " + err.Error()) + } + + exceptions = append(exceptions, exception) + } + + return exceptions, nil +} + +func checkDistinctExceptions(exceptions []*LogLevelException) error { + for i, exception := range exceptions { + for j, exception1 := range exceptions { + if i == j { + continue + } + + if exception.FuncPattern() == exception1.FuncPattern() && + exception.FilePattern() == exception1.FilePattern() { + + return fmt.Errorf("there are two or more duplicate exceptions. 
Func: %v, file %v", + exception.FuncPattern(), exception.FilePattern()) + } + } + } + + return nil +} + +func getFormats(config *xmlNode) (map[string]*formatter, error) { + formats := make(map[string]*formatter, 0) + + var formatsNode *xmlNode + for _, child := range config.children { + if child.name == formatsID { + formatsNode = child + break + } + } + + if formatsNode == nil { + return formats, nil + } + + err := checkUnexpectedAttribute(formatsNode) + if err != nil { + return nil, err + } + + err = checkExpectedElements(formatsNode, multipleMandatoryElements("format")) + if err != nil { + return nil, err + } + + for _, formatNode := range formatsNode.children { + if formatNode.name != formatID { + return nil, errors.New("incorrect nested element in " + formatsID + " section: " + formatNode.name) + } + + err := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID) + if err != nil { + return nil, err + } + + id, isID := formatNode.attributes[formatKeyAttrID] + formatStr, isFormat := formatNode.attributes[formatAttrID] + if !isID { + return nil, errors.New("format has no '" + formatKeyAttrID + "' attribute") + } + if !isFormat { + return nil, errors.New("format[" + id + "] has no '" + formatAttrID + "' attribute") + } + + formatter, err := NewFormatter(formatStr) + if err != nil { + return nil, err + } + + formats[id] = formatter + } + + return formats, nil +} + +func getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) { + logTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr] + + if !loggerTypeExists { + return defaultloggerTypeFromString, nil, nil + } + + logType, found := getLoggerTypeFromString(logTypeStr) + + if !found { + return 0, nil, fmt.Errorf("unknown logger type: %s", logTypeStr) + } + + if logType == asyncTimerloggerTypeFromString { + intervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr] + if !intervalExists { + return 0, nil, 
newMissingArgumentError(config.name, asyncLoggerIntervalAttr) + } + + interval, err := strconv.ParseUint(intervalStr, 10, 32) + if err != nil { + return 0, nil, err + } + + logData = asyncTimerLoggerData{uint32(interval)} + } else if logType == adaptiveLoggerTypeFromString { + + // Min interval + minIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr] + if !minIntExists { + return 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr) + } + minInterval, err := strconv.ParseUint(minIntStr, 10, 32) + if err != nil { + return 0, nil, err + } + + // Max interval + maxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr] + if !maxIntExists { + return 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr) + } + maxInterval, err := strconv.ParseUint(maxIntStr, 10, 32) + if err != nil { + return 0, nil, err + } + + // Critical msg count + criticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr] + if !criticalMsgCountExists { + return 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr) + } + criticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32) + if err != nil { + return 0, nil, err + } + + logData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)} + } + + return logType, logData, nil +} + +func getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) { + var outputsNode *xmlNode + for _, child := range config.children { + if child.name == outputsID { + outputsNode = child + break + } + } + + if outputsNode != nil { + err := checkUnexpectedAttribute(outputsNode, outputFormatID) + if err != nil { + return nil, err + } + + formatter, err := getCurrentFormat(outputsNode, DefaultFormatter, formats) + if err != nil { + return nil, err + } + + output, err := createSplitter(outputsNode, formatter, formats, cfg) + if err != nil { + return 
nil, err + } + + dispatcher, ok := output.(dispatcherInterface) + if ok { + return dispatcher, nil + } + } + + console, err := NewConsoleWriter() + if err != nil { + return nil, err + } + return NewSplitDispatcher(DefaultFormatter, []interface{}{console}) +} + +func getCurrentFormat(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter) (*formatter, error) { + formatID, isFormatID := node.attributes[outputFormatID] + if !isFormatID { + return formatFromParent, nil + } + + format, ok := formats[formatID] + if ok { + return format, nil + } + + // Test for predefined format match + pdFormat, pdOk := predefinedFormats[formatID] + + if !pdOk { + return nil, errors.New("formatid = '" + formatID + "' doesn't exist") + } + + return pdFormat, nil +} + +func createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg *CfgParseParams) ([]interface{}, error) { + var outputs []interface{} + for _, childNode := range node.children { + entry, ok := elementMap[childNode.name] + if !ok { + return nil, errors.New("unnknown tag '" + childNode.name + "' in outputs section") + } + + output, err := entry.constructor(childNode, format, formats, cfg) + if err != nil { + return nil, err + } + + outputs = append(outputs, output) + } + + return outputs, nil +} + +func createSplitter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID) + if err != nil { + return nil, err + } + + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) + if err != nil { + return nil, err + } + + return NewSplitDispatcher(currentFormat, receivers) +} + +func createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats 
map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + dataCustomPrefixes := make(map[string]string) + // Expecting only 'formatid', 'name' and 'data-' attrs + for attr, attrval := range node.attributes { + isExpected := false + if attr == outputFormatID || + attr == customNameAttrID { + isExpected = true + } + if strings.HasPrefix(attr, customNameDataAttrPrefix) { + dataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval + isExpected = true + } + if !isExpected { + return nil, newUnexpectedAttributeError(node.name, attr) + } + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + customName, hasCustomName := node.attributes[customNameAttrID] + if !hasCustomName { + return nil, newMissingArgumentError(node.name, customNameAttrID) + } + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + args := CustomReceiverInitArgs{ + XmlCustomAttrs: dataCustomPrefixes, + } + + if cfg != nil && cfg.CustomReceiverProducers != nil { + if prod, ok := cfg.CustomReceiverProducers[customName]; ok { + rec, err := prod(args) + if err != nil { + return nil, err + } + creceiver, err := NewCustomReceiverDispatcherByValue(currentFormat, rec, customName, args) + if err != nil { + return nil, err + } + err = rec.AfterParse(args) + if err != nil { + return nil, err + } + return creceiver, nil + } + } + + return NewCustomReceiverDispatcher(currentFormat, customName, args) +} + +func createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID) + if err != nil { + return nil, err + } + + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + levelsStr, isLevels := node.attributes[filterLevelsAttrID] + if !isLevels { 
+ return nil, newMissingArgumentError(node.name, filterLevelsAttrID) + } + + levels, err := parseLevels(levelsStr) + if err != nil { + return nil, err + } + + receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) + if err != nil { + return nil, err + } + + return NewFilterDispatcher(currentFormat, receivers, levels...) +} + +func createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, pathID) + if err != nil { + return nil, err + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + path, isPath := node.attributes[pathID] + if !isPath { + return nil, newMissingArgumentError(node.name, pathID) + } + + fileWriter, err := NewFileWriter(path) + if err != nil { + return nil, err + } + + return NewFormattedWriter(fileWriter, currentFormat) +} + +// Creates new SMTP writer if encountered in the config file. +func createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID) + if err != nil { + return nil, err + } + // Node must have children. 
+ if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + senderAddress, ok := node.attributes[senderaddressID] + if !ok { + return nil, newMissingArgumentError(node.name, senderaddressID) + } + senderName, ok := node.attributes[senderNameID] + if !ok { + return nil, newMissingArgumentError(node.name, senderNameID) + } + // Process child nodes scanning for recipient email addresses and/or CA certificate paths. + var recipientAddresses []string + var caCertDirPaths []string + var mailHeaders []string + for _, childNode := range node.children { + switch childNode.name { + // Extract recipient address from child nodes. + case recipientID: + address, ok := childNode.attributes[addressID] + if !ok { + return nil, newMissingArgumentError(childNode.name, addressID) + } + recipientAddresses = append(recipientAddresses, address) + // Extract CA certificate file path from child nodes. + case cACertDirpathID: + path, ok := childNode.attributes[pathID] + if !ok { + return nil, newMissingArgumentError(childNode.name, pathID) + } + caCertDirPaths = append(caCertDirPaths, path) + + // Extract email headers from child nodes. 
+ case mailHeaderID: + headerName, ok := childNode.attributes[mailHeaderNameID] + if !ok { + return nil, newMissingArgumentError(childNode.name, mailHeaderNameID) + } + + headerValue, ok := childNode.attributes[mailHeaderValueID] + if !ok { + return nil, newMissingArgumentError(childNode.name, mailHeaderValueID) + } + + // Build header line + mailHeaders = append(mailHeaders, fmt.Sprintf("%s: %s", headerName, headerValue)) + default: + return nil, newUnexpectedChildElementError(childNode.name) + } + } + hostName, ok := node.attributes[hostNameID] + if !ok { + return nil, newMissingArgumentError(node.name, hostNameID) + } + + hostPort, ok := node.attributes[hostPortID] + if !ok { + return nil, newMissingArgumentError(node.name, hostPortID) + } + + // Check if the string can really be converted into int. + if _, err := strconv.Atoi(hostPort); err != nil { + return nil, errors.New("invalid host port number") + } + + userName, ok := node.attributes[userNameID] + if !ok { + return nil, newMissingArgumentError(node.name, userNameID) + } + + userPass, ok := node.attributes[userPassID] + if !ok { + return nil, newMissingArgumentError(node.name, userPassID) + } + + // subject is optionally set by configuration. 
+ // default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go + var subjectPhrase = DefaultSubjectPhrase + + subject, ok := node.attributes[subjectID] + if ok { + subjectPhrase = subject + } + + smtpWriter := NewSMTPWriter( + senderAddress, + senderName, + recipientAddresses, + hostName, + hostPort, + userName, + userPass, + caCertDirPaths, + subjectPhrase, + mailHeaders, + ) + + return NewFormattedWriter(smtpWriter, currentFormat) +} + +func createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID) + if err != nil { + return nil, err + } + + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + consoleWriter, err := NewConsoleWriter() + if err != nil { + return nil, err + } + + return NewFormattedWriter(consoleWriter, currentFormat) +} + +func createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + err := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr) + if err != nil { + return nil, err + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + addr, isAddr := node.attributes[connWriterAddrAttr] + if !isAddr { + return nil, newMissingArgumentError(node.name, connWriterAddrAttr) + } + + net, isNet := node.attributes[connWriterNetAttr] + if !isNet { + return nil, newMissingArgumentError(node.name, connWriterNetAttr) + } + + reconnectOnMsg := false + reconnectOnMsgStr, isReconnectOnMsgStr := 
node.attributes[connWriterReconnectOnMsgAttr] + if isReconnectOnMsgStr { + if reconnectOnMsgStr == "true" { + reconnectOnMsg = true + } else if reconnectOnMsgStr == "false" { + reconnectOnMsg = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterReconnectOnMsgAttr + "' attribute value") + } + } + + useTLS := false + useTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr] + if isUseTLSStr { + if useTLSStr == "true" { + useTLS = true + } else if useTLSStr == "false" { + useTLS = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterUseTLSAttr + "' attribute value") + } + if useTLS { + insecureSkipVerify := false + insecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr] + if isInsecureSkipVerify { + if insecureSkipVerifyStr == "true" { + insecureSkipVerify = true + } else if insecureSkipVerifyStr == "false" { + insecureSkipVerify = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterInsecureSkipVerifyAttr + "' attribute value") + } + } + config := tls.Config{InsecureSkipVerify: insecureSkipVerify} + connWriter := newTLSWriter(net, addr, reconnectOnMsg, &config) + return NewFormattedWriter(connWriter, currentFormat) + } + } + + connWriter := NewConnWriter(net, addr, reconnectOnMsg) + + return NewFormattedWriter(connWriter, currentFormat) +} + +func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + if node.hasChildren() { + return nil, errNodeCannotHaveChildren + } + + rollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr] + if !isRollingType { + return nil, newMissingArgumentError(node.name, rollingFileTypeAttr) + } + + rollingType, ok := rollingTypeFromString(rollingTypeStr) + if !ok { + return nil, errors.New("unknown rolling file type: " + rollingTypeStr) + } + + currentFormat, 
err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + path, isPath := node.attributes[rollingFilePathAttr] + if !isPath { + return nil, newMissingArgumentError(node.name, rollingFilePathAttr) + } + + rollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr] + + var rArchiveType rollingArchiveType + var rArchivePath string + var rArchiveExploded bool = false + if !archiveAttrExists { + rArchiveType = rollingArchiveNone + rArchivePath = "" + } else { + rArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr) + if !ok { + return nil, errors.New("unknown rolling archive type: " + rollingArchiveStr) + } + + if rArchiveType == rollingArchiveNone { + rArchivePath = "" + } else { + if rArchiveExplodedAttr, ok := node.attributes[rollingFileArchiveExplodedAttr]; ok { + if rArchiveExploded, err = strconv.ParseBool(rArchiveExplodedAttr); err != nil { + return nil, fmt.Errorf("archive exploded should be true or false, but was %v", + rArchiveExploded) + } + } + + rArchivePath, ok = node.attributes[rollingFileArchivePathAttr] + if ok { + if rArchivePath == "" { + return nil, fmt.Errorf("empty archive path is not supported") + } + } else { + if rArchiveExploded { + rArchivePath = rollingArchiveDefaultExplodedName + + } else { + rArchivePath, err = rollingArchiveTypeDefaultName(rArchiveType, false) + if err != nil { + return nil, err + } + } + } + } + } + + nameMode := rollingNameMode(rollingNameModePostfix) + nameModeStr, ok := node.attributes[rollingFileNameModeAttr] + if ok { + mode, found := rollingNameModeFromString(nameModeStr) + if !found { + return nil, errors.New("unknown rolling filename mode: " + nameModeStr) + } else { + nameMode = mode + } + } + + if rollingType == rollingTypeSize { + err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, + rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr, + rollingFileArchivePathAttr, 
rollingFileArchiveExplodedAttr, rollingFileNameModeAttr) + if err != nil { + return nil, err + } + + maxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr] + if !ok { + return nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr) + } + + maxSize, err := strconv.ParseInt(maxSizeStr, 10, 64) + if err != nil { + return nil, err + } + + maxRolls := 0 + maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] + if ok { + maxRolls, err = strconv.Atoi(maxRollsStr) + if err != nil { + return nil, err + } + } + + rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode, rArchiveExploded) + if err != nil { + return nil, err + } + + return NewFormattedWriter(rollingWriter, currentFormat) + + } else if rollingType == rollingTypeTime { + err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, + rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr, + rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr, + rollingFileFullNameAttr) + if err != nil { + return nil, err + } + + maxRolls := 0 + maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr] + if ok { + maxRolls, err = strconv.Atoi(maxRollsStr) + if err != nil { + return nil, err + } + } + + fullName := false + fn, ok := node.attributes[rollingFileFullNameAttr] + if ok { + if fn == "true" { + fullName = true + } else if fn == "false" { + fullName = false + } else { + return nil, errors.New("node '" + node.name + "' has incorrect '" + rollingFileFullNameAttr + "' attribute value") + } + } + + dataPattern, ok := node.attributes[rollingFileDataPatternAttr] + if !ok { + return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr) + } + + rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, nameMode, rArchiveExploded, fullName) + if err != nil { + return nil, err + } + + return 
NewFormattedWriter(rollingWriter, currentFormat) + } + + return nil, errors.New("incorrect rolling writer type " + rollingTypeStr) +} + +func createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) { + err := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr) + if err != nil { + return nil, err + } + + if !node.hasChildren() { + return nil, errNodeMustHaveChildren + } + + currentFormat, err := getCurrentFormat(node, formatFromParent, formats) + if err != nil { + return nil, err + } + + sizeStr, isSize := node.attributes[bufferedSizeAttr] + if !isSize { + return nil, newMissingArgumentError(node.name, bufferedSizeAttr) + } + + size, err := strconv.Atoi(sizeStr) + if err != nil { + return nil, err + } + + flushPeriod := 0 + flushPeriodStr, isFlushPeriod := node.attributes[bufferedFlushPeriodAttr] + if isFlushPeriod { + flushPeriod, err = strconv.Atoi(flushPeriodStr) + if err != nil { + return nil, err + } + } + + // Inner writer couldn't have its own format, so we pass 'currentFormat' as its parent format + receivers, err := createInnerReceivers(node, currentFormat, formats, cfg) + if err != nil { + return nil, err + } + + formattedWriter, ok := receivers[0].(*formattedWriter) + if !ok { + return nil, errors.New("buffered writer's child is not writer") + } + + // ... and then we check that it hasn't changed + if formattedWriter.Format() != currentFormat { + return nil, errors.New("inner writer cannot have his own format") + } + + bufferedWriter, err := NewBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod)) + if err != nil { + return nil, err + } + + return NewFormattedWriter(bufferedWriter, currentFormat) +} + +// Returns an error if node has any attributes not listed in expectedAttrs. 
+func checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error { + for attr := range node.attributes { + isExpected := false + for _, expected := range expectedAttrs { + if attr == expected { + isExpected = true + break + } + } + if !isExpected { + return newUnexpectedAttributeError(node.name, attr) + } + } + + return nil +} + +type expectedElementInfo struct { + name string + mandatory bool + multiple bool +} + +func optionalElement(name string) expectedElementInfo { + return expectedElementInfo{name, false, false} +} +func mandatoryElement(name string) expectedElementInfo { + return expectedElementInfo{name, true, false} +} +func multipleElements(name string) expectedElementInfo { + return expectedElementInfo{name, false, true} +} +func multipleMandatoryElements(name string) expectedElementInfo { + return expectedElementInfo{name, true, true} +} + +func checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error { + for _, element := range elements { + count := 0 + for _, child := range node.children { + if child.name == element.name { + count++ + } + } + + if count == 0 && element.mandatory { + return errors.New(node.name + " does not have mandatory subnode - " + element.name) + } + if count > 1 && !element.multiple { + return errors.New(node.name + " has more then one subnode - " + element.name) + } + } + + for _, child := range node.children { + isExpected := false + for _, element := range elements { + if child.name == element.name { + isExpected = true + } + } + + if !isExpected { + return errors.New(node.name + " has unexpected child: " + child.name) + } + } + + return nil +} diff --git a/agent/vendor/github.com/cihub/seelog/common_closer.go b/agent/vendor/github.com/cihub/seelog/common_closer.go new file mode 100644 index 00000000000..1319c22179c --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/common_closer.go @@ -0,0 +1,25 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog diff --git a/agent/vendor/github.com/cihub/seelog/common_constraints.go b/agent/vendor/github.com/cihub/seelog/common_constraints.go new file mode 100644 index 00000000000..7ec2fe5b16b --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/common_constraints.go @@ -0,0 +1,162 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "strings" +) + +// Represents constraints which form a general rule for log levels selection +type logLevelConstraints interface { + IsAllowed(level LogLevel) bool +} + +// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels. +type minMaxConstraints struct { + min LogLevel + max LogLevel +} + +// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels. +func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) { + if min > max { + return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max) + } + if min < TraceLvl || min > CriticalLvl { + return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min) + } + if max < TraceLvl || max > CriticalLvl { + return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. 
Got max: %d", max) + } + + return &minMaxConstraints{min, max}, nil +} + +// IsAllowed returns true, if log level is in [min, max] range (inclusive). +func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool { + return level >= minMaxConstr.min && level <= minMaxConstr.max +} + +func (minMaxConstr *minMaxConstraints) String() string { + return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max) +} + +//======================================================= + +// A listConstraints represents constraints which use allowed log levels list. +type listConstraints struct { + allowedLevels map[LogLevel]bool +} + +// NewListConstraints creates a new listConstraints struct with the specified allowed levels. +func NewListConstraints(allowList []LogLevel) (*listConstraints, error) { + if allowList == nil { + return nil, errors.New("list can't be nil") + } + + allowLevels, err := createMapFromList(allowList) + if err != nil { + return nil, err + } + err = validateOffLevel(allowLevels) + if err != nil { + return nil, err + } + + return &listConstraints{allowLevels}, nil +} + +func (listConstr *listConstraints) String() string { + allowedList := "List: " + + listLevel := make([]string, len(listConstr.allowedLevels)) + + var logLevel LogLevel + i := 0 + for logLevel = TraceLvl; logLevel <= Off; logLevel++ { + if listConstr.allowedLevels[logLevel] { + listLevel[i] = logLevel.String() + i++ + } + } + + allowedList += strings.Join(listLevel, ",") + + return allowedList +} + +func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) { + allowedLevels := make(map[LogLevel]bool, 0) + for _, level := range allowedList { + if level < TraceLvl || level > Off { + return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. 
Got level: %d", level) + } + allowedLevels[level] = true + } + return allowedLevels, nil +} +func validateOffLevel(allowedLevels map[LogLevel]bool) error { + if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 { + return errors.New("logLevel Off cant be mixed with other levels") + } + + return nil +} + +// IsAllowed returns true, if log level is in allowed log levels list. +// If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values. +func (listConstr *listConstraints) IsAllowed(level LogLevel) bool { + for l := range listConstr.allowedLevels { + if l == level && level != Off { + return true + } + } + + return false +} + +// AllowedLevels returns allowed levels configuration as a map. +func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool { + return listConstr.allowedLevels +} + +//======================================================= + +type offConstraints struct { +} + +func NewOffConstraints() (*offConstraints, error) { + return &offConstraints{}, nil +} + +func (offConstr *offConstraints) IsAllowed(level LogLevel) bool { + return false +} + +func (offConstr *offConstraints) String() string { + return "Off constraint" +} diff --git a/agent/vendor/github.com/cihub/seelog/common_context.go b/agent/vendor/github.com/cihub/seelog/common_context.go new file mode 100644 index 00000000000..230a76ca186 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/common_context.go @@ -0,0 +1,234 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
var (
	// workingDir is the process working directory (slashed, with trailing "/"),
	// used to shorten file paths; falls back to "/" if Getwd fails.
	workingDir = "/"
	// stackCache memoizes caller info per program counter so repeated log
	// calls from the same site avoid the runtime lookup.
	stackCache     map[uintptr]*logContext
	stackCacheLock sync.RWMutex
)

func init() {
	wd, err := os.Getwd()
	if err == nil {
		workingDir = filepath.ToSlash(wd) + "/"
	}
	stackCache = make(map[uintptr]*logContext)
}

// LogContextInterface represents runtime caller context.
type LogContextInterface interface {
	// Caller's function name.
	Func() string
	// Caller's line number.
	Line() int
	// Caller's file short path (in slashed form).
	ShortPath() string
	// Caller's file full path (in slashed form).
	FullPath() string
	// Caller's file name (without path).
	FileName() string
	// True if the context is correct and may be used.
	// If false, then an error in context evaluation occurred and
	// all its other data may be corrupted.
	IsValid() bool
	// Time when log function was called.
	CallTime() time.Time
	// Custom context that can be set by calling logger.SetContext
	CustomContext() interface{}
}

// currentContext returns the context of the caller.
func currentContext(custom interface{}) (LogContextInterface, error) {
	return specifyContext(1, custom)
}

// extractCallerInfo resolves function/file/line details for the caller that is
// 'skip' frames above this function, caching the result per program counter.
func extractCallerInfo(skip int) (*logContext, error) {
	var stack [1]uintptr
	if runtime.Callers(skip+1, stack[:]) != 1 {
		return nil, errors.New("error during runtime.Callers")
	}
	pc := stack[0]

	// Do we have a cache entry for this call site?
	stackCacheLock.RLock()
	ctx, ok := stackCache[pc]
	stackCacheLock.RUnlock()
	if ok {
		return ctx, nil
	}

	// Look up the details of the given caller.
	funcInfo := runtime.FuncForPC(pc)
	if funcInfo == nil {
		return nil, errors.New("error during runtime.FuncForPC")
	}

	var shortPath string
	fullPath, line := funcInfo.FileLine(pc)
	if strings.HasPrefix(fullPath, workingDir) {
		shortPath = fullPath[len(workingDir):]
	} else {
		shortPath = fullPath
	}
	funcName := funcInfo.Name()
	// NOTE(review): trimming workingDir from the *function name* looks odd but
	// is kept as-is to preserve existing behavior.
	if strings.HasPrefix(funcName, workingDir) {
		funcName = funcName[len(workingDir):]
	}

	ctx = &logContext{
		funcName:  funcName,
		line:      line,
		shortPath: shortPath,
		fullPath:  fullPath,
		fileName:  filepath.Base(fullPath),
	}

	// Save the details in the cache; note that it's possible we might
	// have written an entry into the map in between the test above and
	// this section, but the behaviour is still correct.
	stackCacheLock.Lock()
	stackCache[pc] = ctx
	stackCacheLock.Unlock()
	return ctx, nil
}

// specifyContext returns the context of the function with 'skip' extra stack
// frames relative to the caller. If skip == 0 it behaves like currentContext.
// A context is returned in any situation, even on error; in the error case it
// is an errorContext, which contains no paths or names but states that they
// can't be extracted.
func specifyContext(skip int, custom interface{}) (LogContextInterface, error) {
	callTime := time.Now()
	if skip < 0 {
		// Idiom fix: fmt.Errorf with no format arguments -> errors.New.
		err := errors.New("can not skip negative stack frames")
		return &errorContext{callTime, err}, err
	}
	caller, err := extractCallerInfo(skip + 2)
	if err != nil {
		return &errorContext{callTime, err}, err
	}
	ctx := new(logContext)
	*ctx = *caller
	ctx.callTime = callTime
	ctx.custom = custom
	return ctx, nil
}

// logContext represents a normal runtime caller context.
type logContext struct {
	funcName  string
	line      int
	shortPath string
	fullPath  string
	fileName  string
	callTime  time.Time
	custom    interface{}
}

func (context *logContext) IsValid() bool {
	return true
}

func (context *logContext) Func() string {
	return context.funcName
}

func (context *logContext) Line() int {
	return context.line
}

func (context *logContext) ShortPath() string {
	return context.shortPath
}

func (context *logContext) FullPath() string {
	return context.fullPath
}

func (context *logContext) FileName() string {
	return context.fileName
}

func (context *logContext) CallTime() time.Time {
	return context.callTime
}

func (context *logContext) CustomContext() interface{} {
	return context.custom
}

// errorContext represents a context whose evaluation failed; its accessors
// return error descriptions instead of real data.
type errorContext struct {
	errorTime time.Time
	err       error
}

func (errContext *errorContext) getErrorText(prefix string) string {
	return fmt.Sprintf("%s() error: %s", prefix, errContext.err)
}

func (errContext *errorContext) IsValid() bool {
	return false
}

func (errContext *errorContext) Line() int {
	return -1
}

func (errContext *errorContext) Func() string {
	return errContext.getErrorText("Func")
}

func (errContext *errorContext) ShortPath() string {
	return errContext.getErrorText("ShortPath")
}

func (errContext *errorContext) FullPath() string {
	return errContext.getErrorText("FullPath")
}

func (errContext *errorContext) FileName() string {
	return errContext.getErrorText("FileName")
}

func (errContext *errorContext) CallTime() time.Time {
	return errContext.errorTime
}

func (errContext *errorContext) CustomContext() interface{} {
	return nil
}
+ +package seelog + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Used in rules creation to validate input file and func filters +var ( + fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`) + funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`) +) + +// LogLevelException represents an exceptional case used when you need some specific files or funcs to +// override general constraints and to use their own. +type LogLevelException struct { + funcPatternParts []string + filePatternParts []string + + funcPattern string + filePattern string + + constraints logLevelConstraints +} + +// NewLogLevelException creates a new exception. +func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) { + if constraints == nil { + return nil, errors.New("constraints can not be nil") + } + + exception := new(LogLevelException) + + err := exception.initFuncPatternParts(funcPattern) + if err != nil { + return nil, err + } + exception.funcPattern = strings.Join(exception.funcPatternParts, "") + + err = exception.initFilePatternParts(filePattern) + if err != nil { + return nil, err + } + exception.filePattern = strings.Join(exception.filePatternParts, "") + + exception.constraints = constraints + + return exception, nil +} + +// MatchesContext returns true if context matches the patterns of this LogLevelException +func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool { + return logLevelEx.match(context.Func(), context.FullPath()) +} + +// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException +func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool { + return logLevelEx.constraints.IsAllowed(level) +} + +// FuncPattern returns the function pattern of a exception +func (logLevelEx *LogLevelException) FuncPattern() string { + return logLevelEx.funcPattern +} + +// FuncPattern returns the file 
pattern of a exception +func (logLevelEx *LogLevelException) FilePattern() string { + return logLevelEx.filePattern +} + +// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern on parts +func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) { + + if funcFormatValidator.FindString(funcPattern) != funcPattern { + return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)") + } + + logLevelEx.funcPatternParts = splitPattern(funcPattern) + return nil +} + +// Checks whether the file filter has a correct format and splits file patterns using splitPattern. +func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) { + + if fileFormatValidator.FindString(filePattern) != filePattern { + return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)") + } + + logLevelEx.filePatternParts = splitPattern(filePattern) + return err +} + +func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool { + if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) { + return false + } + return stringMatchesPattern(logLevelEx.filePatternParts, filePath) +} + +func (logLevelEx *LogLevelException) String() string { + str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern) + + if logLevelEx.constraints != nil { + str += fmt.Sprintf("Constr: %s", logLevelEx.constraints) + } else { + str += "nil" + } + + return str +} + +// splitPattern splits pattern into strings and asterisks. 
Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"] +func splitPattern(pattern string) []string { + var patternParts []string + var lastChar rune + for _, char := range pattern { + if char == '*' { + if lastChar != '*' { + patternParts = append(patternParts, "*") + } + } else { + if len(patternParts) != 0 && lastChar != '*' { + patternParts[len(patternParts)-1] += string(char) + } else { + patternParts = append(patternParts, string(char)) + } + } + lastChar = char + } + + return patternParts +} + +// stringMatchesPattern check whether testString matches pattern with asterisks. +// Standard regexp functionality is not used here because of performance issues. +func stringMatchesPattern(patternparts []string, testString string) bool { + if len(patternparts) == 0 { + return len(testString) == 0 + } + + part := patternparts[0] + if part != "*" { + index := strings.Index(testString, part) + if index == 0 { + return stringMatchesPattern(patternparts[1:], testString[len(part):]) + } + } else { + if len(patternparts) == 1 { + return true + } + + newTestString := testString + part = patternparts[1] + for { + index := strings.Index(newTestString, part) + if index == -1 { + break + } + + newTestString = newTestString[index+len(part):] + result := stringMatchesPattern(patternparts[2:], newTestString) + if result { + return true + } + } + } + return false +} diff --git a/agent/vendor/github.com/cihub/seelog/common_flusher.go b/agent/vendor/github.com/cihub/seelog/common_flusher.go new file mode 100644 index 00000000000..0ef077c8daf --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/common_flusher.go @@ -0,0 +1,31 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +// flusherInterface represents all objects that have to do cleanup +// at certain moments of time (e.g. before app shutdown to avoid data loss) +type flusherInterface interface { + Flush() +} diff --git a/agent/vendor/github.com/cihub/seelog/common_loglevel.go b/agent/vendor/github.com/cihub/seelog/common_loglevel.go new file mode 100644 index 00000000000..d54ecf27090 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/common_loglevel.go @@ -0,0 +1,81 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +// Log level type +type LogLevel uint8 + +// Log levels +const ( + TraceLvl = iota + DebugLvl + InfoLvl + WarnLvl + ErrorLvl + CriticalLvl + Off +) + +// Log level string representations (used in configuration files) +const ( + TraceStr = "trace" + DebugStr = "debug" + InfoStr = "info" + WarnStr = "warn" + ErrorStr = "error" + CriticalStr = "critical" + OffStr = "off" +) + +var levelToStringRepresentations = map[LogLevel]string{ + TraceLvl: TraceStr, + DebugLvl: DebugStr, + InfoLvl: InfoStr, + WarnLvl: WarnStr, + ErrorLvl: ErrorStr, + CriticalLvl: CriticalStr, + Off: OffStr, +} + +// LogLevelFromString parses a string and returns a corresponding log level, if sucessfull. 
+func LogLevelFromString(levelStr string) (level LogLevel, found bool) { + for lvl, lvlStr := range levelToStringRepresentations { + if lvlStr == levelStr { + return lvl, true + } + } + + return 0, false +} + +// LogLevelToString returns seelog string representation for a specified level. Returns "" for invalid log levels. +func (level LogLevel) String() string { + levelStr, ok := levelToStringRepresentations[level] + if ok { + return levelStr + } + + return "" +} diff --git a/agent/vendor/github.com/cihub/seelog/dispatch_custom.go b/agent/vendor/github.com/cihub/seelog/dispatch_custom.go new file mode 100644 index 00000000000..383a77058c8 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/dispatch_custom.go @@ -0,0 +1,242 @@ +// Copyright (c) 2013 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +var registeredReceivers = make(map[string]reflect.Type) + +// RegisterReceiver records a custom receiver type, identified by a value +// of that type (second argument), under the specified name. Registered +// names can be used in the "name" attribute of config items. +// +// RegisterReceiver takes the type of the receiver argument, without taking +// the value into the account. So do NOT enter any data to the second argument +// and only call it like: +// RegisterReceiver("somename", &MyReceiverType{}) +// +// After that, when a '' config tag with this name is used, +// a receiver of the specified type would be instantiated. Check +// CustomReceiver comments for interface details. +// +// NOTE 1: RegisterReceiver fails if you attempt to register different types +// with the same name. +// +// NOTE 2: RegisterReceiver registers those receivers that must be used in +// the configuration files ( items). Basically it is just the way +// you tell seelog config parser what should it do when it meets a +// tag with a specific name and data attributes. +// +// But If you are only using seelog as a proxy to an already instantiated +// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver. 
+func RegisterReceiver(name string, receiver CustomReceiver) { + newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface()) + if t, ok := registeredReceivers[name]; ok && t != newType { + panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType)) + } + registeredReceivers[name] = newType +} + +func customReceiverByName(name string) (creceiver CustomReceiver, err error) { + rt, ok := registeredReceivers[name] + if !ok { + return nil, fmt.Errorf("custom receiver name not registered: '%s'", name) + } + v, ok := reflect.New(rt).Interface().(CustomReceiver) + if !ok { + return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name) + } + return v, nil +} + +// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init +// func when custom receiver is being initialized. +type CustomReceiverInitArgs struct { + // XmlCustomAttrs represent '' xml config item attributes that + // start with "data-". Map keys will be the attribute names without the "data-". + // Map values will the those attribute values. + // + // E.g. if you have a '' + // you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2" + // + // Note that in custom items you can only use allowed attributes, like "name" and + // your custom attributes, starting with "data-". Any other will lead to a + // parsing error. + XmlCustomAttrs map[string]string +} + +// CustomReceiver is the interface that external custom seelog message receivers +// must implement in order to be able to process seelog messages. Those receivers +// are set in the xml config file using the tag. Check receivers reference +// wiki section on that. +// +// Use seelog.RegisterReceiver on the receiver type before using it. +type CustomReceiver interface { + // ReceiveMessage is called when the custom receiver gets seelog message from + // a parent dispatcher. 
+ // + // Message, level and context args represent all data that was included in the seelog + // message at the time it was logged. + // + // The formatting is already applied to the message and depends on the config + // like with any other receiver. + // + // If you would like to inform seelog of an error that happened during the handling of + // the message, return a non-nil error. This way you'll end up seeing your error like + // any other internal seelog error. + ReceiveMessage(message string, level LogLevel, context LogContextInterface) error + + // AfterParse is called immediately after your custom receiver is instantiated by + // the xml config parser. So, if you need to do any startup logic after config parsing, + // like opening file or allocating any resources after the receiver is instantiated, do it here. + // + // If this func returns a non-nil error, then the loading procedure will fail. E.g. + // if you are loading a seelog xml config, the parser would not finish the loading + // procedure and inform about an error like with any other config error. + // + // If your custom logger needs some configuration, you can use custom attributes in + // your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments. + // + // IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used + // to create seelog proxy logger using the custom receiver. This func is only called when + // receiver is instantiated from a config. + AfterParse(initArgs CustomReceiverInitArgs) error + + // Flush is called when the custom receiver gets a 'flush' directive from a + // parent receiver. If custom receiver implements some kind of buffering or + // queing, then the appropriate reaction on a flush message is synchronous + // flushing of all those queues/buffers. If custom receiver doesn't have + // such mechanisms, then flush implementation may be left empty. 
+ Flush() + + // Close is called when the custom receiver gets a 'close' directive from a + // parent receiver. This happens when a top-level seelog dispatcher is sending + // 'close' to all child nodes and it means that current seelog logger is being closed. + // If you need to do any cleanup after your custom receiver is done, you should do + // it here. + Close() error +} + +type customReceiverDispatcher struct { + formatter *formatter + innerReceiver CustomReceiver + customReceiverName string + usedArgs CustomReceiverInitArgs +} + +// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created +// using a tag in the config file. +func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { + if formatter == nil { + return nil, errors.New("formatter cannot be nil") + } + if len(customReceiverName) == 0 { + return nil, errors.New("custom receiver name cannot be empty") + } + + creceiver, err := customReceiverByName(customReceiverName) + if err != nil { + return nil, err + } + err = creceiver.AfterParse(cArgs) + if err != nil { + return nil, err + } + disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs} + + return disp, nil +} + +// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using +// a specific CustomReceiver value instead of instantiating a new one by type. +func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { + if formatter == nil { + return nil, errors.New("formatter cannot be nil") + } + if customReceiver == nil { + return nil, errors.New("customReceiver cannot be nil") + } + disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs} + + return disp, nil +} + +// CustomReceiver implementation. 
Check CustomReceiver comments. +func (disp *customReceiverDispatcher) Dispatch( + message string, + level LogLevel, + context LogContextInterface, + errorFunc func(err error)) { + + defer func() { + if err := recover(); err != nil { + errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err)) + } + }() + + err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context) + if err != nil { + errorFunc(err) + } +} + +// CustomReceiver implementation. Check CustomReceiver comments. +func (disp *customReceiverDispatcher) Flush() { + disp.innerReceiver.Flush() +} + +// CustomReceiver implementation. Check CustomReceiver comments. +func (disp *customReceiverDispatcher) Close() error { + disp.innerReceiver.Flush() + + err := disp.innerReceiver.Close() + if err != nil { + return err + } + + return nil +} + +func (disp *customReceiverDispatcher) String() string { + datas := "" + skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs)) + for i := range disp.usedArgs.XmlCustomAttrs { + skeys = append(skeys, i) + } + sort.Strings(skeys) + for _, key := range skeys { + datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key]) + } + + str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n", + disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver) + + return str +} diff --git a/agent/vendor/github.com/cihub/seelog/dispatch_dispatcher.go b/agent/vendor/github.com/cihub/seelog/dispatch_dispatcher.go new file mode 100644 index 00000000000..2bd3b4a4c9f --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/dispatch_dispatcher.go @@ -0,0 +1,189 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "io" +) + +// A dispatcherInterface is used to dispatch message to all underlying receivers. +// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc. +// Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs +// an immediate cleanup of all data that is stored in the receivers +type dispatcherInterface interface { + flusherInterface + io.Closer + Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) +} + +type dispatcher struct { + formatter *formatter + writers []*formattedWriter + dispatchers []dispatcherInterface +} + +// Creates a dispatcher which dispatches data to a list of receivers. 
+// Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned +func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) { + if formatter == nil { + return nil, errors.New("formatter cannot be nil") + } + if receivers == nil || len(receivers) == 0 { + return nil, errors.New("receivers cannot be nil or empty") + } + + disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)} + for _, receiver := range receivers { + writer, ok := receiver.(*formattedWriter) + if ok { + disp.writers = append(disp.writers, writer) + continue + } + + ioWriter, ok := receiver.(io.Writer) + if ok { + writer, err := NewFormattedWriter(ioWriter, disp.formatter) + if err != nil { + return nil, err + } + disp.writers = append(disp.writers, writer) + continue + } + + dispInterface, ok := receiver.(dispatcherInterface) + if ok { + disp.dispatchers = append(disp.dispatchers, dispInterface) + continue + } + + return nil, errors.New("method can receive either io.Writer or dispatcherInterface") + } + + return disp, nil +} + +func (disp *dispatcher) Dispatch( + message string, + level LogLevel, + context LogContextInterface, + errorFunc func(err error)) { + + for _, writer := range disp.writers { + err := writer.Write(message, level, context) + if err != nil { + errorFunc(err) + } + } + + for _, dispInterface := range disp.dispatchers { + dispInterface.Dispatch(message, level, context, errorFunc) + } +} + +// Flush goes through all underlying writers which implement flusherInterface interface +// and closes them. 
Recursively performs the same action for underlying dispatchers +func (disp *dispatcher) Flush() { + for _, disp := range disp.Dispatchers() { + disp.Flush() + } + + for _, formatWriter := range disp.Writers() { + flusher, ok := formatWriter.Writer().(flusherInterface) + if ok { + flusher.Flush() + } + } +} + +// Close goes through all underlying writers which implement io.Closer interface +// and closes them. Recursively performs the same action for underlying dispatchers +// Before closing, writers are flushed to prevent loss of any buffered data, so +// a call to Flush() func before Close() is not necessary +func (disp *dispatcher) Close() error { + for _, disp := range disp.Dispatchers() { + disp.Flush() + err := disp.Close() + if err != nil { + return err + } + } + + for _, formatWriter := range disp.Writers() { + flusher, ok := formatWriter.Writer().(flusherInterface) + if ok { + flusher.Flush() + } + + closer, ok := formatWriter.Writer().(io.Closer) + if ok { + err := closer.Close() + if err != nil { + return err + } + } + } + + return nil +} + +func (disp *dispatcher) Writers() []*formattedWriter { + return disp.writers +} + +func (disp *dispatcher) Dispatchers() []dispatcherInterface { + return disp.dispatchers +} + +func (disp *dispatcher) String() string { + str := "formatter: " + disp.formatter.String() + "\n" + + str += " ->Dispatchers:" + + if len(disp.dispatchers) == 0 { + str += "none\n" + } else { + str += "\n" + + for _, disp := range disp.dispatchers { + str += fmt.Sprintf(" ->%s", disp) + } + } + + str += " ->Writers:" + + if len(disp.writers) == 0 { + str += "none\n" + } else { + str += "\n" + + for _, writer := range disp.writers { + str += fmt.Sprintf(" ->%s\n", writer) + } + } + + return str +} diff --git a/agent/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go b/agent/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go new file mode 100644 index 00000000000..9de8a72258f --- /dev/null +++ 
b/agent/vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go @@ -0,0 +1,66 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" +) + +// A filterDispatcher writes the given message to underlying receivers only if message log level +// is in the allowed list. +type filterDispatcher struct { + *dispatcher + allowList map[LogLevel]bool +} + +// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels. 
+func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) { + disp, err := createDispatcher(formatter, receivers) + if err != nil { + return nil, err + } + + allows := make(map[LogLevel]bool) + for _, allowLevel := range allowList { + allows[allowLevel] = true + } + + return &filterDispatcher{disp, allows}, nil +} + +func (filter *filterDispatcher) Dispatch( + message string, + level LogLevel, + context LogContextInterface, + errorFunc func(err error)) { + isAllowed, ok := filter.allowList[level] + if ok && isAllowed { + filter.dispatcher.Dispatch(message, level, context, errorFunc) + } +} + +func (filter *filterDispatcher) String() string { + return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher) +} diff --git a/agent/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go b/agent/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go new file mode 100644 index 00000000000..1d0fe7eaccd --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" +) + +// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.) +type splitDispatcher struct { + *dispatcher +} + +func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) { + disp, err := createDispatcher(formatter, receivers) + if err != nil { + return nil, err + } + + return &splitDispatcher{disp}, nil +} + +func (splitter *splitDispatcher) String() string { + return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String()) +} diff --git a/agent/vendor/github.com/cihub/seelog/doc.go b/agent/vendor/github.com/cihub/seelog/doc.go new file mode 100644 index 00000000000..2734c9cbb3e --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/doc.go @@ -0,0 +1,175 @@ +// Copyright (c) 2014 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package seelog implements logging functionality with flexible dispatching, filtering, and formatting. + +Creation + +To create a logger, use one of the following constructors: + func LoggerFromConfigAsBytes + func LoggerFromConfigAsFile + func LoggerFromConfigAsString + func LoggerFromWriterWithMinLevel + func LoggerFromWriterWithMinLevelAndFormat + func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers) +Example: + import log "github.com/cihub/seelog" + + func main() { + logger, err := log.LoggerFromConfigAsFile("seelog.xml") + if err != nil { + panic(err) + } + defer logger.Flush() + ... use logger ... + } +The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some +messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you +explicitly defer flushing all messages before closing. + +Usage + +Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs. 
+Example: + import log "github.com/cihub/seelog" + + func main() { + logger, err := log.LoggerFromConfigAsFile("seelog.xml") + if err != nil { + panic(err) + } + defer logger.Flush() + logger.Trace("test") + logger.Debugf("var = %s", "abc") + } + +Having loggers as variables is convenient if you are writing your own package with internal logging or if you have +several loggers with different options. +But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level +var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs: + import log "github.com/cihub/seelog" + + func main() { + logger, err := log.LoggerFromConfigAsFile("seelog.xml") + if err != nil { + panic(err) + } + log.ReplaceLogger(logger) + defer log.Flush() + log.Trace("test") + log.Debugf("var = %s", "abc") + } +Last lines + log.Trace("test") + log.Debugf("var = %s", "abc") +do the same as + log.Current.Trace("test") + log.Current.Debugf("var = %s", "abc") +In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config. +This way you are able to use package level funcs instead of passing the logger variable. + +Configuration + +Main seelog point is to configure logger via config files and not the code. +The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try +to create a logger using it. + +All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki. 
+There are many sections covering different aspects of seelog, but the most important for understanding configs are: + https://github.com/cihub/seelog/wiki/Constraints-and-exceptions + https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers + https://github.com/cihub/seelog/wiki/Formatting + https://github.com/cihub/seelog/wiki/Logger-types +After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date +list of dispatchers, receivers, formats, and logger types. + +Here is an example config with all these features: + + + + + + + + + + + + + + + + + + + + + +This config represents a logger with adaptive timeout between log messages (check logger types reference) which +logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only +use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test' +this logger prohibits all levels below 'error'. + +Configuration using code + +Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what +you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New* +functions in this package are used to provide such capabilities. + +Here is an example of configuration in code, that demonstrates an async loop logger that logs to a simple split dispatcher with +a console receiver using a specified format and is filtered using a top-level min-max constraints and one expection for +the 'main.go' file. 
So, this is basically a demonstration of configuration of most of the features: + + package main + + import log "github.com/cihub/seelog" + + func main() { + defer log.Flush() + log.Info("Hello from Seelog!") + + consoleWriter, _ := log.NewConsoleWriter() + formatter, _ := log.NewFormatter("%Level %Msg %File%n") + root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter}) + constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl) + specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl}) + ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints) + exceptions := []*log.LogLevelException{ex} + + logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root)) + log.ReplaceLogger(logger) + + log.Trace("This should not be seen") + log.Debug("This should not be seen") + log.Info("Test") + log.Error("Test2") + } + +Examples + +To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples +It contains many example configs and usecases. +*/ +package seelog diff --git a/agent/vendor/github.com/cihub/seelog/format.go b/agent/vendor/github.com/cihub/seelog/format.go new file mode 100644 index 00000000000..ec47b45704b --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/format.go @@ -0,0 +1,466 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// FormatterSymbol is a special symbol used in config files to mark special format aliases. +const ( + FormatterSymbol = '%' +) + +const ( + formatterParameterStart = '(' + formatterParameterEnd = ')' +) + +// Time and date formats used for %Date and %Time aliases. +const ( + DateDefaultFormat = "2006-01-02" + TimeFormat = "15:04:05" +) + +var DefaultMsgFormat = "%Ns [%Level] %Msg%n" + +var ( + DefaultFormatter *formatter + msgonlyformatter *formatter +) + +func init() { + var err error + if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil { + reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err)) + } + if msgonlyformatter, err = NewFormatter("%Msg"); err != nil { + reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err)) + } +} + +// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute +// of the 'format' config item. These special symbols are replaced with context values or special +// strings when message is written to byte receiver. 
+// +// Check https://github.com/cihub/seelog/wiki/Formatting for details. +// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference +// +// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object +// that can be evaluated as string. +type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{} + +// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized +// formatters (such as %Date or %EscM) and custom user formatters. +type FormatterFuncCreator func(param string) FormatterFunc + +var formatterFuncs = map[string]FormatterFunc{ + "Level": formatterLevel, + "Lev": formatterLev, + "LEVEL": formatterLEVEL, + "LEV": formatterLEV, + "l": formatterl, + "Msg": formatterMsg, + "FullPath": formatterFullPath, + "File": formatterFile, + "RelFile": formatterRelFile, + "Func": FormatterFunction, + "FuncShort": FormatterFunctionShort, + "Line": formatterLine, + "Time": formatterTime, + "UTCTime": formatterUTCTime, + "Ns": formatterNs, + "UTCNs": formatterUTCNs, + "r": formatterr, + "n": formattern, + "t": formattert, +} + +var formatterFuncsParameterized = map[string]FormatterFuncCreator{ + "Date": createDateTimeFormatterFunc, + "UTCDate": createUTCDateTimeFormatterFunc, + "EscM": createANSIEscapeFunc, +} + +func errorAliasReserved(name string) error { + return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name) +} + +// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil, +// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and +// it will be treated like the standard parameterized formatter identifiers. +// +// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. 
The general recommendation +// is to call it once in 'init' func of your application or any initializer func. +// +// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters. +// +// Name must only consist of letters (unicode.IsLetter). +// +// Name must not be one of the already registered standard formatter names +// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered +// custom format names. To avoid any potential name conflicts (in future releases), it is recommended +// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword. +func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error { + if _, ok := formatterFuncs[name]; ok { + return errorAliasReserved(name) + } + if _, ok := formatterFuncsParameterized[name]; ok { + return errorAliasReserved(name) + } + formatterFuncsParameterized[name] = creator + return nil +} + +// formatter is used to write messages in a specific format, inserting such additional data +// as log level, date/time, etc. +type formatter struct { + fmtStringOriginal string + fmtString string + formatterFuncs []FormatterFunc +} + +// NewFormatter creates a new formatter using a format string +func NewFormatter(formatString string) (*formatter, error) { + fmtr := new(formatter) + fmtr.fmtStringOriginal = formatString + if err := buildFormatterFuncs(fmtr); err != nil { + return nil, err + } + return fmtr, nil +} + +func buildFormatterFuncs(formatter *formatter) error { + var ( + fsbuf = new(bytes.Buffer) + fsolm1 = len(formatter.fmtStringOriginal) - 1 + ) + for i := 0; i <= fsolm1; i++ { + if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol { + fsbuf.WriteByte(char) + continue + } + // Check if the index is at the end of the string. + if i == fsolm1 { + return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol) + } + // Check if the formatter symbol is doubled and skip it as nonmatching. 
+ if formatter.fmtStringOriginal[i+1] == FormatterSymbol { + fsbuf.WriteRune(FormatterSymbol) + i++ + continue + } + function, ni, err := formatter.extractFormatterFunc(i + 1) + if err != nil { + return err + } + // Append formatting string "%v". + fsbuf.Write([]byte{37, 118}) + i = ni + formatter.formatterFuncs = append(formatter.formatterFuncs, function) + } + formatter.fmtString = fsbuf.String() + return nil +} + +func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) { + letterSequence := formatter.extractLetterSequence(index) + if len(letterSequence) == 0 { + return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index) + } + + function, formatterLength, ok := formatter.findFormatterFunc(letterSequence) + if ok { + return function, index + formatterLength - 1, nil + } + + function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index) + if err != nil { + return nil, 0, err + } + if ok { + return function, index + formatterLength - 1, nil + } + + return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence) +} + +func (formatter *formatter) extractLetterSequence(index int) string { + letters := "" + + bytesToParse := []byte(formatter.fmtStringOriginal[index:]) + runeCount := utf8.RuneCount(bytesToParse) + for i := 0; i < runeCount; i++ { + rune, runeSize := utf8.DecodeRune(bytesToParse) + bytesToParse = bytesToParse[runeSize:] + + if unicode.IsLetter(rune) { + letters += string(rune) + } else { + break + } + } + return letters +} + +func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) { + currentVerb := letters + for i := 0; i < len(letters); i++ { + function, ok := formatterFuncs[currentVerb] + if ok { + return function, len(currentVerb), ok + } + currentVerb = currentVerb[:len(currentVerb)-1] + } + + return nil, 0, false +} + +func (formatter *formatter) 
findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) { + currentVerb := letters + for i := 0; i < len(letters); i++ { + functionCreator, ok := formatterFuncsParameterized[currentVerb] + if ok { + parameter := "" + parameterLen := 0 + isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless + if isVerbEqualsLetters { + userParameter := "" + var err error + userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb)) + if ok { + parameter = userParameter + } else if err != nil { + return nil, 0, false, err + } + } + + return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil + } + + currentVerb = currentVerb[:len(currentVerb)-1] + } + + return nil, 0, false, nil +} + +func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) { + if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart { + return "", 0, false, nil + } + + endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd)) + if endIndex == -1 { + return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s", + startIndex, formatter.fmtStringOriginal[startIndex:]) + } + endIndex += startIndex + + length := endIndex - startIndex + 1 + + return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil +} + +// Format processes a message with special formatters, log level, and context. Returns formatted string +// with all formatter identifiers changed to appropriate values. 
+func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string { + if len(formatter.formatterFuncs) == 0 { + return formatter.fmtString + } + + params := make([]interface{}, len(formatter.formatterFuncs)) + for i, function := range formatter.formatterFuncs { + params[i] = function(message, level, context) + } + + return fmt.Sprintf(formatter.fmtString, params...) +} + +func (formatter *formatter) String() string { + return formatter.fmtStringOriginal +} + +//===================================================== + +const ( + wrongLogLevel = "WRONG_LOGLEVEL" + wrongEscapeCode = "WRONG_ESCAPE" +) + +var levelToString = map[LogLevel]string{ + TraceLvl: "Trace", + DebugLvl: "Debug", + InfoLvl: "Info", + WarnLvl: "Warn", + ErrorLvl: "Error", + CriticalLvl: "Critical", + Off: "Off", +} + +var levelToShortString = map[LogLevel]string{ + TraceLvl: "Trc", + DebugLvl: "Dbg", + InfoLvl: "Inf", + WarnLvl: "Wrn", + ErrorLvl: "Err", + CriticalLvl: "Crt", + Off: "Off", +} + +var levelToShortestString = map[LogLevel]string{ + TraceLvl: "t", + DebugLvl: "d", + InfoLvl: "i", + WarnLvl: "w", + ErrorLvl: "e", + CriticalLvl: "c", + Off: "o", +} + +func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} { + levelStr, ok := levelToString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} { + levelStr, ok := levelToShortString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} { + return strings.ToTitle(formatterLevel(message, level, context).(string)) +} + +func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} { + return strings.ToTitle(formatterLev(message, level, context).(string)) +} + +func formatterl(message string, level LogLevel, context 
LogContextInterface) interface{} { + levelStr, ok := levelToShortestString[level] + if !ok { + return wrongLogLevel + } + return levelStr +} + +func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} { + return message +} + +func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} { + return context.FullPath() +} + +func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} { + return context.FileName() +} + +func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} { + return context.ShortPath() +} + +func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} { + return context.Func() +} + +func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} { + f := context.Func() + spl := strings.Split(f, ".") + return spl[len(spl)-1] +} + +func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} { + return context.Line() +} + +func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().Format(TimeFormat) +} + +func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().UTC().Format(TimeFormat) +} + +func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().UnixNano() +} + +func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().UTC().UnixNano() +} + +func formatterr(message string, level LogLevel, context LogContextInterface) interface{} { + return "\r" +} + +func formattern(message string, level LogLevel, context LogContextInterface) interface{} { + return "\n" +} + +func formattert(message string, level LogLevel, context LogContextInterface) interface{} { + return 
"\t" +} + +func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { + format := dateTimeFormat + if format == "" { + format = DateDefaultFormat + } + return func(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().Format(format) + } +} + +func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc { + format := dateTimeFormat + if format == "" { + format = DateDefaultFormat + } + return func(message string, level LogLevel, context LogContextInterface) interface{} { + return context.CallTime().UTC().Format(format) + } +} + +func createANSIEscapeFunc(escapeCodeString string) FormatterFunc { + return func(message string, level LogLevel, context LogContextInterface) interface{} { + if len(escapeCodeString) == 0 { + return wrongEscapeCode + } + + return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString) + } +} diff --git a/agent/vendor/github.com/cihub/seelog/internals_baseerror.go b/agent/vendor/github.com/cihub/seelog/internals_baseerror.go new file mode 100644 index 00000000000..c0b271d7ddd --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/internals_baseerror.go @@ -0,0 +1,10 @@ +package seelog + +// Base struct for custom errors. +type baseError struct { + message string +} + +func (be baseError) Error() string { + return be.message +} diff --git a/agent/vendor/github.com/cihub/seelog/internals_fsutils.go b/agent/vendor/github.com/cihub/seelog/internals_fsutils.go new file mode 100644 index 00000000000..c0a0e0e4686 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/internals_fsutils.go @@ -0,0 +1,320 @@ +package seelog + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// File and directory permitions. +const ( + defaultFilePermissions = 0666 + defaultDirectoryPermissions = 0767 +) + +const ( + // Max number of directories can be read asynchronously. 
+ maxDirNumberReadAsync = 1000 +) + +type cannotOpenFileError struct { + baseError +} + +func newCannotOpenFileError(fname string) *cannotOpenFileError { + return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}} +} + +type notDirectoryError struct { + baseError +} + +func newNotDirectoryError(dname string) *notDirectoryError { + return ¬DirectoryError{baseError{message: dname + " is not directory"}} +} + +// fileFilter is a filtering criteria function for '*os.File'. +// Must return 'false' to set aside the given file. +type fileFilter func(os.FileInfo, *os.File) bool + +// filePathFilter is a filtering creteria function for file path. +// Must return 'false' to set aside the given file. +type filePathFilter func(filePath string) bool + +// GetSubdirNames returns a list of directories found in +// the given one with dirPath. +func getSubdirNames(dirPath string) ([]string, error) { + fi, err := os.Stat(dirPath) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, newNotDirectoryError(dirPath) + } + dd, err := os.Open(dirPath) + // Cannot open file. + if err != nil { + if dd != nil { + dd.Close() + } + return nil, err + } + defer dd.Close() + // TODO: Improve performance by buffering reading. + allEntities, err := dd.Readdir(-1) + if err != nil { + return nil, err + } + subDirs := []string{} + for _, entity := range allEntities { + if entity.IsDir() { + subDirs = append(subDirs, entity.Name()) + } + } + return subDirs, nil +} + +// getSubdirAbsPaths recursively visit all the subdirectories +// starting from the given directory and returns absolute paths for them. +func getAllSubdirAbsPaths(dirPath string) (res []string, err error) { + dps, err := getSubdirAbsPaths(dirPath) + if err != nil { + res = []string{} + return + } + res = append(res, dps...) + for _, dp := range dps { + sdps, err := getAllSubdirAbsPaths(dp) + if err != nil { + return []string{}, err + } + res = append(res, sdps...) 
+ } + return +} + +// getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory. +// Input: (I1) dirPath - absolute path of a directory in question. +// Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation. +// Remark: If error (O2) is non-nil then (O1) is nil and vice versa. +func getSubdirAbsPaths(dirPath string) ([]string, error) { + sdns, err := getSubdirNames(dirPath) + if err != nil { + return nil, err + } + rsdns := []string{} + for _, sdn := range sdns { + rsdns = append(rsdns, filepath.Join(dirPath, sdn)) + } + return rsdns, nil +} + +// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory. +// Remark: Ignores files for which fileFilter returns false +func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) { + dfi, err := os.Open(dirPath) + if err != nil { + return nil, newCannotOpenFileError("Cannot open directory " + dirPath) + } + defer dfi.Close() + // Size of read buffer (i.e. chunk of items read at a time). + rbs := 64 + resFiles := []*os.File{} +L: + for { + // Read directory entities by reasonable chuncks + // to prevent overflows on big number of files. + fis, e := dfi.Readdir(rbs) + switch e { + // It's OK. + case nil: + // Do nothing, just continue cycle. + case io.EOF: + break L + // Something went wrong. + default: + return nil, e + } + // THINK: Maybe, use async running. + for _, fi := range fis { + // NB: On Linux this could be a problem as + // there are lots of file types available. + if !fi.IsDir() { + f, e := os.Open(filepath.Join(dirPath, fi.Name())) + if e != nil { + if f != nil { + f.Close() + } + // THINK: Add nil as indicator that a problem occurred. + resFiles = append(resFiles, nil) + continue + } + // Check filter condition. 
+ if fFilter != nil && !fFilter(fi, f) { + continue + } + resFiles = append(resFiles, f) + } + } + } + return resFiles, nil +} + +func isRegular(m os.FileMode) bool { + return m&os.ModeType == 0 +} + +// getDirFilePaths return full paths of the files located in the directory. +// Remark: Ignores files for which fileFilter returns false. +func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) { + dfi, err := os.Open(dirPath) + if err != nil { + return nil, newCannotOpenFileError("Cannot open directory " + dirPath) + } + defer dfi.Close() + + var absDirPath string + if !filepath.IsAbs(dirPath) { + absDirPath, err = filepath.Abs(dirPath) + if err != nil { + return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error()) + } + } else { + absDirPath = dirPath + } + + // TODO: check if dirPath is really directory. + // Size of read buffer (i.e. chunk of items read at a time). + rbs := 2 << 5 + filePaths := []string{} + + var fp string +L: + for { + // Read directory entities by reasonable chuncks + // to prevent overflows on big number of files. + fis, e := dfi.Readdir(rbs) + switch e { + // It's OK. + case nil: + // Do nothing, just continue cycle. + case io.EOF: + break L + // Indicate that something went wrong. + default: + return nil, e + } + // THINK: Maybe, use async running. + for _, fi := range fis { + // NB: Should work on every Windows and non-Windows OS. + if isRegular(fi.Mode()) { + if pathIsName { + fp = fi.Name() + } else { + // Build full path of a file. + fp = filepath.Join(absDirPath, fi.Name()) + } + // Check filter condition. + if fpFilter != nil && !fpFilter(fp) { + continue + } + filePaths = append(filePaths, fp) + } + } + } + return filePaths, nil +} + +// getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs +// in map 'filesInDirMap': Key - directory name, value - *os.File slice. 
+func getOpenFilesByDirectoryAsync( + dirPaths []string, + fFilter fileFilter, + filesInDirMap map[string][]*os.File, +) error { + n := len(dirPaths) + if n > maxDirNumberReadAsync { + return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync) + } + type filesInDirResult struct { + DirName string + Files []*os.File + Error error + } + dirFilesChan := make(chan *filesInDirResult, n) + var wg sync.WaitGroup + // Register n goroutines which are going to do work. + wg.Add(n) + for i := 0; i < n; i++ { + // Launch asynchronously the piece of work. + go func(dirPath string) { + fs, e := getOpenFilesInDir(dirPath, fFilter) + dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e} + // Mark the current goroutine as finished (work is done). + wg.Done() + }(dirPaths[i]) + } + // Wait for all goroutines to finish their work. + wg.Wait() + // Close the error channel to let for-range clause + // get all the buffered values without blocking and quit in the end. + close(dirFilesChan) + for fidr := range dirFilesChan { + if fidr.Error == nil { + // THINK: What will happen if the key is already present? + filesInDirMap[fidr.DirName] = fidr.Files + } else { + return fidr.Error + } + } + return nil +} + +// fileExists return flag whether a given file exists +// and operation error if an unclassified failure occurs. +func fileExists(path string) (bool, error) { + _, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// createDirectory makes directory with a given name +// making all parent directories if necessary. 
+func createDirectory(dirPath string) error { + var dPath string + var err error + if !filepath.IsAbs(dirPath) { + dPath, err = filepath.Abs(dirPath) + if err != nil { + return err + } + } else { + dPath = dirPath + } + exists, err := fileExists(dPath) + if err != nil { + return err + } + if exists { + return nil + } + return os.MkdirAll(dPath, os.ModeDir) +} + +// tryRemoveFile gives a try removing the file +// only ignoring an error when the file does not exist. +func tryRemoveFile(filePath string) (err error) { + err = os.Remove(filePath) + if os.IsNotExist(err) { + err = nil + return + } + return +} diff --git a/agent/vendor/github.com/cihub/seelog/internals_xmlnode.go b/agent/vendor/github.com/cihub/seelog/internals_xmlnode.go new file mode 100644 index 00000000000..98588493354 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/internals_xmlnode.go @@ -0,0 +1,175 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "strings" +) + +type xmlNode struct { + name string + attributes map[string]string + children []*xmlNode + value string +} + +func newNode() *xmlNode { + node := new(xmlNode) + node.children = make([]*xmlNode, 0) + node.attributes = make(map[string]string) + return node +} + +func (node *xmlNode) String() string { + str := fmt.Sprintf("<%s", node.name) + + for attrName, attrVal := range node.attributes { + str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal) + } + + str += ">" + str += node.value + + if len(node.children) != 0 { + for _, child := range node.children { + str += fmt.Sprintf("%s", child) + } + } + + str += fmt.Sprintf("", node.name) + + return str +} + +func (node *xmlNode) unmarshal(startEl xml.StartElement) error { + node.name = startEl.Name.Local + + for _, v := range startEl.Attr { + _, alreadyExists := node.attributes[v.Name.Local] + if alreadyExists { + return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'") + } + node.attributes[v.Name.Local] = v.Value + } + + return nil +} + +func (node *xmlNode) add(child *xmlNode) { + if node.children == nil { + node.children = make([]*xmlNode, 0) + } + + node.children = append(node.children, child) +} + +func (node *xmlNode) hasChildren() bool { + return node.children != nil && len(node.children) > 0 +} + +//============================================= + +func 
unmarshalConfig(reader io.Reader) (*xmlNode, error) { + xmlParser := xml.NewDecoder(reader) + + config, err := unmarshalNode(xmlParser, nil) + if err != nil { + return nil, err + } + if config == nil { + return nil, errors.New("xml has no content") + } + + nextConfigEntry, err := unmarshalNode(xmlParser, nil) + if nextConfigEntry != nil { + return nil, errors.New("xml contains more than one root element") + } + + return config, nil +} + +func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) { + firstLoop := true + for { + var tok xml.Token + if firstLoop && curToken != nil { + tok = curToken + firstLoop = false + } else { + tok, err = getNextToken(xmlParser) + if err != nil || tok == nil { + return + } + } + + switch tt := tok.(type) { + case xml.SyntaxError: + err = errors.New(tt.Error()) + return + case xml.CharData: + value := strings.TrimSpace(string([]byte(tt))) + if node != nil { + node.value += value + } + case xml.StartElement: + if node == nil { + node = newNode() + err := node.unmarshal(tt) + if err != nil { + return nil, err + } + } else { + childNode, childErr := unmarshalNode(xmlParser, tok) + if childErr != nil { + return nil, childErr + } + + if childNode != nil { + node.add(childNode) + } else { + return + } + } + case xml.EndElement: + return + } + } +} + +func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) { + if tok, err = xmlParser.Token(); err != nil { + if err == io.EOF { + err = nil + return + } + return + } + + return +} diff --git a/agent/vendor/github.com/cihub/seelog/log.go b/agent/vendor/github.com/cihub/seelog/log.go new file mode 100644 index 00000000000..f775e1fdb34 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/log.go @@ -0,0 +1,307 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "sync" + "time" +) + +const ( + staticFuncCallDepth = 3 // See 'commonLogger.log' method comments + loggerFuncCallDepth = 3 +) + +// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc. +var Current LoggerInterface + +// Default logger that is created from an empty config: "". It is not closed by a ReplaceLogger call. +var Default LoggerInterface + +// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call. 
+var Disabled LoggerInterface + +var pkgOperationsMutex *sync.Mutex + +func init() { + pkgOperationsMutex = new(sync.Mutex) + var err error + + if Default == nil { + Default, err = LoggerFromConfigAsBytes([]byte("")) + } + + if Disabled == nil { + Disabled, err = LoggerFromConfigAsBytes([]byte("")) + } + + if err != nil { + panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error())) + } + + Current = Default +} + +func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) { + if config.LogType == syncloggerTypeFromString { + return NewSyncLogger(&config.logConfig), nil + } else if config.LogType == asyncLooploggerTypeFromString { + return NewAsyncLoopLogger(&config.logConfig), nil + } else if config.LogType == asyncTimerloggerTypeFromString { + logData := config.LoggerData + if logData == nil { + return nil, errors.New("async timer data not set") + } + + asyncInt, ok := logData.(asyncTimerLoggerData) + if !ok { + return nil, errors.New("invalid async timer data") + } + + logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval)) + if !ok { + return nil, err + } + + return logger, nil + } else if config.LogType == adaptiveLoggerTypeFromString { + logData := config.LoggerData + if logData == nil { + return nil, errors.New("adaptive logger parameters not set") + } + + adaptData, ok := logData.(adaptiveLoggerData) + if !ok { + return nil, errors.New("invalid adaptive logger parameters") + } + + logger, err := NewAsyncAdaptiveLogger( + &config.logConfig, + time.Duration(adaptData.MinInterval), + time.Duration(adaptData.MaxInterval), + adaptData.CriticalMsgCount, + ) + if err != nil { + return nil, err + } + + return logger, nil + } + return nil, errors.New("invalid config log type/data") +} + +// UseLogger sets the 'Current' package level logger variable to the specified value. +// This variable is used in all Trace/Debug/... package level convenience funcs. 
+// +// Example: +// +// after calling +// seelog.UseLogger(somelogger) +// the following: +// seelog.Debug("abc") +// will be equal to +// somelogger.Debug("abc") +// +// IMPORTANT: UseLogger do NOT close the previous logger (only flushes it). So if +// you constantly use it to replace loggers and don't close them in other code, you'll +// end up having memory leaks. +// +// To safely replace loggers, use ReplaceLogger. +func UseLogger(logger LoggerInterface) error { + if logger == nil { + return errors.New("logger can not be nil") + } + + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + + oldLogger := Current + Current = logger + + if oldLogger != nil { + oldLogger.Flush() + } + + return nil +} + +// ReplaceLogger acts as UseLogger but the logger that was previously +// used is disposed (except Default and Disabled loggers). +// +// Example: +// import log "github.com/cihub/seelog" +// +// func main() { +// logger, err := log.LoggerFromConfigAsFile("seelog.xml") +// +// if err != nil { +// panic(err) +// } +// +// log.ReplaceLogger(logger) +// defer log.Flush() +// +// log.Trace("test") +// log.Debugf("var = %s", "abc") +// } +func ReplaceLogger(logger LoggerInterface) error { + if logger == nil { + return errors.New("logger can not be nil") + } + + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + + defer func() { + if err := recover(); err != nil { + reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err)) + } + }() + + if Current == Default { + Current.Flush() + } else if Current != nil && !Current.Closed() && Current != Disabled { + Current.Flush() + Current.Close() + } + + Current = logger + + return nil +} + +// Tracef formats message according to format specifier +// and writes to default logger with log level = Trace. 
+func Tracef(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Debugf formats message according to format specifier +// and writes to default logger with log level = Debug. +func Debugf(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Infof formats message according to format specifier +// and writes to default logger with log level = Info. +func Infof(format string, params ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) +} + +// Warnf formats message according to format specifier and writes to default logger with log level = Warn +func Warnf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.warnWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Errorf formats message according to format specifier and writes to default logger with log level = Error +func Errorf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.errorWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Criticalf formats message according to format specifier and writes to default logger with log level = Critical +func Criticalf(format string, params ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogFormattedMessage(format, params) + Current.criticalWithCallDepth(staticFuncCallDepth, message) + return 
errors.New(message.String()) +} + +// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace +func Trace(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug +func Debug(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Info formats message using the default formats for its operands and writes to default logger with log level = Info +func Info(v ...interface{}) { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v)) +} + +// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn +func Warn(v ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogMessage(v) + Current.warnWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Error formats message using the default formats for its operands and writes to default logger with log level = Error +func Error(v ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogMessage(v) + Current.errorWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical +func Critical(v ...interface{}) error { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + message := newLogMessage(v) + Current.criticalWithCallDepth(staticFuncCallDepth, message) + return errors.New(message.String()) +} + +// Flush immediately 
processes all currently queued messages and all currently buffered messages. +// It is a blocking call which returns only after the queue is empty and all the buffers are empty. +// +// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '' receivers) +// , because there is no queue. +// +// Call this method when your app is going to shut down not to lose any log messages. +func Flush() { + pkgOperationsMutex.Lock() + defer pkgOperationsMutex.Unlock() + Current.Flush() +} diff --git a/agent/vendor/github.com/cihub/seelog/logger.go b/agent/vendor/github.com/cihub/seelog/logger.go new file mode 100644 index 00000000000..fc96aed4929 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/logger.go @@ -0,0 +1,370 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "errors" + "fmt" + "os" + "sync" +) + +func reportInternalError(err error) { + fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err) +} + +// LoggerInterface represents structs capable of logging Seelog messages +type LoggerInterface interface { + + // Tracef formats message according to format specifier + // and writes to log with level = Trace. + Tracef(format string, params ...interface{}) + + // Debugf formats message according to format specifier + // and writes to log with level = Debug. + Debugf(format string, params ...interface{}) + + // Infof formats message according to format specifier + // and writes to log with level = Info. + Infof(format string, params ...interface{}) + + // Warnf formats message according to format specifier + // and writes to log with level = Warn. + Warnf(format string, params ...interface{}) error + + // Errorf formats message according to format specifier + // and writes to log with level = Error. + Errorf(format string, params ...interface{}) error + + // Criticalf formats message according to format specifier + // and writes to log with level = Critical. 
+ Criticalf(format string, params ...interface{}) error + + // Trace formats message using the default formats for its operands + // and writes to log with level = Trace + Trace(v ...interface{}) + + // Debug formats message using the default formats for its operands + // and writes to log with level = Debug + Debug(v ...interface{}) + + // Info formats message using the default formats for its operands + // and writes to log with level = Info + Info(v ...interface{}) + + // Warn formats message using the default formats for its operands + // and writes to log with level = Warn + Warn(v ...interface{}) error + + // Error formats message using the default formats for its operands + // and writes to log with level = Error + Error(v ...interface{}) error + + // Critical formats message using the default formats for its operands + // and writes to log with level = Critical + Critical(v ...interface{}) error + + traceWithCallDepth(callDepth int, message fmt.Stringer) + debugWithCallDepth(callDepth int, message fmt.Stringer) + infoWithCallDepth(callDepth int, message fmt.Stringer) + warnWithCallDepth(callDepth int, message fmt.Stringer) + errorWithCallDepth(callDepth int, message fmt.Stringer) + criticalWithCallDepth(callDepth int, message fmt.Stringer) + + // Close flushes all the messages in the logger and closes it. It cannot be used after this operation. + Close() + + // Flush flushes all the messages in the logger. + Flush() + + // Closed returns true if the logger was previously closed. + Closed() bool + + // SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller + // when getting function information needed to print seelog format identifiers such as %Func or %File. + // + // This func may be used when you wrap seelog funcs and want to print caller info of you own + // wrappers instead of seelog func callers. In this case you should set depth = 1. If you then + // wrap your wrapper, you should set depth = 2, etc. 
+ // + // NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect + // function/file names in log files. Do not use it if you are not going to wrap seelog funcs. + // You may reset the value to default using a SetAdditionalStackDepth(0) call. + SetAdditionalStackDepth(depth int) error + + // Sets logger context that can be used in formatter funcs and custom receivers + SetContext(context interface{}) +} + +// innerLoggerInterface is an internal logging interface +type innerLoggerInterface interface { + innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer) + Flush() +} + +// [file path][func name][level] -> [allowed] +type allowedContextCache map[string]map[string]map[LogLevel]bool + +// commonLogger contains all common data needed for logging and contains methods used to log messages. +type commonLogger struct { + config *logConfig // Config used for logging + contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets + closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. 
Must be accessed while holding closedM + closedM sync.RWMutex + m sync.Mutex // Mutex for main operations + unusedLevels []bool + innerLogger innerLoggerInterface + addStackDepth int // Additional stack depth needed for correct seelog caller context detection + customContext interface{} +} + +func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger { + cLogger := new(commonLogger) + + cLogger.config = config + cLogger.contextCache = make(allowedContextCache) + cLogger.unusedLevels = make([]bool, Off) + cLogger.fillUnusedLevels() + cLogger.innerLogger = internalLogger + + return cLogger +} + +func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error { + if depth < 0 { + return fmt.Errorf("negative depth: %d", depth) + } + cLogger.m.Lock() + cLogger.addStackDepth = depth + cLogger.m.Unlock() + return nil +} + +func (cLogger *commonLogger) Tracef(format string, params ...interface{}) { + cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Debugf(format string, params ...interface{}) { + cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Infof(format string, params ...interface{}) { + cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) +} + +func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + cLogger.warnWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + cLogger.errorWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error { + message := newLogFormattedMessage(format, params) + 
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Trace(v ...interface{}) { + cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Debug(v ...interface{}) { + cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Info(v ...interface{}) { + cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) +} + +func (cLogger *commonLogger) Warn(v ...interface{}) error { + message := newLogMessage(v) + cLogger.warnWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Error(v ...interface{}) error { + message := newLogMessage(v) + cLogger.errorWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) Critical(v ...interface{}) error { + message := newLogMessage(v) + cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) + return errors.New(message.String()) +} + +func (cLogger *commonLogger) SetContext(c interface{}) { + cLogger.customContext = c +} + +func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(TraceLvl, message, callDepth) +} + +func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(DebugLvl, message, callDepth) +} + +func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(InfoLvl, message, callDepth) +} + +func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(WarnLvl, message, callDepth) +} + +func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(ErrorLvl, message, callDepth) +} + +func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) { + cLogger.log(CriticalLvl, message, callDepth) + 
cLogger.innerLogger.Flush() +} + +func (cLogger *commonLogger) Closed() bool { + cLogger.closedM.RLock() + defer cLogger.closedM.RUnlock() + return cLogger.closed +} + +func (cLogger *commonLogger) fillUnusedLevels() { + for i := 0; i < len(cLogger.unusedLevels); i++ { + cLogger.unusedLevels[i] = true + } + + cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints) + + for _, exception := range cLogger.config.Exceptions { + cLogger.fillUnusedLevelsByContraint(exception) + } +} + +func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) { + for i := 0; i < len(cLogger.unusedLevels); i++ { + if constraint.IsAllowed(LogLevel(i)) { + cLogger.unusedLevels[i] = false + } + } +} + +// stackCallDepth is used to indicate the call depth of 'log' func. +// This depth level is used in the runtime.Caller(...) call. See +// common_context.go -> specifyContext, extractCallerInfo for details. +func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) { + if cLogger.unusedLevels[level] { + return + } + cLogger.m.Lock() + defer cLogger.m.Unlock() + + if cLogger.Closed() { + return + } + context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext) + // Context errors are not reported because there are situations + // in which context errors are normal Seelog usage cases. For + // example in executables with stripped symbols. + // Error contexts are returned instead. See common_context.go. 
+ /*if err != nil { + reportInternalError(err) + return + }*/ + cLogger.innerLogger.innerLog(level, context, message) +} + +func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) { + defer func() { + if err := recover(); err != nil { + reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err)) + } + }() + if cLogger.config.IsAllowed(level, context) { + cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError) + } +} + +func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool { + funcMap, ok := cLogger.contextCache[context.FullPath()] + if !ok { + funcMap = make(map[string]map[LogLevel]bool, 0) + cLogger.contextCache[context.FullPath()] = funcMap + } + + levelMap, ok := funcMap[context.Func()] + if !ok { + levelMap = make(map[LogLevel]bool, 0) + funcMap[context.Func()] = levelMap + } + + isAllowValue, ok := levelMap[level] + if !ok { + isAllowValue = cLogger.config.IsAllowed(level, context) + levelMap[level] = isAllowValue + } + + return isAllowValue +} + +type logMessage struct { + params []interface{} +} + +type logFormattedMessage struct { + format string + params []interface{} +} + +func newLogMessage(params []interface{}) fmt.Stringer { + message := new(logMessage) + + message.params = params + + return message +} + +func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage { + message := new(logFormattedMessage) + + message.params = params + message.format = format + + return message +} + +func (message *logMessage) String() string { + return fmt.Sprint(message.params...) +} + +func (message *logFormattedMessage) String() string { + return fmt.Sprintf(message.format, message.params...) 
+} diff --git a/agent/vendor/github.com/cihub/seelog/writers_bufferedwriter.go b/agent/vendor/github.com/cihub/seelog/writers_bufferedwriter.go new file mode 100644 index 00000000000..37d75c82e72 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_bufferedwriter.go @@ -0,0 +1,161 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import ( + "bufio" + "errors" + "fmt" + "io" + "sync" + "time" +) + +// bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full +type bufferedWriter struct { + flushPeriod time.Duration // data flushes interval (in microseconds) + bufferMutex *sync.Mutex // mutex for buffer operations syncronization + innerWriter io.Writer // inner writer + buffer *bufio.Writer // buffered wrapper for inner writer + bufferSize int // max size of data chunk in bytes +} + +// NewBufferedWriter creates a new buffered writer struct. +// bufferSize -- size of memory buffer in bytes +// flushPeriod -- period in which data flushes from memory buffer in milliseconds. 0 - turn off this functionality +func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) { + + if innerWriter == nil { + return nil, errors.New("argument is nil: innerWriter") + } + if flushPeriod < 0 { + return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod) + } + + if bufferSize <= 0 { + return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize) + } + + buffer := bufio.NewWriterSize(innerWriter, bufferSize) + + /*if err != nil { + return nil, err + }*/ + + newWriter := new(bufferedWriter) + + newWriter.innerWriter = innerWriter + newWriter.buffer = buffer + newWriter.bufferSize = bufferSize + newWriter.flushPeriod = flushPeriod * 1e6 + newWriter.bufferMutex = new(sync.Mutex) + + if flushPeriod != 0 { + go newWriter.flushPeriodically() + } + + return newWriter, nil +} + +func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) { + bufferedLen := bufWriter.buffer.Buffered() + + n, err = bufWriter.flushInner() + if err != nil { + return + } + + written, writeErr := bufWriter.innerWriter.Write(bytes) + return bufferedLen + written, writeErr +} + +// Sends data to buffer manager. Waits until all buffers are full. 
+func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) { + + bufWriter.bufferMutex.Lock() + defer bufWriter.bufferMutex.Unlock() + + bytesLen := len(bytes) + + if bytesLen > bufWriter.bufferSize { + return bufWriter.writeBigChunk(bytes) + } + + if bytesLen > bufWriter.buffer.Available() { + n, err = bufWriter.flushInner() + if err != nil { + return + } + } + + bufWriter.buffer.Write(bytes) + + return len(bytes), nil +} + +func (bufWriter *bufferedWriter) Close() error { + closer, ok := bufWriter.innerWriter.(io.Closer) + if ok { + return closer.Close() + } + + return nil +} + +func (bufWriter *bufferedWriter) Flush() { + + bufWriter.bufferMutex.Lock() + defer bufWriter.bufferMutex.Unlock() + + bufWriter.flushInner() +} + +func (bufWriter *bufferedWriter) flushInner() (n int, err error) { + bufferedLen := bufWriter.buffer.Buffered() + flushErr := bufWriter.buffer.Flush() + + return bufWriter.buffer.Buffered() - bufferedLen, flushErr +} + +func (bufWriter *bufferedWriter) flushBuffer() { + bufWriter.bufferMutex.Lock() + defer bufWriter.bufferMutex.Unlock() + + bufWriter.buffer.Flush() +} + +func (bufWriter *bufferedWriter) flushPeriodically() { + if bufWriter.flushPeriod > 0 { + ticker := time.NewTicker(bufWriter.flushPeriod) + for { + <-ticker.C + bufWriter.flushBuffer() + } + } +} + +func (bufWriter *bufferedWriter) String() string { + return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod) +} diff --git a/agent/vendor/github.com/cihub/seelog/writers_connwriter.go b/agent/vendor/github.com/cihub/seelog/writers_connwriter.go new file mode 100644 index 00000000000..d199894e729 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_connwriter.go @@ -0,0 +1,144 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "crypto/tls" + "fmt" + "io" + "net" +) + +// connWriter is used to write to a stream-oriented network connection. +type connWriter struct { + innerWriter io.WriteCloser + reconnectOnMsg bool + reconnect bool + net string + addr string + useTLS bool + configTLS *tls.Config +} + +// Creates writer to the address addr on the network netName. 
+// Connection will be opened on each write if reconnectOnMsg = true +func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter { + newWriter := new(connWriter) + + newWriter.net = netName + newWriter.addr = addr + newWriter.reconnectOnMsg = reconnectOnMsg + + return newWriter +} + +// Creates a writer that uses SSL/TLS +func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter { + newWriter := new(connWriter) + + newWriter.net = netName + newWriter.addr = addr + newWriter.reconnectOnMsg = reconnectOnMsg + newWriter.useTLS = true + newWriter.configTLS = config + + return newWriter +} + +func (connWriter *connWriter) Close() error { + if connWriter.innerWriter == nil { + return nil + } + + return connWriter.innerWriter.Close() +} + +func (connWriter *connWriter) Write(bytes []byte) (n int, err error) { + if connWriter.neededConnectOnMsg() { + err = connWriter.connect() + if err != nil { + return 0, err + } + } + + if connWriter.reconnectOnMsg { + defer connWriter.innerWriter.Close() + } + + n, err = connWriter.innerWriter.Write(bytes) + if err != nil { + connWriter.reconnect = true + } + + return +} + +func (connWriter *connWriter) String() string { + return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg) +} + +func (connWriter *connWriter) connect() error { + if connWriter.innerWriter != nil { + connWriter.innerWriter.Close() + connWriter.innerWriter = nil + } + + if connWriter.useTLS { + conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS) + if err != nil { + return err + } + connWriter.innerWriter = conn + + return nil + } + + conn, err := net.Dial(connWriter.net, connWriter.addr) + if err != nil { + return err + } + + tcpConn, ok := conn.(*net.TCPConn) + if ok { + tcpConn.SetKeepAlive(true) + } + + connWriter.innerWriter = conn + + return nil +} + +func (connWriter *connWriter) neededConnectOnMsg() bool { + if 
connWriter.reconnect { + connWriter.reconnect = false + return true + } + + if connWriter.innerWriter == nil { + return true + } + + return connWriter.reconnectOnMsg +} diff --git a/agent/vendor/github.com/cihub/seelog/writers_consolewriter.go b/agent/vendor/github.com/cihub/seelog/writers_consolewriter.go new file mode 100644 index 00000000000..3eb79afa9d9 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_consolewriter.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import "fmt" + +// consoleWriter is used to write to console +type consoleWriter struct { +} + +// Creates a new console writer. Returns error, if the console writer couldn't be created. +func NewConsoleWriter() (writer *consoleWriter, err error) { + newWriter := new(consoleWriter) + + return newWriter, nil +} + +// Create folder and file on WriteLog/Write first call +func (console *consoleWriter) Write(bytes []byte) (int, error) { + return fmt.Print(string(bytes)) +} + +func (console *consoleWriter) String() string { + return "Console writer" +} diff --git a/agent/vendor/github.com/cihub/seelog/writers_filewriter.go b/agent/vendor/github.com/cihub/seelog/writers_filewriter.go new file mode 100644 index 00000000000..8d3ae270e83 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_filewriter.go @@ -0,0 +1,92 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" + "io" + "os" + "path/filepath" +) + +// fileWriter is used to write to a file. +type fileWriter struct { + innerWriter io.WriteCloser + fileName string +} + +// Creates a new file and a corresponding writer. Returns error, if the file couldn't be created. +func NewFileWriter(fileName string) (writer *fileWriter, err error) { + newWriter := new(fileWriter) + newWriter.fileName = fileName + + return newWriter, nil +} + +func (fw *fileWriter) Close() error { + if fw.innerWriter != nil { + err := fw.innerWriter.Close() + if err != nil { + return err + } + fw.innerWriter = nil + } + return nil +} + +// Create folder and file on WriteLog/Write first call +func (fw *fileWriter) Write(bytes []byte) (n int, err error) { + if fw.innerWriter == nil { + if err := fw.createFile(); err != nil { + return 0, err + } + } + return fw.innerWriter.Write(bytes) +} + +func (fw *fileWriter) createFile() error { + folder, _ := filepath.Split(fw.fileName) + var err error + + if 0 != len(folder) { + err = os.MkdirAll(folder, defaultDirectoryPermissions) + if err != nil { + return err + } + } + + // If exists + fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) + + if err != nil { + return err + } + + return nil +} + +func (fw *fileWriter) String() string { + return fmt.Sprintf("File writer: %s", fw.fileName) +} diff --git 
a/agent/vendor/github.com/cihub/seelog/writers_formattedwriter.go b/agent/vendor/github.com/cihub/seelog/writers_formattedwriter.go new file mode 100644 index 00000000000..bf44a4103e6 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_formattedwriter.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package seelog + +import ( + "errors" + "fmt" + "io" +) + +type formattedWriter struct { + writer io.Writer + formatter *formatter +} + +func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) { + if formatter == nil { + return nil, errors.New("formatter can not be nil") + } + + return &formattedWriter{writer, formatter}, nil +} + +func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error { + str := formattedWriter.formatter.Format(message, level, context) + _, err := formattedWriter.writer.Write([]byte(str)) + return err +} + +func (formattedWriter *formattedWriter) String() string { + return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter) +} + +func (formattedWriter *formattedWriter) Writer() io.Writer { + return formattedWriter.writer +} + +func (formattedWriter *formattedWriter) Format() *formatter { + return formattedWriter.formatter +} diff --git a/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go b/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go new file mode 100644 index 00000000000..9535a579811 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go @@ -0,0 +1,763 @@ +// Copyright (c) 2013 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/cihub/seelog/archive" + "github.com/cihub/seelog/archive/gzip" + "github.com/cihub/seelog/archive/tar" + "github.com/cihub/seelog/archive/zip" +) + +// Common constants +const ( + rollingLogHistoryDelimiter = "." +) + +// Types of the rolling writer: roll by date, by time, etc. +type rollingType uint8 + +const ( + rollingTypeSize = iota + rollingTypeTime +) + +// Types of the rolled file naming mode: prefix, postfix, etc. 
+type rollingNameMode uint8 + +const ( + rollingNameModePostfix = iota + rollingNameModePrefix +) + +var rollingNameModesStringRepresentation = map[rollingNameMode]string{ + rollingNameModePostfix: "postfix", + rollingNameModePrefix: "prefix", +} + +func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) { + for tp, tpStr := range rollingNameModesStringRepresentation { + if tpStr == rollingNameStr { + return tp, true + } + } + + return 0, false +} + +var rollingTypesStringRepresentation = map[rollingType]string{ + rollingTypeSize: "size", + rollingTypeTime: "date", +} + +func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) { + for tp, tpStr := range rollingTypesStringRepresentation { + if tpStr == rollingTypeStr { + return tp, true + } + } + + return 0, false +} + +// Old logs archivation type. +type rollingArchiveType uint8 + +const ( + rollingArchiveNone = iota + rollingArchiveZip + rollingArchiveGzip +) + +var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{ + rollingArchiveNone: "none", + rollingArchiveZip: "zip", + rollingArchiveGzip: "gzip", +} + +type archiver func(f *os.File, exploded bool) archive.WriteCloser + +type unarchiver func(f *os.File) (archive.ReadCloser, error) + +type compressionType struct { + extension string + handleMultipleEntries bool + archiver archiver + unarchiver unarchiver +} + +var compressionTypes = map[rollingArchiveType]compressionType{ + rollingArchiveZip: { + extension: ".zip", + handleMultipleEntries: true, + archiver: func(f *os.File, _ bool) archive.WriteCloser { + return zip.NewWriter(f) + }, + unarchiver: func(f *os.File) (archive.ReadCloser, error) { + fi, err := f.Stat() + if err != nil { + return nil, err + } + r, err := zip.NewReader(f, fi.Size()) + if err != nil { + return nil, err + } + return archive.NopCloser(r), nil + }, + }, + rollingArchiveGzip: { + extension: ".gz", + handleMultipleEntries: false, + archiver: func(f *os.File, exploded bool) 
archive.WriteCloser { + gw := gzip.NewWriter(f) + if exploded { + return gw + } + return tar.NewWriteMultiCloser(gw, gw) + }, + unarchiver: func(f *os.File) (archive.ReadCloser, error) { + gr, err := gzip.NewReader(f, f.Name()) + if err != nil { + return nil, err + } + + // Determine if the gzip is a tar + tr := tar.NewReader(gr) + _, err = tr.Next() + isTar := err == nil + + // Reset to beginning of file + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + gr.Reset(f) + + if isTar { + return archive.NopCloser(tar.NewReader(gr)), nil + } + return gr, nil + }, + }, +} + +func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string { + if !compressionType.handleMultipleEntries && !exploded { + return name + ".tar" + compressionType.extension + } else { + return name + compressionType.extension + } + +} + +func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) { + for tp, tpStr := range rollingArchiveTypesStringRepresentation { + if tpStr == rollingArchiveTypeStr { + return tp, true + } + } + + return 0, false +} + +// Default names for different archive types +var rollingArchiveDefaultExplodedName = "old" + +func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) { + compressionType, ok := compressionTypes[archiveType] + if !ok { + return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType) + } + return compressionType.rollingArchiveTypeName("log", exploded), nil +} + +// rollerVirtual is an interface that represents all virtual funcs that are +// called in different rolling writer subtypes. +type rollerVirtual interface { + needsToRoll() bool // Returns true if needs to switch to another file. + isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok. 
+ sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger. + + // getNewHistoryRollFileName is called whenever we are about to roll the + // current log file. It returns the name the current log file should be + // rolled to. + getNewHistoryRollFileName(otherHistoryFiles []string) string + + getCurrentFileName() string +} + +// rollingFileWriter writes received messages to a file, until time interval passes +// or file exceeds a specified limit. After that the current log file is renamed +// and writer starts to log into a new file. You can set a limit for such renamed +// files count, if you want, and then the rolling writer would delete older ones when +// the files count exceed the specified limit. +type rollingFileWriter struct { + fileName string // log file name + currentDirPath string + currentFile *os.File + currentName string + currentFileSize int64 + rollingType rollingType // Rolling mode (Files roll by size/date/...) + archiveType rollingArchiveType + archivePath string + archiveExploded bool + fullName bool + maxRolls int + nameMode rollingNameMode + self rollerVirtual // Used for virtual calls + rollLock sync.Mutex +} + +func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode, + archiveExploded bool, fullName bool) (*rollingFileWriter, error) { + rw := new(rollingFileWriter) + rw.currentDirPath, rw.fileName = filepath.Split(fpath) + if len(rw.currentDirPath) == 0 { + rw.currentDirPath = "." 
+ } + + rw.rollingType = rtype + rw.archiveType = atype + rw.archivePath = apath + rw.nameMode = namemode + rw.maxRolls = maxr + rw.archiveExploded = archiveExploded + rw.fullName = fullName + return rw, nil +} + +func (rw *rollingFileWriter) hasRollName(file string) bool { + switch rw.nameMode { + case rollingNameModePostfix: + rname := rw.fileName + rollingLogHistoryDelimiter + return strings.HasPrefix(file, rname) + case rollingNameModePrefix: + rname := rollingLogHistoryDelimiter + rw.fileName + return strings.HasSuffix(file, rname) + } + return false +} + +func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string { + switch rw.nameMode { + case rollingNameModePostfix: + return originalName + rollingLogHistoryDelimiter + rollname + case rollingNameModePrefix: + return rollname + rollingLogHistoryDelimiter + originalName + } + return "" +} + +func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) { + files, err := getDirFilePaths(rw.currentDirPath, nil, true) + if err != nil { + return nil, err + } + var validRollNames []string + for _, file := range files { + if rw.hasRollName(file) { + rname := rw.getFileRollName(file) + if rw.self.isFileRollNameValid(rname) { + validRollNames = append(validRollNames, rname) + } + } + } + sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames) + if err != nil { + return nil, err + } + validSortedFiles := make([]string, len(sortedTails)) + for i, v := range sortedTails { + validSortedFiles[i] = rw.createFullFileName(rw.fileName, v) + } + return validSortedFiles, nil +} + +func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error { + var err error + + if len(rw.currentDirPath) != 0 { + err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions) + + if err != nil { + return err + } + } + rw.currentName = rw.self.getCurrentFileName() + filePath := filepath.Join(rw.currentDirPath, rw.currentName) + + // This will either open the existing file (without 
truncating it) or + // create if necessary. Append mode avoids any race conditions. + rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions) + if err != nil { + return err + } + + stat, err := rw.currentFile.Stat() + if err != nil { + rw.currentFile.Close() + rw.currentFile = nil + return err + } + + rw.currentFileSize = stat.Size() + return nil +} + +func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) { + closeWithError := func(c io.Closer) { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + + rollPath := filepath.Join(rw.currentDirPath, logFilename) + src, err := os.Open(rollPath) + if err != nil { + return err + } + defer src.Close() // Read-only + + // Buffer to a temporary file on the same partition + // Note: archivePath is a path to a directory when handling exploded logs + dst, err := rw.tempArchiveFile(rw.archivePath) + if err != nil { + return err + } + defer func() { + closeWithError(dst) + if err != nil { + os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file + return + } + + // Finalize archive by swapping the buffered archive into place + err = os.Rename(dst.Name(), filepath.Join(rw.archivePath, + compressionType.rollingArchiveTypeName(logFilename, true))) + }() + + // archive entry + w := compressionType.archiver(dst, true) + defer closeWithError(w) + fi, err := src.Stat() + if err != nil { + return err + } + if err := w.NextFile(logFilename, fi); err != nil { + return err + } + _, err = io.Copy(w, src) + return err +} + +func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) { + closeWithError := func(c io.Closer) { + if cerr := c.Close(); cerr != nil && err == nil { + err = cerr + } + } + + // Buffer to a temporary file on the same partition + // Note: archivePath is a path to a file when handling unexploded logs + 
dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath)) + if err != nil { + return err + } + defer func() { + closeWithError(dst) + if err != nil { + os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file + return + } + + // Finalize archive by moving the buffered archive into place + err = os.Rename(dst.Name(), rw.archivePath) + }() + + w := compressionType.archiver(dst, false) + defer closeWithError(w) + + src, err := os.Open(rw.archivePath) + switch { + // Archive exists + case err == nil: + defer src.Close() // Read-only + + r, err := compressionType.unarchiver(src) + if err != nil { + return err + } + defer r.Close() // Read-only + + if err := archive.Copy(w, r); err != nil { + return err + } + + // Failed to stat + case !os.IsNotExist(err): + return err + } + + // Add new files to the archive + for i := 0; i < rollsToDelete; i++ { + rollPath := filepath.Join(rw.currentDirPath, history[i]) + src, err := os.Open(rollPath) + if err != nil { + return err + } + defer src.Close() // Read-only + fi, err := src.Stat() + if err != nil { + return err + } + if err := w.NextFile(src.Name(), fi); err != nil { + return err + } + if _, err := io.Copy(w, src); err != nil { + return err + } + } + return nil +} + +func (rw *rollingFileWriter) deleteOldRolls(history []string) error { + if rw.maxRolls <= 0 { + return nil + } + + rollsToDelete := len(history) - rw.maxRolls + if rollsToDelete <= 0 { + return nil + } + + if rw.archiveType != rollingArchiveNone { + if rw.archiveExploded { + os.MkdirAll(rw.archivePath, defaultDirectoryPermissions) + + // Archive logs + for i := 0; i < rollsToDelete; i++ { + rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType]) + } + } else { + os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions) + + rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history) + } + } + + var err error + // In all cases (archive files or not) the files should be deleted. 
+	for i := 0; i < rollsToDelete; i++ {
+		// Try best to delete files without breaking the loop.
+		if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil {
+			reportInternalError(err)
+		}
+	}
+
+	return nil
+}
+
+func (rw *rollingFileWriter) getFileRollName(fileName string) string {
+	switch rw.nameMode {
+	case rollingNameModePostfix:
+		return fileName[len(rw.fileName+rollingLogHistoryDelimiter):]
+	case rollingNameModePrefix:
+		return fileName[:len(fileName)-len(rw.fileName+rollingLogHistoryDelimiter)]
+	}
+	return ""
+}
+
+func (rw *rollingFileWriter) roll() error {
+	// First, close current file.
+	err := rw.currentFile.Close()
+	if err != nil {
+		return err
+	}
+	rw.currentFile = nil
+
+	// Current history of all previous log files.
+	// For file roller it may be like this:
+	// * ...
+	// * file.log.4
+	// * file.log.5
+	// * file.log.6
+	//
+	// For date roller it may look like this:
+	// * ...
+	// * file.log.11.Aug.13
+	// * file.log.15.Aug.13
+	// * file.log.16.Aug.13
+	// Sorted log history does NOT include current file.
+	history, err := rw.getSortedLogHistory()
+	if err != nil {
+		return err
+	}
+	// Renames current file to create a new roll history entry
+	// For file roller it may be like this:
+	// * ...
+	// * file.log.4
+	// * file.log.5
+	// * file.log.6
+	// n file.log.7 <---- RENAMED (from file.log)
+	newHistoryName := rw.createFullFileName(rw.fileName,
+		rw.self.getNewHistoryRollFileName(history))
+
+	err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName))
+	if err != nil {
+		return err
+	}
+
+	// Finally, add the newly added history file to the history archive
+	// and, if after that the archive exceeds the allowed max limit, older rolls
+	// must be removed/archived.
+ history = append(history, newHistoryName) + if len(history) > rw.maxRolls { + err = rw.deleteOldRolls(history) + if err != nil { + return err + } + } + + return nil +} + +func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) { + rw.rollLock.Lock() + defer rw.rollLock.Unlock() + + if rw.self.needsToRoll() { + if err := rw.roll(); err != nil { + return 0, err + } + } + + if rw.currentFile == nil { + err := rw.createFileAndFolderIfNeeded(true) + if err != nil { + return 0, err + } + } + + n, err = rw.currentFile.Write(bytes) + rw.currentFileSize += int64(n) + return n, err +} + +func (rw *rollingFileWriter) Close() error { + if rw.currentFile != nil { + e := rw.currentFile.Close() + if e != nil { + return e + } + rw.currentFile = nil + } + return nil +} + +func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) { + tmp := filepath.Join(archiveDir, ".seelog_tmp") + if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil { + return nil, err + } + return ioutil.TempFile(tmp, "archived_logs") +} + +// ============================================================================================= +// Different types of rolling writers +// ============================================================================================= + +// -------------------------------------------------- +// Rolling writer by SIZE +// -------------------------------------------------- + +// rollingFileWriterSize performs roll when file exceeds a specified limit. 
+type rollingFileWriterSize struct { + *rollingFileWriter + maxFileSize int64 +} + +func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) { + rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false) + if err != nil { + return nil, err + } + rws := &rollingFileWriterSize{rw, maxSize} + rws.self = rws + return rws, nil +} + +func (rws *rollingFileWriterSize) needsToRoll() bool { + return rws.currentFileSize >= rws.maxFileSize +} + +func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool { + if len(rname) == 0 { + return false + } + _, err := strconv.Atoi(rname) + return err == nil +} + +type rollSizeFileTailsSlice []string + +func (p rollSizeFileTailsSlice) Len() int { + return len(p) +} +func (p rollSizeFileTailsSlice) Less(i, j int) bool { + v1, _ := strconv.Atoi(p[i]) + v2, _ := strconv.Atoi(p[j]) + return v1 < v2 +} +func (p rollSizeFileTailsSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) { + ss := rollSizeFileTailsSlice(fs) + sort.Sort(ss) + return ss, nil +} + +func (rws *rollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []string) string { + v := 0 + if len(otherLogFiles) != 0 { + latest := otherLogFiles[len(otherLogFiles)-1] + v, _ = strconv.Atoi(rws.getFileRollName(latest)) + } + return fmt.Sprintf("%d", v+1) +} + +func (rws *rollingFileWriterSize) getCurrentFileName() string { + return rws.fileName +} + +func (rws *rollingFileWriterSize) String() string { + return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v", + rws.fileName, + rollingArchiveTypesStringRepresentation[rws.archiveType], + rws.archivePath, + rws.maxFileSize, + rws.maxRolls) +} + +// 
-------------------------------------------------- +// Rolling writer by TIME +// -------------------------------------------------- + +// rollingFileWriterTime performs roll when a specified time interval has passed. +type rollingFileWriterTime struct { + *rollingFileWriter + timePattern string + currentTimeFileName string +} + +func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int, + timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) { + + rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName) + if err != nil { + return nil, err + } + rws := &rollingFileWriterTime{rw, timePattern, ""} + rws.self = rws + return rws, nil +} + +func (rwt *rollingFileWriterTime) needsToRoll() bool { + newName := time.Now().Format(rwt.timePattern) + + if rwt.currentTimeFileName == "" { + // first run; capture the current name + rwt.currentTimeFileName = newName + return false + } + + return newName != rwt.currentTimeFileName +} + +func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool { + if len(rname) == 0 { + return false + } + _, err := time.ParseInLocation(rwt.timePattern, rname, time.Local) + return err == nil +} + +type rollTimeFileTailsSlice struct { + data []string + pattern string +} + +func (p rollTimeFileTailsSlice) Len() int { + return len(p.data) +} + +func (p rollTimeFileTailsSlice) Less(i, j int) bool { + t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local) + t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local) + return t1.Before(t2) +} + +func (p rollTimeFileTailsSlice) Swap(i, j int) { + p.data[i], p.data[j] = p.data[j], p.data[i] +} + +func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) { + ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern} + sort.Sort(ss) + return ss.data, nil +} + +func (rwt *rollingFileWriterTime) 
getNewHistoryRollFileName(_ []string) string { + newFileName := rwt.currentTimeFileName + rwt.currentTimeFileName = time.Now().Format(rwt.timePattern) + return newFileName +} + +func (rwt *rollingFileWriterTime) getCurrentFileName() string { + if rwt.fullName { + return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern)) + } + return rwt.fileName +} + +func (rwt *rollingFileWriterTime) String() string { + return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v", + rwt.fileName, + rollingArchiveTypesStringRepresentation[rwt.archiveType], + rwt.archivePath, + rwt.timePattern, + rwt.maxRolls) +} diff --git a/agent/vendor/github.com/cihub/seelog/writers_smtpwriter.go b/agent/vendor/github.com/cihub/seelog/writers_smtpwriter.go new file mode 100644 index 00000000000..31b79438337 --- /dev/null +++ b/agent/vendor/github.com/cihub/seelog/writers_smtpwriter.go @@ -0,0 +1,214 @@ +// Copyright (c) 2012 - Cloud Instruments Co., Ltd. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package seelog + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net/smtp" + "path/filepath" + "strings" +) + +const ( + // Default subject phrase for sending emails. + DefaultSubjectPhrase = "Diagnostic message from server: " + + // Message subject pattern composed according to RFC 5321. + rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n" +) + +// smtpWriter is used to send emails via given SMTP-server. +type smtpWriter struct { + auth smtp.Auth + hostName string + hostPort string + hostNameWithPort string + senderAddress string + senderName string + recipientAddresses []string + caCertDirPaths []string + mailHeaders []string + subject string +} + +// NewSMTPWriter returns a new SMTP-writer. +func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter { + return &smtpWriter{ + auth: smtp.PlainAuth("", un, pwd, hn), + hostName: hn, + hostPort: hp, + hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp), + senderAddress: sa, + senderName: sn, + recipientAddresses: ras, + caCertDirPaths: cacdps, + subject: subj, + mailHeaders: headers, + } +} + +func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte { + headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject) + // Build header lines if configured. 
+ if headers != nil && len(headers) > 0 { + headerLines += strings.Join(headers, "\n") + headerLines += "\n" + } + return append([]byte(headerLines), body...) +} + +// getTLSConfig gets paths of PEM files with certificates, +// host server name and tries to create an appropriate TLS.Config. +func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) { + if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 { + err = errors.New("invalid PEM file paths") + return + } + pemEncodedContent := []byte{} + var ( + e error + bytes []byte + ) + // Create a file-filter-by-extension, set aside non-pem files. + pemFilePathFilter := func(fp string) bool { + if filepath.Ext(fp) == ".pem" { + return true + } + return false + } + for _, pemFileDirPath := range pemFileDirPaths { + pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false) + if err != nil { + return nil, err + } + + // Put together all the PEM files to decode them as a whole byte slice. + for _, pfp := range pemFilePaths { + if bytes, e = ioutil.ReadFile(pfp); e == nil { + pemEncodedContent = append(pemEncodedContent, bytes...) + } else { + return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error()) + } + } + } + config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName} + isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent) + if !isAppended { + // Extract this into a separate error. + err = errors.New("invalid PEM content") + return + } + return +} + +// SendMail accepts TLS configuration, connects to the server at addr, +// switches to TLS if possible, authenticates with mechanism a if possible, +// and then sends an email from address from, to addresses to, with message msg. +func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error { + c, err := smtp.Dial(addr) + if err != nil { + return err + } + // Check if the server supports STARTTLS extension. 
+ if ok, _ := c.Extension("STARTTLS"); ok { + if err = c.StartTLS(config); err != nil { + return err + } + } + // Check if the server supports AUTH extension and use given smtp.Auth. + if a != nil { + if isSupported, _ := c.Extension("AUTH"); isSupported { + if err = c.Auth(a); err != nil { + return err + } + } + } + // Portion of code from the official smtp.SendMail function, + // see http://golang.org/src/pkg/net/smtp/smtp.go. + if err = c.Mail(from); err != nil { + return err + } + for _, addr := range to { + if err = c.Rcpt(addr); err != nil { + return err + } + } + w, err := c.Data() + if err != nil { + return err + } + _, err = w.Write(msg) + if err != nil { + return err + } + err = w.Close() + if err != nil { + return err + } + return c.Quit() +} + +// Write pushes a text message properly composed according to RFC 5321 +// to a post server, which sends it to the recipients. +func (smtpw *smtpWriter) Write(data []byte) (int, error) { + var err error + + if smtpw.caCertDirPaths == nil { + err = smtp.SendMail( + smtpw.hostNameWithPort, + smtpw.auth, + smtpw.senderAddress, + smtpw.recipientAddresses, + prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), + ) + } else { + config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName) + if e != nil { + return 0, e + } + err = sendMailWithTLSConfig( + config, + smtpw.hostNameWithPort, + smtpw.auth, + smtpw.senderAddress, + smtpw.recipientAddresses, + prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders), + ) + } + if err != nil { + return 0, err + } + return len(data), nil +} + +// Close closes down SMTP-connection. +func (smtpw *smtpWriter) Close() error { + // Do nothing as Write method opens and closes connection automatically. 
+ return nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/.gitignore b/agent/vendor/github.com/containerd/cgroups/.gitignore new file mode 100644 index 00000000000..528cd5b39b7 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/.gitignore @@ -0,0 +1 @@ +example/example diff --git a/agent/vendor/github.com/containerd/cgroups/.travis.yml b/agent/vendor/github.com/containerd/cgroups/.travis.yml new file mode 100644 index 00000000000..2b22c37aa88 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.8.x + +install: + - mkdir -p $GOPATH/src/github.com/prometheus $GOPATH/src/github.com/opencontainers + - cd $GOPATH/src/github.com/opencontainers && git clone https://github.com/opencontainers/runtime-spec && cd runtime-spec && git checkout 198f23f827eea397d4331d7eb048d9d4c7ff7bee + - cd $GOPATH/src/github.com/containerd/cgroups + - go get -t ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/agent/vendor/github.com/containerd/cgroups/LICENSE b/agent/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/containerd/cgroups/README.md b/agent/vendor/github.com/containerd/cgroups/README.md new file mode 100644 index 00000000000..89007ebbb7b --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/README.md @@ -0,0 +1,107 @@ +# cgroups + +[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups) + +[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups) + +Go package for creating, managing, inspecting, and destroying cgroups. +The resources format for settings on the cgroup uses the OCI runtime-spec found +[here](https://github.com/opencontainers/runtime-spec). + +## Examples + +### Create a new cgroup + +This creates a new cgroup using a static path for all subsystems under `/test`. + +* /sys/fs/cgroup/cpu/test +* /sys/fs/cgroup/memory/test +* etc.... + +It uses a single hierarchy and specifies cpu shares as a resource constraint and +uses the v1 implementation of cgroups. 
+ + +```go +shares := uint64(100) +control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}) +defer control.Delete() +``` + +### Create with systemd slice support + + +```go +control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}) + +``` + +### Load an existing cgroup + +```go +control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test")) +``` + +### Add a process to the cgroup + +```go +if err := control.Add(cgroups.Process{Pid:1234}); err != nil { +} +``` + +### Update the cgroup + +To update the resources applied in the cgroup + +```go +shares = uint64(200) +if err := control.Update(&specs.LinuxResources{ + CPU: &specs.CPU{ + Shares: &shares, + }, +}); err != nil { +} +``` + +### Freeze and Thaw the cgroup + +```go +if err := control.Freeze(); err != nil { +} +if err := control.Thaw(); err != nil { +} +``` + +### List all processes in the cgroup or recursively + +```go +processes, err := control.Processes(cgroups.Devices, recursive) +``` + +### Get Stats on the cgroup + +```go +stats, err := control.Stat() +``` + +### Move process across cgroups + +This allows you to take processes from one cgroup and move them to another. 
+ +```go +err := control.MoveTo(destination) +``` + +### Create subcgroup + +```go +subCgroup, err := control.New("child", resources) +``` diff --git a/agent/vendor/github.com/containerd/cgroups/blkio.go b/agent/vendor/github.com/containerd/cgroups/blkio.go new file mode 100644 index 00000000000..e1fd73ce0f7 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/blkio.go @@ -0,0 +1,323 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewBlkio(root string) *blkioController { + return &blkioController{ + root: filepath.Join(root, string(Blkio)), + } +} + +type blkioController struct { + root string +} + +func (b *blkioController) Name() Name { + return Blkio +} + +func (b *blkioController) Path(path string) string { + return filepath.Join(b.root, path) +} + +func (b *blkioController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.BlockIO == nil { + return nil + } + for _, t := range createBlkioSettings(resources.BlockIO) { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)), + t.format(t.value), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +func (b *blkioController) Update(path string, resources *specs.LinuxResources) error { + return b.Create(path, resources) +} + +func (b *blkioController) Stat(path string, stats *Stats) error { + stats.Blkio = &BlkioStat{} + settings := []blkioStatSettings{ + { + name: "throttle.io_serviced", + entry: &stats.Blkio.IoServicedRecursive, + }, + { + name: "throttle.io_service_bytes", + entry: &stats.Blkio.IoServiceBytesRecursive, + }, + } + // Try to read CFQ stats available on all CFQ enabled kernels first + if _, err := os.Lstat(filepath.Join(b.Path(path), 
fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { + settings = append(settings, + blkioStatSettings{ + name: "sectors_recursive", + entry: &stats.Blkio.SectorsRecursive, + }, + blkioStatSettings{ + name: "io_service_bytes_recursive", + entry: &stats.Blkio.IoServiceBytesRecursive, + }, + blkioStatSettings{ + name: "io_serviced_recursive", + entry: &stats.Blkio.IoServicedRecursive, + }, + blkioStatSettings{ + name: "io_queued_recursive", + entry: &stats.Blkio.IoQueuedRecursive, + }, + blkioStatSettings{ + name: "io_service_time_recursive", + entry: &stats.Blkio.IoServiceTimeRecursive, + }, + blkioStatSettings{ + name: "io_wait_time_recursive", + entry: &stats.Blkio.IoWaitTimeRecursive, + }, + blkioStatSettings{ + name: "io_merged_recursive", + entry: &stats.Blkio.IoMergedRecursive, + }, + blkioStatSettings{ + name: "time_recursive", + entry: &stats.Blkio.IoTimeRecursive, + }, + ) + } + + devices, err := getDevices("/dev") + if err != nil { + return err + } + + for _, t := range settings { + if err := b.readEntry(devices, path, t.name, t.entry); err != nil { + return err + } + } + return nil +} + +func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]BlkioEntry) error { + f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name))) + if err != nil { + return err + } + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + // format: dev type amount + fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) + if len(fields) < 3 { + if len(fields) == 2 && fields[0] == "Total" { + // skip total line + continue + } else { + return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) + } + } + major, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return err + } + minor, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return err + } + op := "" + valueField := 2 + if len(fields) == 4 { + op = 
fields[2] + valueField = 3 + } + v, err := strconv.ParseUint(fields[valueField], 10, 64) + if err != nil { + return err + } + *entry = append(*entry, BlkioEntry{ + Device: devices[deviceKey{major, minor}], + Major: major, + Minor: minor, + Op: op, + Value: v, + }) + } + return nil +} + +func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings { + settings := []blkioSettings{ + { + name: "weight", + value: blkio.Weight, + format: uintf, + }, + { + name: "leaf_weight", + value: blkio.LeafWeight, + format: uintf, + }, + } + for _, wd := range blkio.WeightDevice { + settings = append(settings, + blkioSettings{ + name: "weight_device", + value: wd, + format: weightdev, + }, + blkioSettings{ + name: "leaf_weight_device", + value: wd, + format: weightleafdev, + }) + } + for _, t := range []struct { + name string + list []specs.LinuxThrottleDevice + }{ + { + name: "throttle.read_bps_device", + list: blkio.ThrottleReadBpsDevice, + }, + { + name: "throttle.read_iops_device", + list: blkio.ThrottleReadIOPSDevice, + }, + { + name: "throttle.write_bps_device", + list: blkio.ThrottleWriteBpsDevice, + }, + { + name: "throttle.write_iops_device", + list: blkio.ThrottleWriteIOPSDevice, + }, + } { + for _, td := range t.list { + settings = append(settings, blkioSettings{ + name: t.name, + value: td, + format: throttleddev, + }) + } + } + return settings +} + +type blkioSettings struct { + name string + value interface{} + format func(v interface{}) []byte +} + +type blkioStatSettings struct { + name string + entry *[]BlkioEntry +} + +func uintf(v interface{}) []byte { + return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10)) +} + +func weightdev(v interface{}) []byte { + wd := v.(specs.LinuxWeightDevice) + return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)) +} + +func weightleafdev(v interface{}) []byte { + wd := v.(specs.LinuxWeightDevice) + return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)) +} + +func throttleddev(v 
interface{}) []byte { + td := v.(specs.LinuxThrottleDevice) + return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)) +} + +func splitBlkioStatLine(r rune) bool { + return r == ' ' || r == ':' +} + +type deviceKey struct { + major, minor uint64 +} + +// getDevices makes a best effort attempt to read all the devices into a map +// keyed by major and minor number. Since devices may be mapped multiple times, +// we err on taking the first occurrence. +func getDevices(path string) (map[deviceKey]string, error) { + // TODO(stevvooe): We are ignoring lots of errors. It might be kind of + // challenging to debug this if we aren't mapping devices correctly. + // Consider logging these errors. + devices := map[deviceKey]string{} + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + switch { + case fi.IsDir(): + switch fi.Name() { + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts": + return filepath.SkipDir + default: + return nil + } + case fi.Name() == "console": + return nil + default: + if fi.Mode()&os.ModeDevice == 0 { + // skip non-devices + return nil + } + + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("%s: unable to convert to system stat", p) + } + + key := deviceKey{major(st.Rdev), minor(st.Rdev)} + if _, ok := devices[key]; ok { + return nil // skip it if we have already populated the path. 
+ } + + devices[key] = p + } + + return nil + }); err != nil { + return nil, err + } + + return devices, nil +} + +func major(devNumber uint64) uint64 { + return (devNumber >> 8) & 0xfff +} + +func minor(devNumber uint64) uint64 { + return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00) +} diff --git a/agent/vendor/github.com/containerd/cgroups/cgroup.go b/agent/vendor/github.com/containerd/cgroups/cgroup.go new file mode 100644 index 00000000000..0aea83222dc --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/cgroup.go @@ -0,0 +1,386 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// New returns a new control via the cgroup cgroups interface +func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgroup, error) { + subsystems, err := hierarchy() + if err != nil { + return nil, err + } + for _, s := range subsystems { + if err := initializeSubsystem(s, path, resources); err != nil { + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: subsystems, + }, nil +} + +// Load will load an existing cgroup and allow it to be controlled +func Load(hierarchy Hierarchy, path Path) (Cgroup, error) { + subsystems, err := hierarchy() + if err != nil { + return nil, err + } + // check the the subsystems still exist + for _, s := range pathers(subsystems) { + p, err := path(s.Name()) + if err != nil { + return nil, err + } + if _, err := os.Lstat(s.Path(p)); err != nil { + if os.IsNotExist(err) { + return nil, ErrCgroupDeleted + } + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: subsystems, + }, nil +} + +type cgroup struct { + path Path + + subsystems []Subsystem + mu sync.Mutex + err error +} + +// New returns a new sub cgroup +func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return 
nil, c.err + } + path := subPath(c.path, name) + for _, s := range c.subsystems { + if err := initializeSubsystem(s, path, resources); err != nil { + return nil, err + } + } + return &cgroup{ + path: path, + subsystems: c.subsystems, + }, nil +} + +// Subsystems returns all the subsystems that are currently being +// consumed by the group +func (c *cgroup) Subsystems() []Subsystem { + return c.subsystems +} + +// Add moves the provided process into the new cgroup +func (c *cgroup) Add(process Process) error { + if process.Pid <= 0 { + return ErrInvalidPid + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + return c.add(process) +} + +func (c *cgroup) add(process Process) error { + for _, s := range pathers(c.subsystems) { + p, err := c.path(s.Name()) + if err != nil { + return err + } + if err := ioutil.WriteFile( + filepath.Join(s.Path(p), cgroupProcs), + []byte(strconv.Itoa(process.Pid)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +// Delete will remove the control group from each of the subsystems registered +func (c *cgroup) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + var errors []string + for _, s := range c.subsystems { + if d, ok := s.(deleter); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + if err := d.Delete(sp); err != nil { + errors = append(errors, string(s.Name())) + } + continue + } + if p, ok := s.(pather); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + path := p.Path(sp) + if err := remove(path); err != nil { + errors = append(errors, path) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", ")) + } + c.err = ErrCgroupDeleted + return nil +} + +// Stat returns the current stats for the cgroup +func (c *cgroup) Stat(handlers ...ErrorHandler) (*Stats, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return nil, 
c.err + } + if len(handlers) == 0 { + handlers = append(handlers, errPassthrough) + } + var ( + stats = &Stats{} + wg = &sync.WaitGroup{} + errs = make(chan error, len(c.subsystems)) + ) + for _, s := range c.subsystems { + if ss, ok := s.(stater); ok { + sp, err := c.path(s.Name()) + if err != nil { + return nil, err + } + wg.Add(1) + go func() { + defer wg.Done() + if err := ss.Stat(sp, stats); err != nil { + for _, eh := range handlers { + if herr := eh(err); herr != nil { + errs <- herr + } + } + } + }() + } + } + wg.Wait() + close(errs) + for err := range errs { + return nil, err + } + return stats, nil +} + +// Update updates the cgroup with the new resource values provided +// +// Be prepared to handle EBUSY when trying to update a cgroup with +// live processes and other operations like Stats being performed at the +// same time +func (c *cgroup) Update(resources *specs.LinuxResources) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + for _, s := range c.subsystems { + if u, ok := s.(updater); ok { + sp, err := c.path(s.Name()) + if err != nil { + return err + } + if err := u.Update(sp, resources); err != nil { + return err + } + } + } + return nil +} + +// Processes returns the processes running inside the cgroup along +// with the subsystem used, pid, and path +func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return nil, c.err + } + return c.processes(subsystem, recursive) +} + +func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) { + s := c.getSubsystem(subsystem) + sp, err := c.path(subsystem) + if err != nil { + return nil, err + } + path := s.(pather).Path(sp) + var processes []Process + err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !recursive && info.IsDir() { + return filepath.SkipDir + } + dir, name := filepath.Split(p) + if name 
!= cgroupProcs { + return nil + } + procs, err := readPids(dir, subsystem) + if err != nil { + return err + } + processes = append(processes, procs...) + return nil + }) + return processes, err +} + +// Freeze freezes the entire cgroup and all the processes inside it +func (c *cgroup) Freeze() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + s := c.getSubsystem(Freezer) + if s == nil { + return ErrFreezerNotSupported + } + sp, err := c.path(Freezer) + if err != nil { + return err + } + return s.(*freezerController).Freeze(sp) +} + +// Thaw thaws out the cgroup and all the processes inside it +func (c *cgroup) Thaw() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + s := c.getSubsystem(Freezer) + if s == nil { + return ErrFreezerNotSupported + } + sp, err := c.path(Freezer) + if err != nil { + return err + } + return s.(*freezerController).Thaw(sp) +} + +// OOMEventFD returns the memory cgroup's out of memory event fd that triggers +// when processes inside the cgroup receive an oom event +func (c *cgroup) OOMEventFD() (uintptr, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return 0, c.err + } + s := c.getSubsystem(Memory) + if s == nil { + return 0, ErrMemoryNotSupported + } + sp, err := c.path(Memory) + if err != nil { + return 0, err + } + return s.(*memoryController).OOMEventFD(sp) +} + +// State returns the state of the cgroup and its processes +func (c *cgroup) State() State { + c.mu.Lock() + defer c.mu.Unlock() + c.checkExists() + if c.err != nil && c.err == ErrCgroupDeleted { + return Deleted + } + s := c.getSubsystem(Freezer) + if s == nil { + return Thawed + } + sp, err := c.path(Freezer) + if err != nil { + return Unknown + } + state, err := s.(*freezerController).state(sp) + if err != nil { + return Unknown + } + return state +} + +// MoveTo does a recursive move subsystem by subsystem of all the processes +// inside the group +func (c *cgroup) MoveTo(destination 
Cgroup) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.err != nil { + return c.err + } + for _, s := range c.subsystems { + processes, err := c.processes(s.Name(), true) + if err != nil { + return err + } + for _, p := range processes { + if err := destination.Add(p); err != nil { + return err + } + } + } + return nil +} + +func (c *cgroup) getSubsystem(n Name) Subsystem { + for _, s := range c.subsystems { + if s.Name() == n { + return s + } + } + return nil +} + +func (c *cgroup) checkExists() { + for _, s := range pathers(c.subsystems) { + p, err := c.path(s.Name()) + if err != nil { + return + } + if _, err := os.Lstat(s.Path(p)); err != nil { + if os.IsNotExist(err) { + c.err = ErrCgroupDeleted + return + } + } + } +} diff --git a/agent/vendor/github.com/containerd/cgroups/control.go b/agent/vendor/github.com/containerd/cgroups/control.go new file mode 100644 index 00000000000..c83b2417f48 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/control.go @@ -0,0 +1,58 @@ +package cgroups + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + cgroupProcs = "cgroup.procs" + defaultDirPerm = 0755 +) + +// defaultFilePerm is a var so that the test framework can change the filemode +// of all files created when the tests are running. The difference between the +// tests and real world use is that files like "cgroup.procs" will exist when writing +// to a read cgroup filesystem and do not exist prior when running in the tests. 
+// this is set to a non 0 value in the test code +var defaultFilePerm = os.FileMode(0) + +type Process struct { + // Subsystem is the name of the subsystem that the process is in + Subsystem Name + // Pid is the process id of the process + Pid int + // Path is the full path of the subsystem and location that the process is in + Path string +} + +// Cgroup handles interactions with the individual groups to perform +// actions on them as them main interface to this cgroup package +type Cgroup interface { + // New creates a new cgroup under the calling cgroup + New(string, *specs.LinuxResources) (Cgroup, error) + // Add adds a process to the cgroup + Add(Process) error + // Delete removes the cgroup as a whole + Delete() error + // MoveTo moves all the processes under the calling cgroup to the provided one + // subsystems are moved one at a time + MoveTo(Cgroup) error + // Stat returns the stats for all subsystems in the cgroup + Stat(...ErrorHandler) (*Stats, error) + // Update updates all the subsystems with the provided resource changes + Update(resources *specs.LinuxResources) error + // Processes returns all the processes in a select subsystem for the cgroup + Processes(Name, bool) ([]Process, error) + // Freeze freezes or pauses all processes inside the cgroup + Freeze() error + // Thaw thaw or resumes all processes inside the cgroup + Thaw() error + // OOMEventFD returns the memory subsystem's event fd for OOM events + OOMEventFD() (uintptr, error) + // State returns the cgroups current state + State() State + // Subsystems returns all the subsystems in the cgroup + Subsystems() []Subsystem +} diff --git a/agent/vendor/github.com/containerd/cgroups/cpu.go b/agent/vendor/github.com/containerd/cgroups/cpu.go new file mode 100644 index 00000000000..036bb8fd0e5 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/cpu.go @@ -0,0 +1,120 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + specs 
"github.com/opencontainers/runtime-spec/specs-go" +) + +func NewCpu(root string) *cpuController { + return &cpuController{ + root: filepath.Join(root, string(Cpu)), + } +} + +type cpuController struct { + root string +} + +func (c *cpuController) Name() Name { + return Cpu +} + +func (c *cpuController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpuController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { + return err + } + if cpu := resources.CPU; cpu != nil { + for _, t := range []struct { + name string + ivalue *int64 + uvalue *uint64 + }{ + { + name: "rt_period_us", + uvalue: cpu.RealtimePeriod, + }, + { + name: "rt_runtime_us", + ivalue: cpu.RealtimeRuntime, + }, + { + name: "shares", + uvalue: cpu.Shares, + }, + { + name: "cfs_period_us", + uvalue: cpu.Period, + }, + { + name: "cfs_quota_us", + ivalue: cpu.Quota, + }, + } { + var value []byte + if t.uvalue != nil { + value = []byte(strconv.FormatUint(*t.uvalue, 10)) + } else if t.ivalue != nil { + value = []byte(strconv.FormatInt(*t.ivalue, 10)) + } + if value != nil { + if err := ioutil.WriteFile( + filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)), + value, + defaultFilePerm, + ); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cpuController) Update(path string, resources *specs.LinuxResources) error { + return c.Create(path, resources) +} + +func (c *cpuController) Stat(path string, stats *Stats) error { + f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat")) + if err != nil { + return err + } + defer f.Close() + // get or create the cpu field because cpuacct can also set values on this struct + stats.cpuMu.Lock() + cpu := stats.Cpu + if cpu == nil { + cpu = &CpuStat{} + stats.Cpu = cpu + } + stats.cpuMu.Unlock() + sc := bufio.NewScanner(f) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + key, v, err := parseKV(sc.Text()) + if err 
!= nil { + return err + } + switch key { + case "nr_periods": + cpu.Throttling.Periods = v + case "nr_throttled": + cpu.Throttling.ThrottledPeriods = v + case "throttled_time": + cpu.Throttling.ThrottledTime = v + } + } + return nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/cpuacct.go b/agent/vendor/github.com/containerd/cgroups/cpuacct.go new file mode 100644 index 00000000000..329234887a5 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/cpuacct.go @@ -0,0 +1,112 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +const nanosecondsInSecond = 1000000000 + +var clockTicks = getClockTicks() + +func NewCpuacct(root string) *cpuacctController { + return &cpuacctController{ + root: filepath.Join(root, string(Cpuacct)), + } +} + +type cpuacctController struct { + root string +} + +func (c *cpuacctController) Name() Name { + return Cpuacct +} + +func (c *cpuacctController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpuacctController) Stat(path string, stats *Stats) error { + user, kernel, err := c.getUsage(path) + if err != nil { + return err + } + total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage")) + if err != nil { + return err + } + percpu, err := c.percpuUsage(path) + if err != nil { + return err + } + stats.cpuMu.Lock() + cpu := stats.Cpu + if cpu == nil { + cpu = &CpuStat{} + stats.Cpu = cpu + } + stats.cpuMu.Unlock() + cpu.Usage.Total = total + cpu.Usage.User = user + cpu.Usage.Kernel = kernel + cpu.Usage.PerCpu = percpu + return nil +} + +func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { + var usage []uint64 + data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) + if err != nil { + return nil, err + } + for _, v := range strings.Fields(string(data)) { + u, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return nil, err + } + usage = append(usage, u) + } + return usage, nil 
+} + +func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) { + statPath := filepath.Join(c.Path(path), "cpuacct.stat") + data, err := ioutil.ReadFile(statPath) + if err != nil { + return 0, 0, err + } + fields := strings.Fields(string(data)) + if len(fields) != 4 { + return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath) + } + for _, t := range []struct { + index int + name string + value *uint64 + }{ + { + index: 0, + name: "user", + value: &user, + }, + { + index: 2, + name: "system", + value: &kernel, + }, + } { + if fields[t.index] != t.name { + return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath) + } + v, err := strconv.ParseUint(fields[t.index+1], 10, 64) + if err != nil { + return 0, 0, err + } + *t.value = v + } + return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/cpuset.go b/agent/vendor/github.com/containerd/cgroups/cpuset.go new file mode 100644 index 00000000000..c5bed1d15d5 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/cpuset.go @@ -0,0 +1,140 @@ +package cgroups + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewCputset(root string) *cpusetController { + return &cpusetController{ + root: filepath.Join(root, string(Cpuset)), + } +} + +type cpusetController struct { + root string +} + +func (c *cpusetController) Name() Name { + return Cpuset +} + +func (c *cpusetController) Path(path string) string { + return filepath.Join(c.root, path) +} + +func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error { + if err := c.ensureParent(c.Path(path), c.root); err != nil { + return err + } + if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { + return err + } + if err := c.copyIfNeeded(c.Path(path), 
filepath.Dir(c.Path(path))); err != nil { + return err + } + if resources.CPU != nil { + for _, t := range []struct { + name string + value *string + }{ + { + name: "cpus", + value: &resources.CPU.Cpus, + }, + { + name: "mems", + value: &resources.CPU.Mems, + }, + } { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)), + []byte(*t.value), + defaultFilePerm, + ); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) { + if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { + return + } + if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { + return + } + return cpus, mems, nil +} + +// ensureParent makes sure that the parent directory of current is created +// and populated with the proper cpus and mems files copied from +// it's parent. +func (c *cpusetController) ensureParent(current, root string) error { + parent := filepath.Dir(current) + if _, err := filepath.Rel(root, parent); err != nil { + return nil + } + if cleanPath(parent) == root { + return nil + } + // Avoid infinite recursion. 
+ if parent == current { + return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") + } + if err := c.ensureParent(parent, root); err != nil { + return err + } + if err := os.MkdirAll(current, defaultDirPerm); err != nil { + return err + } + return c.copyIfNeeded(current, parent) +} + +// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent +// directory to the current directory if the file's contents are 0 +func (c *cpusetController) copyIfNeeded(current, parent string) error { + var ( + err error + currentCpus, currentMems []byte + parentCpus, parentMems []byte + ) + if currentCpus, currentMems, err = c.getValues(current); err != nil { + return err + } + if parentCpus, parentMems, err = c.getValues(parent); err != nil { + return err + } + if isEmpty(currentCpus) { + if err := ioutil.WriteFile( + filepath.Join(current, "cpuset.cpus"), + parentCpus, + defaultFilePerm, + ); err != nil { + return err + } + } + if isEmpty(currentMems) { + if err := ioutil.WriteFile( + filepath.Join(current, "cpuset.mems"), + parentMems, + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func isEmpty(b []byte) bool { + return len(bytes.Trim(b, "\n")) == 0 +} diff --git a/agent/vendor/github.com/containerd/cgroups/devices.go b/agent/vendor/github.com/containerd/cgroups/devices.go new file mode 100644 index 00000000000..f0dca5c546d --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/devices.go @@ -0,0 +1,74 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + allowDeviceFile = "devices.allow" + denyDeviceFile = "devices.deny" + wildcard = -1 +) + +func NewDevices(root string) *devicesController { + return &devicesController{ + root: filepath.Join(root, string(Devices)), + } +} + +type devicesController struct { + root string +} + +func (d *devicesController) Name() Name { + return Devices +} + +func (d *devicesController) 
Path(path string) string { + return filepath.Join(d.root, path) +} + +func (d *devicesController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil { + return err + } + for _, device := range resources.Devices { + file := denyDeviceFile + if device.Allow { + file = allowDeviceFile + } + if err := ioutil.WriteFile( + filepath.Join(d.Path(path), file), + []byte(deviceString(device)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func (d *devicesController) Update(path string, resources *specs.LinuxResources) error { + return d.Create(path, resources) +} + +func deviceString(device specs.LinuxDeviceCgroup) string { + return fmt.Sprintf("%c %s:%s %s", + &device.Type, + deviceNumber(device.Major), + deviceNumber(device.Minor), + &device.Access, + ) +} + +func deviceNumber(number *int64) string { + if number == nil || *number == wildcard { + return "*" + } + return fmt.Sprint(*number) +} diff --git a/agent/vendor/github.com/containerd/cgroups/errors.go b/agent/vendor/github.com/containerd/cgroups/errors.go new file mode 100644 index 00000000000..d18b4b1df68 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/errors.go @@ -0,0 +1,31 @@ +package cgroups + +import ( + "errors" + "os" +) + +var ( + ErrInvalidPid = errors.New("cgroups: pid must be greater than 0") + ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist") + ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed") + ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system") + ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system") + ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") + ErrNoCgroupMountDestination = errors.New("cgroups: cannot found cgroup mount destination") +) + +// ErrorHandler is a function that handles and acts on errors +type ErrorHandler func(err error) 
error + +// IgnoreNotExist ignores any errors that are for not existing files +func IgnoreNotExist(err error) error { + if os.IsNotExist(err) { + return nil + } + return err +} + +func errPassthrough(err error) error { + return err +} diff --git a/agent/vendor/github.com/containerd/cgroups/freezer.go b/agent/vendor/github.com/containerd/cgroups/freezer.go new file mode 100644 index 00000000000..f53430b51d1 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/freezer.go @@ -0,0 +1,69 @@ +package cgroups + +import ( + "io/ioutil" + "path/filepath" + "strings" + "time" +) + +func NewFreezer(root string) *freezerController { + return &freezerController{ + root: filepath.Join(root, string(Freezer)), + } +} + +type freezerController struct { + root string +} + +func (f *freezerController) Name() Name { + return Freezer +} + +func (f *freezerController) Path(path string) string { + return filepath.Join(f.root, path) +} + +func (f *freezerController) Freeze(path string) error { + if err := f.changeState(path, Frozen); err != nil { + return err + } + return f.waitState(path, Frozen) +} + +func (f *freezerController) Thaw(path string) error { + if err := f.changeState(path, Thawed); err != nil { + return err + } + return f.waitState(path, Thawed) +} + +func (f *freezerController) changeState(path string, state State) error { + return ioutil.WriteFile( + filepath.Join(f.root, path, "freezer.state"), + []byte(strings.ToUpper(string(state))), + defaultFilePerm, + ) +} + +func (f *freezerController) state(path string) (State, error) { + current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state")) + if err != nil { + return "", err + } + return State(strings.ToLower(strings.TrimSpace(string(current)))), nil +} + +func (f *freezerController) waitState(path string, state State) error { + for { + current, err := f.state(path) + if err != nil { + return err + } + if current == state { + return nil + } + time.Sleep(1 * time.Millisecond) + } +} diff --git 
a/agent/vendor/github.com/containerd/cgroups/hierarchy.go b/agent/vendor/github.com/containerd/cgroups/hierarchy.go new file mode 100644 index 00000000000..b61660d4131 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/hierarchy.go @@ -0,0 +1,4 @@ +package cgroups + +// Hierarchy enableds both unified and split hierarchy for cgroups +type Hierarchy func() ([]Subsystem, error) diff --git a/agent/vendor/github.com/containerd/cgroups/hugetlb.go b/agent/vendor/github.com/containerd/cgroups/hugetlb.go new file mode 100644 index 00000000000..650bb069f49 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/hugetlb.go @@ -0,0 +1,92 @@ +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewHugetlb(root string) (*hugetlbController, error) { + sizes, err := hugePageSizes() + if err != nil { + return nil, err + } + + return &hugetlbController{ + root: filepath.Join(root, string(Hugetlb)), + sizes: sizes, + }, nil +} + +type hugetlbController struct { + root string + sizes []string +} + +func (h *hugetlbController) Name() Name { + return Hugetlb +} + +func (h *hugetlbController) Path(path string) string { + return filepath.Join(h.root, path) +} + +func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil { + return err + } + for _, limit := range resources.HugepageLimits { + if err := ioutil.WriteFile( + filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")), + []byte(strconv.FormatUint(limit.Limit, 10)), + defaultFilePerm, + ); err != nil { + return err + } + } + return nil +} + +func (h *hugetlbController) Stat(path string, stats *Stats) error { + stats.Hugetlb = make(map[string]HugetlbStat) + for _, size := range h.sizes { + s, err := h.readSizeStat(path, size) + if err != nil { + return err + } + 
stats.Hugetlb[size] = s + } + return nil +} + +func (h *hugetlbController) readSizeStat(path, size string) (HugetlbStat, error) { + var s HugetlbStat + for _, t := range []struct { + name string + value *uint64 + }{ + { + name: "usage_in_bytes", + value: &s.Usage, + }, + { + name: "max_usage_in_bytes", + value: &s.Max, + }, + { + name: "failcnt", + value: &s.Failcnt, + }, + } { + v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, "."))) + if err != nil { + return s, err + } + *t.value = v + } + return s, nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/memory.go b/agent/vendor/github.com/containerd/cgroups/memory.go new file mode 100644 index 00000000000..45c87962c70 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/memory.go @@ -0,0 +1,304 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewMemory(root string) *memoryController { + return &memoryController{ + root: filepath.Join(root, string(Memory)), + } +} + +type memoryController struct { + root string +} + +func (m *memoryController) Name() Name { + return Memory +} + +func (m *memoryController) Path(path string) string { + return filepath.Join(m.root, path) +} + +func (m *memoryController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Memory == nil { + return nil + } + if resources.Memory.Kernel != nil { + // Check if kernel memory is enabled + // We have to limit the kernel memory here as it won't be accounted at all + // until a limit is set on the cgroup and limit cannot be set once the + // cgroup has children, or if there are already tasks in the cgroup. 
+ for _, i := range []int64{1, -1} { + if err := ioutil.WriteFile( + filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"), + []byte(strconv.FormatInt(i, 10)), + defaultFilePerm, + ); err != nil { + return checkEBUSY(err) + } + } + } + return m.set(path, getMemorySettings(resources)) +} + +func (m *memoryController) Update(path string, resources *specs.LinuxResources) error { + if resources.Memory == nil { + return nil + } + g := func(v *int64) bool { + return v != nil && *v > 0 + } + settings := getMemorySettings(resources) + if g(resources.Memory.Limit) && g(resources.Memory.Swap) { + // if the updated swap value is larger than the current memory limit set the swap changes first + // then set the memory limit as swap must always be larger than the current limit + current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes")) + if err != nil { + return err + } + if current < uint64(*resources.Memory.Swap) { + settings[0], settings[1] = settings[1], settings[0] + } + } + return m.set(path, settings) +} + +func (m *memoryController) Stat(path string, stats *Stats) error { + f, err := os.Open(filepath.Join(m.Path(path), "memory.stat")) + if err != nil { + return err + } + defer f.Close() + stats.Memory = &MemoryStat{} + if err := m.parseStats(f, stats.Memory); err != nil { + return err + } + for _, t := range []struct { + module string + entry *MemoryEntry + }{ + { + module: "", + entry: &stats.Memory.Usage, + }, + { + module: "memsw", + entry: &stats.Memory.Swap, + }, + { + module: "kmem", + entry: &stats.Memory.Kernel, + }, + { + module: "kmem.tcp", + entry: &stats.Memory.KernelTCP, + }, + } { + for _, tt := range []struct { + name string + value *uint64 + }{ + { + name: "usage_in_bytes", + value: &t.entry.Usage, + }, + { + name: "max_usage_in_bytes", + value: &t.entry.Max, + }, + { + name: "failcnt", + value: &t.entry.Failcnt, + }, + { + name: "limit_in_bytes", + value: &t.entry.Limit, + }, + } { + parts := []string{"memory"} + if t.module != 
"" { + parts = append(parts, t.module) + } + parts = append(parts, tt.name) + v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, "."))) + if err != nil { + return err + } + *tt.value = v + } + } + return nil +} + +func (m *memoryController) OOMEventFD(path string) (uintptr, error) { + root := m.Path(path) + f, err := os.Open(filepath.Join(root, "memory.oom_control")) + if err != nil { + return 0, err + } + defer f.Close() + fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.FD_CLOEXEC, 0) + if serr != 0 { + return 0, serr + } + if err := writeEventFD(root, f.Fd(), fd); err != nil { + unix.Close(int(fd)) + return 0, err + } + return fd, nil +} + +func writeEventFD(root string, cfd, efd uintptr) error { + f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) + if err != nil { + return err + } + _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) + f.Close() + return err +} + +func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error { + var ( + raw = make(map[string]uint64) + sc = bufio.NewScanner(r) + line int + ) + for sc.Scan() { + if err := sc.Err(); err != nil { + return err + } + key, v, err := parseKV(sc.Text()) + if err != nil { + return fmt.Errorf("%d: %v", line, err) + } + raw[key] = v + line++ + } + stat.Cache = raw["cache"] + stat.RSS = raw["rss"] + stat.RSSHuge = raw["rss_huge"] + stat.MappedFile = raw["mapped_file"] + stat.Dirty = raw["dirty"] + stat.Writeback = raw["writeback"] + stat.PgPgIn = raw["pgpgin"] + stat.PgPgOut = raw["pgpgout"] + stat.PgFault = raw["pgfault"] + stat.PgMajFault = raw["pgmajfault"] + stat.InactiveAnon = raw["inactive_anon"] + stat.ActiveAnon = raw["active_anon"] + stat.InactiveFile = raw["inactive_file"] + stat.ActiveFile = raw["active_file"] + stat.Unevictable = raw["unevictable"] + stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"] + stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"] + stat.TotalCache = raw["total_cache"] + 
stat.TotalRSS = raw["total_rss"] + stat.TotalRSSHuge = raw["total_rss_huge"] + stat.TotalMappedFile = raw["total_mapped_file"] + stat.TotalDirty = raw["total_dirty"] + stat.TotalWriteback = raw["total_writeback"] + stat.TotalPgPgIn = raw["total_pgpgin"] + stat.TotalPgPgOut = raw["total_pgpgout"] + stat.TotalPgFault = raw["total_pgfault"] + stat.TotalPgMajFault = raw["total_pgmajfault"] + stat.TotalInactiveAnon = raw["total_inactive_anon"] + stat.TotalActiveAnon = raw["total_active_anon"] + stat.TotalInactiveFile = raw["total_inactive_file"] + stat.TotalActiveFile = raw["total_active_file"] + stat.TotalUnevictable = raw["total_unevictable"] + return nil +} + +func (m *memoryController) set(path string, settings []memorySettings) error { + for _, t := range settings { + if t.value != nil { + if err := ioutil.WriteFile( + filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)), + []byte(strconv.FormatInt(*t.value, 10)), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +type memorySettings struct { + name string + value *int64 +} + +func getMemorySettings(resources *specs.LinuxResources) []memorySettings { + mem := resources.Memory + var swappiness *int64 + if mem.Swappiness != nil { + v := int64(*mem.Swappiness) + swappiness = &v + } + return []memorySettings{ + { + name: "limit_in_bytes", + value: mem.Limit, + }, + { + name: "memsw.limit_in_bytes", + value: mem.Swap, + }, + { + name: "kmem.limit_in_bytes", + value: mem.Kernel, + }, + { + name: "kmem.tcp.limit_in_bytes", + value: mem.KernelTCP, + }, + { + name: "oom_control", + value: getOomControlValue(resources), + }, + { + name: "swappiness", + value: swappiness, + }, + } +} + +func checkEBUSY(err error) error { + if pathErr, ok := err.(*os.PathError); ok { + if errNo, ok := pathErr.Err.(syscall.Errno); ok { + if errNo == unix.EBUSY { + return fmt.Errorf( + "failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children") + } + 
} + } + return err +} + +func getOomControlValue(resources *specs.LinuxResources) *int64 { + if resources.DisableOOMKiller != nil && *resources.DisableOOMKiller { + i := int64(1) + return &i + } + return nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/named.go b/agent/vendor/github.com/containerd/cgroups/named.go new file mode 100644 index 00000000000..f0fbf018ef4 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/named.go @@ -0,0 +1,23 @@ +package cgroups + +import "path/filepath" + +func NewNamed(root string, name Name) *namedController { + return &namedController{ + root: root, + name: name, + } +} + +type namedController struct { + root string + name Name +} + +func (n *namedController) Name() Name { + return n.name +} + +func (n *namedController) Path(path string) string { + return filepath.Join(n.root, string(n.name), path) +} diff --git a/agent/vendor/github.com/containerd/cgroups/net_cls.go b/agent/vendor/github.com/containerd/cgroups/net_cls.go new file mode 100644 index 00000000000..b5a4802d3b9 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/net_cls.go @@ -0,0 +1,42 @@ +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewNetCls(root string) *netclsController { + return &netclsController{ + root: filepath.Join(root, string(NetCLS)), + } +} + +type netclsController struct { + root string +} + +func (n *netclsController) Name() Name { + return NetCLS +} + +func (n *netclsController) Path(path string) string { + return filepath.Join(n.root, path) +} + +func (n *netclsController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 { + return ioutil.WriteFile( + filepath.Join(n.Path(path), "net_cls_classid_u"), + 
[]byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)), + defaultFilePerm, + ) + } + return nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/net_prio.go b/agent/vendor/github.com/containerd/cgroups/net_prio.go new file mode 100644 index 00000000000..0959b8e70fb --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/net_prio.go @@ -0,0 +1,50 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewNetPrio(root string) *netprioController { + return &netprioController{ + root: filepath.Join(root, string(NetPrio)), + } +} + +type netprioController struct { + root string +} + +func (n *netprioController) Name() Name { + return NetPrio +} + +func (n *netprioController) Path(path string) string { + return filepath.Join(n.root, path) +} + +func (n *netprioController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Network != nil { + for _, prio := range resources.Network.Priorities { + if err := ioutil.WriteFile( + filepath.Join(n.Path(path), "net_prio_ifpriomap"), + formatPrio(prio.Name, prio.Priority), + defaultFilePerm, + ); err != nil { + return err + } + } + } + return nil +} + +func formatPrio(name string, prio uint32) []byte { + return []byte(fmt.Sprintf("%s %d", name, prio)) +} diff --git a/agent/vendor/github.com/containerd/cgroups/paths.go b/agent/vendor/github.com/containerd/cgroups/paths.go new file mode 100644 index 00000000000..63ac270fb62 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/paths.go @@ -0,0 +1,85 @@ +package cgroups + +import ( + "fmt" + "path/filepath" +) + +type Path func(subsystem Name) (string, error) + +func RootPath(subsysem Name) (string, error) { + return "/", nil +} + +// StaticPath returns a static path to use for all cgroups +func StaticPath(path string) Path { + return func(_ Name) 
(string, error) { + return path, nil + } +} + +// NestedPath will nest the cgroups based on the calling processes cgroup +// placing its child processes inside its own path +func NestedPath(suffix string) Path { + paths, err := parseCgroupFile("/proc/self/cgroup") + if err != nil { + return errorPath(err) + } + return existingPath(paths, suffix) +} + +// PidPath will return the correct cgroup paths for an existing process running inside a cgroup +// This is commonly used for the Load function to restore an existing container +func PidPath(pid int) Path { + paths, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + if err != nil { + return errorPath(err) + } + return existingPath(paths, "") +} + +func existingPath(paths map[string]string, suffix string) Path { + // localize the paths based on the root mount dest for nested cgroups + for n, p := range paths { + dest, err := getCgroupDestination(string(n)) + if err != nil { + return errorPath(err) + } + rel, err := filepath.Rel(dest, p) + if err != nil { + return errorPath(err) + } + if rel == "." 
{ + rel = dest + } + paths[n] = filepath.Join("/", rel) + } + return func(name Name) (string, error) { + root, ok := paths[string(name)] + if !ok { + if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok { + return "", fmt.Errorf("unable to find %q in controller set", name) + } + } + if suffix != "" { + return filepath.Join(root, suffix), nil + } + return root, nil + } +} + +func subPath(path Path, subName string) Path { + return func(name Name) (string, error) { + p, err := path(name) + if err != nil { + return "", err + } + return filepath.Join(p, subName), nil + } +} + +func errorPath(err error) Path { + return func(_ Name) (string, error) { + return "", err + } +} diff --git a/agent/vendor/github.com/containerd/cgroups/perf_event.go b/agent/vendor/github.com/containerd/cgroups/perf_event.go new file mode 100644 index 00000000000..0fa43ec1f68 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/perf_event.go @@ -0,0 +1,21 @@ +package cgroups + +import "path/filepath" + +func NewPerfEvent(root string) *PerfEventController { + return &PerfEventController{ + root: filepath.Join(root, string(PerfEvent)), + } +} + +type PerfEventController struct { + root string +} + +func (p *PerfEventController) Name() Name { + return PerfEvent +} + +func (p *PerfEventController) Path(path string) string { + return filepath.Join(p.root, path) +} diff --git a/agent/vendor/github.com/containerd/cgroups/pids.go b/agent/vendor/github.com/containerd/cgroups/pids.go new file mode 100644 index 00000000000..bdcc10a9071 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/pids.go @@ -0,0 +1,69 @@ +package cgroups + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewPids(root string) *pidsController { + return &pidsController{ + root: filepath.Join(root, string(Pids)), + } +} + +type pidsController struct { + root string +} + +func (p *pidsController) Name() Name { + return 
Pids +} + +func (p *pidsController) Path(path string) string { + return filepath.Join(p.root, path) +} + +func (p *pidsController) Create(path string, resources *specs.LinuxResources) error { + if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { + return err + } + if resources.Pids != nil && resources.Pids.Limit > 0 { + return ioutil.WriteFile( + filepath.Join(p.Path(path), "pids.max"), + []byte(strconv.FormatInt(resources.Pids.Limit, 10)), + defaultFilePerm, + ) + } + return nil +} + +func (p *pidsController) Update(path string, resources *specs.LinuxResources) error { + return p.Create(path, resources) +} + +func (p *pidsController) Stat(path string, stats *Stats) error { + current, err := readUint(filepath.Join(p.Path(path), "pids.current")) + if err != nil { + return err + } + var max uint64 + maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max")) + if err != nil { + return err + } + if maxS := strings.TrimSpace(string(maxData)); maxS != "max" { + if max, err = parseUint(maxS, 10, 64); err != nil { + return err + } + } + stats.Pids = &PidsStat{ + Current: current, + Limit: max, + } + return nil +} diff --git a/agent/vendor/github.com/containerd/cgroups/state.go b/agent/vendor/github.com/containerd/cgroups/state.go new file mode 100644 index 00000000000..f1d7b7b6081 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/state.go @@ -0,0 +1,12 @@ +package cgroups + +// State is a type that represents the state of the current cgroup +type State string + +const ( + Unknown State = "" + Thawed State = "thawed" + Frozen State = "frozen" + Freezing State = "freezing" + Deleted State = "deleted" +) diff --git a/agent/vendor/github.com/containerd/cgroups/stats.go b/agent/vendor/github.com/containerd/cgroups/stats.go new file mode 100644 index 00000000000..47fbfa96b5d --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/stats.go @@ -0,0 +1,109 @@ +package cgroups + +import "sync" + +type Stats struct { + cpuMu sync.Mutex 
+ + Hugetlb map[string]HugetlbStat + Pids *PidsStat + Cpu *CpuStat + Memory *MemoryStat + Blkio *BlkioStat +} + +type HugetlbStat struct { + Usage uint64 + Max uint64 + Failcnt uint64 +} + +type PidsStat struct { + Current uint64 + Limit uint64 +} + +type CpuStat struct { + Usage CpuUsage + Throttling Throttle +} + +type CpuUsage struct { + // Units: nanoseconds. + Total uint64 + PerCpu []uint64 + Kernel uint64 + User uint64 +} + +type Throttle struct { + Periods uint64 + ThrottledPeriods uint64 + ThrottledTime uint64 +} + +type MemoryStat struct { + Cache uint64 + RSS uint64 + RSSHuge uint64 + MappedFile uint64 + Dirty uint64 + Writeback uint64 + PgPgIn uint64 + PgPgOut uint64 + PgFault uint64 + PgMajFault uint64 + InactiveAnon uint64 + ActiveAnon uint64 + InactiveFile uint64 + ActiveFile uint64 + Unevictable uint64 + HierarchicalMemoryLimit uint64 + HierarchicalSwapLimit uint64 + TotalCache uint64 + TotalRSS uint64 + TotalRSSHuge uint64 + TotalMappedFile uint64 + TotalDirty uint64 + TotalWriteback uint64 + TotalPgPgIn uint64 + TotalPgPgOut uint64 + TotalPgFault uint64 + TotalPgMajFault uint64 + TotalInactiveAnon uint64 + TotalActiveAnon uint64 + TotalInactiveFile uint64 + TotalActiveFile uint64 + TotalUnevictable uint64 + + Usage MemoryEntry + Swap MemoryEntry + Kernel MemoryEntry + KernelTCP MemoryEntry +} + +type MemoryEntry struct { + Limit uint64 + Usage uint64 + Max uint64 + Failcnt uint64 +} + +type BlkioStat struct { + IoServiceBytesRecursive []BlkioEntry + IoServicedRecursive []BlkioEntry + IoQueuedRecursive []BlkioEntry + IoServiceTimeRecursive []BlkioEntry + IoWaitTimeRecursive []BlkioEntry + IoMergedRecursive []BlkioEntry + IoTimeRecursive []BlkioEntry + SectorsRecursive []BlkioEntry +} + +type BlkioEntry struct { + Op string + Device string + Major uint64 + Minor uint64 + Value uint64 +} diff --git a/agent/vendor/github.com/containerd/cgroups/subsystem.go b/agent/vendor/github.com/containerd/cgroups/subsystem.go new file mode 100644 index 
00000000000..aab403b8a47 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/subsystem.go @@ -0,0 +1,94 @@ +package cgroups + +import ( + "fmt" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Name is a typed name for a cgroup subsystem +type Name string + +const ( + Devices Name = "devices" + Hugetlb Name = "hugetlb" + Freezer Name = "freezer" + Pids Name = "pids" + NetCLS Name = "net_cls" + NetPrio Name = "net_prio" + PerfEvent Name = "perf_event" + Cpuset Name = "cpuset" + Cpu Name = "cpu" + Cpuacct Name = "cpuacct" + Memory Name = "memory" + Blkio Name = "blkio" +) + +// Subsystems returns a complete list of the default cgroups +// avaliable on most linux systems +func Subsystems() []Name { + n := []Name{ + Hugetlb, + Freezer, + Pids, + NetCLS, + NetPrio, + PerfEvent, + Cpuset, + Cpu, + Cpuacct, + Memory, + Blkio, + } + if !isUserNS { + n = append(n, Devices) + } + return n +} + +type Subsystem interface { + Name() Name +} + +type pather interface { + Subsystem + Path(path string) string +} + +type creator interface { + Subsystem + Create(path string, resources *specs.LinuxResources) error +} + +type deleter interface { + Subsystem + Delete(path string) error +} + +type stater interface { + Subsystem + Stat(path string, stats *Stats) error +} + +type updater interface { + Subsystem + Update(path string, resources *specs.LinuxResources) error +} + +// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy +func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy { + return func() ([]Subsystem, error) { + subsystems, err := baseHierarchy() + if err != nil { + return nil, err + } + for _, s := range subsystems { + if s.Name() == subsystem { + return []Subsystem{ + s, + }, nil + } + } + return nil, fmt.Errorf("unable to find subsystem %s", subsystem) + } +} diff --git a/agent/vendor/github.com/containerd/cgroups/systemd.go b/agent/vendor/github.com/containerd/cgroups/systemd.go new file mode 100644 
index 00000000000..5e052a82188 --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/systemd.go @@ -0,0 +1,101 @@ +package cgroups + +import ( + "fmt" + "path/filepath" + "strings" + "sync" + + systemdDbus "github.com/coreos/go-systemd/dbus" + "github.com/godbus/dbus" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + SystemdDbus Name = "systemd" + defaultSlice = "system.slice" +) + +func Systemd() ([]Subsystem, error) { + root, err := v1MountPoint() + if err != nil { + return nil, err + } + defaultSubsystems, err := defaults(root) + if err != nil { + return nil, err + } + s, err := NewSystemd(root) + if err != nil { + return nil, err + } + // make sure the systemd controller is added first + return append([]Subsystem{s}, defaultSubsystems...), nil +} + +func Slice(slice, name string) Path { + if slice == "" { + slice = defaultSlice + } + return func(subsystem Name) (string, error) { + return filepath.Join(slice, unitName(name)), nil + } +} + +func NewSystemd(root string) (*SystemdController, error) { + conn, err := systemdDbus.New() + if err != nil { + return nil, err + } + return &SystemdController{ + root: root, + conn: conn, + }, nil +} + +type SystemdController struct { + mu sync.Mutex + conn *systemdDbus.Conn + root string +} + +func (s *SystemdController) Name() Name { + return SystemdDbus +} + +func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error { + slice, name := splitName(path) + properties := []systemdDbus.Property{ + systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropWants(slice), + newProperty("DefaultDependencies", false), + newProperty("Delegate", true), + newProperty("MemoryAccounting", true), + newProperty("CPUAccounting", true), + newProperty("BlockIOAccounting", true), + } + _, err := s.conn.StartTransientUnit(name, "replace", properties, nil) + return err +} + +func (s *SystemdController) Delete(path string) error { + _, name := splitName(path) + _, 
err := s.conn.StopUnit(name, "replace", nil) + return err +} + +func newProperty(name string, units interface{}) systemdDbus.Property { + return systemdDbus.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +func unitName(name string) string { + return fmt.Sprintf("%s.slice", name) +} + +func splitName(path string) (slice string, unit string) { + slice, unit = filepath.Split(path) + return strings.TrimSuffix(slice, "/"), unit +} diff --git a/agent/vendor/github.com/containerd/cgroups/ticks.go b/agent/vendor/github.com/containerd/cgroups/ticks.go new file mode 100644 index 00000000000..e907ab364ed --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/ticks.go @@ -0,0 +1,5 @@ +package cgroups + +func getClockTicks() uint64 { + return 100 +} diff --git a/agent/vendor/github.com/containerd/cgroups/utils.go b/agent/vendor/github.com/containerd/cgroups/utils.go new file mode 100644 index 00000000000..823568b900a --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/utils.go @@ -0,0 +1,278 @@ +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + units "github.com/docker/go-units" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +var isUserNS = runningInUserNS() + +// runningInUserNS detects whether we are currently running in a user namespace. +// Copied from github.com/lxc/lxd/shared/util.go +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. 
+ */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} + +// defaults returns all known groups +func defaults(root string) ([]Subsystem, error) { + h, err := NewHugetlb(root) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + s := []Subsystem{ + NewNamed(root, "systemd"), + NewFreezer(root), + NewPids(root), + NewNetCls(root), + NewNetPrio(root), + NewPerfEvent(root), + NewCputset(root), + NewCpu(root), + NewCpuacct(root), + NewMemory(root), + NewBlkio(root), + } + // only add the devices cgroup if we are not in a user namespace + // because modifications are not allowed + if !isUserNS { + s = append(s, NewDevices(root)) + } + // add the hugetlb cgroup if error wasn't due to missing hugetlb + // cgroup support on the host + if err == nil { + s = append(s, h) + } + return s, nil +} + +// remove will remove a cgroup path handling EAGAIN and EBUSY errors and +// retrying the remove after a exp timeout +func remove(path string) error { + for i := 0; i < 5; i++ { + delay := 10 * time.Millisecond + if i != 0 { + time.Sleep(delay) + delay *= 2 + } + if err := os.RemoveAll(path); err == nil { + return nil + } + } + return fmt.Errorf("cgroups: unable to remove path %q", path) +} + +// readPids will read all the pids in a cgroup by the provided path +func readPids(path string, subsystem Name) ([]Process, error) { + f, err := os.Open(filepath.Join(path, cgroupProcs)) + if err != nil { + return nil, err + } + defer f.Close() + var ( + out []Process + s = bufio.NewScanner(f) + ) + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, Process{ + Pid: pid, + Subsystem: subsystem, + Path: path, + }) + } + } + return out, nil +} + +func hugePageSizes() ([]string, error) { + var ( + pageSizes []string + sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"} + ) + files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") + if err != nil { + return nil, err + } + 
for _, st := range files { + nameArray := strings.Split(st.Name(), "-") + pageSize, err := units.RAMInBytes(nameArray[1]) + if err != nil { + return nil, err + } + pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)) + } + return pageSizes, nil +} + +func readUint(path string) (uint64, error) { + v, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return parseUint(strings.TrimSpace(string(v)), 10, 64) +} + +func parseUint(s string, base, bitSize int) (uint64, error) { + v, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. Handle negative values lesser than MinInt64 + if intErr == nil && intValue < 0 { + return 0, nil + } else if intErr != nil && + intErr.(*strconv.NumError).Err == strconv.ErrRange && + intValue < 0 { + return 0, nil + } + return 0, err + } + return v, nil +} + +func parseKV(raw string) (string, uint64, error) { + parts := strings.Fields(raw) + switch len(parts) { + case 2: + v, err := parseUint(parts[1], 10, 64) + if err != nil { + return "", 0, err + } + return parts[0], v, nil + default: + return "", 0, ErrInvalidFormat + } +} + +func parseCgroupFile(path string) (map[string]string, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return parseCgroupFromReader(f) +} + +func parseCgroupFromReader(r io.Reader) (map[string]string, error) { + var ( + cgroups = make(map[string]string) + s = bufio.NewScanner(r) + ) + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + var ( + text = s.Text() + parts = strings.SplitN(text, ":", 3) + ) + if len(parts) < 3 { + return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text) + } + for _, subs := range strings.Split(parts[1], ",") { + cgroups[subs] = parts[2] + } + } + return cgroups, nil +} + +func 
getCgroupDestination(subsystem string) (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + fields := strings.Fields(s.Text()) + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + if opt == subsystem { + return fields[3], nil + } + } + } + return "", ErrNoCgroupMountDestination +} + +func pathers(subystems []Subsystem) []pather { + var out []pather + for _, s := range subystems { + if p, ok := s.(pather); ok { + out = append(out, p) + } + } + return out +} + +func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error { + if c, ok := s.(creator); ok { + p, err := path(s.Name()) + if err != nil { + return err + } + if err := c.Create(p, resources); err != nil { + return err + } + } else if c, ok := s.(pather); ok { + p, err := path(s.Name()) + if err != nil { + return err + } + // do the default create if the group does not have a custom one + if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil { + return err + } + } + return nil +} + +func cleanPath(path string) string { + if path == "" { + return "" + } + path = filepath.Clean(path) + if !filepath.IsAbs(path) { + path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path)) + } + return filepath.Clean(path) +} diff --git a/agent/vendor/github.com/containerd/cgroups/v1.go b/agent/vendor/github.com/containerd/cgroups/v1.go new file mode 100644 index 00000000000..f6608f7c72f --- /dev/null +++ b/agent/vendor/github.com/containerd/cgroups/v1.go @@ -0,0 +1,65 @@ +package cgroups + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strings" +) + +// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy +func V1() ([]Subsystem, error) { + root, err := v1MountPoint() + if err != nil { + return nil, err + } + subsystems, err := defaults(root) 
+ if err != nil { + return nil, err + } + var enabled []Subsystem + for _, s := range pathers(subsystems) { + // check and remove the default groups that do not exist + if _, err := os.Lstat(s.Path("/")); err == nil { + enabled = append(enabled, s) + } + } + return enabled, nil +} + +// v1MountPoint returns the mount point where the cgroup +// mountpoints are mounted in a single hiearchy +func v1MountPoint() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return "", err + } + var ( + text = scanner.Text() + fields = strings.Split(text, " ") + // safe as mountinfo encodes mountpoints with spaces as \040. + index = strings.Index(text, " - ") + postSeparatorFields = strings.Fields(text[index+3:]) + numPostFields = len(postSeparatorFields) + ) + // this is an error as we can't detect if the mount is for "cgroup" + if numPostFields == 0 { + return "", fmt.Errorf("Found no fields post '-' in %q", text) + } + if postSeparatorFields[0] == "cgroup" { + // check that the mount is properly formated. 
+ if numPostFields < 3 { + return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + return filepath.Dir(fields[4]), nil + } + } + return "", ErrMountPointNotExist +} diff --git a/agent/vendor/github.com/containerd/continuity/AUTHORS b/agent/vendor/github.com/containerd/continuity/AUTHORS new file mode 100644 index 00000000000..4043394cc50 --- /dev/null +++ b/agent/vendor/github.com/containerd/continuity/AUTHORS @@ -0,0 +1,16 @@ +Aaron Lehmann +Akash Gupta +Akihiro Suda +Andrew Pennebaker +Brandon Philips +Christopher Jones +Daniel, Dao Quang Minh +Derek McGowan +Edward Pilatowicz +Ian Campbell +Justin Cormack +Justin Cummins +Phil Estes +Stephen J Day +Tobias Klauser +Tonis Tiigi diff --git a/agent/vendor/github.com/containerd/continuity/LICENSE b/agent/vendor/github.com/containerd/continuity/LICENSE new file mode 100644 index 00000000000..584149b6ee2 --- /dev/null +++ b/agent/vendor/github.com/containerd/continuity/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/agent/vendor/github.com/containerd/continuity/pathdriver/path_driver.go new file mode 100644 index 00000000000..b0d5a6b5670 --- /dev/null +++ b/agent/vendor/github.com/containerd/continuity/pathdriver/path_driver.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pathdriver + +import ( + "path/filepath" +) + +// PathDriver provides all of the path manipulation functions in a common +// interface. The context should call these and never use the `filepath` +// package or any other package to manipulate paths. +type PathDriver interface { + Join(paths ...string) string + IsAbs(path string) bool + Rel(base, target string) (string, error) + Base(path string) string + Dir(path string) string + Clean(path string) string + Split(path string) (dir, file string) + Separator() byte + Abs(path string) (string, error) + Walk(string, filepath.WalkFunc) error + FromSlash(path string) string + ToSlash(path string) string + Match(pattern, name string) (matched bool, err error) +} + +// pathDriver is a simple default implementation calls the filepath package. +type pathDriver struct{} + +// LocalPathDriver is the exported pathDriver struct for convenience. 
+var LocalPathDriver PathDriver = &pathDriver{} + +func (*pathDriver) Join(paths ...string) string { + return filepath.Join(paths...) +} + +func (*pathDriver) IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +func (*pathDriver) Rel(base, target string) (string, error) { + return filepath.Rel(base, target) +} + +func (*pathDriver) Base(path string) string { + return filepath.Base(path) +} + +func (*pathDriver) Dir(path string) string { + return filepath.Dir(path) +} + +func (*pathDriver) Clean(path string) string { + return filepath.Clean(path) +} + +func (*pathDriver) Split(path string) (dir, file string) { + return filepath.Split(path) +} + +func (*pathDriver) Separator() byte { + return filepath.Separator +} + +func (*pathDriver) Abs(path string) (string, error) { + return filepath.Abs(path) +} + +// Note that filepath.Walk calls os.Stat, so if the context wants to +// to call Driver.Stat() for Walk, they need to create a new struct that +// overrides this method. +func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error { + return filepath.Walk(root, walkFn) +} + +func (*pathDriver) FromSlash(path string) string { + return filepath.FromSlash(path) +} + +func (*pathDriver) ToSlash(path string) string { + return filepath.ToSlash(path) +} + +func (*pathDriver) Match(pattern, name string) (bool, error) { + return filepath.Match(pattern, name) +} diff --git a/agent/vendor/github.com/containernetworking/cni/LICENSE b/agent/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/agent/vendor/github.com/containernetworking/cni/libcni/api.go b/agent/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 00000000000..0f14d3427e9 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,497 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // A cache directory in which to library data. 
Defaults to CacheDir + CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + Plugins []*NetworkConfig + Bytes []byte +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec +} + +// CNIConfig implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. 
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + + return injectRuntimeConfig(orig, rt) +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. +// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. 
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +func getResultCacheFilePath(netName string, rt *RuntimeConf) string { + cacheDir := rt.CacheDir + if cacheDir == "" { + cacheDir = CacheDir + } + return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)) +} + +func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error { + data, err := json.Marshal(result) + if err != nil { + return err + } + fname := getResultCacheFilePath(netName, rt) + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + return ioutil.WriteFile(fname, data, 0600) +} + +func delCachedResult(netName string, rt *RuntimeConf) error { + fname := getResultCacheFilePath(netName, rt) + return os.Remove(fname) +} + +func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname := getResultCacheFilePath(netName, rt) + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Read the version of the cached result + decoder := version.ConfigDecoder{} + resultCniVersion, err := decoder.Decode(data) + if err != nil { + return nil, err + } + + // Ensure we can understand the 
result + result, err := version.NewResult(resultCniVersion, data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil && resultCniVersion != cniVersion { + return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err) + } + return result, err +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// previous AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// previous AddNetwork() operation for a network, or an error. 
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, err + } + } + + if err = setCachedResult(result, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if 
gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + _ = delCachedResult(list.Name, rt) + + return nil +} + +// AddNetwork executes the plugin with the ADD command +func (c 
*CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = setCachedResult(result, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = delCachedResult(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that 
a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. +// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + pluginPath, err := invoke.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// 
GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/agent/vendor/github.com/containernetworking/cni/libcni/conf.go b/agent/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 00000000000..ea56c509d01 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,268 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes} + if err := json.Unmarshal(bytes, &conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %s", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %s", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 
disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, 
NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %s", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. 
+ // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 00000000000..913528c1d59 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (_ *inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overrided, so we must put the + // custom values in the end to avoid being overrided by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. 
+ env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. +func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 00000000000..8defe4dd398 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + 
return &DelegateArgs{ + Command: action, + } +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 00000000000..8e6d30b8290 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,144 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "fmt" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" +) + +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. 
+type Exec interface { + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + // Plugin must return result in same version as specified in netconf + versionDecoder := &version.ConfigDecoder{} + 
confVersion, err := versionDecoder.Decode(netconf) + if err != nil { + return nil, err + } + + return version.NewResult(confVersion, stdoutBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. 
+type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 00000000000..e815404c859 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,43 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "path/filepath" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 00000000000..9bcfb455367 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. 
+var ExecutableFileExtensions = []string{""} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 00000000000..7665125b133 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 00000000000..ad8498ba27d --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,62 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = e.Stderr + if err := c.Run(); err != nil { + return nil, pluginErr(err, stdout.Bytes()) + } + + return stdout.Bytes(), nil +} + +func pluginErr(err error, output []byte) error { + if _, ok := err.(*exec.ExitError); ok { + emsg := types.Error{} + if len(output) == 0 { + emsg.Msg = "netplugin failed with no error message" + } else if perr := json.Unmarshal(output, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) + } + return &emsg + } + + return err +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 00000000000..53256167fad --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,140 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" +) + +const ImplementedSpecVersion string = "0.2.0" + +var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + return result, nil +} + +func GetResult(r types.Result) (*Result, error) { + // We expect version 0.1.0/0.2.0 results + result020, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return ImplementedSpecVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + for _, supportedVersion := range SupportedVersions { + if version == supportedVersion { + r.CNIVersion = version + return r, nil + } + } + return nil, fmt.Errorf("cannot convert version %q to %s", SupportedVersions, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer 
io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where +// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the +// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. +func (r *Result) String() string { + var str string + if r.IP4 != nil { + str = fmt.Sprintf("IP4:%+v, ", *r.IP4) + } + if r.IP6 != nil { + str += fmt.Sprintf("IP6:%+v, ", *r.IP6) + } + return fmt.Sprintf("%sDNS:%+v", str, r.DNS) +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route `json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/types/args.go b/agent/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 00000000000..bd8640fc969 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,112 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("Boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." 
+type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + keyFieldIface := keyField.Addr().Interface() + u, ok := keyFieldIface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldIface))} + } + err := u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/agent/vendor/github.com/containernetworking/cni/pkg/types/current/types.go new file mode 100644 index 00000000000..7267a2e6d1f --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/types/current/types.go @@ -0,0 +1,293 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package current + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/020" +) + +const ImplementedSpecVersion string = "0.4.0" + +var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + return result, nil +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +var resultConverters = []struct { + versions []string + convert func(types.Result) (*Result, error) +}{ + {types020.SupportedVersions, convertFrom020}, + {SupportedVersions, convertFrom030}, +} + +func convertFrom020(result types.Result) (*Result, error) { + oldResult, err := types020.GetResult(result) + if err != nil { + return nil, err + } + + newResult := &Result{ + CNIVersion: ImplementedSpecVersion, + DNS: oldResult.DNS, + Routes: []*types.Route{}, + } + + if oldResult.IP4 != nil { + newResult.IPs = append(newResult.IPs, &IPConfig{ + Version: "4", + Address: oldResult.IP4.IP, + Gateway: oldResult.IP4.Gateway, + }) + for _, route := range oldResult.IP4.Routes { + newResult.Routes = append(newResult.Routes, &types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } + } + + if 
oldResult.IP6 != nil { + newResult.IPs = append(newResult.IPs, &IPConfig{ + Version: "6", + Address: oldResult.IP6.IP, + Gateway: oldResult.IP6.Gateway, + }) + for _, route := range oldResult.IP6.Routes { + newResult.Routes = append(newResult.Routes, &types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } + } + + return newResult, nil +} + +func convertFrom030(result types.Result) (*Result, error) { + newResult, ok := result.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + newResult.CNIVersion = ImplementedSpecVersion + return newResult, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + version := result.Version() + for _, converter := range resultConverters { + for _, supportedVersion := range converter.versions { + if version == supportedVersion { + return converter.convert(result) + } + } + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +// Convert to the older 0.2.0 CNI spec Result type +func (r *Result) convertTo020() (*types020.Result, error) { + oldResult := &types020.Result{ + CNIVersion: types020.ImplementedSpecVersion, + DNS: r.DNS, + } + + for _, ip := range r.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if ip.Version == "4" && oldResult.IP4 == nil { + oldResult.IP4 = &types020.IPConfig{ + IP: ip.Address, + Gateway: ip.Gateway, + } + } else if ip.Version == "6" && oldResult.IP6 == nil { + oldResult.IP6 = &types020.IPConfig{ + IP: ip.Address, + Gateway: ip.Gateway, + } + } + + if oldResult.IP4 != nil && oldResult.IP6 != nil { + break + } + } + + for _, 
route := range r.Routes { + is4 := route.Dst.IP.To4() != nil + if is4 && oldResult.IP4 != nil { + oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } else if !is4 && oldResult.IP6 != nil { + oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } + } + + if oldResult.IP4 == nil && oldResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return oldResult, nil +} + +func (r *Result) Version() string { + return ImplementedSpecVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + switch version { + case "0.3.0", "0.3.1", ImplementedSpecVersion: + r.CNIVersion = version + return r, nil + case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: + return r.convertTo020() + } + return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,] DNS: $3" where +// $1 represents the receiver's Interfaces, $2 represents the receiver's IP addresses and $3 the +// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. 
+func (r *Result) String() string { + var str string + if len(r.Interfaces) > 0 { + str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces) + } + if len(r.IPs) > 0 { + str += fmt.Sprintf("IP:%+v, ", r.IPs) + } + if len(r.Routes) > 0 { + str += fmt.Sprintf("Routes:%+v, ", r.Routes) + } + return fmt.Sprintf("%sDNS:%+v", str, r.DNS) +} + +// Convert this old version result to the current CNI version result +func (r *Result) Convert() (*Result, error) { + return r, nil +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git 
a/agent/vendor/github.com/containernetworking/cni/pkg/types/types.go b/agent/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 00000000000..d0d11006a05 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,199 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. 
+type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. +type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +type ResultFactoryFunc func([]byte) (Result, error) + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error + + // Returns a JSON string representation of the result + String() string +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +// Well known error codes +// 
see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} + +// NotImplementedError is used to indicate that a method is not implemented for the given platform +var NotImplementedError = errors.New("Not Implemented") diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 00000000000..3cca58bbeb8 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,37 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %s", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 00000000000..1df427243f3 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %s", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion 
parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { + return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 00000000000..25c3810b2aa 
--- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/agent/vendor/github.com/containernetworking/cni/pkg/version/version.go b/agent/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 00000000000..8f3508e61f3 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,83 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/020" + "github.com/containernetworking/cni/pkg/types/current" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return "0.4.0" +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. +var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") + +var resultFactories = []struct { + supportedVersions []string + newResult types.ResultFactoryFunc +}{ + {current.SupportedVersions, current.NewResult}, + {types020.SupportedVersions, types020.NewResult}, +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. 
+func NewResult(version string, resultBytes []byte) (types.Result, error) { + reconciler := &Reconciler{} + for _, resultFactory := range resultFactories { + err := reconciler.CheckRaw(version, resultFactory.supportedVersions) + if err == nil { + // Result supports this version + return resultFactory.newResult(resultBytes) + } + } + + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %v", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %v", err) + } + + return nil +} diff --git a/agent/vendor/github.com/containernetworking/plugins/LICENSE b/agent/vendor/github.com/containernetworking/plugins/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md new file mode 100644 index 00000000000..1e265c7a011 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -0,0 +1,41 @@ +### Namespaces, Threads, and Go +On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. + +### Namespace Switching +Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. + +Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. + +For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. 
First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. + +### Do() The Recommended Thing +The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: + +```go +err = targetNs.Do(func(hostNs ns.NetNS) error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "dummy0", + }, + } + return netlink.LinkAdd(dummy) +}) +``` + +Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. + +When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. 
+ +In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. + + +### Creating network namespaces +Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. + + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 00000000000..a34f97170e3 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,229 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. + // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. 
+ // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = 0x6e736673 + PROCFS_MAGIC = 0x9fa0 +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return 
ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. + runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. + var innerError error + go func() { + defer wg.Done() + runtime.LockOSThread() + innerError = containedCall(hostNS) + }() + wg.Wait() + + return innerError +} + +// WithNetNSPath executes the passed closure under the given network +// namespace, restoring the original namespace afterwards. 
+func WithNetNSPath(nspath string, toRun func(NetNS) error) error { + ns, err := GetNS(nspath) + if err != nil { + return err + } + defer ns.Close() + return ns.Do(toRun) +} diff --git a/agent/vendor/github.com/containernetworking/plugins/plugins/main/windows/CONTRIBUTORS.md b/agent/vendor/github.com/containernetworking/plugins/plugins/main/windows/CONTRIBUTORS.md new file mode 100644 index 00000000000..ea050a8ede6 --- /dev/null +++ b/agent/vendor/github.com/containernetworking/plugins/plugins/main/windows/CONTRIBUTORS.md @@ -0,0 +1,6 @@ +# Contributors +This is the official list of the Windows CNI network plugins contributors: + - @rakelkar + - @madhanrm + - @thxCode + - @nagiesek \ No newline at end of file diff --git a/agent/vendor/github.com/coreos/go-systemd/LICENSE b/agent/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/agent/vendor/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 00000000000..c1694fb522e --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,213 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. 
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. 
+func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. 
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/methods.go b/agent/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 00000000000..ab17f7cc75a --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,565 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "path" + "strconv" + + "github.com/godbus/dbus" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. 
+// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. 
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// ResetFailedUnit resets the "failed" state of a specific unit. 
+func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the unit name and returns all of its dbus object properties. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetServiceProperty returns property for given service name and property name +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.getProperty(service, 
"org.freedesktop.systemd1.Service", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. 
whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// ListUnitsFiltered returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// ListUnitsByPatterns returns an array with units. +// It takes a list of units' statuses and names to filter. 
+// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// ListUnitsByNames returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, but not patterns. +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) + if err != nil { + return nil, err + } + + return files, nil +} + +// ListUnitFiles returns an array of all available units on disk. +func (c *Conn) ListUnitFiles() ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. 
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). 
+// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). 
+// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// MaskUnitFiles masks one or more units in the system +// +// It takes three arguments: +// * list of units to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) 
+// * force flag +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// UnmaskUnitFiles unmasks one or more units in the system +// +// It takes two arguments: +// * list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/properties.go b/agent/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 00000000000..6c818958763 --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. 
+// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/set.go b/agent/vendor/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 00000000000..f92e6fbed1e --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/agent/vendor/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 00000000000..996451445c0 --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,250 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). 
+func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. 
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). 
That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + c.subscriber.updateCh = updateCh + c.subscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + + if c.shouldIgnore(path) { + return + } + + info, err := c.GetUnitProperties(string(path)) + if err != nil { + select { + case c.subscriber.errCh <- err: + default: + } + } + + name := info["Id"].(string) + substate := info["SubState"].(string) + + update := &SubStateUpdate{name, substate} + select { + case c.subscriber.updateCh <- update: + default: + select { + case c.subscriber.errCh <- errors.New("update channel full!"): + default: + } + } + + c.updateIgnore(path, info) +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. 
a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). + +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + c.cleanIgnore() + + // unit is unloaded - it will trigger bad systemd dbus behavior + if info["LoadState"].(string) == "not-found" { + c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subscriber.cleanIgnore < now { + c.subscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subscriber.ignore { + if t < now { + delete(c.subscriber.ignore, p) + } + } + } +} diff --git a/agent/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/agent/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 00000000000..5b408d5847a --- /dev/null +++ b/agent/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/agent/vendor/github.com/davecgh/go-spew/LICENSE b/agent/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000000..c836416192d --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/bypass.go b/agent/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 00000000000..8a4a6589a2d --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. 
+ // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. 
+// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/agent/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000000..1fe3cf3d5d1 --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/common.go b/agent/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000000..7c519ff47ac --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. 
+var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? 
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. 
+ num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. 
+func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/config.go b/agent/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000000..2e3d22f3120 --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. 
+// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. 
See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. 
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. 
+// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/doc.go b/agent/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000000..aacaac6f1e1 --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. 
+ + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. 
+ +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/dump.go b/agent/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000000..df1d582a728 --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. 
It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/format.go b/agent/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000000..c49875bacbb --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. 
+ for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. 
It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/agent/vendor/github.com/davecgh/go-spew/spew/spew.go b/agent/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 00000000000..32c0e338825 --- /dev/null +++ b/agent/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/agent/vendor/github.com/deniswernert/udev/.gitignore b/agent/vendor/github.com/deniswernert/udev/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/agent/vendor/github.com/deniswernert/udev/LICENSE b/agent/vendor/github.com/deniswernert/udev/LICENSE new file mode 100644 index 00000000000..91dbc4a7e6f --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 deniswernert + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of 
the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/agent/vendor/github.com/deniswernert/udev/README.md b/agent/vendor/github.com/deniswernert/udev/README.md new file mode 100644 index 00000000000..924c59b2648 --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/README.md @@ -0,0 +1,4 @@ +udev +==== + +Aims to provide a standalone udev replacement diff --git a/agent/vendor/github.com/deniswernert/udev/modalias.go b/agent/vendor/github.com/deniswernert/udev/modalias.go new file mode 100644 index 00000000000..307d40457fe --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/modalias.go @@ -0,0 +1,27 @@ +package udev + +import ( + "errors" + "strings" +) + +// ModAlias abstracts a MODALIAS identifier string +type ModAlias struct { + Type string + Value string +} + +// ParseModAlias parses a MODALIAS identifier +func ParseModAlias(input string) (*ModAlias, error) { + bits := strings.SplitN(input, ":", 2) + if len(bits) != 2 { + return nil, errors.New("Invalid string prefix") + } + + modalias := &ModAlias{ + Type: bits[0], + Value: bits[1], + } + + return modalias, nil +} diff --git a/agent/vendor/github.com/deniswernert/udev/monitor.go b/agent/vendor/github.com/deniswernert/udev/monitor.go new file mode 100644 index 00000000000..4369ae23fd9 --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/monitor.go @@ -0,0 +1,98 @@ +package udev + +import ( + "os" + "syscall" +) + +// UDevMonitor is a UDev netlink socket +type UDevMonitor struct { + Fd int + Address syscall.SockaddrNetlink 
+} + +// NewMonitor creates and connects a new monitor +func NewMonitor() (mon *UDevMonitor, err error) { + mon = new(UDevMonitor) + if err = mon.Connect(); err != nil { + mon = nil + } + return +} + +// Connect connects the monitoring socket +func (mon *UDevMonitor) Connect() (err error) { + mon.Fd, err = syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_DGRAM, syscall.NETLINK_KOBJECT_UEVENT) + if nil != err { + return + } + + mon.Address = syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Groups: 4294967295, + Pid: uint32(os.Getpid()), + } + + err = syscall.Bind(mon.Fd, &mon.Address) + if nil != err { + syscall.Close(mon.Fd) + } + + return +} + +// Close closes the monitor socket +func (mon *UDevMonitor) Close() error { + return syscall.Close(mon.Fd) +} + +func (mon *UDevMonitor) Read(b []byte) (n int, err error) { + return syscall.Read(mon.Fd, b) +} + +// Process processes one packet from the socket, and sends the event on the notify channel +func (mon *UDevMonitor) Process(notify chan *UEvent) { + buffer := make([]byte, syscall.Getpagesize()) + bytesRead, from, err := syscall.Recvfrom(mon.Fd, buffer, 0) + switch v := from.(type) { + case *syscall.SockaddrNetlink: + if v.Pid != 0 { + // Filter out non-kernel messages. + // TODO: this should probably be configurable + return + } + + default: + // Shouldn't happen, we might want to panic here actually + return + } + + if bytesRead <= 0 { + return + } + + if nil == err { + buffer = buffer[:bytesRead] + if parsed, err := ParseUEvent(buffer); err == nil { + notify <- parsed + } + } +} + +// Monitor starts udev event monitoring. 
Events are sent on the notify channel, and the watch can be +// terminated by sending true on the returned shutdown channel +func (mon *UDevMonitor) Monitor(notify chan *UEvent) (shutdown chan bool) { + shutdown = make(chan bool) + go func() { + done: + for { + select { + case <-shutdown: + break done + default: + mon.Process(notify) + } + } + }() + return shutdown +} diff --git a/agent/vendor/github.com/deniswernert/udev/uevent.go b/agent/vendor/github.com/deniswernert/udev/uevent.go new file mode 100644 index 00000000000..fc62f216f4b --- /dev/null +++ b/agent/vendor/github.com/deniswernert/udev/uevent.go @@ -0,0 +1,58 @@ +package udev + +import ( + "errors" + "strings" +) + +// UEvent is a kernel UDev event +type UEvent struct { + Action string + Devpath string + Env map[string]string +} + +func (event *UEvent) String() (str string) { + str = event.Action + "@" + event.Devpath + for k, v := range event.Env { + str += "\n" + k + "=" + v + } + return +} + +// GetEnv returns the variable for Key +func (event *UEvent) GetEnv(Key string) string { + return event.Env[strings.ToUpper(Key)] +} + +// Modalias returns the module alias information, if present +func (event *UEvent) Modalias() (*ModAlias, error) { + return ParseModAlias(event.GetEnv("MODALIAS")) +} + +// ParseUEvent parses a byte sequence into a UEvent object +func ParseUEvent(packet []byte) (event *UEvent, err error) { + lines := strings.Split(string(packet), "\000") + header := strings.Split(lines[0], "@") + if 2 != len(header) { + return nil, errors.New("Invalid action/devpath line") + } + + event = &UEvent{ + Action: header[0], + Devpath: header[1], + Env: make(map[string]string), + } + + for _, line := range lines[1 : len(lines)-2] { + bits := strings.SplitN(line, "=", 2) + // Check well-formedness of lines. 
+ if len(bits) == 2 { + event.Env[bits[0]] = bits[1] + } else { + return nil, errors.New("Invalid environment line") + } + } + + return +} diff --git a/agent/vendor/github.com/didip/tollbooth/.gitignore b/agent/vendor/github.com/didip/tollbooth/.gitignore new file mode 100644 index 00000000000..1043f539b1a --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/.gitignore @@ -0,0 +1,2 @@ +/debug +/.vscode diff --git a/agent/vendor/github.com/didip/tollbooth/LICENSE b/agent/vendor/github.com/didip/tollbooth/LICENSE new file mode 100644 index 00000000000..349ee1c29e8 --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Didip Kerabat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/agent/vendor/github.com/didip/tollbooth/README.md b/agent/vendor/github.com/didip/tollbooth/README.md new file mode 100644 index 00000000000..d1971b84dd7 --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/README.md @@ -0,0 +1,164 @@ +[![GoDoc](https://godoc.org/github.com/didip/tollbooth?status.svg)](http://godoc.org/github.com/didip/tollbooth) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/didip/tollbooth/master/LICENSE) + +## Tollbooth + +This is a generic middleware to rate-limit HTTP requests. + +**NOTE 1:** This library is considered finished. + +**NOTE 2:** In the coming weeks, I will be removing thirdparty modules and moving them to their own dedicated repos. + +**NOTE 3:** Major version changes are backward-incompatible. `v2.0.0` streamlines the ugliness of the old API. + + +## Versions + +**v1.0.0:** This version maintains the old API but all of the thirdparty modules are moved to their own repo. + +**v2.x.x:** Brand new API for the sake of code cleanup, thread safety, & auto-expiring data structures. + +**v3.x.x:** Apparently we have been using golang.org/x/time/rate incorrectly. See issue #48. It always limit X number per 1 second. The time duration is not changeable, so it does not make sense to pass TTL to tollbooth. + + +## Five Minutes Tutorial +```go +package main + +import ( + "github.com/didip/tollbooth" + "net/http" + "time" +) + +func HelloHandler(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("Hello, World!")) +} + +func main() { + // Create a request limiter per handler. + http.Handle("/", tollbooth.LimitFuncHandler(tollbooth.NewLimiter(1, nil), HelloHandler)) + http.ListenAndServe(":12345", nil) +} +``` + +## Features + +1. Rate-limit by request's remote IP, path, methods, custom headers, & basic auth usernames. 
+ ```go + import ( + "time" + "github.com/didip/tollbooth/limiter" + ) + + lmt := tollbooth.NewLimiter(1, nil) + + // or create a limiter with expirable token buckets + // This setting means: + // create a 1 request/second limiter and + // every token bucket in it will expire 1 hour after it was initially set. + lmt = tollbooth.NewLimiter(1, &limiter.ExpirableOptions{DefaultExpirationTTL: time.Hour}) + + // Configure list of places to look for IP address. + // By default it's: "RemoteAddr", "X-Forwarded-For", "X-Real-IP" + // If your application is behind a proxy, set "X-Forwarded-For" first. + lmt.SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}) + + // Limit only GET and POST requests. + lmt.SetMethods([]string{"GET", "POST"}) + + // Limit based on basic auth usernames. + // You add them on-load, or later as you handle requests. + lmt.SetBasicAuthUsers([]string{"bob", "jane", "didip", "vip"}) + // You can remove them later as well. + lmt.RemoveBasicAuthUsers([]string{"vip"}) + + // Limit request headers containing certain values. + // You add them on-load, or later as you handle requests. + lmt.SetHeader("X-Access-Token", []string{"abc123", "xyz098"}) + // You can remove all entries at once. + lmt.RemoveHeader("X-Access-Token") + // Or remove specific ones. + lmt.RemoveHeaderEntries("X-Access-Token", []string{"limitless-token"}) + + // By the way, the setters are chainable. Example: + lmt.SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}). + SetMethods([]string{"GET", "POST"}). + SetBasicAuthUsers([]string{"sansa"}). + SetBasicAuthUsers([]string{"tyrion"}) + ``` + +2. Compose your own middleware by using `LimitByKeys()`. + +3. Header entries and basic auth users can expire over time (to conserve memory). + + ```go + import "time" + + lmt := tollbooth.NewLimiter(1, nil) + + // Set a custom expiration TTL for token bucket. + lmt.SetTokenBucketExpirationTTL(time.Hour) + + // Set a custom expiration TTL for basic auth users. 
+ lmt.SetBasicAuthExpirationTTL(time.Hour) + + // Set a custom expiration TTL for header entries. + lmt.SetHeaderEntryExpirationTTL(time.Hour) + ``` + +4. Upon rejection, the following HTTP response headers are available to users: + + * `X-Rate-Limit-Limit` The maximum request limit. + + * `X-Rate-Limit-Duration` The rate-limiter duration. + + * `X-Rate-Limit-Request-Forwarded-For` The rejected request `X-Forwarded-For`. + + * `X-Rate-Limit-Request-Remote-Addr` The rejected request `RemoteAddr`. + + +5. Customize your own message or function when limit is reached. + + ```go + lmt := tollbooth.NewLimiter(1, nil) + + // Set a custom message. + lmt.SetMessage("You have reached maximum request limit.") + + // Set a custom content-type. + lmt.SetMessageContentType("text/plain; charset=utf-8") + + // Set a custom function for rejection. + lmt.SetOnLimitReached(func(w http.ResponseWriter, r *http.Request) { fmt.Println("A request was rejected") }) + ``` + +6. Tollbooth does not require external storage since it uses an algorithm called [Token Bucket](http://en.wikipedia.org/wiki/Token_bucket) [(Go library: golang.org/x/time/rate)](//godoc.org/golang.org/x/time/rate). + + +## Other Web Frameworks + +Sometimes, other frameworks require a little bit of shim to use Tollbooth. These shims below are contributed by the community, so I make no promises on how well they work. The one I am familiar with are: Chi, Gin, and Negroni. 
+ +* [Chi](https://github.com/didip/tollbooth_chi) + +* [Echo](https://github.com/didip/tollbooth_echo) + +* [FastHTTP](https://github.com/didip/tollbooth_fasthttp) + +* [Gin](https://github.com/didip/tollbooth_gin) + +* [GoRestful](https://github.com/didip/tollbooth_gorestful) + +* [HTTPRouter](https://github.com/didip/tollbooth_httprouter) + +* [Iris](https://github.com/didip/tollbooth_iris) + +* [Negroni](https://github.com/didip/tollbooth_negroni) + + +## My other Go libraries + +* [Stopwatch](https://github.com/didip/stopwatch): A small library to measure latency of things. Useful if you want to report latency data to Graphite. + +* [Gomet](https://github.com/didip/gomet): Simple HTTP client & server long poll library for Go. Useful for receiving live updates without needing Websocket. diff --git a/agent/vendor/github.com/didip/tollbooth/errors/errors.go b/agent/vendor/github.com/didip/tollbooth/errors/errors.go new file mode 100644 index 00000000000..149bc5a161e --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/errors/errors.go @@ -0,0 +1,15 @@ +// Package errors provide data structure for errors. +package errors + +import "fmt" + +// HTTPError is an error struct that returns both message and status code. +type HTTPError struct { + Message string + StatusCode int +} + +// Error returns error message. +func (httperror *HTTPError) Error() string { + return fmt.Sprintf("%v: %v", httperror.StatusCode, httperror.Message) +} diff --git a/agent/vendor/github.com/didip/tollbooth/libstring/libstring.go b/agent/vendor/github.com/didip/tollbooth/libstring/libstring.go new file mode 100644 index 00000000000..fef29afe6d2 --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/libstring/libstring.go @@ -0,0 +1,55 @@ +// Package libstring provides various string related functions. +package libstring + +import ( + "net" + "net/http" + "strings" +) + +// StringInSlice finds needle in a slice of strings. 
+func StringInSlice(sliceString []string, needle string) bool { + for _, b := range sliceString { + if b == needle { + return true + } + } + return false +} + +// RemoteIP finds IP Address given http.Request struct. +func RemoteIP(ipLookups []string, forwardedForIndexFromBehind int, r *http.Request) string { + realIP := r.Header.Get("X-Real-IP") + forwardedFor := r.Header.Get("X-Forwarded-For") + + for _, lookup := range ipLookups { + if lookup == "RemoteAddr" { + // 1. Cover the basic use cases for both ipv4 and ipv6 + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + // 2. Upon error, just return the remote addr. + return r.RemoteAddr + } + return ip + } + if lookup == "X-Forwarded-For" && forwardedFor != "" { + // X-Forwarded-For is potentially a list of addresses separated with "," + parts := strings.Split(forwardedFor, ",") + for i, p := range parts { + parts[i] = strings.TrimSpace(p) + } + + partIndex := len(parts) - 1 - forwardedForIndexFromBehind + if partIndex < 0 { + partIndex = 0 + } + + return parts[partIndex] + } + if lookup == "X-Real-IP" && realIP != "" { + return realIP + } + } + + return "" +} diff --git a/agent/vendor/github.com/didip/tollbooth/limiter/limiter.go b/agent/vendor/github.com/didip/tollbooth/limiter/limiter.go new file mode 100644 index 00000000000..5fdb6fd3294 --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/limiter/limiter.go @@ -0,0 +1,474 @@ +// Package limiter provides data structure to configure rate-limiter. +package limiter + +import ( + "net/http" + "sync" + "time" + + gocache "github.com/patrickmn/go-cache" + "golang.org/x/time/rate" +) + +// New is a constructor for Limiter. +func New(generalExpirableOptions *ExpirableOptions) *Limiter { + lmt := &Limiter{} + + lmt.SetMessageContentType("text/plain; charset=utf-8"). + SetMessage("You have reached maximum request limit."). + SetStatusCode(429). + SetOnLimitReached(nil). + SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}). 
+ SetForwardedForIndexFromBehind(0). + SetHeaders(make(map[string][]string)) + + if generalExpirableOptions != nil { + lmt.generalExpirableOptions = generalExpirableOptions + } else { + lmt.generalExpirableOptions = &ExpirableOptions{} + } + + // Default for ExpireJobInterval is every minute. + if lmt.generalExpirableOptions.ExpireJobInterval <= 0 { + lmt.generalExpirableOptions.ExpireJobInterval = time.Minute + } + + // Default for DefaultExpirationTTL is 10 years. + if lmt.generalExpirableOptions.DefaultExpirationTTL <= 0 { + lmt.generalExpirableOptions.DefaultExpirationTTL = 87600 * time.Hour + } + + lmt.tokenBuckets = gocache.New( + lmt.generalExpirableOptions.DefaultExpirationTTL, + lmt.generalExpirableOptions.ExpireJobInterval, + ) + + lmt.basicAuthUsers = gocache.New( + lmt.generalExpirableOptions.DefaultExpirationTTL, + lmt.generalExpirableOptions.ExpireJobInterval, + ) + + return lmt +} + +// Limiter is a config struct to limit a particular request handler. +type Limiter struct { + // Maximum number of requests to limit per second. + max int64 + + // Limiter burst size + burst int + + // HTTP message when limit is reached. + message string + + // Content-Type for Message + messageContentType string + + // HTTP status code when limit is reached. + statusCode int + + // A function to call when a request is rejected. + onLimitReached func(w http.ResponseWriter, r *http.Request) + + // List of places to look up IP address. + // Default is "RemoteAddr", "X-Forwarded-For", "X-Real-IP". + // You can rearrange the order as you like. + ipLookups []string + + forwardedForIndex int + + // List of HTTP Methods to limit (GET, POST, PUT, etc.). + // Empty means limit all methods. + methods []string + + // Able to configure token bucket expirations. + generalExpirableOptions *ExpirableOptions + + // List of basic auth usernames to limit. + basicAuthUsers *gocache.Cache + + // Map of HTTP headers to limit. + // Empty means skip headers checking. 
+ headers map[string]*gocache.Cache + + // Map of limiters with TTL + tokenBuckets *gocache.Cache + + tokenBucketExpirationTTL time.Duration + basicAuthExpirationTTL time.Duration + headerEntryExpirationTTL time.Duration + + sync.RWMutex +} + +// SetTokenBucketExpirationTTL is thread-safe way of setting custom token bucket expiration TTL. +func (l *Limiter) SetTokenBucketExpirationTTL(ttl time.Duration) *Limiter { + l.Lock() + l.tokenBucketExpirationTTL = ttl + l.Unlock() + + return l +} + +// GettokenBucketExpirationTTL is thread-safe way of getting custom token bucket expiration TTL. +func (l *Limiter) GetTokenBucketExpirationTTL() time.Duration { + l.RLock() + defer l.RUnlock() + return l.tokenBucketExpirationTTL +} + +// SetBasicAuthExpirationTTL is thread-safe way of setting custom basic auth expiration TTL. +func (l *Limiter) SetBasicAuthExpirationTTL(ttl time.Duration) *Limiter { + l.Lock() + l.basicAuthExpirationTTL = ttl + l.Unlock() + + return l +} + +// GetBasicAuthExpirationTTL is thread-safe way of getting custom basic auth expiration TTL. +func (l *Limiter) GetBasicAuthExpirationTTL() time.Duration { + l.RLock() + defer l.RUnlock() + return l.basicAuthExpirationTTL +} + +// SetHeaderEntryExpirationTTL is thread-safe way of setting custom basic auth expiration TTL. +func (l *Limiter) SetHeaderEntryExpirationTTL(ttl time.Duration) *Limiter { + l.Lock() + l.headerEntryExpirationTTL = ttl + l.Unlock() + + return l +} + +// GetHeaderEntryExpirationTTL is thread-safe way of getting custom basic auth expiration TTL. +func (l *Limiter) GetHeaderEntryExpirationTTL() time.Duration { + l.RLock() + defer l.RUnlock() + return l.headerEntryExpirationTTL +} + +// SetMax is thread-safe way of setting maximum number of requests to limit per duration. +func (l *Limiter) SetMax(max int64) *Limiter { + l.Lock() + l.max = max + l.Unlock() + + return l +} + +// GetMax is thread-safe way of getting maximum number of requests to limit per duration. 
+func (l *Limiter) GetMax() int64 { + l.RLock() + defer l.RUnlock() + return l.max +} + +// SetBurst is thread-safe way of setting maximum burst size. +func (l *Limiter) SetBurst(burst int) *Limiter { + l.Lock() + l.burst = burst + l.Unlock() + + return l +} + +// GetBurst is thread-safe way of setting maximum burst size. +func (l *Limiter) GetBurst() int { + l.RLock() + defer l.RUnlock() + + return l.burst +} + +// SetMessage is thread-safe way of setting HTTP message when limit is reached. +func (l *Limiter) SetMessage(msg string) *Limiter { + l.Lock() + l.message = msg + l.Unlock() + + return l +} + +// GetMessage is thread-safe way of getting HTTP message when limit is reached. +func (l *Limiter) GetMessage() string { + l.RLock() + defer l.RUnlock() + return l.message +} + +// SetMessageContentType is thread-safe way of setting HTTP message Content-Type when limit is reached. +func (l *Limiter) SetMessageContentType(contentType string) *Limiter { + l.Lock() + l.messageContentType = contentType + l.Unlock() + + return l +} + +// GetMessageContentType is thread-safe way of getting HTTP message Content-Type when limit is reached. +func (l *Limiter) GetMessageContentType() string { + l.RLock() + defer l.RUnlock() + return l.messageContentType +} + +// SetStatusCode is thread-safe way of setting HTTP status code when limit is reached. +func (l *Limiter) SetStatusCode(statusCode int) *Limiter { + l.Lock() + l.statusCode = statusCode + l.Unlock() + + return l +} + +// GetStatusCode is thread-safe way of getting HTTP status code when limit is reached. +func (l *Limiter) GetStatusCode() int { + l.RLock() + defer l.RUnlock() + return l.statusCode +} + +// SetOnLimitReached is thread-safe way of setting after-rejection function when limit is reached. 
+func (l *Limiter) SetOnLimitReached(fn func(w http.ResponseWriter, r *http.Request)) *Limiter { + l.Lock() + l.onLimitReached = fn + l.Unlock() + + return l +} + +// ExecOnLimitReached is thread-safe way of executing after-rejection function when limit is reached. +func (l *Limiter) ExecOnLimitReached(w http.ResponseWriter, r *http.Request) { + l.RLock() + defer l.RUnlock() + + fn := l.onLimitReached + if fn != nil { + fn(w, r) + } +} + +// SetIPLookups is thread-safe way of setting list of places to look up IP address. +func (l *Limiter) SetIPLookups(ipLookups []string) *Limiter { + l.Lock() + l.ipLookups = ipLookups + l.Unlock() + + return l +} + +// GetIPLookups is thread-safe way of getting list of places to look up IP address. +func (l *Limiter) GetIPLookups() []string { + l.RLock() + defer l.RUnlock() + return l.ipLookups +} + +// SetForwardedForIndexFromBehind is thread-safe way of setting which X-Forwarded-For index to choose. +func (l *Limiter) SetForwardedForIndexFromBehind(forwardedForIndex int) *Limiter { + l.Lock() + l.forwardedForIndex = forwardedForIndex + l.Unlock() + + return l +} + +// GetForwardedForIndexFromBehind is thread-safe way of getting which X-Forwarded-For index to choose. +func (l *Limiter) GetForwardedForIndexFromBehind() int { + l.RLock() + defer l.RUnlock() + return l.forwardedForIndex +} + +// SetMethods is thread-safe way of setting list of HTTP Methods to limit (GET, POST, PUT, etc.). +func (l *Limiter) SetMethods(methods []string) *Limiter { + l.Lock() + l.methods = methods + l.Unlock() + + return l +} + +// GetMethods is thread-safe way of getting list of HTTP Methods to limit (GET, POST, PUT, etc.). +func (l *Limiter) GetMethods() []string { + l.RLock() + defer l.RUnlock() + return l.methods +} + +// SetBasicAuthUsers is thread-safe way of setting list of basic auth usernames to limit. 
+func (l *Limiter) SetBasicAuthUsers(basicAuthUsers []string) *Limiter { + ttl := l.GetBasicAuthExpirationTTL() + if ttl <= 0 { + ttl = l.generalExpirableOptions.DefaultExpirationTTL + } + + for _, basicAuthUser := range basicAuthUsers { + l.basicAuthUsers.Set(basicAuthUser, true, ttl) + } + + return l +} + +// GetBasicAuthUsers is thread-safe way of getting list of basic auth usernames to limit. +func (l *Limiter) GetBasicAuthUsers() []string { + asMap := l.basicAuthUsers.Items() + + var basicAuthUsers []string + for basicAuthUser, _ := range asMap { + basicAuthUsers = append(basicAuthUsers, basicAuthUser) + } + + return basicAuthUsers +} + +// RemoveBasicAuthUsers is thread-safe way of removing basic auth usernames from existing list. +func (l *Limiter) RemoveBasicAuthUsers(basicAuthUsers []string) *Limiter { + for _, toBeRemoved := range basicAuthUsers { + l.basicAuthUsers.Delete(toBeRemoved) + } + + return l +} + +// SetHeaders is thread-safe way of setting map of HTTP headers to limit. +func (l *Limiter) SetHeaders(headers map[string][]string) *Limiter { + if l.headers == nil { + l.headers = make(map[string]*gocache.Cache) + } + + for header, entries := range headers { + l.SetHeader(header, entries) + } + + return l +} + +// GetHeaders is thread-safe way of getting map of HTTP headers to limit. +func (l *Limiter) GetHeaders() map[string][]string { + results := make(map[string][]string) + + l.RLock() + defer l.RUnlock() + + for header, entriesAsGoCache := range l.headers { + entries := make([]string, 0) + + for entry, _ := range entriesAsGoCache.Items() { + entries = append(entries, entry) + } + + results[header] = entries + } + + return results +} + +// SetHeader is thread-safe way of setting entries of 1 HTTP header. 
+func (l *Limiter) SetHeader(header string, entries []string) *Limiter { + l.RLock() + existing, found := l.headers[header] + l.RUnlock() + + ttl := l.GetHeaderEntryExpirationTTL() + if ttl <= 0 { + ttl = l.generalExpirableOptions.DefaultExpirationTTL + } + + if !found { + existing = gocache.New(ttl, l.generalExpirableOptions.ExpireJobInterval) + } + + for _, entry := range entries { + existing.Set(entry, true, ttl) + } + + l.Lock() + l.headers[header] = existing + l.Unlock() + + return l +} + +// GetHeader is thread-safe way of getting entries of 1 HTTP header. +func (l *Limiter) GetHeader(header string) []string { + l.RLock() + entriesAsGoCache := l.headers[header] + l.RUnlock() + + entriesAsMap := entriesAsGoCache.Items() + entries := make([]string, 0) + + for entry, _ := range entriesAsMap { + entries = append(entries, entry) + } + + return entries +} + +// RemoveHeader is thread-safe way of removing entries of 1 HTTP header. +func (l *Limiter) RemoveHeader(header string) *Limiter { + ttl := l.GetHeaderEntryExpirationTTL() + if ttl <= 0 { + ttl = l.generalExpirableOptions.DefaultExpirationTTL + } + + l.Lock() + l.headers[header] = gocache.New(ttl, l.generalExpirableOptions.ExpireJobInterval) + l.Unlock() + + return l +} + +// RemoveHeaderEntries is thread-safe way of adding new entries to 1 HTTP header rule. 
+func (l *Limiter) RemoveHeaderEntries(header string, entriesForRemoval []string) *Limiter { + l.RLock() + entries, found := l.headers[header] + l.RUnlock() + + if !found { + return l + } + + for _, toBeRemoved := range entriesForRemoval { + entries.Delete(toBeRemoved) + } + + return l +} + +func (l *Limiter) limitReachedWithTokenBucketTTL(key string, tokenBucketTTL time.Duration) bool { + lmtMax := l.GetMax() + lmtBurst := l.GetBurst() + l.Lock() + defer l.Unlock() + + if _, found := l.tokenBuckets.Get(key); !found { + l.tokenBuckets.Set( + key, + rate.NewLimiter(rate.Limit(lmtMax), lmtBurst), + tokenBucketTTL, + ) + } + + expiringMap, found := l.tokenBuckets.Get(key) + if !found { + return false + } + + return !expiringMap.(*rate.Limiter).Allow() +} + +// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens. +func (l *Limiter) LimitReached(key string) bool { + ttl := l.GetTokenBucketExpirationTTL() + + if ttl <= 0 { + ttl = l.generalExpirableOptions.DefaultExpirationTTL + } + + return l.limitReachedWithTokenBucketTTL(key, ttl) +} diff --git a/agent/vendor/github.com/didip/tollbooth/limiter/limiter_options.go b/agent/vendor/github.com/didip/tollbooth/limiter/limiter_options.go new file mode 100644 index 00000000000..9836c502b4a --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/limiter/limiter_options.go @@ -0,0 +1,12 @@ +package limiter + +import ( + "time" +) + +type ExpirableOptions struct { + DefaultExpirationTTL time.Duration + + // How frequently expire job triggers + ExpireJobInterval time.Duration +} diff --git a/agent/vendor/github.com/didip/tollbooth/tollbooth.go b/agent/vendor/github.com/didip/tollbooth/tollbooth.go new file mode 100644 index 00000000000..a557b2bd8c7 --- /dev/null +++ b/agent/vendor/github.com/didip/tollbooth/tollbooth.go @@ -0,0 +1,179 @@ +// Package tollbooth provides rate-limiting logic to HTTP request handler. 
+package tollbooth + +import ( + "net/http" + "strconv" + "strings" + + "github.com/didip/tollbooth/errors" + "github.com/didip/tollbooth/libstring" + "github.com/didip/tollbooth/limiter" +) + +// setResponseHeaders configures X-Rate-Limit-Limit and X-Rate-Limit-Duration +func setResponseHeaders(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Rate-Limit-Limit", strconv.FormatInt(lmt.GetMax(), 10)) + w.Header().Add("X-Rate-Limit-Duration", "1") + w.Header().Add("X-Rate-Limit-Request-Forwarded-For", r.Header.Get("X-Forwarded-For")) + w.Header().Add("X-Rate-Limit-Request-Remote-Addr", r.RemoteAddr) +} + +// NewLimiter is a convenience function to limiter.New. +func NewLimiter(max int64, tbOptions *limiter.ExpirableOptions) *limiter.Limiter { + return limiter.New(tbOptions).SetMax(max).SetBurst(int(max)) +} + +// LimitByKeys keeps track number of request made by keys separated by pipe. +// It returns HTTPError when limit is exceeded. +func LimitByKeys(lmt *limiter.Limiter, keys []string) *errors.HTTPError { + if lmt.LimitReached(strings.Join(keys, "|")) { + return &errors.HTTPError{Message: lmt.GetMessage(), StatusCode: lmt.GetStatusCode()} + } + + return nil +} + +// BuildKeys generates a slice of keys to rate-limit by given limiter and request structs. +func BuildKeys(lmt *limiter.Limiter, r *http.Request) [][]string { + remoteIP := libstring.RemoteIP(lmt.GetIPLookups(), lmt.GetForwardedForIndexFromBehind(), r) + path := r.URL.Path + sliceKeys := make([][]string, 0) + + // Don't BuildKeys if remoteIP is blank. + if remoteIP == "" { + return sliceKeys + } + + lmtMethods := lmt.GetMethods() + lmtHeaders := lmt.GetHeaders() + lmtBasicAuthUsers := lmt.GetBasicAuthUsers() + + lmtHeadersIsSet := len(lmtHeaders) > 0 + lmtBasicAuthUsersIsSet := len(lmtBasicAuthUsers) > 0 + + if lmtMethods != nil && lmtHeadersIsSet && lmtBasicAuthUsersIsSet { + // Limit by HTTP methods and HTTP headers+values and Basic Auth credentials. 
+ if libstring.StringInSlice(lmtMethods, r.Method) { + for headerKey, headerValues := range lmtHeaders { + if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" { + // If header values are empty, rate-limit all request with headerKey. + username, _, ok := r.BasicAuth() + if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) { + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey, username}) + } + + } else if len(headerValues) > 0 && r.Header.Get(headerKey) != "" { + // If header values are not empty, rate-limit all request with headerKey and headerValues. + for _, headerValue := range headerValues { + username, _, ok := r.BasicAuth() + if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) { + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey, headerValue, username}) + } + } + } + } + } + + } else if lmtMethods != nil && lmtHeadersIsSet { + // Limit by HTTP methods and HTTP headers+values. + if libstring.StringInSlice(lmtMethods, r.Method) { + for headerKey, headerValues := range lmtHeaders { + if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" { + // If header values are empty, rate-limit all request with headerKey. + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey}) + + } else if len(headerValues) > 0 && r.Header.Get(headerKey) != "" { + // If header values are not empty, rate-limit all request with headerKey and headerValues. + for _, headerValue := range headerValues { + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey, headerValue}) + } + } + } + } + + } else if lmtMethods != nil && lmtBasicAuthUsersIsSet { + // Limit by HTTP methods and Basic Auth credentials. 
+ if libstring.StringInSlice(lmtMethods, r.Method) { + username, _, ok := r.BasicAuth() + if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) { + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, username}) + } + } + + } else if lmtMethods != nil { + // Limit by HTTP methods. + if libstring.StringInSlice(lmtMethods, r.Method) { + sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method}) + } + + } else if lmtHeadersIsSet { + // Limit by HTTP headers+values. + for headerKey, headerValues := range lmtHeaders { + if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" { + // If header values are empty, rate-limit all request with headerKey. + sliceKeys = append(sliceKeys, []string{remoteIP, path, headerKey}) + + } else if len(headerValues) > 0 && r.Header.Get(headerKey) != "" { + // If header values are not empty, rate-limit all request with headerKey and headerValues. + for _, headerValue := range headerValues { + sliceKeys = append(sliceKeys, []string{remoteIP, path, headerKey, headerValue}) + } + } + } + + } else if lmtBasicAuthUsersIsSet { + // Limit by Basic Auth credentials. + username, _, ok := r.BasicAuth() + if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) { + sliceKeys = append(sliceKeys, []string{remoteIP, path, username}) + } + } else { + // Default: Limit by remoteIP and path. + sliceKeys = append(sliceKeys, []string{remoteIP, path}) + } + + return sliceKeys +} + +// LimitByRequest builds keys based on http.Request struct, +// loops through all the keys, and check if any one of them returns HTTPError. +func LimitByRequest(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Request) *errors.HTTPError { + setResponseHeaders(lmt, w, r) + + sliceKeys := BuildKeys(lmt, r) + + // Loop sliceKeys and check if one of them has error. 
+ for _, keys := range sliceKeys { + httpError := LimitByKeys(lmt, keys) + if httpError != nil { + return httpError + } + } + + return nil +} + +// LimitHandler is a middleware that performs rate-limiting given http.Handler struct. +func LimitHandler(lmt *limiter.Limiter, next http.Handler) http.Handler { + middle := func(w http.ResponseWriter, r *http.Request) { + httpError := LimitByRequest(lmt, w, r) + if httpError != nil { + lmt.ExecOnLimitReached(w, r) + w.Header().Add("Content-Type", lmt.GetMessageContentType()) + w.WriteHeader(httpError.StatusCode) + w.Write([]byte(httpError.Message)) + return + } + + // There's no rate-limit error, serve the next handler. + next.ServeHTTP(w, r) + } + + return http.HandlerFunc(middle) +} + +// LimitFuncHandler is a middleware that performs rate-limiting given request handler function. +func LimitFuncHandler(lmt *limiter.Limiter, nextFunc func(http.ResponseWriter, *http.Request)) http.Handler { + return LimitHandler(lmt, http.HandlerFunc(nextFunc)) +} diff --git a/agent/vendor/github.com/docker/distribution/LICENSE b/agent/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000000..e06d2081865 --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/agent/vendor/github.com/docker/distribution/digestset/set.go b/agent/vendor/github.com/docker/distribution/digestset/set.go new file mode 100644 index 00000000000..71327dca720 --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. 
+type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. 
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. 
+func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. 
The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/agent/vendor/github.com/docker/distribution/reference/helpers.go b/agent/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 00000000000..978df7eabbf --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. 
+func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/agent/vendor/github.com/docker/distribution/reference/normalize.go b/agent/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 00000000000..2d71fc5e9ff --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
+type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. 
+// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. 
+func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/agent/vendor/github.com/docker/distribution/reference/reference.go b/agent/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 00000000000..2f66cca87a3 --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. 
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. 
+type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. 
+func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git 
a/agent/vendor/github.com/docker/distribution/reference/regexp.go b/agent/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 00000000000..78603493203 --- /dev/null +++ b/agent/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. 
+ anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. 
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/agent/vendor/github.com/docker/docker/AUTHORS b/agent/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 00000000000..c6c8fb40e39 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2016 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Adam Avilla +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Alejandro González Hevia +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit 
Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Yolken +Benoit Chesneau +Bernerd Schaefer +Bernhard M. 
Wiedemann +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bin Liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengguang Xu +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris McKinnel +Chris Seto +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corbin Coleman +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Dani Louca +Daniel Antlinger +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Danny Berger +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David Pelaez +David R. 
Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. 
Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Ethan Bell +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +gautam, prasanna +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +haikuoliu +Hakan Özler +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +Hyzhou Zhy +Iago López Galeiras +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas 
+Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. 
Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Zhang +Jie Luo +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +jimmyxian +Jinsoo Park +Jiri Popelka +Jiuyue Ma +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard (VM) +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. 
Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Kun Zhang +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Gong +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins 
+Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michał Gryko +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. 
Rabenau +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. Giarrusso +Pascal +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Pure White +pysqz 
+Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Rémy Greinhofer +s. 
rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simei He +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Stanislav Bondarenko +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. 
+Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephan Spindler +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Sweeney +Tom Wilkie +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +tpng +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +vanderliang +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vlastimil Zeman +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Ward Vandewege +WarheadsSE +Wassim Dhif +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +WiseTrem +Wolfgang Powisch +Wonjun Kim +xamyzhao +Xianglin Gao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +Xiaoyu Zhang +xiekeyang +Ximo Guanter Gonzálbez +Xinbo Weng +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yang Bai +Yang 
Pengfei +yangchenliang +Yanqiang Miao +Yao Zaiyong +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有勝) +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yuhao Fang +Yuichiro Kaneko +Yunxiang Huang +Yurii Rashkovskii +Yves Junqueira +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenkun Bi +Zhou Hao +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +徐俊杰 +慕陶 +搏通 +黄艳红00139573 diff --git a/agent/vendor/github.com/docker/docker/LICENSE b/agent/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 00000000000..6d8d58fb676 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/docker/docker/NOTICE b/agent/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 00000000000..0c74e15b057 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/agent/vendor/github.com/docker/docker/api/README.md b/agent/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 00000000000..f136c3433af --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. 
+- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. 
+ +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/agent/vendor/github.com/docker/docker/api/common.go b/agent/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 00000000000..4582eb92e0c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.39" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/agent/vendor/github.com/docker/docker/api/common_unix.go b/agent/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 00000000000..504b0c90d7b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/agent/vendor/github.com/docker/docker/api/common_windows.go b/agent/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 00000000000..590ba5479be --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. 
+const MinVersion string = "1.24" diff --git a/agent/vendor/github.com/docker/docker/api/swagger-gen.yaml b/agent/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 00000000000..f07a02737f7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/agent/vendor/github.com/docker/docker/api/swagger.yaml b/agent/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 00000000000..dc8a430b831 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,10256 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.39" +info: + title: "Docker Engine API" + version: "1.39" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. 
`docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.39) is used. + For example, calling `/info` is the same as calling `/v1.39/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. 
These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. 
+ # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
+ # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. 
+ - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." 
+ type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. 
+ type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + example: "0-3" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DiskQuota: + description: "Disk limit (in bytes)." 
+ type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. 
The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + ResourceObject: + description: "An object describing the resources which can be advertised by a node and requested by a task" + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." 
+ type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + StartPeriod: + description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + type: "integer" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. 
+ items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." 
+ items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when the container starts. + The allocated port might be changed when restarting the container. 
+ + The port is selected from the ephemeral port range that depends on the kernel. + For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. 
(Windows only)" + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: "The list of paths to be masked inside the container (this overrides the default set of paths)" + items: + type: "string" + ReadonlyPaths: + type: "array" + description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" + items: + type: "string" + + ContainerConfig: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." 
+ type: "array" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). 
+ type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: 64 + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + x-nullable: true + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: "Information about a container's graph driver." 
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: 
+ type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." 
+ default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + x-nullable: true + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: 
"local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." 
+ type: "array" + items: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + properties: + ID: + type: "string" + Parent: + type: "string" + Type: + type: "string" + Description: + type: "string" + InUse: + type: "boolean" + Shared: + type: "boolean" + Size: + type: "integer" + CreatedAt: + type: "integer" + LastUsedAt: + type: "integer" + x-nullable: true + UsageCount: + type: "integer" + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." 
+ type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. 
+ type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: 
false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: "True if the plugin is running. False if the plugin is not running, only installed." + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - 
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. + The client must send the version number along with the modified specification when updating these objects. + This approach ensures safe concurrency and determinism in that the change on the object + may not be applied if the version number has changed from the last read. In other words, + if two update requests specify the same base version, only one of the requests can succeed. + As a result, two separate update requests that happen at the same time will not + unintentionally overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + type: "object" + properties: + TrustRoot: + description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + type: "string" + CertIssuerSubject: + description: "The base64-url-safe-encoded raw subject bytes of the issuer" + type: "string" + CertIssuerPublicKey: + description: "The base64-url-safe-encoded raw public key bytes of the issuer" + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + 
PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." 
+ type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." 
+ type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + CACert: + description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." + type: "string" + SigningCACert: + description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + type: "string" + SigningCAKey: + description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." + type: "string" + ForceRotate: + description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. 
Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: "Whether there is currently a root CA rotation in progress for the swarm" + type: "boolean" + example: false + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm.
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + File: + type: "string" + description: | + Load credential spec from this file. The file is read by the daemon, and must be present in the + `CredentialSpecs` subdirectory in the docker data directory, which defaults to + `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +


+ + > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows registry. The specified registry value must be + located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +


+ + + > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." 
+ type: "array" + items: + type: "string" + Secrets: + description: "Secrets contains references to zero or more secrets that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: "SecretID represents the ID of the specific secret that we're referencing." + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, but this is just provided for + lookup/display purposes. The secret in the reference will be identified by its ID. + type: "string" + Configs: + description: "Configs contains references to zero or more configs that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + ConfigID: + description: "ConfigID represents the ID of the specific config that we're referencing." + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, but this is just provided for + lookup/display purposes. The config in the reference will be identified by its ID. 
+ type: "string" + Isolation: + type: "string" + description: "Isolation technology of the containers running the service. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as those + that are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/ResourceObject" + Reservation: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Window is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + Preferences: + description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+ type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: "label descriptor, such as engine.labels.az" + type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Runtime: + description: "Runtime is the type of runtime specified for the task executor." + type: "string" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + 
AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." 
+ type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between rollback iterations, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if a rolled back task fails to run, or stops running during the rollback." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Order: + description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publishes the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update."
+ type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + 
x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: "Name of the secrets driver used to fetch the secret's value from an external secret store" + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + config data. 
+ type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + SystemStatus: + description: | + Status information about this node (standalone Swarm API). + +


+ + > **Note**: The information returned in this field is only propagated + > by the Swarm standalone API, and is empty (`null`) when using + > built-in swarm mode. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Role", "primary"] + - ["State", "Healthy"] + - ["Strategy", "spread"] + - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] + - ["Nodes", "2"] + - [" swarm-agent-00", "192.168.99.102:2376"] + - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] + - [" └ ServerVersion", "17.06.0-ce"] + - [" swarm-manager", "192.168.99.101:2376"] + - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] + - [" └ ServerVersion", "17.06.0-ce"] + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." 
+ type: "boolean" + example: true + KernelMemory: + description: "Indicates if the host has kernel memory limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." + type: "boolean" + example: true + CpuCfsQuota: + description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." + type: "boolean" + example: true + CPUShares: + description: "Indicates if CPU Shares limiting is supported by the host." + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. 
+ type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd"] + default: "cgroupfs" + example: "cgroupfs" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. 
+ type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in kilobytes (kB). + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." 
+ type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +


+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +


+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. 
+ default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, and user-namespaces (userns). + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server.
+ + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `[:]` or `[:]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure.
Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. 
+ type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. 
+ type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of IDs and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + properties: + NodeID: + description: "Unique identifier of this node in the swarm."
+ type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container than inspecting a single container. For example, + the list of linked containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: + + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `expose`=(`[/]`|`/[]`) + - `exited=` containers with exit code of `` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=` a container's name + - `network`=(`` or ``) + - `publish`=(`[/]`|`/[]`) + - `since`=(`` or ``) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`` or ``) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + 
SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: 
"#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + 
BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + title: "ContainerCreateResponse" + description: "OK response to ContainerCreate operation" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: 
["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: | + The status of the container. For example, `"running"` or `"exited"`. + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the cgroups freezer is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." 
+ type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + 
schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. 
For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. 
The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. 
+ operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: 
c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + 
in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a 
container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. 
+ + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. 
Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. 
+ type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." 
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + title: "ContainerWaitResponse" + description: "OK response to ContainerWait operation" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + Error: + description: "container waiting error, if any" + type: "object" + properties: + Message: + description: "Details of an error" + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." 
+ type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "A base64 - encoded JSON object with some filesystem header information about the path" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." 
+ type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." 
+ type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." 
+ operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. 
+ + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. 
If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. 
This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required.
However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. 
Available filters: + - `unused-for=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. 
If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + 
GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. 
+ + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." 
+ type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
+ 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemVersionResponse" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + type: "string" + Version: + type: "string" + x-nullable: false + Details: + type: "object" + x-nullable: true + + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "17.04.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.7.5" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.27" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" 
+ description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + BuildKit-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: 
"`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` + + Volumes report these events: `create`, `mount`, `unmount`, and `destroy` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: 
"create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + 
- + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. 
+ + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). 
+ operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." 
+ AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + WorkingDir: + type: "string" + description: "The working directory for the exec process inside the container." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." 
+ operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." 
+ operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + title: "VolumeListResponse" + description: "Volume list response" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - CreatedAt: 
"2017-07-19T12:00:26Z" + Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. + - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + description: "Volume configuration" + title: "VolumeConfig" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." 
+ type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than inspecting a single network. For example, + the list of containers attached to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). 
+ - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." 
+ type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." + type: "boolean" + Ingress: + description: "Ingress network is the network which provides the routing-mesh in swarm mode." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." 
+ EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + title: "PluginPrivilegeItem" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." 
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." 
+ type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `node.label=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - 
name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. 
This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. + + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + type: "string" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm."
+ type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the default subnet pool + type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, + or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` + is used. 
+ + The `DataPathAddr` specifies the address that global scope network drivers will publish towards other + nodes in order to reach the containers running on this node. Using this parameter it is possible to + separate the container data traffic from the management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." 
+ type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." 
+ type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." 
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 
15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." + type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." 
+ required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. + This is required to avoid conflicting writes. + This version number should be the value as currently set on the service *before* the update. 
+ You can find the current version by calling `GET /services/{id}`" + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "rollback" + in: "query" + type: "string" + description: "Set to this parameter to `previous` to cause a + server-side rollback to the previous service spec. The supplied spec will be + ignored in this case." + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." 
+ type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + 
State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. 
+ + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + 
type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 
parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." 
+ type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: 
"ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: "The spec of the config to update. Currently, only the Labels field can be updated. 
All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." + - name: "version" + in: "query" + description: "The version number of the config object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: "Return image digest and platform information by contacting the registry." + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: "A descriptor struct containing digest, media type, and size" + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: "An array containing all platforms supported by the image" + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: 
latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. + + > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental + > features enabled. The specifications for this endpoint may still change in a future version of the API. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session (experimental)"] diff --git a/agent/vendor/github.com/docker/docker/api/types/auth.go b/agent/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 00000000000..ddf15bb182d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types // import "github.com/docker/docker/api/types" + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string 
`json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/agent/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 00000000000..bf3463b90e7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/agent/vendor/github.com/docker/docker/api/types/client.go b/agent/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 00000000000..3b698c2c240 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,406 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters 
to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. 
+type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. 
+ BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. 
Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. 
+type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. 
+ + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. 
+type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/agent/vendor/github.com/docker/docker/api/types/configs.go b/agent/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 00000000000..178e911a7af --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,64 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. 
+type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/config.go b/agent/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 00000000000..89ad08c2346 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,69 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. 
+const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. 
+type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/container_changes.go b/agent/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 00000000000..c909d6ca3e9 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/container_create.go b/agent/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 
00000000000..49efa0f2c09 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/container_top.go b/agent/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 00000000000..ba41edcf3f8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody OK response to ContainerTop operation +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each is process is an array of values corresponding to the titles + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/container_update.go 
b/agent/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 00000000000..7630ae54cd6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/container_wait.go b/agent/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 00000000000..9e3910a6b42 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,29 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + +// ContainerWaitOKBody OK response to ContainerWait operation +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // error + // Required: true + Error *ContainerWaitOKBodyError `json:"Error"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git 
a/agent/vendor/github.com/docker/docker/api/types/container/host_config.go b/agent/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 00000000000..4ef26fa6c87 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,412 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. 
+func (n IpcMode) IsPrivate() bool { + return n == "private" +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. 
+func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. 
+func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. 
+// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. 
+ + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rule to be added to the device cgroup + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. 
+type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for 
the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 00000000000..cf6fdf44026 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package container // import "github.com/docker/docker/api/types/container" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() 
bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 00000000000..99f803a5bb1 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,40 @@ +package container // import "github.com/docker/docker/api/types/container" + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. 
+// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/agent/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/agent/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 00000000000..cd8311f99cf --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. 
+const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/agent/vendor/github.com/docker/docker/api/types/error_response.go b/agent/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 00000000000..dc942d9d9ef --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/events/events.go b/agent/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 00000000000..027c6edb722 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,52 @@ +package events // import "github.com/docker/docker/api/types/events" + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that daemon generate + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = 
"secret" + // ConfigEventType is the event type that configs generate + ConfigEventType = "config" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set or attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/filters/parse.go b/agent/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 00000000000..a41e3d8d96a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,350 @@ +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" + +import ( + "encoding/json" + "errors" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores a mapping of keys to a set of multiple values. 
+type Args struct { + fields map[string]map[string]bool +} + +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string +} + +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// ParseFlag parses a key=value string and adds it to an Args. +// +// Deprecated: Use Args.Add() +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned when a filter is not in the form key=value +// +// Deprecated: this error will be removed in a future version +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam encodes the Args as args JSON encoded string +// +// Deprecated: use ToJSON +func ToParam(a Args) (string, error) { + return ToJSON(a) +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + buf, err := json.Marshal(a) + return string(buf), err +} + +// ToParamWithVersion encodes Args as a JSON string. 
If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: Use ToJSON +func ToParamWithVersion(version string, a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + + if version != "" && versions.LessThan(version, "1.22") { + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err + } + + return ToJSON(a) +} + +// FromParam decodes a JSON encoded string into Args +// +// Deprecated: use FromJSON +func FromParam(p string) (Args, error) { + return FromJSON(p) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil + } + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true + } else { + args.fields[key] = map[string]bool{value: true} + } +} + +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + 
delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) + } + } +} + +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) +} + +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { + return true + } + + fieldValues := args.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. 
+func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(args.fields[key]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { + return true + } + + fieldValues := args.fields[key] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the key exists in the mapping +// +// Deprecated: use Contains +func (args Args) Include(field string) bool { + _, ok := args.fields[field] + return ok +} + +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] + return ok +} + +type invalidFilter string + +func (e invalidFilter) Error() string { + return "Invalid filter '" + string(e) + "'" +} + +func (invalidFilter) InvalidParameter() {} + +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { + if !accepted[name] { + return invalidFilter(name) + } + } + return nil +} + +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. 
+func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { + return nil + } + for v := range args.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/agent/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/agent/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 00000000000..4d9bf1c62c8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/id_response.go b/agent/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 00000000000..7592d2f8b15 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/image/image_history.go b/agent/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 00000000000..d6b354bcdf3 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,37 @@ +package image + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/agent/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 00000000000..b9a65a0d8e8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/image_summary.go b/agent/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 00000000000..e145b3dcfcd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/mount/mount.go b/agent/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 00000000000..3fef974df88 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,130 @@ +package mount // 
import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. 
+type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. 
+ // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/agent/vendor/github.com/docker/docker/api/types/network/network.go b/agent/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 00000000000..ccb448f23a2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
+ return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + "scope": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin.go b/agent/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 00000000000..abae48b9ab0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. 
+ ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin_device.go b/agent/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 00000000000..56990106755 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin_env.go b/agent/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 00000000000..32962dc2ebe --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/agent/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 00000000000..c82f204e870 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin_mount.go b/agent/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 00000000000..5c031cf8b5c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/plugin_responses.go b/agent/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 00000000000..60d1fb5ad85 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON 
implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/agent/vendor/github.com/docker/docker/api/types/port.go b/agent/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 00000000000..d91234744c6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/agent/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 00000000000..f0a2113e405 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/registry/registry.go b/agent/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 00000000000..8789ad3b321 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + 
"github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + 
Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/agent/vendor/github.com/docker/docker/api/types/seccomp.go b/agent/vendor/github.com/docker/docker/api/types/seccomp.go new file mode 100644 index 00000000000..67a41e1a89e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/seccomp.go @@ -0,0 
+1,93 @@ +package types // import "github.com/docker/docker/api/types" + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent a specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = 
"SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/service_update_response.go b/agent/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 00000000000..74ea64b1bb6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/stats.go b/agent/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 00000000000..20daebed14b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types // import "github.com/docker/docker/api/types" + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). 
+ // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. 
+// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. 
Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. 
+ NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/agent/vendor/github.com/docker/docker/api/types/strslice/strslice.go new file mode 100644 index 00000000000..82921cebc15 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice // import "github.com/docker/docker/api/types/strslice" + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/common.go b/agent/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 00000000000..ef020f458bd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,40 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:"Labels"` +} + +// Driver represents a driver (network, logging, secrets backend). 
+type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/config.go b/agent/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 00000000000..a1555cf43ee --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,35 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. 
+ Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget + ConfigID string + ConfigName string +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/container.go b/agent/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 00000000000..e12f09837fc --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,75 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + File string + Registry string +} + +// Privileges defines the security options for the container. 
+type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/network.go b/agent/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 00000000000..98ef3284d1d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. 
+type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. 
+ + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
+type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/node.go b/agent/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 00000000000..1e30f5fa10d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. 
+type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. 
+type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 00000000000..0c77403ccff --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 00000000000..98c2806c31d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 00000000000..1fdc9b04361 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() 
(n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= 
l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 
0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 00000000000..6d63b7783fd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/secret.go b/agent/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 00000000000..d5213ec981c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. 
+ Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/service.go b/agent/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 00000000000..abf192e7594 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,124 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. 
+ UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback in progress. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback in progress. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. 
If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/agent/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 00000000000..b742cf1bfbb --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,223 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 +} + +// Swarm represents a swarm. 
+type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. 
+ SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. 
+ SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate ane key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. 
+type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/agent/vendor/github.com/docker/docker/api/types/swarm/task.go b/agent/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 00000000000..b35605d12fd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,191 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. 
+type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. 
+ ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) 
+type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. 
+type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/agent/vendor/github.com/docker/docker/api/types/time/duration_convert.go new file mode 100644 index 00000000000..84b6f073224 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/time/duration_convert.go @@ -0,0 +1,12 @@ +package time // import "github.com/docker/docker/api/types/time" + +import ( + "strconv" + "time" +) + +// DurationToSecondsString converts the specified duration to the number +// seconds it represents, formatted as a string. 
+func DurationToSecondsString(duration time.Duration) string { + return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) +} diff --git a/agent/vendor/github.com/docker/docker/api/types/time/timestamp.go b/agent/vendor/github.com/docker/docker/api/types/time/timestamp.go new file mode 100644 index 00000000000..ea3495efeb8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -0,0 +1,129 @@ +package time // import "github.com/docker/docker/api/types/time" + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the 
value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// seconds will be used to create a time variable. For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/agent/vendor/github.com/docker/docker/api/types/types.go b/agent/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 00000000000..a8fae3ba32d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,613 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + 
"github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
+} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func 
DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that 
the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. 
`docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. 
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. 
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuildCache []*BuildCache + BuilderSize int64 // deprecated +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed 
uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions hold parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/agent/vendor/github.com/docker/docker/api/types/versions/README.md b/agent/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 00000000000..1ef911edb0f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. 
The stable version of the API types live in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +## Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/agent/vendor/github.com/docker/docker/api/types/versions/compare.go b/agent/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 00000000000..8ccb0aa92eb --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions // import "github.com/docker/docker/api/types/versions" + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
+func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/agent/vendor/github.com/docker/docker/api/types/volume.go b/agent/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 00000000000..b5ee96a5005 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,69 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. 
+ // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/agent/vendor/github.com/docker/docker/api/types/volume/volume_create.go new file mode 100644 index 00000000000..f12e48612c5 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumeCreateBody Volume configuration +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. 
+ // Required: true + Name string `json:"Name"` +} diff --git a/agent/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/agent/vendor/github.com/docker/docker/api/types/volume/volume_list.go new file mode 100644 index 00000000000..020198f7371 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumeListOKBody Volume list response +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/agent/vendor/github.com/docker/docker/client/README.md b/agent/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 00000000000..059dfb3ce7b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/agent/vendor/github.com/docker/docker/client/build_cancel.go b/agent/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 00000000000..74df4950891 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + if err != nil { + return err + } + defer ensureReaderClosed(serverResp) + + return nil +} diff --git a/agent/vendor/github.com/docker/docker/client/build_prune.go b/agent/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 00000000000..42bbf99ef10 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) 
BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/checkpoint_create.go b/agent/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 00000000000..921024fe4fb --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/checkpoint_delete.go b/agent/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 00000000000..54f55fa76e6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import 
"github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/checkpoint_list.go b/agent/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 00000000000..2b73fb553f2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + return checkpoints, wrapResponseError(err, resp, "container", container) + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/agent/vendor/github.com/docker/docker/client/client.go b/agent/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 00000000000..5031502acf1 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,422 @@ +/* +Package client 
is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. 
+ client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of support environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) +func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
+func FromEnv(c *Client) error { + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + } + + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } + } + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + c.version = version + c.manualOverride = true + } + return nil +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +// Deprecated: use WithDialContext +func WithDialer(dialer *net.Dialer) func(*Client) error { + return WithDialContext(dialer.DialContext) +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. 
+func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) func(*Client) error { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithVersion overrides the client version with the specified one +func WithVersion(version string) func(*Client) error { + return func(c *Client) error { + c.version = version + return nil + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) func(*Client) error { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) func(*Client) error { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) func(*Client) error { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + scheme: "http", + client: client, + proto: defaultProto, + addr: defaultAddr, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if _, ok := c.client.Transport.(http.RoundTripper); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) + } + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(p string, query url.Values) string { + var apiPath string + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + ping, _ := cli.Ping(ctx) + cli.NegotiateAPIVersionPing(ping) +} + +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. 
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if cli.manualOverride { + return + } + + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return nil, fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// CustomHTTPHeaders returns the custom http headers stored by the client. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. 
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/agent/vendor/github.com/docker/docker/client/client_unix.go b/agent/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 00000000000..3d24470ba3a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd openbsd darwin + +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/agent/vendor/github.com/docker/docker/client/client_windows.go b/agent/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 00000000000..c649e54412c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,7 @@ +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/agent/vendor/github.com/docker/docker/client/config_create.go 
b/agent/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 00000000000..c8b802ad356 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new Config. +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/config_inspect.go b/agent/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 00000000000..4ac566ad89f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) 
+ if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/config_list.go b/agent/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 00000000000..2b9d54606bf --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/agent/vendor/github.com/docker/docker/client/config_remove.go b/agent/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 00000000000..a96871e98b7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "config", id) +} diff --git a/agent/vendor/github.com/docker/docker/client/config_update.go b/agent/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 00000000000..39e59cf8589 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_attach.go b/agent/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 00000000000..88ba1ef6396 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. 
+// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. +func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/agent/vendor/github.com/docker/docker/client/container_commit.go b/agent/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 00000000000..377a2ea681c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes into a 
container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_copy.go b/agent/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 00000000000..d706260cee0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,101 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. 
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. 
+func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_create.go b/agent/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 00000000000..d269a61894a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,56 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, objectNotFoundError{object: "image", id: config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_diff.go b/agent/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 00000000000..3b7c90c96c9 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_exec.go b/agent/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 00000000000..535536b1e0f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. 
It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_export.go b/agent/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 00000000000..d0c0a5cbadf --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. 
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/container_inspect.go b/agent/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 00000000000..f453064cf8f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_kill.go b/agent/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 00000000000..4d6f1d23da9 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. 
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_list.go b/agent/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 00000000000..9c218e2218a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,56 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_logs.go b/agent/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 00000000000..5b6541f0359 --- /dev/null +++ 
b/agent/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, wrapResponseError(err, resp, "container", container) + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/container_pause.go b/agent/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 00000000000..5e7271a371c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. 
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_prune.go b/agent/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 00000000000..14f88d93bac --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/container_remove.go b/agent/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 00000000000..ab4cfc16f87 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "container", containerID) +} diff --git a/agent/vendor/github.com/docker/docker/client/container_rename.go b/agent/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 00000000000..240fdf552b4 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_resize.go b/agent/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 00000000000..a9d4c0c79a0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// ContainerResize changes the size of the tty for a container. 
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_restart.go b/agent/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 00000000000..41e421969f4 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon to wait for the container to be up again for +// a specific amount of time, given the timeout. 
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_start.go b/agent/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 00000000000..c2e0b15dca8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_stats.go b/agent/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 00000000000..6ef44c77480 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_stop.go b/agent/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 00000000000..629d7ab64c8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_top.go b/agent/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 00000000000..9c9fce7a040 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_unpause.go b/agent/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 00000000000..1d8f873169b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, 
"/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_update.go b/agent/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 00000000000..14e7f23dfb8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/container_wait.go b/agent/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 00000000000..6ab8c1da96a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". 
+// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/agent/vendor/github.com/docker/docker/client/disk_usage.go b/agent/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 00000000000..8eb30eb5de2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/distribution_inspect.go b/agent/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 00000000000..7245bbeeda4 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect 
returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git a/agent/vendor/github.com/docker/docker/client/errors.go b/agent/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 00000000000..0461af329d5 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,132 @@ +package client // import "github.com/docker/docker/client" + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. 
+func IsErrConnectionFailed(err error) bool { + _, ok := errors.Cause(err).(errConnectionFailed) + return ok +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +type notFound interface { + error + NotFound() bool // Is the error a NotFound error +} + +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. +func IsErrNotFound(err error) bool { + te, ok := err.(notFound) + return ok && te.NotFound() +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() bool { + return true +} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return notImplementedError{message: err.Error()} + default: + return err + } +} + +// unauthorizedError represents an authorization error in a remote registry. 
+type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is caused +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +type notImplementedError struct { + message string +} + +func (e notImplementedError) Error() string { + return e.message +} + +func (e notImplementedError) NotImplemented() bool { + return true +} + +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented. 
+func IsErrNotImplemented(err error) bool { + te, ok := err.(notImplementedError) + return ok && te.NotImplemented() +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil +} diff --git a/agent/vendor/github.com/docker/docker/client/events.go b/agent/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 00000000000..6e56538955e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,101 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. 
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/hijack.go b/agent/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 00000000000..0ac8248f2c7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,132 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + 
"net/http/httputil" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + conn, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). +func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + return c, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
+type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/agent/vendor/github.com/docker/docker/client/image_build.go b/agent/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 00000000000..9add3c10b34 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,138 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends request to the daemon to build images. +// The Body in the response implement an io.ReadCloser and it's up to the caller to +// close it. +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + 
"extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", 
options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + return query, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/image_create.go b/agent/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 00000000000..239380474e6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/agent/vendor/github.com/docker/docker/client/image_history.go b/agent/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 
00000000000..0151b9517f2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/agent/vendor/github.com/docker/docker/client/image_import.go b/agent/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 00000000000..c2972ea950e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. 
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/image_inspect.go b/agent/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 00000000000..2f8f6d2f141 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. 
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/image_list.go b/agent/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 00000000000..32fae27b375 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. 
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/agent/vendor/github.com/docker/docker/client/image_load.go b/agent/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 00000000000..91016e493c4 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/image_prune.go b/agent/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 00000000000..78ee3f6c496 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/image_pull.go b/agent/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 00000000000..d97aacf8c52 --- /dev/null +++ 
b/agent/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. 
+func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/agent/vendor/github.com/docker/docker/client/image_push.go b/agent/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 00000000000..a15871c2b44 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + tag := "" + name := reference.FamiliarName(ref) + + if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/agent/vendor/github.com/docker/docker/client/image_remove.go b/agent/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 00000000000..45d6e6f0dba --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. 
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return dels, wrapResponseError(err, resp, "image", imageID) + } + + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/agent/vendor/github.com/docker/docker/client/image_save.go b/agent/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 00000000000..d1314e4b22f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/image_search.go b/agent/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 00000000000..176de3c5824 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) 
(serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/agent/vendor/github.com/docker/docker/client/image_tag.go b/agent/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 00000000000..5652bfc252b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/info.go b/agent/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 00000000000..121f256ab1c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the 
docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/interface.go b/agent/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 00000000000..d190f8e58db --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,199 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) 
(types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, 
pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines 
API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options 
types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx 
context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx 
context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/agent/vendor/github.com/docker/docker/client/interface_experimental.go b/agent/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 00000000000..402ffb512cd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/agent/vendor/github.com/docker/docker/client/interface_stable.go b/agent/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 00000000000..5502cd74266 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
+var _ APIClient = &Client{} diff --git a/agent/vendor/github.com/docker/docker/client/login.go b/agent/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 00000000000..7d661819004 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_connect.go b/agent/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 00000000000..57189461341 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. 
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_create.go b/agent/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 00000000000..41da2ac6109 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_disconnect.go b/agent/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 00000000000..dd156766566 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_inspect.go b/agent/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 00000000000..025f6d8757d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_list.go b/agent/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 00000000000..f16b2f56240 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. 
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/agent/vendor/github.com/docker/docker/client/network_prune.go b/agent/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 00000000000..6418b8b6070 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/network_remove.go b/agent/vendor/github.com/docker/docker/client/network_remove.go 
new file mode 100644 index 00000000000..12741437be3 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "network", networkID) +} diff --git a/agent/vendor/github.com/docker/docker/client/node_inspect.go b/agent/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 00000000000..593b2e9f0b3 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. 
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/node_list.go b/agent/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 00000000000..9883f6fc52d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. 
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/agent/vendor/github.com/docker/docker/client/node_remove.go b/agent/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 00000000000..e7a75057155 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "node", nodeID) +} diff --git a/agent/vendor/github.com/docker/docker/client/node_update.go b/agent/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 00000000000..de32a617fb0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. 
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/ping.go b/agent/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 00000000000..dec1423e389 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "path" + + "github.com/docker/docker/api/types" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", "Builder-Version", "OS-Type" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + if serverResp.header != nil { + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + ping.OSType = serverResp.header.Get("OSType") + if bv := serverResp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + } + return ping, cli.checkResponseErr(serverResp) +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_create.go b/agent/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 00000000000..4591db50fda --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + 
"net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_disable.go b/agent/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 00000000000..01f6574f952 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_enable.go b/agent/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 00000000000..736da48bd10 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + 
resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_inspect.go b/agent/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 00000000000..0ab7beaee8f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + return nil, nil, wrapResponseError(err, resp, "plugin", name) + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_install.go b/agent/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 00000000000..13baa40a9b0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := 
reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, 
options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_list.go b/agent/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 00000000000..ade1051a970 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + if err != nil { + return plugins, 
wrapResponseError(err, resp, "plugin", "") + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_push.go b/agent/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 00000000000..d20bfe84479 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_remove.go b/agent/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 00000000000..8563bab0dbc --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "plugin", name) +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_set.go b/agent/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 00000000000..dcf5752ca2b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // 
import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/plugin_upgrade.go b/agent/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 00000000000..115cea945ba --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/agent/vendor/github.com/docker/docker/client/request.go 
b/agent/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 00000000000..bf98962fcba --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,267 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. 
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. 
(See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + return resp, cli.checkResponseErr(resp) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
+ switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.body, + N: int64(bodyMax), + } + body, err := ioutil.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + 
req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/agent/vendor/github.com/docker/docker/client/secret_create.go b/agent/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 00000000000..09fae82f2a1 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// SecretCreate creates a new Secret. 
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/secret_inspect.go b/agent/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 00000000000..e8322f45891 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/secret_list.go b/agent/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 00000000000..f6bf7ba470d --- /dev/null +++ 
b/agent/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/agent/vendor/github.com/docker/docker/client/secret_remove.go b/agent/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 00000000000..e9d52182932 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a Secret. 
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "secret", id) +} diff --git a/agent/vendor/github.com/docker/docker/client/secret_update.go b/agent/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 00000000000..164256bbc15 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/service_create.go b/agent/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 00000000000..8fadda4a906 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,166 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new Service. 
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return types.ServiceCreateResponse{}, err + } + + // ensure that the image is tagged + var imgPlatforms []swarm.Platform + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + 
service.TaskTemplate.Placement.Platforms = imgPlatforms + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// an empty string if there are no updates. 
+func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/agent/vendor/github.com/docker/docker/client/service_inspect.go b/agent/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 00000000000..de6aa22de76 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/service_list.go b/agent/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 00000000000..7d53e2b9b9e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. 
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/agent/vendor/github.com/docker/docker/client/service_logs.go b/agent/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 00000000000..906fd4059e6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/service_remove.go b/agent/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 00000000000..fe3421bec82 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. 
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "service", serviceID) +} diff --git a/agent/vendor/github.com/docker/docker/client/service_update.go b/agent/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 00000000000..5a7a61b01ff --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,92 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + distErr error + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + if err := validateServiceSpec(service); err != nil { + return types.ServiceUpdateResponse{}, err + } + + var imgPlatforms []swarm.Platform + // ensure that the image is tagged + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, 
options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + + ensureReaderClosed(resp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/session.go b/agent/vendor/github.com/docker/docker/client/session.go new file mode 100644 index 00000000000..df199f3d03a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/session.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" +) + +// DialSession returns a connection that can be used communication with daemon +func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", "/session", nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} 
diff --git a/agent/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/agent/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 00000000000..0c50c01a8c0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_init.go b/agent/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 00000000000..742ca0f0416 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_inspect.go b/agent/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 00000000000..cfaabb25b1e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_join.go b/agent/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 00000000000..a1cf0455d2b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. 
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_leave.go b/agent/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 00000000000..90ca84b363b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_unlock.go b/agent/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 00000000000..d2412f7d441 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/swarm_update.go b/agent/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 00000000000..56a5bea761e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm. 
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/agent/vendor/github.com/docker/docker/client/task_inspect.go b/agent/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 00000000000..e1c0a736da5 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// TaskInspectWithRaw returns the task information and its raw representation.. 
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/task_list.go b/agent/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 00000000000..42d20c1b8d1 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// TaskList returns the list of tasks. 
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/agent/vendor/github.com/docker/docker/client/task_logs.go b/agent/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 00000000000..6222fab577d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/transport.go b/agent/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 00000000000..5541344366b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/agent/vendor/github.com/docker/docker/client/utils.go b/agent/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 00000000000..7f3ff44eb80 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. 
+func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/version.go b/agent/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 00000000000..1989f6d6d2b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/agent/vendor/github.com/docker/docker/client/volume_create.go b/agent/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 00000000000..f1f6fcdc4a2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. 
+func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/agent/vendor/github.com/docker/docker/client/volume_inspect.go b/agent/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 00000000000..f840682d2ec --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/agent/vendor/github.com/docker/docker/client/volume_list.go b/agent/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 
00000000000..284554d67c0 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/agent/vendor/github.com/docker/docker/client/volume_prune.go b/agent/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 00000000000..70041efed8b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer 
ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/agent/vendor/github.com/docker/docker/client/volume_remove.go b/agent/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 00000000000..fc5a71d3349 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "volume", volumeID) +} diff --git a/agent/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/agent/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000000..e67cdabd22e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/agent/vendor/github.com/docker/docker/docs/static_files/contributors.png b/agent/vendor/github.com/docker/docker/docs/static_files/contributors.png new file mode 100644 index 00000000000..63c0a0c09b5 Binary files /dev/null and b/agent/vendor/github.com/docker/docker/docs/static_files/contributors.png differ diff --git a/agent/vendor/github.com/docker/docker/errdefs/defs.go b/agent/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 00000000000..e6a2275b2d7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,74 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. 
+type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists +type ErrAlreadyExists interface { + AlreadyExists() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. 
+type ErrDataLoss interface { + DataLoss() +} diff --git a/agent/vendor/github.com/docker/docker/errdefs/doc.go b/agent/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 00000000000..c211f174fc1 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). +package errdefs // import "github.com/docker/docker/errdefs" diff --git a/agent/vendor/github.com/docker/docker/errdefs/helpers.go b/agent/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 00000000000..6169c2bc62a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,240 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil { + return nil + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil { + return nil + } + return 
errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil { + return nil + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil { + return nil + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil { + return nil + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil { + return nil + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil { + return nil + } + 
return errNotModified{err} +} + +type errAlreadyExists struct{ error } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) Cause() error { + return e.error +} + +// AlreadyExists is a helper to create an error of the class with the same name from any error type +func AlreadyExists(err error) error { + if err == nil { + return nil + } + return errAlreadyExists{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil { + return nil + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil { + return nil + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil { + return nil + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil { + return nil + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func 
DataLoss(err error) error { + if err == nil { + return nil + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/agent/vendor/github.com/docker/docker/errdefs/is.go b/agent/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 00000000000..e0513331bbd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,114 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrAlreadyExists, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := 
getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsAlreadyExists returns if the passed in error is a AlreadyExists error +func IsAlreadyExists(err error) bool { + _, ok := getImplementer(err).(ErrAlreadyExists) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/agent/vendor/github.com/docker/docker/hack/generate-authors.sh b/agent/vendor/github.com/docker/docker/hack/generate-authors.sh new file mode 100755 index 00000000000..680bdb7b3f7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." 
+ +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. + EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem new file mode 120000 index 00000000000..70a3e6ce54d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/ca.pem \ No newline at end of file diff --git a/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem new file mode 120000 index 00000000000..458882026e4 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-cert.pem \ No newline at end of file diff --git a/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem new file mode 120000 index 00000000000..d5f6bbee571 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-key.pem \ No newline at end of file diff --git a/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem new file mode 120000 index 00000000000..c18601067aa --- /dev/null +++ b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1 @@ 
+../../../integration/testdata/https/server-cert.pem \ No newline at end of file diff --git a/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem new file mode 120000 index 00000000000..48b9c2df658 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/server-key.pem \ No newline at end of file diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 00000000000..466f79294b8 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 00000000000..d4bbf3c9dca --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package 
ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. 
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 00000000000..534d66ac268 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. 
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. 
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. 
+func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 00000000000..1f657bd3dca --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,157 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "io" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. 
+type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. 
+func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 00000000000..dc894f91314 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 00000000000..ecaba2e36d2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,16 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. 
+func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 00000000000..91b8d182662 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. 
+ var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/agent/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/agent/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 00000000000..61c679497da --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. 
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/agent/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/agent/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 00000000000..4177affba24 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. 
+ path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/flags.go b/agent/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 00000000000..272363b6854 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make 
sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/agent/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..ef35ef90591 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,49 @@ +// +build freebsd,cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. 
+ NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 + mntDetach = 0 +) diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/agent/vendor/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 00000000000..a1b199a31ac --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. 
+ NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. 
+ STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000000..cc6c4759083 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mount.go b/agent/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 00000000000..874aff6545c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,141 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "sort" + "strings" + "syscall" + + "github.com/sirupsen/logrus" +) + +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. 
It takes a pointer +// to an Info struct (not fully populated, currently +// only Mountpoint is filled in), and returns two booleans: +// - skip: true if the entry should be skipped +// - stop: true if parsing should be stopped after the entry +type FilterFunc func(*Info) (skip, stop bool) + +// PrefixFilter discards all entries whose mount points +// do not start with a prefix specified +func PrefixFilter(prefix string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(m.Mountpoint, prefix) + return skip, false + } +} + +// SingleEntryFilter looks for a specific entry +func SingleEntryFilter(mp string) FilterFunc { + return func(m *Info) (bool, bool) { + if m.Mountpoint == mp { + return false, true // don't skip, stop now + } + return true, false // skip, keep going + } +} + +// ParentsFilter returns all entries whose mount points +// can be parents of a path specified, discarding others. +// For example, given `/var/lib/docker/something`, entries +// like `/var/lib/docker`, `/var` and `/` are returned. +func ParentsFilter(path string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(path, m.Mountpoint) + return skip, false + } +} + +// GetMounts retrieves a list of mounts for the current running process, +// with an optional filter applied (use nil for no filter). +func GetMounts(f FilterFunc) ([]*Info, error) { + return parseMountTable(f) +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo. +func Mounted(mountpoint string) (bool, error) { + entries, err := GetMounts(SingleEntryFilter(mountpoint)) + if err != nil { + return false, err + } + + return len(entries) > 0, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". 
See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + err := unmount(target, mntDetach) + if err == syscall.EINVAL { + // ignore "not mounted" error + err = nil + } + return err +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepsest mount first. +func RecursiveUnmount(target string) error { + mounts, err := parseMountTable(PrefixFilter(target)) + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = unmount(m.Mountpoint, mntDetach) + if err != nil { + // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is + // not a mountpoint (which is ok in this case). + // Meanwhile calling `Mounted()` is very expensive. 
+ // + // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching + // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms), + // this is nicer than defining a custom value that we can refer to in each platform file. + if err == syscall.EINVAL { + continue + } + if i == len(mounts)-1 { + if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { + return err + } + continue + } + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) + continue + } + + logrus.Debugf("Unmounted %s", m.Mountpoint) + } + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 00000000000..b6ab83a2307 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,60 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := 
range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 00000000000..631daf10a5a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,57 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "golang.org/x/sys/unix" +) + +const ( + // ptypes is the set propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. 
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000000..1428dffa529 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 00000000000..ecd03fc0221 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. 
+ Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 00000000000..36c89dc1a24 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,55 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. 
+func parseMountTable(filter FilterFunc) ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + var skip, stop bool + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(&mountinfo) + if skip { + continue + } + } + + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + + out = append(out, &mountinfo) + if stop { + break + } + } + return out, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000000..c1dba01fc31 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,132 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { + s := bufio.NewScanner(r) + out := []*Info{} + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + /* + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount 
options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ + + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if numFields < 10 { + // should be at least 10 fields + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + } + + p := &Info{} + // ignore any numbers parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) + + p.Root = fields[3] + p.Mountpoint = fields[4] + p.Opts = fields[5] + + var skip, stop bool + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } + } + + // one or more optional fields, when a separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the such as + fields[7]...fields[N] (where N < sepIndex), although + as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + break + } + } + if i == numFields { + return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) + } + + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) + } + // ... 
but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. In here we ignore + // those "extra" fields caused by extra spaces. + p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] + + out = append(out, p) + if stop { + break + } + } + return out, nil +} + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable(filter FilterFunc) ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, filter) +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, nil) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000000..fd16d3ed698 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd freebsd,!cgo + +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "runtime" +) + +func parseMountTable(f FilterFunc) ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go new file mode 
100644 index 00000000000..27e0f6976ec --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +func parseMountTable(f FilterFunc) ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/agent/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000000..8a100f0bc85 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,71 @@ +package mount // import "github.com/docker/docker/pkg/mount" + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. 
+// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +// MakeMount ensures that the file or directory given is a mount point, +// bind mounting it to itself it case it is not. +func MakeMount(mnt string) error { + mounted, err := Mounted(mnt) + if err != nil { + return err + } + if mounted { + return nil + } + + return Mount(mnt, mnt, "none", "bind") +} + +func ensureMountedAs(mountPoint, options string) error { + if err := MakeMount(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/client.go b/agent/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 00000000000..0353305358f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,242 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := 
&http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// RequestOpts is the set of options that can be passed into a request +type RequestOpts struct { + Timeout time.Duration +} + +// WithRequestTimeout sets a timeout duration for plugin requests +func WithRequestTimeout(t time.Duration) func(*RequestOpts) { + return func(o *RequestOpts) { + o.Timeout = t + } +} + +// Call calls the specified method with the specified arguments for the plugin. 
+// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args, ret interface{}) error { + return c.CallWithOptions(serviceMethod, args, ret) +} + +// CallWithOptions is just like call except it takes options +func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { + var retries int + start := time.Now() + + var opts RequestOpts + for _, o := range reqOpts { + o(&opts) + } + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + 
} + + cancelRequest := func() {} + if opts.Timeout > 0 { + var ctx context.Context + ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) + req = req.WithContext(ctx) + } + + resp, err := c.http.Do(req) + if err != nil { + cancelRequest() + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + cancelRequest() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return ioutils.NewReadCloserWrapper(resp.Body, func() error { + err := resp.Body.Close() + cancelRequest() + return err + }), nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 00000000000..4b79bd29ad2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,154 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). 
+type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + dirEntries, err := ioutil.ReadDir(socketsPath) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) + if err != nil { + continue + } + } + + if fi.Mode()&os.ModeSocket != 0 { + names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) + } + } + + for _, p := range specsPaths { + dirEntries, err := ioutil.ReadDir(p) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) + if err != nil { + continue + } + + for _, info := range infos { + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { + fi = info + break + } + } + } + + ext := filepath.Ext(fi.Name()) + switch ext { + case ".spec", ".json": + plugin := strings.TrimSuffix(fi.Name(), ext) + names = append(names, plugin) + default: + } + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 00000000000..58058f28286 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 00000000000..f0af3477f42 --- /dev/null +++ 
b/agent/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/errors.go b/agent/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 00000000000..6735c304bfd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 00000000000..28c06ff6930 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,337 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. 
This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is send at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of Docker subsystems which this plugin implements. +// +// In order to use a plugins, you can use the ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "errors" + "sync" + "time" + + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +// ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. +const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. 
+type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// Protocol returns the protocol name/version used for plugins in this package. +func (p *Plugin) Protocol() string { + return ProtocolSchemeHTTPV1 +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. 
+func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + 
storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. +func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = 
append(out, pl.pl) + } + } + return out, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 00000000000..cdfbe934582 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 00000000000..ddf1d786c62 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,7 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/agent/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 00000000000..76d3bdb7127 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. 
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/agent/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 00000000000..9cb13335a8b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. 
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000000..5d80670bc0f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000000..2ee8768d315 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/agent/vendor/github.com/docker/docker/pkg/system/chtimes.go b/agent/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 00000000000..c26a4e24b66 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + return setCTime(name, mtime) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 00000000000..259138a45b5 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. 
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 00000000000..d3a115ff42b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/errors.go b/agent/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 00000000000..2573d716222 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. 
+ ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/agent/vendor/github.com/docker/docker/pkg/system/exitcode.go b/agent/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 00000000000..4ba8fe35bfd --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/filesys.go b/agent/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 00000000000..adeb1630520 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. 
+// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. 
+func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 00000000000..a1f6013f139 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,296 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. 
+ + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false, sddl) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if applyACL { + err = mkdirWithACL(path, sddl) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. . +// +// This is a modified and combined version of os.Mkdir and windows.Mkdir +// in golang to cater for creating a directory am ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. 
+func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. 
+func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 
0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/init.go b/agent/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 00000000000..a17597aaba2 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/init_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/init_unix.go new file mode 100644 index 00000000000..4996a67c12e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git 
a/agent/vendor/github.com/docker/docker/pkg/system/init_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 00000000000..4910ff69d66 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,12 @@ +package system // import "github.com/docker/docker/pkg/system" + +// lcowSupported determines if Linux Containers on Windows are supported. +var lcowSupported = false + +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= 16299 { + lcowSupported = true + } +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/lcow.go b/agent/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 00000000000..5be3e2182ba --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,32 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if strings.EqualFold(runtime.GOOS, os) { + return true + } + if LCOWSupported() && strings.EqualFold(os, "linux") { + return true + } + return false +} + +// ValidatePlatform determines if a platform structure is valid. 
+// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if runtime.GOOS == "windows" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return errors.Errorf("unsupported os %s", platform.OS) + } + } + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 00000000000..26397fb8a17 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 00000000000..f0139df8f7e --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/lstat_unix.go new file mode 100644 index 00000000000..7477995f1bf --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 00000000000..359c791d9b6 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/meminfo.go b/agent/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 00000000000..6667eb84dcb --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. 
+ SwapFree int64 +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 00000000000..d79e8b07653 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000000..56f44942680 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 00000000000..6ed93f2fe26 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/mknod.go b/agent/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 00000000000..b132482e038 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 00000000000..ec89d7a15ea --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. 
+func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/path.go b/agent/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 00000000000..a3d957afab7 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,60 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containerd/continuity/pathdriver" +) + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(os string) string { + if runtime.GOOS == "windows" { + if os != runtime.GOOS { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. +// On Windows: this does the following> +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. 
Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/path_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 00000000000..b0b93196a15 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/path_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 00000000000..188f2c2957a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. 
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p := syscall.StringToUTF16(path) + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/process_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/process_unix.go new file mode 100644 index 00000000000..0195a891b27 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd darwin + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/process_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 00000000000..4e70c97b18f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. 
+func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + p.Kill() + } +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/rm.go b/agent/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 00000000000..b3109918004 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 50 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return nil + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. 
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 00000000000..c1c0ee9f386 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 00000000000..c1c0ee9f386 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 00000000000..98c9eb18d18 --- /dev/null +++ 
b/agent/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 00000000000..756b92d1e6c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 00000000000..756b92d1e6c --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_unix.go 
b/agent/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 00000000000..3d7e2ebbefa --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,65 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 00000000000..b2456cb8870 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/agent/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 00000000000..919a412a7b3 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. 
+func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 00000000000..4ae92fa6c7d --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,193 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = iota + SE_FILE_OBJECT + SE_SERVICE + SE_PRINTER + SE_REGISTRY_KEY + SE_LMSHARE + SE_KERNEL_OBJECT + SE_WINDOW_OBJECT + SE_DS_OBJECT + SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") + procSetNamedSecurityInfo = 
modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. 
+// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. 
+ return ntuserApiset.Load() == nil +} + +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = syscall.Errno(e1) + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/umask.go b/agent/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 00000000000..9912a2babb3 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. 
+func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/agent/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 00000000000..fc62388c389 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,7 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/agent/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000000..ed1b9fad59b --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
+func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/agent/vendor/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 00000000000..0afe854589f --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000000..095e072e1df --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. 
+func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/agent/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 00000000000..66d4895b27a --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,29 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will returns a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { + return nil, nil + } + if errno == unix.ERANGE { + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/agent/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/agent/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000000..d780a90cd38 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system // import "github.com/docker/docker/pkg/system" + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. 
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/agent/vendor/github.com/docker/docker/project/CONTRIBUTING.md b/agent/vendor/github.com/docker/docker/project/CONTRIBUTING.md new file mode 120000 index 00000000000..44fcc634393 --- /dev/null +++ b/agent/vendor/github.com/docker/docker/project/CONTRIBUTING.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/agent/vendor/github.com/docker/go-connections/LICENSE b/agent/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 00000000000..b55b37bc316 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/agent/vendor/github.com/docker/go-connections/nat/nat.go b/agent/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 00000000000..4d5f5ae63af --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto 
string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", 
ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. 
+ if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/nat/parse.go b/agent/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 00000000000..892adf8c667 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/nat/sort.go b/agent/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 00000000000..ce950171e31 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { 
return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. +func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/README.md b/agent/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/agent/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/agent/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 00000000000..99846ffddb1 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = 
errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/proxy.go b/agent/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 00000000000..98e9a1dc61b --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. 
+func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/sockets.go b/agent/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 00000000000..a1d7beb4d80 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. 
+func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/agent/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 00000000000..386cf0dbbde --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. 
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/agent/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 00000000000..5c21644e1fe --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/agent/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 00000000000..53cbb6c79e4 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. 
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/agent/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 00000000000..a8b5dbb6fdc --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 00000000000..1ca0965e06e --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. 
+func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 00000000000..9ca974539a7 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" + +) + +// SystemCertPool returns an new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/agent/vendor/github.com/docker/go-connections/tlsconfig/config.go b/agent/vendor/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 00000000000..1b31bbb8b1b --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,244 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" +) + +// Options represents the information needed to create client and server TLS configurations. +type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. 
+ CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} + +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. 
+func ClientDefault() *tls.Config { + return &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + return certPool, nil +} + +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed. +func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. 
Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when tryin to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypted the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options', +// if the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. 
+func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/agent/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/agent/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 00000000000..6b4c6a7c0d0 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/agent/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/agent/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 00000000000..ee22df47cb2 --- /dev/null +++ b/agent/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/agent/vendor/github.com/docker/go-units/CONTRIBUTING.md b/agent/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 00000000000..9ea86d784ec --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/agent/vendor/github.com/docker/go-units/LICENSE b/agent/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 00000000000..b55b37bc316 --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/agent/vendor/github.com/docker/go-units/MAINTAINERS b/agent/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 00000000000..477be8b214b --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/agent/vendor/github.com/docker/go-units/README.md b/agent/vendor/github.com/docker/go-units/README.md new file mode 100644 index 00000000000..4f70a4e1345 --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/agent/vendor/github.com/docker/go-units/circle.yml b/agent/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 00000000000..9043b35478c --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/agent/vendor/github.com/docker/go-units/duration.go b/agent/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 00000000000..ba02af26dc5 --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 46 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/agent/vendor/github.com/docker/go-units/size.go b/agent/vendor/github.com/docker/go-units/size.go new file mode 100644 index 00000000000..44616c27187 --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < 
unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. 
+func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/agent/vendor/github.com/docker/go-units/ulimit.go b/agent/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 00000000000..5ac7fd825fc --- /dev/null +++ b/agent/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,118 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. 
+ "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. +func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. 
+func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/.gitignore b/agent/vendor/github.com/etcd-io/bbolt/.gitignore new file mode 100644 index 00000000000..3bcd8cbaf02 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/.gitignore @@ -0,0 +1,5 @@ +*.prof +*.test +*.swp +/bin/ +cover.out diff --git a/agent/vendor/github.com/etcd-io/bbolt/.travis.yml b/agent/vendor/github.com/etcd-io/bbolt/.travis.yml new file mode 100644 index 00000000000..257dfdfee48 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/.travis.yml @@ -0,0 +1,17 @@ +language: go +go_import_path: go.etcd.io/bbolt + +sudo: false + +go: +- 1.12 + +before_install: +- go get -v honnef.co/go/tools/... 
+- go get -v github.com/kisielk/errcheck + +script: +- make fmt +- make test +- make race +# - make errcheck diff --git a/agent/vendor/github.com/etcd-io/bbolt/LICENSE b/agent/vendor/github.com/etcd-io/bbolt/LICENSE new file mode 100644 index 00000000000..004e77fe5d2 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/agent/vendor/github.com/etcd-io/bbolt/Makefile b/agent/vendor/github.com/etcd-io/bbolt/Makefile new file mode 100644 index 00000000000..2968aaa61dd --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/Makefile @@ -0,0 +1,38 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" + @echo "array freelist test" + @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt + +test: + TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt + + @echo "array freelist test" + + @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/agent/vendor/github.com/etcd-io/bbolt/README.md b/agent/vendor/github.com/etcd-io/bbolt/README.md new file mode 100644 index 00000000000..c9e64b1a615 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/README.md @@ -0,0 +1,957 @@ +bbolt +===== + +[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) +[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) 
+[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) +[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. 
+ +## Project versioning + +bbolt uses [semantic versioning](http://semver.org). +API should not change between patch and minor releases. +New minor versions may add additional features to the API. + +## Table of Contents + + - [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) + - [Resources](#resources) + - [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) + - [Caveats & Limitations](#caveats--limitations) + - [Reading the Source](#reading-the-source) + - [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get go.etcd.io/bbolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. 
+ + +### Importing bbolt + +To use bbolt as an embedded key-value store, import as: + +```go +import bolt "go.etcd.io/bbolt" + +db, err := bolt.Open(path, 0666, nil) +if err != nil { + return err +} +defer db.Close() +``` + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + bolt "go.etcd.io/bbolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Transactions should not depend on one another and generally shouldn't be opened +simultaneously in the same goroutine. 
This can cause a deadlock as the read-write +transaction needs to periodically re-map the data file but it cannot do so while +any read-only transaction is open. Even a nested read-only transaction can cause +a deadlock, as the child transaction can block the parent transaction from releasing +its resources. + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. 
The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Println("Allocated ID %d", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err := tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. 
It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. 
+ b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. 
Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. 
The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. 
See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. 
To do this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with an initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. 
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. 
This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. 
For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). 
+ +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. 
+ +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where the key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. 
Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. 
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. 
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. +* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. 
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_386.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_386.go new file mode 100644 index 00000000000..aee25960ff9 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_386.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_amd64.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_amd64.go new file mode 100644 index 00000000000..5dd8f3f2aeb --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_arm.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_arm.go new file mode 100644 index 00000000000..aee25960ff9 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_arm64.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_arm64.go new file mode 100644 index 00000000000..810dfd55c53 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_linux.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_linux.go new file mode 100644 index 00000000000..7707bcacf03 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bbolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. 
+func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go new file mode 100644 index 00000000000..dd8ffe12393 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go @@ -0,0 +1,9 @@ +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go new file mode 100644 index 00000000000..a669703a4e3 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go @@ -0,0 +1,9 @@ +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go new file mode 100644 index 00000000000..d7f50358ef1 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bbolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc.go new file mode 100644 index 00000000000..84e545ef3e7 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go new file mode 100644 index 00000000000..a76120908cb --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go new file mode 100644 index 00000000000..c830f2fc77a --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go new file mode 100644 index 00000000000..c967613b006 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go @@ -0,0 +1,9 @@ +// +build riscv64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_s390x.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_s390x.go new file mode 100644 index 00000000000..ff2a5609707 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_unix.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix.go new file mode 100644 index 00000000000..2938fed5845 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix.go @@ -0,0 +1,93 @@ +// +build !windows,!plan9,!solaris,!aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } + for { + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. 
+func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_aix.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_aix.go new file mode 100644 index 00000000000..a64c16f5129 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_aix.go @@ -0,0 +1,90 @@ +// +build aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. 
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go new file mode 100644 index 00000000000..babad657862 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go @@ -0,0 +1,88 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. 
+func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bolt_windows.go b/agent/vendor/github.com/etcd-io/bbolt/bolt_windows.go new file mode 100644 index 00000000000..fca178bd290 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bolt_windows.go @@ -0,0 +1,141 @@ +package bbolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	var flag uint32 = flagLockFailImmediately
+	if exclusive {
+		flag |= flagLockExclusive
+	}
+	for {
+		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+		// -1..0 as the lock on the database file.
+		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+		err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
+			Offset:     m1,
+			OffsetHigh: m1,
+		})
+
+		if err == nil {
+			return nil
+		} else if err != errLockViolation {
+			return err
+		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(flockRetryTimeout)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+	err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
+		Offset:     m1,
+		OffsetHigh: m1,
+	})
+	return err
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+	if !db.readOnly {
+		// Truncate the database to the size of the mmap.
+		if err := db.file.Truncate(int64(sz)); err != nil {
+			return fmt.Errorf("truncate: %s", err)
+		}
+	}
+
+	// Open a file mapping handle.
+	sizelo := uint32(sz >> 32)
+	sizehi := uint32(sz) & 0xffffffff
+	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
+	if h == 0 {
+		return os.NewSyscallError("CreateFileMapping", errno)
+	}
+
+	// Create the memory map.
+	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+	if addr == 0 {
+		return os.NewSyscallError("MapViewOfFile", errno)
+	}
+
+	// Close mapping handle. 
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/boltsync_unix.go b/agent/vendor/github.com/etcd-io/bbolt/boltsync_unix.go new file mode 100644 index 00000000000..9587afefee4 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bbolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/bucket.go b/agent/vendor/github.com/etcd-io/bbolt/bucket.go new file mode 100644 index 00000000000..d8750b14871 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/bucket.go @@ -0,0 +1,777 @@ +package bbolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. 
+type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. 
+ return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // Unaligned access requires a copy to be made. + const unalignedMask = unsafe.Alignof(struct { + bucket + page + }{}) - 1 + unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. 
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. 
+ c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
+func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. 
+ if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * uintptr(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. 
+ lastElement := p.leafPageElement(p.count - 1) + used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += int(used) + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += int(used) + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += uintptr(lastElement.pos + lastElement.ksize) + s.BranchInuse += int(used) + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. 
+func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. 
+ var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() uintptr { + return uintptr(b.tx.db.pageSize / 4) +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. 
+ var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. 
+func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/cursor.go b/agent/vendor/github.com/etcd-io/bbolt/cursor.go new file mode 100644 index 00000000000..98aeb449a4c --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/cursor.go @@ -0,0 +1,396 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. 
+// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. 
+ c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. 
+ var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. 
+ // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. 
+func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(ref.index) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. 
+type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/db.go b/agent/vendor/github.com/etcd-io/bbolt/db.go new file mode 100644 index 00000000000..80b0095cc34 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/db.go @@ -0,0 +1,1174 @@ +package bbolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "sort" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +const pgidNoFreelist pgid = 0xffffffffffffffff + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// The time elapsed between consecutive file locking attempts. 
+const flockRetryTimeout = 50 * time.Millisecond + +// FreelistType is the type of the freelist backend +type FreelistType string + +const ( + // FreelistArrayType indicates backend freelist type is array + FreelistArrayType = FreelistType("array") + // FreelistMapType indicates backend freelist type is hashmap + FreelistMapType = FreelistType("hashmap") +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and framentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. 
In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + openFile func(string, int, os.FileMode) (*os.File, error) + file *os.File + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + stats Stats + + freelist *freelist + freelistLoad sync.Once + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. 
+ + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + db := &DB{ + opened: true, + } + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoSync = options.NoSync + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync + db.FreelistType = options.FreelistType + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + db.openFile = options.OpenFile + if db.openFile == nil { + db.openFile = os.OpenFile + } + + // Open data file and separate sync handler for metadata writes. + var err error + if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + db.path = db.file.Name() + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. 
This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + _ = db.close() + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + db.pageSize = int(m.pageSize) + } + } else { + _ = db.close() + return nil, ErrInvalid + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. 
+ if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } + + // Mark the database as opened and return. + return db, nil +} + +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist(db.FreelistType) + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = db.freelist.free_count() + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. 
+ if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. We only return an error if both meta pages fail + // validation, since meta0 failing validation means that it wasn't saved + // properly -- but we can recover using meta1. And vice-versa. + err0 := db.meta0.validate() + err1 := db.meta1.validate() + if err0 != nil && err1 != nil { + return err0 + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. +func (db *DB) mmapSize(size int) (int, error) { + // Double the size from 32KB until 1GB. + for i := uint(15); i <= 30; i++ { + if size <= 1< maxMapSize { + return 0, fmt.Errorf("mmap too large") + } + + // If larger than 1GB then grow by 1GB at a time. + sz := int64(size) + if remainder := sz % int64(maxMmapStep); remainder > 0 { + sz += int64(maxMmapStep) - remainder + } + + // Ensure that the mmap size is a multiple of the page size. + // This should always be true since we're incrementing in MBs. + pageSize := int64(db.pageSize) + if (sz % pageSize) != 0 { + sz = ((sz / pageSize) + 1) * pageSize + } + + // If we've exceeded the max size then only grow up to the max size. + if sz > maxMapSize { + sz = maxMapSize + } + + return int(sz), nil +} + +// init creates a new database file and initializes its meta pages. +func (db *DB) init() error { + // Create two meta pages on a buffer. 
+ buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + m.checksum = m.sum64() + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// It will block waiting for any open transactions to finish +// before closing the database and returning. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. 
Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. 
+ if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + db.freePages() + return t, nil +} + +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid + } + if minid > 0 { + db.freelist.release(minid - 1) + } + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. +} + +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. 
+ db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Rollback() +} + +// Batch calls fn as part of a batch. 
It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. 
+ if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + c.err <- err + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. 
+func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(txid txid, count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. 
+ if p.id = db.freelist.allocate(txid, count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = 
append(fids, i) + } + } + return fids +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures + // dramatic performance degradation if database is large and framentation in freelist is common. + // The alternative one is using hashmap, it is faster in almost all circumstances + // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. + // The default type is array + FreelistType FreelistType + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. 
+ NoSync bool + + // OpenFile is used to open files. It defaults to os.OpenFile. This option + // is useful for writing hermetic tests. + OpenFile func(string, int, os.FileMode) (*os.File, error) +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, + FreelistType: FreelistArrayType, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. 
+func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/doc.go b/agent/vendor/github.com/etcd-io/bbolt/doc.go new file mode 100644 index 00000000000..95f25f01c6a --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/doc.go @@ -0,0 +1,44 @@ +/* +package bbolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. 
+ +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bbolt diff --git a/agent/vendor/github.com/etcd-io/bbolt/errors.go b/agent/vendor/github.com/etcd-io/bbolt/errors.go new file mode 100644 index 00000000000..48758ca5770 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/errors.go @@ -0,0 +1,71 @@ +package bbolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. 
+ ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. 
+ ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/agent/vendor/github.com/etcd-io/bbolt/freelist.go b/agent/vendor/github.com/etcd-io/bbolt/freelist.go new file mode 100644 index 00000000000..697a46968ba --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/freelist.go @@ -0,0 +1,404 @@ +package bbolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[pgid]struct{} + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + freelistType FreelistType // freelist type + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. 
+ freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[pgid]uint64 // key is start pgid, value is its span size + backwardMap map[pgid]uint64 // key is end pgid, value is its span size + allocate func(txid txid, n int) pgid // the freelist allocate func + free_count func() int // the function which gives you free page number + mergeSpans func(ids pgids) // the mergeSpan func + getFreePageIDs func() []pgid // get free pgids func + readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist(freelistType FreelistType) *freelist { + f := &freelist{ + freelistType: freelistType, + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]bool), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[pgid]uint64), + backwardMap: make(map[pgid]uint64), + } + + if freelistType == FreelistMapType { + f.allocate = f.hashmapAllocate + f.free_count = f.hashmapFreeCount + f.mergeSpans = f.hashmapMergeSpans + f.getFreePageIDs = f.hashmapGetFreePageIDs + f.readIDs = f.hashmapReadIDs + } else { + f.allocate = f.arrayAllocate + f.free_count = f.arrayFreeCount + f.mergeSpans = f.arrayMergeSpans + f.getFreePageIDs = f.arrayGetFreePageIDs + f.readIDs = f.arrayReadIDs + } + + return f +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. 
+ n++ + } + return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// arrayFreeCount returns count of free pages(array version) +func (f *freelist) arrayFreeCount() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.getFreePageIDs(), m) +} + +// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) arrayAllocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. 
+ for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = true + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + f.mergeSpans(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + f.mergeSpans(m) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + // Copy the list of page ids from the freelist. 
+ if count == 0 { + f.ids = nil + } else { + var ids []pgid + data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) + unsafeSlice(unsafe.Pointer(&ids), data, count) + + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]pgid, count) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(pgids(idsCopy)) + + f.readIDs(idsCopy) + } +} + +// arrayReadIDs initializes the freelist from a given list of ids. +func (f *freelist) arrayReadIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +func (f *freelist) arrayGetFreePageIDs() []pgid { + return f.ids +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + l := f.count() + if l == 0 { + p.count = uint16(l) + } else if l < 0xFFFF { + p.count = uint16(l) + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l) + f.copyall(ids) + } else { + p.count = 0xFFFF + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids[0] = pgid(l) + f.copyall(ids[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. 
+ pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.getFreePageIDs() { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// noSyncReload reads the freelist from pgids and filters out pending items. +func (f *freelist) noSyncReload(pgids []pgid) { + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range pgids { + if !pcache[id] { + a = append(a, id) + } + } + + f.readIDs(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. 
+func (f *freelist) reindex() { + ids := f.getFreePageIDs() + f.cache = make(map[pgid]bool, len(ids)) + for _, id := range ids { + f.cache[id] = true + } + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + f.cache[pendingID] = true + } + } +} + +// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array +func (f *freelist) arrayMergeSpans(ids pgids) { + sort.Sort(ids) + f.ids = pgids(f.ids).merge(ids) +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/freelist_hmap.go b/agent/vendor/github.com/etcd-io/bbolt/freelist_hmap.go new file mode 100644 index 00000000000..02ef2be0441 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/freelist_hmap.go @@ -0,0 +1,178 @@ +package bbolt + +import "sort" + +// hashmapFreeCount returns count of free pages(hashmap version) +func (f *freelist) hashmapFreeCount() int { + // use the forwardmap to get the total count + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend +func (f *freelist) hashmapAllocate(txid txid, n int) pgid { + if n == 0 { + return 0 + } + + // if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, uint64(size)) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+pgid(n), remain) + + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, pid+pgid(i)) + } + return pid + } + } + + return 0 +} + +// hashmapReadIDs reads pgids as input an initial the 
freelist(hashmap version) +func (f *freelist) hashmapReadIDs(pgids []pgid) { + f.init(pgids) + + // Rebuild the page cache. + f.reindex() +} + +// hashmapGetFreePageIDs returns the sorted free page ids +func (f *freelist) hashmapGetFreePageIDs() []pgid { + count := f.free_count() + if count == 0 { + return nil + } + + m := make([]pgid, 0, count) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + m = append(m, start+pgid(i)) + } + } + sort.Sort(pgids(m)) + + return m +} + +// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans +func (f *freelist) hashmapMergeSpans(ids pgids) { + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *freelist) mergeWithExistingSpan(pid pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - pgid(preSize) + f.delSpan(start, preSize) + + newStart -= pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +func (f *freelist) addSpan(start pgid, size uint64) { + f.backwardMap[start-1+pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} +} + +func (f *freelist) delSpan(start pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } +} + +// initial from pgids using when use hashmap version +// pgids must be sorted +func (f *freelist) 
init(pgids []pgid) { + if len(pgids) == 0 { + return + } + + size := uint64(1) + start := pgids[0] + + if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[pgid]uint64) + f.backwardMap = make(map[pgid]uint64) + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/go.mod b/agent/vendor/github.com/etcd-io/bbolt/go.mod new file mode 100644 index 00000000000..c2366daef6b --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/go.mod @@ -0,0 +1,5 @@ +module go.etcd.io/bbolt + +go 1.12 + +require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 diff --git a/agent/vendor/github.com/etcd-io/bbolt/go.sum b/agent/vendor/github.com/etcd-io/bbolt/go.sum new file mode 100644 index 00000000000..4ad15a488c7 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/agent/vendor/github.com/etcd-io/bbolt/node.go b/agent/vendor/github.com/etcd-io/bbolt/node.go new file mode 100644 index 00000000000..73988b5c4c0 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/node.go @@ -0,0 +1,602 @@ +package bbolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. 
+func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + } + return int(sz) +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v uintptr) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() uintptr { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. 
+func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. 
+func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.key) + len(item.value) + b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. 
+ if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.key) + copy(b[l:], item.value) + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize uintptr) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize uintptr) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. 
+ // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz uintptr) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = uintptr(i) + inode := n.inodes[i] + elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. 
+ n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(uintptr(tx.db.pageSize)) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. 
+ child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) 
+ n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { + return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 +} + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/agent/vendor/github.com/etcd-io/bbolt/page.go b/agent/vendor/github.com/etcd-io/bbolt/page.go new file mode 100644 index 00000000000..c9a158fb066 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/page.go @@ -0,0 +1,204 @@ +package bbolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = unsafe.Sizeof(page{}) + +const minKeysPerPage = 2 + +const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 +} + +// typ returns a human readable page type string used for debugging. 
+func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + leafPageElementSize, int(index))) +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + var elems []leafPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + var elems []branchPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *page) hexdump(n int) { + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. 
+func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/tx.go b/agent/vendor/github.com/etcd-io/bbolt/tx.go new file mode 100644 index 00000000000..4b1a64a8b8a --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/tx.go @@ -0,0 +1,724 @@ +package bbolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. 
+type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. 
+func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + return fn(k, tx.root.Bucket(k)) + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. 
+func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + } + + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { + return err + } + } else { + tx.meta.freelist = pgidNoFreelist + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. 
This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.nonPhysicalRollback() + return nil +} + +// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk. +func (tx *Tx) nonPhysicalRollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + } + tx.close() +} + +// rollback needs to reload the free pages from disk in case some system error happens like fsync error. +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + if !tx.db.hasSyncedFreelist() { + // Reconstruct free page list by scanning the DB to get the whole free page list. + // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.noSyncReload(tx.db.freepages()) + } else { + // Read free page list from freelist page. + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. 
+ var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. +// +// Deprecated; Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. 
+ page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, nil +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. 
+ reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. 
+func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(tx.meta.txid, count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount += count + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) + offset := int64(p.id) * int64(tx.db.pageSize) + var written uintptr + + // Write out page in "max allocation" sized chunks. + for { + sz := rem + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + rem -= sz + if rem == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + written += uintptr(sz) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. 
+func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. + tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. 
+type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. + CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/agent/vendor/github.com/etcd-io/bbolt/unsafe.go b/agent/vendor/github.com/etcd-io/bbolt/unsafe.go new file mode 100644 index 00000000000..c0e50375007 --- /dev/null +++ b/agent/vendor/github.com/etcd-io/bbolt/unsafe.go @@ -0,0 +1,39 @@ +package bbolt + +import ( + "reflect" + "unsafe" +) + +func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset) +} + +func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) +} + +func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { + // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices + // + // This memory is not allocated from C, but it is unmanaged by Go's + // garbage collector and should behave similarly, and the compiler + // should produce similar code. Note that this conversion allows a + // subslice to begin after the base address, with an optional offset, + // while the URL above does not cover this case and only slices from + // index 0. However, the wiki never says that the address must be to + // the beginning of a C allocation (or even that malloc was used at + // all), so this is believed to be correct. 
+ return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] +} + +// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by +// the slice parameter. This helper should be used over other direct +// manipulation of reflect.SliceHeader to prevent misuse, namely, converting +// from reflect.SliceHeader to a Go slice type. +func unsafeSlice(slice, data unsafe.Pointer, len int) { + s := (*reflect.SliceHeader)(slice) + s.Data = uintptr(data) + s.Cap = len + s.Len = len +} diff --git a/agent/vendor/github.com/godbus/dbus/.travis.yml b/agent/vendor/github.com/godbus/dbus/.travis.yml new file mode 100644 index 00000000000..2e1bbb78c39 --- /dev/null +++ b/agent/vendor/github.com/godbus/dbus/.travis.yml @@ -0,0 +1,40 @@ +dist: precise +language: go +go_import_path: github.com/godbus/dbus +sudo: true + +go: + - 1.6.3 + - 1.7.3 + - tip + +env: + global: + matrix: + - TARGET=amd64 + - TARGET=arm64 + - TARGET=arm + - TARGET=386 + - TARGET=ppc64le + +matrix: + fast_finish: true + allow_failures: + - go: tip + exclude: + - go: tip + env: TARGET=arm + - go: tip + env: TARGET=arm64 + - go: tip + env: TARGET=386 + - go: tip + env: TARGET=ppc64le + +addons: + apt: + packages: + - dbus + - dbus-x11 + +before_install: diff --git a/agent/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/agent/vendor/github.com/godbus/dbus/CONTRIBUTING.md new file mode 100644 index 00000000000..c88f9b2bdd0 --- /dev/null +++ b/agent/vendor/github.com/godbus/dbus/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). 
+- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +